patches for DPDK stable branches
* [dpdk-stable] [PATCH] net/mlx5: fix loopback for DV queue
@ 2021-05-12 14:36 Bing Zhao
  2021-05-12 14:48 ` Thomas Monjalon
                   ` (2 more replies)
  0 siblings, 3 replies; 13+ messages in thread
From: Bing Zhao @ 2021-05-12 14:36 UTC (permalink / raw)
  To: viacheslavo, matan, thomas; +Cc: dev, orika, rasland, stable

When switching to the Devx interface, the kernel driver behavior may
be different from using Verbs. The Tx loopback cannot work properly
even if the Tx and Rx queues are configured with loopback attribute.
To support self loopback for Tx, a Verbs dummy queue pair needs to
be created to trigger the kernel to enable the loopback.

This is only required when TIR is created for Rx and loopback is
needed. Only CQ and QP are needed for this case, no WQ(RQ) needs to
be created.
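
(For illustration, a minimal self-contained sketch of this idea in plain
rdma-core calls is shown below; "ctx" and "pd" are assumed to be an already
opened device context and protection domain, and the patch hunks that follow,
which go through the mlx5 glue layer, remain the authoritative implementation.)

/*
 * Illustrative sketch only (not part of the patch): create the minimal
 * CQ + RAW_PACKET QP that makes the kernel enable TIR self loopback.
 */
#include <errno.h>
#include <infiniband/verbs.h>
#include <infiniband/mlx5dv.h>

static int
dummy_lb_qp_create(struct ibv_context *ctx, struct ibv_pd *pd,
		   struct ibv_cq **cq_out, struct ibv_qp **qp_out)
{
	struct ibv_cq_init_attr_ex cq_attr = { .cqe = 1 };
	struct mlx5dv_cq_init_attr cq_dv_attr = { .comp_mask = 0 };
	struct mlx5dv_qp_init_attr qp_dv_attr = {
		/* Allow packets sent from the NIC to loop back w/o MAC check. */
		.comp_mask = MLX5DV_QP_INIT_ATTR_MASK_QP_CREATE_FLAGS,
		.create_flags = MLX5DV_QP_CREATE_TIR_ALLOW_SELF_LOOPBACK_UC,
	};

	/* A one-entry CQ is enough; no WQ (RQ) is ever posted to. */
	*cq_out = ibv_cq_ex_to_cq(mlx5dv_create_cq(ctx, &cq_attr, &cq_dv_attr));
	if (*cq_out == NULL)
		return -errno;
	*qp_out = mlx5dv_create_qp(ctx,
			&(struct ibv_qp_init_attr_ex){
				.qp_type = IBV_QPT_RAW_PACKET,
				.comp_mask = IBV_QP_INIT_ATTR_PD,
				.pd = pd,
				.send_cq = *cq_out,
				.recv_cq = *cq_out,
				.cap.max_recv_wr = 1,
			},
			&qp_dv_attr);
	if (*qp_out == NULL) {
		ibv_destroy_cq(*cq_out);
		*cq_out = NULL;
		return -errno;
	}
	/* Keeping the QP allocated is all that is needed to enable loopback. */
	return 0;
}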

This requirement comes from bugzilla 312, more details can refer to:
https://bugs.dpdk.org/show_bug.cgi?id=645

Bugzilla ID: 645

Fixes: 6deb19e1b2d2 ("net/mlx5: separate Rx queue object creations")
Cc: stable@dpdk.org

Signed-off-by: Bing Zhao <bingz@nvidia.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
---
 drivers/net/mlx5/linux/mlx5_verbs.c | 119 ++++++++++++++++++++++++++++
 drivers/net/mlx5/linux/mlx5_verbs.h |   2 +
 drivers/net/mlx5/mlx5.h             |   9 +++
 drivers/net/mlx5/mlx5_trigger.c     |   9 +++
 4 files changed, 139 insertions(+)

diff --git a/drivers/net/mlx5/linux/mlx5_verbs.c b/drivers/net/mlx5/linux/mlx5_verbs.c
index 0b0759f33f..2ca94b5712 100644
--- a/drivers/net/mlx5/linux/mlx5_verbs.c
+++ b/drivers/net/mlx5/linux/mlx5_verbs.c
@@ -1055,6 +1055,125 @@ mlx5_txq_ibv_obj_new(struct rte_eth_dev *dev, uint16_t idx)
 	return -rte_errno;
 }
 
+/*
+ * Create the dummy QP with minimal resources for loopback.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_rxq_ibv_obj_dummy_lb_create(struct rte_eth_dev *dev)
+{
+#if defined(HAVE_IBV_DEVICE_TUNNEL_SUPPORT) && defined(HAVE_IBV_FLOW_DV_SUPPORT)
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_dev_ctx_shared *sh = priv->sh;
+	struct ibv_context *ctx = sh->ctx;
+	struct mlx5dv_qp_init_attr qp_init_attr = {0};
+	struct {
+		struct ibv_cq_init_attr_ex ibv;
+		struct mlx5dv_cq_init_attr mlx5;
+	} cq_attr = {{0}};
+
+	if (dev->data->dev_conf.lpbk_mode) {
+		/* Allow packet sent from NIC loop back w/o source MAC check. */
+		qp_init_attr.comp_mask |=
+				MLX5DV_QP_INIT_ATTR_MASK_QP_CREATE_FLAGS;
+		qp_init_attr.create_flags |=
+				MLX5DV_QP_CREATE_TIR_ALLOW_SELF_LOOPBACK_UC;
+	} else {
+		return 0;
+	}
+	/* Only need to check refcnt, 0 after "sh" is allocated. */
+	if (!!(__atomic_fetch_add(&sh->self_lb.refcnt, 1, __ATOMIC_RELAXED))) {
+		MLX5_ASSERT(sh->self_lb.ibv_cq && sh->self_lb.qp);
+		priv->lb_used = 1;
+		return 0;
+	}
+	cq_attr.ibv = (struct ibv_cq_init_attr_ex){
+		.cqe = 1,
+		.channel = NULL,
+		.comp_mask = 0,
+	};
+	cq_attr.mlx5 = (struct mlx5dv_cq_init_attr){
+		.comp_mask = 0,
+	};
+	/* Only CQ is needed, no WQ(RQ) is required in this case. */
+	sh->self_lb.ibv_cq = mlx5_glue->cq_ex_to_cq(mlx5_glue->dv_create_cq(ctx,
+							&cq_attr.ibv,
+							&cq_attr.mlx5));
+	if (!sh->self_lb.ibv_cq) {
+		DRV_LOG(ERR, "Port %u cannot allocate CQ for loopback.",
+			dev->data->port_id);
+		rte_errno = errno;
+		goto error;
+	}
+	sh->self_lb.qp = mlx5_glue->dv_create_qp(ctx,
+				&(struct ibv_qp_init_attr_ex){
+					.qp_type = IBV_QPT_RAW_PACKET,
+					.comp_mask = IBV_QP_INIT_ATTR_PD,
+					.pd = sh->pd,
+					.send_cq = sh->self_lb.ibv_cq,
+					.recv_cq = sh->self_lb.ibv_cq,
+					.cap.max_recv_wr = 1,
+				},
+				&qp_init_attr);
+	if (!sh->self_lb.qp) {
+		DRV_LOG(DEBUG, "Port %u cannot allocate QP for loopback.",
+			dev->data->port_id);
+		rte_errno = errno;
+		goto error;
+	}
+	priv->lb_used = 1;
+	return 0;
+error:
+	if (sh->self_lb.ibv_cq) {
+		claim_zero(mlx5_glue->destroy_cq(sh->self_lb.ibv_cq));
+		sh->self_lb.ibv_cq = NULL;
+	}
+	(void)__atomic_sub_fetch(&sh->self_lb.refcnt, 1, __ATOMIC_RELAXED);
+	return -rte_errno;
+#else
+	RTE_SET_USED(dev);
+	return 0;
+#endif
+}
+
+/*
+ * Release the dummy queue resources for loopback.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ */
+void
+mlx5_rxq_ibv_obj_dummy_lb_release(struct rte_eth_dev *dev)
+{
+#if defined(HAVE_IBV_DEVICE_TUNNEL_SUPPORT) && defined(HAVE_IBV_FLOW_DV_SUPPORT)
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_dev_ctx_shared *sh = priv->sh;
+
+	if (!priv->lb_used)
+		return;
+	MLX5_ASSERT(__atomic_load_n(&sh->self_lb.refcnt, __ATOMIC_RELAXED));
+	if (!(__atomic_sub_fetch(&sh->self_lb.refcnt, 1, __ATOMIC_RELAXED))) {
+		if (sh->self_lb.qp) {
+			claim_zero(mlx5_glue->destroy_qp(sh->self_lb.qp));
+			sh->self_lb.qp = NULL;
+		}
+		if (sh->self_lb.ibv_cq) {
+			claim_zero(mlx5_glue->destroy_cq(sh->self_lb.ibv_cq));
+			sh->self_lb.ibv_cq = NULL;
+		}
+	}
+	priv->lb_used = 0;
+#else
+	RTE_SET_USED(dev);
+	return;
+#endif
+}
+
 /**
  * Release an Tx verbs queue object.
  *
diff --git a/drivers/net/mlx5/linux/mlx5_verbs.h b/drivers/net/mlx5/linux/mlx5_verbs.h
index 76a79bf4f4..f7e8e2fe98 100644
--- a/drivers/net/mlx5/linux/mlx5_verbs.h
+++ b/drivers/net/mlx5/linux/mlx5_verbs.h
@@ -9,6 +9,8 @@
 
 int mlx5_txq_ibv_obj_new(struct rte_eth_dev *dev, uint16_t idx);
 void mlx5_txq_ibv_obj_release(struct mlx5_txq_obj *txq_obj);
+int mlx5_rxq_ibv_obj_dummy_lb_create(struct rte_eth_dev *dev);
+void mlx5_rxq_ibv_obj_dummy_lb_release(struct rte_eth_dev *dev);
 
 /* Verbs ops struct */
 extern const struct mlx5_mr_ops mlx5_mr_verbs_ops;
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 7eca6a6fa6..ad57a4f5b0 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -287,6 +287,13 @@ struct mlx5_drop {
 	struct mlx5_rxq_obj *rxq; /* Rx queue object. */
 };
 
+/* Loopback dummy queue resources required due to Verbs API. */
+struct mlx5_lb_ctx {
+	struct ibv_qp *qp; /* QP object. */
+	void *ibv_cq; /* Completion queue. */
+	uint16_t refcnt; /* Reference count for representors. */
+};
+
 #define MLX5_COUNTERS_PER_POOL 512
 #define MLX5_MAX_PENDING_QUERIES 4
 #define MLX5_CNT_CONTAINER_RESIZE 64
@@ -1124,6 +1131,7 @@ struct mlx5_dev_ctx_shared {
 	/* Meter management structure. */
 	struct mlx5_aso_ct_pools_mng *ct_mng;
 	/* Management data for ASO connection tracking. */
+	struct mlx5_lb_ctx self_lb; /* QP to enable self loopback for Devx. */
 	struct mlx5_dev_shared_port port[]; /* per device port data array. */
 };
 
@@ -1312,6 +1320,7 @@ struct mlx5_priv {
 	unsigned int sampler_en:1; /* Whether support sampler. */
 	unsigned int mtr_en:1; /* Whether support meter. */
 	unsigned int mtr_reg_share:1; /* Whether support meter REG_C share. */
+	unsigned int lb_used:1; /* Loopback queue is referred to. */
 	uint16_t domain_id; /* Switch domain identifier. */
 	uint16_t vport_id; /* Associated VF vport index (if any). */
 	uint32_t vport_meta_tag; /* Used for vport index match ove VF LAG. */
diff --git a/drivers/net/mlx5/mlx5_trigger.c b/drivers/net/mlx5/mlx5_trigger.c
index eb8c99cd93..32ab90c9b3 100644
--- a/drivers/net/mlx5/mlx5_trigger.c
+++ b/drivers/net/mlx5/mlx5_trigger.c
@@ -19,6 +19,7 @@
 #include "mlx5_tx.h"
 #include "mlx5_utils.h"
 #include "rte_pmd_mlx5.h"
+#include "mlx5_verbs.h"
 
 /**
  * Stop traffic on Tx queues.
@@ -1068,6 +1069,12 @@ mlx5_dev_start(struct rte_eth_dev *dev)
 			dev->data->port_id, strerror(rte_errno));
 		goto error;
 	}
+	if (priv->config.devx && priv->config.dv_flow_en &&
+	    priv->config.dest_tir) {
+		ret = mlx5_rxq_ibv_obj_dummy_lb_create(dev);
+		if (ret)
+			goto error;
+	}
 	ret = mlx5_txq_start(dev);
 	if (ret) {
 		DRV_LOG(ERR, "port %u Tx queue allocation failed: %s",
@@ -1148,6 +1155,7 @@ mlx5_dev_start(struct rte_eth_dev *dev)
 	mlx5_traffic_disable(dev);
 	mlx5_txq_stop(dev);
 	mlx5_rxq_stop(dev);
+	mlx5_rxq_ibv_obj_dummy_lb_release(dev);
 	mlx5_txpp_stop(dev); /* Stop last. */
 	rte_errno = ret; /* Restore rte_errno. */
 	return -rte_errno;
@@ -1186,6 +1194,7 @@ mlx5_dev_stop(struct rte_eth_dev *dev)
 	priv->sh->port[priv->dev_port - 1].devx_ih_port_id = RTE_MAX_ETHPORTS;
 	mlx5_txq_stop(dev);
 	mlx5_rxq_stop(dev);
+	mlx5_rxq_ibv_obj_dummy_lb_release(dev);
 	mlx5_txpp_stop(dev);
 
 	return 0;
-- 
2.25.1



* Re: [dpdk-stable] [PATCH] net/mlx5: fix loopback for DV queue
  2021-05-12 14:36 [dpdk-stable] [PATCH] net/mlx5: fix loopback for DV queue Bing Zhao
@ 2021-05-12 14:48 ` Thomas Monjalon
  2021-05-12 14:51   ` Bing Zhao
  2021-05-13 11:13 ` [dpdk-stable] [PATCH v2] " Bing Zhao
  2021-05-17 15:18 ` [dpdk-stable] [PATCH v3] " Bing Zhao
  2 siblings, 1 reply; 13+ messages in thread
From: Thomas Monjalon @ 2021-05-12 14:48 UTC (permalink / raw)
  To: Bing Zhao; +Cc: viacheslavo, matan, dev, orika, rasland, stable

12/05/2021 16:36, Bing Zhao:
> When switching to the Devx interface, the kernel driver behavior may

What do you mean by "switching"?

> be different from using Verbs. The Tx loopback cannot work properly
> even if the Tx and Rx queues are configured with loopback attribute.
> To support self loopback for Tx, a Verbs dummy queue pair needs to
> be created to trigger the kernel to enable the loopback.
> 
> This is only required when TIR is created for Rx and loopback is
> needed. Only CQ and QP are needed for this case, no WQ(RQ) needs to
> be created.
> 
> This requirement comes from bugzilla 312, more details can refer to:

It is not 312.

> https://bugs.dpdk.org/show_bug.cgi?id=645

You can remove above lines, because the tag below is enough.
> 
> Bugzilla ID: 645
> 
> Fixes: 6deb19e1b2d2 ("net/mlx5: separate Rx queue object creations")
> Cc: stable@dpdk.org
> 
> Signed-off-by: Bing Zhao <bingz@nvidia.com>
> Acked-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>





* Re: [dpdk-stable] [PATCH] net/mlx5: fix loopback for DV queue
  2021-05-12 14:48 ` Thomas Monjalon
@ 2021-05-12 14:51   ` Bing Zhao
  2021-05-12 15:30     ` Thomas Monjalon
  0 siblings, 1 reply; 13+ messages in thread
From: Bing Zhao @ 2021-05-12 14:51 UTC (permalink / raw)
  To: NBU-Contact-Thomas Monjalon
  Cc: Slava Ovsiienko, Matan Azrad, dev, Ori Kam, Raslan Darawsheh, stable

Hi Thomas,

> -----Original Message-----
> From: Thomas Monjalon <thomas@monjalon.net>
> Sent: Wednesday, May 12, 2021 10:49 PM
> To: Bing Zhao <bingz@nvidia.com>
> Cc: Slava Ovsiienko <viacheslavo@nvidia.com>; Matan Azrad
> <matan@nvidia.com>; dev@dpdk.org; Ori Kam <orika@nvidia.com>; Raslan
> Darawsheh <rasland@nvidia.com>; stable@dpdk.org
> Subject: Re: [PATCH] net/mlx5: fix loopback for DV queue
> 
> 12/05/2021 16:36, Bing Zhao:
> > When switching to the Devx interface, the kernel driver behavior
> may
> 
> What do you mean by "switching"?

I mean when changing the Rxq from Verbs API to Devx API.

> 
> > be different from using Verbs. The Tx loopback cannot work
> properly
> > even if the Tx and Rx queues are configured with loopback
> attribute.
> > To support self loopback for Tx, a Verbs dummy queue pair needs to
> be
> > created to trigger the kernel to enable the loopback.
> >
> > This is only required when TIR is created for Rx and loopback is
> > needed. Only CQ and QP are needed for this case, no WQ(RQ) needs
> to be
> > created.
> >
> > This requirement comes from bugzilla 312, more details can refer
> to:
> 
> It is not 312.
> 
> > https://bugs.dpdk.org/show_bug.cgi?id=645

I will change it.

> 
> You can remove above lines, because the tag below is enough.
> >
> > Bugzilla ID: 645
> >
> > Fixes: 6deb19e1b2d2 ("net/mlx5: separate Rx queue object
> creations")
> > Cc: stable@dpdk.org
> >
> > Signed-off-by: Bing Zhao <bingz@nvidia.com>
> > Acked-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
> 
> 

Thanks



* Re: [dpdk-stable] [PATCH] net/mlx5: fix loopback for DV queue
  2021-05-12 14:51   ` Bing Zhao
@ 2021-05-12 15:30     ` Thomas Monjalon
  2021-05-12 15:48       ` Slava Ovsiienko
  0 siblings, 1 reply; 13+ messages in thread
From: Thomas Monjalon @ 2021-05-12 15:30 UTC (permalink / raw)
  To: Bing Zhao
  Cc: Slava Ovsiienko, Matan Azrad, dev, Ori Kam, Raslan Darawsheh, stable

12/05/2021 16:51, Bing Zhao:
> From: Thomas Monjalon <thomas@monjalon.net>
> > 12/05/2021 16:36, Bing Zhao:
> > > When switching to the Devx interface, the kernel driver behavior
> > may
> > 
> > What do you mean by "switching"?
> 
> I mean when changing the Rxq from Verbs API to Devx API.

How do we switch?

> > > be different from using Verbs. The Tx loopback cannot work
> > properly
> > > even if the Tx and Rx queues are configured with loopback
> > attribute.
> > > To support self loopback for Tx, a Verbs dummy queue pair needs to
> > be
> > > created to trigger the kernel to enable the loopback.
> > >
> > > This is only required when TIR is created for Rx and loopback is
> > > needed. Only CQ and QP are needed for this case, no WQ(RQ) needs
> > to be
> > > created.





* Re: [dpdk-stable] [PATCH] net/mlx5: fix loopback for DV queue
  2021-05-12 15:30     ` Thomas Monjalon
@ 2021-05-12 15:48       ` Slava Ovsiienko
  2021-05-12 16:03         ` Thomas Monjalon
  0 siblings, 1 reply; 13+ messages in thread
From: Slava Ovsiienko @ 2021-05-12 15:48 UTC (permalink / raw)
  To: NBU-Contact-Thomas Monjalon, Bing Zhao
  Cc: Matan Azrad, dev, Ori Kam, Raslan Darawsheh, stable

Hi, Thomas

> How do we switch?

Historically mlx5 created Queues and other HW objects with Verbs.
Now we are migrating ("switched") to DevX - queues are being created with DevX calls.
If there is no E-Switch - the new DevX approach is engaged,
for E-Switch configurations we still use Verbs.
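
(In code terms, both back-ends fill the same mlx5_obj_ops callback table and
one table is bound per device at spawn time; the v3 patch later in this
thread hooks its new callbacks into exactly that table. A condensed, slightly
simplified view follows; the selection condition is approximated.)

	/* Condensed from mlx5_dev_spawn(); condition approximated. */
	if (config->devx && config->dv_flow_en && config->dest_tir) {
		priv->obj_ops = devx_obj_ops;	/* queues created via DevX */
		/* ... DevX-specific overrides, see the v3 patch below ... */
	} else {
		priv->obj_ops = ibv_obj_ops;	/* queues created via Verbs */
	}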

With best regards,
Slava

> -----Original Message-----
> From: Thomas Monjalon <thomas@monjalon.net>
> Sent: Wednesday, May 12, 2021 18:31
> To: Bing Zhao <bingz@nvidia.com>
> Cc: Slava Ovsiienko <viacheslavo@nvidia.com>; Matan Azrad
> <matan@nvidia.com>; dev@dpdk.org; Ori Kam <orika@nvidia.com>; Raslan
> Darawsheh <rasland@nvidia.com>; stable@dpdk.org
> Subject: Re: [PATCH] net/mlx5: fix loopback for DV queue
> 
> 12/05/2021 16:51, Bing Zhao:
> > From: Thomas Monjalon <thomas@monjalon.net>
> > > 12/05/2021 16:36, Bing Zhao:
> > > > When switching to the Devx interface, the kernel driver behavior
> > > may
> > >
> > > What do you mean by "switching"?
> >
> > I mean when changing the Rxq from Verbs API to Devx API.
> 
> 
> > > > be different from using Verbs. The Tx loopback cannot work
> > > properly
> > > > even if the Tx and Rx queues are configured with loopback
> > > attribute.
> > > > To support self loopback for Tx, a Verbs dummy queue pair needs to
> > > be
> > > > created to trigger the kernel to enable the loopback.
> > > >
> > > > This is only required when TIR is created for Rx and loopback is
> > > > needed. Only CQ and QP are needed for this case, no WQ(RQ) needs
> > > to be
> > > > created.
> 
> 



* Re: [dpdk-stable] [PATCH] net/mlx5: fix loopback for DV queue
  2021-05-12 15:48       ` Slava Ovsiienko
@ 2021-05-12 16:03         ` Thomas Monjalon
  2021-05-13 11:14           ` Bing Zhao
  0 siblings, 1 reply; 13+ messages in thread
From: Thomas Monjalon @ 2021-05-12 16:03 UTC (permalink / raw)
  To: Bing Zhao, Slava Ovsiienko
  Cc: Matan Azrad, dev, Ori Kam, Raslan Darawsheh, stable

12/05/2021 17:48, Slava Ovsiienko:
> Hi, Thomas
> 
> > How do we switch?
> 
> Historically mlx5 created Queues and other HW objects with Verbs.
> Now we are migrating ("switched") to DevX - queues are being created with DevX calls.
> If there is no E-Switch - the new DevX approach is engaged,
> for E-Switch configurations we still use Verbs.

OK this is what I thought, but it is not clear in the commit message.
We could think it is the user switching a queue :)
Please reword by comparing Verbs and DevX without saying "switch".


> From: Thomas Monjalon <thomas@monjalon.net>
> > 12/05/2021 16:51, Bing Zhao:
> > > From: Thomas Monjalon <thomas@monjalon.net>
> > > > 12/05/2021 16:36, Bing Zhao:
> > > > > When switching to the Devx interface, the kernel driver behavior
> > > > may
> > > >
> > > > What do you mean by "switching"?
> > >
> > > I mean when changing the Rxq from Verbs API to Devx API.
> > 
> > 
> > > > > be different from using Verbs. The Tx loopback cannot work
> > > > properly
> > > > > even if the Tx and Rx queues are configured with loopback
> > > > attribute.
> > > > > To support self loopback for Tx, a Verbs dummy queue pair needs to
> > > > be
> > > > > created to trigger the kernel to enable the loopback.
> > > > >
> > > > > This is only required when TIR is created for Rx and loopback is
> > > > > needed. Only CQ and QP are needed for this case, no WQ(RQ) needs
> > > > to be
> > > > > created.





* [dpdk-stable] [PATCH v2] net/mlx5: fix loopback for DV queue
  2021-05-12 14:36 [dpdk-stable] [PATCH] net/mlx5: fix loopback for DV queue Bing Zhao
  2021-05-12 14:48 ` Thomas Monjalon
@ 2021-05-13 11:13 ` Bing Zhao
  2021-05-13 12:58   ` Thomas Monjalon
  2021-05-17 15:18 ` [dpdk-stable] [PATCH v3] " Bing Zhao
  2 siblings, 1 reply; 13+ messages in thread
From: Bing Zhao @ 2021-05-13 11:13 UTC (permalink / raw)
  To: viacheslavo, matan, thomas; +Cc: dev, orika, rasland, stable

In the past, all the queues and other hardware objects were created
through the Verbs interface. Currently, most object creation has been
migrated to the Devx interface by default, including queues. Only when
DV is disabled by a device argument, or when E-Switch is enabled, are
all or some of the objects created through the Verbs interface.

When using the Devx interface to create queues, the kernel driver
behavior is different from that with Verbs. The Tx loopback cannot
work properly even if the Tx and Rx queues are configured with the
loopback attribute. To fix self loopback support for Tx, a Verbs
dummy queue pair needs to be created to trigger the kernel to enable
the global loopback capability.

This is only required when a TIR is created for Rx and loopback is
needed. Only a CQ and a QP are needed in this case; no WQ (RQ) needs
to be created.
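
(Because the dummy resources live in the shared device context, representor
ports on the same device only take a reference; the create/release paths in
the patch below reduce to roughly the following.)

	/* Create path: the first reference builds the CQ/QP, later ones count. */
	if (__atomic_fetch_add(&sh->self_lb.refcnt, 1, __ATOMIC_RELAXED) != 0) {
		priv->lb_used = 1;	/* shared resources already exist */
		return 0;
	}
	/* ... allocate the dummy CQ and QP into sh->self_lb ... */

	/* Release path: the last reference destroys the shared resources. */
	if (__atomic_sub_fetch(&sh->self_lb.refcnt, 1, __ATOMIC_RELAXED) == 0) {
		/* ... destroy sh->self_lb.qp and sh->self_lb.ibv_cq ... */
	}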

This requirement comes from Bugzilla 645; more details can be found
in the Bugzilla ticket.

Bugzilla ID: 645

Fixes: 6deb19e1b2d2 ("net/mlx5: separate Rx queue object creations")
Cc: stable@dpdk.org

Signed-off-by: Bing Zhao <bingz@nvidia.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
---
 drivers/net/mlx5/linux/mlx5_verbs.c | 119 ++++++++++++++++++++++++++++
 drivers/net/mlx5/linux/mlx5_verbs.h |   2 +
 drivers/net/mlx5/mlx5.h             |   9 +++
 drivers/net/mlx5/mlx5_trigger.c     |   9 +++
 4 files changed, 139 insertions(+)

diff --git a/drivers/net/mlx5/linux/mlx5_verbs.c b/drivers/net/mlx5/linux/mlx5_verbs.c
index 0b0759f33f..2ca94b5712 100644
--- a/drivers/net/mlx5/linux/mlx5_verbs.c
+++ b/drivers/net/mlx5/linux/mlx5_verbs.c
@@ -1055,6 +1055,125 @@ mlx5_txq_ibv_obj_new(struct rte_eth_dev *dev, uint16_t idx)
 	return -rte_errno;
 }
 
+/*
+ * Create the dummy QP with minimal resources for loopback.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_rxq_ibv_obj_dummy_lb_create(struct rte_eth_dev *dev)
+{
+#if defined(HAVE_IBV_DEVICE_TUNNEL_SUPPORT) && defined(HAVE_IBV_FLOW_DV_SUPPORT)
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_dev_ctx_shared *sh = priv->sh;
+	struct ibv_context *ctx = sh->ctx;
+	struct mlx5dv_qp_init_attr qp_init_attr = {0};
+	struct {
+		struct ibv_cq_init_attr_ex ibv;
+		struct mlx5dv_cq_init_attr mlx5;
+	} cq_attr = {{0}};
+
+	if (dev->data->dev_conf.lpbk_mode) {
+		/* Allow packet sent from NIC loop back w/o source MAC check. */
+		qp_init_attr.comp_mask |=
+				MLX5DV_QP_INIT_ATTR_MASK_QP_CREATE_FLAGS;
+		qp_init_attr.create_flags |=
+				MLX5DV_QP_CREATE_TIR_ALLOW_SELF_LOOPBACK_UC;
+	} else {
+		return 0;
+	}
+	/* Only need to check refcnt, 0 after "sh" is allocated. */
+	if (!!(__atomic_fetch_add(&sh->self_lb.refcnt, 1, __ATOMIC_RELAXED))) {
+		MLX5_ASSERT(sh->self_lb.ibv_cq && sh->self_lb.qp);
+		priv->lb_used = 1;
+		return 0;
+	}
+	cq_attr.ibv = (struct ibv_cq_init_attr_ex){
+		.cqe = 1,
+		.channel = NULL,
+		.comp_mask = 0,
+	};
+	cq_attr.mlx5 = (struct mlx5dv_cq_init_attr){
+		.comp_mask = 0,
+	};
+	/* Only CQ is needed, no WQ(RQ) is required in this case. */
+	sh->self_lb.ibv_cq = mlx5_glue->cq_ex_to_cq(mlx5_glue->dv_create_cq(ctx,
+							&cq_attr.ibv,
+							&cq_attr.mlx5));
+	if (!sh->self_lb.ibv_cq) {
+		DRV_LOG(ERR, "Port %u cannot allocate CQ for loopback.",
+			dev->data->port_id);
+		rte_errno = errno;
+		goto error;
+	}
+	sh->self_lb.qp = mlx5_glue->dv_create_qp(ctx,
+				&(struct ibv_qp_init_attr_ex){
+					.qp_type = IBV_QPT_RAW_PACKET,
+					.comp_mask = IBV_QP_INIT_ATTR_PD,
+					.pd = sh->pd,
+					.send_cq = sh->self_lb.ibv_cq,
+					.recv_cq = sh->self_lb.ibv_cq,
+					.cap.max_recv_wr = 1,
+				},
+				&qp_init_attr);
+	if (!sh->self_lb.qp) {
+		DRV_LOG(DEBUG, "Port %u cannot allocate QP for loopback.",
+			dev->data->port_id);
+		rte_errno = errno;
+		goto error;
+	}
+	priv->lb_used = 1;
+	return 0;
+error:
+	if (sh->self_lb.ibv_cq) {
+		claim_zero(mlx5_glue->destroy_cq(sh->self_lb.ibv_cq));
+		sh->self_lb.ibv_cq = NULL;
+	}
+	(void)__atomic_sub_fetch(&sh->self_lb.refcnt, 1, __ATOMIC_RELAXED);
+	return -rte_errno;
+#else
+	RTE_SET_USED(dev);
+	return 0;
+#endif
+}
+
+/*
+ * Release the dummy queue resources for loopback.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ */
+void
+mlx5_rxq_ibv_obj_dummy_lb_release(struct rte_eth_dev *dev)
+{
+#if defined(HAVE_IBV_DEVICE_TUNNEL_SUPPORT) && defined(HAVE_IBV_FLOW_DV_SUPPORT)
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_dev_ctx_shared *sh = priv->sh;
+
+	if (!priv->lb_used)
+		return;
+	MLX5_ASSERT(__atomic_load_n(&sh->self_lb.refcnt, __ATOMIC_RELAXED));
+	if (!(__atomic_sub_fetch(&sh->self_lb.refcnt, 1, __ATOMIC_RELAXED))) {
+		if (sh->self_lb.qp) {
+			claim_zero(mlx5_glue->destroy_qp(sh->self_lb.qp));
+			sh->self_lb.qp = NULL;
+		}
+		if (sh->self_lb.ibv_cq) {
+			claim_zero(mlx5_glue->destroy_cq(sh->self_lb.ibv_cq));
+			sh->self_lb.ibv_cq = NULL;
+		}
+	}
+	priv->lb_used = 0;
+#else
+	RTE_SET_USED(dev);
+	return;
+#endif
+}
+
 /**
  * Release an Tx verbs queue object.
  *
diff --git a/drivers/net/mlx5/linux/mlx5_verbs.h b/drivers/net/mlx5/linux/mlx5_verbs.h
index 76a79bf4f4..f7e8e2fe98 100644
--- a/drivers/net/mlx5/linux/mlx5_verbs.h
+++ b/drivers/net/mlx5/linux/mlx5_verbs.h
@@ -9,6 +9,8 @@
 
 int mlx5_txq_ibv_obj_new(struct rte_eth_dev *dev, uint16_t idx);
 void mlx5_txq_ibv_obj_release(struct mlx5_txq_obj *txq_obj);
+int mlx5_rxq_ibv_obj_dummy_lb_create(struct rte_eth_dev *dev);
+void mlx5_rxq_ibv_obj_dummy_lb_release(struct rte_eth_dev *dev);
 
 /* Verbs ops struct */
 extern const struct mlx5_mr_ops mlx5_mr_verbs_ops;
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 7eca6a6fa6..ad57a4f5b0 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -287,6 +287,13 @@ struct mlx5_drop {
 	struct mlx5_rxq_obj *rxq; /* Rx queue object. */
 };
 
+/* Loopback dummy queue resources required due to Verbs API. */
+struct mlx5_lb_ctx {
+	struct ibv_qp *qp; /* QP object. */
+	void *ibv_cq; /* Completion queue. */
+	uint16_t refcnt; /* Reference count for representors. */
+};
+
 #define MLX5_COUNTERS_PER_POOL 512
 #define MLX5_MAX_PENDING_QUERIES 4
 #define MLX5_CNT_CONTAINER_RESIZE 64
@@ -1124,6 +1131,7 @@ struct mlx5_dev_ctx_shared {
 	/* Meter management structure. */
 	struct mlx5_aso_ct_pools_mng *ct_mng;
 	/* Management data for ASO connection tracking. */
+	struct mlx5_lb_ctx self_lb; /* QP to enable self loopback for Devx. */
 	struct mlx5_dev_shared_port port[]; /* per device port data array. */
 };
 
@@ -1312,6 +1320,7 @@ struct mlx5_priv {
 	unsigned int sampler_en:1; /* Whether support sampler. */
 	unsigned int mtr_en:1; /* Whether support meter. */
 	unsigned int mtr_reg_share:1; /* Whether support meter REG_C share. */
+	unsigned int lb_used:1; /* Loopback queue is referred to. */
 	uint16_t domain_id; /* Switch domain identifier. */
 	uint16_t vport_id; /* Associated VF vport index (if any). */
 	uint32_t vport_meta_tag; /* Used for vport index match ove VF LAG. */
diff --git a/drivers/net/mlx5/mlx5_trigger.c b/drivers/net/mlx5/mlx5_trigger.c
index eb8c99cd93..32ab90c9b3 100644
--- a/drivers/net/mlx5/mlx5_trigger.c
+++ b/drivers/net/mlx5/mlx5_trigger.c
@@ -19,6 +19,7 @@
 #include "mlx5_tx.h"
 #include "mlx5_utils.h"
 #include "rte_pmd_mlx5.h"
+#include "mlx5_verbs.h"
 
 /**
  * Stop traffic on Tx queues.
@@ -1068,6 +1069,12 @@ mlx5_dev_start(struct rte_eth_dev *dev)
 			dev->data->port_id, strerror(rte_errno));
 		goto error;
 	}
+	if (priv->config.devx && priv->config.dv_flow_en &&
+	    priv->config.dest_tir) {
+		ret = mlx5_rxq_ibv_obj_dummy_lb_create(dev);
+		if (ret)
+			goto error;
+	}
 	ret = mlx5_txq_start(dev);
 	if (ret) {
 		DRV_LOG(ERR, "port %u Tx queue allocation failed: %s",
@@ -1148,6 +1155,7 @@ mlx5_dev_start(struct rte_eth_dev *dev)
 	mlx5_traffic_disable(dev);
 	mlx5_txq_stop(dev);
 	mlx5_rxq_stop(dev);
+	mlx5_rxq_ibv_obj_dummy_lb_release(dev);
 	mlx5_txpp_stop(dev); /* Stop last. */
 	rte_errno = ret; /* Restore rte_errno. */
 	return -rte_errno;
@@ -1186,6 +1194,7 @@ mlx5_dev_stop(struct rte_eth_dev *dev)
 	priv->sh->port[priv->dev_port - 1].devx_ih_port_id = RTE_MAX_ETHPORTS;
 	mlx5_txq_stop(dev);
 	mlx5_rxq_stop(dev);
+	mlx5_rxq_ibv_obj_dummy_lb_release(dev);
 	mlx5_txpp_stop(dev);
 
 	return 0;
-- 
2.27.0



* Re: [dpdk-stable] [PATCH] net/mlx5: fix loopback for DV queue
  2021-05-12 16:03         ` Thomas Monjalon
@ 2021-05-13 11:14           ` Bing Zhao
  0 siblings, 0 replies; 13+ messages in thread
From: Bing Zhao @ 2021-05-13 11:14 UTC (permalink / raw)
  To: NBU-Contact-Thomas Monjalon, Slava Ovsiienko
  Cc: Matan Azrad, dev, Ori Kam, Raslan Darawsheh, stable

Hi,

> -----Original Message-----
> From: Thomas Monjalon <thomas@monjalon.net>
> Sent: Thursday, May 13, 2021 12:04 AM
> To: Bing Zhao <bingz@nvidia.com>; Slava Ovsiienko
> <viacheslavo@nvidia.com>
> Cc: Matan Azrad <matan@nvidia.com>; dev@dpdk.org; Ori Kam
> <orika@nvidia.com>; Raslan Darawsheh <rasland@nvidia.com>;
> stable@dpdk.org
> Subject: Re: [PATCH] net/mlx5: fix loopback for DV queue
> 
> 12/05/2021 17:48, Slava Ovsiienko:
> > Hi, Thomas
> >
> > > How do we switch?
> >
> > Historically mlx5 created Queues and other HW objects with Verbs.
> > Now we are migrating ("switched") to DevX - queues are being
> created with DevX calls.
> > If there is no E-Switch - the new DevX approach is engaged, for
> > E-Switch configurations we still use Verbs.
> 
> OK this is what I thought, but it is not clear in the commit message.
> We could think it is the user switching a queue :) Please reword by
> comparing Verbs and DevX without saying "switch".
> 

Thank you all for your comments. Patch v2 was sent.

> 
> > From: Thomas Monjalon <thomas@monjalon.net>
> > > 12/05/2021 16:51, Bing Zhao:
> > > > From: Thomas Monjalon <thomas@monjalon.net>
> > > > > 12/05/2021 16:36, Bing Zhao:
> > > > > > When switching to the Devx interface, the kernel driver
> > > > > > behavior
> > > > > may
> > > > >
> > > > > What do you mean by "switching"?
> > > >
> > > > I mean when changing the Rxq from Verbs API to Devx API.
> > >
> > >
> > > > > > be different from using Verbs. The Tx loopback cannot work
> > > > > properly
> > > > > > even if the Tx and Rx queues are configured with loopback
> > > > > attribute.
> > > > > > To support self loopback for Tx, a Verbs dummy queue pair
> > > > > > needs to
> > > > > be
> > > > > > created to trigger the kernel to enable the loopback.
> > > > > >
> > > > > > This is only required when TIR is created for Rx and
> loopback
> > > > > > is needed. Only CQ and QP are needed for this case, no
> WQ(RQ)
> > > > > > needs
> > > > > to be
> > > > > > created.
> 
> 

BR. Bing


* Re: [dpdk-stable] [PATCH v2] net/mlx5: fix loopback for DV queue
  2021-05-13 11:13 ` [dpdk-stable] [PATCH v2] " Bing Zhao
@ 2021-05-13 12:58   ` Thomas Monjalon
  2021-05-13 12:59     ` Bing Zhao
  0 siblings, 1 reply; 13+ messages in thread
From: Thomas Monjalon @ 2021-05-13 12:58 UTC (permalink / raw)
  To: Bing Zhao; +Cc: viacheslavo, matan, stable, dev, orika, rasland, asafp

13/05/2021 13:13, Bing Zhao:
> --- a/drivers/net/mlx5/mlx5_trigger.c
> +++ b/drivers/net/mlx5/mlx5_trigger.c
> @@ -19,6 +19,7 @@
>  #include "mlx5_tx.h"
>  #include "mlx5_utils.h"
>  #include "rte_pmd_mlx5.h"
> +#include "mlx5_verbs.h"

You cannot include a Linux-only header here.
It fails on Windows:
http://mails.dpdk.org/archives/test-report/2021-May/194731.html
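
(The v3 revision later in this thread resolves this by dropping the include
and calling through the per-device ops table instead, so only Linux-only code
references the Verbs symbols; condensed from that patch below.)

	/* mlx5_trigger.c -- common code, no mlx5_verbs.h include needed: */
	if ((priv->config.devx && priv->config.dv_flow_en &&
	    priv->config.dest_tir) && priv->obj_ops.lb_dummy_queue_create) {
		ret = priv->obj_ops.lb_dummy_queue_create(dev);
		if (ret)
			goto error;
	}

	/* linux/mlx5_os.c -- Linux-only code wires up the callbacks: */
	priv->obj_ops.lb_dummy_queue_create =
				mlx5_rxq_ibv_obj_dummy_lb_create;
	priv->obj_ops.lb_dummy_queue_release =
				mlx5_rxq_ibv_obj_dummy_lb_release;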





* Re: [dpdk-stable] [PATCH v2] net/mlx5: fix loopback for DV queue
  2021-05-13 12:58   ` Thomas Monjalon
@ 2021-05-13 12:59     ` Bing Zhao
  0 siblings, 0 replies; 13+ messages in thread
From: Bing Zhao @ 2021-05-13 12:59 UTC (permalink / raw)
  To: NBU-Contact-Thomas Monjalon
  Cc: Slava Ovsiienko, Matan Azrad, stable, dev, Ori Kam,
	Raslan Darawsheh, Asaf Penso

Hi,
I just noticed this, will try to fix it. Thanks.

> -----Original Message-----
> From: Thomas Monjalon <thomas@monjalon.net>
> Sent: Thursday, May 13, 2021 8:58 PM
> To: Bing Zhao <bingz@nvidia.com>
> Cc: Slava Ovsiienko <viacheslavo@nvidia.com>; Matan Azrad
> <matan@nvidia.com>; stable@dpdk.org; dev@dpdk.org; Ori Kam
> <orika@nvidia.com>; Raslan Darawsheh <rasland@nvidia.com>; Asaf
> Penso <asafp@nvidia.com>
> Subject: Re: [dpdk-stable] [PATCH v2] net/mlx5: fix loopback for DV
> queue
> 
> 13/05/2021 13:13, Bing Zhao:
> > --- a/drivers/net/mlx5/mlx5_trigger.c
> > +++ b/drivers/net/mlx5/mlx5_trigger.c
> > @@ -19,6 +19,7 @@
> >  #include "mlx5_tx.h"
> >  #include "mlx5_utils.h"
> >  #include "rte_pmd_mlx5.h"
> > +#include "mlx5_verbs.h"
> 
> You cannot include a Linux-only header here.
> It fails on Windows:
> http://mails.dpdk.org/archives/test-report/2021-May/194731.html
> 
> 



* [dpdk-stable] [PATCH v3] net/mlx5: fix loopback for DV queue
  2021-05-12 14:36 [dpdk-stable] [PATCH] net/mlx5: fix loopback for DV queue Bing Zhao
  2021-05-12 14:48 ` Thomas Monjalon
  2021-05-13 11:13 ` [dpdk-stable] [PATCH v2] " Bing Zhao
@ 2021-05-17 15:18 ` Bing Zhao
  2021-05-18  8:32   ` [dpdk-stable] [dpdk-dev] " Thomas Monjalon
  2 siblings, 1 reply; 13+ messages in thread
From: Bing Zhao @ 2021-05-17 15:18 UTC (permalink / raw)
  To: viacheslavo, matan, thomas; +Cc: dev, orika, rasland, stable, talshn

In the past, all the queues and other hardware objects were created
through Verbs interface. Currently, most of the objects creation are
migrated to Devx interface by default, including queues. Only when
the DV is disabled by device arg or eswitch is enabled, all or some
of the objects are created through Verbs interface.

When using Devx interface to create queues, the kernel driver
behavior is different from the case using Verbs. The Tx loopback
cannot work properly even if the Tx and Rx queues are configured
with loopback attribute. To fix the support self loopback for Tx, a
Verbs dummy queue pair needs to be created to trigger the kernel to
enable the global loopback capability.

This is only required when TIR is created for Rx and loopback is
needed. Only CQ and QP are needed for this case, no WQ(RQ) needs to
be created.

This requirement comes from bugzilla 645, more details can be found
in the bugzilla link.

Bugzilla ID: 645

Fixes: 6deb19e1b2d2 ("net/mlx5: separate Rx queue object creations")
Cc: stable@dpdk.org

Signed-off-by: Bing Zhao <bingz@nvidia.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
---
 drivers/net/mlx5/linux/mlx5_os.c    |   5 +-
 drivers/net/mlx5/linux/mlx5_verbs.c | 121 ++++++++++++++++++++++++++++
 drivers/net/mlx5/linux/mlx5_verbs.h |   2 +
 drivers/net/mlx5/mlx5.h             |  11 +++
 drivers/net/mlx5/mlx5_devx.c        |   2 +
 drivers/net/mlx5/mlx5_trigger.c     |  10 +++
 6 files changed, 150 insertions(+), 1 deletion(-)

diff --git a/drivers/net/mlx5/linux/mlx5_os.c b/drivers/net/mlx5/linux/mlx5_os.c
index ef7ccba5de..534a56a555 100644
--- a/drivers/net/mlx5/linux/mlx5_os.c
+++ b/drivers/net/mlx5/linux/mlx5_os.c
@@ -1632,7 +1632,10 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
 		priv->obj_ops.txq_obj_new = mlx5_os_txq_obj_new;
 		priv->obj_ops.txq_obj_release = mlx5_os_txq_obj_release;
 		mlx5_queue_counter_id_prepare(eth_dev);
-
+		priv->obj_ops.lb_dummy_queue_create =
+					mlx5_rxq_ibv_obj_dummy_lb_create;
+		priv->obj_ops.lb_dummy_queue_release =
+					mlx5_rxq_ibv_obj_dummy_lb_release;
 	} else {
 		priv->obj_ops = ibv_obj_ops;
 	}
diff --git a/drivers/net/mlx5/linux/mlx5_verbs.c b/drivers/net/mlx5/linux/mlx5_verbs.c
index 0b0759f33f..d4fa202ac4 100644
--- a/drivers/net/mlx5/linux/mlx5_verbs.c
+++ b/drivers/net/mlx5/linux/mlx5_verbs.c
@@ -1055,6 +1055,125 @@ mlx5_txq_ibv_obj_new(struct rte_eth_dev *dev, uint16_t idx)
 	return -rte_errno;
 }
 
+/*
+ * Create the dummy QP with minimal resources for loopback.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_rxq_ibv_obj_dummy_lb_create(struct rte_eth_dev *dev)
+{
+#if defined(HAVE_IBV_DEVICE_TUNNEL_SUPPORT) && defined(HAVE_IBV_FLOW_DV_SUPPORT)
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_dev_ctx_shared *sh = priv->sh;
+	struct ibv_context *ctx = sh->ctx;
+	struct mlx5dv_qp_init_attr qp_init_attr = {0};
+	struct {
+		struct ibv_cq_init_attr_ex ibv;
+		struct mlx5dv_cq_init_attr mlx5;
+	} cq_attr = {{0}};
+
+	if (dev->data->dev_conf.lpbk_mode) {
+		/* Allow packet sent from NIC loop back w/o source MAC check. */
+		qp_init_attr.comp_mask |=
+				MLX5DV_QP_INIT_ATTR_MASK_QP_CREATE_FLAGS;
+		qp_init_attr.create_flags |=
+				MLX5DV_QP_CREATE_TIR_ALLOW_SELF_LOOPBACK_UC;
+	} else {
+		return 0;
+	}
+	/* Only need to check refcnt, 0 after "sh" is allocated. */
+	if (!!(__atomic_fetch_add(&sh->self_lb.refcnt, 1, __ATOMIC_RELAXED))) {
+		MLX5_ASSERT(sh->self_lb.ibv_cq && sh->self_lb.qp);
+		priv->lb_used = 1;
+		return 0;
+	}
+	cq_attr.ibv = (struct ibv_cq_init_attr_ex){
+		.cqe = 1,
+		.channel = NULL,
+		.comp_mask = 0,
+	};
+	cq_attr.mlx5 = (struct mlx5dv_cq_init_attr){
+		.comp_mask = 0,
+	};
+	/* Only CQ is needed, no WQ(RQ) is required in this case. */
+	sh->self_lb.ibv_cq = mlx5_glue->cq_ex_to_cq(mlx5_glue->dv_create_cq(ctx,
+							&cq_attr.ibv,
+							&cq_attr.mlx5));
+	if (!sh->self_lb.ibv_cq) {
+		DRV_LOG(ERR, "Port %u cannot allocate CQ for loopback.",
+			dev->data->port_id);
+		rte_errno = errno;
+		goto error;
+	}
+	sh->self_lb.qp = mlx5_glue->dv_create_qp(ctx,
+				&(struct ibv_qp_init_attr_ex){
+					.qp_type = IBV_QPT_RAW_PACKET,
+					.comp_mask = IBV_QP_INIT_ATTR_PD,
+					.pd = sh->pd,
+					.send_cq = sh->self_lb.ibv_cq,
+					.recv_cq = sh->self_lb.ibv_cq,
+					.cap.max_recv_wr = 1,
+				},
+				&qp_init_attr);
+	if (!sh->self_lb.qp) {
+		DRV_LOG(DEBUG, "Port %u cannot allocate QP for loopback.",
+			dev->data->port_id);
+		rte_errno = errno;
+		goto error;
+	}
+	priv->lb_used = 1;
+	return 0;
+error:
+	if (sh->self_lb.ibv_cq) {
+		claim_zero(mlx5_glue->destroy_cq(sh->self_lb.ibv_cq));
+		sh->self_lb.ibv_cq = NULL;
+	}
+	(void)__atomic_sub_fetch(&sh->self_lb.refcnt, 1, __ATOMIC_RELAXED);
+	return -rte_errno;
+#else
+	RTE_SET_USED(dev);
+	return 0;
+#endif
+}
+
+/*
+ * Release the dummy queue resources for loopback.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ */
+void
+mlx5_rxq_ibv_obj_dummy_lb_release(struct rte_eth_dev *dev)
+{
+#if defined(HAVE_IBV_DEVICE_TUNNEL_SUPPORT) && defined(HAVE_IBV_FLOW_DV_SUPPORT)
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_dev_ctx_shared *sh = priv->sh;
+
+	if (!priv->lb_used)
+		return;
+	MLX5_ASSERT(__atomic_load_n(&sh->self_lb.refcnt, __ATOMIC_RELAXED));
+	if (!(__atomic_sub_fetch(&sh->self_lb.refcnt, 1, __ATOMIC_RELAXED))) {
+		if (sh->self_lb.qp) {
+			claim_zero(mlx5_glue->destroy_qp(sh->self_lb.qp));
+			sh->self_lb.qp = NULL;
+		}
+		if (sh->self_lb.ibv_cq) {
+			claim_zero(mlx5_glue->destroy_cq(sh->self_lb.ibv_cq));
+			sh->self_lb.ibv_cq = NULL;
+		}
+	}
+	priv->lb_used = 0;
+#else
+	RTE_SET_USED(dev);
+	return;
+#endif
+}
+
 /**
  * Release an Tx verbs queue object.
  *
@@ -1084,4 +1203,6 @@ struct mlx5_obj_ops ibv_obj_ops = {
 	.txq_obj_new = mlx5_txq_ibv_obj_new,
 	.txq_obj_modify = mlx5_ibv_modify_qp,
 	.txq_obj_release = mlx5_txq_ibv_obj_release,
+	.lb_dummy_queue_create = NULL,
+	.lb_dummy_queue_release = NULL,
 };
diff --git a/drivers/net/mlx5/linux/mlx5_verbs.h b/drivers/net/mlx5/linux/mlx5_verbs.h
index 76a79bf4f4..f7e8e2fe98 100644
--- a/drivers/net/mlx5/linux/mlx5_verbs.h
+++ b/drivers/net/mlx5/linux/mlx5_verbs.h
@@ -9,6 +9,8 @@
 
 int mlx5_txq_ibv_obj_new(struct rte_eth_dev *dev, uint16_t idx);
 void mlx5_txq_ibv_obj_release(struct mlx5_txq_obj *txq_obj);
+int mlx5_rxq_ibv_obj_dummy_lb_create(struct rte_eth_dev *dev);
+void mlx5_rxq_ibv_obj_dummy_lb_release(struct rte_eth_dev *dev);
 
 /* Verbs ops struct */
 extern const struct mlx5_mr_ops mlx5_mr_verbs_ops;
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index b8a29dd369..32b2817bf2 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -287,6 +287,13 @@ struct mlx5_drop {
 	struct mlx5_rxq_obj *rxq; /* Rx queue object. */
 };
 
+/* Loopback dummy queue resources required due to Verbs API. */
+struct mlx5_lb_ctx {
+	struct ibv_qp *qp; /* QP object. */
+	void *ibv_cq; /* Completion queue. */
+	uint16_t refcnt; /* Reference count for representors. */
+};
+
 #define MLX5_COUNTERS_PER_POOL 512
 #define MLX5_MAX_PENDING_QUERIES 4
 #define MLX5_CNT_CONTAINER_RESIZE 64
@@ -1128,6 +1135,7 @@ struct mlx5_dev_ctx_shared {
 	/* Meter management structure. */
 	struct mlx5_aso_ct_pools_mng *ct_mng;
 	/* Management data for ASO connection tracking. */
+	struct mlx5_lb_ctx self_lb; /* QP to enable self loopback for Devx. */
 	struct mlx5_dev_shared_port port[]; /* per device port data array. */
 };
 
@@ -1287,6 +1295,8 @@ struct mlx5_obj_ops {
 	int (*txq_obj_modify)(struct mlx5_txq_obj *obj,
 			      enum mlx5_txq_modify_type type, uint8_t dev_port);
 	void (*txq_obj_release)(struct mlx5_txq_obj *txq_obj);
+	int (*lb_dummy_queue_create)(struct rte_eth_dev *dev);
+	void (*lb_dummy_queue_release)(struct rte_eth_dev *dev);
 };
 
 #define MLX5_RSS_HASH_FIELDS_LEN RTE_DIM(mlx5_rss_hash_fields)
@@ -1316,6 +1326,7 @@ struct mlx5_priv {
 	unsigned int sampler_en:1; /* Whether support sampler. */
 	unsigned int mtr_en:1; /* Whether support meter. */
 	unsigned int mtr_reg_share:1; /* Whether support meter REG_C share. */
+	unsigned int lb_used:1; /* Loopback queue is referred to. */
 	uint16_t domain_id; /* Switch domain identifier. */
 	uint16_t vport_id; /* Associated VF vport index (if any). */
 	uint32_t vport_meta_tag; /* Used for vport index match ove VF LAG. */
diff --git a/drivers/net/mlx5/mlx5_devx.c b/drivers/net/mlx5/mlx5_devx.c
index 531a81d7fa..78b88f99b4 100644
--- a/drivers/net/mlx5/mlx5_devx.c
+++ b/drivers/net/mlx5/mlx5_devx.c
@@ -1188,4 +1188,6 @@ struct mlx5_obj_ops devx_obj_ops = {
 	.txq_obj_new = mlx5_txq_devx_obj_new,
 	.txq_obj_modify = mlx5_devx_modify_sq,
 	.txq_obj_release = mlx5_txq_devx_obj_release,
+	.lb_dummy_queue_create = NULL,
+	.lb_dummy_queue_release = NULL,
 };
diff --git a/drivers/net/mlx5/mlx5_trigger.c b/drivers/net/mlx5/mlx5_trigger.c
index 879d3171e9..ae7fcca229 100644
--- a/drivers/net/mlx5/mlx5_trigger.c
+++ b/drivers/net/mlx5/mlx5_trigger.c
@@ -1068,6 +1068,12 @@ mlx5_dev_start(struct rte_eth_dev *dev)
 			dev->data->port_id, strerror(rte_errno));
 		goto error;
 	}
+	if ((priv->config.devx && priv->config.dv_flow_en &&
+	    priv->config.dest_tir) && priv->obj_ops.lb_dummy_queue_create) {
+		ret = priv->obj_ops.lb_dummy_queue_create(dev);
+		if (ret)
+			goto error;
+	}
 	ret = mlx5_txq_start(dev);
 	if (ret) {
 		DRV_LOG(ERR, "port %u Tx queue allocation failed: %s",
@@ -1148,6 +1154,8 @@ mlx5_dev_start(struct rte_eth_dev *dev)
 	mlx5_traffic_disable(dev);
 	mlx5_txq_stop(dev);
 	mlx5_rxq_stop(dev);
+	if (priv->obj_ops.lb_dummy_queue_release)
+		priv->obj_ops.lb_dummy_queue_release(dev);
 	mlx5_txpp_stop(dev); /* Stop last. */
 	rte_errno = ret; /* Restore rte_errno. */
 	return -rte_errno;
@@ -1186,6 +1194,8 @@ mlx5_dev_stop(struct rte_eth_dev *dev)
 	priv->sh->port[priv->dev_port - 1].devx_ih_port_id = RTE_MAX_ETHPORTS;
 	mlx5_txq_stop(dev);
 	mlx5_rxq_stop(dev);
+	if (priv->obj_ops.lb_dummy_queue_release)
+		priv->obj_ops.lb_dummy_queue_release(dev);
 	mlx5_txpp_stop(dev);
 
 	return 0;
-- 
2.27.0



* Re: [dpdk-stable] [dpdk-dev] [PATCH v3] net/mlx5: fix loopback for DV queue
  2021-05-17 15:18 ` [dpdk-stable] [PATCH v3] " Bing Zhao
@ 2021-05-18  8:32   ` Thomas Monjalon
  2021-05-18  8:33     ` Bing Zhao
  0 siblings, 1 reply; 13+ messages in thread
From: Thomas Monjalon @ 2021-05-18  8:32 UTC (permalink / raw)
  To: Bing Zhao; +Cc: viacheslavo, matan, dev, orika, rasland, stable, talshn

17/05/2021 17:18, Bing Zhao:
> In the past, all the queues and other hardware objects were created
> through Verbs interface. Currently, most of the objects creation are
> migrated to Devx interface by default, including queues. Only when
> the DV is disabled by device arg or eswitch is enabled, all or some
> of the objects are created through Verbs interface.
> 
> When using Devx interface to create queues, the kernel driver
> behavior is different from the case using Verbs. The Tx loopback
> cannot work properly even if the Tx and Rx queues are configured
> with loopback attribute. To fix the support self loopback for Tx, a
> Verbs dummy queue pair needs to be created to trigger the kernel to
> enable the global loopback capability.
> 
> This is only required when TIR is created for Rx and loopback is
> needed. Only CQ and QP are needed for this case, no WQ(RQ) needs to
> be created.
> 
> This requirement comes from bugzilla 645, more details can be found
> in the bugzilla link.
> 
> Bugzilla ID: 645
> 
> Fixes: 6deb19e1b2d2 ("net/mlx5: separate Rx queue object creations")
> Cc: stable@dpdk.org
> 
> Signed-off-by: Bing Zhao <bingz@nvidia.com>
> Acked-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>

Applied to next-net-mlx, thanks.





* Re: [dpdk-stable] [dpdk-dev] [PATCH v3] net/mlx5: fix loopback for DV queue
  2021-05-18  8:32   ` [dpdk-stable] [dpdk-dev] " Thomas Monjalon
@ 2021-05-18  8:33     ` Bing Zhao
  0 siblings, 0 replies; 13+ messages in thread
From: Bing Zhao @ 2021-05-18  8:33 UTC (permalink / raw)
  To: NBU-Contact-Thomas Monjalon
  Cc: Slava Ovsiienko, Matan Azrad, dev, Ori Kam, Raslan Darawsheh,
	stable, Tal Shnaiderman

Thanks a lot, Thomas.

> -----Original Message-----
> From: Thomas Monjalon <thomas@monjalon.net>
> Sent: Tuesday, May 18, 2021 4:33 PM
> To: Bing Zhao <bingz@nvidia.com>
> Cc: Slava Ovsiienko <viacheslavo@nvidia.com>; Matan Azrad
> <matan@nvidia.com>; dev@dpdk.org; Ori Kam <orika@nvidia.com>; Raslan
> Darawsheh <rasland@nvidia.com>; stable@dpdk.org; Tal Shnaiderman
> <talshn@nvidia.com>
> Subject: Re: [dpdk-dev] [PATCH v3] net/mlx5: fix loopback for DV
> queue
> 
> 17/05/2021 17:18, Bing Zhao:
> > In the past, all the queues and other hardware objects were
> created
> > through Verbs interface. Currently, most of the objects creation
> are
> > migrated to Devx interface by default, including queues. Only when
> the
> > DV is disabled by device arg or eswitch is enabled, all or some of
> the
> > objects are created through Verbs interface.
> >
> > When using Devx interface to create queues, the kernel driver
> behavior
> > is different from the case using Verbs. The Tx loopback cannot
> work
> > properly even if the Tx and Rx queues are configured with loopback
> > attribute. To fix the support self loopback for Tx, a Verbs dummy
> > queue pair needs to be created to trigger the kernel to enable the
> > global loopback capability.
> >
> > This is only required when TIR is created for Rx and loopback is
> > needed. Only CQ and QP are needed for this case, no WQ(RQ) needs
> to be
> > created.
> >
> > This requirement comes from bugzilla 645, more details can be
> found in
> > the bugzilla link.
> >
> > Bugzilla ID: 645
> >
> > Fixes: 6deb19e1b2d2 ("net/mlx5: separate Rx queue object
> creations")
> > Cc: stable@dpdk.org
> >
> > Signed-off-by: Bing Zhao <bingz@nvidia.com>
> > Acked-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
> 
> Applied to next-net-mlx, thanks.
> 
> 


