patches for DPDK stable branches
* [dpdk-stable] [PATCH 1/2] common/mlx5: get number of ports that can be bonded
@ 2020-10-26 11:10 Xueming Li
  2020-10-26 11:10 ` [dpdk-stable] [PATCH 2/2] vdpa/mlx5: specify lag port affinity Xueming Li
                   ` (3 more replies)
  0 siblings, 4 replies; 9+ messages in thread
From: Xueming Li @ 2020-10-26 11:10 UTC (permalink / raw)
  To: Matan Azrad, Viacheslav Ovsiienko; +Cc: dev, xuemingl, Asaf Penso, stable

Get HCA capability: number of physical ports that can be bonded.
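
For reference, a minimal sketch of how a caller could consume the new
capability (the helper name and include path are hypothetical; only
mlx5_devx_cmd_query_hca_attr() and the num_lag_ports field come from
this patch, and treating 0 as a single port follows what the vdpa patch
in this series does):

	#include <stdint.h>
	#include "mlx5_devx_cmds.h"

	/* Hypothetical helper: report how many physical ports can be
	 * bonded, falling back to one port when the query fails or the
	 * capability reads as 0. */
	static uint8_t
	bonded_port_count(void *ctx)
	{
		struct mlx5_hca_attr attr = { 0 };

		if (mlx5_devx_cmd_query_hca_attr(ctx, &attr) != 0)
			return 1;
		return attr.num_lag_ports ? attr.num_lag_ports : 1;
	}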

Cc: stable@dpdk.org

Signed-off-by: Xueming Li <xuemingl@nvidia.com>
Acked-by: Matan Azrad <matan@nvidia.com>
---
 drivers/common/mlx5/mlx5_devx_cmds.c | 5 +++--
 drivers/common/mlx5/mlx5_devx_cmds.h | 1 +
 2 files changed, 4 insertions(+), 2 deletions(-)

diff --git a/drivers/common/mlx5/mlx5_devx_cmds.c b/drivers/common/mlx5/mlx5_devx_cmds.c
index 8aee12d527..e748d034d0 100644
--- a/drivers/common/mlx5/mlx5_devx_cmds.c
+++ b/drivers/common/mlx5/mlx5_devx_cmds.c
@@ -711,6 +711,7 @@ mlx5_devx_cmd_query_hca_attr(void *ctx,
 	attr->non_wire_sq = MLX5_GET(cmd_hca_cap, hcattr, non_wire_sq);
 	attr->log_max_static_sq_wq = MLX5_GET(cmd_hca_cap, hcattr,
 					      log_max_static_sq_wq);
+	attr->num_lag_ports = MLX5_GET(cmd_hca_cap, hcattr, num_lag_ports);
 	attr->dev_freq_khz = MLX5_GET(cmd_hca_cap, hcattr,
 				      device_frequency_khz);
 	attr->scatter_fcs_w_decap_disable =
@@ -1429,8 +1430,8 @@ mlx5_devx_cmd_create_tis(void *ctx,
 	tis_ctx = MLX5_ADDR_OF(create_tis_in, in, ctx);
 	MLX5_SET(tisc, tis_ctx, strict_lag_tx_port_affinity,
 		 tis_attr->strict_lag_tx_port_affinity);
-	MLX5_SET(tisc, tis_ctx, strict_lag_tx_port_affinity,
-		 tis_attr->strict_lag_tx_port_affinity);
+	MLX5_SET(tisc, tis_ctx, lag_tx_port_affinity,
+		 tis_attr->lag_tx_port_affinity);
 	MLX5_SET(tisc, tis_ctx, prio, tis_attr->prio);
 	MLX5_SET(tisc, tis_ctx, transport_domain,
 		 tis_attr->transport_domain);
diff --git a/drivers/common/mlx5/mlx5_devx_cmds.h b/drivers/common/mlx5/mlx5_devx_cmds.h
index abbea67784..3781fedd9e 100644
--- a/drivers/common/mlx5/mlx5_devx_cmds.h
+++ b/drivers/common/mlx5/mlx5_devx_cmds.h
@@ -99,6 +99,7 @@ struct mlx5_hca_attr {
 	uint32_t cross_channel:1;
 	uint32_t non_wire_sq:1; /* SQ with non-wire ops is supported. */
 	uint32_t log_max_static_sq_wq:5; /* Static WQE size SQ. */
+	uint32_t num_lag_ports:4; /* Number of ports that can be bonded. */
 	uint32_t dev_freq_khz; /* Timestamp counter frequency, kHz. */
 	uint32_t scatter_fcs_w_decap_disable:1;
 	uint32_t regex:1;
-- 
2.25.1



* [dpdk-stable] [PATCH 2/2] vdpa/mlx5: specify lag port affinity
  2020-10-26 11:10 [dpdk-stable] [PATCH 1/2] common/mlx5: get number of ports that can be bonded Xueming Li
@ 2020-10-26 11:10 ` Xueming Li
  2020-10-28  9:35   ` [dpdk-stable] [dpdk-dev] " Maxime Coquelin
  2020-10-28  9:30 ` [dpdk-stable] [dpdk-dev] [PATCH 1/2] common/mlx5: get number of ports that can be bonded Maxime Coquelin
                   ` (2 subsequent siblings)
  3 siblings, 1 reply; 9+ messages in thread
From: Xueming Li @ 2020-10-26 11:10 UTC (permalink / raw)
  To: Matan Azrad, Viacheslav Ovsiienko; +Cc: dev, xuemingl, Asaf Penso, stable

If the TIS LAG port affinity is set to auto, firmware assigns a port
affinity on each creation in a round-robin fashion. In case of 2 PFs,
if a virtq is created, destroyed and created again, each virtq gets
the same port affinity.

To work around this firmware limitation, this patch creates the TIS
with an explicit port affinity for each PF.
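
For illustration, a minimal sketch of the queue-to-TIS mapping this
patch introduces (tiss[] and num_lag_ports are taken from the diff
below; the wrapper function itself is hypothetical):

	#include "mlx5_vdpa.h" /* illustrative include */

	/* Virtqs come in RX/TX pairs, so index / 2 keeps both queues
	 * of a pair on the same TIS; the modulo then spreads pairs
	 * round-robin across the TIS objects, where tiss[i] was
	 * created with lag_tx_port_affinity = i + 1, i.e. pinned to
	 * LAG port i + 1. */
	static uint32_t
	virtq_tis_id(struct mlx5_vdpa_priv *priv, int index)
	{
		return priv->tiss[(index / 2) % priv->num_lag_ports]->id;
	}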

Cc: stable@dpdk.org

Signed-off-by: Xueming Li <xuemingl@nvidia.com>
Acked-by: Matan Azrad <matan@nvidia.com>
---
 drivers/vdpa/mlx5/mlx5_vdpa.c       |  3 +++
 drivers/vdpa/mlx5/mlx5_vdpa.h       |  3 ++-
 drivers/vdpa/mlx5/mlx5_vdpa_virtq.c | 23 ++++++++++++++---------
 3 files changed, 19 insertions(+), 10 deletions(-)

diff --git a/drivers/vdpa/mlx5/mlx5_vdpa.c b/drivers/vdpa/mlx5/mlx5_vdpa.c
index a8f3e4b1de..2e17ed4fca 100644
--- a/drivers/vdpa/mlx5/mlx5_vdpa.c
+++ b/drivers/vdpa/mlx5/mlx5_vdpa.c
@@ -730,6 +730,9 @@ mlx5_vdpa_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
 	}
 	priv->caps = attr.vdpa;
 	priv->log_max_rqt_size = attr.log_max_rqt_size;
+	priv->num_lag_ports = attr.num_lag_ports;
+	if (attr.num_lag_ports == 0)
+		priv->num_lag_ports = 1;
 	priv->ctx = ctx;
 	priv->pci_dev = pci_dev;
 	priv->var = mlx5_glue->dv_alloc_var(ctx, 0);
diff --git a/drivers/vdpa/mlx5/mlx5_vdpa.h b/drivers/vdpa/mlx5/mlx5_vdpa.h
index fcbc12ab0c..c8c1adfde4 100644
--- a/drivers/vdpa/mlx5/mlx5_vdpa.h
+++ b/drivers/vdpa/mlx5/mlx5_vdpa.h
@@ -146,8 +146,9 @@ struct mlx5_vdpa_priv {
 	struct mlx5dv_devx_uar *uar;
 	struct rte_intr_handle intr_handle;
 	struct mlx5_devx_obj *td;
-	struct mlx5_devx_obj *tis;
+	struct mlx5_devx_obj *tiss[16]; /* TIS list for each LAG port. */
 	uint16_t nr_virtqs;
+	uint8_t num_lag_ports;
 	uint64_t features; /* Negotiated features. */
 	uint16_t log_max_rqt_size;
 	struct mlx5_vdpa_steer steer;
diff --git a/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c b/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c
index 17e71cf4f4..4724baca4e 100644
--- a/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c
+++ b/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c
@@ -103,12 +103,13 @@ void
 mlx5_vdpa_virtqs_release(struct mlx5_vdpa_priv *priv)
 {
 	int i;
-
 	for (i = 0; i < priv->nr_virtqs; i++)
 		mlx5_vdpa_virtq_unset(&priv->virtqs[i]);
-	if (priv->tis) {
-		claim_zero(mlx5_devx_cmd_destroy(priv->tis));
-		priv->tis = NULL;
+	for (i = 0; i < priv->num_lag_ports; i++) {
+		if (priv->tiss[i]) {
+			claim_zero(mlx5_devx_cmd_destroy(priv->tiss[i]));
+			priv->tiss[i] = NULL;
+		}
 	}
 	if (priv->td) {
 		claim_zero(mlx5_devx_cmd_destroy(priv->td));
@@ -302,7 +303,7 @@ mlx5_vdpa_virtq_setup(struct mlx5_vdpa_priv *priv, int index)
 	attr.hw_used_index = last_used_idx;
 	attr.q_size = vq.size;
 	attr.mkey = priv->gpa_mkey_index;
-	attr.tis_id = priv->tis->id;
+	attr.tis_id = priv->tiss[(index / 2) % priv->num_lag_ports]->id;
 	attr.queue_index = index;
 	attr.pd = priv->pdn;
 	virtq->virtq = mlx5_devx_cmd_create_virtq(priv->ctx, &attr);
@@ -432,10 +433,14 @@ mlx5_vdpa_virtqs_prepare(struct mlx5_vdpa_priv *priv)
 		return -rte_errno;
 	}
 	tis_attr.transport_domain = priv->td->id;
-	priv->tis = mlx5_devx_cmd_create_tis(priv->ctx, &tis_attr);
-	if (!priv->tis) {
-		DRV_LOG(ERR, "Failed to create TIS.");
-		goto error;
+	for (i = 0; i < priv->num_lag_ports; i++) {
+		/* 0 means auto affinity; a non-zero value proposes a port. */
+		tis_attr.lag_tx_port_affinity = i + 1;
+		priv->tiss[i] = mlx5_devx_cmd_create_tis(priv->ctx, &tis_attr);
+		if (!priv->tiss[i]) {
+			DRV_LOG(ERR, "Failed to create TIS %u.", i);
+			goto error;
+		}
 	}
 	priv->nr_virtqs = nr_vring;
 	for (i = 0; i < nr_vring; i++)
-- 
2.25.1



* Re: [dpdk-stable] [dpdk-dev] [PATCH 1/2] common/mlx5: get number of ports that can be bonded
  2020-10-26 11:10 [dpdk-stable] [PATCH 1/2] common/mlx5: get number of ports that can be bonded Xueming Li
  2020-10-26 11:10 ` [dpdk-stable] [PATCH 2/2] vdpa/mlx5: specify lag port affinity Xueming Li
@ 2020-10-28  9:30 ` Maxime Coquelin
  2020-10-28 10:44 ` [dpdk-stable] [PATCH v1 " Xueming Li
  2020-10-28 10:44 ` [dpdk-stable] [PATCH v1 2/2] vdpa/mlx5: specify lag port affinity Xueming Li
  3 siblings, 0 replies; 9+ messages in thread
From: Maxime Coquelin @ 2020-10-28  9:30 UTC (permalink / raw)
  To: Xueming Li, Matan Azrad, Viacheslav Ovsiienko; +Cc: dev, Asaf Penso, stable

Hi,

On 10/26/20 12:10 PM, Xueming Li wrote:
> Get HCA capability: number of physical ports that can be bonded.
> 
> Cc: stable@dpdk.org

Is that a fix or a new feature? The commit title and description seem to
indicate the latter, but stable ML being cc'ed seems to indicate the
former.

If this is a fix, please describe what it is fixing, and add a Fixes
tag pointing to the commit it is fixing.
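
For reference, a Fixes tag names the short hash and subject of the
offending commit, followed by the stable Cc; the v1 of patch 2 later in
this thread uses exactly this form:

	Fixes: bff735011078 ("vdpa/mlx5: prepare virtio queues")
	Cc: stable@dpdk.org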

Thanks,
Maxime

> Signed-off-by: Xueming Li <xuemingl@nvidia.com>
> Acked-by: Matan Azrad <matan@nvidia.com>
> ---
>  drivers/common/mlx5/mlx5_devx_cmds.c | 5 +++--
>  drivers/common/mlx5/mlx5_devx_cmds.h | 1 +
>  2 files changed, 4 insertions(+), 2 deletions(-)
> 
> diff --git a/drivers/common/mlx5/mlx5_devx_cmds.c b/drivers/common/mlx5/mlx5_devx_cmds.c
> index 8aee12d527..e748d034d0 100644
> --- a/drivers/common/mlx5/mlx5_devx_cmds.c
> +++ b/drivers/common/mlx5/mlx5_devx_cmds.c
> @@ -711,6 +711,7 @@ mlx5_devx_cmd_query_hca_attr(void *ctx,
>  	attr->non_wire_sq = MLX5_GET(cmd_hca_cap, hcattr, non_wire_sq);
>  	attr->log_max_static_sq_wq = MLX5_GET(cmd_hca_cap, hcattr,
>  					      log_max_static_sq_wq);
> +	attr->num_lag_ports = MLX5_GET(cmd_hca_cap, hcattr, num_lag_ports);
>  	attr->dev_freq_khz = MLX5_GET(cmd_hca_cap, hcattr,
>  				      device_frequency_khz);
>  	attr->scatter_fcs_w_decap_disable =
> @@ -1429,8 +1430,8 @@ mlx5_devx_cmd_create_tis(void *ctx,
>  	tis_ctx = MLX5_ADDR_OF(create_tis_in, in, ctx);
>  	MLX5_SET(tisc, tis_ctx, strict_lag_tx_port_affinity,
>  		 tis_attr->strict_lag_tx_port_affinity);
> -	MLX5_SET(tisc, tis_ctx, strict_lag_tx_port_affinity,
> -		 tis_attr->strict_lag_tx_port_affinity);
> +	MLX5_SET(tisc, tis_ctx, lag_tx_port_affinity,
> +		 tis_attr->lag_tx_port_affinity);
>  	MLX5_SET(tisc, tis_ctx, prio, tis_attr->prio);
>  	MLX5_SET(tisc, tis_ctx, transport_domain,
>  		 tis_attr->transport_domain);
> diff --git a/drivers/common/mlx5/mlx5_devx_cmds.h b/drivers/common/mlx5/mlx5_devx_cmds.h
> index abbea67784..3781fedd9e 100644
> --- a/drivers/common/mlx5/mlx5_devx_cmds.h
> +++ b/drivers/common/mlx5/mlx5_devx_cmds.h
> @@ -99,6 +99,7 @@ struct mlx5_hca_attr {
>  	uint32_t cross_channel:1;
>  	uint32_t non_wire_sq:1; /* SQ with non-wire ops is supported. */
>  	uint32_t log_max_static_sq_wq:5; /* Static WQE size SQ. */
> +	uint32_t num_lag_ports:4; /* Number of ports that can be bonded. */
>  	uint32_t dev_freq_khz; /* Timestamp counter frequency, kHz. */
>  	uint32_t scatter_fcs_w_decap_disable:1;
>  	uint32_t regex:1;
> 



* Re: [dpdk-stable] [dpdk-dev] [PATCH 2/2] vdpa/mlx5: specify lag port affinity
  2020-10-26 11:10 ` [dpdk-stable] [PATCH 2/2] vdpa/mlx5: specify lag port affinity Xueming Li
@ 2020-10-28  9:35   ` Maxime Coquelin
  2020-10-28 10:32     ` Xueming(Steven) Li
  0 siblings, 1 reply; 9+ messages in thread
From: Maxime Coquelin @ 2020-10-28  9:35 UTC (permalink / raw)
  To: Xueming Li, Matan Azrad, Viacheslav Ovsiienko; +Cc: dev, Asaf Penso, stable



On 10/26/20 12:10 PM, Xueming Li wrote:
> If the TIS LAG port affinity is set to auto, firmware assigns a port
> affinity on each creation in a round-robin fashion. In case of 2 PFs,
> if a virtq is created, destroyed and created again, each virtq gets
> the same port affinity.
> 
> To work around this firmware limitation, this patch creates the TIS
> with an explicit port affinity for each PF.

OK, this patch gives a better description of the issue the series is
addressing. Could you add a Fixes tag, so that it helps maintainers
backport it?

Other than that:
Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>

Thanks,
Maxime

> Cc: stable@dpdk.org
> 
> Signed-off-by: Xueming Li <xuemingl@nvidia.com>
> Acked-by: Matan Azrad <matan@nvidia.com>
> ---
>  drivers/vdpa/mlx5/mlx5_vdpa.c       |  3 +++
>  drivers/vdpa/mlx5/mlx5_vdpa.h       |  3 ++-
>  drivers/vdpa/mlx5/mlx5_vdpa_virtq.c | 23 ++++++++++++++---------
>  3 files changed, 19 insertions(+), 10 deletions(-)
> 



* Re: [dpdk-stable] [dpdk-dev] [PATCH 2/2] vdpa/mlx5: specify lag port affinity
  2020-10-28  9:35   ` [dpdk-stable] [dpdk-dev] " Maxime Coquelin
@ 2020-10-28 10:32     ` Xueming(Steven) Li
  0 siblings, 0 replies; 9+ messages in thread
From: Xueming(Steven) Li @ 2020-10-28 10:32 UTC (permalink / raw)
  To: Maxime Coquelin, Matan Azrad, Slava Ovsiienko; +Cc: dev, Asaf Penso, stable



> -----Original Message-----
> From: Maxime Coquelin <maxime.coquelin@redhat.com>
> Sent: Wednesday, October 28, 2020 5:35 PM
> To: Xueming(Steven) Li <xuemingl@nvidia.com>; Matan Azrad
> <matan@nvidia.com>; Slava Ovsiienko <viacheslavo@nvidia.com>
> Cc: dev@dpdk.org; Asaf Penso <asafp@nvidia.com>; stable@dpdk.org
> Subject: Re: [dpdk-dev] [PATCH 2/2] vdpa/mlx5: specify lag port affinity
> 
> 
> 
> On 10/26/20 12:10 PM, Xueming Li wrote:
> > If the TIS LAG port affinity is set to auto, firmware assigns a port
> > affinity on each creation in a round-robin fashion. In case of 2
> > PFs, if a virtq is created, destroyed and created again, each virtq
> > gets the same port affinity.
> >
> > To work around this firmware limitation, this patch creates the TIS
> > with an explicit port affinity for each PF.
> 
> OK, this patch gives a better description of the issue the series is
> addressing. Could you add a Fixes tag, so that it helps maintainers
> backport it?
> 
Sure, I'll come up with a new version, thanks!

> Other than that:
> Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
> 
> Thanks,
> Maxime
> 
> > Cc: stable@dpdk.org
> >
> > Signed-off-by: Xueming Li <xuemingl@nvidia.com>
> > Acked-by: Matan Azrad <matan@nvidia.com>
> > ---
> >  drivers/vdpa/mlx5/mlx5_vdpa.c       |  3 +++
> >  drivers/vdpa/mlx5/mlx5_vdpa.h       |  3 ++-
> >  drivers/vdpa/mlx5/mlx5_vdpa_virtq.c | 23 ++++++++++++++---------
> >  3 files changed, 19 insertions(+), 10 deletions(-)
> >



* [dpdk-stable] [PATCH v1 1/2] common/mlx5: get number of ports that can be bonded
  2020-10-26 11:10 [dpdk-stable] [PATCH 1/2] common/mlx5: get number of ports that can be bonded Xueming Li
  2020-10-26 11:10 ` [dpdk-stable] [PATCH 2/2] vdpa/mlx5: specify lag port affinity Xueming Li
  2020-10-28  9:30 ` [dpdk-stable] [dpdk-dev] [PATCH 1/2] common/mlx5: get number of ports that can be bonded Maxime Coquelin
@ 2020-10-28 10:44 ` Xueming Li
  2020-10-29  8:29   ` Maxime Coquelin
  2020-10-28 10:44 ` [dpdk-stable] [PATCH v1 2/2] vdpa/mlx5: specify lag port affinity Xueming Li
  3 siblings, 1 reply; 9+ messages in thread
From: Xueming Li @ 2020-10-28 10:44 UTC (permalink / raw)
  To: Matan Azrad, Viacheslav Ovsiienko, Maxime Coquelin
  Cc: dev, xuemingl, Asaf Penso, stable

Get HCA capability: number of physical ports that can be bonded.

Cc: stable@dpdk.org

Signed-off-by: Xueming Li <xuemingl@nvidia.com>
Acked-by: Matan Azrad <matan@nvidia.com>
Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
---
 drivers/common/mlx5/mlx5_devx_cmds.c | 5 +++--
 drivers/common/mlx5/mlx5_devx_cmds.h | 1 +
 2 files changed, 4 insertions(+), 2 deletions(-)

diff --git a/drivers/common/mlx5/mlx5_devx_cmds.c b/drivers/common/mlx5/mlx5_devx_cmds.c
index 8aee12d527..e748d034d0 100644
--- a/drivers/common/mlx5/mlx5_devx_cmds.c
+++ b/drivers/common/mlx5/mlx5_devx_cmds.c
@@ -711,6 +711,7 @@ mlx5_devx_cmd_query_hca_attr(void *ctx,
 	attr->non_wire_sq = MLX5_GET(cmd_hca_cap, hcattr, non_wire_sq);
 	attr->log_max_static_sq_wq = MLX5_GET(cmd_hca_cap, hcattr,
 					      log_max_static_sq_wq);
+	attr->num_lag_ports = MLX5_GET(cmd_hca_cap, hcattr, num_lag_ports);
 	attr->dev_freq_khz = MLX5_GET(cmd_hca_cap, hcattr,
 				      device_frequency_khz);
 	attr->scatter_fcs_w_decap_disable =
@@ -1429,8 +1430,8 @@ mlx5_devx_cmd_create_tis(void *ctx,
 	tis_ctx = MLX5_ADDR_OF(create_tis_in, in, ctx);
 	MLX5_SET(tisc, tis_ctx, strict_lag_tx_port_affinity,
 		 tis_attr->strict_lag_tx_port_affinity);
-	MLX5_SET(tisc, tis_ctx, strict_lag_tx_port_affinity,
-		 tis_attr->strict_lag_tx_port_affinity);
+	MLX5_SET(tisc, tis_ctx, lag_tx_port_affinity,
+		 tis_attr->lag_tx_port_affinity);
 	MLX5_SET(tisc, tis_ctx, prio, tis_attr->prio);
 	MLX5_SET(tisc, tis_ctx, transport_domain,
 		 tis_attr->transport_domain);
diff --git a/drivers/common/mlx5/mlx5_devx_cmds.h b/drivers/common/mlx5/mlx5_devx_cmds.h
index abbea67784..3781fedd9e 100644
--- a/drivers/common/mlx5/mlx5_devx_cmds.h
+++ b/drivers/common/mlx5/mlx5_devx_cmds.h
@@ -99,6 +99,7 @@ struct mlx5_hca_attr {
 	uint32_t cross_channel:1;
 	uint32_t non_wire_sq:1; /* SQ with non-wire ops is supported. */
 	uint32_t log_max_static_sq_wq:5; /* Static WQE size SQ. */
+	uint32_t num_lag_ports:4; /* Number of ports that can be bonded. */
 	uint32_t dev_freq_khz; /* Timestamp counter frequency, kHz. */
 	uint32_t scatter_fcs_w_decap_disable:1;
 	uint32_t regex:1;
-- 
2.25.1



* [dpdk-stable] [PATCH v1 2/2] vdpa/mlx5: specify lag port affinity
  2020-10-26 11:10 [dpdk-stable] [PATCH 1/2] common/mlx5: get number of ports that can be bonded Xueming Li
                   ` (2 preceding siblings ...)
  2020-10-28 10:44 ` [dpdk-stable] [PATCH v1 " Xueming Li
@ 2020-10-28 10:44 ` Xueming Li
  2020-10-29  8:29   ` Maxime Coquelin
  3 siblings, 1 reply; 9+ messages in thread
From: Xueming Li @ 2020-10-28 10:44 UTC (permalink / raw)
  To: Matan Azrad, Viacheslav Ovsiienko, Maxime Coquelin
  Cc: dev, xuemingl, Asaf Penso, matan, stable

If the TIS LAG port affinity is set to auto, firmware assigns a port
affinity on each creation in a round-robin fashion. In case of 2 PFs,
if a virtq is created, destroyed and created again, each virtq gets
the same port affinity.

To work around this firmware limitation, this patch creates the TIS
with an explicit port affinity for each PF.

Fixes: bff735011078 ("vdpa/mlx5: prepare virtio queues")
Cc: matan@mellanox.com
Cc: stable@dpdk.org

Signed-off-by: Xueming Li <xuemingl@nvidia.com>
Acked-by: Matan Azrad <matan@nvidia.com>
Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
---
 drivers/vdpa/mlx5/mlx5_vdpa.c       |  3 +++
 drivers/vdpa/mlx5/mlx5_vdpa.h       |  3 ++-
 drivers/vdpa/mlx5/mlx5_vdpa_virtq.c | 23 ++++++++++++++---------
 3 files changed, 19 insertions(+), 10 deletions(-)

diff --git a/drivers/vdpa/mlx5/mlx5_vdpa.c b/drivers/vdpa/mlx5/mlx5_vdpa.c
index 2d88633bfd..43e84f034e 100644
--- a/drivers/vdpa/mlx5/mlx5_vdpa.c
+++ b/drivers/vdpa/mlx5/mlx5_vdpa.c
@@ -730,6 +730,9 @@ mlx5_vdpa_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
 	}
 	priv->caps = attr.vdpa;
 	priv->log_max_rqt_size = attr.log_max_rqt_size;
+	priv->num_lag_ports = attr.num_lag_ports;
+	if (attr.num_lag_ports == 0)
+		priv->num_lag_ports = 1;
 	priv->ctx = ctx;
 	priv->pci_dev = pci_dev;
 	priv->var = mlx5_glue->dv_alloc_var(ctx, 0);
diff --git a/drivers/vdpa/mlx5/mlx5_vdpa.h b/drivers/vdpa/mlx5/mlx5_vdpa.h
index fcbc12ab0c..c8c1adfde4 100644
--- a/drivers/vdpa/mlx5/mlx5_vdpa.h
+++ b/drivers/vdpa/mlx5/mlx5_vdpa.h
@@ -146,8 +146,9 @@ struct mlx5_vdpa_priv {
 	struct mlx5dv_devx_uar *uar;
 	struct rte_intr_handle intr_handle;
 	struct mlx5_devx_obj *td;
-	struct mlx5_devx_obj *tis;
+	struct mlx5_devx_obj *tiss[16]; /* TIS list for each LAG port. */
 	uint16_t nr_virtqs;
+	uint8_t num_lag_ports;
 	uint64_t features; /* Negotiated features. */
 	uint16_t log_max_rqt_size;
 	struct mlx5_vdpa_steer steer;
diff --git a/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c b/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c
index 17e71cf4f4..4724baca4e 100644
--- a/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c
+++ b/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c
@@ -103,12 +103,13 @@ void
 mlx5_vdpa_virtqs_release(struct mlx5_vdpa_priv *priv)
 {
 	int i;
-
 	for (i = 0; i < priv->nr_virtqs; i++)
 		mlx5_vdpa_virtq_unset(&priv->virtqs[i]);
-	if (priv->tis) {
-		claim_zero(mlx5_devx_cmd_destroy(priv->tis));
-		priv->tis = NULL;
+	for (i = 0; i < priv->num_lag_ports; i++) {
+		if (priv->tiss[i]) {
+			claim_zero(mlx5_devx_cmd_destroy(priv->tiss[i]));
+			priv->tiss[i] = NULL;
+		}
 	}
 	if (priv->td) {
 		claim_zero(mlx5_devx_cmd_destroy(priv->td));
@@ -302,7 +303,7 @@ mlx5_vdpa_virtq_setup(struct mlx5_vdpa_priv *priv, int index)
 	attr.hw_used_index = last_used_idx;
 	attr.q_size = vq.size;
 	attr.mkey = priv->gpa_mkey_index;
-	attr.tis_id = priv->tis->id;
+	attr.tis_id = priv->tiss[(index / 2) % priv->num_lag_ports]->id;
 	attr.queue_index = index;
 	attr.pd = priv->pdn;
 	virtq->virtq = mlx5_devx_cmd_create_virtq(priv->ctx, &attr);
@@ -432,10 +433,14 @@ mlx5_vdpa_virtqs_prepare(struct mlx5_vdpa_priv *priv)
 		return -rte_errno;
 	}
 	tis_attr.transport_domain = priv->td->id;
-	priv->tis = mlx5_devx_cmd_create_tis(priv->ctx, &tis_attr);
-	if (!priv->tis) {
-		DRV_LOG(ERR, "Failed to create TIS.");
-		goto error;
+	for (i = 0; i < priv->num_lag_ports; i++) {
+		/* 0 means auto affinity; a non-zero value proposes a port. */
+		tis_attr.lag_tx_port_affinity = i + 1;
+		priv->tiss[i] = mlx5_devx_cmd_create_tis(priv->ctx, &tis_attr);
+		if (!priv->tiss[i]) {
+			DRV_LOG(ERR, "Failed to create TIS %u.", i);
+			goto error;
+		}
 	}
 	priv->nr_virtqs = nr_vring;
 	for (i = 0; i < nr_vring; i++)
-- 
2.25.1



* Re: [dpdk-stable] [PATCH v1 1/2] common/mlx5: get number of ports that can be bonded
  2020-10-28 10:44 ` [dpdk-stable] [PATCH v1 " Xueming Li
@ 2020-10-29  8:29   ` Maxime Coquelin
  0 siblings, 0 replies; 9+ messages in thread
From: Maxime Coquelin @ 2020-10-29  8:29 UTC (permalink / raw)
  To: Xueming Li, Matan Azrad, Viacheslav Ovsiienko; +Cc: dev, Asaf Penso, stable



On 10/28/20 11:44 AM, Xueming Li wrote:
> Get HCA capability: number of physical ports that can be bonded.
> 
> Cc: stable@dpdk.org
> 
> Signed-off-by: Xueming Li <xuemingl@nvidia.com>
> Acked-by: Matan Azrad <matan@nvidia.com>
> Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
> ---
>  drivers/common/mlx5/mlx5_devx_cmds.c | 5 +++--
>  drivers/common/mlx5/mlx5_devx_cmds.h | 1 +
>  2 files changed, 4 insertions(+), 2 deletions(-)


Applied to dpdk-next-virtio/main.

Thanks!
Maxime



* Re: [dpdk-stable] [PATCH v1 2/2] vdpa/mlx5: specify lag port affinity
  2020-10-28 10:44 ` [dpdk-stable] [PATCH v1 2/2] vdpa/mlx5: specify lag port affinity Xueming Li
@ 2020-10-29  8:29   ` Maxime Coquelin
  0 siblings, 0 replies; 9+ messages in thread
From: Maxime Coquelin @ 2020-10-29  8:29 UTC (permalink / raw)
  To: Xueming Li, Matan Azrad, Viacheslav Ovsiienko
  Cc: dev, Asaf Penso, matan, stable



On 10/28/20 11:44 AM, Xueming Li wrote:
> If the TIS LAG port affinity is set to auto, firmware assigns a port
> affinity on each creation in a round-robin fashion. In case of 2 PFs,
> if a virtq is created, destroyed and created again, each virtq gets
> the same port affinity.
> 
> To work around this firmware limitation, this patch creates the TIS
> with an explicit port affinity for each PF.
> 
> Fixes: bff735011078 ("vdpa/mlx5: prepare virtio queues")
> Cc: matan@mellanox.com
> Cc: stable@dpdk.org
> 
> Signed-off-by: Xueming Li <xuemingl@nvidia.com>
> Acked-by: Matan Azrad <matan@nvidia.com>
> Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
> ---
>  drivers/vdpa/mlx5/mlx5_vdpa.c       |  3 +++
>  drivers/vdpa/mlx5/mlx5_vdpa.h       |  3 ++-
>  drivers/vdpa/mlx5/mlx5_vdpa_virtq.c | 23 ++++++++++++++---------
>  3 files changed, 19 insertions(+), 10 deletions(-)


Applied to dpdk-next-virtio/main.

Thanks!
Maxime



end of thread

Thread overview: 9+ messages
2020-10-26 11:10 [dpdk-stable] [PATCH 1/2] common/mlx5: get number of ports that can be bonded Xueming Li
2020-10-26 11:10 ` [dpdk-stable] [PATCH 2/2] vdpa/mlx5: specify lag port affinity Xueming Li
2020-10-28  9:35   ` [dpdk-stable] [dpdk-dev] " Maxime Coquelin
2020-10-28 10:32     ` Xueming(Steven) Li
2020-10-28  9:30 ` [dpdk-stable] [dpdk-dev] [PATCH 1/2] common/mlx5: get number of ports that can be bonded Maxime Coquelin
2020-10-28 10:44 ` [dpdk-stable] [PATCH v1 " Xueming Li
2020-10-29  8:29   ` Maxime Coquelin
2020-10-28 10:44 ` [dpdk-stable] [PATCH v1 2/2] vdpa/mlx5: specify lag port affinity Xueming Li
2020-10-29  8:29   ` Maxime Coquelin
