automatic DPDK test reports
* [dpdk-test-report] |WARNING| pw102334-102335 [PATCH] [2/2] net/mlx5: set txq affinity in round-robin
From: dpdklab @ 2021-10-20  3:49 UTC
  To: test-report; +Cc: dpdk-test-reports


Test-Label: iol-testing
Test-Status: WARNING
http://dpdk.org/patch/102334

_apply patch failure_

Submitter: Rongwei Liu <rongweil@nvidia.com>
Date: Wednesday, October 20, 2021 03:19:38
Applied on: CommitID:69a3c6319140b34fb714fa5bd6990cceb2ea2997
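
The check output below appears to come from git apply; assuming so, the
failure should be reproducible locally by checking out the commit above in a
dpdk tree and running "git apply --reject" on the two patches fetched from
patchwork.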
Apply patch set 102334-102335 failed:

Checking patch doc/guides/nics/mlx5.rst...
Checking patch drivers/net/mlx5/linux/mlx5_os.c...
Hunk #2 succeeded at 1685 (offset -21 lines).
Checking patch drivers/net/mlx5/mlx5.c...
error: while searching for:
	return err;
}

/**
 * Allocate shared device context. If there is multiport device the
 * master and representors will share this context, if there is single

error: patch failed: drivers/net/mlx5/mlx5.c:1118
Hunk #2 succeeded at 1253 (offset 46 lines).
Hunk #3 succeeded at 1323 (offset 46 lines).
error: while searching for:
	MLX5_ASSERT(sh);
	if (sh->share_cache.cache.table)
		mlx5_mr_btree_free(&sh->share_cache.cache);
	if (sh->tis)
		claim_zero(mlx5_devx_cmd_destroy(sh->tis));
	if (sh->td)
		claim_zero(mlx5_devx_cmd_destroy(sh->td));
	if (sh->devx_rx_uar)
		mlx5_glue->devx_free_uar(sh->devx_rx_uar);
	if (sh->tx_uar)

error: patch failed: drivers/net/mlx5/mlx5.c:1282
error: while searching for:
void
mlx5_free_shared_dev_ctx(struct mlx5_dev_ctx_shared *sh)
{
	pthread_mutex_lock(&mlx5_dev_ctx_list_mutex);
#ifdef RTE_LIBRTE_MLX5_DEBUG
	/* Check the object presence in the list. */

error: patch failed: drivers/net/mlx5/mlx5.c:1310
Hunk #6 succeeded at 1488 (offset 64 lines).
Checking patch drivers/net/mlx5/mlx5.h...
Hunk #1 succeeded at 1127 (offset 16 lines).
Hunk #2 succeeded at 1202 (offset 18 lines).
Hunk #3 succeeded at 1470 (offset 18 lines).
Checking patch drivers/net/mlx5/mlx5_devx.c...
Checking patch drivers/net/mlx5/mlx5_txpp.c...
Applied patch doc/guides/nics/mlx5.rst cleanly.
Applied patch drivers/net/mlx5/linux/mlx5_os.c cleanly.
Applying patch drivers/net/mlx5/mlx5.c with 3 rejects...
Rejected hunk #1.
Hunk #2 applied cleanly.
Hunk #3 applied cleanly.
Rejected hunk #4.
Rejected hunk #5.
Hunk #6 applied cleanly.
Applied patch drivers/net/mlx5/mlx5.h cleanly.
Applied patch drivers/net/mlx5/mlx5_devx.c cleanly.
Applied patch drivers/net/mlx5/mlx5_txpp.c cleanly.
diff a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c	(rejected hunks)
@@ -1118,6 +1118,68 @@ mlx5_alloc_rxtx_uars(struct mlx5_dev_ctx_shared *sh,
 	return err;
 }
 
+/**
+ * Set up multiple TISs with different affinities according to
+ * the number of bonding ports.
+ *
+ * @param sh
+ *   Pointer to the shared device context.
+ *
+ * @return
+ *   Zero on success, -1 otherwise.
+ */
+static int
+mlx5_setup_tis(struct mlx5_dev_ctx_shared *sh)
+{
+	int i;
+	struct mlx5_devx_lag_context lag_ctx = { 0 };
+	struct mlx5_devx_tis_attr tis_attr = { 0 };
+
+	tis_attr.transport_domain = sh->td->id;
+	if (sh->bond.n_port) {
+		if (!mlx5_devx_cmd_query_lag(sh->ctx, &lag_ctx)) {
+			sh->lag.tx_remap_affinity[0] =
+				lag_ctx.tx_remap_affinity_1;
+			sh->lag.tx_remap_affinity[1] =
+				lag_ctx.tx_remap_affinity_2;
+			sh->lag.affinity_mode = lag_ctx.port_select_mode;
+		} else {
+			DRV_LOG(ERR, "Failed to query lag affinity.");
+			return -1;
+		}
+		if (sh->lag.affinity_mode == MLX5_LAG_MODE_TIS) {
+			for (i = 0; i < sh->bond.n_port; i++) {
+				tis_attr.lag_tx_port_affinity =
+					MLX5_IFC_LAG_MAP_TIS_AFFINITY(i,
+							sh->bond.n_port);
+				sh->tis[i] = mlx5_devx_cmd_create_tis(sh->ctx,
+						&tis_attr);
+				if (!sh->tis[i]) {
+					DRV_LOG(ERR, "Failed to create TIS %d/%d for bonding device"
+						" %s.", i, sh->bond.n_port,
+						sh->ibdev_name);
+					return -1;
+				}
+			}
+			DRV_LOG(DEBUG, "LAG number of ports: %d, affinity_1 & 2: pf%d & %d.",
+				sh->bond.n_port, lag_ctx.tx_remap_affinity_1,
+				lag_ctx.tx_remap_affinity_2);
+			return 0;
+		}
+		if (sh->lag.affinity_mode == MLX5_LAG_MODE_HASH)
+			DRV_LOG(INFO, "Device %s enabled HW hash based LAG.",
+					sh->ibdev_name);
+	}
+	tis_attr.lag_tx_port_affinity = 0;
+	sh->tis[0] = mlx5_devx_cmd_create_tis(sh->ctx, &tis_attr);
+	if (!sh->tis[0]) {
+		DRV_LOG(ERR, "Failed to create TIS 0 for bonding device"
+			" %s.", sh->ibdev_name);
+		return -1;
+	}
+	return 0;
+}
+
 /**
  * Allocate shared device context. If there is multiport device the
  * master and representors will share this context, if there is single
@@ -1282,10 +1341,13 @@ mlx5_alloc_shared_dev_ctx(const struct mlx5_dev_spawn_data *spawn,
 	MLX5_ASSERT(sh);
 	if (sh->share_cache.cache.table)
 		mlx5_mr_btree_free(&sh->share_cache.cache);
-	if (sh->tis)
-		claim_zero(mlx5_devx_cmd_destroy(sh->tis));
 	if (sh->td)
 		claim_zero(mlx5_devx_cmd_destroy(sh->td));
+	i = 0;
+	do {
+		if (sh->tis[i])
+			claim_zero(mlx5_devx_cmd_destroy(sh->tis[i]));
+	} while (++i < (uint32_t)sh->bond.n_port);
 	if (sh->devx_rx_uar)
 		mlx5_glue->devx_free_uar(sh->devx_rx_uar);
 	if (sh->tx_uar)
@@ -1310,6 +1372,7 @@ mlx5_alloc_shared_dev_ctx(const struct mlx5_dev_spawn_data *spawn,
 void
 mlx5_free_shared_dev_ctx(struct mlx5_dev_ctx_shared *sh)
 {
+	int i = 0;
 	pthread_mutex_lock(&mlx5_dev_ctx_list_mutex);
 #ifdef RTE_LIBRTE_MLX5_DEBUG
 	/* Check the object presence in the list. */

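Rejected hunk #1 above carries the core of the change: it creates one TIS
per bonding port and maps each TIS onto a port through
MLX5_IFC_LAG_MAP_TIS_AFFINITY, which is what produces the round-robin txq
affinity named in the subject. A minimal standalone sketch of that mapping,
assuming the macro follows the usual 1-based modulo form (the stand-in below
is illustrative and not taken from the patch set):

#include <stdio.h>

/* Hypothetical stand-in for MLX5_IFC_LAG_MAP_TIS_AFFINITY, assuming a
 * 1-based modulo mapping: TIS index i maps to port (i % num) + 1, and
 * num == 0 means "no explicit affinity" (affinity 0). */
#define LAG_MAP_TIS_AFFINITY(index, num) \
	((num) ? ((index) % (num) + 1) : 0)

int main(void)
{
	int n_port = 2; /* two bonded ports, as in the debug log above */
	int i;

	for (i = 0; i < n_port; i++)
		printf("TIS %d -> port affinity %d\n",
		       i, LAG_MAP_TIS_AFFINITY(i, n_port));
	/* Prints: TIS 0 -> port affinity 1, TIS 1 -> port affinity 2. */
	return 0;
}

With n_port == 2 the mapping yields affinities 1 and 2, matching the
"affinity_1 & 2" debug message in the hunk.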
https://lab.dpdk.org/results/dashboard/patchsets/19565/

UNH-IOL DPDK Community Lab
