DPDK patches and discussions
* [dpdk-dev] [PATCH 0/2] set txq affinity in round-robin
@ 2021-10-20  3:19 Rongwei Liu
  2021-10-20  3:19 ` [dpdk-dev] [PATCH 1/2] common/mlx5: support lag context query Rongwei Liu
  2021-10-20  3:19 ` [dpdk-dev] [PATCH 2/2] net/mlx5: set txq affinity in round-robin Rongwei Liu
  0 siblings, 2 replies; 3+ messages in thread
From: Rongwei Liu @ 2021-10-20  3:19 UTC (permalink / raw)
  To: matan, viacheslavo, orika, thomas; +Cc: dev, rasland

Create multiple TISs (one per bonded PF) and bind the TXQs to
different TISs. The TXQ affinity is then predictable and stable,
and traffic is load-balanced across the physical ports per PMD process.
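
A rough sketch of the scheme (illustrative only, condensed from the two
patches; "ctx", "td_id" and "n_bond_ports" stand for the DevX device
context, the transport domain ID and the bonded-port count, and error
handling is omitted):

    /* One TIS per bonded PF: TIS[i] is pinned to physical port i + 1
     * (affinity 0 would mean "let firmware auto-select the port"). */
    struct mlx5_devx_tis_attr tis_attr = { .transport_domain = td_id };
    struct mlx5_devx_obj *tis[16]; /* Sized as in this series. */
    int i;

    for (i = 0; i < n_bond_ports; i++) {
        tis_attr.lag_tx_port_affinity = i + 1;
        tis[i] = mlx5_devx_cmd_create_tis(ctx, &tis_attr);
    }
    /* TXQ j on the port with bonding index p then uses
     * TIS[(p + j) % n_bond_ports]. */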

Rongwei Liu (2):
  common/mlx5: support lag context query
  net/mlx5: set txq affinity in round-robin

 doc/guides/nics/mlx5.rst             |  4 ++
 drivers/common/mlx5/mlx5_devx_cmds.c | 40 ++++++++++++++
 drivers/common/mlx5/mlx5_devx_cmds.h | 13 +++++
 drivers/common/mlx5/mlx5_prm.h       | 45 +++++++++++++++-
 drivers/common/mlx5/version.map      |  1 +
 drivers/net/mlx5/linux/mlx5_os.c     |  2 +-
 drivers/net/mlx5/mlx5.c              | 81 +++++++++++++++++++++++++---
 drivers/net/mlx5/mlx5.h              | 10 +++-
 drivers/net/mlx5/mlx5_devx.c         | 37 ++++++++++++-
 drivers/net/mlx5/mlx5_txpp.c         |  4 +-
 10 files changed, 222 insertions(+), 15 deletions(-)

-- 
2.27.0



* [dpdk-dev] [PATCH 1/2] common/mlx5: support lag context query
  2021-10-20  3:19 [dpdk-dev] [PATCH 0/2] set txq affinity in round-robin Rongwei Liu
@ 2021-10-20  3:19 ` Rongwei Liu
  2021-10-20  3:19 ` [dpdk-dev] [PATCH 2/2] net/mlx5: set txq affinity in round-robin Rongwei Liu
  1 sibling, 0 replies; 3+ messages in thread
From: Rongwei Liu @ 2021-10-20  3:19 UTC (permalink / raw)
  To: matan, viacheslavo, orika, thomas, Ray Kinsella; +Cc: dev, rasland, Jiawei Wang

Add a new API, mlx5_devx_cmd_query_lag(), to query the LAG
properties from firmware, including state, affinity and port
selection mode.
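
For illustration, a minimal caller sketch (not part of this patch; it
assumes a DevX-capable device context and the internal header
mlx5_devx_cmds.h, and the helper name is hypothetical):

    /* Dump the LAG state reported by firmware. */
    static void
    dump_lag_context(void *ctx)
    {
        struct mlx5_devx_lag_context lag_ctx = { 0 };

        if (mlx5_devx_cmd_query_lag(ctx, &lag_ctx) != 0)
            return; /* Query failed or LAG is not supported. */
        /* tx_remap_affinity_1/2 report which PF carries affinity 1 and 2. */
        DRV_LOG(DEBUG, "lag_state=%u mode=%u affinity_1=pf%u affinity_2=pf%u",
                lag_ctx.lag_state, lag_ctx.port_select_mode,
                lag_ctx.tx_remap_affinity_1, lag_ctx.tx_remap_affinity_2);
    }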

Signed-off-by: Jiawei Wang <jiaweiw@nvidia.com>
Signed-off-by: Rongwei Liu <rongweil@nvidia.com>
Acked-by: Matan Azrad <matan@nvidia.com>
---
 drivers/common/mlx5/mlx5_devx_cmds.c | 40 +++++++++++++++++++++++++
 drivers/common/mlx5/mlx5_devx_cmds.h | 13 ++++++++
 drivers/common/mlx5/mlx5_prm.h       | 45 +++++++++++++++++++++++++++-
 drivers/common/mlx5/version.map      |  1 +
 4 files changed, 98 insertions(+), 1 deletion(-)

diff --git a/drivers/common/mlx5/mlx5_devx_cmds.c b/drivers/common/mlx5/mlx5_devx_cmds.c
index 6538bce57b..fb7c8e986f 100644
--- a/drivers/common/mlx5/mlx5_devx_cmds.c
+++ b/drivers/common/mlx5/mlx5_devx_cmds.c
@@ -2800,3 +2800,43 @@ mlx5_devx_cmd_create_crypto_login_obj(void *ctx,
 	crypto_login_obj->id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
 	return crypto_login_obj;
 }
+
+/**
+ * Query LAG context.
+ *
+ * @param[in] ctx
+ *   Pointer to ibv_context, returned from mlx5dv_open_device.
+ * @param[out] lag_ctx
+ *   Pointer to struct mlx5_devx_lag_context, to be set by the routine.
+ *
+ * @return
+ *   0 on success, a negative value otherwise.
+ */
+int
+mlx5_devx_cmd_query_lag(void *ctx,
+			struct mlx5_devx_lag_context *lag_ctx)
+{
+	uint32_t in[MLX5_ST_SZ_DW(query_lag_in)] = {0};
+	uint32_t out[MLX5_ST_SZ_DW(query_lag_out)] = {0};
+	void *lctx;
+	int rc;
+
+	MLX5_SET(query_lag_in, in, opcode, MLX5_CMD_OP_QUERY_LAG);
+	rc = mlx5_glue->devx_general_cmd(ctx, in, sizeof(in), out, sizeof(out));
+	if (rc)
+		goto error;
+	lctx = MLX5_ADDR_OF(query_lag_out, out, context);
+	lag_ctx->fdb_selection_mode = MLX5_GET(lag_context, lctx,
+					       fdb_selection_mode);
+	lag_ctx->port_select_mode = MLX5_GET(lag_context, lctx,
+					       port_select_mode);
+	lag_ctx->lag_state = MLX5_GET(lag_context, lctx, lag_state);
+	lag_ctx->tx_remap_affinity_2 = MLX5_GET(lag_context, lctx,
+						tx_remap_affinity_2);
+	lag_ctx->tx_remap_affinity_1 = MLX5_GET(lag_context, lctx,
+						tx_remap_affinity_1);
+	return 0;
+error:
+	rc = (rc > 0) ? -rc : rc;
+	return rc;
+}
diff --git a/drivers/common/mlx5/mlx5_devx_cmds.h b/drivers/common/mlx5/mlx5_devx_cmds.h
index 6948cadd37..5e4f3b749e 100644
--- a/drivers/common/mlx5/mlx5_devx_cmds.h
+++ b/drivers/common/mlx5/mlx5_devx_cmds.h
@@ -197,6 +197,15 @@ struct mlx5_hca_attr {
 	uint32_t umr_indirect_mkey_disabled:1;
 };
 
+/* LAG Context. */
+struct mlx5_devx_lag_context {
+	uint32_t fdb_selection_mode:1;
+	uint32_t port_select_mode:3;
+	uint32_t lag_state:3;
+	uint32_t tx_remap_affinity_1:4;
+	uint32_t tx_remap_affinity_2:4;
+};
+
 struct mlx5_devx_wq_attr {
 	uint32_t wq_type:4;
 	uint32_t wq_signature:1;
@@ -681,4 +690,8 @@ struct mlx5_devx_obj *
 mlx5_devx_cmd_create_crypto_login_obj(void *ctx,
 				      struct mlx5_devx_crypto_login_attr *attr);
 
+__rte_internal
+int
+mlx5_devx_cmd_query_lag(void *ctx,
+			struct mlx5_devx_lag_context *lag_ctx);
 #endif /* RTE_PMD_MLX5_DEVX_CMDS_H_ */
diff --git a/drivers/common/mlx5/mlx5_prm.h b/drivers/common/mlx5/mlx5_prm.h
index 54e62aa153..eab80eaead 100644
--- a/drivers/common/mlx5/mlx5_prm.h
+++ b/drivers/common/mlx5/mlx5_prm.h
@@ -1048,6 +1048,7 @@ enum {
 	MLX5_CMD_OP_DEALLOC_PD = 0x801,
 	MLX5_CMD_OP_ACCESS_REGISTER = 0x805,
 	MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN = 0x816,
+	MLX5_CMD_OP_QUERY_LAG = 0x842,
 	MLX5_CMD_OP_CREATE_TIR = 0x900,
 	MLX5_CMD_OP_MODIFY_TIR = 0x901,
 	MLX5_CMD_OP_CREATE_SQ = 0X904,
@@ -1507,7 +1508,8 @@ struct mlx5_ifc_cmd_hca_cap_bits {
 	u8 uar_4k[0x1];
 	u8 reserved_at_241[0x9];
 	u8 uar_sz[0x6];
-	u8 reserved_at_250[0x8];
+	u8 port_selection_cap[0x1];
+	u8 reserved_at_251[0x7];
 	u8 log_pg_sz[0x8];
 	u8 bf[0x1];
 	u8 driver_version[0x1];
@@ -1974,6 +1976,14 @@ struct mlx5_ifc_query_nic_vport_context_in_bits {
 	u8 reserved_at_68[0x18];
 };
 
+/*
+ * lag_tx_port_affinity: 0 auto-selection, 1 PF1, 2 PF2, and so on.
+ * Each TIS binds to one PF by setting lag_tx_port_affinity (> 0).
+ * Once LAG is enabled, we create multiple TISs and bind each one to a
+ * different PF: TIS[i] gets affinity i + 1 and goes to PF i + 1.
+ */
+#define MLX5_IFC_LAG_MAP_TIS_AFFINITY(index, num) ((num) ? \
+						    (index) % (num) + 1 : 0)
 struct mlx5_ifc_tisc_bits {
 	u8 strict_lag_tx_port_affinity[0x1];
 	u8 reserved_at_1[0x3];
@@ -2007,6 +2017,39 @@ struct mlx5_ifc_query_tis_in_bits {
 	u8 reserved_at_60[0x20];
 };
 
+/* port_select_mode definition. */
+enum mlx5_lag_mode_type {
+	MLX5_LAG_MODE_TIS = 0,
+	MLX5_LAG_MODE_HASH = 1,
+};
+
+struct mlx5_ifc_lag_context_bits {
+	u8 fdb_selection_mode[0x1];
+	u8 reserved_at_1[0x14];
+	u8 port_select_mode[0x3];
+	u8 reserved_at_18[0x5];
+	u8 lag_state[0x3];
+	u8 reserved_at_20[0x14];
+	u8 tx_remap_affinity_2[0x4];
+	u8 reserved_at_38[0x4];
+	u8 tx_remap_affinity_1[0x4];
+};
+
+struct mlx5_ifc_query_lag_in_bits {
+	u8 opcode[0x10];
+	u8 uid[0x10];
+	u8 reserved_at_20[0x10];
+	u8 op_mod[0x10];
+	u8 reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_query_lag_out_bits {
+	u8 status[0x8];
+	u8 reserved_at_8[0x18];
+	u8 syndrome[0x20];
+	struct mlx5_ifc_lag_context_bits context;
+};
+
 struct mlx5_ifc_alloc_transport_domain_out_bits {
 	u8 status[0x8];
 	u8 reserved_at_8[0x18];
diff --git a/drivers/common/mlx5/version.map b/drivers/common/mlx5/version.map
index d3c5040aac..95f8bddb94 100644
--- a/drivers/common/mlx5/version.map
+++ b/drivers/common/mlx5/version.map
@@ -53,6 +53,7 @@ INTERNAL {
 	mlx5_devx_cmd_modify_virtq;
 	mlx5_devx_cmd_qp_query_tis_td;
 	mlx5_devx_cmd_query_hca_attr;
+	mlx5_devx_cmd_query_lag;
 	mlx5_devx_cmd_query_parse_samples;
 	mlx5_devx_cmd_query_virtio_q_counters; # WINDOWS_NO_EXPORT
 	mlx5_devx_cmd_query_virtq;
-- 
2.27.0



* [dpdk-dev] [PATCH 2/2] net/mlx5: set txq affinity in round-robin
  2021-10-20  3:19 [dpdk-dev] [PATCH 0/2] set txq affinity in round-robin Rongwei Liu
  2021-10-20  3:19 ` [dpdk-dev] [PATCH 1/2] common/mlx5: support lag context query Rongwei Liu
@ 2021-10-20  3:19 ` Rongwei Liu
  1 sibling, 0 replies; 3+ messages in thread
From: Rongwei Liu @ 2021-10-20  3:19 UTC (permalink / raw)
  To: matan, viacheslavo, orika, thomas; +Cc: dev, rasland

Previously, we set the txq affinity to 0 and let firmware
perform round-robin when bonding. Firmware uses a global
counter to assign the txq affinity to the different physical
ports according to the remainder after division.

There are three disadvantages:
1. The global counter is shared between the kernel and DPDK.
2. After restarting the PMD or the port, the previous counter value
is reused, so the new affinity is unpredictable.
3. There is no way to query which affinity firmware has assigned.

In this update, we create one TIS per bonding port (up to the
number of bonding ports) and bind each TIS to one PF port.

Each port starts picking TISs from its own port index, so an
upper-layer application can quickly calculate each txq's
affinity without querying.

At the DPDK layer, when creating txqs with 2 bonding ports, the
affinity is assigned as follows:
port 0: 1-->2-->1-->2
port 1: 2-->1-->2-->1
port 2: 1-->2-->1-->2

Note: this only applies to the DevX API.
This affinity is subject to the HW hash.
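
The per-queue rule can be written as a small sketch (illustrative only;
the names are local to this example, while the driver derives the
starting index from the device spawn order and uses
MLX5_IFC_LAG_MAP_TIS_AFFINITY internally):

    /* Affinity 0 means "firmware/HW decides"; otherwise it is 1-based. */
    static unsigned int
    txq_affinity(unsigned int port_idx, unsigned int queue_idx,
                 unsigned int n_bond_ports)
    {
        if (n_bond_ports == 0)
            return 0;
        /* Each port starts from its own index and cycles over the PFs. */
        return (port_idx + queue_idx) % n_bond_ports + 1;
    }

With n_bond_ports == 2 this reproduces the table above: port 0 maps its
queues to 1, 2, 1, 2, ... and port 1 to 2, 1, 2, 1, ...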

Signed-off-by: Rongwei Liu <rongweil@nvidia.com>
Acked-by: Matan Azrad <matan@nvidia.com>
---
 doc/guides/nics/mlx5.rst         |  4 ++
 drivers/net/mlx5/linux/mlx5_os.c |  2 +-
 drivers/net/mlx5/mlx5.c          | 81 ++++++++++++++++++++++++++++----
 drivers/net/mlx5/mlx5.h          | 10 +++-
 drivers/net/mlx5/mlx5_devx.c     | 37 ++++++++++++++-
 drivers/net/mlx5/mlx5_txpp.c     |  4 +-
 6 files changed, 124 insertions(+), 14 deletions(-)

diff --git a/doc/guides/nics/mlx5.rst b/doc/guides/nics/mlx5.rst
index bae73f42d8..d817caedac 100644
--- a/doc/guides/nics/mlx5.rst
+++ b/doc/guides/nics/mlx5.rst
@@ -464,6 +464,10 @@ Limitations
   - In order to achieve best insertion rate, application should manage the flows per lcore.
   - Better to disable memory reclaim by setting ``reclaim_mem_mode`` to 0 to accelerate the flow object allocation and release with cache.
 
+- HW hashed bonding
+
+  - TXQ affinity is subject to HW hash once it is enabled.
+
 Statistics
 ----------
 
diff --git a/drivers/net/mlx5/linux/mlx5_os.c b/drivers/net/mlx5/linux/mlx5_os.c
index a823d26beb..1d7fa7dc6c 100644
--- a/drivers/net/mlx5/linux/mlx5_os.c
+++ b/drivers/net/mlx5/linux/mlx5_os.c
@@ -928,7 +928,6 @@ mlx5_representor_match(struct mlx5_dev_spawn_data *spawn,
 	return false;
 }
 
-
 /**
  * Spawn an Ethernet device from Verbs information.
  *
@@ -1707,6 +1706,7 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
 	 */
 	MLX5_ASSERT(spawn->ifindex);
 	priv->if_index = spawn->ifindex;
+	priv->lag_affinity_idx = sh->refcnt - 1;
 	eth_dev->data->dev_private = priv;
 	priv->dev_data = eth_dev->data;
 	eth_dev->data->mac_addrs = priv->mac;
diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index e28cc461b9..e049a367f0 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -1118,6 +1118,68 @@ mlx5_alloc_rxtx_uars(struct mlx5_dev_ctx_shared *sh,
 	return err;
 }
 
+/**
+ * Set up multiple TISs with different affinities according to
+ * the number of bonding ports.
+ *
+ * @param sh
+ *   Pointer to the shared device context.
+ *
+ * @return
+ * Zero on success, -1 otherwise.
+ */
+static int
+mlx5_setup_tis(struct mlx5_dev_ctx_shared *sh)
+{
+	int i;
+	struct mlx5_devx_lag_context lag_ctx = { 0 };
+	struct mlx5_devx_tis_attr tis_attr = { 0 };
+
+	tis_attr.transport_domain = sh->td->id;
+	if (sh->bond.n_port) {
+		if (!mlx5_devx_cmd_query_lag(sh->ctx, &lag_ctx)) {
+			sh->lag.tx_remap_affinity[0] =
+				lag_ctx.tx_remap_affinity_1;
+			sh->lag.tx_remap_affinity[1] =
+				lag_ctx.tx_remap_affinity_2;
+			sh->lag.affinity_mode = lag_ctx.port_select_mode;
+		} else {
+			DRV_LOG(ERR, "Failed to query lag affinity.");
+			return -1;
+		}
+		if (sh->lag.affinity_mode == MLX5_LAG_MODE_TIS) {
+			for (i = 0; i < sh->bond.n_port; i++) {
+				tis_attr.lag_tx_port_affinity =
+					MLX5_IFC_LAG_MAP_TIS_AFFINITY(i,
+							sh->bond.n_port);
+				sh->tis[i] = mlx5_devx_cmd_create_tis(sh->ctx,
+						&tis_attr);
+				if (!sh->tis[i]) {
+					DRV_LOG(ERR, "Failed to create TIS %d/%d for bonding device"
+						" %s.", i, sh->bond.n_port,
+						sh->ibdev_name);
+					return -1;
+				}
+			}
+			DRV_LOG(DEBUG, "LAG number of ports : %d, affinity_1 & 2 : pf%d & %d.\n",
+				sh->bond.n_port, lag_ctx.tx_remap_affinity_1,
+				lag_ctx.tx_remap_affinity_2);
+			return 0;
+		}
+		if (sh->lag.affinity_mode == MLX5_LAG_MODE_HASH)
+			DRV_LOG(INFO, "Device %s enabled HW hash based LAG.",
+					sh->ibdev_name);
+	}
+	tis_attr.lag_tx_port_affinity = 0;
+	sh->tis[0] = mlx5_devx_cmd_create_tis(sh->ctx, &tis_attr);
+	if (!sh->tis[0]) {
+		DRV_LOG(ERR, "Failed to create TIS 0 for bonding device"
+			" %s.", sh->ibdev_name);
+		return -1;
+	}
+	return 0;
+}
+
 /**
  * Allocate shared device context. If there is multiport device the
  * master and representors will share this context, if there is single
@@ -1145,7 +1207,6 @@ mlx5_alloc_shared_dev_ctx(const struct mlx5_dev_spawn_data *spawn,
 	struct mlx5_dev_ctx_shared *sh;
 	int err = 0;
 	uint32_t i;
-	struct mlx5_devx_tis_attr tis_attr = { 0 };
 
 	MLX5_ASSERT(spawn);
 	/* Secondary process should not create the shared context. */
@@ -1216,9 +1277,7 @@ mlx5_alloc_shared_dev_ctx(const struct mlx5_dev_spawn_data *spawn,
 			err = ENOMEM;
 			goto error;
 		}
-		tis_attr.transport_domain = sh->td->id;
-		sh->tis = mlx5_devx_cmd_create_tis(sh->ctx, &tis_attr);
-		if (!sh->tis) {
+		if (mlx5_setup_tis(sh)) {
 			DRV_LOG(ERR, "TIS allocation failure");
 			err = ENOMEM;
 			goto error;
@@ -1282,10 +1341,13 @@ mlx5_alloc_shared_dev_ctx(const struct mlx5_dev_spawn_data *spawn,
 	MLX5_ASSERT(sh);
 	if (sh->share_cache.cache.table)
 		mlx5_mr_btree_free(&sh->share_cache.cache);
-	if (sh->tis)
-		claim_zero(mlx5_devx_cmd_destroy(sh->tis));
 	if (sh->td)
 		claim_zero(mlx5_devx_cmd_destroy(sh->td));
+	i = 0;
+	do {
+		if (sh->tis[i])
+			claim_zero(mlx5_devx_cmd_destroy(sh->tis[i]));
+	} while (++i < (uint32_t)sh->bond.n_port);
 	if (sh->devx_rx_uar)
 		mlx5_glue->devx_free_uar(sh->devx_rx_uar);
 	if (sh->tx_uar)
@@ -1310,6 +1372,7 @@ mlx5_alloc_shared_dev_ctx(const struct mlx5_dev_spawn_data *spawn,
 void
 mlx5_free_shared_dev_ctx(struct mlx5_dev_ctx_shared *sh)
 {
+	int i = 0;
 	pthread_mutex_lock(&mlx5_dev_ctx_list_mutex);
 #ifdef RTE_LIBRTE_MLX5_DEBUG
 	/* Check the object presence in the list. */
@@ -1361,8 +1424,10 @@ mlx5_free_shared_dev_ctx(struct mlx5_dev_ctx_shared *sh)
 	}
 	if (sh->pd)
 		claim_zero(mlx5_os_dealloc_pd(sh->pd));
-	if (sh->tis)
-		claim_zero(mlx5_devx_cmd_destroy(sh->tis));
+	do {
+		if (sh->tis[i])
+			claim_zero(mlx5_devx_cmd_destroy(sh->tis[i]));
+	} while (++i < sh->bond.n_port);
 	if (sh->td)
 		claim_zero(mlx5_devx_cmd_destroy(sh->td));
 	if (sh->devx_rx_uar)
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index a15f86616d..7ff5feaf4a 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -1111,6 +1111,12 @@ struct mlx5_aso_ct_pools_mng {
 	struct mlx5_aso_sq aso_sq; /* ASO queue objects. */
 };
 
+/* LAG attr. */
+struct mlx5_lag {
+	uint8_t tx_remap_affinity[16]; /* The PF port number of affinity */
+	uint8_t affinity_mode; /* TIS or hash based affinity */
+};
+
 /*
  * Shared Infiniband device context for Master/Representors
  * which belong to same IB device with multiple IB ports.
@@ -1178,8 +1184,9 @@ struct mlx5_dev_ctx_shared {
 	struct rte_intr_handle intr_handle; /* Interrupt handler for device. */
 	struct rte_intr_handle intr_handle_devx; /* DEVX interrupt handler. */
 	void *devx_comp; /* DEVX async comp obj. */
-	struct mlx5_devx_obj *tis; /* TIS object. */
+	struct mlx5_devx_obj *tis[16]; /* TIS object. */
 	struct mlx5_devx_obj *td; /* Transport domain. */
+	struct mlx5_lag lag; /* LAG attributes */
 	void *tx_uar; /* Tx/packet pacing shared UAR. */
 	struct mlx5_flex_parser_profiles fp[MLX5_FLEX_PARSER_MAX];
 	/* Flex parser profiles information. */
@@ -1445,6 +1452,7 @@ struct mlx5_priv {
 	uint32_t rss_shared_actions; /* RSS shared actions. */
 	struct mlx5_devx_obj *q_counters; /* DevX queue counter object. */
 	uint32_t counter_set_id; /* Queue counter ID to set in DevX objects. */
+	uint32_t lag_affinity_idx; /* LAG mode queue 0 affinity starting. */
 };
 
 #define PORT_ID(priv) ((priv)->dev_data->port_id)
diff --git a/drivers/net/mlx5/mlx5_devx.c b/drivers/net/mlx5/mlx5_devx.c
index a1db53577a..bff81c7df2 100644
--- a/drivers/net/mlx5/mlx5_devx.c
+++ b/drivers/net/mlx5/mlx5_devx.c
@@ -888,6 +888,37 @@ mlx5_devx_drop_action_destroy(struct rte_eth_dev *dev)
 	rte_errno = ENOTSUP;
 }
 
+/**
+ * Select TXQ TIS number.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ * @param queue_idx
+ *   Queue index in DPDK Tx queue array.
+ *
+ * @return
+ *   TIS number to be used by the Tx queue.
+ */
+static uint32_t
+mlx5_get_txq_tis_num(struct rte_eth_dev *dev, uint16_t queue_idx)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	int tis_idx;
+
+	if (priv->sh->bond.n_port && priv->sh->lag.affinity_mode ==
+			MLX5_LAG_MODE_TIS) {
+		tis_idx = (priv->lag_affinity_idx + queue_idx) %
+			priv->sh->bond.n_port;
+		DRV_LOG(INFO, "port %d txq %d gets affinity %d and maps to PF %d.",
+			dev->data->port_id, queue_idx, tis_idx + 1,
+			priv->sh->lag.tx_remap_affinity[tis_idx]);
+	} else {
+		tis_idx = 0;
+	}
+	MLX5_ASSERT(priv->sh->tis[tis_idx]);
+	return priv->sh->tis[tis_idx]->id;
+}
+
 /**
  * Create the Tx hairpin queue object.
  *
@@ -935,7 +966,8 @@ mlx5_txq_obj_hairpin_new(struct rte_eth_dev *dev, uint16_t idx)
 	attr.wq_attr.log_hairpin_num_packets =
 			attr.wq_attr.log_hairpin_data_sz -
 			MLX5_HAIRPIN_QUEUE_STRIDE;
-	attr.tis_num = priv->sh->tis->id;
+
+	attr.tis_num = mlx5_get_txq_tis_num(dev, idx);
 	tmpl->sq = mlx5_devx_cmd_create_sq(priv->sh->ctx, &attr);
 	if (!tmpl->sq) {
 		DRV_LOG(ERR,
@@ -992,14 +1024,15 @@ mlx5_txq_create_devx_sq_resources(struct rte_eth_dev *dev, uint16_t idx,
 		.allow_swp = !!priv->config.swp,
 		.cqn = txq_obj->cq_obj.cq->id,
 		.tis_lst_sz = 1,
-		.tis_num = priv->sh->tis->id,
 		.wq_attr = (struct mlx5_devx_wq_attr){
 			.pd = priv->sh->pdn,
 			.uar_page =
 				 mlx5_os_get_devx_uar_page_id(priv->sh->tx_uar),
 		},
 		.ts_format = mlx5_ts_format_conv(priv->sh->sq_ts_format),
+		.tis_num = mlx5_get_txq_tis_num(dev, idx),
 	};
+
 	/* Create Send Queue object with DevX. */
 	return mlx5_devx_sq_create(priv->sh->ctx, &txq_obj->sq_obj, log_desc_n,
 				   &sq_attr, priv->sh->numa_node);
diff --git a/drivers/net/mlx5/mlx5_txpp.c b/drivers/net/mlx5/mlx5_txpp.c
index 2be7e71f89..6e874fa090 100644
--- a/drivers/net/mlx5/mlx5_txpp.c
+++ b/drivers/net/mlx5/mlx5_txpp.c
@@ -230,7 +230,7 @@ mlx5_txpp_create_rearm_queue(struct mlx5_dev_ctx_shared *sh)
 		.cd_master = 1,
 		.state = MLX5_SQC_STATE_RST,
 		.tis_lst_sz = 1,
-		.tis_num = sh->tis->id,
+		.tis_num = sh->tis[0]->id,
 		.wq_attr = (struct mlx5_devx_wq_attr){
 			.pd = sh->pdn,
 			.uar_page = mlx5_os_get_devx_uar_page_id(sh->tx_uar),
@@ -433,7 +433,7 @@ mlx5_txpp_create_clock_queue(struct mlx5_dev_ctx_shared *sh)
 	/* Create send queue object for Clock Queue. */
 	if (sh->txpp.test) {
 		sq_attr.tis_lst_sz = 1;
-		sq_attr.tis_num = sh->tis->id;
+		sq_attr.tis_num = sh->tis[0]->id;
 		sq_attr.non_wire = 0;
 		sq_attr.static_sq_wq = 1;
 	} else {
-- 
2.27.0

