DPDK patches and discussions
 help / color / mirror / Atom feed
From: "Renyong Wan" <wanry@yunsilicon.com>
To: <dev@dpdk.org>
Cc: <thomas@monjalon.net>, <stephen@networkplumber.org>,
	 <qianr@yunsilicon.com>, <nana@yunsilicon.com>,
	<zhangxx@yunsilicon.com>,  <xudw@yunsilicon.com>,
	<jacky@yunsilicon.com>, <weihg@yunsilicon.com>,
	 <zhenghy@yunsilicon.com>
Subject: [PATCH 09/14] net/xsc: optimize QP and CQ memory allocation
Date: Fri, 29 Aug 2025 16:24:26 +0800	[thread overview]
Message-ID: <20250829082425.24369-10-wanry@yunsilicon.com> (raw)
In-Reply-To: <20250829082406.24369-1-wanry@yunsilicon.com>

Refactor memory allocation for XSC QP and CQ to use the local NUMA node.
Previously, allocations used SOCKET_ID_ANY, which could lead to remote
memory accesses. Allocate PAS and CQE memory on the same NUMA node as
the device, improving memory locality and performance.

Signed-off-by: Rong Qian <qianr@yunsilicon.com>
Signed-off-by: Renyong Wan <wanry@yunsilicon.com>
---
 drivers/net/xsc/xsc_rx.c   | 11 +++++++++--
 drivers/net/xsc/xsc_rxtx.h |  3 +++
 drivers/net/xsc/xsc_tx.c   |  2 ++
 drivers/net/xsc/xsc_vfio.c | 32 ++++++++++++++++++++++++++------
 4 files changed, 40 insertions(+), 8 deletions(-)

diff --git a/drivers/net/xsc/xsc_rx.c b/drivers/net/xsc/xsc_rx.c
index c65afab51d..332ceb9606 100644
--- a/drivers/net/xsc/xsc_rx.c
+++ b/drivers/net/xsc/xsc_rx.c
@@ -383,6 +383,7 @@ xsc_rss_qp_create(struct xsc_ethdev_priv *priv, int port_id)
 	struct xsc_cmd_create_qp_request *req, *info;
 	struct xsc_hwinfo *hwinfo = &xdev->hwinfo;
 	char name[RTE_ETH_NAME_MAX_LEN] = { 0 };
+	uint32_t numa_node = priv->eth_dev->device->numa_node;
 
 	rxq_data = xsc_rxq_get(priv, 0);
 	if (rxq_data == NULL) {
@@ -390,6 +391,10 @@ xsc_rss_qp_create(struct xsc_ethdev_priv *priv, int port_id)
 		return -rte_errno;
 	}
 
+	if (numa_node != rxq_data->socket)
+		PMD_DRV_LOG(WARNING, "Port %u: rxq numa_node=%u, device numa_node=%u",
+			    port_id, rxq_data->socket, numa_node);
+
 	log_ele = rte_log2_u32(sizeof(struct xsc_wqe_data_seg));
 	wqe_n = rxq_data->wqe_s;
 	log_rq_sz = rte_log2_u32(wqe_n * hwinfo->recv_seg_num);
@@ -429,8 +434,9 @@ xsc_rss_qp_create(struct xsc_ethdev_priv *priv, int port_id)
 		snprintf(name, sizeof(name), "wqe_mem_rx_%d_%d", port_id, i);
 		rxq_data->rq_pas = rte_memzone_reserve_aligned(name,
 							       (XSC_PAGE_SIZE * pa_num),
-							       SOCKET_ID_ANY,
-							       0, XSC_PAGE_SIZE);
+							       rxq_data->socket,
+							       RTE_MEMZONE_IOVA_CONTIG,
+							       XSC_PAGE_SIZE);
 		if (rxq_data->rq_pas == NULL) {
 			rte_errno = ENOMEM;
 			PMD_DRV_LOG(ERR, "Failed to alloc rxq pas memory");
@@ -519,6 +525,7 @@ xsc_rxq_rss_obj_new(struct xsc_ethdev_priv *priv, uint16_t port_id)
 		cq_params.port_id = rxq_data->port_id;
 		cq_params.qp_id = rxq_data->idx;
 		cq_params.wqe_s = rxq_data->wqe_s;
+		cq_params.socket_id = rxq_data->socket;
 
 		ret = xsc_dev_rx_cq_create(xdev, &cq_params, &cq_info);
 		if (ret) {
diff --git a/drivers/net/xsc/xsc_rxtx.h b/drivers/net/xsc/xsc_rxtx.h
index 3606c151e6..129bbd2fa4 100644
--- a/drivers/net/xsc/xsc_rxtx.h
+++ b/drivers/net/xsc/xsc_rxtx.h
@@ -124,6 +124,7 @@ struct xsc_tx_cq_params {
 	uint16_t port_id;
 	uint16_t qp_id;
 	uint16_t elts_n;
+	int socket_id;
 };
 
 struct xsc_tx_cq_info {
@@ -141,6 +142,7 @@ struct xsc_tx_qp_params {
 	uint16_t port_id;
 	uint16_t qp_id;
 	uint16_t elts_n;
+	int socket_id;
 };
 
 struct xsc_tx_qp_info {
@@ -165,6 +167,7 @@ struct xsc_rx_cq_params {
 	uint16_t port_id;
 	uint16_t qp_id;
 	uint16_t wqe_s;
+	int socket_id;
 };
 
 struct xsc_rx_cq_info {
diff --git a/drivers/net/xsc/xsc_tx.c b/drivers/net/xsc/xsc_tx.c
index 07888a5641..265439fec0 100644
--- a/drivers/net/xsc/xsc_tx.c
+++ b/drivers/net/xsc/xsc_tx.c
@@ -37,6 +37,7 @@ xsc_txq_obj_new(struct xsc_dev *xdev, struct xsc_txq_data *txq_data, uint16_t id
 	cq_params.port_id = txq_data->port_id;
 	cq_params.qp_id = txq_data->idx;
 	cq_params.elts_n = txq_data->elts_n;
+	cq_params.socket_id = txq_data->socket;
 	ret = xsc_dev_tx_cq_create(xdev, &cq_params, &cq_info);
 	if (ret) {
 		rte_errno = errno;
@@ -60,6 +61,7 @@ xsc_txq_obj_new(struct xsc_dev *xdev, struct xsc_txq_data *txq_data, uint16_t id
 	qp_params.port_id = txq_data->port_id;
 	qp_params.qp_id = idx;
 	qp_params.elts_n = txq_data->elts_n;
+	qp_params.socket_id = txq_data->socket;
 	ret = xsc_dev_tx_qp_create(xdev, &qp_params, &qp_info);
 
 	if (ret != 0) {
diff --git a/drivers/net/xsc/xsc_vfio.c b/drivers/net/xsc/xsc_vfio.c
index 34b2a4c58b..1650a3ab2b 100644
--- a/drivers/net/xsc/xsc_vfio.c
+++ b/drivers/net/xsc/xsc_vfio.c
@@ -556,6 +556,11 @@ xsc_vfio_rx_cq_create(struct xsc_dev *xdev, struct xsc_rx_cq_params *cq_params,
 	struct xsc_cmd_create_cq_mbox_in *in = NULL;
 	struct xsc_cmd_create_cq_mbox_out *out = NULL;
 	void *cmd_buf;
+	int numa_node = xdev->pci_dev->device.numa_node;
+
+	if (numa_node != cq_params->socket_id)
+		PMD_DRV_LOG(WARNING, "Port %u rxq %u: cq numa_node=%u, device numa_node=%u",
+			    port_id, idx, cq_params->socket_id, numa_node);
 
 	cqe_n = cq_params->wqe_s;
 	log_cq_sz = rte_log2_u32(cqe_n);
@@ -592,8 +597,9 @@ xsc_vfio_rx_cq_create(struct xsc_dev *xdev, struct xsc_rx_cq_params *cq_params,
 	snprintf(name, sizeof(name), "mz_cqe_mem_rx_%u_%u", port_id, idx);
 	cq_pas = rte_memzone_reserve_aligned(name,
 					     (XSC_PAGE_SIZE * pa_num),
-					     SOCKET_ID_ANY,
-					     0, XSC_PAGE_SIZE);
+					     cq_params->socket_id,
+					     RTE_MEMZONE_IOVA_CONTIG,
+					     XSC_PAGE_SIZE);
 	if (cq_pas == NULL) {
 		rte_errno = ENOMEM;
 		PMD_DRV_LOG(ERR, "Failed to alloc rx cq pas memory");
@@ -658,6 +664,12 @@ xsc_vfio_tx_cq_create(struct xsc_dev *xdev, struct xsc_tx_cq_params *cq_params,
 	uint64_t iova;
 	int i;
 	void *cmd_buf = NULL;
+	int numa_node = xdev->pci_dev->device.numa_node;
+
+	if (numa_node != cq_params->socket_id)
+		PMD_DRV_LOG(WARNING, "Port %u txq %u: cq numa_node=%u, device numa_node=%u",
+			    cq_params->port_id, cq_params->qp_id,
+			    cq_params->socket_id, numa_node);
 
 	cq = rte_zmalloc(NULL, sizeof(struct xsc_vfio_cq), 0);
 	if (cq == NULL) {
@@ -672,8 +684,9 @@ xsc_vfio_tx_cq_create(struct xsc_dev *xdev, struct xsc_tx_cq_params *cq_params,
 	snprintf(name, sizeof(name), "mz_cqe_mem_tx_%u_%u", cq_params->port_id, cq_params->qp_id);
 	cq_pas = rte_memzone_reserve_aligned(name,
 					     (XSC_PAGE_SIZE * pa_num),
-					     SOCKET_ID_ANY,
-					     0, XSC_PAGE_SIZE);
+					     cq_params->socket_id,
+					     RTE_MEMZONE_IOVA_CONTIG,
+					     XSC_PAGE_SIZE);
 	if (cq_pas == NULL) {
 		rte_errno = ENOMEM;
 		PMD_DRV_LOG(ERR, "Failed to alloc tx cq pas memory");
@@ -762,6 +775,12 @@ xsc_vfio_tx_qp_create(struct xsc_dev *xdev, struct xsc_tx_qp_params *qp_params,
 	char name[RTE_ETH_NAME_MAX_LEN] = {0};
 	void *cmd_buf = NULL;
 	bool tso_en = !!(qp_params->tx_offloads & RTE_ETH_TX_OFFLOAD_TCP_TSO);
+	int numa_node = xdev->pci_dev->device.numa_node;
+
+	if (numa_node != qp_params->socket_id)
+		PMD_DRV_LOG(WARNING, "Port %u: txq %u numa_node=%u, device numa_node=%u",
+			    qp_params->port_id, qp_params->qp_id,
+			    qp_params->socket_id, numa_node);
 
 	qp = rte_zmalloc(NULL, sizeof(struct xsc_vfio_qp), 0);
 	if (qp == NULL) {
@@ -777,8 +796,9 @@ xsc_vfio_tx_qp_create(struct xsc_dev *xdev, struct xsc_tx_qp_params *qp_params,
 	snprintf(name, sizeof(name), "mz_wqe_mem_tx_%u_%u", qp_params->port_id, qp_params->qp_id);
 	qp_pas = rte_memzone_reserve_aligned(name,
 					     (XSC_PAGE_SIZE * pa_num),
-					     SOCKET_ID_ANY,
-					     0, XSC_PAGE_SIZE);
+					     qp_params->socket_id,
+					     RTE_MEMZONE_IOVA_CONTIG,
+					     XSC_PAGE_SIZE);
 	if (qp_pas == NULL) {
 		rte_errno = ENOMEM;
 		PMD_DRV_LOG(ERR, "Failed to alloc tx qp pas memory");
-- 
2.25.1

  parent reply	other threads:[~2025-08-29  8:25 UTC|newest]

Thread overview: 15+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2025-08-29  8:24 [PATCH 00/14] net/xsc: PMD updates Renyong Wan
2025-08-29  8:24 ` [PATCH 01/14] net/xsc: add FW version get support Renyong Wan
2025-08-29  8:24 ` [PATCH 02/14] net/xsc: add TSO support Renyong Wan
2025-08-29  8:24 ` [PATCH 03/14] net/xsc: support module EEPROM dump Renyong Wan
2025-08-29  8:24 ` [PATCH 04/14] net/xsc: support promiscuous mode Renyong Wan
2025-08-29  8:24 ` [PATCH 05/14] net/xsc: add link status support Renyong Wan
2025-08-29  8:24 ` [PATCH 06/14] net/xsc: add link status event support Renyong Wan
2025-08-29  8:24 ` [PATCH 07/14] net/xsc: add FEC get and set support Renyong Wan
2025-08-29  8:24 ` [PATCH 08/14] net/xsc: optimize RSS queue creation Renyong Wan
2025-08-29  8:24 ` Renyong Wan [this message]
2025-08-29  8:24 ` [PATCH 10/14] net/xsc: optimize Rx path Renyong Wan
2025-08-29  8:24 ` [PATCH 11/14] net/xsc: optimize stop and close Renyong Wan
2025-08-29  8:24 ` [PATCH 12/14] net/xsc: support per port for multi-process Renyong Wan
2025-08-29  8:24 ` [PATCH 13/14] net/xsc: fix uninitialized value Renyong Wan
2025-08-29  8:24 ` [PATCH 14/14] net/xsc: update release notes for xsc PMD Renyong Wan

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20250829082425.24369-10-wanry@yunsilicon.com \
    --to=wanry@yunsilicon.com \
    --cc=dev@dpdk.org \
    --cc=jacky@yunsilicon.com \
    --cc=nana@yunsilicon.com \
    --cc=qianr@yunsilicon.com \
    --cc=stephen@networkplumber.org \
    --cc=thomas@monjalon.net \
    --cc=weihg@yunsilicon.com \
    --cc=xudw@yunsilicon.com \
    --cc=zhangxx@yunsilicon.com \
    --cc=zhenghy@yunsilicon.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).