patches for DPDK stable branches
* [PATCH 21.11 1/4] net/dpaa: use internal mempool for SG table
@ 2022-10-28 11:32 Gagandeep Singh
  2022-10-28 11:32 ` [PATCH 21.11 2/4] net/dpaa: fix buffer freeing on SG Tx Gagandeep Singh
                   ` (3 more replies)
  0 siblings, 4 replies; 5+ messages in thread
From: Gagandeep Singh @ 2022-10-28 11:32 UTC (permalink / raw)
  To: ktraynor, stable; +Cc: Gagandeep Singh, Hemant Agrawal

[ upstream commit 533c31cc8331cc1ed0c4ffb2940e02b0d1e65255 ]

Create and use a driver-owned mempool for allocating the SG table
memory required for FD creation.
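
In outline, the Tx path now draws the SG table from this pool and
stamps the frame descriptor with the pool's bpid, so hardware
returns the table buffer to the driver's pool after transmission.
A minimal sketch, mirroring the hunks below with error handling
trimmed:

	/* The SG table comes from the driver-owned pool, not the
	 * user's packet pool; temp holds the table, not packet data.
	 */
	struct rte_mbuf *temp = rte_pktmbuf_alloc(dpaa_tx_sg_pool);

	if (temp == NULL)
		return -1;
	fd->format = QM_FD_SG;
	fd->addr = temp->buf_iova;
	fd->offset = temp->data_off;
	/* bpid tells hardware which pool to release the table to */
	fd->bpid = DPAA_MEMPOOL_TO_BPID(dpaa_tx_sg_pool);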

Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
Acked-by: Hemant Agrawal <hemant.agrawal@nxp.com>
---
 drivers/net/dpaa/dpaa_ethdev.c | 18 ++++++++++++++++++
 drivers/net/dpaa/dpaa_ethdev.h |  9 +++++++++
 drivers/net/dpaa/dpaa_rxtx.c   |  9 ++++-----
 3 files changed, 31 insertions(+), 5 deletions(-)

diff --git a/drivers/net/dpaa/dpaa_ethdev.c b/drivers/net/dpaa/dpaa_ethdev.c
index 9847ca1be1..034f446561 100644
--- a/drivers/net/dpaa/dpaa_ethdev.c
+++ b/drivers/net/dpaa/dpaa_ethdev.c
@@ -133,6 +133,8 @@ static const struct rte_dpaa_xstats_name_off dpaa_xstats_strings[] = {
 };
 
 static struct rte_dpaa_driver rte_dpaa_pmd;
+int dpaa_valid_dev;
+struct rte_mempool *dpaa_tx_sg_pool;
 
 static int
 dpaa_eth_dev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info);
@@ -2209,7 +2211,20 @@ rte_dpaa_probe(struct rte_dpaa_driver *dpaa_drv,
 	/* Invoke PMD device initialization function */
 	diag = dpaa_dev_init(eth_dev);
 	if (diag == 0) {
+		if (!dpaa_tx_sg_pool) {
+			dpaa_tx_sg_pool =
+				rte_pktmbuf_pool_create("dpaa_mbuf_tx_sg_pool",
+				DPAA_POOL_SIZE,
+				DPAA_POOL_CACHE_SIZE, 0,
+				DPAA_MAX_SGS * sizeof(struct qm_sg_entry),
+				rte_socket_id());
+			if (dpaa_tx_sg_pool == NULL) {
+				DPAA_PMD_ERR("SG pool creation failed\n");
+				return -ENOMEM;
+			}
+		}
 		rte_eth_dev_probing_finish(eth_dev);
+		dpaa_valid_dev++;
 		return 0;
 	}
 
@@ -2227,6 +2242,9 @@ rte_dpaa_remove(struct rte_dpaa_device *dpaa_dev)
 
 	eth_dev = dpaa_dev->eth_dev;
 	dpaa_eth_dev_close(eth_dev);
+	dpaa_valid_dev--;
+	if (!dpaa_valid_dev)
+		rte_mempool_free(dpaa_tx_sg_pool);
 	ret = rte_eth_dev_release_port(eth_dev);
 
 	return ret;
diff --git a/drivers/net/dpaa/dpaa_ethdev.h b/drivers/net/dpaa/dpaa_ethdev.h
index 6fdd57dbc3..f9c0554530 100644
--- a/drivers/net/dpaa/dpaa_ethdev.h
+++ b/drivers/net/dpaa/dpaa_ethdev.h
@@ -33,6 +33,13 @@
 
 #define DPAA_SGT_MAX_ENTRIES 16 /* maximum number of entries in SG Table */
 
+/* Maximum SG segments supported on all cores*/
+#define DPAA_MAX_SGS 128
+/* SG pool size */
+#define DPAA_POOL_SIZE 2048
+/* SG pool cache size */
+#define DPAA_POOL_CACHE_SIZE 256
+
 /* RX queue tail drop threshold (CGR Based) in frame count */
 #define CGR_RX_PERFQ_THRESH 256
 #define CGR_TX_CGR_THRESH 512
@@ -103,6 +110,8 @@
 
 #define FMC_FILE "/tmp/fmc.bin"
 
+extern struct rte_mempool *dpaa_tx_sg_pool;
+
 /* Each network interface is represented by one of these */
 struct dpaa_if {
 	int valid;
diff --git a/drivers/net/dpaa/dpaa_rxtx.c b/drivers/net/dpaa/dpaa_rxtx.c
index 956fe946fa..e0aa268645 100644
--- a/drivers/net/dpaa/dpaa_rxtx.c
+++ b/drivers/net/dpaa/dpaa_rxtx.c
@@ -793,8 +793,7 @@ uint16_t dpaa_eth_queue_rx(void *q,
 
 static int
 dpaa_eth_mbuf_to_sg_fd(struct rte_mbuf *mbuf,
-		struct qm_fd *fd,
-		struct dpaa_bp_info *bp_info)
+		struct qm_fd *fd)
 {
 	struct rte_mbuf *cur_seg = mbuf, *prev_seg = NULL;
 	struct rte_mbuf *temp, *mi;
@@ -803,7 +802,7 @@ dpaa_eth_mbuf_to_sg_fd(struct rte_mbuf *mbuf,
 
 	DPAA_DP_LOG(DEBUG, "Creating SG FD to transmit");
 
-	temp = rte_pktmbuf_alloc(bp_info->mp);
+	temp = rte_pktmbuf_alloc(dpaa_tx_sg_pool);
 	if (!temp) {
 		DPAA_PMD_ERR("Failure in allocation of mbuf");
 		return -1;
@@ -839,7 +838,7 @@ dpaa_eth_mbuf_to_sg_fd(struct rte_mbuf *mbuf,
 	fd->format = QM_FD_SG;
 	fd->addr = temp->buf_iova;
 	fd->offset = temp->data_off;
-	fd->bpid = bp_info ? bp_info->bpid : 0xff;
+	fd->bpid = DPAA_MEMPOOL_TO_BPID(dpaa_tx_sg_pool);
 	fd->length20 = mbuf->pkt_len;
 
 	while (i < DPAA_SGT_MAX_ENTRIES) {
@@ -957,7 +956,7 @@ tx_on_dpaa_pool(struct rte_mbuf *mbuf,
 		tx_on_dpaa_pool_unsegmented(mbuf, bp_info, fd_arr);
 	} else if (mbuf->nb_segs > 1 &&
 		   mbuf->nb_segs <= DPAA_SGT_MAX_ENTRIES) {
-		if (dpaa_eth_mbuf_to_sg_fd(mbuf, fd_arr, bp_info)) {
+		if (dpaa_eth_mbuf_to_sg_fd(mbuf, fd_arr)) {
 			DPAA_PMD_DEBUG("Unable to create Scatter Gather FD");
 			return 1;
 		}
-- 
2.25.1


* [PATCH 21.11 2/4] net/dpaa: fix buffer freeing on SG Tx
  2022-10-28 11:32 [PATCH 21.11 1/4] net/dpaa: use internal mempool for SG table Gagandeep Singh
@ 2022-10-28 11:32 ` Gagandeep Singh
  2022-10-28 11:32 ` [PATCH 21.11 3/4] net/dpaa2: use internal mempool for SG table Gagandeep Singh
                   ` (2 subsequent siblings)
  3 siblings, 0 replies; 5+ messages in thread
From: Gagandeep Singh @ 2022-10-28 11:32 UTC (permalink / raw)
  To: ktraynor, stable; +Cc: Gagandeep Singh, Hemant Agrawal

[ upstream commit 8716c0ec06e2bb58273184ebfa9f951565fa9ce2 ]

When transmitting an SG list that mixes external and direct
buffers, hardware frees the direct buffers and the driver frees
the external buffers.

The software scans the complete SG mbuf list to find the external
buffers to free. This is unsafe: hardware may free any direct
buffers present in the list, and in a multi-threaded or high-rate
traffic environment those buffers can be re-allocated for another
purpose and refilled with new data. If the list being scanned
contains a direct buffer that hardware has already freed, its next
pointer may hold a stale value, which can corrupt the mempool or
leak memory.

Instead of relying on the user-supplied SG mbuf list, this patch
stores the buffers in an internal list that the driver scans after
transmit to free the non-direct buffers.
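
In outline, the deferred-free pattern looks like this (a sketch
based on the hunks below; pkt_id is the packet's index within the
Tx burst):

	struct dpaa_sw_buf_free buf_to_free[DPAA_MAX_SGS *
				DPAA_MAX_DEQUEUE_NUM_FRAMES];
	uint32_t free_count = 0;

	/* while building each FD: record the segment, don't free yet */
	buf_to_free[free_count].seg = cur_seg;
	buf_to_free[free_count].pkt_id = pkt_id;
	free_count++;

	/* after hardware accepted 'sent' frames: free only segments
	 * of packets that were actually handed to hardware
	 */
	for (loop = 0; loop < free_count; loop++) {
		if (buf_to_free[loop].pkt_id < sent)
			rte_pktmbuf_free_seg(buf_to_free[loop].seg);
	}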

This patch also fixes the following issues.

The driver frees the complete SG list based on the external-buffer
flag of the first segment only, but an external buffer can be
attached to any segment. As a result, the driver can either
double-free buffers or leak memory.

For indirect buffers, the driver modifies the original buffer list
in order to free the indirect buffers, but that list is still used
by the driver after the packets are transmitted for non-direct
buffer cleanup. This can leak buffers.

Fixes: f191d5abda54 ("net/dpaa: support external buffers in Tx")

Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
Acked-by: Hemant Agrawal <hemant.agrawal@nxp.com>
---
 drivers/net/dpaa/dpaa_ethdev.h | 10 ++++++
 drivers/net/dpaa/dpaa_rxtx.c   | 61 ++++++++++++++++++++++------------
 2 files changed, 49 insertions(+), 22 deletions(-)

diff --git a/drivers/net/dpaa/dpaa_ethdev.h b/drivers/net/dpaa/dpaa_ethdev.h
index f9c0554530..502c1c88b8 100644
--- a/drivers/net/dpaa/dpaa_ethdev.h
+++ b/drivers/net/dpaa/dpaa_ethdev.h
@@ -112,6 +112,16 @@
 
 extern struct rte_mempool *dpaa_tx_sg_pool;
 
+/* structure to free external and indirect
+ * buffers.
+ */
+struct dpaa_sw_buf_free {
+	/* To which packet this segment belongs */
+	uint16_t pkt_id;
+	/* The actual segment */
+	struct rte_mbuf *seg;
+};
+
 /* Each network interface is represented by one of these */
 struct dpaa_if {
 	int valid;
diff --git a/drivers/net/dpaa/dpaa_rxtx.c b/drivers/net/dpaa/dpaa_rxtx.c
index e0aa268645..d06e128338 100644
--- a/drivers/net/dpaa/dpaa_rxtx.c
+++ b/drivers/net/dpaa/dpaa_rxtx.c
@@ -793,9 +793,12 @@ uint16_t dpaa_eth_queue_rx(void *q,
 
 static int
 dpaa_eth_mbuf_to_sg_fd(struct rte_mbuf *mbuf,
-		struct qm_fd *fd)
+		struct qm_fd *fd,
+		struct dpaa_sw_buf_free *free_buf,
+		uint32_t *free_count,
+		uint32_t pkt_id)
 {
-	struct rte_mbuf *cur_seg = mbuf, *prev_seg = NULL;
+	struct rte_mbuf *cur_seg = mbuf;
 	struct rte_mbuf *temp, *mi;
 	struct qm_sg_entry *sg_temp, *sgt;
 	int i = 0;
@@ -859,10 +862,11 @@ dpaa_eth_mbuf_to_sg_fd(struct rte_mbuf *mbuf,
 				sg_temp->bpid =
 					DPAA_MEMPOOL_TO_BPID(cur_seg->pool);
 			}
-			cur_seg = cur_seg->next;
 		} else if (RTE_MBUF_HAS_EXTBUF(cur_seg)) {
+			free_buf[*free_count].seg = cur_seg;
+			free_buf[*free_count].pkt_id = pkt_id;
+			++*free_count;
 			sg_temp->bpid = 0xff;
-			cur_seg = cur_seg->next;
 		} else {
 			/* Get owner MBUF from indirect buffer */
 			mi = rte_mbuf_from_indirect(cur_seg);
@@ -875,11 +879,11 @@ dpaa_eth_mbuf_to_sg_fd(struct rte_mbuf *mbuf,
 				sg_temp->bpid = DPAA_MEMPOOL_TO_BPID(mi->pool);
 				rte_mbuf_refcnt_update(mi, 1);
 			}
-			prev_seg = cur_seg;
-			cur_seg = cur_seg->next;
-			prev_seg->next = NULL;
-			rte_pktmbuf_free(prev_seg);
+			free_buf[*free_count].seg = cur_seg;
+			free_buf[*free_count].pkt_id = pkt_id;
+			++*free_count;
 		}
+		cur_seg = cur_seg->next;
 		if (cur_seg == NULL) {
 			sg_temp->final = 1;
 			cpu_to_hw_sg(sg_temp);
@@ -894,7 +898,10 @@ dpaa_eth_mbuf_to_sg_fd(struct rte_mbuf *mbuf,
 static inline void
 tx_on_dpaa_pool_unsegmented(struct rte_mbuf *mbuf,
 			    struct dpaa_bp_info *bp_info,
-			    struct qm_fd *fd_arr)
+			    struct qm_fd *fd_arr,
+			    struct dpaa_sw_buf_free *buf_to_free,
+			    uint32_t *free_count,
+			    uint32_t pkt_id)
 {
 	struct rte_mbuf *mi = NULL;
 
@@ -913,6 +920,9 @@ tx_on_dpaa_pool_unsegmented(struct rte_mbuf *mbuf,
 			DPAA_MBUF_TO_CONTIG_FD(mbuf, fd_arr, bp_info->bpid);
 		}
 	} else if (RTE_MBUF_HAS_EXTBUF(mbuf)) {
+		buf_to_free[*free_count].seg = mbuf;
+		buf_to_free[*free_count].pkt_id = pkt_id;
+		++*free_count;
 		DPAA_MBUF_TO_CONTIG_FD(mbuf, fd_arr,
 				bp_info ? bp_info->bpid : 0xff);
 	} else {
@@ -936,7 +946,9 @@ tx_on_dpaa_pool_unsegmented(struct rte_mbuf *mbuf,
 			DPAA_MBUF_TO_CONTIG_FD(mbuf, fd_arr,
 						bp_info ? bp_info->bpid : 0xff);
 		}
-		rte_pktmbuf_free(mbuf);
+		buf_to_free[*free_count].seg = mbuf;
+		buf_to_free[*free_count].pkt_id = pkt_id;
+		++*free_count;
 	}
 
 	if (mbuf->ol_flags & DPAA_TX_CKSUM_OFFLOAD_MASK)
@@ -947,16 +959,21 @@ tx_on_dpaa_pool_unsegmented(struct rte_mbuf *mbuf,
 static inline uint16_t
 tx_on_dpaa_pool(struct rte_mbuf *mbuf,
 		struct dpaa_bp_info *bp_info,
-		struct qm_fd *fd_arr)
+		struct qm_fd *fd_arr,
+		struct dpaa_sw_buf_free *buf_to_free,
+		uint32_t *free_count,
+		uint32_t pkt_id)
 {
 	DPAA_DP_LOG(DEBUG, "BMAN offloaded buffer, mbuf: %p", mbuf);
 
 	if (mbuf->nb_segs == 1) {
 		/* Case for non-segmented buffers */
-		tx_on_dpaa_pool_unsegmented(mbuf, bp_info, fd_arr);
+		tx_on_dpaa_pool_unsegmented(mbuf, bp_info, fd_arr,
+				buf_to_free, free_count, pkt_id);
 	} else if (mbuf->nb_segs > 1 &&
 		   mbuf->nb_segs <= DPAA_SGT_MAX_ENTRIES) {
-		if (dpaa_eth_mbuf_to_sg_fd(mbuf, fd_arr)) {
+		if (dpaa_eth_mbuf_to_sg_fd(mbuf, fd_arr, buf_to_free,
+					   free_count, pkt_id)) {
 			DPAA_PMD_DEBUG("Unable to create Scatter Gather FD");
 			return 1;
 		}
@@ -1060,7 +1077,8 @@ dpaa_eth_queue_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
 	uint16_t state;
 	int ret, realloc_mbuf = 0;
 	uint32_t seqn, index, flags[DPAA_TX_BURST_SIZE] = {0};
-	struct rte_mbuf **orig_bufs = bufs;
+	struct dpaa_sw_buf_free buf_to_free[DPAA_MAX_SGS * DPAA_MAX_DEQUEUE_NUM_FRAMES];
+	uint32_t free_count = 0;
 
 	if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
 		ret = rte_dpaa_portal_init((void *)0);
@@ -1143,7 +1161,10 @@ dpaa_eth_queue_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
 			}
 indirect_buf:
 			state = tx_on_dpaa_pool(mbuf, bp_info,
-						&fd_arr[loop]);
+						&fd_arr[loop],
+						buf_to_free,
+						&free_count,
+						loop);
 			if (unlikely(state)) {
 				/* Set frames_to_send & nb_bufs so
 				 * that packets are transmitted till
@@ -1168,13 +1189,9 @@ dpaa_eth_queue_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
 
 	DPAA_DP_LOG(DEBUG, "Transmitted %d buffers on queue: %p", sent, q);
 
-
-	loop = 0;
-	while (loop < sent) {
-		if (unlikely(RTE_MBUF_HAS_EXTBUF(*orig_bufs)))
-			rte_pktmbuf_free(*orig_bufs);
-		orig_bufs++;
-		loop++;
+	for (loop = 0; loop < free_count; loop++) {
+		if (buf_to_free[loop].pkt_id < sent)
+			rte_pktmbuf_free_seg(buf_to_free[loop].seg);
 	}
 
 	return sent;
-- 
2.25.1


* [PATCH 21.11 3/4] net/dpaa2: use internal mempool for SG table
  2022-10-28 11:32 [PATCH 21.11 1/4] net/dpaa: use internal mempool for SG table Gagandeep Singh
  2022-10-28 11:32 ` [PATCH 21.11 2/4] net/dpaa: fix buffer freeing on SG Tx Gagandeep Singh
@ 2022-10-28 11:32 ` Gagandeep Singh
  2022-10-28 11:32 ` [PATCH 21.11 4/4] net/dpaa2: fix buffer freeing on SG Tx Gagandeep Singh
  2022-11-01 14:53 ` [PATCH 21.11 1/4] net/dpaa: use internal mempool for SG table Kevin Traynor
  3 siblings, 0 replies; 5+ messages in thread
From: Gagandeep Singh @ 2022-10-28 11:32 UTC (permalink / raw)
  To: ktraynor, stable; +Cc: Gagandeep Singh, Hemant Agrawal

[ upstream commit 75e2a1d47326ff3c169a16156eb32ebeea44bc0d ]

Create and use a driver-owned mempool for allocating the SG table
memory required for FD creation, instead of relying on the user's
mempool.
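
The dpaa2 Tx path follows the same pattern as the dpaa one. A
minimal sketch, mirroring the hunks below:

	/* Allocate the SG table from the driver-owned pool and point
	 * the FD's buffer-pool id at that pool.
	 */
	temp = rte_pktmbuf_alloc(dpaa2_tx_sg_pool);
	if (temp == NULL)
		return -ENOMEM;
	DPAA2_SET_ONLY_FD_BPID(fd, mempool_to_bpid(dpaa2_tx_sg_pool));
	DPAA2_SET_FD_OFFSET(fd, temp->data_off);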

Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
Acked-by: Hemant Agrawal <hemant.agrawal@nxp.com>
---
 drivers/net/dpaa2/dpaa2_ethdev.c | 19 +++++++++++++++++++
 drivers/net/dpaa2/dpaa2_ethdev.h |  9 +++++++++
 drivers/net/dpaa2/dpaa2_rxtx.c   | 12 ++++++------
 3 files changed, 34 insertions(+), 6 deletions(-)

diff --git a/drivers/net/dpaa2/dpaa2_ethdev.c b/drivers/net/dpaa2/dpaa2_ethdev.c
index b875139689..eb2d87ecbc 100644
--- a/drivers/net/dpaa2/dpaa2_ethdev.c
+++ b/drivers/net/dpaa2/dpaa2_ethdev.c
@@ -74,6 +74,9 @@ int dpaa2_timestamp_dynfield_offset = -1;
 /* Enable error queue */
 bool dpaa2_enable_err_queue;
 
+int dpaa2_valid_dev;
+struct rte_mempool *dpaa2_tx_sg_pool;
+
 struct rte_dpaa2_xstats_name_off {
 	char name[RTE_ETH_XSTATS_NAME_SIZE];
 	uint8_t page_id; /* dpni statistics page id */
@@ -2864,7 +2867,20 @@ rte_dpaa2_probe(struct rte_dpaa2_driver *dpaa2_drv,
 	/* Invoke PMD device initialization function */
 	diag = dpaa2_dev_init(eth_dev);
 	if (diag == 0) {
+		if (!dpaa2_tx_sg_pool) {
+			dpaa2_tx_sg_pool =
+				rte_pktmbuf_pool_create("dpaa2_mbuf_tx_sg_pool",
+				DPAA2_POOL_SIZE,
+				DPAA2_POOL_CACHE_SIZE, 0,
+				DPAA2_MAX_SGS * sizeof(struct qbman_sge),
+				rte_socket_id());
+			if (dpaa2_tx_sg_pool == NULL) {
+				DPAA2_PMD_ERR("SG pool creation failed\n");
+				return -ENOMEM;
+			}
+		}
 		rte_eth_dev_probing_finish(eth_dev);
+		dpaa2_valid_dev++;
 		return 0;
 	}
 
@@ -2880,6 +2896,9 @@ rte_dpaa2_remove(struct rte_dpaa2_device *dpaa2_dev)
 
 	eth_dev = dpaa2_dev->eth_dev;
 	dpaa2_dev_close(eth_dev);
+	dpaa2_valid_dev--;
+	if (!dpaa2_valid_dev)
+		rte_mempool_free(dpaa2_tx_sg_pool);
 	ret = rte_eth_dev_release_port(eth_dev);
 
 	return ret;
diff --git a/drivers/net/dpaa2/dpaa2_ethdev.h b/drivers/net/dpaa2/dpaa2_ethdev.h
index fd4eabed4e..3de2114901 100644
--- a/drivers/net/dpaa2/dpaa2_ethdev.h
+++ b/drivers/net/dpaa2/dpaa2_ethdev.h
@@ -104,6 +104,15 @@
 #define DPAA2_PKT_TYPE_VLAN_1		0x0160
 #define DPAA2_PKT_TYPE_VLAN_2		0x0260
 
+/* Global pool used by driver for SG list TX */
+extern struct rte_mempool *dpaa2_tx_sg_pool;
+/* Maximum SG segments */
+#define DPAA2_MAX_SGS 128
+/* SG pool size */
+#define DPAA2_POOL_SIZE 2048
+/* SG pool cache size */
+#define DPAA2_POOL_CACHE_SIZE 256
+
 /* enable timestamp in mbuf*/
 extern bool dpaa2_enable_ts[];
 extern uint64_t dpaa2_timestamp_rx_dynflag;
diff --git a/drivers/net/dpaa2/dpaa2_rxtx.c b/drivers/net/dpaa2/dpaa2_rxtx.c
index 9fb6c5f91d..6ffbfa8c78 100644
--- a/drivers/net/dpaa2/dpaa2_rxtx.c
+++ b/drivers/net/dpaa2/dpaa2_rxtx.c
@@ -381,7 +381,7 @@ eth_fd_to_mbuf(const struct qbman_fd *fd,
 static int __rte_noinline __rte_hot
 eth_mbuf_to_sg_fd(struct rte_mbuf *mbuf,
 		  struct qbman_fd *fd,
-		  struct rte_mempool *mp, uint16_t bpid)
+		  uint16_t bpid)
 {
 	struct rte_mbuf *cur_seg = mbuf, *prev_seg, *mi, *temp;
 	struct qbman_sge *sgt, *sge = NULL;
@@ -407,12 +407,12 @@ eth_mbuf_to_sg_fd(struct rte_mbuf *mbuf,
 		}
 		DPAA2_SET_FD_OFFSET(fd, offset);
 	} else {
-		temp = rte_pktmbuf_alloc(mp);
+		temp = rte_pktmbuf_alloc(dpaa2_tx_sg_pool);
 		if (temp == NULL) {
 			DPAA2_PMD_DP_DEBUG("No memory to allocate S/G table\n");
 			return -ENOMEM;
 		}
-		DPAA2_SET_ONLY_FD_BPID(fd, bpid);
+		DPAA2_SET_ONLY_FD_BPID(fd, mempool_to_bpid(dpaa2_tx_sg_pool));
 		DPAA2_SET_FD_OFFSET(fd, temp->data_off);
 	}
 	DPAA2_SET_FD_ADDR(fd, DPAA2_MBUF_VADDR_TO_IOVA(temp));
@@ -1273,9 +1273,10 @@ dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 
 			if (unlikely(RTE_MBUF_HAS_EXTBUF(*bufs))) {
 				if (unlikely((*bufs)->nb_segs > 1)) {
+					mp = (*bufs)->pool;
 					if (eth_mbuf_to_sg_fd(*bufs,
 							      &fd_arr[loop],
-							      mp, 0))
+							      mempool_to_bpid(mp)))
 						goto send_n_return;
 				} else {
 					eth_mbuf_to_fd(*bufs,
@@ -1324,7 +1325,7 @@ dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 				if (unlikely((*bufs)->nb_segs > 1)) {
 					if (eth_mbuf_to_sg_fd(*bufs,
 							&fd_arr[loop],
-							mp, bpid))
+							bpid))
 						goto send_n_return;
 				} else {
 					eth_mbuf_to_fd(*bufs,
@@ -1599,7 +1600,6 @@ dpaa2_dev_tx_ordered(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 				if (unlikely((*bufs)->nb_segs > 1)) {
 					if (eth_mbuf_to_sg_fd(*bufs,
 							      &fd_arr[loop],
-							      mp,
 							      bpid))
 						goto send_n_return;
 				} else {
-- 
2.25.1


* [PATCH 21.11 4/4] net/dpaa2: fix buffer freeing on SG Tx
  2022-10-28 11:32 [PATCH 21.11 1/4] net/dpaa: use internal mempool for SG table Gagandeep Singh
  2022-10-28 11:32 ` [PATCH 21.11 2/4] net/dpaa: fix buffer freeing on SG Tx Gagandeep Singh
  2022-10-28 11:32 ` [PATCH 21.11 3/4] net/dpaa2: use internal mempool for SG table Gagandeep Singh
@ 2022-10-28 11:32 ` Gagandeep Singh
  2022-11-01 14:53 ` [PATCH 21.11 1/4] net/dpaa: use internal mempool for SG table Kevin Traynor
  3 siblings, 0 replies; 5+ messages in thread
From: Gagandeep Singh @ 2022-10-28 11:32 UTC (permalink / raw)
  To: ktraynor, stable; +Cc: Gagandeep Singh, Hemant Agrawal

[ upstream commit b0074a7ba1f4874eee5e83aa869357893944121c ]

When transmitting an SG list that mixes external and direct
buffers, hardware frees the direct buffers and the driver frees
the external buffers.

The software scans the complete SG mbuf list to find the external
buffers to free. This is unsafe: hardware may free any direct
buffers present in the list, and in a multi-threaded or high-rate
traffic environment those buffers can be re-allocated for another
purpose and refilled with new data. If the list being scanned
contains a direct buffer that hardware has already freed, its next
pointer may hold a stale value, which can corrupt the mempool or
leak memory.

Instead of relying on the user-supplied SG mbuf list, this patch
stores the buffers in an internal list that the driver scans after
transmit to free the non-direct buffers.
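
As on dpaa, the internal list is an on-stack array sized for the
worst case in which every segment of every frame in a burst must
be deferred. A sketch of the sizing and the final free loop used
below:

	struct sw_buf_free buf_to_free[DPAA2_MAX_SGS * dpaa2_dqrr_size];
	uint32_t free_count = 0;

	/* after hardware accepted num_tx frames */
	for (loop = 0; loop < free_count; loop++) {
		if (buf_to_free[loop].pkt_id < num_tx)
			rte_pktmbuf_free_seg(buf_to_free[loop].seg);
	}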

This patch also fixes two more memory leak issues.

The driver frees the complete SG list based on the external-buffer
flag of the first segment only, but an external buffer can be
attached to any segment. As a result, the driver can either
double-free buffers or leak memory.

For indirect buffers, the driver modifies the original buffer list
in order to free the indirect buffers, but that list is still used
after the packets are transmitted for software buffer cleanup.
This can leak buffers.

Fixes: 6bfbafe18d15 ("net/dpaa2: support external buffers in Tx")

Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
Acked-by: Hemant Agrawal <hemant.agrawal@nxp.com>
---
 drivers/net/dpaa2/dpaa2_ethdev.h |  9 +++
 drivers/net/dpaa2/dpaa2_rxtx.c   | 95 +++++++++++++++++++++++---------
 2 files changed, 78 insertions(+), 26 deletions(-)

diff --git a/drivers/net/dpaa2/dpaa2_ethdev.h b/drivers/net/dpaa2/dpaa2_ethdev.h
index 3de2114901..19ff57ba74 100644
--- a/drivers/net/dpaa2/dpaa2_ethdev.h
+++ b/drivers/net/dpaa2/dpaa2_ethdev.h
@@ -112,6 +112,15 @@ extern struct rte_mempool *dpaa2_tx_sg_pool;
 #define DPAA2_POOL_SIZE 2048
 /* SG pool cache size */
 #define DPAA2_POOL_CACHE_SIZE 256
+/* structure to free external and indirect
+ * buffers.
+ */
+struct sw_buf_free {
+	/* To which packet this segment belongs */
+	uint16_t pkt_id;
+	/* The actual segment */
+	struct rte_mbuf *seg;
+};
 
 /* enable timestamp in mbuf*/
 extern bool dpaa2_enable_ts[];
diff --git a/drivers/net/dpaa2/dpaa2_rxtx.c b/drivers/net/dpaa2/dpaa2_rxtx.c
index 6ffbfa8c78..e372937926 100644
--- a/drivers/net/dpaa2/dpaa2_rxtx.c
+++ b/drivers/net/dpaa2/dpaa2_rxtx.c
@@ -381,9 +381,12 @@ eth_fd_to_mbuf(const struct qbman_fd *fd,
 static int __rte_noinline __rte_hot
 eth_mbuf_to_sg_fd(struct rte_mbuf *mbuf,
 		  struct qbman_fd *fd,
+		  struct sw_buf_free *free_buf,
+		  uint32_t *free_count,
+		  uint32_t pkt_id,
 		  uint16_t bpid)
 {
-	struct rte_mbuf *cur_seg = mbuf, *prev_seg, *mi, *temp;
+	struct rte_mbuf *cur_seg = mbuf, *mi, *temp;
 	struct qbman_sge *sgt, *sge = NULL;
 	int i, offset = 0;
 
@@ -452,10 +455,11 @@ eth_mbuf_to_sg_fd(struct rte_mbuf *mbuf,
 						mempool_to_bpid(cur_seg->pool));
 				}
 			}
-			cur_seg = cur_seg->next;
 		} else if (RTE_MBUF_HAS_EXTBUF(cur_seg)) {
+			free_buf[*free_count].seg = cur_seg;
+			free_buf[*free_count].pkt_id = pkt_id;
+			++*free_count;
 			DPAA2_SET_FLE_IVP(sge);
-			cur_seg = cur_seg->next;
 		} else {
 			/* Get owner MBUF from indirect buffer */
 			mi = rte_mbuf_from_indirect(cur_seg);
@@ -469,11 +473,11 @@ eth_mbuf_to_sg_fd(struct rte_mbuf *mbuf,
 						   mempool_to_bpid(mi->pool));
 				rte_mbuf_refcnt_update(mi, 1);
 			}
-			prev_seg = cur_seg;
-			cur_seg = cur_seg->next;
-			prev_seg->next = NULL;
-			rte_pktmbuf_free(prev_seg);
+			free_buf[*free_count].seg = cur_seg;
+			free_buf[*free_count].pkt_id = pkt_id;
+			++*free_count;
 		}
+		cur_seg = cur_seg->next;
 	}
 	DPAA2_SG_SET_FINAL(sge, true);
 	return 0;
@@ -481,11 +485,19 @@ eth_mbuf_to_sg_fd(struct rte_mbuf *mbuf,
 
 static void
 eth_mbuf_to_fd(struct rte_mbuf *mbuf,
-	       struct qbman_fd *fd, uint16_t bpid) __rte_unused;
+	       struct qbman_fd *fd,
+	       struct sw_buf_free *buf_to_free,
+	       uint32_t *free_count,
+	       uint32_t pkt_id,
+	       uint16_t bpid) __rte_unused;
 
 static void __rte_noinline __rte_hot
 eth_mbuf_to_fd(struct rte_mbuf *mbuf,
-	       struct qbman_fd *fd, uint16_t bpid)
+	       struct qbman_fd *fd,
+	       struct sw_buf_free *buf_to_free,
+	       uint32_t *free_count,
+	       uint32_t pkt_id,
+	       uint16_t bpid)
 {
 	DPAA2_MBUF_TO_CONTIG_FD(mbuf, fd, bpid);
 
@@ -501,6 +513,9 @@ eth_mbuf_to_fd(struct rte_mbuf *mbuf,
 			rte_mbuf_refcnt_update(mbuf, -1);
 		}
 	} else if (RTE_MBUF_HAS_EXTBUF(mbuf)) {
+		buf_to_free[*free_count].seg = mbuf;
+		buf_to_free[*free_count].pkt_id = pkt_id;
+		++*free_count;
 		DPAA2_SET_FD_IVP(fd);
 	} else {
 		struct rte_mbuf *mi;
@@ -510,7 +525,10 @@ eth_mbuf_to_fd(struct rte_mbuf *mbuf,
 			DPAA2_SET_FD_IVP(fd);
 		else
 			rte_mbuf_refcnt_update(mi, 1);
-		rte_pktmbuf_free(mbuf);
+
+		buf_to_free[*free_count].seg = mbuf;
+		buf_to_free[*free_count].pkt_id = pkt_id;
+		++*free_count;
 	}
 }
 
@@ -1183,7 +1201,8 @@ dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 	struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
 	struct dpaa2_dev_priv *priv = eth_data->dev_private;
 	uint32_t flags[MAX_TX_RING_SLOTS] = {0};
-	struct rte_mbuf **orig_bufs = bufs;
+	struct sw_buf_free buf_to_free[DPAA2_MAX_SGS * dpaa2_dqrr_size];
+	uint32_t free_count = 0;
 
 	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
 		ret = dpaa2_affine_qbman_swp();
@@ -1276,11 +1295,17 @@ dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 					mp = (*bufs)->pool;
 					if (eth_mbuf_to_sg_fd(*bufs,
 							      &fd_arr[loop],
+							      buf_to_free,
+							      &free_count,
+							      loop,
 							      mempool_to_bpid(mp)))
 						goto send_n_return;
 				} else {
 					eth_mbuf_to_fd(*bufs,
-						       &fd_arr[loop], 0);
+							&fd_arr[loop],
+							buf_to_free,
+							&free_count,
+							loop, 0);
 				}
 				bufs++;
 #ifdef RTE_LIBRTE_IEEE1588
@@ -1325,11 +1350,17 @@ dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 				if (unlikely((*bufs)->nb_segs > 1)) {
 					if (eth_mbuf_to_sg_fd(*bufs,
 							&fd_arr[loop],
+							buf_to_free,
+							&free_count,
+							loop,
 							bpid))
 						goto send_n_return;
 				} else {
 					eth_mbuf_to_fd(*bufs,
-						       &fd_arr[loop], bpid);
+							&fd_arr[loop],
+							buf_to_free,
+							&free_count,
+							loop, bpid);
 				}
 			}
 #ifdef RTE_LIBRTE_IEEE1588
@@ -1362,12 +1393,9 @@ dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 	}
 	dpaa2_q->tx_pkts += num_tx;
 
-	loop = 0;
-	while (loop < num_tx) {
-		if (unlikely(RTE_MBUF_HAS_EXTBUF(*orig_bufs)))
-			rte_pktmbuf_free(*orig_bufs);
-		orig_bufs++;
-		loop++;
+	for (loop = 0; loop < free_count; loop++) {
+		if (buf_to_free[loop].pkt_id < num_tx)
+			rte_pktmbuf_free_seg(buf_to_free[loop].seg);
 	}
 
 	return num_tx;
@@ -1397,12 +1425,9 @@ dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 skip_tx:
 	dpaa2_q->tx_pkts += num_tx;
 
-	loop = 0;
-	while (loop < num_tx) {
-		if (unlikely(RTE_MBUF_HAS_EXTBUF(*orig_bufs)))
-			rte_pktmbuf_free(*orig_bufs);
-		orig_bufs++;
-		loop++;
+	for (loop = 0; loop < free_count; loop++) {
+		if (buf_to_free[loop].pkt_id < num_tx)
+			rte_pktmbuf_free_seg(buf_to_free[loop].seg);
 	}
 
 	return num_tx;
@@ -1488,6 +1513,8 @@ dpaa2_dev_tx_ordered(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 	int32_t ret;
 	uint16_t num_tx = 0;
 	uint16_t bpid;
+	struct sw_buf_free buf_to_free[DPAA2_MAX_SGS * dpaa2_dqrr_size];
+	uint32_t free_count = 0;
 
 	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
 		ret = dpaa2_affine_qbman_swp();
@@ -1600,11 +1627,17 @@ dpaa2_dev_tx_ordered(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 				if (unlikely((*bufs)->nb_segs > 1)) {
 					if (eth_mbuf_to_sg_fd(*bufs,
 							      &fd_arr[loop],
+							      buf_to_free,
+							      &free_count,
+							      loop,
 							      bpid))
 						goto send_n_return;
 				} else {
 					eth_mbuf_to_fd(*bufs,
-						       &fd_arr[loop], bpid);
+							&fd_arr[loop],
+							buf_to_free,
+							&free_count,
+							loop, bpid);
 				}
 			}
 			bufs++;
@@ -1633,6 +1666,11 @@ dpaa2_dev_tx_ordered(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 		nb_pkts -= loop;
 	}
 	dpaa2_q->tx_pkts += num_tx;
+	for (loop = 0; loop < free_count; loop++) {
+		if (buf_to_free[loop].pkt_id < num_tx)
+			rte_pktmbuf_free_seg(buf_to_free[loop].seg);
+	}
+
 	return num_tx;
 
 send_n_return:
@@ -1657,6 +1695,11 @@ dpaa2_dev_tx_ordered(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 	}
 skip_tx:
 	dpaa2_q->tx_pkts += num_tx;
+	for (loop = 0; loop < free_count; loop++) {
+		if (buf_to_free[loop].pkt_id < num_tx)
+			rte_pktmbuf_free_seg(buf_to_free[loop].seg);
+	}
+
 	return num_tx;
 }
 
-- 
2.25.1


* Re: [PATCH 21.11 1/4] net/dpaa: use internal mempool for SG table
  2022-10-28 11:32 [PATCH 21.11 1/4] net/dpaa: use internal mempool for SG table Gagandeep Singh
                   ` (2 preceding siblings ...)
  2022-10-28 11:32 ` [PATCH 21.11 4/4] net/dpaa2: fix buffer freeing on SG Tx Gagandeep Singh
@ 2022-11-01 14:53 ` Kevin Traynor
  3 siblings, 0 replies; 5+ messages in thread
From: Kevin Traynor @ 2022-11-01 14:53 UTC (permalink / raw)
  To: Gagandeep Singh, stable; +Cc: Hemant Agrawal

On 28/10/2022 12:32, Gagandeep Singh wrote:
> [ upstream commit 533c31cc8331cc1ed0c4ffb2940e02b0d1e65255 ]
> 
> Create and use a driver-owned mempool for allocating the SG table
> memory required for FD creation.
> 
> Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
> Acked-by: Hemant Agrawal <hemant.agrawal@nxp.com>
> ---
>   drivers/net/dpaa/dpaa_ethdev.c | 18 ++++++++++++++++++
>   drivers/net/dpaa/dpaa_ethdev.h |  9 +++++++++
>   drivers/net/dpaa/dpaa_rxtx.c   |  9 ++++-----
>   3 files changed, 31 insertions(+), 5 deletions(-)
> 

Series applied. Thanks for rebasing.

