DPDK patches and discussions
From: Gagandeep Singh <g.singh@nxp.com>
To: ferruh.yigit@amd.com, dev@dpdk.org
Cc: Gagandeep Singh <g.singh@nxp.com>, stable@dpdk.org
Subject: [PATCH 08/15] net/dpaa2: fix buffer free on transmit SG packets
Date: Wed, 28 Sep 2022 10:55:09 +0530	[thread overview]
Message-ID: <20220928052516.1279442-9-g.singh@nxp.com> (raw)
In-Reply-To: <20220928052516.1279442-1-g.singh@nxp.com>

When an SG list that mixes external and direct buffers is
transmitted, hardware frees the direct buffers and the driver
frees the external buffers.

Software scans the complete SG mbuf list to find the external
buffers to free. This is wrong because hardware may already have
freed any direct buffers present in the list, and in a
multi-threaded or high-speed traffic environment those buffers can
be re-allocated for another purpose and filled with new data. If
the list being scanned contains such a direct buffer, its next
pointer can hold a stale value once hardware has freed it, leading
to mempool corruption or a memory leak.

Instead of relying on the user-given SG mbuf list, this patch
stores the buffers in an internal list which the driver scans
after transmit to free the non-direct buffers.
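
A minimal sketch of this bookkeeping pattern (illustration only,
not the exact driver code; the record_seg() and flush_frees()
helpers are hypothetical wrappers around what the patch
open-codes):

  #include <rte_mbuf.h>

  /* One entry per segment that software, not hardware, must free. */
  struct sw_buf_free {
          uint16_t pkt_id;      /* index of the packet in this burst */
          struct rte_mbuf *seg; /* segment to free after transmit */
  };

  /* While building the FD: record a segment hardware won't free. */
  static inline void
  record_seg(struct sw_buf_free *list, uint32_t *cnt,
             struct rte_mbuf *seg, uint16_t pkt_id)
  {
          list[*cnt].seg = seg;
          list[*cnt].pkt_id = pkt_id;
          (*cnt)++;
  }

  /* After enqueue: free only entries of packets actually sent. */
  static inline void
  flush_frees(struct sw_buf_free *list, uint32_t cnt, uint16_t num_tx)
  {
          uint32_t i;

          for (i = 0; i < cnt; i++)
                  if (list[i].pkt_id < num_tx)
                          rte_pktmbuf_free_seg(list[i].seg);
  }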

This patch also fixes two more memory leak issues.

First, the driver frees the complete SG list based on the external
buffer flag of the first segment only, but an external buffer can
be attached to any segment. As a result, the driver can either
double-free buffers or leak memory.
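
In simplified form, the difference is a head-only check versus a
per-segment walk (illustration only; record_seg() is the
hypothetical helper above, and mbuf, buf_to_free, free_count and
pkt_id are assumed to be in scope):

  /* Wrong: the head segment decides for the whole chain. */
  if (RTE_MBUF_HAS_EXTBUF(mbuf))
          rte_pktmbuf_free(mbuf); /* frees every segment */

  /* Right: classify each segment on its own. */
  struct rte_mbuf *seg;

  for (seg = mbuf; seg != NULL; seg = seg->next) {
          if (RTE_MBUF_HAS_EXTBUF(seg))
                  record_seg(buf_to_free, &free_count, seg, pkt_id);
  }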

Second, in the case of indirect buffers, the driver modifies the
original buffer list to free the indirect buffers, but that
original list is still used after transmit for software buffer
cleanup. This can leak buffers.
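
A sketch of the corrected indirect-buffer handling (simplified
from the hunk below; DPAA2_SET_FLE_IVP() is the driver-internal
macro already used in dpaa2_rxtx.c, and cur_seg and sge are
assumed to be in scope):

  /* Settle the owner's refcount now, but defer freeing the
   * indirect mbuf itself and leave cur_seg->next untouched so
   * the chain stays walkable for the post-Tx cleanup.
   */
  struct rte_mbuf *mi = rte_mbuf_from_indirect(cur_seg);

  if (rte_mbuf_refcnt_read(mi) > 1)
          DPAA2_SET_FLE_IVP(sge);
  else
          rte_mbuf_refcnt_update(mi, 1);
  record_seg(buf_to_free, &free_count, cur_seg, pkt_id);
  cur_seg = cur_seg->next; /* chain left intact */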

Fixes: 6bfbafe18d15 ("net/dpaa2: support external buffers in Tx")
Cc: stable@dpdk.org

Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
---
 drivers/net/dpaa2/dpaa2_ethdev.h |   9 +++
 drivers/net/dpaa2/dpaa2_rxtx.c   | 111 +++++++++++++++++++++++--------
 2 files changed, 92 insertions(+), 28 deletions(-)

diff --git a/drivers/net/dpaa2/dpaa2_ethdev.h b/drivers/net/dpaa2/dpaa2_ethdev.h
index 872dced517..c88c8146dc 100644
--- a/drivers/net/dpaa2/dpaa2_ethdev.h
+++ b/drivers/net/dpaa2/dpaa2_ethdev.h
@@ -129,6 +129,15 @@ extern struct rte_mempool *dpaa2_tx_sg_pool;
 #define DPAA2_POOL_SIZE 2048
 /* SG pool cache size */
 #define DPAA2_POOL_CACHE_SIZE 256
+/* structure to free external and indirect
+ * buffers.
+ */
+struct sw_buf_free {
+	/* To which packet this segment belongs */
+	uint16_t pkt_id;
+	/* The actual segment */
+	struct rte_mbuf *seg;
+};
 
 /* enable timestamp in mbuf*/
 extern bool dpaa2_enable_ts[];
diff --git a/drivers/net/dpaa2/dpaa2_rxtx.c b/drivers/net/dpaa2/dpaa2_rxtx.c
index dcd86c4056..94815485b8 100644
--- a/drivers/net/dpaa2/dpaa2_rxtx.c
+++ b/drivers/net/dpaa2/dpaa2_rxtx.c
@@ -403,9 +403,12 @@ eth_fd_to_mbuf(const struct qbman_fd *fd,
 static int __rte_noinline __rte_hot
 eth_mbuf_to_sg_fd(struct rte_mbuf *mbuf,
 		  struct qbman_fd *fd,
+		  struct sw_buf_free *free_buf,
+		  uint32_t *free_count,
+		  uint32_t pkt_id,
 		  uint16_t bpid)
 {
-	struct rte_mbuf *cur_seg = mbuf, *prev_seg, *mi, *temp;
+	struct rte_mbuf *cur_seg = mbuf, *mi, *temp;
 	struct qbman_sge *sgt, *sge = NULL;
 	int i, offset = 0;
 
@@ -486,10 +489,11 @@ eth_mbuf_to_sg_fd(struct rte_mbuf *mbuf,
 #endif
 				}
 			}
-			cur_seg = cur_seg->next;
 		} else if (RTE_MBUF_HAS_EXTBUF(cur_seg)) {
+			free_buf[*free_count].seg = cur_seg;
+			free_buf[*free_count].pkt_id = pkt_id;
+			++*free_count;
 			DPAA2_SET_FLE_IVP(sge);
-			cur_seg = cur_seg->next;
 		} else {
 			/* Get owner MBUF from indirect buffer */
 			mi = rte_mbuf_from_indirect(cur_seg);
@@ -503,11 +507,11 @@ eth_mbuf_to_sg_fd(struct rte_mbuf *mbuf,
 						   mempool_to_bpid(mi->pool));
 				rte_mbuf_refcnt_update(mi, 1);
 			}
-			prev_seg = cur_seg;
-			cur_seg = cur_seg->next;
-			prev_seg->next = NULL;
-			rte_pktmbuf_free(prev_seg);
+			free_buf[*free_count].seg = cur_seg;
+			free_buf[*free_count].pkt_id = pkt_id;
+			++*free_count;
 		}
+		cur_seg = cur_seg->next;
 	}
 	DPAA2_SG_SET_FINAL(sge, true);
 	return 0;
@@ -515,11 +519,19 @@ eth_mbuf_to_sg_fd(struct rte_mbuf *mbuf,
 
 static void
 eth_mbuf_to_fd(struct rte_mbuf *mbuf,
-	       struct qbman_fd *fd, uint16_t bpid) __rte_unused;
+	       struct qbman_fd *fd,
+	       struct sw_buf_free *buf_to_free,
+	       uint32_t *free_count,
+	       uint32_t pkt_id,
+	       uint16_t bpid) __rte_unused;
 
 static void __rte_noinline __rte_hot
 eth_mbuf_to_fd(struct rte_mbuf *mbuf,
-	       struct qbman_fd *fd, uint16_t bpid)
+	       struct qbman_fd *fd,
+	       struct sw_buf_free *buf_to_free,
+	       uint32_t *free_count,
+	       uint32_t pkt_id,
+	       uint16_t bpid)
 {
 	DPAA2_MBUF_TO_CONTIG_FD(mbuf, fd, bpid);
 
@@ -540,6 +552,9 @@ eth_mbuf_to_fd(struct rte_mbuf *mbuf,
 				(void **)&mbuf, 1, 0);
 #endif
 	} else if (RTE_MBUF_HAS_EXTBUF(mbuf)) {
+		buf_to_free[*free_count].seg = mbuf;
+		buf_to_free[*free_count].pkt_id = pkt_id;
+		++*free_count;
 		DPAA2_SET_FD_IVP(fd);
 	} else {
 		struct rte_mbuf *mi;
@@ -549,7 +564,10 @@ eth_mbuf_to_fd(struct rte_mbuf *mbuf,
 			DPAA2_SET_FD_IVP(fd);
 		else
 			rte_mbuf_refcnt_update(mi, 1);
-		rte_pktmbuf_free(mbuf);
+
+		buf_to_free[*free_count].seg = mbuf;
+		buf_to_free[*free_count].pkt_id = pkt_id;
+		++*free_count;
 	}
 }
 
@@ -1226,7 +1244,8 @@ dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 	struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
 	struct dpaa2_dev_priv *priv = eth_data->dev_private;
 	uint32_t flags[MAX_TX_RING_SLOTS] = {0};
-	struct rte_mbuf **orig_bufs = bufs;
+	struct sw_buf_free buf_to_free[DPAA2_MAX_SGS * dpaa2_dqrr_size];
+	uint32_t free_count = 0;
 
 	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
 		ret = dpaa2_affine_qbman_swp();
@@ -1324,11 +1343,17 @@ dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 					mp = (*bufs)->pool;
 					if (eth_mbuf_to_sg_fd(*bufs,
 							      &fd_arr[loop],
+							      buf_to_free,
+							      &free_count,
+							      loop,
 							      mempool_to_bpid(mp)))
 						goto send_n_return;
 				} else {
 					eth_mbuf_to_fd(*bufs,
-						       &fd_arr[loop], 0);
+							&fd_arr[loop],
+							buf_to_free,
+							&free_count,
+							loop, 0);
 				}
 				bufs++;
 #ifdef RTE_LIBRTE_IEEE1588
@@ -1373,11 +1398,17 @@ dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 				if (unlikely((*bufs)->nb_segs > 1)) {
 					if (eth_mbuf_to_sg_fd(*bufs,
 							&fd_arr[loop],
+							buf_to_free,
+							&free_count,
+							loop,
 							bpid))
 						goto send_n_return;
 				} else {
 					eth_mbuf_to_fd(*bufs,
-						       &fd_arr[loop], bpid);
+							&fd_arr[loop],
+							buf_to_free,
+							&free_count,
+							loop, bpid);
 				}
 			}
 #ifdef RTE_LIBRTE_IEEE1588
@@ -1410,12 +1441,9 @@ dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 	}
 	dpaa2_q->tx_pkts += num_tx;
 
-	loop = 0;
-	while (loop < num_tx) {
-		if (unlikely(RTE_MBUF_HAS_EXTBUF(*orig_bufs)))
-			rte_pktmbuf_free(*orig_bufs);
-		orig_bufs++;
-		loop++;
+	for (loop = 0; loop < free_count; loop++) {
+		if (buf_to_free[loop].pkt_id < num_tx)
+			rte_pktmbuf_free_seg(buf_to_free[loop].seg);
 	}
 
 	return num_tx;
@@ -1445,12 +1473,9 @@ dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 skip_tx:
 	dpaa2_q->tx_pkts += num_tx;
 
-	loop = 0;
-	while (loop < num_tx) {
-		if (unlikely(RTE_MBUF_HAS_EXTBUF(*orig_bufs)))
-			rte_pktmbuf_free(*orig_bufs);
-		orig_bufs++;
-		loop++;
+	for (loop = 0; loop < free_count; loop++) {
+		if (buf_to_free[loop].pkt_id < num_tx)
+			rte_pktmbuf_free_seg(buf_to_free[loop].seg);
 	}
 
 	return num_tx;
@@ -1523,7 +1548,7 @@ dpaa2_dev_tx_multi_txq_ordered(void **queue,
 		struct rte_mbuf **bufs, uint16_t nb_pkts)
 {
 	/* Function to transmit the frames to multiple queues respectively.*/
-	uint32_t loop, retry_count;
+	uint32_t loop, i, retry_count;
 	int32_t ret;
 	struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
 	uint32_t frames_to_send, num_free_eq_desc = 0;
@@ -1536,6 +1561,8 @@ dpaa2_dev_tx_multi_txq_ordered(void **queue,
 	struct rte_eth_dev_data *eth_data;
 	struct dpaa2_dev_priv *priv;
 	struct dpaa2_queue *order_sendq;
+	struct sw_buf_free buf_to_free[DPAA2_MAX_SGS * dpaa2_dqrr_size];
+	uint32_t free_count = 0;
 
 	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
 		ret = dpaa2_affine_qbman_swp();
@@ -1647,11 +1674,17 @@ dpaa2_dev_tx_multi_txq_ordered(void **queue,
 			if (unlikely((*bufs)->nb_segs > 1)) {
 				if (eth_mbuf_to_sg_fd(*bufs,
 						      &fd_arr[loop],
+						      buf_to_free,
+						      &free_count,
+						      loop,
 						      bpid))
 					goto send_frames;
 			} else {
 				eth_mbuf_to_fd(*bufs,
-					       &fd_arr[loop], bpid);
+						&fd_arr[loop],
+						buf_to_free,
+						&free_count,
+						loop, bpid);
 			}
 		}
 
@@ -1676,6 +1709,10 @@ dpaa2_dev_tx_multi_txq_ordered(void **queue,
 		}
 	}
 
+	for (i = 0; i < free_count; i++) {
+		if (buf_to_free[i].pkt_id < loop)
+			rte_pktmbuf_free_seg(buf_to_free[i].seg);
+	}
 	return loop;
 }
 
@@ -1698,6 +1735,8 @@ dpaa2_dev_tx_ordered(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 	int32_t ret;
 	uint16_t num_tx = 0;
 	uint16_t bpid;
+	struct sw_buf_free buf_to_free[DPAA2_MAX_SGS * dpaa2_dqrr_size];
+	uint32_t free_count = 0;
 
 	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
 		ret = dpaa2_affine_qbman_swp();
@@ -1810,11 +1849,17 @@ dpaa2_dev_tx_ordered(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 				if (unlikely((*bufs)->nb_segs > 1)) {
 					if (eth_mbuf_to_sg_fd(*bufs,
 							      &fd_arr[loop],
+							      buf_to_free,
+							      &free_count,
+							      loop,
 							      bpid))
 						goto send_n_return;
 				} else {
 					eth_mbuf_to_fd(*bufs,
-						       &fd_arr[loop], bpid);
+							&fd_arr[loop],
+							buf_to_free,
+							&free_count,
+							loop, bpid);
 				}
 			}
 			bufs++;
@@ -1843,6 +1888,11 @@ dpaa2_dev_tx_ordered(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 		nb_pkts -= loop;
 	}
 	dpaa2_q->tx_pkts += num_tx;
+	for (loop = 0; loop < free_count; loop++) {
+		if (buf_to_free[loop].pkt_id < num_tx)
+			rte_pktmbuf_free_seg(buf_to_free[loop].seg);
+	}
+
 	return num_tx;
 
 send_n_return:
@@ -1867,6 +1917,11 @@ dpaa2_dev_tx_ordered(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 	}
 skip_tx:
 	dpaa2_q->tx_pkts += num_tx;
+	for (loop = 0; loop < free_count; loop++) {
+		if (buf_to_free[loop].pkt_id < num_tx)
+			rte_pktmbuf_free_seg(buf_to_free[loop].seg);
+	}
+
 	return num_tx;
 }
 
-- 
2.25.1


Thread overview: 52+ messages
2022-09-28  5:25 [PATCH 00/15] DPAA and DPAA2 driver changes Gagandeep Singh
2022-09-28  5:25 ` [PATCH 01/15] bus/dpaa: use non-block mode for FD open Gagandeep Singh
2022-09-28  5:25 ` [PATCH 02/15] net/enetfec: fix restart issue Gagandeep Singh
2022-09-28  5:25 ` [PATCH 03/15] net/enetfec: fix buffer leak issue Gagandeep Singh
2022-09-28  5:25 ` [PATCH 04/15] net/dpaa2: fix dpdmux configuration for error behaviour Gagandeep Singh
2022-09-28  5:25 ` [PATCH 05/15] net/dpaa2: check free enqueue descriptors before Tx Gagandeep Singh
2022-10-05 14:30   ` Ferruh Yigit
2022-09-28  5:25 ` [PATCH 06/15] net/dpaa: support ESP packet type in packet parsing Gagandeep Singh
2022-10-05 14:20   ` Ferruh Yigit
2022-10-06  8:48     ` Gagandeep Singh
2022-09-28  5:25 ` [PATCH 07/15] net/dpaa2: use internal mempool for SG table Gagandeep Singh
2022-10-05 14:20   ` Ferruh Yigit
2022-10-06  8:49     ` Gagandeep Singh
2022-10-06  9:38       ` Ferruh Yigit
2022-10-06 11:18         ` Gagandeep Singh
2022-09-28  5:25 ` Gagandeep Singh [this message]
2022-10-06  7:48   ` [PATCH 08/15] net/dpaa2: fix buffer free on transmit SG packets Ferruh Yigit
2022-09-28  5:25 ` [PATCH 09/15] bus/fslmc: add timeout in MC send command API Gagandeep Singh
2022-09-28  5:25 ` [PATCH 10/15] net/dpaa: fix Jumbo packet Rx in case of VSP Gagandeep Singh
2022-09-28  5:25 ` [PATCH 11/15] bus/dpaa: pass interface name as a string instead of pointer Gagandeep Singh
2022-10-05 14:21   ` Ferruh Yigit
2022-10-06  8:51     ` Gagandeep Singh
2022-10-06  9:39       ` Ferruh Yigit
2022-10-06 11:18         ` Gagandeep Singh
2022-09-28  5:25 ` [PATCH 12/15] net/dpaa: use internal mempool for SG table Gagandeep Singh
2022-09-28  5:25 ` [PATCH 13/15] bus/dpaa: mempool ops registration change Gagandeep Singh
2022-09-28  5:25 ` [PATCH 14/15] net/dpaa: fix buffer free on transmit SG packets Gagandeep Singh
2022-09-28  5:25 ` [PATCH 15/15] net/dpaa: fix buffer free in slow path Gagandeep Singh
2022-10-05 14:21   ` Ferruh Yigit
2022-10-06  8:51     ` Gagandeep Singh
2022-10-06  9:42       ` Ferruh Yigit
2022-10-06 11:19         ` Gagandeep Singh
2022-10-05 14:27 ` [PATCH 00/15] DPAA and DPAA2 driver changes Ferruh Yigit
2022-10-06 11:15 ` Hemant Agrawal
2022-10-07  3:27 ` [PATCH v2 00/16] " Gagandeep Singh
2022-10-07  3:27   ` [PATCH v2 01/16] bus/dpaa: use non-block mode for FD open Gagandeep Singh
2022-10-07  3:27   ` [PATCH v2 02/16] net/enetfec: fix restart issue Gagandeep Singh
2022-10-07  3:27   ` [PATCH v2 03/16] net/enetfec: fix buffer leak issue Gagandeep Singh
2022-10-07  3:27   ` [PATCH v2 04/16] net/dpaa2: fix dpdmux configuration for error behaviour Gagandeep Singh
2022-10-07  3:27   ` [PATCH v2 05/16] net/dpaa2: check free enqueue descriptors before Tx Gagandeep Singh
2022-10-07  3:27   ` [PATCH v2 06/16] net/dpaa: support ESP packet type in packet parsing Gagandeep Singh
2022-10-07  3:27   ` [PATCH v2 07/16] net/dpaa2: use internal mempool for SG table Gagandeep Singh
2022-10-07  3:27   ` [PATCH v2 08/16] net/dpaa2: fix buffer free on transmit SG packets Gagandeep Singh
2022-10-07  3:27   ` [PATCH v2 09/16] bus/fslmc: add timeout in MC send command API Gagandeep Singh
2022-10-07  3:27   ` [PATCH v2 10/16] net/dpaa: fix Jumbo packet Rx in case of VSP Gagandeep Singh
2022-10-07  3:27   ` [PATCH v2 11/16] doc: add kernel version compatible information Gagandeep Singh
2022-10-07  3:27   ` [PATCH v2 12/16] bus/dpaa: pass interface name as a string instead of pointer Gagandeep Singh
2022-10-07  3:27   ` [PATCH v2 13/16] net/dpaa: use internal mempool for SG table Gagandeep Singh
2022-10-07  3:27   ` [PATCH v2 14/16] bus/dpaa: mempool ops registration change Gagandeep Singh
2022-10-07  3:27   ` [PATCH v2 15/16] net/dpaa: fix buffer free on transmit SG packets Gagandeep Singh
2022-10-07  3:27   ` [PATCH v2 16/16] net/dpaa: fix buffer free in slow path Gagandeep Singh
2022-10-07 15:22   ` [PATCH v2 00/16] DPAA and DPAA2 driver changes Ferruh Yigit
