From: Gagandeep Singh <g.singh@nxp.com>
To: ferruh.yigit@amd.com, dev@dpdk.org
Cc: Gagandeep Singh <g.singh@nxp.com>,
stable@dpdk.org, Hemant Agrawal <hemant.agrawal@nxp.com>
Subject: [PATCH v2 15/16] net/dpaa: fix buffer free on transmit SG packets
Date: Fri, 7 Oct 2022 08:57:42 +0530
Message-ID: <20221007032743.2129353-16-g.singh@nxp.com>
In-Reply-To: <20221007032743.2129353-1-g.singh@nxp.com>
When transmitting an SG list that mixes external and direct buffers,
the hardware frees the direct buffers and the driver frees the
external buffers. The driver scans the complete SG mbuf list to find
the external buffers to free, but this is wrong: the hardware may
already have freed any direct buffers present in the list, and in a
multi-threaded or high-speed traffic environment those buffers can be
re-allocated for another purpose and filled with new data. If the
list being scanned contains such a direct buffer that the hardware
has already freed, its next pointer can yield a stale value, leading
to mempool corruption or a memory leak.
Instead of relying on the user-supplied SG mbuf list, this patch
stores the buffers in an internal list, which the driver scans after
transmit to free the non-direct buffers.
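For illustration only, a condensed sketch of the bookkeeping this
patch adds (the struct and field names match the diff below; the
surrounding driver code is elided):

    /* One entry per segment that software, not hardware, must free */
    struct dpaa_sw_buf_free {
            uint16_t pkt_id;      /* index of the packet in the TX burst */
            struct rte_mbuf *seg; /* segment to free after transmit */
    };

    /* While building the frame descriptor, record external and
     * indirect segments instead of freeing them (or walking the
     * user's mbuf chain) later:
     */
    free_buf[*free_count].seg = cur_seg;
    free_buf[*free_count].pkt_id = pkt_id;
    ++*free_count;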
This patch also fixes the following issues:
The driver frees the complete SG list based on the external-buffer
flag of the first segment only, but an external buffer can be
attached to any segment. Because of this, the driver can either
double-free buffers or leak memory.
In the case of indirect buffers, the driver modifies the original
buffer list to free the indirect buffers, but it keeps using that
original list after the packets are transmitted for non-direct buffer
cleanup. This can leak buffers.
Fixes: f191d5abda54 ("net/dpaa: support external buffers in Tx")
Cc: stable@dpdk.org
Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
Acked-by: Hemant Agrawal <hemant.agrawal@nxp.com>
---
drivers/net/dpaa/dpaa_ethdev.h | 10 ++++++
drivers/net/dpaa/dpaa_rxtx.c | 61 ++++++++++++++++++++++------------
2 files changed, 49 insertions(+), 22 deletions(-)
diff --git a/drivers/net/dpaa/dpaa_ethdev.h b/drivers/net/dpaa/dpaa_ethdev.h
index f9c0554530..502c1c88b8 100644
--- a/drivers/net/dpaa/dpaa_ethdev.h
+++ b/drivers/net/dpaa/dpaa_ethdev.h
@@ -112,6 +112,16 @@
extern struct rte_mempool *dpaa_tx_sg_pool;
+/* structure to free external and indirect
+ * buffers.
+ */
+struct dpaa_sw_buf_free {
+ /* To which packet this segment belongs */
+ uint16_t pkt_id;
+ /* The actual segment */
+ struct rte_mbuf *seg;
+};
+
/* Each network interface is represented by one of these */
struct dpaa_if {
int valid;
diff --git a/drivers/net/dpaa/dpaa_rxtx.c b/drivers/net/dpaa/dpaa_rxtx.c
index e23206bf5c..4d285b4f38 100644
--- a/drivers/net/dpaa/dpaa_rxtx.c
+++ b/drivers/net/dpaa/dpaa_rxtx.c
@@ -803,9 +803,12 @@ uint16_t dpaa_eth_queue_rx(void *q,
static int
dpaa_eth_mbuf_to_sg_fd(struct rte_mbuf *mbuf,
- struct qm_fd *fd)
+ struct qm_fd *fd,
+ struct dpaa_sw_buf_free *free_buf,
+ uint32_t *free_count,
+ uint32_t pkt_id)
{
- struct rte_mbuf *cur_seg = mbuf, *prev_seg = NULL;
+ struct rte_mbuf *cur_seg = mbuf;
struct rte_mbuf *temp, *mi;
struct qm_sg_entry *sg_temp, *sgt;
int i = 0;
@@ -869,10 +872,11 @@ dpaa_eth_mbuf_to_sg_fd(struct rte_mbuf *mbuf,
sg_temp->bpid =
DPAA_MEMPOOL_TO_BPID(cur_seg->pool);
}
- cur_seg = cur_seg->next;
} else if (RTE_MBUF_HAS_EXTBUF(cur_seg)) {
+ free_buf[*free_count].seg = cur_seg;
+ free_buf[*free_count].pkt_id = pkt_id;
+ ++*free_count;
sg_temp->bpid = 0xff;
- cur_seg = cur_seg->next;
} else {
/* Get owner MBUF from indirect buffer */
mi = rte_mbuf_from_indirect(cur_seg);
@@ -885,11 +889,11 @@ dpaa_eth_mbuf_to_sg_fd(struct rte_mbuf *mbuf,
sg_temp->bpid = DPAA_MEMPOOL_TO_BPID(mi->pool);
rte_mbuf_refcnt_update(mi, 1);
}
- prev_seg = cur_seg;
- cur_seg = cur_seg->next;
- prev_seg->next = NULL;
- rte_pktmbuf_free(prev_seg);
+ free_buf[*free_count].seg = cur_seg;
+ free_buf[*free_count].pkt_id = pkt_id;
+ ++*free_count;
}
+ cur_seg = cur_seg->next;
if (cur_seg == NULL) {
sg_temp->final = 1;
cpu_to_hw_sg(sg_temp);
@@ -904,7 +908,10 @@ dpaa_eth_mbuf_to_sg_fd(struct rte_mbuf *mbuf,
static inline void
tx_on_dpaa_pool_unsegmented(struct rte_mbuf *mbuf,
struct dpaa_bp_info *bp_info,
- struct qm_fd *fd_arr)
+ struct qm_fd *fd_arr,
+ struct dpaa_sw_buf_free *buf_to_free,
+ uint32_t *free_count,
+ uint32_t pkt_id)
{
struct rte_mbuf *mi = NULL;
@@ -923,6 +930,9 @@ tx_on_dpaa_pool_unsegmented(struct rte_mbuf *mbuf,
DPAA_MBUF_TO_CONTIG_FD(mbuf, fd_arr, bp_info->bpid);
}
} else if (RTE_MBUF_HAS_EXTBUF(mbuf)) {
+ buf_to_free[*free_count].seg = mbuf;
+ buf_to_free[*free_count].pkt_id = pkt_id;
+ ++*free_count;
DPAA_MBUF_TO_CONTIG_FD(mbuf, fd_arr,
bp_info ? bp_info->bpid : 0xff);
} else {
@@ -946,7 +956,9 @@ tx_on_dpaa_pool_unsegmented(struct rte_mbuf *mbuf,
DPAA_MBUF_TO_CONTIG_FD(mbuf, fd_arr,
bp_info ? bp_info->bpid : 0xff);
}
- rte_pktmbuf_free(mbuf);
+ buf_to_free[*free_count].seg = mbuf;
+ buf_to_free[*free_count].pkt_id = pkt_id;
+ ++*free_count;
}
if (mbuf->ol_flags & DPAA_TX_CKSUM_OFFLOAD_MASK)
@@ -957,16 +969,21 @@ tx_on_dpaa_pool_unsegmented(struct rte_mbuf *mbuf,
static inline uint16_t
tx_on_dpaa_pool(struct rte_mbuf *mbuf,
struct dpaa_bp_info *bp_info,
- struct qm_fd *fd_arr)
+ struct qm_fd *fd_arr,
+ struct dpaa_sw_buf_free *buf_to_free,
+ uint32_t *free_count,
+ uint32_t pkt_id)
{
DPAA_DP_LOG(DEBUG, "BMAN offloaded buffer, mbuf: %p", mbuf);
if (mbuf->nb_segs == 1) {
/* Case for non-segmented buffers */
- tx_on_dpaa_pool_unsegmented(mbuf, bp_info, fd_arr);
+ tx_on_dpaa_pool_unsegmented(mbuf, bp_info, fd_arr,
+ buf_to_free, free_count, pkt_id);
} else if (mbuf->nb_segs > 1 &&
mbuf->nb_segs <= DPAA_SGT_MAX_ENTRIES) {
- if (dpaa_eth_mbuf_to_sg_fd(mbuf, fd_arr)) {
+ if (dpaa_eth_mbuf_to_sg_fd(mbuf, fd_arr, buf_to_free,
+ free_count, pkt_id)) {
DPAA_PMD_DEBUG("Unable to create Scatter Gather FD");
return 1;
}
@@ -1070,7 +1087,8 @@ dpaa_eth_queue_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
uint16_t state;
int ret, realloc_mbuf = 0;
uint32_t seqn, index, flags[DPAA_TX_BURST_SIZE] = {0};
- struct rte_mbuf **orig_bufs = bufs;
+ struct dpaa_sw_buf_free buf_to_free[DPAA_MAX_SGS * DPAA_MAX_DEQUEUE_NUM_FRAMES];
+ uint32_t free_count = 0;
if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
ret = rte_dpaa_portal_init((void *)0);
@@ -1153,7 +1171,10 @@ dpaa_eth_queue_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
}
indirect_buf:
state = tx_on_dpaa_pool(mbuf, bp_info,
- &fd_arr[loop]);
+ &fd_arr[loop],
+ buf_to_free,
+ &free_count,
+ loop);
if (unlikely(state)) {
/* Set frames_to_send & nb_bufs so
* that packets are transmitted till
@@ -1178,13 +1199,9 @@ dpaa_eth_queue_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
DPAA_DP_LOG(DEBUG, "Transmitted %d buffers on queue: %p", sent, q);
-
- loop = 0;
- while (loop < sent) {
- if (unlikely(RTE_MBUF_HAS_EXTBUF(*orig_bufs)))
- rte_pktmbuf_free(*orig_bufs);
- orig_bufs++;
- loop++;
+ for (loop = 0; loop < free_count; loop++) {
+ if (buf_to_free[loop].pkt_id < sent)
+ rte_pktmbuf_free_seg(buf_to_free[loop].seg);
}
return sent;
--
2.25.1
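For readers skimming the diff, a minimal standalone sketch of the
post-transmit cleanup introduced above (the helper name
sw_buf_free_after_tx is hypothetical; the loop mirrors the one added
at the end of dpaa_eth_queue_tx(), freeing only segments that belong
to packets actually sent):

    #include <stdint.h>
    #include <rte_mbuf.h>

    /* As added to dpaa_ethdev.h by this patch */
    struct dpaa_sw_buf_free {
            uint16_t pkt_id;
            struct rte_mbuf *seg;
    };

    /* Segments recorded for un-sent packets stay with the caller of
     * the TX burst function and must not be freed here.
     */
    static void
    sw_buf_free_after_tx(struct dpaa_sw_buf_free *buf_to_free,
                         uint32_t free_count, uint16_t sent)
    {
            uint32_t i;

            for (i = 0; i < free_count; i++) {
                    if (buf_to_free[i].pkt_id < sent)
                            rte_pktmbuf_free_seg(buf_to_free[i].seg);
            }
    }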