From: Gagandeep Singh <g.singh@nxp.com>
To: ktraynor@redhat.com, stable@dpdk.org
Cc: Gagandeep Singh <g.singh@nxp.com>,
Hemant Agrawal <hemant.agrawal@nxp.com>
Subject: [PATCH 21.11 1/4] net/dpaa: use internal mempool for SG table
Date: Fri, 28 Oct 2022 17:02:02 +0530 [thread overview]
Message-ID: <20221028113205.2349198-1-g.singh@nxp.com> (raw)
[ upstream commit 533c31cc8331cc1ed0c4ffb2940e02b0d1e65255 ]
Create and use the driver's own mempool for
allocating the SG (scatter-gather) table memory
required for FD creation.
Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
Acked-by: Hemant Agrawal <hemant.agrawal@nxp.com>
---
drivers/net/dpaa/dpaa_ethdev.c | 18 ++++++++++++++++++
drivers/net/dpaa/dpaa_ethdev.h | 9 +++++++++
drivers/net/dpaa/dpaa_rxtx.c | 9 ++++-----
3 files changed, 31 insertions(+), 5 deletions(-)
diff --git a/drivers/net/dpaa/dpaa_ethdev.c b/drivers/net/dpaa/dpaa_ethdev.c
index 9847ca1be1..034f446561 100644
--- a/drivers/net/dpaa/dpaa_ethdev.c
+++ b/drivers/net/dpaa/dpaa_ethdev.c
@@ -133,6 +133,8 @@ static const struct rte_dpaa_xstats_name_off dpaa_xstats_strings[] = {
};
static struct rte_dpaa_driver rte_dpaa_pmd;
+int dpaa_valid_dev;
+struct rte_mempool *dpaa_tx_sg_pool;
static int
dpaa_eth_dev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info);
@@ -2209,7 +2211,20 @@ rte_dpaa_probe(struct rte_dpaa_driver *dpaa_drv,
/* Invoke PMD device initialization function */
diag = dpaa_dev_init(eth_dev);
if (diag == 0) {
+ if (!dpaa_tx_sg_pool) {
+ dpaa_tx_sg_pool =
+ rte_pktmbuf_pool_create("dpaa_mbuf_tx_sg_pool",
+ DPAA_POOL_SIZE,
+ DPAA_POOL_CACHE_SIZE, 0,
+ DPAA_MAX_SGS * sizeof(struct qm_sg_entry),
+ rte_socket_id());
+ if (dpaa_tx_sg_pool == NULL) {
+ DPAA_PMD_ERR("SG pool creation failed\n");
+ return -ENOMEM;
+ }
+ }
rte_eth_dev_probing_finish(eth_dev);
+ dpaa_valid_dev++;
return 0;
}
@@ -2227,6 +2242,9 @@ rte_dpaa_remove(struct rte_dpaa_device *dpaa_dev)
eth_dev = dpaa_dev->eth_dev;
dpaa_eth_dev_close(eth_dev);
+ dpaa_valid_dev--;
+ if (!dpaa_valid_dev)
+ rte_mempool_free(dpaa_tx_sg_pool);
ret = rte_eth_dev_release_port(eth_dev);
return ret;
diff --git a/drivers/net/dpaa/dpaa_ethdev.h b/drivers/net/dpaa/dpaa_ethdev.h
index 6fdd57dbc3..f9c0554530 100644
--- a/drivers/net/dpaa/dpaa_ethdev.h
+++ b/drivers/net/dpaa/dpaa_ethdev.h
@@ -33,6 +33,13 @@
#define DPAA_SGT_MAX_ENTRIES 16 /* maximum number of entries in SG Table */
+/* Maximum SG segments supported on all cores*/
+#define DPAA_MAX_SGS 128
+/* SG pool size */
+#define DPAA_POOL_SIZE 2048
+/* SG pool cache size */
+#define DPAA_POOL_CACHE_SIZE 256
+
/* RX queue tail drop threshold (CGR Based) in frame count */
#define CGR_RX_PERFQ_THRESH 256
#define CGR_TX_CGR_THRESH 512
@@ -103,6 +110,8 @@
#define FMC_FILE "/tmp/fmc.bin"
+extern struct rte_mempool *dpaa_tx_sg_pool;
+
/* Each network interface is represented by one of these */
struct dpaa_if {
int valid;
diff --git a/drivers/net/dpaa/dpaa_rxtx.c b/drivers/net/dpaa/dpaa_rxtx.c
index 956fe946fa..e0aa268645 100644
--- a/drivers/net/dpaa/dpaa_rxtx.c
+++ b/drivers/net/dpaa/dpaa_rxtx.c
@@ -793,8 +793,7 @@ uint16_t dpaa_eth_queue_rx(void *q,
static int
dpaa_eth_mbuf_to_sg_fd(struct rte_mbuf *mbuf,
- struct qm_fd *fd,
- struct dpaa_bp_info *bp_info)
+ struct qm_fd *fd)
{
struct rte_mbuf *cur_seg = mbuf, *prev_seg = NULL;
struct rte_mbuf *temp, *mi;
@@ -803,7 +802,7 @@ dpaa_eth_mbuf_to_sg_fd(struct rte_mbuf *mbuf,
DPAA_DP_LOG(DEBUG, "Creating SG FD to transmit");
- temp = rte_pktmbuf_alloc(bp_info->mp);
+ temp = rte_pktmbuf_alloc(dpaa_tx_sg_pool);
if (!temp) {
DPAA_PMD_ERR("Failure in allocation of mbuf");
return -1;
@@ -839,7 +838,7 @@ dpaa_eth_mbuf_to_sg_fd(struct rte_mbuf *mbuf,
fd->format = QM_FD_SG;
fd->addr = temp->buf_iova;
fd->offset = temp->data_off;
- fd->bpid = bp_info ? bp_info->bpid : 0xff;
+ fd->bpid = DPAA_MEMPOOL_TO_BPID(dpaa_tx_sg_pool);
fd->length20 = mbuf->pkt_len;
while (i < DPAA_SGT_MAX_ENTRIES) {
@@ -957,7 +956,7 @@ tx_on_dpaa_pool(struct rte_mbuf *mbuf,
tx_on_dpaa_pool_unsegmented(mbuf, bp_info, fd_arr);
} else if (mbuf->nb_segs > 1 &&
mbuf->nb_segs <= DPAA_SGT_MAX_ENTRIES) {
- if (dpaa_eth_mbuf_to_sg_fd(mbuf, fd_arr, bp_info)) {
+ if (dpaa_eth_mbuf_to_sg_fd(mbuf, fd_arr)) {
DPAA_PMD_DEBUG("Unable to create Scatter Gather FD");
return 1;
}
--
2.25.1
next reply other threads:[~2022-10-28 11:32 UTC|newest]
Thread overview: 5+ messages / expand[flat|nested] mbox.gz Atom feed top
2022-10-28 11:32 Gagandeep Singh [this message]
2022-10-28 11:32 ` [PATCH 21.11 2/4] net/dpaa: fix buffer freeing on SG Tx Gagandeep Singh
2022-10-28 11:32 ` [PATCH 21.11 3/4] net/dpaa2: use internal mempool for SG table Gagandeep Singh
2022-10-28 11:32 ` [PATCH 21.11 4/4] net/dpaa2: fix buffer freeing on SG Tx Gagandeep Singh
2022-11-01 14:53 ` [PATCH 21.11 1/4] net/dpaa: use internal mempool for SG table Kevin Traynor
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20221028113205.2349198-1-g.singh@nxp.com \
--to=g.singh@nxp.com \
--cc=hemant.agrawal@nxp.com \
--cc=ktraynor@redhat.com \
--cc=stable@dpdk.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).