From: Ajit Khaparde <ajit.khaparde@broadcom.com>
To: dev@dpdk.org
Cc: ferruh.yigit@intel.com,
Sriharsha Basavapatna <sriharsha.basavapatna@broadcom.com>
Subject: [dpdk-dev] [PATCH 6/6] net/bnxt: support bulk free of Tx mbufs
Date: Wed, 15 May 2019 11:08:17 -0700
Message-ID: <20190515180817.71523-7-ajit.khaparde@broadcom.com>
In-Reply-To: <20190515180817.71523-1-ajit.khaparde@broadcom.com>
The driver currently uses rte_pktmbuf_free() to free each mbuf
individually after transmit completion. Optimize this by collecting
the completed mbufs per mempool and returning them in batches with
rte_mempool_put_bulk().
Signed-off-by: Ajit Khaparde <ajit.khaparde@broadcom.com>
Signed-off-by: Sriharsha Basavapatna <sriharsha.basavapatna@broadcom.com>
---
drivers/net/bnxt/bnxt_txq.c | 11 ++++++++
drivers/net/bnxt/bnxt_txq.h | 1 +
drivers/net/bnxt/bnxt_txr.c | 50 +++++++++++++++++++++++++++++--------
3 files changed, 52 insertions(+), 10 deletions(-)
diff --git a/drivers/net/bnxt/bnxt_txq.c b/drivers/net/bnxt/bnxt_txq.c
index b9b975e4c..5a7bfaf3e 100644
--- a/drivers/net/bnxt/bnxt_txq.c
+++ b/drivers/net/bnxt/bnxt_txq.c
@@ -69,6 +69,7 @@ void bnxt_tx_queue_release_op(void *tx_queue)
rte_memzone_free(txq->mz);
txq->mz = NULL;
+ rte_free(txq->free);
rte_free(txq);
}
}
@@ -110,6 +111,16 @@ int bnxt_tx_queue_setup_op(struct rte_eth_dev *eth_dev,
rc = -ENOMEM;
goto out;
}
+
+ txq->free = rte_zmalloc_socket(NULL,
+ sizeof(struct rte_mbuf *) * nb_desc,
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (!txq->free) {
+ PMD_DRV_LOG(ERR, "allocation of tx mbuf free array failed!");
+ rte_free(txq);
+ rc = -ENOMEM;
+ goto out;
+ }
txq->bp = bp;
txq->nb_tx_desc = nb_desc;
txq->tx_free_thresh = tx_conf->tx_free_thresh;
diff --git a/drivers/net/bnxt/bnxt_txq.h b/drivers/net/bnxt/bnxt_txq.h
index 720ca90cf..a0d4678d9 100644
--- a/drivers/net/bnxt/bnxt_txq.h
+++ b/drivers/net/bnxt/bnxt_txq.h
@@ -33,6 +33,7 @@ struct bnxt_tx_queue {
unsigned int cp_nr_rings;
struct bnxt_cp_ring_info *cp_ring;
const struct rte_memzone *mz;
+ struct rte_mbuf **free;
};
void bnxt_free_txq_stats(struct bnxt_tx_queue *txq);
diff --git a/drivers/net/bnxt/bnxt_txr.c b/drivers/net/bnxt/bnxt_txr.c
index b15778b39..9de12e0d0 100644
--- a/drivers/net/bnxt/bnxt_txr.c
+++ b/drivers/net/bnxt/bnxt_txr.c
@@ -320,6 +320,7 @@ static uint16_t bnxt_start_xmit(struct rte_mbuf *tx_pkt,
RTE_VERIFY(m_seg->data_len);
txr->tx_prod = RING_NEXT(txr->tx_ring_struct, txr->tx_prod);
tx_buf = &txr->tx_buf_ring[txr->tx_prod];
+ tx_buf->mbuf = m_seg;
txbd = &txr->tx_desc_ring[txr->tx_prod];
txbd->address = rte_cpu_to_le_64(rte_mbuf_data_iova(m_seg));
@@ -339,24 +340,53 @@ static uint16_t bnxt_start_xmit(struct rte_mbuf *tx_pkt,
static void bnxt_tx_cmp(struct bnxt_tx_queue *txq, int nr_pkts)
{
struct bnxt_tx_ring_info *txr = txq->tx_ring;
+ struct rte_mempool *pool = NULL;
+ struct rte_mbuf **free = txq->free;
uint16_t cons = txr->tx_cons;
+ unsigned int blk = 0;
int i, j;
for (i = 0; i < nr_pkts; i++) {
- struct bnxt_sw_tx_bd *tx_buf;
struct rte_mbuf *mbuf;
+ struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[cons];
+ unsigned short nr_bds = tx_buf->nr_bds;
- tx_buf = &txr->tx_buf_ring[cons];
- cons = RING_NEXT(txr->tx_ring_struct, cons);
- mbuf = tx_buf->mbuf;
- tx_buf->mbuf = NULL;
-
- /* EW - no need to unmap DMA memory? */
-
- for (j = 1; j < tx_buf->nr_bds; j++)
+ for (j = 0; j < nr_bds; j++) {
+ mbuf = tx_buf->mbuf;
+ tx_buf->mbuf = NULL;
cons = RING_NEXT(txr->tx_ring_struct, cons);
- rte_pktmbuf_free(mbuf);
+ tx_buf = &txr->tx_buf_ring[cons];
+ if (!mbuf) /* long_bd's tx_buf ? */
+ continue;
+
+ mbuf = rte_pktmbuf_prefree_seg(mbuf);
+ if (unlikely(!mbuf))
+ continue;
+
+ /* EW - no need to unmap DMA memory? */
+
+ if (likely(mbuf->pool == pool)) {
+ /* Add mbuf to the bulk free array */
+ free[blk++] = mbuf;
+ } else {
+ /* Found an mbuf from a different pool. Free
+ * mbufs accumulated so far to the previous
+ * pool
+ */
+ if (likely(pool != NULL))
+ rte_mempool_put_bulk(pool,
+ (void *)free,
+ blk);
+
+ /* Start accumulating mbufs in a new pool */
+ free[0] = mbuf;
+ pool = mbuf->pool;
+ blk = 1;
+ }
+ }
}
+ if (blk)
+ rte_mempool_put_bulk(pool, (void *)free, blk);
txr->tx_cons = cons;
}
--
2.20.1 (Apple Git-117)