From: Joyce Kong <joyce.kong@arm.com>
To: jgrajcia@cisco.com, stephen@networkplumber.org,
huzaifa.rahman@emumba.com
Cc: dev@dpdk.org, nd@arm.com, mb@smartsharesystems.com,
ruifeng.wang@arm.com, Joyce Kong <joyce.kong@arm.com>
Subject: [PATCH v3 1/2] net/memif: add a Rx fast path
Date: Mon, 22 Aug 2022 03:47:30 +0000 [thread overview]
Message-ID: <20220822034731.528424-2-joyce.kong@arm.com> (raw)
In-Reply-To: <20220822034731.528424-1-joyce.kong@arm.com>
For memif non-zero-copy mode, there is a branch to compare
the mbuf and memif buffer size during memory copying. Mbuf
and memif buffer sizes are defined at compile time. If the memif
buffer size is less than or equal to the mbuf size, add a fast Rx
memory copy path by removing this branch and using mbuf bulk allocation.
The removal of the branch and the use of bulk allocation lead to a
considerable performance uplift.
Test with 1p1q on N1SDP AArch64 server,
--------------------------------------------
buf size | memif <= mbuf | memif > mbuf |
--------------------------------------------
non-zc gain | 26.85% | -0.37% |
--------------------------------------------
zc gain | 8.57% | 3.04% |
--------------------------------------------
Test with 1p1q on Cascade Lake Xeon X86 server,
--------------------------------------------
buf size | memif <= mbuf | memif > mbuf |
--------------------------------------------
non-zc gain | 17.54% | -0.42% |
--------------------------------------------
zc gain | 10.67% | 0.26% |
--------------------------------------------
Signed-off-by: Joyce Kong <joyce.kong@arm.com>
Reviewed-by: Ruifeng Wang <ruifeng.wang@arm.com>
Acked-by: Morten Brørup <mb@smartsharesystems.com>
---
drivers/net/memif/rte_eth_memif.c | 137 +++++++++++++++++++++---------
1 file changed, 96 insertions(+), 41 deletions(-)
diff --git a/drivers/net/memif/rte_eth_memif.c b/drivers/net/memif/rte_eth_memif.c
index dd951b8296..2ea2a8e266 100644
--- a/drivers/net/memif/rte_eth_memif.c
+++ b/drivers/net/memif/rte_eth_memif.c
@@ -342,66 +342,122 @@ eth_memif_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
goto refill;
n_slots = last_slot - cur_slot;
- while (n_slots && n_rx_pkts < nb_pkts) {
- mbuf_head = rte_pktmbuf_alloc(mq->mempool);
- if (unlikely(mbuf_head == NULL))
- goto no_free_bufs;
- mbuf = mbuf_head;
- mbuf->port = mq->in_port;
- dst_off = 0;
+ if (likely(mbuf_size >= pmd->cfg.pkt_buffer_size)) {
+ struct rte_mbuf *mbufs[nb_pkts];
+ ret = rte_pktmbuf_alloc_bulk(mq->mempool, mbufs, nb_pkts);
+ if (unlikely(ret < 0))
+ goto no_free_bufs;
+
+ while (n_slots && n_rx_pkts < nb_pkts) {
+ mbuf_head = mbufs[n_rx_pkts];
+ mbuf = mbuf_head;
+
+next_slot1:
+ mbuf->port = mq->in_port;
+ s0 = cur_slot & mask;
+ d0 = &ring->desc[s0];
-next_slot:
- s0 = cur_slot & mask;
- d0 = &ring->desc[s0];
+ cp_len = d0->length;
- src_len = d0->length;
- src_off = 0;
+ rte_pktmbuf_data_len(mbuf) = cp_len;
+ rte_pktmbuf_pkt_len(mbuf) = cp_len;
+ if (mbuf != mbuf_head)
+ rte_pktmbuf_pkt_len(mbuf_head) += cp_len;
- do {
- dst_len = mbuf_size - dst_off;
- if (dst_len == 0) {
- dst_off = 0;
- dst_len = mbuf_size;
+ rte_memcpy(rte_pktmbuf_mtod(mbuf, void *),
+ (uint8_t *)memif_get_buffer(proc_private, d0), cp_len);
- /* store pointer to tail */
+ cur_slot++;
+ n_slots--;
+
+ if (d0->flags & MEMIF_DESC_FLAG_NEXT) {
mbuf_tail = mbuf;
mbuf = rte_pktmbuf_alloc(mq->mempool);
- if (unlikely(mbuf == NULL))
+ if (unlikely(mbuf == NULL)) {
+ rte_pktmbuf_free_bulk(mbufs + n_rx_pkts,
+ nb_pkts - n_rx_pkts);
goto no_free_bufs;
- mbuf->port = mq->in_port;
+ }
ret = memif_pktmbuf_chain(mbuf_head, mbuf_tail, mbuf);
if (unlikely(ret < 0)) {
MIF_LOG(ERR, "number-of-segments-overflow");
rte_pktmbuf_free(mbuf);
+ rte_pktmbuf_free_bulk(mbufs + n_rx_pkts,
+ nb_pkts - n_rx_pkts);
goto no_free_bufs;
}
+ goto next_slot1;
}
- cp_len = RTE_MIN(dst_len, src_len);
- rte_pktmbuf_data_len(mbuf) += cp_len;
- rte_pktmbuf_pkt_len(mbuf) = rte_pktmbuf_data_len(mbuf);
- if (mbuf != mbuf_head)
- rte_pktmbuf_pkt_len(mbuf_head) += cp_len;
+ mq->n_bytes += rte_pktmbuf_pkt_len(mbuf_head);
+ *bufs++ = mbuf_head;
+ n_rx_pkts++;
+ }
- rte_memcpy(rte_pktmbuf_mtod_offset(mbuf, void *,
- dst_off),
- (uint8_t *)memif_get_buffer(proc_private, d0) +
- src_off, cp_len);
+ if (n_rx_pkts < nb_pkts)
+ rte_pktmbuf_free_bulk(mbufs + n_rx_pkts, nb_pkts - n_rx_pkts);
+ } else {
+ while (n_slots && n_rx_pkts < nb_pkts) {
+ mbuf_head = rte_pktmbuf_alloc(mq->mempool);
+ if (unlikely(mbuf_head == NULL))
+ goto no_free_bufs;
+ mbuf = mbuf_head;
+ mbuf->port = mq->in_port;
+
+next_slot2:
+ s0 = cur_slot & mask;
+ d0 = &ring->desc[s0];
- src_off += cp_len;
- dst_off += cp_len;
- src_len -= cp_len;
- } while (src_len);
+ src_len = d0->length;
+ dst_off = 0;
+ src_off = 0;
- cur_slot++;
- n_slots--;
+ do {
+ dst_len = mbuf_size - dst_off;
+ if (dst_len == 0) {
+ dst_off = 0;
+ dst_len = mbuf_size;
+
+ /* store pointer to tail */
+ mbuf_tail = mbuf;
+ mbuf = rte_pktmbuf_alloc(mq->mempool);
+ if (unlikely(mbuf == NULL))
+ goto no_free_bufs;
+ mbuf->port = mq->in_port;
+ ret = memif_pktmbuf_chain(mbuf_head, mbuf_tail, mbuf);
+ if (unlikely(ret < 0)) {
+ MIF_LOG(ERR, "number-of-segments-overflow");
+ rte_pktmbuf_free(mbuf);
+ goto no_free_bufs;
+ }
+ }
+ cp_len = RTE_MIN(dst_len, src_len);
- if (d0->flags & MEMIF_DESC_FLAG_NEXT)
- goto next_slot;
+ rte_pktmbuf_data_len(mbuf) += cp_len;
+ rte_pktmbuf_pkt_len(mbuf) = rte_pktmbuf_data_len(mbuf);
+ if (mbuf != mbuf_head)
+ rte_pktmbuf_pkt_len(mbuf_head) += cp_len;
- mq->n_bytes += rte_pktmbuf_pkt_len(mbuf_head);
- *bufs++ = mbuf_head;
- n_rx_pkts++;
+ rte_memcpy(rte_pktmbuf_mtod_offset(mbuf, void *,
+ dst_off),
+ (uint8_t *)memif_get_buffer(proc_private, d0) +
+ src_off, cp_len);
+
+ src_off += cp_len;
+ dst_off += cp_len;
+ src_len -= cp_len;
+ } while (src_len);
+
+ cur_slot++;
+ n_slots--;
+
+ if (d0->flags & MEMIF_DESC_FLAG_NEXT)
+ goto next_slot2;
+
+ mq->n_bytes += rte_pktmbuf_pkt_len(mbuf_head);
+ *bufs++ = mbuf_head;
+ n_rx_pkts++;
+ }
}
no_free_bufs:
@@ -694,7 +750,6 @@ eth_memif_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
return n_tx_pkts;
}
-
static int
memif_tx_one_zc(struct pmd_process_private *proc_private, struct memif_queue *mq,
memif_ring_t *ring, struct rte_mbuf *mbuf, const uint16_t mask,
--
2.25.1
next prev parent reply other threads:[~2022-08-22 3:48 UTC|newest]
Thread overview: 26+ messages / expand[flat|nested] mbox.gz Atom feed top
2022-04-12 9:32 [RFC] net/memif: add a fast path for Rx Joyce Kong
2022-05-17 10:51 ` [PATCH v1 0/2] add a fast path for memif Rx/Tx Joyce Kong
2022-05-17 10:51 ` [PATCH v1 1/2] net/memif: add a Rx fast path Joyce Kong
2022-05-18 16:53 ` Ferruh Yigit
2022-05-19 7:00 ` Joyce Kong
2022-05-19 8:44 ` Joyce Kong
2022-05-18 17:06 ` Ferruh Yigit
2022-05-19 15:09 ` Joyce Kong
2022-05-19 16:38 ` Ferruh Yigit
2022-05-17 10:51 ` [PATCH v1 2/2] net/memif: add a Tx " Joyce Kong
2022-05-17 13:59 ` [PATCH v1 0/2] add a fast path for memif Rx/Tx Morten Brørup
2022-05-18 2:48 ` Ruifeng Wang
2022-07-01 10:28 ` [PATCH v2 " Joyce Kong
2022-07-01 10:28 ` [PATCH v2 1/2] net/memif: add a Rx fast path Joyce Kong
2022-07-01 16:51 ` Stephen Hemminger
2022-08-22 3:47 ` [PATCH v3 0/2] add a fast path for memif Rx/Tx Joyce Kong
2022-08-22 3:47 ` Joyce Kong [this message]
2022-08-31 16:25 ` [PATCH v3 1/2] net/memif: add a Rx fast path Stephen Hemminger
2022-09-07 6:06 ` Joyce Kong
2022-08-22 3:47 ` [PATCH v3 2/2] net/memif: add a Tx " Joyce Kong
2022-07-01 10:28 ` [PATCH v2 " Joyce Kong
2022-09-15 6:58 ` [PATCH v4 0/2] add a fast path for memif Rx/Tx Joyce Kong
2022-09-15 6:58 ` [PATCH v4 1/2] net/memif: add a Rx fast path Joyce Kong
2022-09-15 6:58 ` [PATCH v4 2/2] net/memif: add a Tx " Joyce Kong
2022-09-22 9:12 ` [PATCH v4 0/2] add a fast path for memif Rx/Tx Ferruh Yigit
2022-12-09 13:59 ` Ferruh Yigit
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20220822034731.528424-2-joyce.kong@arm.com \
--to=joyce.kong@arm.com \
--cc=dev@dpdk.org \
--cc=huzaifa.rahman@emumba.com \
--cc=jgrajcia@cisco.com \
--cc=mb@smartsharesystems.com \
--cc=nd@arm.com \
--cc=ruifeng.wang@arm.com \
--cc=stephen@networkplumber.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).