From: Joyce Kong <joyce.kong@arm.com>
To: Jakub Grajciar <jgrajcia@cisco.com>
Cc: dev@dpdk.org, nd@arm.com, Joyce Kong <joyce.kong@arm.com>
Subject: [RFC] net/memif: add a fast path for Rx
Date: Tue, 12 Apr 2022 09:32:43 +0000
Message-ID: <20220412093243.3670187-1-joyce.kong@arm.com>

For memif non-zero-copy mode, the Rx copy loop contains a branch that
compares the mbuf and memif buffer sizes for every copied chunk. Add a
fast memory copy path that removes this branch when the mbuf size is
known to cover the memif buffer size, so the size check is done once
per burst instead of once per chunk. Removing the branch from the copy
loop gives a performance uplift.
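
To illustrate the change outside the driver, here is a minimal standalone
sketch (illustrative names only, not the patch itself): the generic path
keeps the per-chunk RTE_MIN and offset bookkeeping that is needed when a
descriptor may be larger than one mbuf segment, while the fast path, valid
only under the per-burst guarantee that the destination covers the whole
descriptor, collapses to a single memcpy.

  /* Standalone sketch, not the driver code: the same descriptor copy done
   * with the generic chunked loop vs. the single memcpy that becomes valid
   * once dst_size >= src_len is guaranteed for the whole burst. */
  #include <assert.h>
  #include <stdint.h>
  #include <string.h>

  /* Generic path: per-chunk bookkeeping; the real driver allocates and
   * chains a new mbuf segment at the marked spot. */
  static void copy_generic(uint8_t *dst, size_t dst_size,
                           const uint8_t *src, size_t src_len)
  {
      size_t src_off = 0, dst_off = 0;

      do {
          size_t dst_len = dst_size - dst_off;

          if (dst_len == 0) {
              /* driver: allocate and chain the next mbuf segment here */
              dst_off = 0;
              dst_len = dst_size;
          }
          size_t cp_len = src_len < dst_len ? src_len : dst_len;

          memcpy(dst + dst_off, src + src_off, cp_len);
          src_off += cp_len;
          dst_off += cp_len;
          src_len -= cp_len;
      } while (src_len);
  }

  /* Fast path: the size check is hoisted out of the loop, so one memcpy
   * per descriptor is enough. */
  static void copy_fast(uint8_t *dst, const uint8_t *src, size_t src_len)
  {
      memcpy(dst, src, src_len);
  }

  int main(void)
  {
      uint8_t src[128], a[2048] = {0}, b[2048] = {0};

      for (size_t i = 0; i < sizeof(src); i++)
          src[i] = (uint8_t)i;

      copy_generic(a, sizeof(a), src, sizeof(src));
      copy_fast(b, src, sizeof(src));
      assert(memcmp(a, b, sizeof(a)) == 0);
      return 0;
  }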

When the mbuf size is greater than or equal to the memif buffer size, Rx
takes the fast memcpy path. Tested with 1p1q on an Ampere Altra AArch64
server, there is a 2.6% performance gain in non-zero-copy mode and a
1.36% gain in zero-copy mode. Tested with 1p1q on a Cascade Lake Xeon
x86 server, there is a 3.04% gain in non-zero-copy mode and a 0.27%
gain in zero-copy mode.
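
As a usage note, not part of the patch: with the default memif
pkt_buffer_size of 2048 bytes, the fast path is taken whenever the mempool
data room minus RTE_PKTMBUF_HEADROOM is at least 2048, which the usual
RTE_MBUF_DEFAULT_BUF_SIZE sizing already satisfies. A hedged sketch of
sizing an Rx mempool explicitly for this condition (pool name and counts
are arbitrary):

  #include <rte_lcore.h>
  #include <rte_mbuf.h>

  /* Assumes the memif vdev keeps its default pkt_buffer_size of 2048.
   * Give each mbuf a data room of pkt_buffer_size plus headroom so that
   * mbuf_size >= pkt_buffer_size holds and Rx takes the fast path. */
  #define MEMIF_PKT_BUFFER_SIZE 2048

  static struct rte_mempool *
  create_memif_rx_pool(void)
  {
      return rte_pktmbuf_pool_create("memif_rx_pool",
              4096,   /* number of mbufs */
              256,    /* per-lcore cache size */
              0,      /* application private area size */
              MEMIF_PKT_BUFFER_SIZE + RTE_PKTMBUF_HEADROOM,
              rte_socket_id());
  }
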
Signed-off-by: Joyce Kong <joyce.kong@arm.com>
---
drivers/net/memif/rte_eth_memif.c | 124 ++++++++++++++++++++----------
1 file changed, 84 insertions(+), 40 deletions(-)
diff --git a/drivers/net/memif/rte_eth_memif.c b/drivers/net/memif/rte_eth_memif.c
index 587ad45576..f55776ca46 100644
--- a/drivers/net/memif/rte_eth_memif.c
+++ b/drivers/net/memif/rte_eth_memif.c
@@ -342,66 +342,111 @@ eth_memif_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
goto refill;
n_slots = last_slot - cur_slot;
- while (n_slots && n_rx_pkts < nb_pkts) {
- mbuf_head = rte_pktmbuf_alloc(mq->mempool);
- if (unlikely(mbuf_head == NULL))
- goto no_free_bufs;
- mbuf = mbuf_head;
- mbuf->port = mq->in_port;
+ if (likely(mbuf_size >= pmd->cfg.pkt_buffer_size)) {
+ while (n_slots && n_rx_pkts < nb_pkts) {
+ mbuf_head = rte_pktmbuf_alloc(mq->mempool);
+ if (unlikely(mbuf_head == NULL))
+ goto no_free_bufs;
+ mbuf = mbuf_head;
+ mbuf->port = mq->in_port;
+
+next_slot1:
+ s0 = cur_slot & mask;
+ d0 = &ring->desc[s0];
-next_slot:
- s0 = cur_slot & mask;
- d0 = &ring->desc[s0];
+ cp_len = d0->length;
- src_len = d0->length;
- dst_off = 0;
- src_off = 0;
+ rte_pktmbuf_data_len(mbuf) = cp_len;
+ rte_pktmbuf_pkt_len(mbuf) = cp_len;
+ if (mbuf != mbuf_head)
+ rte_pktmbuf_pkt_len(mbuf_head) += cp_len;
- do {
- dst_len = mbuf_size - dst_off;
- if (dst_len == 0) {
- dst_off = 0;
- dst_len = mbuf_size;
+ rte_memcpy(rte_pktmbuf_mtod(mbuf, void *),
+ (uint8_t *)memif_get_buffer(proc_private, d0), cp_len);
+
+ cur_slot++;
+ n_slots--;
- /* store pointer to tail */
+ if (d0->flags & MEMIF_DESC_FLAG_NEXT) {
mbuf_tail = mbuf;
mbuf = rte_pktmbuf_alloc(mq->mempool);
if (unlikely(mbuf == NULL))
goto no_free_bufs;
- mbuf->port = mq->in_port;
ret = memif_pktmbuf_chain(mbuf_head, mbuf_tail, mbuf);
if (unlikely(ret < 0)) {
MIF_LOG(ERR, "number-of-segments-overflow");
rte_pktmbuf_free(mbuf);
goto no_free_bufs;
}
+ goto next_slot1;
}
- cp_len = RTE_MIN(dst_len, src_len);
- rte_pktmbuf_data_len(mbuf) += cp_len;
- rte_pktmbuf_pkt_len(mbuf) = rte_pktmbuf_data_len(mbuf);
- if (mbuf != mbuf_head)
- rte_pktmbuf_pkt_len(mbuf_head) += cp_len;
+ mq->n_bytes += rte_pktmbuf_pkt_len(mbuf_head);
+ *bufs++ = mbuf_head;
+ n_rx_pkts++;
+ }
+ } else {
+ while (n_slots && n_rx_pkts < nb_pkts) {
+ mbuf_head = rte_pktmbuf_alloc(mq->mempool);
+ if (unlikely(mbuf_head == NULL))
+ goto no_free_bufs;
+ mbuf = mbuf_head;
+ mbuf->port = mq->in_port;
+
+next_slot2:
+ s0 = cur_slot & mask;
+ d0 = &ring->desc[s0];
- rte_memcpy(rte_pktmbuf_mtod_offset(mbuf, void *,
- dst_off),
- (uint8_t *)memif_get_buffer(proc_private, d0) +
- src_off, cp_len);
+ src_len = d0->length;
+ dst_off = 0;
+ src_off = 0;
- src_off += cp_len;
- dst_off += cp_len;
- src_len -= cp_len;
- } while (src_len);
+ do {
+ dst_len = mbuf_size - dst_off;
+ if (dst_len == 0) {
+ dst_off = 0;
+ dst_len = mbuf_size;
+
+ /* store pointer to tail */
+ mbuf_tail = mbuf;
+ mbuf = rte_pktmbuf_alloc(mq->mempool);
+ if (unlikely(mbuf == NULL))
+ goto no_free_bufs;
+ mbuf->port = mq->in_port;
+ ret = memif_pktmbuf_chain(mbuf_head, mbuf_tail, mbuf);
+ if (unlikely(ret < 0)) {
+ MIF_LOG(ERR, "number-of-segments-overflow");
+ rte_pktmbuf_free(mbuf);
+ goto no_free_bufs;
+ }
+ }
+ cp_len = RTE_MIN(dst_len, src_len);
- cur_slot++;
- n_slots--;
+ rte_pktmbuf_data_len(mbuf) += cp_len;
+ rte_pktmbuf_pkt_len(mbuf) = rte_pktmbuf_data_len(mbuf);
+ if (mbuf != mbuf_head)
+ rte_pktmbuf_pkt_len(mbuf_head) += cp_len;
- if (d0->flags & MEMIF_DESC_FLAG_NEXT)
- goto next_slot;
+ rte_memcpy(rte_pktmbuf_mtod_offset(mbuf, void *,
+ dst_off),
+ (uint8_t *)memif_get_buffer(proc_private, d0) +
+ src_off, cp_len);
- mq->n_bytes += rte_pktmbuf_pkt_len(mbuf_head);
- *bufs++ = mbuf_head;
- n_rx_pkts++;
+ src_off += cp_len;
+ dst_off += cp_len;
+ src_len -= cp_len;
+ } while (src_len);
+
+ cur_slot++;
+ n_slots--;
+
+ if (d0->flags & MEMIF_DESC_FLAG_NEXT)
+ goto next_slot2;
+
+ mq->n_bytes += rte_pktmbuf_pkt_len(mbuf_head);
+ *bufs++ = mbuf_head;
+ n_rx_pkts++;
+ }
}
no_free_bufs:
@@ -694,7 +739,6 @@ eth_memif_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
return n_tx_pkts;
}
-
static int
memif_tx_one_zc(struct pmd_process_private *proc_private, struct memif_queue *mq,
memif_ring_t *ring, struct rte_mbuf *mbuf, const uint16_t mask,
--
2.25.1
Thread overview: 26+ messages
2022-04-12 9:32 Joyce Kong [this message]
2022-05-17 10:51 ` [PATCH v1 0/2] add a fast path for memif Rx/Tx Joyce Kong
2022-05-17 10:51 ` [PATCH v1 1/2] net/memif: add a Rx fast path Joyce Kong
2022-05-18 16:53 ` Ferruh Yigit
2022-05-19 7:00 ` Joyce Kong
2022-05-19 8:44 ` Joyce Kong
2022-05-18 17:06 ` Ferruh Yigit
2022-05-19 15:09 ` Joyce Kong
2022-05-19 16:38 ` Ferruh Yigit
2022-05-17 10:51 ` [PATCH v1 2/2] net/memif: add a Tx " Joyce Kong
2022-05-17 13:59 ` [PATCH v1 0/2] add a fast path for memif Rx/Tx Morten Brørup
2022-05-18 2:48 ` Ruifeng Wang
2022-07-01 10:28 ` [PATCH v2 " Joyce Kong
2022-07-01 10:28 ` [PATCH v2 1/2] net/memif: add a Rx fast path Joyce Kong
2022-07-01 16:51 ` Stephen Hemminger
2022-08-22 3:47 ` [PATCH v3 0/2] add a fast path for memif Rx/Tx Joyce Kong
2022-08-22 3:47 ` [PATCH v3 1/2] net/memif: add a Rx fast path Joyce Kong
2022-08-31 16:25 ` Stephen Hemminger
2022-09-07 6:06 ` Joyce Kong
2022-08-22 3:47 ` [PATCH v3 2/2] net/memif: add a Tx " Joyce Kong
2022-07-01 10:28 ` [PATCH v2 " Joyce Kong
2022-09-15 6:58 ` [PATCH v4 0/2] add a fast path for memif Rx/Tx Joyce Kong
2022-09-15 6:58 ` [PATCH v4 1/2] net/memif: add a Rx fast path Joyce Kong
2022-09-15 6:58 ` [PATCH v4 2/2] net/memif: add a Tx " Joyce Kong
2022-09-22 9:12 ` [PATCH v4 0/2] add a fast path for memif Rx/Tx Ferruh Yigit
2022-12-09 13:59 ` Ferruh Yigit