From: Ciara Loftus <ciara.loftus@intel.com>
To: dev@dpdk.org
Cc: Ciara Loftus <ciara.loftus@intel.com>
Subject: [dpdk-dev] [PATCH v3 1/3] net/af_xdp: allow bigger batch sizes
Date: Wed, 10 Mar 2021 07:48:14 +0000
Message-ID: <20210310074816.3029-2-ciara.loftus@intel.com>
In-Reply-To: <20210310074816.3029-1-ciara.loftus@intel.com>

Prior to this commit, the maximum batch sizes for zero-copy and
copy-mode rx and copy-mode tx were set to 32. Apart from zero-copy tx,
the user could never rx/tx more than 32 packets at a time, and without
inspecting the code the user would not be aware of this limit.

This commit removes these upper limits placed on the user and instead
sets an internal batch size equal to the default ring size (2048).
Batches larger than this are still processed; however, they are split
into smaller batches of at most 2048 packets, similar to how other
drivers handle this. For example, a burst of 4096 packets is processed
as two internal batches of 2048. The split is necessary because some
arrays used during rx/tx need to be sized at compile time.

A larger batch size allows for fewer batches and thus larger bulk
operations, fewer ring accesses and fewer syscalls, all of which
should yield improved performance.
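
As an illustration (this example is not part of the patch), an
application using this PMD can now receive a large burst in a single
call. A minimal polling sketch, assuming port 0 / queue 0 have already
been configured with the af_xdp driver:

	#include <rte_ethdev.h>
	#include <rte_mbuf.h>

	#define BURST_SIZE 512	/* previously capped at 32 per rx call */

	static void
	poll_once(void)
	{
		struct rte_mbuf *pkts[BURST_SIZE];
		uint16_t nb_rx, i;

		/* The PMD now splits this request internally into
		 * batches of at most 2048 descriptors, so up to
		 * BURST_SIZE packets can arrive in one call.
		 */
		nb_rx = rte_eth_rx_burst(0, 0, pkts, BURST_SIZE);

		for (i = 0; i < nb_rx; i++)
			rte_pktmbuf_free(pkts[i]);
	}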

Signed-off-by: Ciara Loftus <ciara.loftus@intel.com>
---
 drivers/net/af_xdp/rte_eth_af_xdp.c | 67 ++++++++++++++++++++++++-----
 1 file changed, 57 insertions(+), 10 deletions(-)

diff --git a/drivers/net/af_xdp/rte_eth_af_xdp.c b/drivers/net/af_xdp/rte_eth_af_xdp.c
index 3957227bf0..be524e4784 100644
--- a/drivers/net/af_xdp/rte_eth_af_xdp.c
+++ b/drivers/net/af_xdp/rte_eth_af_xdp.c
@@ -66,8 +66,8 @@ RTE_LOG_REGISTER(af_xdp_logtype, pmd.net.af_xdp, NOTICE);
 #define ETH_AF_XDP_DFLT_START_QUEUE_IDX	0
 #define ETH_AF_XDP_DFLT_QUEUE_COUNT	1
 
-#define ETH_AF_XDP_RX_BATCH_SIZE	32
-#define ETH_AF_XDP_TX_BATCH_SIZE	32
+#define ETH_AF_XDP_RX_BATCH_SIZE	XSK_RING_CONS__DEFAULT_NUM_DESCS
+#define ETH_AF_XDP_TX_BATCH_SIZE	XSK_RING_CONS__DEFAULT_NUM_DESCS
 
 
 struct xsk_umem_info {
@@ -329,8 +329,7 @@ af_xdp_rx_cp(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 	struct rte_mbuf *mbufs[ETH_AF_XDP_RX_BATCH_SIZE];
 
 	if (xsk_prod_nb_free(fq, free_thresh) >= free_thresh)
-		(void)reserve_fill_queue(umem, ETH_AF_XDP_RX_BATCH_SIZE,
-					 NULL, fq);
+		(void)reserve_fill_queue(umem, nb_pkts, NULL, fq);
 
 	nb_pkts = xsk_ring_cons__peek(rx, nb_pkts, &idx_rx);
 	if (nb_pkts == 0) {
@@ -379,10 +378,8 @@ af_xdp_rx_cp(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 #endif
 
 static uint16_t
-eth_af_xdp_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
+af_xdp_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 {
-	nb_pkts = RTE_MIN(nb_pkts, ETH_AF_XDP_RX_BATCH_SIZE);
-
 #if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
 	return af_xdp_rx_zc(queue, bufs, nb_pkts);
 #else
@@ -390,6 +387,32 @@ eth_af_xdp_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 #endif
 }
 
+static uint16_t
+eth_af_xdp_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
+{
+	uint16_t nb_rx;
+
+	if (likely(nb_pkts <= ETH_AF_XDP_RX_BATCH_SIZE))
+		return af_xdp_rx(queue, bufs, nb_pkts);
+
+	/* Split larger batch into smaller batches of size
+	 * ETH_AF_XDP_RX_BATCH_SIZE or less.
+	 */
+	nb_rx = 0;
+	while (nb_pkts) {
+		uint16_t ret, n;
+
+		n = (uint16_t)RTE_MIN(nb_pkts, ETH_AF_XDP_RX_BATCH_SIZE);
+		ret = af_xdp_rx(queue, &bufs[nb_rx], n);
+		nb_rx = (uint16_t)(nb_rx + ret);
+		nb_pkts = (uint16_t)(nb_pkts - ret);
+		if (ret < n)
+			break;
+	}
+
+	return nb_rx;
+}
+
 static void
 pull_umem_cq(struct xsk_umem_info *umem, int size, struct xsk_ring_cons *cq)
 {
@@ -535,8 +558,6 @@ af_xdp_tx_cp(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 	uint32_t idx_tx;
 	struct xsk_ring_cons *cq = &txq->pair->cq;
 
-	nb_pkts = RTE_MIN(nb_pkts, ETH_AF_XDP_TX_BATCH_SIZE);
-
 	pull_umem_cq(umem, nb_pkts, cq);
 
 	nb_pkts = rte_ring_dequeue_bulk(umem->buf_ring, addrs,
@@ -575,6 +596,32 @@ af_xdp_tx_cp(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 
 	return nb_pkts;
 }
+
+static uint16_t
+af_xdp_tx_cp_batch(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
+{
+	uint16_t nb_tx;
+
+	if (likely(nb_pkts <= ETH_AF_XDP_TX_BATCH_SIZE))
+		return af_xdp_tx_cp(queue, bufs, nb_pkts);
+
+	nb_tx = 0;
+	while (nb_pkts) {
+		uint16_t ret, n;
+
+		/* Split larger batch into smaller batches of size
+		 * ETH_AF_XDP_TX_BATCH_SIZE or less.
+		 */
+		n = (uint16_t)RTE_MIN(nb_pkts, ETH_AF_XDP_TX_BATCH_SIZE);
+		ret = af_xdp_tx_cp(queue, &bufs[nb_tx], n);
+		nb_tx = (uint16_t)(nb_tx + ret);
+		nb_pkts = (uint16_t)(nb_pkts - ret);
+		if (ret < n)
+			break;
+	}
+
+	return nb_tx;
+}
 #endif
 
 static uint16_t
@@ -583,7 +630,7 @@ eth_af_xdp_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 #if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
 	return af_xdp_tx_zc(queue, bufs, nb_pkts);
 #else
-	return af_xdp_tx_cp(queue, bufs, nb_pkts);
+	return af_xdp_tx_cp_batch(queue, bufs, nb_pkts);
 #endif
 }
 
-- 
2.17.1


Thread overview: 27+ messages
2021-02-18  9:23 [dpdk-dev] [PATCH RFC 0/3] AF_XDP Preferred Busy Polling Ciara Loftus
2021-02-18  9:23 ` [dpdk-dev] [PATCH RFC 1/3] net/af_xdp: Increase max batch size to 512 Ciara Loftus
2021-02-18  9:23 ` [dpdk-dev] [PATCH RFC 2/3] net/af_xdp: Use recvfrom() instead of poll() Ciara Loftus
2021-02-18  9:23 ` [dpdk-dev] [PATCH RFC 3/3] net/af_xdp: preferred busy polling Ciara Loftus
2021-02-24 11:18 ` [dpdk-dev] [PATCH 0/3] AF_XDP Preferred Busy Polling Ciara Loftus
2021-02-24 11:18   ` [dpdk-dev] [PATCH 1/3] net/af_xdp: Increase max batch size to 512 Ciara Loftus
2021-03-01 16:31     ` Ferruh Yigit
2021-03-03 15:07       ` Loftus, Ciara
2021-03-03 15:38         ` Ferruh Yigit
2021-02-24 11:18   ` [dpdk-dev] [PATCH 2/3] net/af_xdp: Use recvfrom() instead of poll() Ciara Loftus
2021-02-24 11:18   ` [dpdk-dev] [PATCH 3/3] net/af_xdp: preferred busy polling Ciara Loftus
2021-03-01 16:32     ` Ferruh Yigit
2021-03-04 12:26       ` Loftus, Ciara
2021-03-08 15:54         ` Loftus, Ciara
2021-03-09 10:19   ` [dpdk-dev] [PATCH v2 0/3] AF_XDP Preferred Busy Polling Ciara Loftus
2021-03-09 10:19     ` [dpdk-dev] [PATCH v2 1/3] net/af_xdp: allow bigger batch sizes Ciara Loftus
2021-03-09 16:33       ` Ferruh Yigit
2021-03-10  7:21         ` Loftus, Ciara
2021-03-09 10:19     ` [dpdk-dev] [PATCH v2 2/3] net/af_xdp: Use recvfrom() instead of poll() Ciara Loftus
2021-03-09 10:19     ` [dpdk-dev] [PATCH v2 3/3] net/af_xdp: preferred busy polling Ciara Loftus
2021-03-09 16:33       ` Ferruh Yigit
2021-03-10  7:55         ` Loftus, Ciara
2021-03-10  7:48     ` [dpdk-dev] [PATCH v3 0/3] AF_XDP Preferred Busy Polling Ciara Loftus
2021-03-10  7:48       ` Ciara Loftus [this message]
2021-03-10  7:48       ` [dpdk-dev] [PATCH v3 2/3] net/af_xdp: Use recvfrom() instead of poll() Ciara Loftus
2021-03-10  7:48       ` [dpdk-dev] [PATCH v3 3/3] net/af_xdp: preferred busy polling Ciara Loftus
2021-03-10 17:50       ` [dpdk-dev] [PATCH v3 0/3] AF_XDP Preferred Busy Polling Ferruh Yigit
