From: John Miller <john.miller@atomicrules.com>
To: dev@dpdk.org, ferruh.yigit@intel.com
Cc: John Miller <john.miller@atomicrules.com>
Subject: [PATCH 2/4] net/ark: support arbitrary mbuf size
Date: Wed, 19 Jan 2022 13:12:53 -0600
Message-ID: <20220119191255.273988-2-john.miller@atomicrules.com>
In-Reply-To: <20220119191255.273988-1-john.miller@atomicrules.com>

Support arbitrary mbuf size per queue.

Configure the UDM per RX queue, using the data room of that queue's
mbuf pool (minus RTE_PKTMBUF_HEADROOM), instead of configuring it once
at device level with RTE_MBUF_DEFAULT_DATAROOM. Receive chaining now
triggers only when a packet exceeds the queue's actual dataroom, so
mempools with non-default mbuf sizes work correctly.

Signed-off-by: John Miller <john.miller@atomicrules.com>
---
 drivers/net/ark/ark_ethdev.c    |  8 --------
 drivers/net/ark/ark_ethdev_rx.c | 23 +++++++++++++++++++----
 drivers/net/ark/ark_udm.h       |  2 +-
 3 files changed, 20 insertions(+), 13 deletions(-)

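As context for the change below (a minimal sketch, not part of this
patch: the pool names, sizes, and queue ids are illustrative
assumptions), an application can now pair each RX queue with a mempool
whose data room differs from the default, and the PMD derives that
queue's dataroom from the pool rather than from
RTE_MBUF_DEFAULT_DATAROOM:

  #include <rte_ethdev.h>
  #include <rte_mbuf.h>

  int setup_rx_queues(uint16_t port_id, unsigned int socket_id)
  {
  	/* 9216-byte data room for jumbo traffic on queue 0 ... */
  	struct rte_mempool *jumbo_pool = rte_pktmbuf_pool_create(
  		"rx_jumbo", 8192, 256, 0,
  		9216 + RTE_PKTMBUF_HEADROOM, socket_id);
  	/* ... and the 2KB default on queue 1. */
  	struct rte_mempool *small_pool = rte_pktmbuf_pool_create(
  		"rx_small", 8192, 256, 0,
  		RTE_MBUF_DEFAULT_BUF_SIZE, socket_id);

  	if (jumbo_pool == NULL || small_pool == NULL)
  		return -1;

  	/* eth_ark_dev_rx_queue_setup() now records
  	 * rte_pktmbuf_data_room_size(pool) - RTE_PKTMBUF_HEADROOM
  	 * as the per-queue dataroom. */
  	if (rte_eth_rx_queue_setup(port_id, 0, 512, socket_id,
  				   NULL, jumbo_pool) != 0)
  		return -1;
  	return rte_eth_rx_queue_setup(port_id, 1, 512, socket_id,
  				      NULL, small_pool);
  }
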
diff --git a/drivers/net/ark/ark_ethdev.c b/drivers/net/ark/ark_ethdev.c
index 0414c78bb5..b9843414b1 100644
--- a/drivers/net/ark/ark_ethdev.c
+++ b/drivers/net/ark/ark_ethdev.c
@@ -511,14 +511,6 @@ ark_config_device(struct rte_eth_dev *dev)
 		mpu = RTE_PTR_ADD(mpu, ARK_MPU_QOFFSET);
 	}
 
-	ark_udm_stop(ark->udm.v, 0);
-	ark_udm_configure(ark->udm.v,
-			  RTE_PKTMBUF_HEADROOM,
-			  RTE_MBUF_DEFAULT_DATAROOM,
-			  ARK_RX_WRITE_TIME_NS);
-	ark_udm_stats_reset(ark->udm.v);
-	ark_udm_stop(ark->udm.v, 0);
-
 	/* TX -- DDM */
 	if (ark_ddm_stop(ark->ddm.v, 1))
 		ARK_PMD_LOG(ERR, "Unable to stop DDM\n");
diff --git a/drivers/net/ark/ark_ethdev_rx.c b/drivers/net/ark/ark_ethdev_rx.c
index 98658ce621..1000f50be0 100644
--- a/drivers/net/ark/ark_ethdev_rx.c
+++ b/drivers/net/ark/ark_ethdev_rx.c
@@ -12,7 +12,6 @@
 
 #define ARK_RX_META_SIZE 32
 #define ARK_RX_META_OFFSET (RTE_PKTMBUF_HEADROOM - ARK_RX_META_SIZE)
-#define ARK_RX_MAX_NOCHAIN (RTE_MBUF_DEFAULT_DATAROOM)
 
 /* Forward declarations */
 struct ark_rx_queue;
@@ -41,6 +40,9 @@ struct ark_rx_queue {
 	rx_user_meta_hook_fn rx_user_meta_hook;
 	void *ext_user_data;
 
+	uint32_t dataroom;
+	uint32_t headroom;
+
 	uint32_t queue_size;
 	uint32_t queue_mask;
 
@@ -164,6 +166,9 @@ eth_ark_dev_rx_queue_setup(struct rte_eth_dev *dev,
 
 	/* NOTE zmalloc is used, no need to 0 indexes, etc. */
 	queue->mb_pool = mb_pool;
+	queue->dataroom = rte_pktmbuf_data_room_size(mb_pool) -
+		RTE_PKTMBUF_HEADROOM;
+	queue->headroom = RTE_PKTMBUF_HEADROOM;
 	queue->phys_qid = qidx;
 	queue->queue_index = queue_idx;
 	queue->queue_size = nb_desc;
@@ -196,6 +201,15 @@ eth_ark_dev_rx_queue_setup(struct rte_eth_dev *dev,
 	queue->udm = RTE_PTR_ADD(ark->udm.v, qidx * ARK_UDM_QOFFSET);
 	queue->mpu = RTE_PTR_ADD(ark->mpurx.v, qidx * ARK_MPU_QOFFSET);
 
+	/* Configure UDM per queue */
+	ark_udm_stop(queue->udm, 0);
+	ark_udm_configure(queue->udm,
+			  RTE_PKTMBUF_HEADROOM,
+			  queue->dataroom,
+			  ARK_RX_WRITE_TIME_NS);
+	ark_udm_stats_reset(queue->udm);
+	ark_udm_stop(queue->udm, 0);
+
 	/* populate mbuf reserve */
 	status = eth_ark_rx_seed_mbufs(queue);
 
@@ -276,6 +290,7 @@ eth_ark_recv_pkts(void *rx_queue,
 		mbuf->data_len = meta->pkt_len;
 
 		if (ARK_DEBUG_CORE) {	/* debug sanity checks */
+
 			if ((meta->pkt_len > (1024 * 16)) ||
 			    (meta->pkt_len == 0)) {
 				ARK_PMD_LOG(DEBUG, "RX: Bad Meta Q: %u"
@@ -304,7 +319,7 @@ eth_ark_recv_pkts(void *rx_queue,
 			}
 		}
 
-		if (unlikely(meta->pkt_len > ARK_RX_MAX_NOCHAIN))
+		if (unlikely(meta->pkt_len > queue->dataroom))
 			cons_index = eth_ark_rx_jumbo
 				(queue, meta, mbuf, cons_index + 1);
 		else
@@ -345,14 +360,14 @@ eth_ark_rx_jumbo(struct ark_rx_queue *queue,
 	/* first buf populated by called */
 	mbuf_prev = mbuf0;
 	segments = 1;
-	data_len = RTE_MIN(meta->pkt_len, RTE_MBUF_DEFAULT_DATAROOM);
+	data_len = RTE_MIN(meta->pkt_len, queue->dataroom);
 	remaining = meta->pkt_len - data_len;
 	mbuf0->data_len = data_len;
 
 	/* HW guarantees that the data does not exceed prod_index! */
 	while (remaining != 0) {
 		data_len = RTE_MIN(remaining,
-				   RTE_MBUF_DEFAULT_DATAROOM);
+				   queue->dataroom);
 
 		remaining -= data_len;
 		segments += 1;
diff --git a/drivers/net/ark/ark_udm.h b/drivers/net/ark/ark_udm.h
index 4e51a5e82c..1cbcd94a98 100644
--- a/drivers/net/ark/ark_udm.h
+++ b/drivers/net/ark/ark_udm.h
@@ -33,7 +33,7 @@ struct ark_rx_meta {
 #define ARK_RX_WRITE_TIME_NS 2500
 #define ARK_UDM_SETUP 0
 #define ARK_UDM_CONST2 0xbACECACE
-#define ARK_UDM_CONST3 0x334d4455
+#define ARK_UDM_CONST3 0x344d4455
 #define ARK_UDM_CONST ARK_UDM_CONST3
 struct ark_udm_setup_t {
 	uint32_t r0;
-- 
2.25.1

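A note on the receive-path arithmetic above: with a per-queue dataroom
D, eth_ark_recv_pkts() falls back to eth_ark_rx_jumbo() only when
pkt_len > D, and the chaining loop then consumes ceil(pkt_len / D)
mbufs in total. A standalone sketch of that accounting
(ark_rx_segments() is a hypothetical helper for illustration, not a
driver function):

  #include <stdint.h>
  #include <stdio.h>

  /* Number of chained mbufs a packet occupies: the first mbuf plus
   * one per additional dataroom-sized chunk, i.e. ceiling division. */
  static uint32_t ark_rx_segments(uint32_t pkt_len, uint32_t dataroom)
  {
  	return (pkt_len + dataroom - 1) / dataroom;
  }

  int main(void)
  {
  	/* A 9000-byte frame into 2048-byte data rooms -> 5 mbufs. */
  	printf("%u\n", ark_rx_segments(9000, 2048));
  	/* The same frame into a 9216-byte pool -> 1 mbuf, no chain. */
  	printf("%u\n", ark_rx_segments(9000, 9216));
  	return 0;
  }

Separately, the ARK_UDM_CONST3 change bumps the UDM identity word from
ASCII "UDM3" to "UDM4" when the bytes are read little-endian,
presumably so the PMD only pairs with firmware that supports the
per-queue configuration.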

Thread overview: 9+ messages
2022-01-19 19:12 [PATCH 1/4] net/ark: add device capabilities record John Miller
2022-01-19 19:12 ` John Miller [this message]
2022-01-26 16:46   ` [PATCH 2/4] net/ark: support arbitrary mbuf size Ferruh Yigit
2022-01-19 19:12 ` [PATCH 3/4] net/ark: publish include file for external access John Miller
2022-01-26 16:48   ` Ferruh Yigit
2022-01-26 16:49     ` Ferruh Yigit
2022-01-19 19:12 ` [PATCH 4/4] net/ark: support chunk DMA transfers John Miller
2022-01-26 16:52   ` Ferruh Yigit
2022-01-26 16:45 ` [PATCH 1/4] net/ark: add device capabilities record Ferruh Yigit
