From: Wei Hu <weh@microsoft.com>
To: dev@dpdk.org, Long Li <longli@microsoft.com>
Cc: Wei Hu <weh@microsoft.com>, stable@dpdk.org
Subject: [PATCH 1/1] net/mana: add 32 bit short doorbell
Date: Sat, 9 Sep 2023 12:23:47 +0000 [thread overview]
Message-ID: <20230909122347.2043969-1-weh@microsoft.com> (raw)
Add 32 bit short doorbell support. Ring short doorbell when running
in 32 bit applications.
Cc: stable@dpdk.org
Signed-off-by: Wei Hu <weh@microsoft.com>
---
drivers/net/mana/gdma.c | 95 +++++++++++++++++++++++++++++++++++++++++
drivers/net/mana/mana.h | 25 +++++++++++
drivers/net/mana/rx.c | 52 ++++++++++++++++++++++
drivers/net/mana/tx.c | 28 ++++++++++++
4 files changed, 200 insertions(+)
diff --git a/drivers/net/mana/gdma.c b/drivers/net/mana/gdma.c
index 65685fe236..d1da025d1b 100644
--- a/drivers/net/mana/gdma.c
+++ b/drivers/net/mana/gdma.c
@@ -166,6 +166,97 @@ gdma_post_work_request(struct mana_gdma_queue *queue,
return 0;
}
+#ifdef RTE_ARCH_32
+union gdma_short_doorbell_entry {
+ uint32_t as_uint32;
+
+ struct {
+ uint32_t tail_ptr_incr : 16; /* Number of CQEs */
+ uint32_t id : 12;
+ uint32_t reserved : 3;
+ uint32_t arm : 1;
+ } cq;
+
+ struct {
+ uint32_t tail_ptr_incr : 16; /* In number of bytes */
+ uint32_t id : 12;
+ uint32_t reserved : 4;
+ } rq;
+
+ struct {
+ uint32_t tail_ptr_incr : 16; /* In number of bytes */
+ uint32_t id : 12;
+ uint32_t reserved : 4;
+ } sq;
+
+ struct {
+ uint32_t tail_ptr_incr : 16; /* Number of EQEs */
+ uint32_t id : 12;
+ uint32_t reserved : 3;
+ uint32_t arm : 1;
+ } eq;
+}; /* HW DATA */
+
+enum {
+ DOORBELL_SHORT_OFFSET_SQ = 0x10,
+ DOORBELL_SHORT_OFFSET_RQ = 0x410,
+ DOORBELL_SHORT_OFFSET_CQ = 0x810,
+ DOORBELL_SHORT_OFFSET_EQ = 0xFF0,
+};
+
+/*
+ * Write to hardware doorbell to notify new activity.
+ */
+int
+mana_ring_short_doorbell(void *db_page, enum gdma_queue_types queue_type,
+ uint32_t queue_id, uint32_t tail_incr, uint8_t arm)
+{
+ uint8_t *addr = db_page;
+ union gdma_short_doorbell_entry e = {};
+
+ if ((queue_id & ~GDMA_SHORT_DB_QID_MASK) ||
+ (tail_incr & ~GDMA_SHORT_DB_INC_MASK)) {
+ DP_LOG(ERR, "%s: queue_id %u or "
+ "tail_incr %u overflowed, queue type %d",
+ __func__, queue_id, tail_incr, queue_type);
+ return -EINVAL;
+ }
+
+ switch (queue_type) {
+ case GDMA_QUEUE_SEND:
+ e.sq.id = queue_id;
+ e.sq.tail_ptr_incr = tail_incr;
+ addr += DOORBELL_SHORT_OFFSET_SQ;
+ break;
+
+ case GDMA_QUEUE_RECEIVE:
+ e.rq.id = queue_id;
+ e.rq.tail_ptr_incr = tail_incr;
+ addr += DOORBELL_SHORT_OFFSET_RQ;
+ break;
+
+ case GDMA_QUEUE_COMPLETION:
+ e.cq.id = queue_id;
+ e.cq.tail_ptr_incr = tail_incr;
+ e.cq.arm = arm;
+ addr += DOORBELL_SHORT_OFFSET_CQ;
+ break;
+
+ default:
+ DP_LOG(ERR, "Unsupported queue type %d", queue_type);
+ return -1;
+ }
+
+ /* Ensure all writes are done before ringing doorbell */
+ rte_wmb();
+
+ DP_LOG(DEBUG, "db_page %p addr %p queue_id %u type %u tail %u arm %u",
+ db_page, addr, queue_id, queue_type, tail_incr, arm);
+
+ rte_write32(e.as_uint32, addr);
+ return 0;
+}
+#else
union gdma_doorbell_entry {
uint64_t as_uint64;
@@ -248,6 +339,7 @@ mana_ring_doorbell(void *db_page, enum gdma_queue_types queue_type,
rte_write64(e.as_uint64, addr);
return 0;
}
+#endif
/*
* Poll completion queue for completions.
@@ -287,6 +379,9 @@ gdma_poll_completion_queue(struct mana_gdma_queue *cq,
num_comp++;
cq->head++;
+#ifdef RTE_ARCH_32
+ cq->head_incr_to_short_db++;
+#endif
DP_LOG(DEBUG, "comp new 0x%x old 0x%x cqe 0x%x wq %u sq %u head %u",
new_owner_bits, old_owner_bits, cqe_owner_bits,
diff --git a/drivers/net/mana/mana.h b/drivers/net/mana/mana.h
index 5801491d75..848d87c096 100644
--- a/drivers/net/mana/mana.h
+++ b/drivers/net/mana/mana.h
@@ -50,6 +50,19 @@ struct mana_shared_data {
#define MAX_TX_WQE_SIZE 512
#define MAX_RX_WQE_SIZE 256
+/* For 32 bit only */
+#ifdef RTE_ARCH_32
+#define GDMA_SHORT_DB_INC_MASK 0xffff
+#define GDMA_SHORT_DB_QID_MASK 0xfff
+
+#define GDMA_SHORT_DB_MAX_WQE (0x10000 / GDMA_WQE_ALIGNMENT_UNIT_SIZE)
+
+#define TX_WQE_SHORT_DB_THRESHOLD \
+ (GDMA_SHORT_DB_MAX_WQE - (2 * MAX_TX_WQE_SIZE))
+#define RX_WQE_SHORT_DB_THRESHOLD \
+ (GDMA_SHORT_DB_MAX_WQE - (2 * MAX_RX_WQE_SIZE))
+#endif
+
/* Values from the GDMA specification document, WQE format description */
#define INLINE_OOB_SMALL_SIZE_IN_BYTES 8
#define INLINE_OOB_LARGE_SIZE_IN_BYTES 24
@@ -375,6 +388,9 @@ struct mana_gdma_queue {
uint32_t id;
uint32_t head;
uint32_t tail;
+#ifdef RTE_ARCH_32
+ uint32_t head_incr_to_short_db;
+#endif
};
#define MANA_MR_BTREE_PER_QUEUE_N 64
@@ -425,6 +441,9 @@ struct mana_rxq {
*/
uint32_t desc_ring_head, desc_ring_tail;
+ /* For storing wqe increment count between each short doorbell ring */
+ uint32_t wqe_cnt_to_short_db;
+
struct mana_gdma_queue gdma_rq;
struct mana_gdma_queue gdma_cq;
struct gdma_comp *gdma_comp_buf;
@@ -455,8 +474,14 @@ extern int mana_logtype_init;
#define PMD_INIT_FUNC_TRACE() PMD_INIT_LOG(DEBUG, " >>")
+#ifdef RTE_ARCH_32
+int mana_ring_short_doorbell(void *db_page, enum gdma_queue_types queue_type,
+ uint32_t queue_id, uint32_t tail_incr,
+ uint8_t arm);
+#else
int mana_ring_doorbell(void *db_page, enum gdma_queue_types queue_type,
uint32_t queue_id, uint32_t tail, uint8_t arm);
+#endif
int mana_rq_ring_doorbell(struct mana_rxq *rxq);
int gdma_post_work_request(struct mana_gdma_queue *queue,
diff --git a/drivers/net/mana/rx.c b/drivers/net/mana/rx.c
index 14d9085801..303d129e5b 100644
--- a/drivers/net/mana/rx.c
+++ b/drivers/net/mana/rx.c
@@ -39,10 +39,23 @@ mana_rq_ring_doorbell(struct mana_rxq *rxq)
/* Hardware Spec specifies that software client should set 0 for
* wqe_cnt for Receive Queues.
*/
+#ifdef RTE_ARCH_32
+ if (rxq->wqe_cnt_to_short_db) {
+ ret = mana_ring_short_doorbell(db_page, GDMA_QUEUE_RECEIVE,
+ rxq->gdma_rq.id,
+ rxq->wqe_cnt_to_short_db *
+ GDMA_WQE_ALIGNMENT_UNIT_SIZE,
+ 0);
+ } else {
+ /* No need to ring, just return */
+ ret = 0;
+ }
+#else
ret = mana_ring_doorbell(db_page, GDMA_QUEUE_RECEIVE,
rxq->gdma_rq.id,
rxq->gdma_rq.head * GDMA_WQE_ALIGNMENT_UNIT_SIZE,
0);
+#endif
if (ret)
DP_LOG(ERR, "failed to ring RX doorbell ret %d", ret);
@@ -97,6 +110,7 @@ mana_alloc_and_post_rx_wqe(struct mana_rxq *rxq)
/* update queue for tracking pending packets */
desc->pkt = mbuf;
desc->wqe_size_in_bu = wqe_size_in_bu;
+ rxq->wqe_cnt_to_short_db += wqe_size_in_bu;
rxq->desc_ring_head = (rxq->desc_ring_head + 1) % rxq->num_desc;
} else {
DP_LOG(DEBUG, "failed to post recv ret %d", ret);
@@ -115,12 +129,22 @@ mana_alloc_and_post_rx_wqes(struct mana_rxq *rxq)
int ret;
uint32_t i;
+#ifdef RTE_ARCH_32
+ rxq->wqe_cnt_to_short_db = 0;
+#endif
for (i = 0; i < rxq->num_desc; i++) {
ret = mana_alloc_and_post_rx_wqe(rxq);
if (ret) {
DP_LOG(ERR, "failed to post RX ret = %d", ret);
return ret;
}
+
+#ifdef RTE_ARCH_32
+ if (rxq->wqe_cnt_to_short_db > RX_WQE_SHORT_DB_THRESHOLD) {
+ mana_rq_ring_doorbell(rxq);
+ rxq->wqe_cnt_to_short_db = 0;
+ }
+#endif
}
mana_rq_ring_doorbell(rxq);
@@ -349,6 +373,9 @@ mana_start_rx_queues(struct rte_eth_dev *dev)
/* CQ head starts with count */
rxq->gdma_cq.head = rxq->gdma_cq.count;
+#ifdef RTE_ARCH_32
+ rxq->gdma_cq.head_incr_to_short_db = 0;
+#endif
DRV_LOG(INFO, "rxq cq id %u buf %p count %u size %u",
rxq->gdma_cq.id, rxq->gdma_cq.buffer,
@@ -397,6 +424,10 @@ mana_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
uint32_t i;
int polled = 0;
+#ifdef RTE_ARCH_32
+ rxq->wqe_cnt_to_short_db = 0;
+#endif
+
repoll:
/* Polling on new completions if we have no backlog */
if (rxq->comp_buf_idx == rxq->comp_buf_len) {
@@ -505,6 +536,16 @@ mana_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
wqe_posted++;
if (pkt_received == pkts_n)
break;
+
+#ifdef RTE_ARCH_32
+ /* Ring short doorbell if approaching the wqe increment
+ * limit.
+ */
+ if (rxq->wqe_cnt_to_short_db > RX_WQE_SHORT_DB_THRESHOLD) {
+ mana_rq_ring_doorbell(rxq);
+ rxq->wqe_cnt_to_short_db = 0;
+ }
+#endif
}
rxq->backlog_idx = pkt_idx;
@@ -529,6 +570,16 @@ static int
mana_arm_cq(struct mana_rxq *rxq, uint8_t arm)
{
struct mana_priv *priv = rxq->priv;
+#ifdef RTE_ARCH_32
+ uint16_t cqe_incr = (uint16_t)rxq->gdma_cq.head_incr_to_short_db;
+
+ rxq->gdma_cq.head_incr_to_short_db = 0;
+ DP_LOG(DEBUG, "Ringing completion queue ID %u incr %u arm %d",
+ rxq->gdma_cq.id, cqe_incr, arm);
+
+ return mana_ring_short_doorbell(priv->db_page, GDMA_QUEUE_COMPLETION,
+ rxq->gdma_cq.id, cqe_incr, arm);
+#else
uint32_t head = rxq->gdma_cq.head %
(rxq->gdma_cq.count << COMPLETION_QUEUE_ENTRY_OWNER_BITS_SIZE);
@@ -537,6 +588,7 @@ mana_arm_cq(struct mana_rxq *rxq, uint8_t arm)
return mana_ring_doorbell(priv->db_page, GDMA_QUEUE_COMPLETION,
rxq->gdma_cq.id, head, arm);
+#endif
}
int
diff --git a/drivers/net/mana/tx.c b/drivers/net/mana/tx.c
index 11ba2ee1ac..8ff81bde09 100644
--- a/drivers/net/mana/tx.c
+++ b/drivers/net/mana/tx.c
@@ -137,6 +137,9 @@ mana_start_tx_queues(struct rte_eth_dev *dev)
/* CQ head starts with count (not 0) */
txq->gdma_cq.head = txq->gdma_cq.count;
+#ifdef RTE_ARCH_32
+ txq->gdma_cq.head_incr_to_short_db = 0;
+#endif
DRV_LOG(INFO, "txq cq id %u buf %p count %u size %u head %u",
txq->gdma_cq.id, txq->gdma_cq.buffer,
@@ -176,6 +179,9 @@ mana_tx_burst(void *dpdk_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
void *db_page;
uint16_t pkt_sent = 0;
uint32_t num_comp, i;
+#ifdef RTE_ARCH_32
+ uint32_t wqe_count = 0;
+#endif
/* Process send completions from GDMA */
num_comp = gdma_poll_completion_queue(&txq->gdma_cq,
@@ -418,6 +424,20 @@ mana_tx_burst(void *dpdk_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
DP_LOG(DEBUG, "nb_pkts %u pkt[%d] sent",
nb_pkts, pkt_idx);
+#ifdef RTE_ARCH_32
+ wqe_count += wqe_size_in_bu;
+ if (wqe_count > TX_WQE_SHORT_DB_THRESHOLD) {
+ /* wqe_count is approaching the short doorbell
+ * increment limit. Stop processing further
+ * packets and just ring the short
+ * doorbell.
+ */
+ DP_LOG(DEBUG, "wqe_count %u reaching limit, "
+ "pkt_sent %d",
+ wqe_count, pkt_sent);
+ break;
+ }
+#endif
} else {
DP_LOG(DEBUG, "pkt[%d] failed to post send ret %d",
pkt_idx, ret);
@@ -436,11 +456,19 @@ mana_tx_burst(void *dpdk_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
}
if (pkt_sent) {
+#ifdef RTE_ARCH_32
+ ret = mana_ring_short_doorbell(db_page, GDMA_QUEUE_SEND,
+ txq->gdma_sq.id,
+ wqe_count *
+ GDMA_WQE_ALIGNMENT_UNIT_SIZE,
+ 0);
+#else
ret = mana_ring_doorbell(db_page, GDMA_QUEUE_SEND,
txq->gdma_sq.id,
txq->gdma_sq.head *
GDMA_WQE_ALIGNMENT_UNIT_SIZE,
0);
+#endif
if (ret)
DP_LOG(ERR, "mana_ring_doorbell failed ret %d", ret);
}
--
2.34.1
next reply other threads:[~2023-09-09 12:24 UTC|newest]
Thread overview: 12+ messages / expand[flat|nested] mbox.gz Atom feed top
2023-09-09 12:23 Wei Hu [this message]
2023-09-13 21:11 ` Long Li
2023-09-14 5:11 ` Wei Hu
2023-09-18 18:02 ` Ferruh Yigit
2023-09-19 3:38 ` Wei Hu
2023-09-19 11:27 ` Ferruh Yigit
2023-09-20 3:11 ` Wei Hu
2023-09-18 20:01 ` Long Li
2023-09-19 2:13 ` Wei Hu
2023-09-19 19:23 ` Long Li
2023-09-20 8:10 ` Wei Hu
2023-09-20 17:28 ` Long Li
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20230909122347.2043969-1-weh@microsoft.com \
--to=weh@microsoft.com \
--cc=dev@dpdk.org \
--cc=longli@microsoft.com \
--cc=stable@dpdk.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).