* [PATCH 1/2] net/mana: enable 32 bit build for mana driver
2023-09-21 8:34 [PATCH 0/2] net/mana: 32 bit support Wei Hu
@ 2023-09-21 8:34 ` Wei Hu
2023-09-21 10:58 ` Ferruh Yigit
2023-09-21 20:53 ` Long Li
2023-09-21 8:34 ` [PATCH 2/2] net/mana: add 32 bit short doorbell Wei Hu
2023-09-21 10:58 ` [PATCH 0/2] net/mana: 32 bit support Ferruh Yigit
2 siblings, 2 replies; 9+ messages in thread
From: Wei Hu @ 2023-09-21 8:34 UTC (permalink / raw)
To: dev, ferruh.yigit, ktraynor, xuemingl, bluca, Long Li; +Cc: Wei Hu, stable
Enable 32 bit build on x86 Linux. Fix the build warnings and errors
that show up when building in 32 bit.
With this patch, mana can be built for 32 bit. However, another
patch adding mana short doorbell support is needed to make mana
fully functional for 32 bit applications.
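For context, the warnings fixed here come from casting between 64 bit
integers and pointers, which only match in size on a 64 bit target. A
minimal sketch of the pattern and its portable form (not part of the
patch; the helper name is made up for illustration):
#include <stdint.h>
static void *port_to_pd_context(uint32_t port)
{
	/*
	 * (void *)(uint64_t)port converts a 64 bit integer to a
	 * 32 bit pointer on a 32 bit target, so the compiler warns
	 * about the size mismatch.
	 */
	/*
	 * uintptr_t always has the width of a pointer, so the cast
	 * below is clean on both 32 bit and 64 bit builds.
	 */
	return (void *)(uintptr_t)port;
}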
Cc: stable@dpdk.org
Signed-off-by: Wei Hu <weh@microsoft.com>
---
drivers/net/mana/mana.c | 2 +-
drivers/net/mana/meson.build | 4 ++--
drivers/net/mana/mr.c | 18 +++++++++---------
3 files changed, 12 insertions(+), 12 deletions(-)
diff --git a/drivers/net/mana/mana.c b/drivers/net/mana/mana.c
index 7630118d4f..896b53ed35 100644
--- a/drivers/net/mana/mana.c
+++ b/drivers/net/mana/mana.c
@@ -1260,7 +1260,7 @@ mana_probe_port(struct ibv_device *ibdev, struct ibv_device_attr_ex *dev_attr,
/* Create a parent domain with the port number */
attr.pd = priv->ib_pd;
attr.comp_mask = IBV_PARENT_DOMAIN_INIT_ATTR_PD_CONTEXT;
- attr.pd_context = (void *)(uint64_t)port;
+ attr.pd_context = (void *)(uintptr_t)port;
priv->ib_parent_pd = ibv_alloc_parent_domain(ctx, &attr);
if (!priv->ib_parent_pd) {
DRV_LOG(ERR, "ibv_alloc_parent_domain failed port %d", port);
diff --git a/drivers/net/mana/meson.build b/drivers/net/mana/meson.build
index 493f0d26d4..2d72eca5a8 100644
--- a/drivers/net/mana/meson.build
+++ b/drivers/net/mana/meson.build
@@ -1,9 +1,9 @@
# SPDX-License-Identifier: BSD-3-Clause
# Copyright(c) 2022 Microsoft Corporation
-if not is_linux or not dpdk_conf.has('RTE_ARCH_X86_64')
+if not is_linux or not dpdk_conf.has('RTE_ARCH_X86')
build = false
- reason = 'only supported on x86_64 Linux'
+ reason = 'only supported on x86 Linux'
subdir_done()
endif
diff --git a/drivers/net/mana/mr.c b/drivers/net/mana/mr.c
index fec0dc961c..b8e6ea0bbf 100644
--- a/drivers/net/mana/mr.c
+++ b/drivers/net/mana/mr.c
@@ -53,7 +53,7 @@ mana_new_pmd_mr(struct mana_mr_btree *local_tree, struct mana_priv *priv,
}
DP_LOG(DEBUG,
- "registering memory chunk start 0x%" PRIx64 " len %u",
+ "registering memory chunk start 0x%" PRIxPTR " len %u",
ranges[i].start, ranges[i].len);
if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
@@ -62,7 +62,7 @@ mana_new_pmd_mr(struct mana_mr_btree *local_tree, struct mana_priv *priv,
ranges[i].len);
if (ret) {
DP_LOG(ERR,
- "MR failed start 0x%" PRIx64 " len %u",
+ "MR failed start 0x%" PRIxPTR " len %u",
ranges[i].start, ranges[i].len);
return ret;
}
@@ -72,7 +72,7 @@ mana_new_pmd_mr(struct mana_mr_btree *local_tree, struct mana_priv *priv,
ibv_mr = ibv_reg_mr(priv->ib_pd, (void *)ranges[i].start,
ranges[i].len, IBV_ACCESS_LOCAL_WRITE);
if (ibv_mr) {
- DP_LOG(DEBUG, "MR lkey %u addr %p len %" PRIu64,
+ DP_LOG(DEBUG, "MR lkey %u addr %p len %zu",
ibv_mr->lkey, ibv_mr->addr, ibv_mr->length);
mr = rte_calloc("MANA MR", 1, sizeof(*mr), 0);
@@ -99,7 +99,7 @@ mana_new_pmd_mr(struct mana_mr_btree *local_tree, struct mana_priv *priv,
return ret;
}
} else {
- DP_LOG(ERR, "MR failed at 0x%" PRIx64 " len %u",
+ DP_LOG(ERR, "MR failed at 0x%" PRIxPTR " len %u",
ranges[i].start, ranges[i].len);
return -errno;
}
@@ -141,7 +141,7 @@ mana_find_pmd_mr(struct mana_mr_btree *local_mr_btree, struct mana_priv *priv,
mr = mana_mr_btree_lookup(local_mr_btree, &idx,
(uintptr_t)mbuf->buf_addr, mbuf->buf_len);
if (mr) {
- DP_LOG(DEBUG, "Local mr lkey %u addr 0x%" PRIx64 " len %" PRIu64,
+ DP_LOG(DEBUG, "Local mr lkey %u addr 0x%" PRIxPTR " len %zu",
mr->lkey, mr->addr, mr->len);
return mr;
}
@@ -162,7 +162,7 @@ mana_find_pmd_mr(struct mana_mr_btree *local_mr_btree, struct mana_priv *priv,
}
DP_LOG(DEBUG,
- "Added local MR key %u addr 0x%" PRIx64 " len %" PRIu64,
+ "Added local MR key %u addr 0x%" PRIxPTR " len %zu",
mr->lkey, mr->addr, mr->len);
return mr;
}
@@ -266,7 +266,7 @@ mana_mr_btree_lookup(struct mana_mr_btree *bt, uint16_t *idx,
return &table[base];
DP_LOG(DEBUG,
- "addr 0x%" PRIx64 " len %zu idx %u sum 0x%" PRIx64 " not found",
+ "addr 0x%" PRIxPTR " len %zu idx %u sum 0x%" PRIxPTR " not found",
addr, len, *idx, addr + len);
return NULL;
@@ -316,7 +316,7 @@ mana_mr_btree_insert(struct mana_mr_btree *bt, struct mana_mr_cache *entry)
uint16_t shift;
if (mana_mr_btree_lookup(bt, &idx, entry->addr, entry->len)) {
- DP_LOG(DEBUG, "Addr 0x%" PRIx64 " len %zu exists in btree",
+ DP_LOG(DEBUG, "Addr 0x%" PRIxPTR " len %zu exists in btree",
entry->addr, entry->len);
return 0;
}
@@ -340,7 +340,7 @@ mana_mr_btree_insert(struct mana_mr_btree *bt, struct mana_mr_cache *entry)
bt->len++;
DP_LOG(DEBUG,
- "Inserted MR b-tree table %p idx %d addr 0x%" PRIx64 " len %zu",
+ "Inserted MR b-tree table %p idx %d addr 0x%" PRIxPTR " len %zu",
table, idx, entry->addr, entry->len);
return 0;
--
2.34.1
* [PATCH 2/2] net/mana: add 32 bit short doorbell
2023-09-21 8:34 [PATCH 0/2] net/mana: 32 bit support Wei Hu
2023-09-21 8:34 ` [PATCH 1/2] net/mana: enable 32 bit build for mana driver Wei Hu
@ 2023-09-21 8:34 ` Wei Hu
2023-09-21 10:58 ` Ferruh Yigit
2023-09-21 21:32 ` Long Li
2023-09-21 10:58 ` [PATCH 0/2] net/mana: 32 bit support Ferruh Yigit
2 siblings, 2 replies; 9+ messages in thread
From: Wei Hu @ 2023-09-21 8:34 UTC (permalink / raw)
To: dev, ferruh.yigit, ktraynor, xuemingl, bluca, Long Li; +Cc: Wei Hu, stable
Add 32 bit short doorbell support. Ring the short doorbell when
running in 32 bit applications.
The mana hardware supports both 32 bit and 64 bit doorbells on the
same platform. 32 bit applications cannot use 64 bit doorbells.
64 bit applications can use 32 bit doorbells, but performance would
suffer greatly, so it is not recommended.
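The short doorbell entry only carries a 16 bit tail increment, so a
single ring can advance the queue tail by at most 0xffff bytes. The
thresholds added to mana.h below make the driver ring the doorbell
once the pending WQE count gets close to that limit. A rough sketch
of the arithmetic, assuming GDMA_WQE_ALIGNMENT_UNIT_SIZE is 32 bytes
(that value is not shown in this patch):
/* 0x10000 bytes / 32 bytes per unit = 2048 alignment units is
 * roughly the most a 16 bit byte increment can express.
 */
#define GDMA_SHORT_DB_MAX_WQE	(0x10000 / GDMA_WQE_ALIGNMENT_UNIT_SIZE)
/* Back off by one maximum-size TX WQE (512 / 32 = 16 units), so the
 * TX path rings at about 2048 - 16 = 2032 pending units.
 */
#define TX_WQE_SHORT_DB_THRESHOLD \
	(GDMA_SHORT_DB_MAX_WQE - \
	 (MAX_TX_WQE_SIZE / GDMA_WQE_ALIGNMENT_UNIT_SIZE))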
Cc: stable@dpdk.org
Signed-off-by: Wei Hu <weh@microsoft.com>
---
drivers/net/mana/gdma.c | 92 +++++++++++++++++++++++++++++++++++++++++
drivers/net/mana/mana.h | 26 ++++++++++++
drivers/net/mana/rx.c | 45 ++++++++++++++++++++
drivers/net/mana/tx.c | 25 +++++++++++
4 files changed, 188 insertions(+)
diff --git a/drivers/net/mana/gdma.c b/drivers/net/mana/gdma.c
index 65685fe236..7f66a7a7cf 100644
--- a/drivers/net/mana/gdma.c
+++ b/drivers/net/mana/gdma.c
@@ -166,6 +166,97 @@ gdma_post_work_request(struct mana_gdma_queue *queue,
return 0;
}
+#ifdef RTE_ARCH_32
+union gdma_short_doorbell_entry {
+ uint32_t as_uint32;
+
+ struct {
+ uint32_t tail_ptr_incr : 16; /* Number of CQEs */
+ uint32_t id : 12;
+ uint32_t reserved : 3;
+ uint32_t arm : 1;
+ } cq;
+
+ struct {
+ uint32_t tail_ptr_incr : 16; /* In number of bytes */
+ uint32_t id : 12;
+ uint32_t reserved : 4;
+ } rq;
+
+ struct {
+ uint32_t tail_ptr_incr : 16; /* In number of bytes */
+ uint32_t id : 12;
+ uint32_t reserved : 4;
+ } sq;
+
+ struct {
+ uint32_t tail_ptr_incr : 16; /* Number of EQEs */
+ uint32_t id : 12;
+ uint32_t reserved : 3;
+ uint32_t arm : 1;
+ } eq;
+}; /* HW DATA */
+
+enum {
+ DOORBELL_SHORT_OFFSET_SQ = 0x10,
+ DOORBELL_SHORT_OFFSET_RQ = 0x410,
+ DOORBELL_SHORT_OFFSET_CQ = 0x810,
+ DOORBELL_SHORT_OFFSET_EQ = 0xFF0,
+};
+
+/*
+ * Write to hardware doorbell to notify new activity.
+ */
+int
+mana_ring_short_doorbell(void *db_page, enum gdma_queue_types queue_type,
+ uint32_t queue_id, uint32_t tail_incr, uint8_t arm)
+{
+ uint8_t *addr = db_page;
+ union gdma_short_doorbell_entry e = {};
+
+ if ((queue_id & ~GDMA_SHORT_DB_QID_MASK) ||
+ (tail_incr & ~GDMA_SHORT_DB_INC_MASK)) {
+ DP_LOG(ERR, "%s: queue_id %u or "
+ "tail_incr %u overflowed, queue type %d",
+ __func__, queue_id, tail_incr, queue_type);
+ return -EINVAL;
+ }
+
+ switch (queue_type) {
+ case GDMA_QUEUE_SEND:
+ e.sq.id = queue_id;
+ e.sq.tail_ptr_incr = tail_incr;
+ addr += DOORBELL_SHORT_OFFSET_SQ;
+ break;
+
+ case GDMA_QUEUE_RECEIVE:
+ e.rq.id = queue_id;
+ e.rq.tail_ptr_incr = tail_incr;
+ addr += DOORBELL_SHORT_OFFSET_RQ;
+ break;
+
+ case GDMA_QUEUE_COMPLETION:
+ e.cq.id = queue_id;
+ e.cq.tail_ptr_incr = tail_incr;
+ e.cq.arm = arm;
+ addr += DOORBELL_SHORT_OFFSET_CQ;
+ break;
+
+ default:
+ DP_LOG(ERR, "Unsupported queue type %d", queue_type);
+ return -1;
+ }
+
+ /* Ensure all writes are done before ringing doorbell */
+ rte_wmb();
+
+ DP_LOG(DEBUG, "db_page %p addr %p queue_id %u type %u tail %u arm %u",
+ db_page, addr, queue_id, queue_type, tail_incr, arm);
+
+ rte_write32(e.as_uint32, addr);
+ return 0;
+}
+#else
union gdma_doorbell_entry {
uint64_t as_uint64;
@@ -248,6 +339,7 @@ mana_ring_doorbell(void *db_page, enum gdma_queue_types queue_type,
rte_write64(e.as_uint64, addr);
return 0;
}
+#endif
/*
* Poll completion queue for completions.
diff --git a/drivers/net/mana/mana.h b/drivers/net/mana/mana.h
index 5801491d75..74e37706be 100644
--- a/drivers/net/mana/mana.h
+++ b/drivers/net/mana/mana.h
@@ -50,6 +50,21 @@ struct mana_shared_data {
#define MAX_TX_WQE_SIZE 512
#define MAX_RX_WQE_SIZE 256
+/* For 32 bit only */
+#ifdef RTE_ARCH_32
+#define GDMA_SHORT_DB_INC_MASK 0xffff
+#define GDMA_SHORT_DB_QID_MASK 0xfff
+
+#define GDMA_SHORT_DB_MAX_WQE (0x10000 / GDMA_WQE_ALIGNMENT_UNIT_SIZE)
+
+#define TX_WQE_SHORT_DB_THRESHOLD \
+ (GDMA_SHORT_DB_MAX_WQE - \
+ (MAX_TX_WQE_SIZE / GDMA_WQE_ALIGNMENT_UNIT_SIZE))
+#define RX_WQE_SHORT_DB_THRESHOLD \
+ (GDMA_SHORT_DB_MAX_WQE - \
+ (MAX_RX_WQE_SIZE / GDMA_WQE_ALIGNMENT_UNIT_SIZE))
+#endif
+
/* Values from the GDMA specification document, WQE format description */
#define INLINE_OOB_SMALL_SIZE_IN_BYTES 8
#define INLINE_OOB_LARGE_SIZE_IN_BYTES 24
@@ -425,6 +440,11 @@ struct mana_rxq {
*/
uint32_t desc_ring_head, desc_ring_tail;
+#ifdef RTE_ARCH_32
+ /* For storing the wqe increment count between short doorbell rings */
+ uint32_t wqe_cnt_to_short_db;
+#endif
+
struct mana_gdma_queue gdma_rq;
struct mana_gdma_queue gdma_cq;
struct gdma_comp *gdma_comp_buf;
@@ -455,8 +475,14 @@ extern int mana_logtype_init;
#define PMD_INIT_FUNC_TRACE() PMD_INIT_LOG(DEBUG, " >>")
+#ifdef RTE_ARCH_32
+int mana_ring_short_doorbell(void *db_page, enum gdma_queue_types queue_type,
+ uint32_t queue_id, uint32_t tail_incr,
+ uint8_t arm);
+#else
int mana_ring_doorbell(void *db_page, enum gdma_queue_types queue_type,
uint32_t queue_id, uint32_t tail, uint8_t arm);
+#endif
int mana_rq_ring_doorbell(struct mana_rxq *rxq);
int gdma_post_work_request(struct mana_gdma_queue *queue,
diff --git a/drivers/net/mana/rx.c b/drivers/net/mana/rx.c
index 14d9085801..fc1587e206 100644
--- a/drivers/net/mana/rx.c
+++ b/drivers/net/mana/rx.c
@@ -39,10 +39,18 @@ mana_rq_ring_doorbell(struct mana_rxq *rxq)
/* Hardware Spec specifies that software client should set 0 for
* wqe_cnt for Receive Queues.
*/
+#ifdef RTE_ARCH_32
+ ret = mana_ring_short_doorbell(db_page, GDMA_QUEUE_RECEIVE,
+ rxq->gdma_rq.id,
+ rxq->wqe_cnt_to_short_db *
+ GDMA_WQE_ALIGNMENT_UNIT_SIZE,
+ 0);
+#else
ret = mana_ring_doorbell(db_page, GDMA_QUEUE_RECEIVE,
rxq->gdma_rq.id,
rxq->gdma_rq.head * GDMA_WQE_ALIGNMENT_UNIT_SIZE,
0);
+#endif
if (ret)
DP_LOG(ERR, "failed to ring RX doorbell ret %d", ret);
@@ -97,6 +105,9 @@ mana_alloc_and_post_rx_wqe(struct mana_rxq *rxq)
/* update queue for tracking pending packets */
desc->pkt = mbuf;
desc->wqe_size_in_bu = wqe_size_in_bu;
+#ifdef RTE_ARCH_32
+ rxq->wqe_cnt_to_short_db += wqe_size_in_bu;
+#endif
rxq->desc_ring_head = (rxq->desc_ring_head + 1) % rxq->num_desc;
} else {
DP_LOG(DEBUG, "failed to post recv ret %d", ret);
@@ -115,12 +126,22 @@ mana_alloc_and_post_rx_wqes(struct mana_rxq *rxq)
int ret;
uint32_t i;
+#ifdef RTE_ARCH_32
+ rxq->wqe_cnt_to_short_db = 0;
+#endif
for (i = 0; i < rxq->num_desc; i++) {
ret = mana_alloc_and_post_rx_wqe(rxq);
if (ret) {
DP_LOG(ERR, "failed to post RX ret = %d", ret);
return ret;
}
+
+#ifdef RTE_ARCH_32
+ if (rxq->wqe_cnt_to_short_db > RX_WQE_SHORT_DB_THRESHOLD) {
+ mana_rq_ring_doorbell(rxq);
+ rxq->wqe_cnt_to_short_db = 0;
+ }
+#endif
}
mana_rq_ring_doorbell(rxq);
@@ -397,6 +418,10 @@ mana_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
uint32_t i;
int polled = 0;
+#ifdef RTE_ARCH_32
+ rxq->wqe_cnt_to_short_db = 0;
+#endif
+
repoll:
/* Polling on new completions if we have no backlog */
if (rxq->comp_buf_idx == rxq->comp_buf_len) {
@@ -505,6 +530,16 @@ mana_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
wqe_posted++;
if (pkt_received == pkts_n)
break;
+
+#ifdef RTE_ARCH_32
+ /* Ring short doorbell if approaching the wqe increment
+ * limit.
+ */
+ if (rxq->wqe_cnt_to_short_db > RX_WQE_SHORT_DB_THRESHOLD) {
+ mana_rq_ring_doorbell(rxq);
+ rxq->wqe_cnt_to_short_db = 0;
+ }
+#endif
}
rxq->backlog_idx = pkt_idx;
@@ -525,6 +560,15 @@ mana_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
return pkt_received;
}
+#ifdef RTE_ARCH_32
+static int
+mana_arm_cq(struct mana_rxq *rxq __rte_unused, uint8_t arm __rte_unused)
+{
+ DP_LOG(ERR, "Do not support in 32 bit");
+
+ return -ENODEV;
+}
+#else
static int
mana_arm_cq(struct mana_rxq *rxq, uint8_t arm)
{
@@ -538,6 +582,7 @@ mana_arm_cq(struct mana_rxq *rxq, uint8_t arm)
return mana_ring_doorbell(priv->db_page, GDMA_QUEUE_COMPLETION,
rxq->gdma_cq.id, head, arm);
}
+#endif
int
mana_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
diff --git a/drivers/net/mana/tx.c b/drivers/net/mana/tx.c
index 11ba2ee1ac..1e2508e1f2 100644
--- a/drivers/net/mana/tx.c
+++ b/drivers/net/mana/tx.c
@@ -176,6 +176,9 @@ mana_tx_burst(void *dpdk_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
void *db_page;
uint16_t pkt_sent = 0;
uint32_t num_comp, i;
+#ifdef RTE_ARCH_32
+ uint32_t wqe_count = 0;
+#endif
/* Process send completions from GDMA */
num_comp = gdma_poll_completion_queue(&txq->gdma_cq,
@@ -418,6 +421,20 @@ mana_tx_burst(void *dpdk_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
DP_LOG(DEBUG, "nb_pkts %u pkt[%d] sent",
nb_pkts, pkt_idx);
+#ifdef RTE_ARCH_32
+ wqe_count += wqe_size_in_bu;
+ if (wqe_count > TX_WQE_SHORT_DB_THRESHOLD) {
+ /* wqe_count approaching to short doorbell
+ * increment limit. Stop processing further
+ * more packets and just ring short
+ * doorbell.
+ */
+ DP_LOG(DEBUG, "wqe_count %u reaching limit, "
+ "pkt_sent %d",
+ wqe_count, pkt_sent);
+ break;
+ }
+#endif
} else {
DP_LOG(DEBUG, "pkt[%d] failed to post send ret %d",
pkt_idx, ret);
@@ -436,11 +453,19 @@ mana_tx_burst(void *dpdk_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
}
if (pkt_sent) {
+#ifdef RTE_ARCH_32
+ ret = mana_ring_short_doorbell(db_page, GDMA_QUEUE_SEND,
+ txq->gdma_sq.id,
+ wqe_count *
+ GDMA_WQE_ALIGNMENT_UNIT_SIZE,
+ 0);
+#else
ret = mana_ring_doorbell(db_page, GDMA_QUEUE_SEND,
txq->gdma_sq.id,
txq->gdma_sq.head *
GDMA_WQE_ALIGNMENT_UNIT_SIZE,
0);
+#endif
if (ret)
DP_LOG(ERR, "mana_ring_doorbell failed ret %d", ret);
}
--
2.34.1