* [PATCH] net/axgbe: support segmented Tx
@ 2022-09-08 16:58 Bhagyada Modali
2022-09-08 18:15 ` [PATCH v2] " Bhagyada Modali
0 siblings, 1 reply; 4+ messages in thread
From: Bhagyada Modali @ 2022-09-08 16:58 UTC (permalink / raw)
To: chandu, ferruh.yigit; +Cc: dev, stable, Bhagyada Modali
Enable segmented Tx support and add jumbo packet transmit capability.
Signed-off-by: Bhagyada Modali <bhagyada.modali@amd.com>
---
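For context on how the new burst function is selected: the tx_queue_setup hunk below switches dev->tx_pkt_burst to axgbe_xmit_pkts_seg whenever the application requests RTE_ETH_TX_OFFLOAD_MULTI_SEGS in txmode.offloads. The following is a minimal, illustrative configuration sketch and not part of this patch; it assumes the standard ethdev API, configure_multi_seg_tx/port_id/nb_txd are placeholder names, and Rx queue setup and device start are omitted:

#include <rte_ethdev.h>

static int
configure_multi_seg_tx(uint16_t port_id, uint16_t nb_txd)
{
	struct rte_eth_dev_info dev_info;
	struct rte_eth_conf port_conf = { 0 };
	int ret;

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		return ret;

	/* Request multi-segment Tx only if the PMD advertises it */
	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
		port_conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS;

	ret = rte_eth_dev_configure(port_id, 1, 1, &port_conf);
	if (ret != 0)
		return ret;

	/* With the offload set, axgbe_dev_tx_queue_setup() selects
	 * axgbe_xmit_pkts_seg as the Tx burst function.
	 */
	return rte_eth_tx_queue_setup(port_id, 0, nb_txd,
			rte_eth_dev_socket_id(port_id), NULL);
}
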
drivers/net/axgbe/axgbe_ethdev.c | 1 +
drivers/net/axgbe/axgbe_ethdev.h | 1 +
drivers/net/axgbe/axgbe_rxtx.c | 215 ++++++++++++++++++++++++++++++-
drivers/net/axgbe/axgbe_rxtx.h | 4 +
4 files changed, 220 insertions(+), 1 deletion(-)
diff --git a/drivers/net/axgbe/axgbe_ethdev.c b/drivers/net/axgbe/axgbe_ethdev.c
index e6822fa711..b071e4e460 100644
--- a/drivers/net/axgbe/axgbe_ethdev.c
+++ b/drivers/net/axgbe/axgbe_ethdev.c
@@ -1228,6 +1228,7 @@ axgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
RTE_ETH_TX_OFFLOAD_QINQ_INSERT |
RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+ RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
RTE_ETH_TX_OFFLOAD_TCP_CKSUM;
diff --git a/drivers/net/axgbe/axgbe_ethdev.h b/drivers/net/axgbe/axgbe_ethdev.h
index e06d40f9eb..7f19321d88 100644
--- a/drivers/net/axgbe/axgbe_ethdev.h
+++ b/drivers/net/axgbe/axgbe_ethdev.h
@@ -582,6 +582,7 @@ struct axgbe_port {
unsigned int tx_pbl;
unsigned int tx_osp_mode;
unsigned int tx_max_fifo_size;
+ unsigned int multi_segs_tx;
/* Rx settings */
unsigned int rx_sf_mode;
diff --git a/drivers/net/axgbe/axgbe_rxtx.c b/drivers/net/axgbe/axgbe_rxtx.c
index 8b43e8160b..c32ebe24bb 100644
--- a/drivers/net/axgbe/axgbe_rxtx.c
+++ b/drivers/net/axgbe/axgbe_rxtx.c
@@ -544,6 +544,7 @@ int axgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
unsigned int tsize;
const struct rte_memzone *tz;
uint64_t offloads;
+ struct rte_eth_dev_data *dev_data = dev->data;
tx_desc = nb_desc;
pdata = dev->data->dev_private;
@@ -611,7 +612,13 @@ int axgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
if (!pdata->tx_queues)
pdata->tx_queues = dev->data->tx_queues;
- if (txq->vector_disable ||
+ if ((dev_data->dev_conf.txmode.offloads &
+ RTE_ETH_TX_OFFLOAD_MULTI_SEGS))
+ pdata->multi_segs_tx = true;
+
+ if (pdata->multi_segs_tx)
+ dev->tx_pkt_burst = &axgbe_xmit_pkts_seg;
+ else if (txq->vector_disable ||
rte_vect_get_max_simd_bitwidth() < RTE_VECT_SIMD_128)
dev->tx_pkt_burst = &axgbe_xmit_pkts;
else
@@ -762,6 +769,29 @@ void axgbe_dev_enable_tx(struct rte_eth_dev *dev)
AXGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 1);
}
+/* Free Tx conformed mbuf segments */
+static void
+axgbe_xmit_cleanup_seg(struct axgbe_tx_queue *txq)
+{
+ volatile struct axgbe_tx_desc *desc;
+ uint16_t idx;
+
+ idx = AXGBE_GET_DESC_IDX(txq, txq->dirty);
+ while (txq->cur != txq->dirty) {
+ if (unlikely(idx == txq->nb_desc))
+ idx = 0;
+ desc = &txq->desc[idx];
+ /* Check for ownership */
+ if (AXGMAC_GET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, OWN))
+ return;
+ memset((void *)&desc->desc2, 0, 8);
+ /* Free mbuf */
+ rte_pktmbuf_free_seg(txq->sw_ring[idx]);
+ txq->sw_ring[idx++] = NULL;
+ txq->dirty++;
+ }
+}
+
/* Free Tx conformed mbufs */
static void axgbe_xmit_cleanup(struct axgbe_tx_queue *txq)
{
@@ -854,6 +884,189 @@ static int axgbe_xmit_hw(struct axgbe_tx_queue *txq,
return 0;
}
+/* Tx Descriptor formation for segmented mbuf
+ * Each mbuf will require multiple descriptors
+ */
+
+static int
+axgbe_xmit_hw_seg(struct axgbe_tx_queue *txq,
+ struct rte_mbuf *mbuf)
+{
+ volatile struct axgbe_tx_desc *desc;
+ uint16_t idx;
+ uint64_t mask;
+ int start_index;
+ uint32_t pkt_len = 0;
+ int nb_desc_free;
+ struct rte_mbuf *tx_pkt;
+
+ nb_desc_free = txq->nb_desc - (txq->cur - txq->dirty);
+
+ if (mbuf->nb_segs > nb_desc_free) {
+ axgbe_xmit_cleanup_seg(txq);
+ nb_desc_free = txq->nb_desc - (txq->cur - txq->dirty);
+ if (unlikely(mbuf->nb_segs > nb_desc_free))
+ return RTE_ETH_TX_DESC_UNAVAIL;
+ }
+
+ idx = AXGBE_GET_DESC_IDX(txq, txq->cur);
+ desc = &txq->desc[idx];
+ /* Save the start index; the OWN bit on this descriptor is set last */
+ start_index = idx;
+
+ tx_pkt = mbuf;
+ /* Max pkt len = 9018; needs to be updated according to the jumbo pkt size */
+ pkt_len = tx_pkt->pkt_len;
+
+ /* Update buffer address and length */
+ desc->baddr = rte_mbuf_data_iova(tx_pkt);
+ AXGMAC_SET_BITS_LE(desc->desc2, TX_NORMAL_DESC2, HL_B1L,
+ tx_pkt->data_len);
+ /* Total msg length to transmit */
+ AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, FL,
+ tx_pkt->pkt_len);
+ /* Timestamp enablement check */
+ if (mbuf->ol_flags & RTE_MBUF_F_TX_IEEE1588_TMST)
+ AXGMAC_SET_BITS_LE(desc->desc2, TX_NORMAL_DESC2, TTSE, 1);
+
+ rte_wmb();
+ /* Mark it as First Descriptor */
+ AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, FD, 1);
+ /* Mark it as a NORMAL descriptor */
+ AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, CTXT, 0);
+ /* configure h/w Offload */
+ mask = mbuf->ol_flags & RTE_MBUF_F_TX_L4_MASK;
+ if (mask == RTE_MBUF_F_TX_TCP_CKSUM || mask == RTE_MBUF_F_TX_UDP_CKSUM)
+ AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, CIC, 0x3);
+ else if (mbuf->ol_flags & RTE_MBUF_F_TX_IP_CKSUM)
+ AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, CIC, 0x1);
+ rte_wmb();
+
+ if (mbuf->ol_flags & (RTE_MBUF_F_TX_VLAN | RTE_MBUF_F_TX_QINQ)) {
+ /* Mark it as a CONTEXT descriptor */
+ AXGMAC_SET_BITS_LE(desc->desc3, TX_CONTEXT_DESC3,
+ CTXT, 1);
+ /* Set the VLAN tag */
+ AXGMAC_SET_BITS_LE(desc->desc3, TX_CONTEXT_DESC3,
+ VT, mbuf->vlan_tci);
+ /* Indicate this descriptor contains the VLAN tag */
+ AXGMAC_SET_BITS_LE(desc->desc3, TX_CONTEXT_DESC3,
+ VLTV, 1);
+ AXGMAC_SET_BITS_LE(desc->desc2, TX_NORMAL_DESC2, VTIR,
+ TX_NORMAL_DESC2_VLAN_INSERT);
+ } else {
+ AXGMAC_SET_BITS_LE(desc->desc2, TX_NORMAL_DESC2, VTIR, 0x0);
+ }
+ rte_wmb();
+
+ /* Save mbuf */
+ txq->sw_ring[idx] = tx_pkt;
+ /* Update current index*/
+ txq->cur++;
+
+ tx_pkt = tx_pkt->next;
+
+ while (tx_pkt != NULL) {
+ idx = AXGBE_GET_DESC_IDX(txq, txq->cur);
+ desc = &txq->desc[idx];
+
+ /* Update buffer address and length */
+ desc->baddr = rte_mbuf_data_iova(tx_pkt);
+
+ AXGMAC_SET_BITS_LE(desc->desc2,
+ TX_NORMAL_DESC2, HL_B1L, tx_pkt->data_len);
+
+ rte_wmb();
+
+ /* Mark it as a NORMAL descriptor */
+ AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, CTXT, 0);
+ /* configure h/w Offload */
+ mask = mbuf->ol_flags & RTE_MBUF_F_TX_L4_MASK;
+ if (mask == RTE_MBUF_F_TX_TCP_CKSUM ||
+ mask == RTE_MBUF_F_TX_UDP_CKSUM)
+ AXGMAC_SET_BITS_LE(desc->desc3,
+ TX_NORMAL_DESC3, CIC, 0x3);
+ else if (mbuf->ol_flags & RTE_MBUF_F_TX_IP_CKSUM)
+ AXGMAC_SET_BITS_LE(desc->desc3,
+ TX_NORMAL_DESC3, CIC, 0x1);
+
+ rte_wmb();
+
+ /* Set OWN bit */
+ AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, OWN, 1);
+ rte_wmb();
+
+ /* Save mbuf */
+ txq->sw_ring[idx] = tx_pkt;
+ /* Update current index*/
+ txq->cur++;
+
+ tx_pkt = tx_pkt->next;
+
+ }
+
+ /* Set LD bit for the last descriptor */
+ AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, LD, 1);
+ rte_wmb();
+
+ /* Update stats */
+ txq->bytes += pkt_len;
+
+ /* Set OWN bit for the first descriptor */
+ desc = &txq->desc[start_index];
+ AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, OWN, 1);
+ rte_wmb();
+
+ return 0;
+}
+
+/* Eal supported tx wrapper - Segmented */
+uint16_t
+axgbe_xmit_pkts_seg(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ PMD_INIT_FUNC_TRACE();
+
+ struct axgbe_tx_queue *txq;
+ uint16_t nb_desc_free;
+ uint16_t nb_pkt_sent = 0;
+ uint16_t idx;
+ uint32_t tail_addr;
+ struct rte_mbuf *mbuf = NULL;
+
+ if (unlikely(nb_pkts == 0))
+ return nb_pkts;
+
+ txq = (struct axgbe_tx_queue *)tx_queue;
+
+ nb_desc_free = txq->nb_desc - (txq->cur - txq->dirty);
+ if (unlikely(nb_desc_free <= txq->free_thresh)) {
+ axgbe_xmit_cleanup_seg(txq);
+ nb_desc_free = txq->nb_desc - (txq->cur - txq->dirty);
+ if (unlikely(nb_desc_free == 0))
+ return 0;
+ }
+
+ while (nb_pkts--) {
+
+ mbuf = *tx_pkts++;
+
+ if (axgbe_xmit_hw_seg(txq, mbuf))
+ goto out;
+ nb_pkt_sent++;
+ }
+out:
+ /* Sync read and write */
+ rte_mb();
+ idx = AXGBE_GET_DESC_IDX(txq, txq->cur);
+ tail_addr = low32_value(txq->ring_phys_addr +
+ idx * sizeof(struct axgbe_tx_desc));
+ /* Update tail reg with next immediate address to kick Tx DMA channel*/
+ AXGMAC_DMA_IOWRITE(txq, DMA_CH_TDTR_LO, tail_addr);
+ txq->pkts += nb_pkt_sent;
+ return nb_pkt_sent;
+}
+
/* Eal supported tx wrapper*/
uint16_t
axgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
diff --git a/drivers/net/axgbe/axgbe_rxtx.h b/drivers/net/axgbe/axgbe_rxtx.h
index 2a330339cd..c19d6d9db1 100644
--- a/drivers/net/axgbe/axgbe_rxtx.h
+++ b/drivers/net/axgbe/axgbe_rxtx.h
@@ -167,6 +167,10 @@ int axgbe_dev_fw_version_get(struct rte_eth_dev *eth_dev,
uint16_t axgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts);
+
+uint16_t axgbe_xmit_pkts_seg(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
+
uint16_t axgbe_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts);
--
2.25.1
* [PATCH v2] net/axgbe: support segmented Tx
2022-09-08 16:58 [PATCH] net/axgbe: support segmented Tx Bhagyada Modali
@ 2022-09-08 18:15 ` Bhagyada Modali
2022-09-09 9:31 ` Namburu, Chandu-babu
0 siblings, 1 reply; 4+ messages in thread
From: Bhagyada Modali @ 2022-09-08 18:15 UTC (permalink / raw)
To: chandu, ferruh.yigit; +Cc: dev, stable, Bhagyada Modali
Enable segmented Tx support and add jumbo packet transmit capability.
Signed-off-by: Bhagyada Modali <bhagyada.modali@amd.com>
---
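As an illustration of the jumbo/segmented capability this revision adds (not part of the patch itself): the application hands a chained mbuf to rte_eth_tx_burst(), and axgbe_xmit_hw_seg() consumes one Tx descriptor per segment, marking the first with FD, the last with LD, and releasing the first descriptor's OWN bit last. A rough sketch under assumed names (send_segmented_frame, mbuf_pool, the 1500-byte segment size); payload filling is omitted:

#include <rte_ethdev.h>
#include <rte_mbuf.h>

static uint16_t
send_segmented_frame(uint16_t port_id, struct rte_mempool *mbuf_pool)
{
	struct rte_mbuf *head, *seg;
	uint16_t sent;
	int i;

	head = rte_pktmbuf_alloc(mbuf_pool);
	if (head == NULL || rte_pktmbuf_append(head, 1500) == NULL)
		goto err;

	/* Chain three more 1500-byte segments: a ~6000-byte frame that
	 * the segmented path maps to one Tx descriptor per segment.
	 */
	for (i = 0; i < 3; i++) {
		seg = rte_pktmbuf_alloc(mbuf_pool);
		if (seg == NULL || rte_pktmbuf_append(seg, 1500) == NULL ||
		    rte_pktmbuf_chain(head, seg) != 0) {
			rte_pktmbuf_free(seg);
			goto err;
		}
	}

	/* rte_pktmbuf_chain() keeps head->pkt_len and head->nb_segs in sync */
	sent = rte_eth_tx_burst(port_id, 0, &head, 1);
	if (sent == 0)
		rte_pktmbuf_free(head);
	return sent;

err:
	rte_pktmbuf_free(head);
	return 0;
}
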
drivers/net/axgbe/axgbe_ethdev.c | 1 +
drivers/net/axgbe/axgbe_ethdev.h | 1 +
drivers/net/axgbe/axgbe_rxtx.c | 213 ++++++++++++++++++++++++++++++-
drivers/net/axgbe/axgbe_rxtx.h | 4 +
4 files changed, 218 insertions(+), 1 deletion(-)
diff --git a/drivers/net/axgbe/axgbe_ethdev.c b/drivers/net/axgbe/axgbe_ethdev.c
index e6822fa711..b071e4e460 100644
--- a/drivers/net/axgbe/axgbe_ethdev.c
+++ b/drivers/net/axgbe/axgbe_ethdev.c
@@ -1228,6 +1228,7 @@ axgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
RTE_ETH_TX_OFFLOAD_QINQ_INSERT |
RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+ RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
RTE_ETH_TX_OFFLOAD_TCP_CKSUM;
diff --git a/drivers/net/axgbe/axgbe_ethdev.h b/drivers/net/axgbe/axgbe_ethdev.h
index e06d40f9eb..7f19321d88 100644
--- a/drivers/net/axgbe/axgbe_ethdev.h
+++ b/drivers/net/axgbe/axgbe_ethdev.h
@@ -582,6 +582,7 @@ struct axgbe_port {
unsigned int tx_pbl;
unsigned int tx_osp_mode;
unsigned int tx_max_fifo_size;
+ unsigned int multi_segs_tx;
/* Rx settings */
unsigned int rx_sf_mode;
diff --git a/drivers/net/axgbe/axgbe_rxtx.c b/drivers/net/axgbe/axgbe_rxtx.c
index 8b43e8160b..881ffa01db 100644
--- a/drivers/net/axgbe/axgbe_rxtx.c
+++ b/drivers/net/axgbe/axgbe_rxtx.c
@@ -544,6 +544,7 @@ int axgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
unsigned int tsize;
const struct rte_memzone *tz;
uint64_t offloads;
+ struct rte_eth_dev_data *dev_data = dev->data;
tx_desc = nb_desc;
pdata = dev->data->dev_private;
@@ -611,7 +612,13 @@ int axgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
if (!pdata->tx_queues)
pdata->tx_queues = dev->data->tx_queues;
- if (txq->vector_disable ||
+ if ((dev_data->dev_conf.txmode.offloads &
+ RTE_ETH_TX_OFFLOAD_MULTI_SEGS))
+ pdata->multi_segs_tx = true;
+
+ if (pdata->multi_segs_tx)
+ dev->tx_pkt_burst = &axgbe_xmit_pkts_seg;
+ else if (txq->vector_disable ||
rte_vect_get_max_simd_bitwidth() < RTE_VECT_SIMD_128)
dev->tx_pkt_burst = &axgbe_xmit_pkts;
else
@@ -762,6 +769,29 @@ void axgbe_dev_enable_tx(struct rte_eth_dev *dev)
AXGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 1);
}
+/* Free Tx conformed mbuf segments */
+static void
+axgbe_xmit_cleanup_seg(struct axgbe_tx_queue *txq)
+{
+ volatile struct axgbe_tx_desc *desc;
+ uint16_t idx;
+
+ idx = AXGBE_GET_DESC_IDX(txq, txq->dirty);
+ while (txq->cur != txq->dirty) {
+ if (unlikely(idx == txq->nb_desc))
+ idx = 0;
+ desc = &txq->desc[idx];
+ /* Check for ownership */
+ if (AXGMAC_GET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, OWN))
+ return;
+ memset((void *)&desc->desc2, 0, 8);
+ /* Free mbuf */
+ rte_pktmbuf_free_seg(txq->sw_ring[idx]);
+ txq->sw_ring[idx++] = NULL;
+ txq->dirty++;
+ }
+}
+
/* Free Tx conformed mbufs */
static void axgbe_xmit_cleanup(struct axgbe_tx_queue *txq)
{
@@ -854,6 +884,187 @@ static int axgbe_xmit_hw(struct axgbe_tx_queue *txq,
return 0;
}
+/* Tx Descriptor formation for segmented mbuf
+ * Each mbuf will require multiple descriptors
+ */
+
+static int
+axgbe_xmit_hw_seg(struct axgbe_tx_queue *txq,
+ struct rte_mbuf *mbuf)
+{
+ volatile struct axgbe_tx_desc *desc;
+ uint16_t idx;
+ uint64_t mask;
+ int start_index;
+ uint32_t pkt_len = 0;
+ int nb_desc_free;
+ struct rte_mbuf *tx_pkt;
+
+ nb_desc_free = txq->nb_desc - (txq->cur - txq->dirty);
+
+ if (mbuf->nb_segs > nb_desc_free) {
+ axgbe_xmit_cleanup_seg(txq);
+ nb_desc_free = txq->nb_desc - (txq->cur - txq->dirty);
+ if (unlikely(mbuf->nb_segs > nb_desc_free))
+ return RTE_ETH_TX_DESC_UNAVAIL;
+ }
+
+ idx = AXGBE_GET_DESC_IDX(txq, txq->cur);
+ desc = &txq->desc[idx];
+ /* Save the start index; the OWN bit on this descriptor is set last */
+ start_index = idx;
+
+ tx_pkt = mbuf;
+ /* Max pkt len = 9018; needs to be updated according to the jumbo pkt size */
+ pkt_len = tx_pkt->pkt_len;
+
+ /* Update buffer address and length */
+ desc->baddr = rte_mbuf_data_iova(tx_pkt);
+ AXGMAC_SET_BITS_LE(desc->desc2, TX_NORMAL_DESC2, HL_B1L,
+ tx_pkt->data_len);
+ /* Total msg length to transmit */
+ AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, FL,
+ tx_pkt->pkt_len);
+ /* Timestamp enablement check */
+ if (mbuf->ol_flags & RTE_MBUF_F_TX_IEEE1588_TMST)
+ AXGMAC_SET_BITS_LE(desc->desc2, TX_NORMAL_DESC2, TTSE, 1);
+
+ rte_wmb();
+ /* Mark it as First Descriptor */
+ AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, FD, 1);
+ /* Mark it as a NORMAL descriptor */
+ AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, CTXT, 0);
+ /* configure h/w Offload */
+ mask = mbuf->ol_flags & RTE_MBUF_F_TX_L4_MASK;
+ if (mask == RTE_MBUF_F_TX_TCP_CKSUM || mask == RTE_MBUF_F_TX_UDP_CKSUM)
+ AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, CIC, 0x3);
+ else if (mbuf->ol_flags & RTE_MBUF_F_TX_IP_CKSUM)
+ AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, CIC, 0x1);
+ rte_wmb();
+
+ if (mbuf->ol_flags & (RTE_MBUF_F_TX_VLAN | RTE_MBUF_F_TX_QINQ)) {
+ /* Mark it as a CONTEXT descriptor */
+ AXGMAC_SET_BITS_LE(desc->desc3, TX_CONTEXT_DESC3,
+ CTXT, 1);
+ /* Set the VLAN tag */
+ AXGMAC_SET_BITS_LE(desc->desc3, TX_CONTEXT_DESC3,
+ VT, mbuf->vlan_tci);
+ /* Indicate this descriptor contains the VLAN tag */
+ AXGMAC_SET_BITS_LE(desc->desc3, TX_CONTEXT_DESC3,
+ VLTV, 1);
+ AXGMAC_SET_BITS_LE(desc->desc2, TX_NORMAL_DESC2, VTIR,
+ TX_NORMAL_DESC2_VLAN_INSERT);
+ } else {
+ AXGMAC_SET_BITS_LE(desc->desc2, TX_NORMAL_DESC2, VTIR, 0x0);
+ }
+ rte_wmb();
+
+ /* Save mbuf */
+ txq->sw_ring[idx] = tx_pkt;
+ /* Update current index*/
+ txq->cur++;
+
+ tx_pkt = tx_pkt->next;
+
+ while (tx_pkt != NULL) {
+ idx = AXGBE_GET_DESC_IDX(txq, txq->cur);
+ desc = &txq->desc[idx];
+
+ /* Update buffer address and length */
+ desc->baddr = rte_mbuf_data_iova(tx_pkt);
+
+ AXGMAC_SET_BITS_LE(desc->desc2,
+ TX_NORMAL_DESC2, HL_B1L, tx_pkt->data_len);
+
+ rte_wmb();
+
+ /* Mark it as a NORMAL descriptor */
+ AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, CTXT, 0);
+ /* configure h/w Offload */
+ mask = mbuf->ol_flags & RTE_MBUF_F_TX_L4_MASK;
+ if (mask == RTE_MBUF_F_TX_TCP_CKSUM ||
+ mask == RTE_MBUF_F_TX_UDP_CKSUM)
+ AXGMAC_SET_BITS_LE(desc->desc3,
+ TX_NORMAL_DESC3, CIC, 0x3);
+ else if (mbuf->ol_flags & RTE_MBUF_F_TX_IP_CKSUM)
+ AXGMAC_SET_BITS_LE(desc->desc3,
+ TX_NORMAL_DESC3, CIC, 0x1);
+
+ rte_wmb();
+
+ /* Set OWN bit */
+ AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, OWN, 1);
+ rte_wmb();
+
+ /* Save mbuf */
+ txq->sw_ring[idx] = tx_pkt;
+ /* Update current index*/
+ txq->cur++;
+
+ tx_pkt = tx_pkt->next;
+ }
+
+ /* Set LD bit for the last descriptor */
+ AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, LD, 1);
+ rte_wmb();
+
+ /* Update stats */
+ txq->bytes += pkt_len;
+
+ /* Set OWN bit for the first descriptor */
+ desc = &txq->desc[start_index];
+ AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, OWN, 1);
+ rte_wmb();
+
+ return 0;
+}
+
+/* Eal supported tx wrapper - Segmented */
+uint16_t
+axgbe_xmit_pkts_seg(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ PMD_INIT_FUNC_TRACE();
+
+ struct axgbe_tx_queue *txq;
+ uint16_t nb_desc_free;
+ uint16_t nb_pkt_sent = 0;
+ uint16_t idx;
+ uint32_t tail_addr;
+ struct rte_mbuf *mbuf = NULL;
+
+ if (unlikely(nb_pkts == 0))
+ return nb_pkts;
+
+ txq = (struct axgbe_tx_queue *)tx_queue;
+
+ nb_desc_free = txq->nb_desc - (txq->cur - txq->dirty);
+ if (unlikely(nb_desc_free <= txq->free_thresh)) {
+ axgbe_xmit_cleanup_seg(txq);
+ nb_desc_free = txq->nb_desc - (txq->cur - txq->dirty);
+ if (unlikely(nb_desc_free == 0))
+ return 0;
+ }
+
+ while (nb_pkts--) {
+ mbuf = *tx_pkts++;
+
+ if (axgbe_xmit_hw_seg(txq, mbuf))
+ goto out;
+ nb_pkt_sent++;
+ }
+out:
+ /* Sync read and write */
+ rte_mb();
+ idx = AXGBE_GET_DESC_IDX(txq, txq->cur);
+ tail_addr = low32_value(txq->ring_phys_addr +
+ idx * sizeof(struct axgbe_tx_desc));
+ /* Update tail reg with next immediate address to kick Tx DMA channel*/
+ AXGMAC_DMA_IOWRITE(txq, DMA_CH_TDTR_LO, tail_addr);
+ txq->pkts += nb_pkt_sent;
+ return nb_pkt_sent;
+}
+
/* Eal supported tx wrapper*/
uint16_t
axgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
diff --git a/drivers/net/axgbe/axgbe_rxtx.h b/drivers/net/axgbe/axgbe_rxtx.h
index 2a330339cd..c19d6d9db1 100644
--- a/drivers/net/axgbe/axgbe_rxtx.h
+++ b/drivers/net/axgbe/axgbe_rxtx.h
@@ -167,6 +167,10 @@ int axgbe_dev_fw_version_get(struct rte_eth_dev *eth_dev,
uint16_t axgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts);
+
+uint16_t axgbe_xmit_pkts_seg(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
+
uint16_t axgbe_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts);
--
2.25.1
* RE: [PATCH v2] net/axgbe: support segmented Tx
2022-09-08 18:15 ` [PATCH v2] " Bhagyada Modali
@ 2022-09-09 9:31 ` Namburu, Chandu-babu
2022-10-04 14:59 ` Andrew Rybchenko
0 siblings, 1 reply; 4+ messages in thread
From: Namburu, Chandu-babu @ 2022-09-09 9:31 UTC (permalink / raw)
To: Modali, Bhagyada, Yigit, Ferruh; +Cc: dev, stable
[Public]
Acked-by: Chandubabu Namburu <chandu@amd.com>
-----Original Message-----
From: Modali, Bhagyada <Bhagyada.Modali@amd.com>
Sent: Thursday, September 8, 2022 11:45 PM
To: Namburu, Chandu-babu <chandu@amd.com>; Yigit, Ferruh <Ferruh.Yigit@amd.com>
Cc: dev@dpdk.org; stable@dpdk.org; Modali, Bhagyada <Bhagyada.Modali@amd.com>
Subject: [PATCH v2] net/axgbe: support segmented Tx
Enable segmented Tx support and add jumbo packet transmit capability.
Signed-off-by: Bhagyada Modali <bhagyada.modali@amd.com>
---
drivers/net/axgbe/axgbe_ethdev.c | 1 +
drivers/net/axgbe/axgbe_ethdev.h | 1 +
drivers/net/axgbe/axgbe_rxtx.c | 213 ++++++++++++++++++++++++++++++-
drivers/net/axgbe/axgbe_rxtx.h | 4 +
4 files changed, 218 insertions(+), 1 deletion(-)
diff --git a/drivers/net/axgbe/axgbe_ethdev.c b/drivers/net/axgbe/axgbe_ethdev.c
index e6822fa711..b071e4e460 100644
--- a/drivers/net/axgbe/axgbe_ethdev.c
+++ b/drivers/net/axgbe/axgbe_ethdev.c
@@ -1228,6 +1228,7 @@ axgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
RTE_ETH_TX_OFFLOAD_QINQ_INSERT |
RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+ RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
RTE_ETH_TX_OFFLOAD_TCP_CKSUM;
diff --git a/drivers/net/axgbe/axgbe_ethdev.h b/drivers/net/axgbe/axgbe_ethdev.h
index e06d40f9eb..7f19321d88 100644
--- a/drivers/net/axgbe/axgbe_ethdev.h
+++ b/drivers/net/axgbe/axgbe_ethdev.h
@@ -582,6 +582,7 @@ struct axgbe_port {
unsigned int tx_pbl;
unsigned int tx_osp_mode;
unsigned int tx_max_fifo_size;
+ unsigned int multi_segs_tx;
/* Rx settings */
unsigned int rx_sf_mode;
diff --git a/drivers/net/axgbe/axgbe_rxtx.c b/drivers/net/axgbe/axgbe_rxtx.c index 8b43e8160b..881ffa01db 100644
--- a/drivers/net/axgbe/axgbe_rxtx.c
+++ b/drivers/net/axgbe/axgbe_rxtx.c
@@ -544,6 +544,7 @@ int axgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
unsigned int tsize;
const struct rte_memzone *tz;
uint64_t offloads;
+ struct rte_eth_dev_data *dev_data = dev->data;
tx_desc = nb_desc;
pdata = dev->data->dev_private;
@@ -611,7 +612,13 @@ int axgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
if (!pdata->tx_queues)
pdata->tx_queues = dev->data->tx_queues;
- if (txq->vector_disable ||
+ if ((dev_data->dev_conf.txmode.offloads &
+ RTE_ETH_TX_OFFLOAD_MULTI_SEGS))
+ pdata->multi_segs_tx = true;
+
+ if (pdata->multi_segs_tx)
+ dev->tx_pkt_burst = &axgbe_xmit_pkts_seg;
+ else if (txq->vector_disable ||
rte_vect_get_max_simd_bitwidth() < RTE_VECT_SIMD_128)
dev->tx_pkt_burst = &axgbe_xmit_pkts;
else
@@ -762,6 +769,29 @@ void axgbe_dev_enable_tx(struct rte_eth_dev *dev)
AXGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 1); }
+/* Free Tx conformed mbufs segments */
+static void
+axgbe_xmit_cleanup_seg(struct axgbe_tx_queue *txq) {
+ volatile struct axgbe_tx_desc *desc;
+ uint16_t idx;
+
+ idx = AXGBE_GET_DESC_IDX(txq, txq->dirty);
+ while (txq->cur != txq->dirty) {
+ if (unlikely(idx == txq->nb_desc))
+ idx = 0;
+ desc = &txq->desc[idx];
+ /* Check for ownership */
+ if (AXGMAC_GET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, OWN))
+ return;
+ memset((void *)&desc->desc2, 0, 8);
+ /* Free mbuf */
+ rte_pktmbuf_free_seg(txq->sw_ring[idx]);
+ txq->sw_ring[idx++] = NULL;
+ txq->dirty++;
+ }
+}
+
/* Free Tx conformed mbufs */
static void axgbe_xmit_cleanup(struct axgbe_tx_queue *txq) { @@ -854,6 +884,187 @@ static int axgbe_xmit_hw(struct axgbe_tx_queue *txq,
return 0;
}
+/* Tx Descriptor formation for segmented mbuf
+ * Each mbuf will require multiple descriptors */
+
+static int
+axgbe_xmit_hw_seg(struct axgbe_tx_queue *txq,
+ struct rte_mbuf *mbuf)
+{
+ volatile struct axgbe_tx_desc *desc;
+ uint16_t idx;
+ uint64_t mask;
+ int start_index;
+ uint32_t pkt_len = 0;
+ int nb_desc_free;
+ struct rte_mbuf *tx_pkt;
+
+ nb_desc_free = txq->nb_desc - (txq->cur - txq->dirty);
+
+ if (mbuf->nb_segs > nb_desc_free) {
+ axgbe_xmit_cleanup_seg(txq);
+ nb_desc_free = txq->nb_desc - (txq->cur - txq->dirty);
+ if (unlikely(mbuf->nb_segs > nb_desc_free))
+ return RTE_ETH_TX_DESC_UNAVAIL;
+ }
+
+ idx = AXGBE_GET_DESC_IDX(txq, txq->cur);
+ desc = &txq->desc[idx];
+ /* Saving the start index for setting the OWN bit finally */
+ start_index = idx;
+
+ tx_pkt = mbuf;
+ /* Max_pkt len = 9018 ; need to update it according to Jumbo pkt size */
+ pkt_len = tx_pkt->pkt_len;
+
+ /* Update buffer address and length */
+ desc->baddr = rte_mbuf_data_iova(tx_pkt);
+ AXGMAC_SET_BITS_LE(desc->desc2, TX_NORMAL_DESC2, HL_B1L,
+ tx_pkt->data_len);
+ /* Total msg length to transmit */
+ AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, FL,
+ tx_pkt->pkt_len);
+ /* Timestamp enablement check */
+ if (mbuf->ol_flags & RTE_MBUF_F_TX_IEEE1588_TMST)
+ AXGMAC_SET_BITS_LE(desc->desc2, TX_NORMAL_DESC2, TTSE, 1);
+
+ rte_wmb();
+ /* Mark it as First Descriptor */
+ AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, FD, 1);
+ /* Mark it as a NORMAL descriptor */
+ AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, CTXT, 0);
+ /* configure h/w Offload */
+ mask = mbuf->ol_flags & RTE_MBUF_F_TX_L4_MASK;
+ if (mask == RTE_MBUF_F_TX_TCP_CKSUM || mask == RTE_MBUF_F_TX_UDP_CKSUM)
+ AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, CIC, 0x3);
+ else if (mbuf->ol_flags & RTE_MBUF_F_TX_IP_CKSUM)
+ AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, CIC, 0x1);
+ rte_wmb();
+
+ if (mbuf->ol_flags & (RTE_MBUF_F_TX_VLAN | RTE_MBUF_F_TX_QINQ)) {
+ /* Mark it as a CONTEXT descriptor */
+ AXGMAC_SET_BITS_LE(desc->desc3, TX_CONTEXT_DESC3,
+ CTXT, 1);
+ /* Set the VLAN tag */
+ AXGMAC_SET_BITS_LE(desc->desc3, TX_CONTEXT_DESC3,
+ VT, mbuf->vlan_tci);
+ /* Indicate this descriptor contains the VLAN tag */
+ AXGMAC_SET_BITS_LE(desc->desc3, TX_CONTEXT_DESC3,
+ VLTV, 1);
+ AXGMAC_SET_BITS_LE(desc->desc2, TX_NORMAL_DESC2, VTIR,
+ TX_NORMAL_DESC2_VLAN_INSERT);
+ } else {
+ AXGMAC_SET_BITS_LE(desc->desc2, TX_NORMAL_DESC2, VTIR, 0x0);
+ }
+ rte_wmb();
+
+ /* Save mbuf */
+ txq->sw_ring[idx] = tx_pkt;
+ /* Update current index*/
+ txq->cur++;
+
+ tx_pkt = tx_pkt->next;
+
+ while (tx_pkt != NULL) {
+ idx = AXGBE_GET_DESC_IDX(txq, txq->cur);
+ desc = &txq->desc[idx];
+
+ /* Update buffer address and length */
+ desc->baddr = rte_mbuf_data_iova(tx_pkt);
+
+ AXGMAC_SET_BITS_LE(desc->desc2,
+ TX_NORMAL_DESC2, HL_B1L, tx_pkt->data_len);
+
+ rte_wmb();
+
+ /* Mark it as a NORMAL descriptor */
+ AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, CTXT, 0);
+ /* configure h/w Offload */
+ mask = mbuf->ol_flags & RTE_MBUF_F_TX_L4_MASK;
+ if (mask == RTE_MBUF_F_TX_TCP_CKSUM ||
+ mask == RTE_MBUF_F_TX_UDP_CKSUM)
+ AXGMAC_SET_BITS_LE(desc->desc3,
+ TX_NORMAL_DESC3, CIC, 0x3);
+ else if (mbuf->ol_flags & RTE_MBUF_F_TX_IP_CKSUM)
+ AXGMAC_SET_BITS_LE(desc->desc3,
+ TX_NORMAL_DESC3, CIC, 0x1);
+
+ rte_wmb();
+
+ /* Set OWN bit */
+ AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, OWN, 1);
+ rte_wmb();
+
+ /* Save mbuf */
+ txq->sw_ring[idx] = tx_pkt;
+ /* Update current index*/
+ txq->cur++;
+
+ tx_pkt = tx_pkt->next;
+ }
+
+ /* Set LD bit for the last descriptor */
+ AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, LD, 1);
+ rte_wmb();
+
+ /* Update stats */
+ txq->bytes += pkt_len;
+
+ /* Set OWN bit for the first descriptor */
+ desc = &txq->desc[start_index];
+ AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, OWN, 1);
+ rte_wmb();
+
+ return 0;
+}
+
+/* Eal supported tx wrapper- Segmented*/ uint16_t
+axgbe_xmit_pkts_seg(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ PMD_INIT_FUNC_TRACE();
+
+ struct axgbe_tx_queue *txq;
+ uint16_t nb_desc_free;
+ uint16_t nb_pkt_sent = 0;
+ uint16_t idx;
+ uint32_t tail_addr;
+ struct rte_mbuf *mbuf = NULL;
+
+ if (unlikely(nb_pkts == 0))
+ return nb_pkts;
+
+ txq = (struct axgbe_tx_queue *)tx_queue;
+
+ nb_desc_free = txq->nb_desc - (txq->cur - txq->dirty);
+ if (unlikely(nb_desc_free <= txq->free_thresh)) {
+ axgbe_xmit_cleanup_seg(txq);
+ nb_desc_free = txq->nb_desc - (txq->cur - txq->dirty);
+ if (unlikely(nb_desc_free == 0))
+ return 0;
+ }
+
+ while (nb_pkts--) {
+ mbuf = *tx_pkts++;
+
+ if (axgbe_xmit_hw_seg(txq, mbuf))
+ goto out;
+ nb_pkt_sent++;
+ }
+out:
+ /* Sync read and write */
+ rte_mb();
+ idx = AXGBE_GET_DESC_IDX(txq, txq->cur);
+ tail_addr = low32_value(txq->ring_phys_addr +
+ idx * sizeof(struct axgbe_tx_desc));
+ /* Update tail reg with next immediate address to kick Tx DMA channel*/
+ AXGMAC_DMA_IOWRITE(txq, DMA_CH_TDTR_LO, tail_addr);
+ txq->pkts += nb_pkt_sent;
+ return nb_pkt_sent;
+}
+
/* Eal supported tx wrapper*/
uint16_t
axgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, diff --git a/drivers/net/axgbe/axgbe_rxtx.h b/drivers/net/axgbe/axgbe_rxtx.h index 2a330339cd..c19d6d9db1 100644
--- a/drivers/net/axgbe/axgbe_rxtx.h
+++ b/drivers/net/axgbe/axgbe_rxtx.h
@@ -167,6 +167,10 @@ int axgbe_dev_fw_version_get(struct rte_eth_dev *eth_dev,
uint16_t axgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts);
+
+uint16_t axgbe_xmit_pkts_seg(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
+
uint16_t axgbe_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts);
--
2.25.1
^ permalink raw reply [flat|nested] 4+ messages in thread
* Re: [PATCH v2] net/axgbe: support segmented Tx
2022-09-09 9:31 ` Namburu, Chandu-babu
@ 2022-10-04 14:59 ` Andrew Rybchenko
0 siblings, 0 replies; 4+ messages in thread
From: Andrew Rybchenko @ 2022-10-04 14:59 UTC (permalink / raw)
To: Namburu, Chandu-babu, Modali, Bhagyada, Yigit, Ferruh; +Cc: dev, stable
On 9/9/22 12:31, Namburu, Chandu-babu wrote:
> -----Original Message-----
> From: Modali, Bhagyada <Bhagyada.Modali@amd.com>
> Sent: Thursday, September 8, 2022 11:45 PM
> To: Namburu, Chandu-babu <chandu@amd.com>; Yigit, Ferruh <Ferruh.Yigit@amd.com>
> Cc: dev@dpdk.org; stable@dpdk.org; Modali, Bhagyada <Bhagyada.Modali@amd.com>
> Subject: [PATCH v2] net/axgbe: support segmented Tx
>
> Enable segmented tx support and add jumbo packet transmit capability
>
> Signed-off-by: Bhagyada Modali <bhagyada.modali@amd.com>
Acked-by: Chandubabu Namburu <chandu@amd.com>
Applied to dpdk-next-net/main, thanks.