DPDK patches and discussions
* [PATCH v1 1/3] net/axgbe: packet size doesn't exceed the configured MTU
@ 2023-11-11 16:00 Jesna K E
  2023-11-11 16:00 ` [PATCH v1 2/3] net/axgbe: correct API call when offload enabled Jesna K E
                   ` (2 more replies)
  0 siblings, 3 replies; 16+ messages in thread
From: Jesna K E @ 2023-11-11 16:00 UTC (permalink / raw)
  To: dev; +Cc: Ferruh.Yigit, Selwin.Sebastian, Jesna K E

Signed-off-by: Jesna K E <jesna.k.e@amd.com>
---
 drivers/net/axgbe/axgbe_ethdev.c |  6 ------
 drivers/net/axgbe/axgbe_rxtx.c   | 20 ++++++++++++++++++--
 2 files changed, 18 insertions(+), 8 deletions(-)

diff --git a/drivers/net/axgbe/axgbe_ethdev.c b/drivers/net/axgbe/axgbe_ethdev.c
index 3717166384..e12ee3e17a 100644
--- a/drivers/net/axgbe/axgbe_ethdev.c
+++ b/drivers/net/axgbe/axgbe_ethdev.c
@@ -1492,12 +1492,6 @@ static int axgb_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
 	struct axgbe_port *pdata = dev->data->dev_private;
 	unsigned int val;
 
-	/* mtu setting is forbidden if port is start */
-	if (dev->data->dev_started) {
-		PMD_DRV_LOG(ERR, "port %d must be stopped before configuration",
-				dev->data->port_id);
-		return -EBUSY;
-	}
 	val = mtu > RTE_ETHER_MTU ? 1 : 0;
 	AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, JE, val);
 
diff --git a/drivers/net/axgbe/axgbe_rxtx.c b/drivers/net/axgbe/axgbe_rxtx.c
index a9ff291cef..68aa67a3fa 100644
--- a/drivers/net/axgbe/axgbe_rxtx.c
+++ b/drivers/net/axgbe/axgbe_rxtx.c
@@ -210,7 +210,7 @@ axgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 	uint64_t old_dirty = rxq->dirty;
 	struct rte_mbuf *mbuf, *tmbuf;
 	unsigned int err, etlt;
-	uint32_t error_status;
+	uint32_t error_status, max_len;
 	uint16_t idx, pidx, pkt_len;
 
 	idx = AXGBE_GET_DESC_IDX(rxq, rxq->cur);
@@ -300,6 +300,14 @@ axgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 					| RTE_MBUF_F_RX_IEEE1588_TMST;
 		pkt_len = AXGMAC_GET_BITS_LE(desc->write.desc3, RX_NORMAL_DESC3,
 					     PL) - rxq->crc_len;
+
+		/* Be sure we don't exceed the configured MTU */
+		max_len = rxq->pdata->eth_dev->data->mtu  + RTE_ETHER_HDR_LEN;
+			if (pkt_len > max_len) {
+				printf( "packet length exceeds configured MTU\n");
+				goto err_set;
+			}
+
 		/* Mbuf populate */
 		mbuf->next = NULL;
 		mbuf->data_off = RTE_PKTMBUF_HEADROOM;
@@ -342,7 +350,7 @@ uint16_t eth_axgbe_recv_scattered_pkts(void *rx_queue,
 	struct rte_mbuf *first_seg = NULL;
 	struct rte_mbuf *mbuf, *tmbuf;
 	unsigned int err = 0, etlt;
-	uint32_t error_status = 0;
+	uint32_t error_status = 0, max_len = 0;
 	uint16_t idx, pidx, data_len = 0, pkt_len = 0;
 	bool eop = 0;
 
@@ -409,6 +417,14 @@ uint16_t eth_axgbe_recv_scattered_pkts(void *rx_queue,
 			}
 
 		}
+
+                /* Be sure we don't exceed the configured MTU */
+                max_len = rxq->pdata->eth_dev->data->mtu  + RTE_ETHER_HDR_LEN;
+                        if (pkt_len > max_len) {
+                                printf( "packet length exceeds configured MTU\n");
+                                goto err_set;
+                        }
+
 		/* Mbuf populate */
 		mbuf->data_off = RTE_PKTMBUF_HEADROOM;
 		mbuf->data_len = data_len;
-- 
2.34.1



* [PATCH v1 2/3] net/axgbe: correct API call when offload enabled
  2023-11-11 16:00 [PATCH v1 1/3] net/axgbe: packet size doesn't exceed the configured MTU Jesna K E
@ 2023-11-11 16:00 ` Jesna K E
  2023-11-13 15:23   ` Ferruh Yigit
  2023-11-11 16:00 ` [PATCH v1 3/3] net/axgbe: support TSO Implementation Jesna K E
  2023-11-13 15:07 ` [PATCH v1 1/3] net/axgbe: packet size doesn't exceed the configured MTU Ferruh Yigit
  2 siblings, 1 reply; 16+ messages in thread
From: Jesna K E @ 2023-11-11 16:00 UTC (permalink / raw)
  To: dev; +Cc: Ferruh.Yigit, Selwin.Sebastian, Jesna K E

Fixes: 9963b5131af8 ("net/axgbe: support multi-process")

Signed-off-by: Jesna K E <jesna.k.e@amd.com>
---
 drivers/net/axgbe/axgbe_ethdev.c | 10 ++++++----
 1 file changed, 6 insertions(+), 4 deletions(-)

diff --git a/drivers/net/axgbe/axgbe_ethdev.c b/drivers/net/axgbe/axgbe_ethdev.c
index e12ee3e17a..e1cb60c1c3 100644
--- a/drivers/net/axgbe/axgbe_ethdev.c
+++ b/drivers/net/axgbe/axgbe_ethdev.c
@@ -2130,16 +2130,18 @@ void
 axgbe_set_tx_function(struct rte_eth_dev *dev)
 {
 	struct axgbe_port *pdata = dev->data->dev_private;
+	struct axgbe_tx_queue *txq = dev->data->tx_queues[0];
 
 	if (pdata->multi_segs_tx)
 		dev->tx_pkt_burst = &axgbe_xmit_pkts_seg;
+	else if (txq->vector_disable ||
+			rte_vect_get_max_simd_bitwidth() < RTE_VECT_SIMD_128)
+		dev->tx_pkt_burst = &axgbe_xmit_pkts;
+	else
 #ifdef RTE_ARCH_X86
-	struct axgbe_tx_queue *txq = dev->data->tx_queues[0];
-	if (!txq->vector_disable &&
-			rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128)
 		dev->tx_pkt_burst = &axgbe_xmit_pkts_vec;
 #else
-	dev->tx_pkt_burst = &axgbe_xmit_pkts;
+		dev->tx_pkt_burst = &axgbe_xmit_pkts;
 #endif
 }
 
-- 
2.34.1



* [PATCH v1 3/3] net/axgbe: support TSO Implementation
  2023-11-11 16:00 [PATCH v1 1/3] net/axgbe: packet size doesn't exceed the configured MTU Jesna K E
  2023-11-11 16:00 ` [PATCH v1 2/3] net/axgbe: correct API call when offload enabled Jesna K E
@ 2023-11-11 16:00 ` Jesna K E
  2023-11-15 19:33   ` Ferruh Yigit
  2023-11-13 15:07 ` [PATCH v1 1/3] net/axgbe: packet size doesn't exceed the configured MTU Ferruh Yigit
  2 siblings, 1 reply; 16+ messages in thread
From: Jesna K E @ 2023-11-11 16:00 UTC (permalink / raw)
  To: dev; +Cc: Ferruh.Yigit, Selwin.Sebastian, Jesna K E

Signed-off-by: Jesna K E <jesna.k.e@amd.com>
---
 drivers/net/axgbe/axgbe_common.h       |  11 +
 drivers/net/axgbe/axgbe_dev.c          |  19 ++
 drivers/net/axgbe/axgbe_ethdev.c       |   1 +
 drivers/net/axgbe/axgbe_ethdev.h       |   1 +
 drivers/net/axgbe/axgbe_rxtx.c         | 305 +++++++++++++++----------
 drivers/net/axgbe/axgbe_rxtx_vec_sse.c |   1 +
 6 files changed, 223 insertions(+), 115 deletions(-)

diff --git a/drivers/net/axgbe/axgbe_common.h b/drivers/net/axgbe/axgbe_common.h
index a5d11c5832..1face6f361 100644
--- a/drivers/net/axgbe/axgbe_common.h
+++ b/drivers/net/axgbe/axgbe_common.h
@@ -162,6 +162,9 @@
 #define DMA_CH_SR			0x60
 
 /* DMA channel register entry bit positions and sizes */
+//TSO
+#define DMA_CH_CR_MSS_INDEX             0
+#define DMA_CH_CR_MSS_WIDTH             14
 #define DMA_CH_CR_PBLX8_INDEX		16
 #define DMA_CH_CR_PBLX8_WIDTH		1
 #define DMA_CH_CR_SPH_INDEX		24
@@ -1232,6 +1235,14 @@
 #define TX_CONTEXT_DESC3_VT_INDEX		0
 #define TX_CONTEXT_DESC3_VT_WIDTH		16
 
+//TSO
+#define TX_NORMAL_DESC3_TPL_INDEX               0
+#define TX_NORMAL_DESC3_TPL_WIDTH               18
+#define TX_NORMAL_DESC3_THL_INDEX               19
+#define TX_NORMAL_DESC3_THL_WIDTH               4
+#define TX_CONTEXT_DESC3_OSTC_INDEX             27
+#define TX_CONTEXT_DESC3_OSTC_WIDTH             1
+
 #define TX_NORMAL_DESC2_HL_B1L_INDEX		0
 #define TX_NORMAL_DESC2_HL_B1L_WIDTH		14
 #define TX_NORMAL_DESC2_IC_INDEX		31
diff --git a/drivers/net/axgbe/axgbe_dev.c b/drivers/net/axgbe/axgbe_dev.c
index 6a7fddffca..7e0d387fc3 100644
--- a/drivers/net/axgbe/axgbe_dev.c
+++ b/drivers/net/axgbe/axgbe_dev.c
@@ -808,6 +808,24 @@ int axgbe_write_rss_lookup_table(struct axgbe_port *pdata)
 	return 0;
 }
 
+
+static void xgbe_config_tso_mode(struct axgbe_port *pdata)
+{
+        unsigned int i;
+
+        struct axgbe_tx_queue *txq;
+
+        for (i = 0; i < pdata->eth_dev->data->nb_tx_queues; i++) {
+                txq = pdata->eth_dev->data->tx_queues[i];
+                AXGMAC_DMA_IOWRITE_BITS(txq,DMA_CH_TCR, TSE,
+                                        1);
+		AXGMAC_DMA_IOWRITE_BITS(txq,DMA_CH_CR, MSS,
+					800);
+        }
+
+}
+
+
 static int axgbe_enable_rss(struct axgbe_port *pdata)
 {
 	int ret;
@@ -1314,6 +1332,7 @@ static int axgbe_init(struct axgbe_port *pdata)
 	axgbe_config_rx_pbl_val(pdata);
 	axgbe_config_rx_buffer_size(pdata);
 	axgbe_config_rss(pdata);
+	xgbe_config_tso_mode(pdata);
 	wrapper_tx_desc_init(pdata);
 	ret = wrapper_rx_desc_init(pdata);
 	if (ret)
diff --git a/drivers/net/axgbe/axgbe_ethdev.c b/drivers/net/axgbe/axgbe_ethdev.c
index e1cb60c1c3..5aa8743a1a 100644
--- a/drivers/net/axgbe/axgbe_ethdev.c
+++ b/drivers/net/axgbe/axgbe_ethdev.c
@@ -1237,6 +1237,7 @@ axgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM  |
 		RTE_ETH_TX_OFFLOAD_MULTI_SEGS  |
 		RTE_ETH_TX_OFFLOAD_UDP_CKSUM   |
+		RTE_ETH_TX_OFFLOAD_TCP_TSO     |
 		RTE_ETH_TX_OFFLOAD_TCP_CKSUM;
 
 	if (pdata->hw_feat.rss) {
diff --git a/drivers/net/axgbe/axgbe_ethdev.h b/drivers/net/axgbe/axgbe_ethdev.h
index 7f19321d88..31a583c2c6 100644
--- a/drivers/net/axgbe/axgbe_ethdev.h
+++ b/drivers/net/axgbe/axgbe_ethdev.h
@@ -583,6 +583,7 @@ struct axgbe_port {
 	unsigned int tx_osp_mode;
 	unsigned int tx_max_fifo_size;
 	unsigned int multi_segs_tx;
+	unsigned int tso_tx;
 
 	/* Rx settings */
 	unsigned int rx_sf_mode;
diff --git a/drivers/net/axgbe/axgbe_rxtx.c b/drivers/net/axgbe/axgbe_rxtx.c
index 68aa67a3fa..6b5ea6d622 100644
--- a/drivers/net/axgbe/axgbe_rxtx.c
+++ b/drivers/net/axgbe/axgbe_rxtx.c
@@ -643,6 +643,10 @@ int axgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 				RTE_ETH_TX_OFFLOAD_MULTI_SEGS))
 		pdata->multi_segs_tx = true;
 
+	if ((dev_data->dev_conf.txmode.offloads &
+                               RTE_ETH_TX_OFFLOAD_TCP_TSO))
+               pdata->tso_tx = true;
+
 
 	return 0;
 }
@@ -843,7 +847,7 @@ static int axgbe_xmit_hw(struct axgbe_tx_queue *txq,
 
 	idx = AXGBE_GET_DESC_IDX(txq, txq->cur);
 	desc = &txq->desc[idx];
-
+	printf("tso::Inside axgbe_xmit_hw \n");
 	/* Update buffer address  and length */
 	desc->baddr = rte_mbuf_data_iova(mbuf);
 	AXGMAC_SET_BITS_LE(desc->desc2, TX_NORMAL_DESC2, HL_B1L,
@@ -889,7 +893,6 @@ static int axgbe_xmit_hw(struct axgbe_tx_queue *txq,
 	AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, OWN, 1);
 	rte_wmb();
 
-
 	/* Save mbuf */
 	txq->sw_ring[idx] = mbuf;
 	/* Update current index*/
@@ -900,138 +903,208 @@ static int axgbe_xmit_hw(struct axgbe_tx_queue *txq,
 	return 0;
 }
 
+
 /* Tx Descriptor formation for segmented mbuf
  * Each mbuf will require multiple descriptors
  */
 
 static int
 axgbe_xmit_hw_seg(struct axgbe_tx_queue *txq,
-		struct rte_mbuf *mbuf)
+                struct rte_mbuf *mbuf)
 {
-	volatile struct axgbe_tx_desc *desc;
-	uint16_t idx;
-	uint64_t mask;
-	int start_index;
-	uint32_t pkt_len = 0;
-	int nb_desc_free;
-	struct rte_mbuf  *tx_pkt;
+        volatile struct axgbe_tx_desc *desc;
+        uint16_t idx;
+        uint64_t mask;
+        int start_index;
+        uint32_t pkt_len = 0;
+        int nb_desc_free;
+        struct rte_mbuf  *tx_pkt;
+        uint64_t l2_len = 0;
+        uint64_t l3_len = 0;
+        uint64_t l4_len = 0;
+        uint64_t tso_segsz = 0;
+        uint64_t total_hdr_len;
+	int tso = 0;
+
+        /*Parameters required for tso*/
+        l2_len = mbuf->l2_len;
+        l3_len = mbuf->l3_len;
+        l4_len = mbuf->l4_len;
+        tso_segsz = mbuf->tso_segsz;
+        total_hdr_len = l2_len + l3_len + l4_len;
+
+        if ((txq->pdata->tso_tx))
+                tso = 1;
+        else
+                tso = 0;
+
+        printf("tso:l2_len = %ld,l3_len=%ld,l4_len=%ld,tso_segsz=%ld,total_hdr_len%ld\n",l2_len,l3_len,l4_len,
+                         tso_segsz,total_hdr_len);
+
+        nb_desc_free = txq->nb_desc - (txq->cur - txq->dirty);
+
+        printf("tso::Inside axgbe_xmit_hw_seg \n");
+        if (mbuf->nb_segs > nb_desc_free) {
+                axgbe_xmit_cleanup_seg(txq);
+                nb_desc_free = txq->nb_desc - (txq->cur - txq->dirty);
+                if (unlikely(mbuf->nb_segs > nb_desc_free))
+                        return RTE_ETH_TX_DESC_UNAVAIL;
+        }
+
+        idx = AXGBE_GET_DESC_IDX(txq, txq->cur);
+        desc = &txq->desc[idx];
+        /* Saving the start index for setting the OWN bit finally */
+        start_index = idx;
+	tx_pkt = mbuf;
+        /* Max_pkt len = 9018 ; need to update it according to Jumbo pkt size */
+        pkt_len = tx_pkt->pkt_len;
 
-	nb_desc_free = txq->nb_desc - (txq->cur - txq->dirty);
+        /* Update buffer address  and length */
+       desc->baddr = rte_pktmbuf_iova_offset(mbuf,0);
+       /*For TSO first buffer contains the Header */
+       if (tso)
+	AXGMAC_SET_BITS_LE(desc->desc2, TX_NORMAL_DESC2, HL_B1L,
+                                           total_hdr_len);
+	else
+        AXGMAC_SET_BITS_LE(desc->desc2, TX_NORMAL_DESC2, HL_B1L,
+                                           tx_pkt->data_len);
 
-	if (mbuf->nb_segs > nb_desc_free) {
-		axgbe_xmit_cleanup_seg(txq);
-		nb_desc_free = txq->nb_desc - (txq->cur - txq->dirty);
-		if (unlikely(mbuf->nb_segs > nb_desc_free))
-			return RTE_ETH_TX_DESC_UNAVAIL;
-	}
+	rte_wmb();
 
+	/* Timestamp enablement check */
+        if (mbuf->ol_flags & RTE_MBUF_F_TX_IEEE1588_TMST)
+                AXGMAC_SET_BITS_LE(desc->desc2, TX_NORMAL_DESC2, TTSE, 1);
+
+        rte_wmb();
+        /* Mark it as First Descriptor */
+        AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, FD, 1);
+        /* Mark it as a NORMAL descriptor */
+        AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, CTXT, 0);
+        /* configure h/w Offload */
+        mask = mbuf->ol_flags & RTE_MBUF_F_TX_L4_MASK;
+        if (mask == RTE_MBUF_F_TX_TCP_CKSUM || mask == RTE_MBUF_F_TX_UDP_CKSUM)
+                AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, CIC, 0x3);
+        else if (mbuf->ol_flags & RTE_MBUF_F_TX_IP_CKSUM)
+                AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, CIC, 0x1);
+        rte_wmb();
+
+        if (mbuf->ol_flags & (RTE_MBUF_F_TX_VLAN | RTE_MBUF_F_TX_QINQ)) {
+                /* Mark it as a CONTEXT descriptor */
+                AXGMAC_SET_BITS_LE(desc->desc3, TX_CONTEXT_DESC3,
+                                CTXT, 1);
+                /* Set the VLAN tag */
+                AXGMAC_SET_BITS_LE(desc->desc3, TX_CONTEXT_DESC3,
+                                VT, mbuf->vlan_tci);
+                /* Indicate this descriptor contains the VLAN tag */
+                AXGMAC_SET_BITS_LE(desc->desc3, TX_CONTEXT_DESC3,
+                                VLTV, 1);
+                AXGMAC_SET_BITS_LE(desc->desc2, TX_NORMAL_DESC2, VTIR,
+                                TX_NORMAL_DESC2_VLAN_INSERT);
+        } else {
+                AXGMAC_SET_BITS_LE(desc->desc2, TX_NORMAL_DESC2, VTIR, 0x0);
+        }
+        rte_wmb();
+
+	/*Register settings for TSO*/
+        if (tso) {
+                printf("Inside register setting-tso\n");
+                /* Enable TSO */
+                AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, TSE,1);
+                AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, TPL,
+                                ((mbuf->pkt_len)-total_hdr_len));
+                AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, THL,
+                                l4_len);
+        } else {
+                /* Enable CRC and Pad Insertion */
+                AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, CPC, 0);
+                /* Total msg length to transmit */
+                AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, FL,
+                                mbuf->pkt_len);
+        }
+#if 0
+	/*For TSO , needs one more descriptor to hold
+	 * the Payload
+	 * But while adding another descriptor packets are not
+	 * transmitted */
+      /* Save mbuf */
+        txq->sw_ring[idx] = tx_pkt;
+        /* Update current index*/
+        txq->cur++;
 	idx = AXGBE_GET_DESC_IDX(txq, txq->cur);
 	desc = &txq->desc[idx];
-	/* Saving the start index for setting the OWN bit finally */
-	start_index = idx;
+	desc->baddr = rte_pktmbuf_iova_offset(mbuf,total_hdr_len);
+	AXGMAC_SET_BITS_LE(desc->desc2,
+			TX_NORMAL_DESC2, HL_B1L, (mbuf->pkt_len)-total_hdr_len));
 
-	tx_pkt = mbuf;
-	/* Max_pkt len = 9018 ; need to update it according to Jumbo pkt size */
-	pkt_len = tx_pkt->pkt_len;
+	printf("(mbuf->pkt_len)-total_hdr_len=%d\n",(mbuf->pkt_len)-total_hdr_len);
+        printf("total_hdr_len=%d\n",total_hdr_len);
 
-	/* Update buffer address  and length */
-	desc->baddr = rte_mbuf_data_iova(tx_pkt);
-	AXGMAC_SET_BITS_LE(desc->desc2, TX_NORMAL_DESC2, HL_B1L,
-					   tx_pkt->data_len);
-	/* Total msg length to transmit */
-	AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, FL,
-					   tx_pkt->pkt_len);
-	/* Timestamp enablement check */
-	if (mbuf->ol_flags & RTE_MBUF_F_TX_IEEE1588_TMST)
-		AXGMAC_SET_BITS_LE(desc->desc2, TX_NORMAL_DESC2, TTSE, 1);
-
-	rte_wmb();
-	/* Mark it as First Descriptor */
-	AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, FD, 1);
-	/* Mark it as a NORMAL descriptor */
 	AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, CTXT, 0);
-	/* configure h/w Offload */
-	mask = mbuf->ol_flags & RTE_MBUF_F_TX_L4_MASK;
-	if (mask == RTE_MBUF_F_TX_TCP_CKSUM || mask == RTE_MBUF_F_TX_UDP_CKSUM)
-		AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, CIC, 0x3);
-	else if (mbuf->ol_flags & RTE_MBUF_F_TX_IP_CKSUM)
-		AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, CIC, 0x1);
-	rte_wmb();
-
-	if (mbuf->ol_flags & (RTE_MBUF_F_TX_VLAN | RTE_MBUF_F_TX_QINQ)) {
-		/* Mark it as a CONTEXT descriptor */
-		AXGMAC_SET_BITS_LE(desc->desc3, TX_CONTEXT_DESC3,
-				CTXT, 1);
-		/* Set the VLAN tag */
-		AXGMAC_SET_BITS_LE(desc->desc3, TX_CONTEXT_DESC3,
-				VT, mbuf->vlan_tci);
-		/* Indicate this descriptor contains the VLAN tag */
-		AXGMAC_SET_BITS_LE(desc->desc3, TX_CONTEXT_DESC3,
-				VLTV, 1);
-		AXGMAC_SET_BITS_LE(desc->desc2, TX_NORMAL_DESC2, VTIR,
-				TX_NORMAL_DESC2_VLAN_INSERT);
-	} else {
-		AXGMAC_SET_BITS_LE(desc->desc2, TX_NORMAL_DESC2, VTIR, 0x0);
-	}
+	AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, OWN, 1);
 	rte_wmb();
-
-	/* Save mbuf */
-	txq->sw_ring[idx] = tx_pkt;
-	/* Update current index*/
 	txq->cur++;
-
-	tx_pkt = tx_pkt->next;
+#endif
+#if 1
+        /* Save mbuf */
+        txq->sw_ring[idx] = tx_pkt;
+        /* Update current index*/
+        txq->cur++;
+#endif
+        tx_pkt = tx_pkt->next;
 
 	while (tx_pkt != NULL) {
-		idx = AXGBE_GET_DESC_IDX(txq, txq->cur);
-		desc = &txq->desc[idx];
-
-		/* Update buffer address  and length */
-		desc->baddr = rte_mbuf_data_iova(tx_pkt);
-
-		AXGMAC_SET_BITS_LE(desc->desc2,
-				TX_NORMAL_DESC2, HL_B1L, tx_pkt->data_len);
-
-		rte_wmb();
-
-		/* Mark it as a NORMAL descriptor */
-		AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, CTXT, 0);
-		/* configure h/w Offload */
-		mask = mbuf->ol_flags & RTE_MBUF_F_TX_L4_MASK;
-		if (mask == RTE_MBUF_F_TX_TCP_CKSUM ||
-				mask == RTE_MBUF_F_TX_UDP_CKSUM)
-			AXGMAC_SET_BITS_LE(desc->desc3,
-					TX_NORMAL_DESC3, CIC, 0x3);
-		else if (mbuf->ol_flags & RTE_MBUF_F_TX_IP_CKSUM)
-			AXGMAC_SET_BITS_LE(desc->desc3,
-					TX_NORMAL_DESC3, CIC, 0x1);
-
-		rte_wmb();
-
-		 /* Set OWN bit */
-		AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, OWN, 1);
-		rte_wmb();
-
-		/* Save mbuf */
-		txq->sw_ring[idx] = tx_pkt;
-		/* Update current index*/
-		txq->cur++;
-
-		tx_pkt = tx_pkt->next;
-	}
-
-	/* Set LD bit for the last descriptor */
-	AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, LD, 1);
-	rte_wmb();
-
-	/* Update stats */
-	txq->bytes += pkt_len;
-
-	/* Set OWN bit for the first descriptor */
-	desc = &txq->desc[start_index];
-	AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, OWN, 1);
-	rte_wmb();
-
+                idx = AXGBE_GET_DESC_IDX(txq, txq->cur);
+                desc = &txq->desc[idx];
+
+		if (tso)
+		desc->baddr = rte_pktmbuf_iova_offset(mbuf,total_hdr_len);
+		else
+                /* Update buffer address  and length */
+                desc->baddr = rte_mbuf_data_iova(tx_pkt);
+
+                AXGMAC_SET_BITS_LE(desc->desc2,
+                                TX_NORMAL_DESC2, HL_B1L, tx_pkt->data_len);
+
+                rte_wmb();
+
+                /* Mark it as a NORMAL descriptor */
+                AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, CTXT, 0);
+                /* configure h/w Offload */
+                mask = mbuf->ol_flags & RTE_MBUF_F_TX_L4_MASK;
+                if (mask == RTE_MBUF_F_TX_TCP_CKSUM ||
+                                mask == RTE_MBUF_F_TX_UDP_CKSUM)
+                        AXGMAC_SET_BITS_LE(desc->desc3,
+                                        TX_NORMAL_DESC3, CIC, 0x3);
+                else if (mbuf->ol_flags & RTE_MBUF_F_TX_IP_CKSUM)
+                        AXGMAC_SET_BITS_LE(desc->desc3,
+                                        TX_NORMAL_DESC3, CIC, 0x1);
+
+                rte_wmb();
+
+                 /* Set OWN bit */
+                AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, OWN, 1);
+                rte_wmb();
+
+                /* Save mbuf */
+                txq->sw_ring[idx] = tx_pkt;
+                /* Update current index*/
+                txq->cur++;
+
+                tx_pkt = tx_pkt->next;
+        }
+
+        /* Set LD bit for the last descriptor */
+        AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, LD, 1);
+        rte_wmb();
+
+	printf("tso:: pkt_len = %d\n",pkt_len);
+        /* Update stats */
+        txq->bytes += pkt_len;
+
+        /* Set OWN bit for the first descriptor */
+        desc = &txq->desc[start_index];
+        AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, OWN, 1);
+        rte_wmb();
 	return 0;
 }
 
@@ -1077,6 +1150,8 @@ axgbe_xmit_pkts_seg(void *tx_queue, struct rte_mbuf **tx_pkts,
 				idx * sizeof(struct axgbe_tx_desc));
 	/* Update tail reg with next immediate address to kick Tx DMA channel*/
 	AXGMAC_DMA_IOWRITE(txq, DMA_CH_TDTR_LO, tail_addr);
+
+
 	txq->pkts += nb_pkt_sent;
 	return nb_pkt_sent;
 }
diff --git a/drivers/net/axgbe/axgbe_rxtx_vec_sse.c b/drivers/net/axgbe/axgbe_rxtx_vec_sse.c
index d95a446bef..7034d5737a 100644
--- a/drivers/net/axgbe/axgbe_rxtx_vec_sse.c
+++ b/drivers/net/axgbe/axgbe_rxtx_vec_sse.c
@@ -65,6 +65,7 @@ axgbe_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
 	uint16_t idx, nb_commit, loop, i;
 	uint32_t tail_addr;
 
+	printf("jesna::Inside axgbe_xmit_pkts_vec \n");
 	txq  = (struct axgbe_tx_queue *)tx_queue;
 	if (txq->nb_desc_free < txq->free_thresh) {
 		axgbe_xmit_cleanup_vec(txq);
-- 
2.34.1



* Re: [PATCH v1 1/3] net/axgbe: packet size doesn't exceed the configured MTU
  2023-11-11 16:00 [PATCH v1 1/3] net/axgbe: packet size doesn't exceed the configured MTU Jesna K E
  2023-11-11 16:00 ` [PATCH v1 2/3] net/axgbe: correct API call when offload enabled Jesna K E
  2023-11-11 16:00 ` [PATCH v1 3/3] net/axgbe: support TSO Implementation Jesna K E
@ 2023-11-13 15:07 ` Ferruh Yigit
  2 siblings, 0 replies; 16+ messages in thread
From: Ferruh Yigit @ 2023-11-13 15:07 UTC (permalink / raw)
  To: Jesna K E, dev; +Cc: Selwin.Sebastian

On 11/11/2023 4:00 PM, Jesna K E wrote:
> Signed-off-by: Jesna K E <jesna.k.e@amd.com>
>

Hi Jesna,

Description is missing, making it hard to understand the problem and
motivation of the change.


> ---
>  drivers/net/axgbe/axgbe_ethdev.c |  6 ------
>  drivers/net/axgbe/axgbe_rxtx.c   | 20 ++++++++++++++++++--
>  2 files changed, 18 insertions(+), 8 deletions(-)
> 
> diff --git a/drivers/net/axgbe/axgbe_ethdev.c b/drivers/net/axgbe/axgbe_ethdev.c
> index 3717166384..e12ee3e17a 100644
> --- a/drivers/net/axgbe/axgbe_ethdev.c
> +++ b/drivers/net/axgbe/axgbe_ethdev.c
> @@ -1492,12 +1492,6 @@ static int axgb_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
>  	struct axgbe_port *pdata = dev->data->dev_private;
>  	unsigned int val;
>  
> -	/* mtu setting is forbidden if port is start */
> -	if (dev->data->dev_started) {
> -		PMD_DRV_LOG(ERR, "port %d must be stopped before configuration",
> -				dev->data->port_id);
> -		return -EBUSY;
> -	}
>

Is it allowed to configure MTU when port is already started?

>  	val = mtu > RTE_ETHER_MTU ? 1 : 0;
>  	AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, JE, val);
>  
> diff --git a/drivers/net/axgbe/axgbe_rxtx.c b/drivers/net/axgbe/axgbe_rxtx.c
> index a9ff291cef..68aa67a3fa 100644
> --- a/drivers/net/axgbe/axgbe_rxtx.c
> +++ b/drivers/net/axgbe/axgbe_rxtx.c
> @@ -210,7 +210,7 @@ axgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
>  	uint64_t old_dirty = rxq->dirty;
>  	struct rte_mbuf *mbuf, *tmbuf;
>  	unsigned int err, etlt;
> -	uint32_t error_status;
> +	uint32_t error_status, max_len;
>  	uint16_t idx, pidx, pkt_len;
>  
>  	idx = AXGBE_GET_DESC_IDX(rxq, rxq->cur);
> @@ -300,6 +300,14 @@ axgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
>  					| RTE_MBUF_F_RX_IEEE1588_TMST;
>  		pkt_len = AXGMAC_GET_BITS_LE(desc->write.desc3, RX_NORMAL_DESC3,
>  					     PL) - rxq->crc_len;
> +
> +		/* Be sure we don't exceed the configured MTU */
> +		max_len = rxq->pdata->eth_dev->data->mtu  + RTE_ETHER_HDR_LEN;
> +			if (pkt_len > max_len) {
> +				printf( "packet length exceeds configured MTU\n");
> +				goto err_set;
> +			}
> +
>

Not sure it is a good idea to add the above check per packet; can there be
a device configuration to limit the received packet size instead?
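
If the check stays, the limit can at least be computed once per burst
instead of once per packet, e.g. (untested sketch, fields as in the
diff above):

  /* before the packet loop, computed once per burst */
  uint32_t max_len = rxq->pdata->eth_dev->data->mtu + RTE_ETHER_HDR_LEN;

  /* inside the loop, only the comparison remains */
  if (unlikely(pkt_len > max_len))
          goto err_set;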


>  		/* Mbuf populate */
>  		mbuf->next = NULL;
>  		mbuf->data_off = RTE_PKTMBUF_HEADROOM;
> @@ -342,7 +350,7 @@ uint16_t eth_axgbe_recv_scattered_pkts(void *rx_queue,
>  	struct rte_mbuf *first_seg = NULL;
>  	struct rte_mbuf *mbuf, *tmbuf;
>  	unsigned int err = 0, etlt;
> -	uint32_t error_status = 0;
> +	uint32_t error_status = 0, max_len = 0;
>  	uint16_t idx, pidx, data_len = 0, pkt_len = 0;
>  	bool eop = 0;
>  
> @@ -409,6 +417,14 @@ uint16_t eth_axgbe_recv_scattered_pkts(void *rx_queue,
>  			}
>  
>  		}
> +
> +                /* Be sure we don't exceed the configured MTU */
> +                max_len = rxq->pdata->eth_dev->data->mtu  + RTE_ETHER_HDR_LEN;
> +                        if (pkt_len > max_len) {
> +                                printf( "packet length exceeds configured MTU\n");
> +                                goto err_set;
> +                        }
> +
>  		/* Mbuf populate */
>  		mbuf->data_off = RTE_PKTMBUF_HEADROOM;
>  		mbuf->data_len = data_len;



* Re: [PATCH v1 2/3] net/axgbe: correct API call when offload enabled
  2023-11-11 16:00 ` [PATCH v1 2/3] net/axgbe: correct API call when offload enabled Jesna K E
@ 2023-11-13 15:23   ` Ferruh Yigit
  2023-11-13 16:55     ` Ferruh Yigit
  0 siblings, 1 reply; 16+ messages in thread
From: Ferruh Yigit @ 2023-11-13 15:23 UTC (permalink / raw)
  To: Jesna K E, dev; +Cc: Selwin.Sebastian

On 11/11/2023 4:00 PM, Jesna K E wrote:
> Fixes: 9963b5131af8 ("net/axgbe: support multi-process")
> 

Can you please add more description?


> Signed-off-by: Jesna K E <jesna.k.e@amd.com>
> ---
>  drivers/net/axgbe/axgbe_ethdev.c | 10 ++++++----
>  1 file changed, 6 insertions(+), 4 deletions(-)
> 
> diff --git a/drivers/net/axgbe/axgbe_ethdev.c b/drivers/net/axgbe/axgbe_ethdev.c
> index e12ee3e17a..e1cb60c1c3 100644
> --- a/drivers/net/axgbe/axgbe_ethdev.c
> +++ b/drivers/net/axgbe/axgbe_ethdev.c
> @@ -2130,16 +2130,18 @@ void
>  axgbe_set_tx_function(struct rte_eth_dev *dev)
>  {
>  	struct axgbe_port *pdata = dev->data->dev_private;
> +	struct axgbe_tx_queue *txq = dev->data->tx_queues[0];
>  
>  	if (pdata->multi_segs_tx)
>  		dev->tx_pkt_burst = &axgbe_xmit_pkts_seg;
> +	else if (txq->vector_disable ||
> +			rte_vect_get_max_simd_bitwidth() < RTE_VECT_SIMD_128)
> +		dev->tx_pkt_burst = &axgbe_xmit_pkts;
> +	else
>  #ifdef RTE_ARCH_X86
> -	struct axgbe_tx_queue *txq = dev->data->tx_queues[0];
> -	if (!txq->vector_disable &&
> -			rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128)
>  		dev->tx_pkt_burst = &axgbe_xmit_pkts_vec;
>  #else
> -	dev->tx_pkt_burst = &axgbe_xmit_pkts;
> +		dev->tx_pkt_burst = &axgbe_xmit_pkts;
>  #endif
>  }
>  

What about the following for simplification:

{
  tx_pkt_burst = &axgbe_xmit_pkts;

  if (pdata->multi_segs_tx)
    tx_pkt_burst = &axgbe_xmit_pkts_seg;
  else if (!vector_disable &&
      rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128)
    tx_pkt_burst = &axgbe_xmit_pkts_vec;
}
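
Spelled out against the current function, that would be something like
the below (untested, keeping the arch guard only around the vector
path):

  dev->tx_pkt_burst = &axgbe_xmit_pkts;

  if (pdata->multi_segs_tx)
          dev->tx_pkt_burst = &axgbe_xmit_pkts_seg;
#ifdef RTE_ARCH_X86
  else {
          struct axgbe_tx_queue *txq = dev->data->tx_queues[0];

          if (!txq->vector_disable &&
              rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128)
                  dev->tx_pkt_burst = &axgbe_xmit_pkts_vec;
  }
#endif

That way the scalar axgbe_xmit_pkts is the default on every arch, and
the vector path can only replace the default, never the multi-segment
one.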


* Re: [PATCH v1 2/3] net/axgbe: correct API call when offload enabled
  2023-11-13 15:23   ` Ferruh Yigit
@ 2023-11-13 16:55     ` Ferruh Yigit
  2023-11-14  6:07       ` [PATCH v2] net/axgbe: invoke correct API when offloads enabled Jesna K E
                         ` (2 more replies)
  0 siblings, 3 replies; 16+ messages in thread
From: Ferruh Yigit @ 2023-11-13 16:55 UTC (permalink / raw)
  To: Jesna K E, dev; +Cc: Selwin.Sebastian

On 11/13/2023 3:23 PM, Ferruh Yigit wrote:
> On 11/11/2023 4:00 PM, Jesna K E wrote:
>> Fixes: 9963b5131af8 ("net/axgbe: support multi-process")
>>
> 
> Can you please add more description?
> 
> 
>> Signed-off-by: Jesna K E <jesna.k.e@amd.com>
>> ---
>>  drivers/net/axgbe/axgbe_ethdev.c | 10 ++++++----
>>  1 file changed, 6 insertions(+), 4 deletions(-)
>>
>> diff --git a/drivers/net/axgbe/axgbe_ethdev.c b/drivers/net/axgbe/axgbe_ethdev.c
>> index e12ee3e17a..e1cb60c1c3 100644
>> --- a/drivers/net/axgbe/axgbe_ethdev.c
>> +++ b/drivers/net/axgbe/axgbe_ethdev.c
>> @@ -2130,16 +2130,18 @@ void
>>  axgbe_set_tx_function(struct rte_eth_dev *dev)
>>  {
>>  	struct axgbe_port *pdata = dev->data->dev_private;
>> +	struct axgbe_tx_queue *txq = dev->data->tx_queues[0];
>>  
>>  	if (pdata->multi_segs_tx)
>>  		dev->tx_pkt_burst = &axgbe_xmit_pkts_seg;
>> +	else if (txq->vector_disable ||
>> +			rte_vect_get_max_simd_bitwidth() < RTE_VECT_SIMD_128)
>> +		dev->tx_pkt_burst = &axgbe_xmit_pkts;
>> +	else
>>  #ifdef RTE_ARCH_X86
>> -	struct axgbe_tx_queue *txq = dev->data->tx_queues[0];
>> -	if (!txq->vector_disable &&
>> -			rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128)
>>  		dev->tx_pkt_burst = &axgbe_xmit_pkts_vec;
>>  #else
>> -	dev->tx_pkt_burst = &axgbe_xmit_pkts;
>> +		dev->tx_pkt_burst = &axgbe_xmit_pkts;
>>  #endif
>>  }
>>  
> 
> What about the following for simplification:
> 
> {
>   tx_pkt_burst = &axgbe_xmit_pkts;
> 
>   if (pdata->multi_segs_tx)
>     tx_pkt_burst = &axgbe_xmit_pkts_seg;
>   else if (!vector_disable &&
>       rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128)
>     tx_pkt_burst = &axgbe_xmit_pkts_vec;
> }

Btw, there is a build error [1] about an implicit declaration of a function
and an enum, which can be fixed by including the relevant header [2].


[1]
https://mails.dpdk.org/archives/test-report/2023-November/508556.html

[2]
 + #include <rte_vect.h>



* [PATCH v2] net/axgbe: invoke correct API when offloads enabled
  2023-11-13 16:55     ` Ferruh Yigit
@ 2023-11-14  6:07       ` Jesna K E
  2023-11-14  7:15       ` Jesna K E
  2023-11-15  5:56       ` [PATCH v3] " Jesna K E
  2 siblings, 0 replies; 16+ messages in thread
From: Jesna K E @ 2023-11-14  6:07 UTC (permalink / raw)
  To: dev; +Cc: Ferruh.Yigit, Selwin.Sebastian, Jesna K E

A bug was introduced with the recent fix: when
an offload feature is enabled, the axgbe_xmit_pkts_vec API is called
rather than the axgbe_xmit_pkts API. This patch fixes it.
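
The problematic flow, condensed from the code being fixed:

    if (pdata->multi_segs_tx)
            dev->tx_pkt_burst = &axgbe_xmit_pkts_seg;
#ifdef RTE_ARCH_X86
    struct axgbe_tx_queue *txq = dev->data->tx_queues[0];
    if (!txq->vector_disable &&
                    rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128)
            dev->tx_pkt_burst = &axgbe_xmit_pkts_vec;
#else
    dev->tx_pkt_burst = &axgbe_xmit_pkts;
#endif

On x86 the scalar fallback exists only in the #else branch, so
axgbe_xmit_pkts is never assigned there; assigning it first as the
default fixes this.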

Fixes: 9963b5131af8 ("net/axgbe: support multi-process")

Signed-off-by: Jesna K E <jesna.k.e@amd.com>
---
 drivers/net/axgbe/axgbe_ethdev.c | 10 ++++++----
 1 file changed, 6 insertions(+), 4 deletions(-)

diff --git a/drivers/net/axgbe/axgbe_ethdev.c b/drivers/net/axgbe/axgbe_ethdev.c
index 3717166384..9c6de2fdf8 100644
--- a/drivers/net/axgbe/axgbe_ethdev.c
+++ b/drivers/net/axgbe/axgbe_ethdev.c
@@ -12,6 +12,8 @@
 
 #include "eal_filesystem.h"
 
+#include <rte_vect.h>
+
 #ifdef RTE_ARCH_X86
 #include <cpuid.h>
 #else
@@ -2136,16 +2138,16 @@ void
 axgbe_set_tx_function(struct rte_eth_dev *dev)
 {
 	struct axgbe_port *pdata = dev->data->dev_private;
+	struct axgbe_tx_queue *txq = dev->data->tx_queues[0];
+
+	dev->tx_pkt_burst = &axgbe_xmit_pkts;
 
 	if (pdata->multi_segs_tx)
 		dev->tx_pkt_burst = &axgbe_xmit_pkts_seg;
 #ifdef RTE_ARCH_X86
-	struct axgbe_tx_queue *txq = dev->data->tx_queues[0];
-	if (!txq->vector_disable &&
+	else if (!txq->vector_disable &&
 			rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128)
 		dev->tx_pkt_burst = &axgbe_xmit_pkts_vec;
-#else
-	dev->tx_pkt_burst = &axgbe_xmit_pkts;
 #endif
 }
 
-- 
2.34.1



* [PATCH v2] net/axgbe: invoke correct API when offloads enabled
  2023-11-13 16:55     ` Ferruh Yigit
  2023-11-14  6:07       ` [PATCH v2] net/axgbe: invoke correct API when offloads enabled Jesna K E
@ 2023-11-14  7:15       ` Jesna K E
  2023-11-15  5:56       ` [PATCH v3] " Jesna K E
  2 siblings, 0 replies; 16+ messages in thread
From: Jesna K E @ 2023-11-14  7:15 UTC (permalink / raw)
  To: dev; +Cc: Ferruh.Yigit, Selwin.Sebastian, Jesna K E

A bug was introduced with the recent fix: when
an offload feature is enabled, the axgbe_xmit_pkts_vec API is called
rather than the axgbe_xmit_pkts API. This patch fixes it.

Fixes: 9963b5131af8 ("net/axgbe: support multi-process")

Signed-off-by: Jesna K E <jesna.k.e@amd.com>
---
 drivers/net/axgbe/axgbe_ethdev.c | 8 +++++---
 1 file changed, 5 insertions(+), 3 deletions(-)

diff --git a/drivers/net/axgbe/axgbe_ethdev.c b/drivers/net/axgbe/axgbe_ethdev.c
index 3717166384..7a57418767 100644
--- a/drivers/net/axgbe/axgbe_ethdev.c
+++ b/drivers/net/axgbe/axgbe_ethdev.c
@@ -12,6 +12,8 @@
 
 #include "eal_filesystem.h"
 
+#include <rte_vect.h>
+
 #ifdef RTE_ARCH_X86
 #include <cpuid.h>
 #else
@@ -2137,15 +2139,15 @@ axgbe_set_tx_function(struct rte_eth_dev *dev)
 {
 	struct axgbe_port *pdata = dev->data->dev_private;
 
+	dev->tx_pkt_burst = &axgbe_xmit_pkts;
+
 	if (pdata->multi_segs_tx)
 		dev->tx_pkt_burst = &axgbe_xmit_pkts_seg;
 #ifdef RTE_ARCH_X86
 	struct axgbe_tx_queue *txq = dev->data->tx_queues[0];
-	if (!txq->vector_disable &&
+	else if (!txq->vector_disable &&
 			rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128)
 		dev->tx_pkt_burst = &axgbe_xmit_pkts_vec;
-#else
-	dev->tx_pkt_burst = &axgbe_xmit_pkts;
 #endif
 }
 
-- 
2.34.1



* [PATCH v3] net/axgbe: invoke correct API when offloads enabled
  2023-11-13 16:55     ` Ferruh Yigit
  2023-11-14  6:07       ` [PATCH v2] net/axgbe: invoke correct API when offloads enabled Jesna K E
  2023-11-14  7:15       ` Jesna K E
@ 2023-11-15  5:56       ` Jesna K E
  2023-11-15 11:57         ` Ferruh Yigit
  2023-11-15 12:54         ` Sebastian, Selwin
  2 siblings, 2 replies; 16+ messages in thread
From: Jesna K E @ 2023-11-15  5:56 UTC (permalink / raw)
  To: dev; +Cc: Ferruh.Yigit, Selwin.Sebastian, Jesna K E

A bug was introduced with the recent fix: when
an offload feature is enabled, the axgbe_xmit_pkts_vec API is called
rather than the axgbe_xmit_pkts API. This patch fixes it.

Fixes: 9963b5131af8 ("net/axgbe: support multi-process")

Signed-off-by: Jesna K E <jesna.k.e@amd.com>
---
 drivers/net/axgbe/axgbe_ethdev.c | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/drivers/net/axgbe/axgbe_ethdev.c b/drivers/net/axgbe/axgbe_ethdev.c
index 3717166384..f174d46143 100644
--- a/drivers/net/axgbe/axgbe_ethdev.c
+++ b/drivers/net/axgbe/axgbe_ethdev.c
@@ -12,6 +12,8 @@
 
 #include "eal_filesystem.h"
 
+#include <rte_vect.h>
+
 #ifdef RTE_ARCH_X86
 #include <cpuid.h>
 #else
@@ -2137,6 +2139,8 @@ axgbe_set_tx_function(struct rte_eth_dev *dev)
 {
 	struct axgbe_port *pdata = dev->data->dev_private;
 
+	dev->tx_pkt_burst = &axgbe_xmit_pkts;
+
 	if (pdata->multi_segs_tx)
 		dev->tx_pkt_burst = &axgbe_xmit_pkts_seg;
 #ifdef RTE_ARCH_X86
@@ -2144,8 +2148,6 @@ axgbe_set_tx_function(struct rte_eth_dev *dev)
 	if (!txq->vector_disable &&
 			rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128)
 		dev->tx_pkt_burst = &axgbe_xmit_pkts_vec;
-#else
-	dev->tx_pkt_burst = &axgbe_xmit_pkts;
 #endif
 }
 
-- 
2.34.1



* Re: [PATCH v3] net/axgbe: invoke correct API when offloads enabled
  2023-11-15  5:56       ` [PATCH v3] " Jesna K E
@ 2023-11-15 11:57         ` Ferruh Yigit
  2023-11-15 12:54         ` Sebastian, Selwin
  1 sibling, 0 replies; 16+ messages in thread
From: Ferruh Yigit @ 2023-11-15 11:57 UTC (permalink / raw)
  To: Jesna K E, dev; +Cc: Selwin.Sebastian

On 11/15/2023 5:56 AM, Jesna K E wrote:
> A bug was introduced with the recent fix: when
> an offload feature is enabled, the axgbe_xmit_pkts_vec API is called
> rather than the axgbe_xmit_pkts API. This patch fixes it.
> 
> Fixes: 9963b5131af8 ("net/axgbe: support multi-process")
> 
> Signed-off-by: Jesna K E <jesna.k.e@amd.com>
> 

Needs stable tag, can add while merging.
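
I.e. a "Cc: stable@dpdk.org" trailer under the Fixes line:

  Fixes: 9963b5131af8 ("net/axgbe: support multi-process")
  Cc: stable@dpdk.org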

Acked-by: Ferruh Yigit <ferruh.yigit@amd.com>



* RE: [PATCH v3] net/axgbe: invoke correct API when offloads enabled
  2023-11-15  5:56       ` [PATCH v3] " Jesna K E
  2023-11-15 11:57         ` Ferruh Yigit
@ 2023-11-15 12:54         ` Sebastian, Selwin
  2023-11-15 12:59           ` Ferruh Yigit
  1 sibling, 1 reply; 16+ messages in thread
From: Sebastian, Selwin @ 2023-11-15 12:54 UTC (permalink / raw)
  To: K.E., Jesna, dev; +Cc: Yigit, Ferruh


Acked-by: Selwin Sebastian<selwin.sebastian@amd.com>

-----Original Message-----
From: K.E., Jesna <Jesna.K.e@amd.com>
Sent: Wednesday, November 15, 2023 11:26 AM
To: dev@dpdk.org
Cc: Yigit, Ferruh <Ferruh.Yigit@amd.com>; Sebastian, Selwin <Selwin.Sebastian@amd.com>; K.E., Jesna <Jesna.K.e@amd.com>
Subject: [PATCH v3] net/axgbe: invoke correct API when offloads enabled

A bug was introduced with the recent fix: when an offload feature is enabled, the axgbe_xmit_pkts_vec API is called rather than the axgbe_xmit_pkts API. This patch fixes it.

Fixes: 9963b5131af8 ("net/axgbe: support multi-process")

Signed-off-by: Jesna K E <jesna.k.e@amd.com>
---
 drivers/net/axgbe/axgbe_ethdev.c | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/drivers/net/axgbe/axgbe_ethdev.c b/drivers/net/axgbe/axgbe_ethdev.c
index 3717166384..f174d46143 100644
--- a/drivers/net/axgbe/axgbe_ethdev.c
+++ b/drivers/net/axgbe/axgbe_ethdev.c
@@ -12,6 +12,8 @@

 #include "eal_filesystem.h"

+#include <rte_vect.h>
+
 #ifdef RTE_ARCH_X86
 #include <cpuid.h>
 #else
@@ -2137,6 +2139,8 @@ axgbe_set_tx_function(struct rte_eth_dev *dev)  {
        struct axgbe_port *pdata = dev->data->dev_private;

+       dev->tx_pkt_burst = &axgbe_xmit_pkts;
+
        if (pdata->multi_segs_tx)
                dev->tx_pkt_burst = &axgbe_xmit_pkts_seg;  #ifdef RTE_ARCH_X86 @@ -2144,8 +2148,6 @@ axgbe_set_tx_function(struct rte_eth_dev *dev)
        if (!txq->vector_disable &&
                        rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128)
                dev->tx_pkt_burst = &axgbe_xmit_pkts_vec; -#else
-       dev->tx_pkt_burst = &axgbe_xmit_pkts;
 #endif
 }

--
2.34.1



* Re: [PATCH v3] net/axgbe: invoke correct API when offloads enabled
  2023-11-15 12:54         ` Sebastian, Selwin
@ 2023-11-15 12:59           ` Ferruh Yigit
  0 siblings, 0 replies; 16+ messages in thread
From: Ferruh Yigit @ 2023-11-15 12:59 UTC (permalink / raw)
  To: Sebastian, Selwin, K.E., Jesna; +Cc: dev

On 11/15/2023 12:54 PM, Sebastian, Selwin wrote:

> -----Original Message-----
> From: K.E., Jesna <Jesna.K.e@amd.com>
> Sent: Wednesday, November 15, 2023 11:26 AM
> To: dev@dpdk.org
> Cc: Yigit, Ferruh <Ferruh.Yigit@amd.com>; Sebastian, Selwin <Selwin.Sebastian@amd.com>; K.E., Jesna <Jesna.K.e@amd.com>
> Subject: [PATCH v3] net/axgbe: invoke correct API when offloads enabled
> 
> A bug was introduced with the recent fix: when an offload feature is enabled, the axgbe_xmit_pkts_vec API is called rather than the axgbe_xmit_pkts API. This patch fixes it.
> 
> Fixes: 9963b5131af8 ("net/axgbe: support multi-process")
> 
> Signed-off-by: Jesna K E <jesna.k.e@amd.com>
> 
> Acked-by: Selwin Sebastian<selwin.sebastian@amd.com>
> 
Applied to dpdk-next-net/main, thanks.


* Re: [PATCH v1 3/3] net/axgbe: support TSO Implementation
  2023-11-11 16:00 ` [PATCH v1 3/3] net/axgbe: support TSO Implementation Jesna K E
@ 2023-11-15 19:33   ` Ferruh Yigit
  2023-11-16  9:44     ` [PATCH v2] net/axgbe: support TSO Jesna K E
  2023-11-16 16:03     ` [PATCH v3] " Jesna K E
  0 siblings, 2 replies; 16+ messages in thread
From: Ferruh Yigit @ 2023-11-15 19:33 UTC (permalink / raw)
  To: Jesna K E, dev; +Cc: Selwin.Sebastian

On 11/11/2023 4:00 PM, Jesna K E wrote:
> Signed-off-by: Jesna K E <jesna.k.e@amd.com>
>

Hi Jesna,

There are some reported build errors [1]; can you please check them?

Also please provide some commit log, even if it is brief.

[1]
https://mails.dpdk.org/archives/test-report/2023-November/508540.html


> ---
>  drivers/net/axgbe/axgbe_common.h       |  11 +
>  drivers/net/axgbe/axgbe_dev.c          |  19 ++
>  drivers/net/axgbe/axgbe_ethdev.c       |   1 +
>  drivers/net/axgbe/axgbe_ethdev.h       |   1 +
>  drivers/net/axgbe/axgbe_rxtx.c         | 305 +++++++++++++++----------
>  drivers/net/axgbe/axgbe_rxtx_vec_sse.c |   1 +
>  6 files changed, 223 insertions(+), 115 deletions(-)
> 

'doc/guides/nics/features/axgbe.ini' needs to be updated
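
I.e. the new offload gets a line in the feature matrix:

  TSO = Y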

> diff --git a/drivers/net/axgbe/axgbe_common.h b/drivers/net/axgbe/axgbe_common.h
> index a5d11c5832..1face6f361 100644
> --- a/drivers/net/axgbe/axgbe_common.h
> +++ b/drivers/net/axgbe/axgbe_common.h
> @@ -162,6 +162,9 @@
>  #define DMA_CH_SR			0x60
>  
>  /* DMA channel register entry bit positions and sizes */
> +//TSO
>

Please prefer /* */ comments
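
I.e.:

  /* TSO */

instead of:

  //TSO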


> +#define DMA_CH_CR_MSS_INDEX             0
> +#define DMA_CH_CR_MSS_WIDTH             14
>  #define DMA_CH_CR_PBLX8_INDEX		16
>  #define DMA_CH_CR_PBLX8_WIDTH		1
>  #define DMA_CH_CR_SPH_INDEX		24
> @@ -1232,6 +1235,14 @@
>  #define TX_CONTEXT_DESC3_VT_INDEX		0
>  #define TX_CONTEXT_DESC3_VT_WIDTH		16
>  
> +//TSO
> +#define TX_NORMAL_DESC3_TPL_INDEX               0
> +#define TX_NORMAL_DESC3_TPL_WIDTH               18
> +#define TX_NORMAL_DESC3_THL_INDEX               19
> +#define TX_NORMAL_DESC3_THL_WIDTH               4
> +#define TX_CONTEXT_DESC3_OSTC_INDEX             27
> +#define TX_CONTEXT_DESC3_OSTC_WIDTH             1
> +
>  #define TX_NORMAL_DESC2_HL_B1L_INDEX		0
>  #define TX_NORMAL_DESC2_HL_B1L_WIDTH		14
>  #define TX_NORMAL_DESC2_IC_INDEX		31
> diff --git a/drivers/net/axgbe/axgbe_dev.c b/drivers/net/axgbe/axgbe_dev.c
> index 6a7fddffca..7e0d387fc3 100644
> --- a/drivers/net/axgbe/axgbe_dev.c
> +++ b/drivers/net/axgbe/axgbe_dev.c
> @@ -808,6 +808,24 @@ int axgbe_write_rss_lookup_table(struct axgbe_port *pdata)
>  	return 0;
>  }
>  
> +
>

This adds an extra empty line here.

<...>

> @@ -843,7 +847,7 @@ static int axgbe_xmit_hw(struct axgbe_tx_queue *txq,
>  
>  	idx = AXGBE_GET_DESC_IDX(txq, txq->cur);
>  	desc = &txq->desc[idx];
> -
> +	printf("tso::Inside axgbe_xmit_hw \n");
>

We are not allowed to use 'printf' for logging; please use the logging
macros instead.
There are other instances of 'printf' usage in this patch.
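
For example, instead of:

  printf("tso::Inside axgbe_xmit_hw \n");

use the macro this driver already uses elsewhere:

  PMD_DRV_LOG(DEBUG, "tso: inside axgbe_xmit_hw\n");
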
<...>

> +        } else {
> +                /* Enable CRC and Pad Insertion */
> +                AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, CPC, 0);
> +                /* Total msg length to transmit */
> +                AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, FL,
> +                                mbuf->pkt_len);
> +        }
> +#if 0
>

Please remove unused code for upstreaming.



* [PATCH v2] net/axgbe: support TSO
  2023-11-15 19:33   ` Ferruh Yigit
@ 2023-11-16  9:44     ` Jesna K E
  2023-11-16 16:03     ` [PATCH v3] " Jesna K E
  1 sibling, 0 replies; 16+ messages in thread
From: Jesna K E @ 2023-11-16  9:44 UTC (permalink / raw)
  To: dev; +Cc: Ferruh.Yigit, Selwin.Sebastian, Jesna K E

Added TSO support for axgbe PMD.

Initial implementation of the TSO feature support.
Currently only the headers are transmitted to the tester
on the receiver side.
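
For testing, the transmit side enables the offload and marks each
packet for segmentation, roughly like the following (untested sketch;
port_id and mbuf are assumed to be set up elsewhere):

  struct rte_eth_conf conf = { 0 };

  conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_TCP_TSO;
  rte_eth_dev_configure(port_id, 1, 1, &conf);

  /* per packet, for an IPv4/TCP frame */
  mbuf->ol_flags |= RTE_MBUF_F_TX_TCP_SEG | RTE_MBUF_F_TX_IPV4 |
                    RTE_MBUF_F_TX_IP_CKSUM;
  mbuf->l2_len = sizeof(struct rte_ether_hdr);
  mbuf->l3_len = sizeof(struct rte_ipv4_hdr);
  mbuf->l4_len = sizeof(struct rte_tcp_hdr);
  mbuf->tso_segsz = 1460; /* MSS for a 1500-byte MTU, no TCP options */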

Signed-off-by: Jesna K E <jesna.k.e@amd.com>
---
 doc/guides/nics/features/axgbe.ini |   1 +
 drivers/net/axgbe/axgbe_common.h   |  12 ++
 drivers/net/axgbe/axgbe_dev.c      |  13 ++
 drivers/net/axgbe/axgbe_ethdev.c   |   3 +
 drivers/net/axgbe/axgbe_ethdev.h   |   1 +
 drivers/net/axgbe/axgbe_rxtx.c     | 276 +++++++++++++++++------------
 6 files changed, 193 insertions(+), 113 deletions(-)

diff --git a/doc/guides/nics/features/axgbe.ini b/doc/guides/nics/features/axgbe.ini
index 5e2d6498e5..5c30c967bc 100644
--- a/doc/guides/nics/features/axgbe.ini
+++ b/doc/guides/nics/features/axgbe.ini
@@ -7,6 +7,7 @@
 Speed capabilities   = Y
 Link status          = Y
 Scattered Rx         = Y
+TSO		     = Y
 Promiscuous mode     = Y
 Allmulticast mode    = Y
 RSS hash             = Y
diff --git a/drivers/net/axgbe/axgbe_common.h b/drivers/net/axgbe/axgbe_common.h
index a5d11c5832..c30efe4c02 100644
--- a/drivers/net/axgbe/axgbe_common.h
+++ b/drivers/net/axgbe/axgbe_common.h
@@ -161,6 +161,10 @@
 #define DMA_CH_CARBR_LO			0x5c
 #define DMA_CH_SR			0x60
 
+/* Setting MSS register entry bit positions and sizes for TSO */
+#define DMA_CH_CR_MSS_INDEX             0
+#define DMA_CH_CR_MSS_WIDTH             14
+
 /* DMA channel register entry bit positions and sizes */
 #define DMA_CH_CR_PBLX8_INDEX		16
 #define DMA_CH_CR_PBLX8_WIDTH		1
@@ -1232,6 +1236,14 @@
 #define TX_CONTEXT_DESC3_VT_INDEX		0
 #define TX_CONTEXT_DESC3_VT_WIDTH		16
 
+/* TSO related register entry bit positions and sizes*/
+#define TX_NORMAL_DESC3_TPL_INDEX               0
+#define TX_NORMAL_DESC3_TPL_WIDTH               18
+#define TX_NORMAL_DESC3_THL_INDEX               19
+#define TX_NORMAL_DESC3_THL_WIDTH               4
+#define TX_CONTEXT_DESC3_OSTC_INDEX             27
+#define TX_CONTEXT_DESC3_OSTC_WIDTH             1
+
 #define TX_NORMAL_DESC2_HL_B1L_INDEX		0
 #define TX_NORMAL_DESC2_HL_B1L_WIDTH		14
 #define TX_NORMAL_DESC2_IC_INDEX		31
diff --git a/drivers/net/axgbe/axgbe_dev.c b/drivers/net/axgbe/axgbe_dev.c
index 6a7fddffca..eef453fab0 100644
--- a/drivers/net/axgbe/axgbe_dev.c
+++ b/drivers/net/axgbe/axgbe_dev.c
@@ -808,6 +808,18 @@ int axgbe_write_rss_lookup_table(struct axgbe_port *pdata)
 	return 0;
 }
 
+static void xgbe_config_tso_mode(struct axgbe_port *pdata)
+{
+	unsigned int i;
+	struct axgbe_tx_queue *txq;
+
+	for (i = 0; i < pdata->eth_dev->data->nb_tx_queues; i++) {
+		txq = pdata->eth_dev->data->tx_queues[i];
+		AXGMAC_DMA_IOWRITE_BITS(txq, DMA_CH_TCR, TSE, 1);
+		AXGMAC_DMA_IOWRITE_BITS(txq, DMA_CH_CR, MSS, 800);
+	}
+}
+
 static int axgbe_enable_rss(struct axgbe_port *pdata)
 {
 	int ret;
@@ -1314,6 +1326,7 @@ static int axgbe_init(struct axgbe_port *pdata)
 	axgbe_config_rx_pbl_val(pdata);
 	axgbe_config_rx_buffer_size(pdata);
 	axgbe_config_rss(pdata);
+	xgbe_config_tso_mode(pdata);
 	wrapper_tx_desc_init(pdata);
 	ret = wrapper_rx_desc_init(pdata);
 	if (ret)
diff --git a/drivers/net/axgbe/axgbe_ethdev.c b/drivers/net/axgbe/axgbe_ethdev.c
index 3717166384..0a4901aabc 100644
--- a/drivers/net/axgbe/axgbe_ethdev.c
+++ b/drivers/net/axgbe/axgbe_ethdev.c
@@ -12,6 +12,8 @@
 
 #include "eal_filesystem.h"
 
+#include <rte_vect.h>
+
 #ifdef RTE_ARCH_X86
 #include <cpuid.h>
 #else
@@ -1237,6 +1239,7 @@ axgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM  |
 		RTE_ETH_TX_OFFLOAD_MULTI_SEGS  |
 		RTE_ETH_TX_OFFLOAD_UDP_CKSUM   |
+		RTE_ETH_TX_OFFLOAD_TCP_TSO     |
 		RTE_ETH_TX_OFFLOAD_TCP_CKSUM;
 
 	if (pdata->hw_feat.rss) {
diff --git a/drivers/net/axgbe/axgbe_ethdev.h b/drivers/net/axgbe/axgbe_ethdev.h
index 7f19321d88..31a583c2c6 100644
--- a/drivers/net/axgbe/axgbe_ethdev.h
+++ b/drivers/net/axgbe/axgbe_ethdev.h
@@ -583,6 +583,7 @@ struct axgbe_port {
 	unsigned int tx_osp_mode;
 	unsigned int tx_max_fifo_size;
 	unsigned int multi_segs_tx;
+	unsigned int tso_tx;
 
 	/* Rx settings */
 	unsigned int rx_sf_mode;
diff --git a/drivers/net/axgbe/axgbe_rxtx.c b/drivers/net/axgbe/axgbe_rxtx.c
index a9ff291cef..d7c97f3919 100644
--- a/drivers/net/axgbe/axgbe_rxtx.c
+++ b/drivers/net/axgbe/axgbe_rxtx.c
@@ -627,6 +627,9 @@ int axgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 				RTE_ETH_TX_OFFLOAD_MULTI_SEGS))
 		pdata->multi_segs_tx = true;
 
+	if ((dev_data->dev_conf.txmode.offloads &
+				RTE_ETH_TX_OFFLOAD_TCP_TSO))
+		pdata->tso_tx = true;
 
 	return 0;
 }
@@ -827,6 +830,7 @@ static int axgbe_xmit_hw(struct axgbe_tx_queue *txq,
 
 	idx = AXGBE_GET_DESC_IDX(txq, txq->cur);
 	desc = &txq->desc[idx];
+	PMD_DRV_LOG(DEBUG, "tso:Inside axgbe_xmit_hw\n");
 
 	/* Update buffer address  and length */
 	desc->baddr = rte_mbuf_data_iova(mbuf);
@@ -873,7 +877,6 @@ static int axgbe_xmit_hw(struct axgbe_tx_queue *txq,
 	AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, OWN, 1);
 	rte_wmb();
 
-
 	/* Save mbuf */
 	txq->sw_ring[idx] = mbuf;
 	/* Update current index*/
@@ -884,6 +887,7 @@ static int axgbe_xmit_hw(struct axgbe_tx_queue *txq,
 	return 0;
 }
 
+
 /* Tx Descriptor formation for segmented mbuf
  * Each mbuf will require multiple descriptors
  */
@@ -892,130 +896,175 @@ static int
 axgbe_xmit_hw_seg(struct axgbe_tx_queue *txq,
 		struct rte_mbuf *mbuf)
 {
-	volatile struct axgbe_tx_desc *desc;
-	uint16_t idx;
-	uint64_t mask;
-	int start_index;
-	uint32_t pkt_len = 0;
-	int nb_desc_free;
-	struct rte_mbuf  *tx_pkt;
-
-	nb_desc_free = txq->nb_desc - (txq->cur - txq->dirty);
-
-	if (mbuf->nb_segs > nb_desc_free) {
-		axgbe_xmit_cleanup_seg(txq);
-		nb_desc_free = txq->nb_desc - (txq->cur - txq->dirty);
-		if (unlikely(mbuf->nb_segs > nb_desc_free))
-			return RTE_ETH_TX_DESC_UNAVAIL;
-	}
-
-	idx = AXGBE_GET_DESC_IDX(txq, txq->cur);
-	desc = &txq->desc[idx];
-	/* Saving the start index for setting the OWN bit finally */
-	start_index = idx;
-
+        volatile struct axgbe_tx_desc *desc;
+        uint16_t idx;
+        uint64_t mask;
+        int start_index;
+        uint32_t pkt_len = 0;
+        int nb_desc_free;
+        struct rte_mbuf  *tx_pkt;
+        uint64_t l2_len = 0;
+        uint64_t l3_len = 0;
+        uint64_t l4_len = 0;
+        uint64_t tso_segsz = 0;
+        uint64_t total_hdr_len;
+	int tso = 0;
+
+        /*Parameters required for tso*/
+        l2_len = mbuf->l2_len;
+        l3_len = mbuf->l3_len;
+        l4_len = mbuf->l4_len;
+        tso_segsz = mbuf->tso_segsz;
+        total_hdr_len = l2_len + l3_len + l4_len;
+
+        if (txq->pdata->tso_tx)
+                tso = 1;
+        else
+                tso = 0;
+
+        PMD_DRV_LOG(DEBUG, "tso:l2_len = %ld,l3_len=%ld,l4_len=%ld,tso_segsz=%lu,
+				total_hdr_len=%lu\n", l2_len, l3_len, l4_len, tso_segsz, total_hdr_len);
+
+        nb_desc_free = txq->nb_desc - (txq->cur - txq->dirty);
+
+        PMD_DRV_LOG(DEBUG, "tso::Inside axgbe_xmit_hw_seg\n");
+        if (mbuf->nb_segs > nb_desc_free) {
+                axgbe_xmit_cleanup_seg(txq);
+                nb_desc_free = txq->nb_desc - (txq->cur - txq->dirty);
+                if (unlikely(mbuf->nb_segs > nb_desc_free))
+                        return RTE_ETH_TX_DESC_UNAVAIL;
+        }
+
+        idx = AXGBE_GET_DESC_IDX(txq, txq->cur);
+        desc = &txq->desc[idx];
+        /* Saving the start index for setting the OWN bit finally */
+        start_index = idx;
 	tx_pkt = mbuf;
-	/* Max_pkt len = 9018 ; need to update it according to Jumbo pkt size */
-	pkt_len = tx_pkt->pkt_len;
+        /* Max_pkt len = 9018 ; need to update it according to Jumbo pkt size */
+        pkt_len = tx_pkt->pkt_len;
 
-	/* Update buffer address  and length */
-	desc->baddr = rte_mbuf_data_iova(tx_pkt);
+        /* Update buffer address  and length */
+       desc->baddr = rte_pktmbuf_iova_offset(mbuf,0);
+       /*For TSO first buffer contains the Header */
+       if (tso)
 	AXGMAC_SET_BITS_LE(desc->desc2, TX_NORMAL_DESC2, HL_B1L,
-					   tx_pkt->data_len);
-	/* Total msg length to transmit */
-	AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, FL,
-					   tx_pkt->pkt_len);
-	/* Timestamp enablement check */
-	if (mbuf->ol_flags & RTE_MBUF_F_TX_IEEE1588_TMST)
-		AXGMAC_SET_BITS_LE(desc->desc2, TX_NORMAL_DESC2, TTSE, 1);
+                                           total_hdr_len);
+	else
+        AXGMAC_SET_BITS_LE(desc->desc2, TX_NORMAL_DESC2, HL_B1L,
+                                           tx_pkt->data_len);
 
 	rte_wmb();
-	/* Mark it as First Descriptor */
-	AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, FD, 1);
-	/* Mark it as a NORMAL descriptor */
-	AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, CTXT, 0);
-	/* configure h/w Offload */
-	mask = mbuf->ol_flags & RTE_MBUF_F_TX_L4_MASK;
-	if (mask == RTE_MBUF_F_TX_TCP_CKSUM || mask == RTE_MBUF_F_TX_UDP_CKSUM)
-		AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, CIC, 0x3);
-	else if (mbuf->ol_flags & RTE_MBUF_F_TX_IP_CKSUM)
-		AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, CIC, 0x1);
-	rte_wmb();
 
-	if (mbuf->ol_flags & (RTE_MBUF_F_TX_VLAN | RTE_MBUF_F_TX_QINQ)) {
-		/* Mark it as a CONTEXT descriptor */
-		AXGMAC_SET_BITS_LE(desc->desc3, TX_CONTEXT_DESC3,
-				CTXT, 1);
-		/* Set the VLAN tag */
-		AXGMAC_SET_BITS_LE(desc->desc3, TX_CONTEXT_DESC3,
-				VT, mbuf->vlan_tci);
-		/* Indicate this descriptor contains the VLAN tag */
-		AXGMAC_SET_BITS_LE(desc->desc3, TX_CONTEXT_DESC3,
-				VLTV, 1);
-		AXGMAC_SET_BITS_LE(desc->desc2, TX_NORMAL_DESC2, VTIR,
-				TX_NORMAL_DESC2_VLAN_INSERT);
-	} else {
-		AXGMAC_SET_BITS_LE(desc->desc2, TX_NORMAL_DESC2, VTIR, 0x0);
-	}
-	rte_wmb();
+	/* Timestamp enablement check */
+        if (mbuf->ol_flags & RTE_MBUF_F_TX_IEEE1588_TMST)
+                AXGMAC_SET_BITS_LE(desc->desc2, TX_NORMAL_DESC2, TTSE, 1);
+
+        rte_wmb();
+        /* Mark it as First Descriptor */
+        AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, FD, 1);
+        /* Mark it as a NORMAL descriptor */
+        AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, CTXT, 0);
+        /* configure h/w Offload */
+        mask = mbuf->ol_flags & RTE_MBUF_F_TX_L4_MASK;
+        if (mask == RTE_MBUF_F_TX_TCP_CKSUM || mask == RTE_MBUF_F_TX_UDP_CKSUM)
+                AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, CIC, 0x3);
+        else if (mbuf->ol_flags & RTE_MBUF_F_TX_IP_CKSUM)
+                AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, CIC, 0x1);
+        rte_wmb();
+
+        if (mbuf->ol_flags & (RTE_MBUF_F_TX_VLAN | RTE_MBUF_F_TX_QINQ)) {
+                /* Mark it as a CONTEXT descriptor */
+                AXGMAC_SET_BITS_LE(desc->desc3, TX_CONTEXT_DESC3,
+                                CTXT, 1);
+                /* Set the VLAN tag */
+                AXGMAC_SET_BITS_LE(desc->desc3, TX_CONTEXT_DESC3,
+                                VT, mbuf->vlan_tci);
+                /* Indicate this descriptor contains the VLAN tag */
+                AXGMAC_SET_BITS_LE(desc->desc3, TX_CONTEXT_DESC3,
+                                VLTV, 1);
+                AXGMAC_SET_BITS_LE(desc->desc2, TX_NORMAL_DESC2, VTIR,
+                                TX_NORMAL_DESC2_VLAN_INSERT);
+        } else {
+                AXGMAC_SET_BITS_LE(desc->desc2, TX_NORMAL_DESC2, VTIR, 0x0);
+        }
+        rte_wmb();
+
+	/*Register settings for TSO*/
+        if (tso) {
+                PMD_DRV_LOG(DEBUG, "tso : Inside TSO register settings\n");
+                /* Enable TSO */
+                AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, TSE,1);
+                AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, TPL,
+                                ((mbuf->pkt_len)-total_hdr_len));
+                AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, THL,
+                                l4_len);
+        } else {
+                /* Enable CRC and Pad Insertion */
+                AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, CPC, 0);
+                /* Total msg length to transmit */
+                AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, FL,
+                                mbuf->pkt_len);
+        }
 
 	/* Save mbuf */
-	txq->sw_ring[idx] = tx_pkt;
-	/* Update current index*/
-	txq->cur++;
+        txq->sw_ring[idx] = tx_pkt;
+        /* Update current index*/
+        txq->cur++;
 
 	tx_pkt = tx_pkt->next;
 
 	while (tx_pkt != NULL) {
-		idx = AXGBE_GET_DESC_IDX(txq, txq->cur);
-		desc = &txq->desc[idx];
-
-		/* Update buffer address  and length */
-		desc->baddr = rte_mbuf_data_iova(tx_pkt);
-
-		AXGMAC_SET_BITS_LE(desc->desc2,
-				TX_NORMAL_DESC2, HL_B1L, tx_pkt->data_len);
-
-		rte_wmb();
-
-		/* Mark it as a NORMAL descriptor */
-		AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, CTXT, 0);
-		/* configure h/w Offload */
-		mask = mbuf->ol_flags & RTE_MBUF_F_TX_L4_MASK;
-		if (mask == RTE_MBUF_F_TX_TCP_CKSUM ||
-				mask == RTE_MBUF_F_TX_UDP_CKSUM)
-			AXGMAC_SET_BITS_LE(desc->desc3,
-					TX_NORMAL_DESC3, CIC, 0x3);
-		else if (mbuf->ol_flags & RTE_MBUF_F_TX_IP_CKSUM)
-			AXGMAC_SET_BITS_LE(desc->desc3,
-					TX_NORMAL_DESC3, CIC, 0x1);
-
-		rte_wmb();
-
-		 /* Set OWN bit */
-		AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, OWN, 1);
-		rte_wmb();
-
-		/* Save mbuf */
-		txq->sw_ring[idx] = tx_pkt;
-		/* Update current index*/
-		txq->cur++;
-
-		tx_pkt = tx_pkt->next;
-	}
-
-	/* Set LD bit for the last descriptor */
-	AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, LD, 1);
-	rte_wmb();
-
-	/* Update stats */
-	txq->bytes += pkt_len;
-
-	/* Set OWN bit for the first descriptor */
-	desc = &txq->desc[start_index];
-	AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, OWN, 1);
-	rte_wmb();
-
+		idx = AXGBE_GET_DESC_IDX(txq, txq->cur);
+		desc = &txq->desc[idx];
+
+		if (tso)
+			desc->baddr = rte_pktmbuf_iova_offset(mbuf, total_hdr_len);
+		else
+			/* Update buffer address  and length */
+			desc->baddr = rte_mbuf_data_iova(tx_pkt);
+
+		AXGMAC_SET_BITS_LE(desc->desc2,
+				TX_NORMAL_DESC2, HL_B1L, tx_pkt->data_len);
+
+		rte_wmb();
+
+		/* Mark it as a NORMAL descriptor */
+		AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, CTXT, 0);
+		/* configure h/w Offload */
+		mask = mbuf->ol_flags & RTE_MBUF_F_TX_L4_MASK;
+		if (mask == RTE_MBUF_F_TX_TCP_CKSUM ||
+				mask == RTE_MBUF_F_TX_UDP_CKSUM)
+			AXGMAC_SET_BITS_LE(desc->desc3,
+					TX_NORMAL_DESC3, CIC, 0x3);
+		else if (mbuf->ol_flags & RTE_MBUF_F_TX_IP_CKSUM)
+			AXGMAC_SET_BITS_LE(desc->desc3,
+					TX_NORMAL_DESC3, CIC, 0x1);
+
+		rte_wmb();
+
+		/* Set OWN bit */
+		AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, OWN, 1);
+		rte_wmb();
+
+		/* Save mbuf */
+		txq->sw_ring[idx] = tx_pkt;
+		/* Update current index*/
+		txq->cur++;
+
+		tx_pkt = tx_pkt->next;
+	}
+
+	/* Set LD bit for the last descriptor */
+	AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, LD, 1);
+	rte_wmb();
+
+	/* Update stats */
+	txq->bytes += pkt_len;
+
+	/* Set OWN bit for the first descriptor */
+	desc = &txq->desc[start_index];
+	AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, OWN, 1);
+	rte_wmb();
 	return 0;
 }
 
@@ -1061,6 +1110,7 @@ axgbe_xmit_pkts_seg(void *tx_queue, struct rte_mbuf **tx_pkts,
 				idx * sizeof(struct axgbe_tx_desc));
 	/* Update tail reg with next immediate address to kick Tx DMA channel*/
 	AXGMAC_DMA_IOWRITE(txq, DMA_CH_TDTR_LO, tail_addr);
+
 	txq->pkts += nb_pkt_sent;
 	return nb_pkt_sent;
 }
-- 
2.34.1


^ permalink raw reply	[flat|nested] 16+ messages in thread

* [PATCH v3] net/axgbe: support TSO
  2023-11-15 19:33   ` Ferruh Yigit
  2023-11-16  9:44     ` [PATCH v2] net/axgbe: support TSO Jesna K E
@ 2023-11-16 16:03     ` Jesna K E
  2023-11-17 18:34       ` Ferruh Yigit
  1 sibling, 1 reply; 16+ messages in thread
From: Jesna K E @ 2023-11-16 16:03 UTC (permalink / raw)
  To: dev; +Cc: Ferruh.Yigit, Selwin.Sebastian, Jesna K E

Added TSO support for axgbe PMD.

Initial implementation of the TSO feature support.
Currently only the headers are transmitted to the
receiver on the tester side.

Signed-off-by: Jesna K E <jesna.k.e@amd.com>
---
 doc/guides/nics/features/axgbe.ini |  1 +
 drivers/net/axgbe/axgbe_common.h   | 12 ++++
 drivers/net/axgbe/axgbe_dev.c      | 13 +++++
 drivers/net/axgbe/axgbe_ethdev.c   |  3 +
 drivers/net/axgbe/axgbe_ethdev.h   |  1 +
 drivers/net/axgbe/axgbe_rxtx.c     | 88 +++++++++++++++++++++++++-----
 6 files changed, 104 insertions(+), 14 deletions(-)

diff --git a/doc/guides/nics/features/axgbe.ini b/doc/guides/nics/features/axgbe.ini
index 5e2d6498e5..5c30c967bc 100644
--- a/doc/guides/nics/features/axgbe.ini
+++ b/doc/guides/nics/features/axgbe.ini
@@ -7,6 +7,7 @@
 Speed capabilities   = Y
 Link status          = Y
 Scattered Rx         = Y
+TSO		     = Y
 Promiscuous mode     = Y
 Allmulticast mode    = Y
 RSS hash             = Y
diff --git a/drivers/net/axgbe/axgbe_common.h b/drivers/net/axgbe/axgbe_common.h
index a5d11c5832..c30efe4c02 100644
--- a/drivers/net/axgbe/axgbe_common.h
+++ b/drivers/net/axgbe/axgbe_common.h
@@ -161,6 +161,10 @@
 #define DMA_CH_CARBR_LO			0x5c
 #define DMA_CH_SR			0x60
 
+/* Setting MSS register entry bit positions and sizes for TSO */
+#define DMA_CH_CR_MSS_INDEX             0
+#define DMA_CH_CR_MSS_WIDTH             14
+
 /* DMA channel register entry bit positions and sizes */
 #define DMA_CH_CR_PBLX8_INDEX		16
 #define DMA_CH_CR_PBLX8_WIDTH		1
@@ -1232,6 +1236,14 @@
 #define TX_CONTEXT_DESC3_VT_INDEX		0
 #define TX_CONTEXT_DESC3_VT_WIDTH		16
 
+/* TSO related register entry bit positions and sizes*/
+#define TX_NORMAL_DESC3_TPL_INDEX               0
+#define TX_NORMAL_DESC3_TPL_WIDTH               18
+#define TX_NORMAL_DESC3_THL_INDEX               19
+#define TX_NORMAL_DESC3_THL_WIDTH               4
+#define TX_CONTEXT_DESC3_OSTC_INDEX             27
+#define TX_CONTEXT_DESC3_OSTC_WIDTH             1
+
 #define TX_NORMAL_DESC2_HL_B1L_INDEX		0
 #define TX_NORMAL_DESC2_HL_B1L_WIDTH		14
 #define TX_NORMAL_DESC2_IC_INDEX		31
diff --git a/drivers/net/axgbe/axgbe_dev.c b/drivers/net/axgbe/axgbe_dev.c
index 6a7fddffca..eef453fab0 100644
--- a/drivers/net/axgbe/axgbe_dev.c
+++ b/drivers/net/axgbe/axgbe_dev.c
@@ -808,6 +808,18 @@ int axgbe_write_rss_lookup_table(struct axgbe_port *pdata)
 	return 0;
 }
 
+static void xgbe_config_tso_mode(struct axgbe_port *pdata)
+{
+	unsigned int i;
+	struct axgbe_tx_queue *txq;
+
+	for (i = 0; i < pdata->eth_dev->data->nb_tx_queues; i++) {
+		txq = pdata->eth_dev->data->tx_queues[i];
+		AXGMAC_DMA_IOWRITE_BITS(txq, DMA_CH_TCR, TSE, 1);
+		AXGMAC_DMA_IOWRITE_BITS(txq, DMA_CH_CR, MSS, 800);
+	}
+}
+
 static int axgbe_enable_rss(struct axgbe_port *pdata)
 {
 	int ret;
@@ -1314,6 +1326,7 @@ static int axgbe_init(struct axgbe_port *pdata)
 	axgbe_config_rx_pbl_val(pdata);
 	axgbe_config_rx_buffer_size(pdata);
 	axgbe_config_rss(pdata);
+	xgbe_config_tso_mode(pdata);
 	wrapper_tx_desc_init(pdata);
 	ret = wrapper_rx_desc_init(pdata);
 	if (ret)
diff --git a/drivers/net/axgbe/axgbe_ethdev.c b/drivers/net/axgbe/axgbe_ethdev.c
index 3717166384..0a4901aabc 100644
--- a/drivers/net/axgbe/axgbe_ethdev.c
+++ b/drivers/net/axgbe/axgbe_ethdev.c
@@ -12,6 +12,8 @@
 
 #include "eal_filesystem.h"
 
+#include <rte_vect.h>
+
 #ifdef RTE_ARCH_X86
 #include <cpuid.h>
 #else
@@ -1237,6 +1239,7 @@ axgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM  |
 		RTE_ETH_TX_OFFLOAD_MULTI_SEGS  |
 		RTE_ETH_TX_OFFLOAD_UDP_CKSUM   |
+		RTE_ETH_TX_OFFLOAD_TCP_TSO     |
 		RTE_ETH_TX_OFFLOAD_TCP_CKSUM;
 
 	if (pdata->hw_feat.rss) {
diff --git a/drivers/net/axgbe/axgbe_ethdev.h b/drivers/net/axgbe/axgbe_ethdev.h
index 7f19321d88..31a583c2c6 100644
--- a/drivers/net/axgbe/axgbe_ethdev.h
+++ b/drivers/net/axgbe/axgbe_ethdev.h
@@ -583,6 +583,7 @@ struct axgbe_port {
 	unsigned int tx_osp_mode;
 	unsigned int tx_max_fifo_size;
 	unsigned int multi_segs_tx;
+	unsigned int tso_tx;
 
 	/* Rx settings */
 	unsigned int rx_sf_mode;
diff --git a/drivers/net/axgbe/axgbe_rxtx.c b/drivers/net/axgbe/axgbe_rxtx.c
index a9ff291cef..b0cafcbdda 100644
--- a/drivers/net/axgbe/axgbe_rxtx.c
+++ b/drivers/net/axgbe/axgbe_rxtx.c
@@ -627,6 +627,9 @@ int axgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 				RTE_ETH_TX_OFFLOAD_MULTI_SEGS))
 		pdata->multi_segs_tx = true;
 
+	if ((dev_data->dev_conf.txmode.offloads &
+				RTE_ETH_TX_OFFLOAD_TCP_TSO))
+		pdata->tso_tx = true;
 
 	return 0;
 }
@@ -827,6 +830,7 @@ static int axgbe_xmit_hw(struct axgbe_tx_queue *txq,
 
 	idx = AXGBE_GET_DESC_IDX(txq, txq->cur);
 	desc = &txq->desc[idx];
+	PMD_DRV_LOG(DEBUG, "tso: Inside %s\n", __func__);
 
 	/* Update buffer address  and length */
 	desc->baddr = rte_mbuf_data_iova(mbuf);
@@ -873,7 +877,6 @@ static int axgbe_xmit_hw(struct axgbe_tx_queue *txq,
 	AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, OWN, 1);
 	rte_wmb();
 
-
 	/* Save mbuf */
 	txq->sw_ring[idx] = mbuf;
 	/* Update current index*/
@@ -884,6 +887,7 @@ static int axgbe_xmit_hw(struct axgbe_tx_queue *txq,
 	return 0;
 }
 
+
 /* Tx Descriptor formation for segmented mbuf
  * Each mbuf will require multiple descriptors
  */
@@ -899,9 +903,26 @@ axgbe_xmit_hw_seg(struct axgbe_tx_queue *txq,
 	uint32_t pkt_len = 0;
 	int nb_desc_free;
 	struct rte_mbuf  *tx_pkt;
+	uint64_t l2_len = 0;
+	uint64_t l3_len = 0;
+	uint64_t l4_len = 0;
+	uint64_t total_hdr_len;
+	int tso = 0;
+
+	/*Parameters required for tso*/
+	l2_len = mbuf->l2_len;
+	l3_len = mbuf->l3_len;
+	l4_len = mbuf->l4_len;
+	total_hdr_len = l2_len + l3_len + l4_len;
+
+	if (txq->pdata->tso_tx)
+		tso = 1;
+	else
+		tso = 0;
 
-	nb_desc_free = txq->nb_desc - (txq->cur - txq->dirty);
+	PMD_DRV_LOG(DEBUG, "tso: Inside %s\n", __func__);
 
+	nb_desc_free = txq->nb_desc - (txq->cur - txq->dirty);
 	if (mbuf->nb_segs > nb_desc_free) {
 		axgbe_xmit_cleanup_seg(txq);
 		nb_desc_free = txq->nb_desc - (txq->cur - txq->dirty);
@@ -913,23 +934,27 @@ axgbe_xmit_hw_seg(struct axgbe_tx_queue *txq,
 	desc = &txq->desc[idx];
 	/* Saving the start index for setting the OWN bit finally */
 	start_index = idx;
-
 	tx_pkt = mbuf;
 	/* Max_pkt len = 9018 ; need to update it according to Jumbo pkt size */
 	pkt_len = tx_pkt->pkt_len;
 
 	/* Update buffer address  and length */
-	desc->baddr = rte_mbuf_data_iova(tx_pkt);
-	AXGMAC_SET_BITS_LE(desc->desc2, TX_NORMAL_DESC2, HL_B1L,
-					   tx_pkt->data_len);
-	/* Total msg length to transmit */
-	AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, FL,
-					   tx_pkt->pkt_len);
+	desc->baddr = rte_pktmbuf_iova_offset(mbuf, 0);
+	/*For TSO first buffer contains the Header */
+	if (tso)
+		AXGMAC_SET_BITS_LE(desc->desc2, TX_NORMAL_DESC2, HL_B1L,
+				total_hdr_len);
+	else
+		AXGMAC_SET_BITS_LE(desc->desc2, TX_NORMAL_DESC2, HL_B1L,
+				tx_pkt->data_len);
+	rte_wmb();
+
 	/* Timestamp enablement check */
 	if (mbuf->ol_flags & RTE_MBUF_F_TX_IEEE1588_TMST)
 		AXGMAC_SET_BITS_LE(desc->desc2, TX_NORMAL_DESC2, TTSE, 1);
 
 	rte_wmb();
+
 	/* Mark it as First Descriptor */
 	AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, FD, 1);
 	/* Mark it as a NORMAL descriptor */
@@ -959,19 +984,55 @@ axgbe_xmit_hw_seg(struct axgbe_tx_queue *txq,
 	}
 	rte_wmb();
 
+	/*Register settings for TSO*/
+	if (tso) {
+		PMD_DRV_LOG(DEBUG, "tso : Inside TSO register settings\n");
+		/* Enable TSO */
+		AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, TSE, 1);
+		AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, TPL,
+				((mbuf->pkt_len) - total_hdr_len));
+		AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, THL,
+				l4_len);
+	} else {
+		/* Enable CRC and Pad Insertion */
+		AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, CPC, 0);
+		/* Total msg length to transmit */
+		AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, FL,
+				mbuf->pkt_len);
+	}
+
 	/* Save mbuf */
 	txq->sw_ring[idx] = tx_pkt;
 	/* Update current index*/
 	txq->cur++;
 
+	/*For TSO , needs one more descriptor to hold
+	 * * the Payload
+	 * * *But while adding another descriptor packets are not transmitted
+	 */
+
+
+	idx = AXGBE_GET_DESC_IDX(txq, txq->cur);
+	desc = &txq->desc[idx];
+	desc->baddr = rte_pktmbuf_iova_offset(mbuf, total_hdr_len);
+	AXGMAC_SET_BITS_LE(desc->desc2,
+			TX_NORMAL_DESC2, HL_B1L, (mbuf->pkt_len) - total_hdr_len);
+	AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, CTXT, 0);
+	AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, OWN, 1);
+	rte_wmb();
+
+	txq->cur++;
 	tx_pkt = tx_pkt->next;
 
 	while (tx_pkt != NULL) {
 		idx = AXGBE_GET_DESC_IDX(txq, txq->cur);
 		desc = &txq->desc[idx];
 
-		/* Update buffer address  and length */
-		desc->baddr = rte_mbuf_data_iova(tx_pkt);
+		if (tso)
+			desc->baddr = rte_pktmbuf_iova_offset(mbuf, total_hdr_len);
+		else
+			/* Update buffer address  and length */
+			desc->baddr = rte_mbuf_data_iova(tx_pkt);
 
 		AXGMAC_SET_BITS_LE(desc->desc2,
 				TX_NORMAL_DESC2, HL_B1L, tx_pkt->data_len);
@@ -992,7 +1053,7 @@ axgbe_xmit_hw_seg(struct axgbe_tx_queue *txq,
 
 		rte_wmb();
 
-		 /* Set OWN bit */
+		/* Set OWN bit */
 		AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, OWN, 1);
 		rte_wmb();
 
@@ -1000,7 +1061,6 @@ axgbe_xmit_hw_seg(struct axgbe_tx_queue *txq,
 		txq->sw_ring[idx] = tx_pkt;
 		/* Update current index*/
 		txq->cur++;
-
 		tx_pkt = tx_pkt->next;
 	}
 
@@ -1015,7 +1075,6 @@ axgbe_xmit_hw_seg(struct axgbe_tx_queue *txq,
 	desc = &txq->desc[start_index];
 	AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, OWN, 1);
 	rte_wmb();
-
 	return 0;
 }
 
@@ -1061,6 +1120,7 @@ axgbe_xmit_pkts_seg(void *tx_queue, struct rte_mbuf **tx_pkts,
 				idx * sizeof(struct axgbe_tx_desc));
 	/* Update tail reg with next immediate address to kick Tx DMA channel*/
 	AXGMAC_DMA_IOWRITE(txq, DMA_CH_TDTR_LO, tail_addr);
+
 	txq->pkts += nb_pkt_sent;
 	return nb_pkt_sent;
 }
-- 
2.34.1


^ permalink raw reply	[flat|nested] 16+ messages in thread

* Re: [PATCH v3] net/axgbe: support TSO
  2023-11-16 16:03     ` [PATCH v3] " Jesna K E
@ 2023-11-17 18:34       ` Ferruh Yigit
  0 siblings, 0 replies; 16+ messages in thread
From: Ferruh Yigit @ 2023-11-17 18:34 UTC (permalink / raw)
  To: Jesna K E, dev; +Cc: Selwin.Sebastian

On 11/16/2023 4:03 PM, Jesna K E wrote:
> Added TSO support for axgbe PMD.
> 
> Initial Implementation for the TSO feature support
> Currently only headers transmitted to
> tester receiver side
> 
> Signed-off-by: Jesna K E <jesna.k.e@amd.com>
> ---
>  doc/guides/nics/features/axgbe.ini |  1 +
>  drivers/net/axgbe/axgbe_common.h   | 12 ++++
>  drivers/net/axgbe/axgbe_dev.c      | 13 +++++
>  drivers/net/axgbe/axgbe_ethdev.c   |  3 +
>  drivers/net/axgbe/axgbe_ethdev.h   |  1 +
>  drivers/net/axgbe/axgbe_rxtx.c     | 88 +++++++++++++++++++++++++-----
>  6 files changed, 104 insertions(+), 14 deletions(-)
> 
> diff --git a/doc/guides/nics/features/axgbe.ini b/doc/guides/nics/features/axgbe.ini
> index 5e2d6498e5..5c30c967bc 100644
> --- a/doc/guides/nics/features/axgbe.ini
> +++ b/doc/guides/nics/features/axgbe.ini
> @@ -7,6 +7,7 @@
>  Speed capabilities   = Y
>  Link status          = Y
>  Scattered Rx         = Y
> +TSO		     = Y
>  Promiscuous mode     = Y
>  Allmulticast mode    = Y
>  RSS hash             = Y
> diff --git a/drivers/net/axgbe/axgbe_common.h b/drivers/net/axgbe/axgbe_common.h
> index a5d11c5832..c30efe4c02 100644
> --- a/drivers/net/axgbe/axgbe_common.h
> +++ b/drivers/net/axgbe/axgbe_common.h
> @@ -161,6 +161,10 @@
>  #define DMA_CH_CARBR_LO			0x5c
>  #define DMA_CH_SR			0x60
>  
> +/* Setting MSS register entry bit positions and sizes for TSO */
> +#define DMA_CH_CR_MSS_INDEX             0
> +#define DMA_CH_CR_MSS_WIDTH             14
> +
>  /* DMA channel register entry bit positions and sizes */
>  #define DMA_CH_CR_PBLX8_INDEX		16
>  #define DMA_CH_CR_PBLX8_WIDTH		1
> @@ -1232,6 +1236,14 @@
>  #define TX_CONTEXT_DESC3_VT_INDEX		0
>  #define TX_CONTEXT_DESC3_VT_WIDTH		16
>  
> +/* TSO related register entry bit positions and sizes*/
> +#define TX_NORMAL_DESC3_TPL_INDEX               0
> +#define TX_NORMAL_DESC3_TPL_WIDTH               18
> +#define TX_NORMAL_DESC3_THL_INDEX               19
> +#define TX_NORMAL_DESC3_THL_WIDTH               4
> +#define TX_CONTEXT_DESC3_OSTC_INDEX             27
> +#define TX_CONTEXT_DESC3_OSTC_WIDTH             1
> +
>  #define TX_NORMAL_DESC2_HL_B1L_INDEX		0
>  #define TX_NORMAL_DESC2_HL_B1L_WIDTH		14
>  #define TX_NORMAL_DESC2_IC_INDEX		31
> diff --git a/drivers/net/axgbe/axgbe_dev.c b/drivers/net/axgbe/axgbe_dev.c
> index 6a7fddffca..eef453fab0 100644
> --- a/drivers/net/axgbe/axgbe_dev.c
> +++ b/drivers/net/axgbe/axgbe_dev.c
> @@ -808,6 +808,18 @@ int axgbe_write_rss_lookup_table(struct axgbe_port *pdata)
>  	return 0;
>  }
>  
> +static void xgbe_config_tso_mode(struct axgbe_port *pdata)
> +{
> +	unsigned int i;
> +	struct axgbe_tx_queue *txq;
> +
> +	for (i = 0; i < pdata->eth_dev->data->nb_tx_queues; i++) {
> +		txq = pdata->eth_dev->data->tx_queues[i];
> +		AXGMAC_DMA_IOWRITE_BITS(txq, DMA_CH_TCR, TSE, 1);
> +		AXGMAC_DMA_IOWRITE_BITS(txq, DMA_CH_CR, MSS, 800);
> +	}
> +}
> +
>  static int axgbe_enable_rss(struct axgbe_port *pdata)
>  {
>  	int ret;
> @@ -1314,6 +1326,7 @@ static int axgbe_init(struct axgbe_port *pdata)
>  	axgbe_config_rx_pbl_val(pdata);
>  	axgbe_config_rx_buffer_size(pdata);
>  	axgbe_config_rss(pdata);
> +	xgbe_config_tso_mode(pdata);
>

Driver namespace is 'axgbe'; all other functions/variables start with
it, but the new additions start with 'xgbe'. What do you think about
renaming them to 'axgbe' for consistency?
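
For reference, a minimal sketch of the rename (behavior unchanged):

	-static void xgbe_config_tso_mode(struct axgbe_port *pdata)
	+static void axgbe_config_tso_mode(struct axgbe_port *pdata)
	...
	-	xgbe_config_tso_mode(pdata);
	+	axgbe_config_tso_mode(pdata);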


>  	wrapper_tx_desc_init(pdata);
>  	ret = wrapper_rx_desc_init(pdata);
>  	if (ret)
> diff --git a/drivers/net/axgbe/axgbe_ethdev.c b/drivers/net/axgbe/axgbe_ethdev.c
> index 3717166384..0a4901aabc 100644
> --- a/drivers/net/axgbe/axgbe_ethdev.c
> +++ b/drivers/net/axgbe/axgbe_ethdev.c
> @@ -12,6 +12,8 @@
>  
>  #include "eal_filesystem.h"
>  
> +#include <rte_vect.h>
> +
>  #ifdef RTE_ARCH_X86
>  #include <cpuid.h>
>  #else
> @@ -1237,6 +1239,7 @@ axgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
>  		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM  |
>  		RTE_ETH_TX_OFFLOAD_MULTI_SEGS  |
>  		RTE_ETH_TX_OFFLOAD_UDP_CKSUM   |
> +		RTE_ETH_TX_OFFLOAD_TCP_TSO     |
>  		RTE_ETH_TX_OFFLOAD_TCP_CKSUM;
>  
>  	if (pdata->hw_feat.rss) {
> diff --git a/drivers/net/axgbe/axgbe_ethdev.h b/drivers/net/axgbe/axgbe_ethdev.h
> index 7f19321d88..31a583c2c6 100644
> --- a/drivers/net/axgbe/axgbe_ethdev.h
> +++ b/drivers/net/axgbe/axgbe_ethdev.h
> @@ -583,6 +583,7 @@ struct axgbe_port {
>  	unsigned int tx_osp_mode;
>  	unsigned int tx_max_fifo_size;
>  	unsigned int multi_segs_tx;
> +	unsigned int tso_tx;
>  
>  	/* Rx settings */
>  	unsigned int rx_sf_mode;
> diff --git a/drivers/net/axgbe/axgbe_rxtx.c b/drivers/net/axgbe/axgbe_rxtx.c
> index a9ff291cef..b0cafcbdda 100644
> --- a/drivers/net/axgbe/axgbe_rxtx.c
> +++ b/drivers/net/axgbe/axgbe_rxtx.c
> @@ -627,6 +627,9 @@ int axgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
>  				RTE_ETH_TX_OFFLOAD_MULTI_SEGS))
>  		pdata->multi_segs_tx = true;
>  
> +	if ((dev_data->dev_conf.txmode.offloads &
> +				RTE_ETH_TX_OFFLOAD_TCP_TSO))
> +		pdata->tso_tx = true;
>  
>  	return 0;
>  }
> @@ -827,6 +830,7 @@ static int axgbe_xmit_hw(struct axgbe_tx_queue *txq,
>  
>  	idx = AXGBE_GET_DESC_IDX(txq, txq->cur);
>  	desc = &txq->desc[idx];
> +	PMD_DRV_LOG(DEBUG, "tso: Inside %s\n", __func__);
>  

This log is in the datapath; please either convert it to a datapath log,
which is removed at compile time if the level is not required, or remove it.

Looking at the message in the log, it doesn't look like something useful
per packet, so I believe it can be removed.
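
If a log does turn out to be useful here, a datapath variant could use
the generic RTE_LOG_DP() macro, which is compiled out when
RTE_LOG_DP_LEVEL is below the given level. Just a sketch, assuming the
generic PMD logtype:

	RTE_LOG_DP(DEBUG, PMD, "%s: preparing Tx descriptors\n", __func__);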


>  	/* Update buffer address  and length */
>  	desc->baddr = rte_mbuf_data_iova(mbuf);
> @@ -873,7 +877,6 @@ static int axgbe_xmit_hw(struct axgbe_tx_queue *txq,
>  	AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, OWN, 1);
>  	rte_wmb();
>  
> -
>  	/* Save mbuf */
>  	txq->sw_ring[idx] = mbuf;
>  	/* Update current index*/
> @@ -884,6 +887,7 @@ static int axgbe_xmit_hw(struct axgbe_tx_queue *txq,
>  	return 0;
>  }
>  
> +
>

Unrelated change, please drop.


>  /* Tx Descriptor formation for segmented mbuf
>   * Each mbuf will require multiple descriptors
>   */
> @@ -899,9 +903,26 @@ axgbe_xmit_hw_seg(struct axgbe_tx_queue *txq,
>  	uint32_t pkt_len = 0;
>  	int nb_desc_free;
>  	struct rte_mbuf  *tx_pkt;
> +	uint64_t l2_len = 0;
> +	uint64_t l3_len = 0;
> +	uint64_t l4_len = 0;
> +	uint64_t total_hdr_len;
> +	int tso = 0;
> +
> +	/*Parameters required for tso*/
> +	l2_len = mbuf->l2_len;
> +	l3_len = mbuf->l3_len;
> +	l4_len = mbuf->l4_len;
> +	total_hdr_len = l2_len + l3_len + l4_len;
> +
> +	if (txq->pdata->tso_tx)
> +		tso = 1;
> +	else
> +		tso = 0;
>  
> -	nb_desc_free = txq->nb_desc - (txq->cur - txq->dirty);
> +	PMD_DRV_LOG(DEBUG, "tso: Inside %s\n", __func__);
>

Same comment as above for logging.


>  
> +	nb_desc_free = txq->nb_desc - (txq->cur - txq->dirty);
>  	if (mbuf->nb_segs > nb_desc_free) {
>  		axgbe_xmit_cleanup_seg(txq);
>  		nb_desc_free = txq->nb_desc - (txq->cur - txq->dirty);
> @@ -913,23 +934,27 @@ axgbe_xmit_hw_seg(struct axgbe_tx_queue *txq,
>  	desc = &txq->desc[idx];
>  	/* Saving the start index for setting the OWN bit finally */
>  	start_index = idx;
> -
>  	tx_pkt = mbuf;
>  	/* Max_pkt len = 9018 ; need to update it according to Jumbo pkt size */
>  	pkt_len = tx_pkt->pkt_len;
>  
>  	/* Update buffer address  and length */
> -	desc->baddr = rte_mbuf_data_iova(tx_pkt);
> -	AXGMAC_SET_BITS_LE(desc->desc2, TX_NORMAL_DESC2, HL_B1L,
> -					   tx_pkt->data_len);
> -	/* Total msg length to transmit */
> -	AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, FL,
> -					   tx_pkt->pkt_len);
> +	desc->baddr = rte_pktmbuf_iova_offset(mbuf, 0);
>

This is exactly the same as "desc->baddr = rte_mbuf_data_iova(tx_pkt);",
right? Is the update intentional?


> +	/*For TSO first buffer contains the Header */
> +	if (tso)
> +		AXGMAC_SET_BITS_LE(desc->desc2, TX_NORMAL_DESC2, HL_B1L,
> +				total_hdr_len);
> +	else
> +		AXGMAC_SET_BITS_LE(desc->desc2, TX_NORMAL_DESC2, HL_B1L,
> +				tx_pkt->data_len);
> +	rte_wmb();
> +
>  	/* Timestamp enablement check */
>  	if (mbuf->ol_flags & RTE_MBUF_F_TX_IEEE1588_TMST)
>  		AXGMAC_SET_BITS_LE(desc->desc2, TX_NORMAL_DESC2, TTSE, 1);
>  
>  	rte_wmb();
> +
>  	/* Mark it as First Descriptor */
>  	AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, FD, 1);
>  	/* Mark it as a NORMAL descriptor */
> @@ -959,19 +984,55 @@ axgbe_xmit_hw_seg(struct axgbe_tx_queue *txq,
>  	}
>  	rte_wmb();
>  
> +	/*Register settings for TSO*/
> +	if (tso) {
> +		PMD_DRV_LOG(DEBUG, "tso : Inside TSO register settings\n");
> +		/* Enable TSO */
> +		AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, TSE, 1);
> +		AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, TPL,
> +				((mbuf->pkt_len) - total_hdr_len));
> +		AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, THL,
> +				l4_len);
> +	} else {
> +		/* Enable CRC and Pad Insertion */
> +		AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, CPC, 0);
>

The above changes the default (non-TSO) configuration, right? I just
want to confirm that this is intentional.


> +		/* Total msg length to transmit */
> +		AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, FL,
> +				mbuf->pkt_len);
> +	}
> +
>  	/* Save mbuf */
>  	txq->sw_ring[idx] = tx_pkt;
>  	/* Update current index*/
>  	txq->cur++;
>  
> +	/*For TSO , needs one more descriptor to hold
> +	 * * the Payload
> +	 * * *But while adding another descriptor packets are not transmitted
> +	 */
>

The format of the above comment is wrong, but it is also not clear what
it means; is it residue from development?


> +
> +
>


Extra empty line.

> +	idx = AXGBE_GET_DESC_IDX(txq, txq->cur);
> +	desc = &txq->desc[idx];
> +	desc->baddr = rte_pktmbuf_iova_offset(mbuf, total_hdr_len);
> +	AXGMAC_SET_BITS_LE(desc->desc2,
> +			TX_NORMAL_DESC2, HL_B1L, (mbuf->pkt_len) - total_hdr_len);
>

Can't use 'mbuf->pkt_len' directly here, in case it is a segmented mbuf.
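
A sketch of what I mean, assuming this descriptor should only cover the
payload bytes that are actually present in the first segment:

	AXGMAC_SET_BITS_LE(desc->desc2, TX_NORMAL_DESC2, HL_B1L,
			mbuf->data_len - total_hdr_len);

with the remaining segments handled by the while loop below.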


> +	AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, CTXT, 0);
> +	AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, OWN, 1);
> +	rte_wmb();
> +
> +	txq->cur++;
>


Shouldn't the above block be executed only when TSO is enabled, i.e.
inside an "if (tso) {" condition?


>  	tx_pkt = tx_pkt->next;
>  
>  	while (tx_pkt != NULL) {
>  		idx = AXGBE_GET_DESC_IDX(txq, txq->cur);
>  		desc = &txq->desc[idx];
>  
> -		/* Update buffer address  and length */
> -		desc->baddr = rte_mbuf_data_iova(tx_pkt);
> +		if (tso)
> +			desc->baddr = rte_pktmbuf_iova_offset(mbuf, total_hdr_len);
>

This code iterates over the mbufs in a chained mbuf list, and on each
iteration 'tx_pkt' is the reference to the current mbuf.
The 'mbuf' pointer refers to the first mbuf in the list.

So the above should use 'tx_pkt' to fill the address in the descriptor,
not 'mbuf', as the original code does. And the offset should be zero
for 'tx_pkt'.

It may be good to test chained mbufs with TSO.
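
For example, with testpmd in csum forwarding mode (a sketch; exact
options and values may differ per setup):

	dpdk-testpmd -a <bdf> -- -i --enable-scatter --max-pkt-len=9000
	testpmd> port stop 0
	testpmd> tso set 800 0
	testpmd> port start 0
	testpmd> set fwd csum
	testpmd> start

Scattered Rx of large frames then produces chained mbufs that exercise
the TSO Tx path.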


> +		else
> +			/* Update buffer address  and length */
> +			desc->baddr = rte_mbuf_data_iova(tx_pkt);
>  
>  		AXGMAC_SET_BITS_LE(desc->desc2,
>  				TX_NORMAL_DESC2, HL_B1L, tx_pkt->data_len);
> @@ -992,7 +1053,7 @@ axgbe_xmit_hw_seg(struct axgbe_tx_queue *txq,
>  
>  		rte_wmb();
>  
> -		 /* Set OWN bit */
> +		/* Set OWN bit */
>  		AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, OWN, 1);
>  		rte_wmb();
>  
> @@ -1000,7 +1061,6 @@ axgbe_xmit_hw_seg(struct axgbe_tx_queue *txq,
>  		txq->sw_ring[idx] = tx_pkt;
>  		/* Update current index*/
>  		txq->cur++;
> -
>  		tx_pkt = tx_pkt->next;
>  	}
>  
> @@ -1015,7 +1075,6 @@ axgbe_xmit_hw_seg(struct axgbe_tx_queue *txq,
>  	desc = &txq->desc[start_index];
>  	AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, OWN, 1);
>  	rte_wmb();
> -
>  	return 0;
>  }
>  
> @@ -1061,6 +1120,7 @@ axgbe_xmit_pkts_seg(void *tx_queue, struct rte_mbuf **tx_pkts,
>  				idx * sizeof(struct axgbe_tx_desc));
>  	/* Update tail reg with next immediate address to kick Tx DMA channel*/
>  	AXGMAC_DMA_IOWRITE(txq, DMA_CH_TDTR_LO, tail_addr);
> +
>  	txq->pkts += nb_pkt_sent;
>  	return nb_pkt_sent;
>  }

It may be good to drop the whitespace-only changes to prevent noise.
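
Plain 'git diff --check' or DPDK's devtools/checkpatches.sh should flag
most of them; usage sketch:

	$ git diff --check
	$ ./devtools/checkpatches.sh <your.patch>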


^ permalink raw reply	[flat|nested] 16+ messages in thread

end of thread, other threads:[~2023-11-17 18:35 UTC | newest]

Thread overview: 16+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2023-11-11 16:00 [PATCH v1 1/3] net/axgbe: packet size doesn't exceed the configured MTU Jesna K E
2023-11-11 16:00 ` [PATCH v1 2/3] net/axgbe: correct API call when offload enabled Jesna K E
2023-11-13 15:23   ` Ferruh Yigit
2023-11-13 16:55     ` Ferruh Yigit
2023-11-14  6:07       ` [PATCH v2] net/axgbe: invoke correct API when offloads enabled Jesna K E
2023-11-14  7:15       ` Jesna K E
2023-11-15  5:56       ` [PATCH v3] " Jesna K E
2023-11-15 11:57         ` Ferruh Yigit
2023-11-15 12:54         ` Sebastian, Selwin
2023-11-15 12:59           ` Ferruh Yigit
2023-11-11 16:00 ` [PATCH v1 3/3] net/axgbe: support TSO Implementation Jesna K E
2023-11-15 19:33   ` Ferruh Yigit
2023-11-16  9:44     ` [PATCH v2] net/axgbe: support TSO Jesna K E
2023-11-16 16:03     ` [PATCH v3] " Jesna K E
2023-11-17 18:34       ` Ferruh Yigit
2023-11-13 15:07 ` [PATCH v1 1/3] net/axgbe: packet size doesn't exceed the configured MTU Ferruh Yigit
