From: Jesna K E <jesna.k.e@amd.com>
To: <dev@dpdk.org>
Cc: <Ferruh.Yigit@amd.com>, <Selwin.Sebastian@amd.com>,
Jesna K E <jesna.k.e@amd.com>
Subject: [PATCH v1 3/3] net/axgbe: support TSO Implementation
Date: Sat, 11 Nov 2023 21:30:06 +0530 [thread overview]
Message-ID: <20231111160006.455767-3-jesna.k.e@amd.com> (raw)
In-Reply-To: <20231111160006.455767-1-jesna.k.e@amd.com>
Signed-off-by: Jesna K E <jesna.k.e@amd.com>
---
drivers/net/axgbe/axgbe_common.h | 11 +
drivers/net/axgbe/axgbe_dev.c | 19 ++
drivers/net/axgbe/axgbe_ethdev.c | 1 +
drivers/net/axgbe/axgbe_ethdev.h | 1 +
drivers/net/axgbe/axgbe_rxtx.c | 305 +++++++++++++++----------
drivers/net/axgbe/axgbe_rxtx_vec_sse.c | 1 +
6 files changed, 223 insertions(+), 115 deletions(-)
diff --git a/drivers/net/axgbe/axgbe_common.h b/drivers/net/axgbe/axgbe_common.h
index a5d11c5832..1face6f361 100644
--- a/drivers/net/axgbe/axgbe_common.h
+++ b/drivers/net/axgbe/axgbe_common.h
@@ -162,6 +162,9 @@
#define DMA_CH_SR 0x60
/* DMA channel register entry bit positions and sizes */
+/* TSO: MSS (Maximum Segment Size) bit-field in the DMA channel control register */
+#define DMA_CH_CR_MSS_INDEX 0
+#define DMA_CH_CR_MSS_WIDTH 14
#define DMA_CH_CR_PBLX8_INDEX 16
#define DMA_CH_CR_PBLX8_WIDTH 1
#define DMA_CH_CR_SPH_INDEX 24
@@ -1232,6 +1235,14 @@
#define TX_CONTEXT_DESC3_VT_INDEX 0
#define TX_CONTEXT_DESC3_VT_WIDTH 16
+/* TSO: TPL (TCP Payload Length) and THL (TCP Header Length) fields of Tx descriptor word 3 */
+#define TX_NORMAL_DESC3_TPL_INDEX 0
+#define TX_NORMAL_DESC3_TPL_WIDTH 18
+#define TX_NORMAL_DESC3_THL_INDEX 19
+#define TX_NORMAL_DESC3_THL_WIDTH 4
+#define TX_CONTEXT_DESC3_OSTC_INDEX 27
+#define TX_CONTEXT_DESC3_OSTC_WIDTH 1
+
#define TX_NORMAL_DESC2_HL_B1L_INDEX 0
#define TX_NORMAL_DESC2_HL_B1L_WIDTH 14
#define TX_NORMAL_DESC2_IC_INDEX 31
diff --git a/drivers/net/axgbe/axgbe_dev.c b/drivers/net/axgbe/axgbe_dev.c
index 6a7fddffca..7e0d387fc3 100644
--- a/drivers/net/axgbe/axgbe_dev.c
+++ b/drivers/net/axgbe/axgbe_dev.c
@@ -808,6 +808,24 @@ int axgbe_write_rss_lookup_table(struct axgbe_port *pdata)
return 0;
}
+
+/* Enable TSO on every configured Tx DMA channel: set the TSE bit in
+ * DMA_CH_TCR and program the Maximum Segment Size in DMA_CH_CR.
+ */
+static void xgbe_config_tso_mode(struct axgbe_port *pdata)
+{
+	struct axgbe_tx_queue *txq;
+	unsigned int i;
+
+	for (i = 0; i < pdata->eth_dev->data->nb_tx_queues; i++) {
+		txq = pdata->eth_dev->data->tx_queues[i];
+		AXGMAC_DMA_IOWRITE_BITS(txq, DMA_CH_TCR, TSE, 1);
+		/* FIXME(review): 800 is a magic number; define a named
+		 * AXGBE_TSO_DEFAULT_MSS constant in axgbe_common.h.
+		 */
+		AXGMAC_DMA_IOWRITE_BITS(txq, DMA_CH_CR, MSS, 800);
+	}
+}
static int axgbe_enable_rss(struct axgbe_port *pdata)
{
int ret;
@@ -1314,6 +1332,7 @@ static int axgbe_init(struct axgbe_port *pdata)
axgbe_config_rx_pbl_val(pdata);
axgbe_config_rx_buffer_size(pdata);
axgbe_config_rss(pdata);
+ xgbe_config_tso_mode(pdata);
wrapper_tx_desc_init(pdata);
ret = wrapper_rx_desc_init(pdata);
if (ret)
diff --git a/drivers/net/axgbe/axgbe_ethdev.c b/drivers/net/axgbe/axgbe_ethdev.c
index e1cb60c1c3..5aa8743a1a 100644
--- a/drivers/net/axgbe/axgbe_ethdev.c
+++ b/drivers/net/axgbe/axgbe_ethdev.c
@@ -1237,6 +1237,7 @@ axgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_TCP_TSO |
RTE_ETH_TX_OFFLOAD_TCP_CKSUM;
if (pdata->hw_feat.rss) {
diff --git a/drivers/net/axgbe/axgbe_ethdev.h b/drivers/net/axgbe/axgbe_ethdev.h
index 7f19321d88..31a583c2c6 100644
--- a/drivers/net/axgbe/axgbe_ethdev.h
+++ b/drivers/net/axgbe/axgbe_ethdev.h
@@ -583,6 +583,7 @@ struct axgbe_port {
unsigned int tx_osp_mode;
unsigned int tx_max_fifo_size;
unsigned int multi_segs_tx;
+ unsigned int tso_tx;
/* Rx settings */
unsigned int rx_sf_mode;
diff --git a/drivers/net/axgbe/axgbe_rxtx.c b/drivers/net/axgbe/axgbe_rxtx.c
index 68aa67a3fa..6b5ea6d622 100644
--- a/drivers/net/axgbe/axgbe_rxtx.c
+++ b/drivers/net/axgbe/axgbe_rxtx.c
@@ -643,6 +643,10 @@ int axgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
RTE_ETH_TX_OFFLOAD_MULTI_SEGS))
pdata->multi_segs_tx = true;
+ if ((dev_data->dev_conf.txmode.offloads &
+ RTE_ETH_TX_OFFLOAD_TCP_TSO))
+ pdata->tso_tx = true;
+
return 0;
}
@@ -843,7 +847,7 @@ static int axgbe_xmit_hw(struct axgbe_tx_queue *txq,
idx = AXGBE_GET_DESC_IDX(txq, txq->cur);
desc = &txq->desc[idx];
-
+ printf("tso::Inside axgbe_xmit_hw \n"); /* FIXME(review): debug printf in Tx fast path - remove before merge */
/* Update buffer address and length */
desc->baddr = rte_mbuf_data_iova(mbuf);
AXGMAC_SET_BITS_LE(desc->desc2, TX_NORMAL_DESC2, HL_B1L,
@@ -889,7 +893,6 @@ static int axgbe_xmit_hw(struct axgbe_tx_queue *txq,
AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, OWN, 1);
rte_wmb();
-
/* Save mbuf */
txq->sw_ring[idx] = mbuf;
/* Update current index*/
@@ -900,138 +903,208 @@ static int axgbe_xmit_hw(struct axgbe_tx_queue *txq,
return 0;
}
+
/* Tx Descriptor formation for segmented mbuf
* Each mbuf will require multiple descriptors
*/
static int
axgbe_xmit_hw_seg(struct axgbe_tx_queue *txq,
- struct rte_mbuf *mbuf)
+ struct rte_mbuf *mbuf)
{
- volatile struct axgbe_tx_desc *desc;
- uint16_t idx;
- uint64_t mask;
- int start_index;
- uint32_t pkt_len = 0;
- int nb_desc_free;
- struct rte_mbuf *tx_pkt;
+ volatile struct axgbe_tx_desc *desc;
+ uint16_t idx;
+ uint64_t mask;
+ int start_index;
+ uint32_t pkt_len = 0;
+ int nb_desc_free;
+ struct rte_mbuf *tx_pkt;
+ uint64_t l2_len = 0;
+ uint64_t l3_len = 0;
+ uint64_t l4_len = 0;
+ uint64_t tso_segsz = 0;
+ uint64_t total_hdr_len;
+ int tso = 0;
+
+ /*Parameters required for tso*/
+ l2_len = mbuf->l2_len;
+ l3_len = mbuf->l3_len;
+ l4_len = mbuf->l4_len;
+ tso_segsz = mbuf->tso_segsz;
+ total_hdr_len = l2_len + l3_len + l4_len;
+
+ if ((txq->pdata->tso_tx)) /* NOTE(review): could simply be 'tso = txq->pdata->tso_tx;' */
+ tso = 1;
+ else
+ tso = 0;
+
+ printf("tso:l2_len = %ld,l3_len=%ld,l4_len=%ld,tso_segsz=%ld,total_hdr_len%ld\n",l2_len,l3_len,l4_len,
+ tso_segsz,total_hdr_len); /* FIXME(review): debug printf - remove; also these are uint64_t, so use PRIu64, not %ld */
+
+ nb_desc_free = txq->nb_desc - (txq->cur - txq->dirty);
+
+ printf("tso::Inside axgbe_xmit_hw_seg \n"); /* FIXME(review): debug printf - remove before merge */
+ if (mbuf->nb_segs > nb_desc_free) {
+ axgbe_xmit_cleanup_seg(txq);
+ nb_desc_free = txq->nb_desc - (txq->cur - txq->dirty);
+ if (unlikely(mbuf->nb_segs > nb_desc_free))
+ return RTE_ETH_TX_DESC_UNAVAIL;
+ }
+
+ idx = AXGBE_GET_DESC_IDX(txq, txq->cur);
+ desc = &txq->desc[idx];
+ /* Saving the start index for setting the OWN bit finally */
+ start_index = idx;
+ tx_pkt = mbuf;
+ /* Max_pkt len = 9018 ; need to update it according to Jumbo pkt size */
+ pkt_len = tx_pkt->pkt_len;
- nb_desc_free = txq->nb_desc - (txq->cur - txq->dirty);
+ /* Update buffer address and length */
+ desc->baddr = rte_pktmbuf_iova_offset(mbuf,0);
+ /*For TSO first buffer contains the Header */
+ if (tso)
+ AXGMAC_SET_BITS_LE(desc->desc2, TX_NORMAL_DESC2, HL_B1L,
+ total_hdr_len);
+ else
+ AXGMAC_SET_BITS_LE(desc->desc2, TX_NORMAL_DESC2, HL_B1L,
+ tx_pkt->data_len);
- if (mbuf->nb_segs > nb_desc_free) {
- axgbe_xmit_cleanup_seg(txq);
- nb_desc_free = txq->nb_desc - (txq->cur - txq->dirty);
- if (unlikely(mbuf->nb_segs > nb_desc_free))
- return RTE_ETH_TX_DESC_UNAVAIL;
- }
+ rte_wmb();
+ /* Timestamp enablement check */
+ if (mbuf->ol_flags & RTE_MBUF_F_TX_IEEE1588_TMST)
+ AXGMAC_SET_BITS_LE(desc->desc2, TX_NORMAL_DESC2, TTSE, 1);
+
+ rte_wmb();
+ /* Mark it as First Descriptor */
+ AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, FD, 1);
+ /* Mark it as a NORMAL descriptor */
+ AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, CTXT, 0);
+ /* configure h/w Offload */
+ mask = mbuf->ol_flags & RTE_MBUF_F_TX_L4_MASK;
+ if (mask == RTE_MBUF_F_TX_TCP_CKSUM || mask == RTE_MBUF_F_TX_UDP_CKSUM)
+ AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, CIC, 0x3);
+ else if (mbuf->ol_flags & RTE_MBUF_F_TX_IP_CKSUM)
+ AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, CIC, 0x1);
+ rte_wmb();
+
+ if (mbuf->ol_flags & (RTE_MBUF_F_TX_VLAN | RTE_MBUF_F_TX_QINQ)) {
+ /* Mark it as a CONTEXT descriptor */
+ AXGMAC_SET_BITS_LE(desc->desc3, TX_CONTEXT_DESC3,
+ CTXT, 1);
+ /* Set the VLAN tag */
+ AXGMAC_SET_BITS_LE(desc->desc3, TX_CONTEXT_DESC3,
+ VT, mbuf->vlan_tci);
+ /* Indicate this descriptor contains the VLAN tag */
+ AXGMAC_SET_BITS_LE(desc->desc3, TX_CONTEXT_DESC3,
+ VLTV, 1);
+ AXGMAC_SET_BITS_LE(desc->desc2, TX_NORMAL_DESC2, VTIR,
+ TX_NORMAL_DESC2_VLAN_INSERT);
+ } else {
+ AXGMAC_SET_BITS_LE(desc->desc2, TX_NORMAL_DESC2, VTIR, 0x0);
+ }
+ rte_wmb();
+
+ /*Register settings for TSO*/
+ if (tso) {
+ printf("Inside register setting-tso\n"); /* FIXME(review): debug printf - remove before merge */
+ /* Enable TSO */
+ AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, TSE,1);
+ AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, TPL,
+ ((mbuf->pkt_len)-total_hdr_len)); /* TPL = TCP payload length: total frame minus L2+L3+L4 headers */
+ AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, THL,
+ l4_len); /* NOTE(review): THL may be expressed in 32-bit words by this hardware - confirm l4_len units */
+ } else {
+ /* Enable CRC and Pad Insertion */
+ AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, CPC, 0);
+ /* Total msg length to transmit */
+ AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, FL,
+ mbuf->pkt_len);
+ }
+#if 0 /* FIXME(review): dead experimental code - either implement the extra TSO payload descriptor or drop this block before merge */
+ /*For TSO , needs one more descriptor to hold
+ * the Payload
+ * But while adding another descriptor packets are not
+ * transmitted */
+ /* Save mbuf */
+ txq->sw_ring[idx] = tx_pkt;
+ /* Update current index*/
+ txq->cur++;
idx = AXGBE_GET_DESC_IDX(txq, txq->cur);
desc = &txq->desc[idx];
- /* Saving the start index for setting the OWN bit finally */
- start_index = idx;
+ desc->baddr = rte_pktmbuf_iova_offset(mbuf,total_hdr_len);
+ AXGMAC_SET_BITS_LE(desc->desc2,
+ TX_NORMAL_DESC2, HL_B1L, (mbuf->pkt_len)-total_hdr_len));
- tx_pkt = mbuf;
- /* Max_pkt len = 9018 ; need to update it according to Jumbo pkt size */
- pkt_len = tx_pkt->pkt_len;
+ printf("(mbuf->pkt_len)-total_hdr_len=%d\n",(mbuf->pkt_len)-total_hdr_len);
+ printf("total_hdr_len=%d\n",total_hdr_len);
- /* Update buffer address and length */
- desc->baddr = rte_mbuf_data_iova(tx_pkt);
- AXGMAC_SET_BITS_LE(desc->desc2, TX_NORMAL_DESC2, HL_B1L,
- tx_pkt->data_len);
- /* Total msg length to transmit */
- AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, FL,
- tx_pkt->pkt_len);
- /* Timestamp enablement check */
- if (mbuf->ol_flags & RTE_MBUF_F_TX_IEEE1588_TMST)
- AXGMAC_SET_BITS_LE(desc->desc2, TX_NORMAL_DESC2, TTSE, 1);
-
- rte_wmb();
- /* Mark it as First Descriptor */
- AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, FD, 1);
- /* Mark it as a NORMAL descriptor */
AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, CTXT, 0);
- /* configure h/w Offload */
- mask = mbuf->ol_flags & RTE_MBUF_F_TX_L4_MASK;
- if (mask == RTE_MBUF_F_TX_TCP_CKSUM || mask == RTE_MBUF_F_TX_UDP_CKSUM)
- AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, CIC, 0x3);
- else if (mbuf->ol_flags & RTE_MBUF_F_TX_IP_CKSUM)
- AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, CIC, 0x1);
- rte_wmb();
-
- if (mbuf->ol_flags & (RTE_MBUF_F_TX_VLAN | RTE_MBUF_F_TX_QINQ)) {
- /* Mark it as a CONTEXT descriptor */
- AXGMAC_SET_BITS_LE(desc->desc3, TX_CONTEXT_DESC3,
- CTXT, 1);
- /* Set the VLAN tag */
- AXGMAC_SET_BITS_LE(desc->desc3, TX_CONTEXT_DESC3,
- VT, mbuf->vlan_tci);
- /* Indicate this descriptor contains the VLAN tag */
- AXGMAC_SET_BITS_LE(desc->desc3, TX_CONTEXT_DESC3,
- VLTV, 1);
- AXGMAC_SET_BITS_LE(desc->desc2, TX_NORMAL_DESC2, VTIR,
- TX_NORMAL_DESC2_VLAN_INSERT);
- } else {
- AXGMAC_SET_BITS_LE(desc->desc2, TX_NORMAL_DESC2, VTIR, 0x0);
- }
+ AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, OWN, 1);
rte_wmb();
-
- /* Save mbuf */
- txq->sw_ring[idx] = tx_pkt;
- /* Update current index*/
txq->cur++;
-
- tx_pkt = tx_pkt->next;
+#endif
+#if 1 /* FIXME(review): remove the unconditional '#if 1' wrapper */
+ /* Save mbuf */
+ txq->sw_ring[idx] = tx_pkt;
+ /* Update current index*/
+ txq->cur++;
+#endif
+ tx_pkt = tx_pkt->next;
while (tx_pkt != NULL) {
- idx = AXGBE_GET_DESC_IDX(txq, txq->cur);
- desc = &txq->desc[idx];
-
- /* Update buffer address and length */
- desc->baddr = rte_mbuf_data_iova(tx_pkt);
-
- AXGMAC_SET_BITS_LE(desc->desc2,
- TX_NORMAL_DESC2, HL_B1L, tx_pkt->data_len);
-
- rte_wmb();
-
- /* Mark it as a NORMAL descriptor */
- AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, CTXT, 0);
- /* configure h/w Offload */
- mask = mbuf->ol_flags & RTE_MBUF_F_TX_L4_MASK;
- if (mask == RTE_MBUF_F_TX_TCP_CKSUM ||
- mask == RTE_MBUF_F_TX_UDP_CKSUM)
- AXGMAC_SET_BITS_LE(desc->desc3,
- TX_NORMAL_DESC3, CIC, 0x3);
- else if (mbuf->ol_flags & RTE_MBUF_F_TX_IP_CKSUM)
- AXGMAC_SET_BITS_LE(desc->desc3,
- TX_NORMAL_DESC3, CIC, 0x1);
-
- rte_wmb();
-
- /* Set OWN bit */
- AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, OWN, 1);
- rte_wmb();
-
- /* Save mbuf */
- txq->sw_ring[idx] = tx_pkt;
- /* Update current index*/
- txq->cur++;
-
- tx_pkt = tx_pkt->next;
- }
-
- /* Set LD bit for the last descriptor */
- AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, LD, 1);
- rte_wmb();
-
- /* Update stats */
- txq->bytes += pkt_len;
-
- /* Set OWN bit for the first descriptor */
- desc = &txq->desc[start_index];
- AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, OWN, 1);
- rte_wmb();
-
+ idx = AXGBE_GET_DESC_IDX(txq, txq->cur);
+ desc = &txq->desc[idx];
+
+ if (tso)
+ desc->baddr = rte_pktmbuf_iova_offset(mbuf,total_hdr_len); /* NOTE(review): uses 'mbuf' (first segment), not 'tx_pkt' - every chained descriptor points at the same offset of segment 0 while HL_B1L below is tx_pkt->data_len; looks wrong for multi-segment TSO, please confirm */
+ else
+ /* Update buffer address and length */
+ desc->baddr = rte_mbuf_data_iova(tx_pkt);
+
+ AXGMAC_SET_BITS_LE(desc->desc2,
+ TX_NORMAL_DESC2, HL_B1L, tx_pkt->data_len);
+
+ rte_wmb();
+
+ /* Mark it as a NORMAL descriptor */
+ AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, CTXT, 0);
+ /* configure h/w Offload */
+ mask = mbuf->ol_flags & RTE_MBUF_F_TX_L4_MASK;
+ if (mask == RTE_MBUF_F_TX_TCP_CKSUM ||
+ mask == RTE_MBUF_F_TX_UDP_CKSUM)
+ AXGMAC_SET_BITS_LE(desc->desc3,
+ TX_NORMAL_DESC3, CIC, 0x3);
+ else if (mbuf->ol_flags & RTE_MBUF_F_TX_IP_CKSUM)
+ AXGMAC_SET_BITS_LE(desc->desc3,
+ TX_NORMAL_DESC3, CIC, 0x1);
+
+ rte_wmb();
+
+ /* Set OWN bit */
+ AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, OWN, 1);
+ rte_wmb();
+
+ /* Save mbuf */
+ txq->sw_ring[idx] = tx_pkt;
+ /* Update current index*/
+ txq->cur++;
+
+ tx_pkt = tx_pkt->next;
+ }
+
+ /* Set LD bit for the last descriptor */
+ AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, LD, 1);
+ rte_wmb();
+
+ printf("tso:: pkt_len = %d\n",pkt_len); /* FIXME(review): debug printf - remove before merge */
+ /* Update stats */
+ txq->bytes += pkt_len;
+
+ /* Set OWN bit for the first descriptor */
+ desc = &txq->desc[start_index];
+ AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, OWN, 1);
+ rte_wmb();
return 0;
}
@@ -1077,6 +1150,8 @@ axgbe_xmit_pkts_seg(void *tx_queue, struct rte_mbuf **tx_pkts,
idx * sizeof(struct axgbe_tx_desc));
/* Update tail reg with next immediate address to kick Tx DMA channel*/
AXGMAC_DMA_IOWRITE(txq, DMA_CH_TDTR_LO, tail_addr);
+
+
txq->pkts += nb_pkt_sent;
return nb_pkt_sent;
}
diff --git a/drivers/net/axgbe/axgbe_rxtx_vec_sse.c b/drivers/net/axgbe/axgbe_rxtx_vec_sse.c
index d95a446bef..7034d5737a 100644
--- a/drivers/net/axgbe/axgbe_rxtx_vec_sse.c
+++ b/drivers/net/axgbe/axgbe_rxtx_vec_sse.c
@@ -65,6 +65,7 @@ axgbe_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t idx, nb_commit, loop, i;
uint32_t tail_addr;
+ printf("jesna::Inside axgbe_xmit_pkts_vec \n"); /* FIXME(review): leftover personal debug printf - remove before merge */
txq = (struct axgbe_tx_queue *)tx_queue;
if (txq->nb_desc_free < txq->free_thresh) {
axgbe_xmit_cleanup_vec(txq);
--
2.34.1
next prev parent reply other threads:[~2023-11-11 16:00 UTC|newest]
Thread overview: 16+ messages / expand[flat|nested] mbox.gz Atom feed top
2023-11-11 16:00 [PATCH v1 1/3] net/axgbe: packet size doesn't exceed the configured MTU Jesna K E
2023-11-11 16:00 ` [PATCH v1 2/3] net/axgbe: correct API call when offload enabled Jesna K E
2023-11-13 15:23 ` Ferruh Yigit
2023-11-13 16:55 ` Ferruh Yigit
2023-11-14 6:07 ` [PATCH v2] net/axgbe: invoke correct API when offloads enabled Jesna K E
2023-11-14 7:15 ` Jesna K E
2023-11-15 5:56 ` [PATCH v3] " Jesna K E
2023-11-15 11:57 ` Ferruh Yigit
2023-11-15 12:54 ` Sebastian, Selwin
2023-11-15 12:59 ` Ferruh Yigit
2023-11-11 16:00 ` Jesna K E [this message]
2023-11-15 19:33 ` [PATCH v1 3/3] net/axgbe: support TSO Implementation Ferruh Yigit
2023-11-16 9:44 ` [PATCH v2] net/axgbe: support TSO Jesna K E
2023-11-16 16:03 ` [PATCH v3] " Jesna K E
2023-11-17 18:34 ` Ferruh Yigit
2023-11-13 15:07 ` [PATCH v1 1/3] net/axgbe: packet size doesn't exceed the configured MTU Ferruh Yigit
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20231111160006.455767-3-jesna.k.e@amd.com \
--to=jesna.k.e@amd.com \
--cc=Ferruh.Yigit@amd.com \
--cc=Selwin.Sebastian@amd.com \
--cc=dev@dpdk.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).