DPDK patches and discussions
From: "Sebastian, Selwin" <Selwin.Sebastian@amd.com>
To: "K.E., Jesna" <Jesna.K.e@amd.com>, "dev@dpdk.org" <dev@dpdk.org>
Subject: RE: [PATCH v1] net/axgbe: support TSO
Date: Tue, 11 Feb 2025 13:16:36 +0000	[thread overview]
Message-ID: <DM4PR12MB505508D5FCE334DA5DE2440F8DFD2@DM4PR12MB5055.namprd12.prod.outlook.com> (raw)
In-Reply-To: <20250124073020.189242-1-jesna.k.e@amd.com>

Acked-by: Selwin Sebastian <selwin.sebastian@amd.com>

-----Original Message-----
From: K.E., Jesna <Jesna.K.e@amd.com>
Sent: Friday, January 24, 2025 1:00 PM
To: dev@dpdk.org
Cc: Sebastian, Selwin <Selwin.Sebastian@amd.com>; K.E., Jesna <Jesna.K.e@amd.com>
Subject: [PATCH v1] net/axgbe: support TSO

Added TSO (TCP Segmentation Offload) support for the axgbe PMD.

Signed-off-by: Jesna K E <jesna.k.e@amd.com>
---
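Note for reviewers exercising the new path: the port-level side is just the
standard Tx offload request. A minimal configure-time sketch, assuming port 0
with a single queue pair (the helper name, ring sizes and mempool are
illustrative and not part of this patch):

#include <rte_ethdev.h>
#include <rte_mempool.h>

static int
setup_port_with_tso(uint16_t port_id, struct rte_mempool *mb_pool)
{
	struct rte_eth_dev_info dev_info;
	struct rte_eth_conf port_conf = { 0 };
	int ret;

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		return ret;

	/* Request TSO only if the PMD reports it (axgbe does with this patch). */
	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_TCP_TSO)
		port_conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_TCP_TSO;

	ret = rte_eth_dev_configure(port_id, 1, 1, &port_conf);
	if (ret != 0)
		return ret;

	ret = rte_eth_rx_queue_setup(port_id, 0, 512,
				     rte_eth_dev_socket_id(port_id), NULL, mb_pool);
	if (ret < 0)
		return ret;

	ret = rte_eth_tx_queue_setup(port_id, 0, 512,
				     rte_eth_dev_socket_id(port_id), NULL);
	if (ret < 0)
		return ret;

	return rte_eth_dev_start(port_id);
}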
 doc/guides/nics/features/axgbe.ini |   1 +
 drivers/net/axgbe/axgbe_common.h   |  13 ++
 drivers/net/axgbe/axgbe_dev.c      |  12 ++
 drivers/net/axgbe/axgbe_ethdev.c   |   2 +
 drivers/net/axgbe/axgbe_ethdev.h   |   1 +
 drivers/net/axgbe/axgbe_rxtx.c     | 247 ++++++++++++++++++-----------
 6 files changed, 179 insertions(+), 97 deletions(-)
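
On the per-packet side, the fields read by the new TSO branch in
axgbe_xmit_hw() can be filled as below. Again only a sketch (the helper name
is illustrative), assuming an untagged IPv4/TCP frame without options; a real
application derives the lengths from the actual headers. RTE_MBUF_F_TX_TCP_SEG
is set per the generic DPDK TSO convention, while the driver itself keys the
TSO path on the queue-level tso_tx flag:

#include <rte_mbuf.h>
#include <rte_ether.h>
#include <rte_ip.h>
#include <rte_tcp.h>

static void
request_tso(struct rte_mbuf *m, uint16_t mss)
{
	m->l2_len = sizeof(struct rte_ether_hdr);  /* 14 bytes, no VLAN assumed */
	m->l3_len = sizeof(struct rte_ipv4_hdr);   /* 20 bytes, no IP options */
	m->l4_len = sizeof(struct rte_tcp_hdr);    /* 20 bytes, no TCP options */
	m->tso_segsz = mss;                        /* programmed into DMA_CH_CR MSS */
	m->ol_flags |= RTE_MBUF_F_TX_TCP_SEG |
		       RTE_MBUF_F_TX_IPV4 |
		       RTE_MBUF_F_TX_IP_CKSUM |
		       RTE_MBUF_F_TX_TCP_CKSUM;
}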

diff --git a/doc/guides/nics/features/axgbe.ini b/doc/guides/nics/features/axgbe.ini
index 5e2d6498e5..ce4a5075f4 100644
--- a/doc/guides/nics/features/axgbe.ini
+++ b/doc/guides/nics/features/axgbe.ini
@@ -7,6 +7,7 @@
 Speed capabilities   = Y
 Link status          = Y
 Scattered Rx         = Y
+TSO                  = Y
 Promiscuous mode     = Y
 Allmulticast mode    = Y
 RSS hash             = Y
diff --git a/drivers/net/axgbe/axgbe_common.h b/drivers/net/axgbe/axgbe_common.h
index 0e1b2c1500..93e6c177b6 100644
--- a/drivers/net/axgbe/axgbe_common.h
+++ b/drivers/net/axgbe/axgbe_common.h
@@ -161,6 +161,10 @@
 #define DMA_CH_CARBR_LO                        0x5c
 #define DMA_CH_SR                      0x60

+/* MSS register entry bit position and size for TSO */
+#define DMA_CH_CR_MSS_INDEX             0
+#define DMA_CH_CR_MSS_WIDTH             14
+
 /* DMA channel register entry bit positions and sizes */
 #define DMA_CH_CR_PBLX8_INDEX          16
 #define DMA_CH_CR_PBLX8_WIDTH          1
@@ -1230,6 +1234,15 @@
 #define TX_CONTEXT_DESC3_VT_INDEX              0
 #define TX_CONTEXT_DESC3_VT_WIDTH              16

+/* TSO-related descriptor bit positions and sizes */
+#define TX_NORMAL_DESC3_TPL_INDEX               0
+#define TX_NORMAL_DESC3_TPL_WIDTH               18
+#define TX_NORMAL_DESC3_THL_INDEX               19
+#define TX_NORMAL_DESC3_THL_WIDTH               4
+#define TX_CONTEXT_DESC3_OSTC_INDEX             27
+#define TX_CONTEXT_DESC3_OSTC_WIDTH             1
+
+
 #define TX_NORMAL_DESC2_HL_B1L_INDEX           0
 #define TX_NORMAL_DESC2_HL_B1L_WIDTH           14
 #define TX_NORMAL_DESC2_IC_INDEX               31
diff --git a/drivers/net/axgbe/axgbe_dev.c b/drivers/net/axgbe/axgbe_dev.c
index 9173a6fea6..634d4ee4a5 100644
--- a/drivers/net/axgbe/axgbe_dev.c
+++ b/drivers/net/axgbe/axgbe_dev.c
@@ -872,6 +872,17 @@ int axgbe_write_rss_lookup_table(struct axgbe_port *pdata)
        return 0;
 }

+static void axgbe_config_tso_mode(struct axgbe_port *pdata)
+{
+       unsigned int i;
+       struct axgbe_tx_queue *txq;
+
+       for (i = 0; i < pdata->eth_dev->data->nb_tx_queues; i++) {
+               txq = pdata->eth_dev->data->tx_queues[i];
+               AXGMAC_DMA_IOWRITE_BITS(txq, DMA_CH_TCR, TSE, 1);
+       }
+}
+
 static int axgbe_enable_rss(struct axgbe_port *pdata)
 {
        int ret;
@@ -1378,6 +1389,7 @@ static int axgbe_init(struct axgbe_port *pdata)
        axgbe_config_rx_pbl_val(pdata);
        axgbe_config_rx_buffer_size(pdata);
        axgbe_config_rss(pdata);
+       axgbe_config_tso_mode(pdata);
        wrapper_tx_desc_init(pdata);
        ret = wrapper_rx_desc_init(pdata);
        if (ret)
diff --git a/drivers/net/axgbe/axgbe_ethdev.c b/drivers/net/axgbe/axgbe_ethdev.c
index 5448a5f3d7..c42cac5b8b 100644
--- a/drivers/net/axgbe/axgbe_ethdev.c
+++ b/drivers/net/axgbe/axgbe_ethdev.c
@@ -11,6 +11,7 @@
 #include "rte_time.h"

 #include "eal_filesystem.h"
+#include <rte_vect.h>

 #include <rte_vect.h>

@@ -1241,6 +1242,7 @@ axgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
                RTE_ETH_TX_OFFLOAD_QINQ_INSERT |
                RTE_ETH_TX_OFFLOAD_IPV4_CKSUM  |
                RTE_ETH_TX_OFFLOAD_MULTI_SEGS  |
+               RTE_ETH_TX_OFFLOAD_TCP_TSO     |
                RTE_ETH_TX_OFFLOAD_UDP_CKSUM   |
                RTE_ETH_TX_OFFLOAD_TCP_CKSUM;

diff --git a/drivers/net/axgbe/axgbe_ethdev.h b/drivers/net/axgbe/axgbe_ethdev.h
index dd00ae8af5..5cd4317d7a 100644
--- a/drivers/net/axgbe/axgbe_ethdev.h
+++ b/drivers/net/axgbe/axgbe_ethdev.h
@@ -623,6 +623,7 @@ struct axgbe_port {
        unsigned int tx_osp_mode;
        unsigned int tx_max_fifo_size;
        unsigned int multi_segs_tx;
+       unsigned int tso_tx;

        /* Rx settings */
        unsigned int rx_sf_mode;
diff --git a/drivers/net/axgbe/axgbe_rxtx.c b/drivers/net/axgbe/axgbe_rxtx.c
index 974ade9ab7..51a1aeb0b9 100644
--- a/drivers/net/axgbe/axgbe_rxtx.c
+++ b/drivers/net/axgbe/axgbe_rxtx.c
@@ -627,6 +627,9 @@ int axgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
                                RTE_ETH_TX_OFFLOAD_MULTI_SEGS))
                pdata->multi_segs_tx = true;

+       if ((dev_data->dev_conf.txmode.offloads &
+                               RTE_ETH_TX_OFFLOAD_TCP_TSO))
+               pdata->tso_tx = true;

        return 0;
 }
@@ -824,26 +827,77 @@ static int axgbe_xmit_hw(struct axgbe_tx_queue *txq,
        volatile struct axgbe_tx_desc *desc;
        uint16_t idx;
        uint64_t mask;
+       int start_index;
+       uint64_t l2_len = 0;
+       uint64_t l3_len = 0;
+       uint64_t l4_len = 0;
+       uint64_t tso_segz = 0;
+       uint64_t total_hdr_len;
+       int tso = 0;
+
+       /* Parameters required for TSO */
+       l2_len = mbuf->l2_len;
+       l3_len = mbuf->l3_len;
+       l4_len = mbuf->l4_len;
+       total_hdr_len = l2_len + l3_len + l4_len;
+       tso_segz = mbuf->tso_segsz;
+
+       if (txq->pdata->tso_tx)
+               tso = 1;
+       else
+               tso = 0;
+
+       AXGMAC_DMA_IOWRITE_BITS(txq, DMA_CH_CR, MSS, tso_segz);

        idx = AXGBE_GET_DESC_IDX(txq, txq->cur);
        desc = &txq->desc[idx];

-       /* Update buffer address  and length */
-       desc->baddr = rte_mbuf_data_iova(mbuf);
-       AXGMAC_SET_BITS_LE(desc->desc2, TX_NORMAL_DESC2, HL_B1L,
-                          mbuf->pkt_len);
-       /* Total msg length to transmit */
-       AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, FL,
-                          mbuf->pkt_len);
+       /* Saving the start index for setting the OWN bit finally */
+       start_index = idx;
+       if (tso) {
+               /* Update buffer address  and length */
+               desc->baddr = rte_mbuf_data_iova(mbuf);
+               AXGMAC_SET_BITS_LE(desc->desc2, TX_NORMAL_DESC2, HL_B1L,
+                               total_hdr_len);
+       } else {
+               /* Update buffer address  and length */
+               desc->baddr = rte_mbuf_data_iova(mbuf);
+               AXGMAC_SET_BITS_LE(desc->desc2, TX_NORMAL_DESC2, HL_B1L,
+                               mbuf->pkt_len);
+               /* Total msg length to transmit */
+               AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, FL,
+                               mbuf->pkt_len);
+       }
        /* Timestamp enablement check */
        if (mbuf->ol_flags & RTE_MBUF_F_TX_IEEE1588_TMST)
                AXGMAC_SET_BITS_LE(desc->desc2, TX_NORMAL_DESC2, TTSE, 1);
        rte_wmb();
        /* Mark it as First and Last Descriptor */
        AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, FD, 1);
-       AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, LD, 1);
        /* Mark it as a NORMAL descriptor */
        AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, CTXT, 0);
+       if (tso) {
+               /* Descriptor settings for TSO */
+               /* Enable TSO */
+               AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, TSE, 1);
+               AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, TPL,
+                               ((mbuf->pkt_len) - total_hdr_len));
+               AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, THL,
+                               (l4_len / 4));
+               rte_wmb();
+               txq->cur++;
+               idx = AXGBE_GET_DESC_IDX(txq, txq->cur);
+               desc = &txq->desc[idx];
+               desc->baddr = rte_mbuf_data_iova(mbuf);
+               AXGMAC_SET_BITS_LE(desc->desc2, TX_NORMAL_DESC2, HL_B1L,
+                               (mbuf->pkt_len) - total_hdr_len);
+               AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, OWN, 1);
+               AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, LD, 1);
+               /* Mark it as a NORMAL descriptor */
+               AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, CTXT, 0);
+       } else {
+               AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, LD, 1);
+       }
        /* configure h/w Offload */
        mask = mbuf->ol_flags & RTE_MBUF_F_TX_L4_MASK;
        if (mask == RTE_MBUF_F_TX_TCP_CKSUM || mask == RTE_MBUF_F_TX_UDP_CKSUM)
@@ -867,12 +921,16 @@ static int axgbe_xmit_hw(struct axgbe_tx_queue *txq,
        } else {
                AXGMAC_SET_BITS_LE(desc->desc2, TX_NORMAL_DESC2, VTIR, 0x0);
        }
-       rte_wmb();
-
-       /* Set OWN bit */
-       AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, OWN, 1);
-       rte_wmb();

+       if (!tso) {
+               AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, OWN, 1);
+               rte_wmb();
+       } else {
+               /* Set OWN bit for the first descriptor */
+               desc = &txq->desc[start_index];
+               AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, OWN, 1);
+               rte_wmb();
+       }

        /* Save mbuf */
        txq->sw_ring[idx] = mbuf;
@@ -899,6 +957,7 @@ axgbe_xmit_hw_seg(struct axgbe_tx_queue *txq,
        uint32_t pkt_len = 0;
        int nb_desc_free;
        struct rte_mbuf  *tx_pkt;
+       uint32_t tso = 0;

        nb_desc_free = txq->nb_desc - (txq->cur - txq->dirty);

@@ -909,113 +968,107 @@ axgbe_xmit_hw_seg(struct axgbe_tx_queue *txq,
                        return RTE_ETH_TX_DESC_UNAVAIL;
        }

-       idx = AXGBE_GET_DESC_IDX(txq, txq->cur);
-       desc = &txq->desc[idx];
-       /* Saving the start index for setting the OWN bit finally */
-       start_index = idx;
-
-       tx_pkt = mbuf;
-       /* Max_pkt len = 9018 ; need to update it according to Jumbo pkt size */
-       pkt_len = tx_pkt->pkt_len;
-
-       /* Update buffer address  and length */
-       desc->baddr = rte_mbuf_data_iova(tx_pkt);
-       AXGMAC_SET_BITS_LE(desc->desc2, TX_NORMAL_DESC2, HL_B1L,
-                                          tx_pkt->data_len);
-       /* Total msg length to transmit */
-       AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, FL,
-                                          tx_pkt->pkt_len);
-       /* Timestamp enablement check */
-       if (mbuf->ol_flags & RTE_MBUF_F_TX_IEEE1588_TMST)
-               AXGMAC_SET_BITS_LE(desc->desc2, TX_NORMAL_DESC2, TTSE, 1);
-
-       rte_wmb();
-       /* Mark it as First Descriptor */
-       AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, FD, 1);
-       /* Mark it as a NORMAL descriptor */
-       AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, CTXT, 0);
-       /* configure h/w Offload */
-       mask = mbuf->ol_flags & RTE_MBUF_F_TX_L4_MASK;
-       if (mask == RTE_MBUF_F_TX_TCP_CKSUM || mask == RTE_MBUF_F_TX_UDP_CKSUM)
-               AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, CIC, 0x3);
-       else if (mbuf->ol_flags & RTE_MBUF_F_TX_IP_CKSUM)
-               AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, CIC, 0x1);
-       rte_wmb();
+       if (txq->pdata->tso_tx)
+               tso = 1;
+       else
+               tso = 0;

-       if (mbuf->ol_flags & (RTE_MBUF_F_TX_VLAN | RTE_MBUF_F_TX_QINQ)) {
-               /* Mark it as a CONTEXT descriptor */
-               AXGMAC_SET_BITS_LE(desc->desc3, TX_CONTEXT_DESC3,
-                               CTXT, 1);
-               /* Set the VLAN tag */
-               AXGMAC_SET_BITS_LE(desc->desc3, TX_CONTEXT_DESC3,
-                               VT, mbuf->vlan_tci);
-               /* Indicate this descriptor contains the VLAN tag */
-               AXGMAC_SET_BITS_LE(desc->desc3, TX_CONTEXT_DESC3,
-                               VLTV, 1);
-               AXGMAC_SET_BITS_LE(desc->desc2, TX_NORMAL_DESC2, VTIR,
-                               TX_NORMAL_DESC2_VLAN_INSERT);
+       if (tso) {
+               axgbe_xmit_hw(txq, mbuf);
        } else {
-               AXGMAC_SET_BITS_LE(desc->desc2, TX_NORMAL_DESC2, VTIR, 0x0);
-       }
-       rte_wmb();
-
-       /* Save mbuf */
-       txq->sw_ring[idx] = tx_pkt;
-       /* Update current index*/
-       txq->cur++;
-
-       tx_pkt = tx_pkt->next;
-
-       while (tx_pkt != NULL) {
                idx = AXGBE_GET_DESC_IDX(txq, txq->cur);
                desc = &txq->desc[idx];
+               /* Saving the start index for setting the OWN bit finally */
+               start_index = idx;
+               tx_pkt = mbuf;
+               /* Max_pkt len = 9018 ; need to update it according to Jumbo pkt size */
+               pkt_len = tx_pkt->pkt_len;

                /* Update buffer address  and length */
                desc->baddr = rte_mbuf_data_iova(tx_pkt);
-
-               AXGMAC_SET_BITS_LE(desc->desc2,
-                               TX_NORMAL_DESC2, HL_B1L, tx_pkt->data_len);
-
+               AXGMAC_SET_BITS_LE(desc->desc2, TX_NORMAL_DESC2, HL_B1L,
+                               tx_pkt->data_len);
+               /* Total msg length to transmit */
+               AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, FL,
+                               tx_pkt->pkt_len);
+               /* Timestamp enablement check */
+               if (mbuf->ol_flags & RTE_MBUF_F_TX_IEEE1588_TMST)
+                       AXGMAC_SET_BITS_LE(desc->desc2, TX_NORMAL_DESC2, TTSE, 1);
                rte_wmb();
-
+               /* Mark it as First Descriptor */
+               AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, FD, 1);
                /* Mark it as a NORMAL descriptor */
                AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, CTXT, 0);
                /* configure h/w Offload */
                mask = mbuf->ol_flags & RTE_MBUF_F_TX_L4_MASK;
-               if (mask == RTE_MBUF_F_TX_TCP_CKSUM ||
-                               mask == RTE_MBUF_F_TX_UDP_CKSUM)
-                       AXGMAC_SET_BITS_LE(desc->desc3,
-                                       TX_NORMAL_DESC3, CIC, 0x3);
+               if (mask == RTE_MBUF_F_TX_TCP_CKSUM || mask == RTE_MBUF_F_TX_UDP_CKSUM)
+                       AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, CIC, 0x3);
                else if (mbuf->ol_flags & RTE_MBUF_F_TX_IP_CKSUM)
-                       AXGMAC_SET_BITS_LE(desc->desc3,
-                                       TX_NORMAL_DESC3, CIC, 0x1);
-
+                       AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, CIC, 0x1);
                rte_wmb();
-
-                /* Set OWN bit */
-               AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, OWN, 1);
+               if (mbuf->ol_flags & (RTE_MBUF_F_TX_VLAN | RTE_MBUF_F_TX_QINQ)) {
+                       /* Mark it as a CONTEXT descriptor */
+                       AXGMAC_SET_BITS_LE(desc->desc3, TX_CONTEXT_DESC3,
+                                       CTXT, 1);
+                       /* Set the VLAN tag */
+                       AXGMAC_SET_BITS_LE(desc->desc3, TX_CONTEXT_DESC3,
+                                       VT, mbuf->vlan_tci);
+                       /* Indicate this descriptor contains the VLAN tag */
+                       AXGMAC_SET_BITS_LE(desc->desc3, TX_CONTEXT_DESC3,
+                                       VLTV, 1);
+                       AXGMAC_SET_BITS_LE(desc->desc2, TX_NORMAL_DESC2, VTIR,
+                                       TX_NORMAL_DESC2_VLAN_INSERT);
+               } else {
+                       AXGMAC_SET_BITS_LE(desc->desc2, TX_NORMAL_DESC2, VTIR, 0x0);
+               }
                rte_wmb();
-
                /* Save mbuf */
                txq->sw_ring[idx] = tx_pkt;
                /* Update current index*/
                txq->cur++;
-
                tx_pkt = tx_pkt->next;
-       }
-
-       /* Set LD bit for the last descriptor */
-       AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, LD, 1);
-       rte_wmb();
+               while (tx_pkt != NULL) {
+                       idx = AXGBE_GET_DESC_IDX(txq, txq->cur);
+                       desc = &txq->desc[idx];
+                       /* Update buffer address  and length */
+                       desc->baddr = rte_mbuf_data_iova(tx_pkt);
+                       AXGMAC_SET_BITS_LE(desc->desc2,
+                                       TX_NORMAL_DESC2, HL_B1L, tx_pkt->data_len);
+                       rte_wmb();
+                       /* Mark it as a NORMAL descriptor */
+                       AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, CTXT, 0);
+                       /* configure h/w Offload */
+                       mask = mbuf->ol_flags & RTE_MBUF_F_TX_L4_MASK;
+                       if (mask == RTE_MBUF_F_TX_TCP_CKSUM ||
+                                       mask == RTE_MBUF_F_TX_UDP_CKSUM)
+                               AXGMAC_SET_BITS_LE(desc->desc3,
+                                               TX_NORMAL_DESC3, CIC, 0x3);
+                       else if (mbuf->ol_flags & RTE_MBUF_F_TX_IP_CKSUM)
+                               AXGMAC_SET_BITS_LE(desc->desc3,
+                                               TX_NORMAL_DESC3, CIC, 0x1);
+                       rte_wmb();
+                       /* Set OWN bit */
+                       AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, OWN, 1);
+                       rte_wmb();
+                       /* Save mbuf */
+                       txq->sw_ring[idx] = tx_pkt;
+                       /* Update current index*/
+                       txq->cur++;
+                       tx_pkt = tx_pkt->next;
+               }

-       /* Update stats */
-       txq->bytes += pkt_len;
+               /* Set LD bit for the last descriptor */
+               AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, LD, 1);
+               rte_wmb();

-       /* Set OWN bit for the first descriptor */
-       desc = &txq->desc[start_index];
-       AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, OWN, 1);
-       rte_wmb();
+               /* Update stats */
+               txq->bytes += pkt_len;

+               /* Set OWN bit for the first descriptor */
+               desc = &txq->desc[start_index];
+               AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, OWN, 1);
+               rte_wmb();
+       }
        return 0;
 }

--
2.34.1

