DPDK patches and discussions
* [dpdk-dev] [PATCH v5 0/4] drivers/net: cleanup Tx buffers
       [not found] <20191203055134.72874-1-chenxux.di@intel.com>
@ 2019-12-27  3:45 ` Chenxu Di
  2019-12-27  3:45   ` [dpdk-dev] [PATCH v5 1/4] net/i40e: " Chenxu Di
                     ` (4 more replies)
  0 siblings, 5 replies; 9+ messages in thread
From: Chenxu Di @ 2019-12-27  3:45 UTC (permalink / raw)
  To: dev; +Cc: Yang Qiming, beilei.xing, Chenxu Di

Add support to the i40e, ice, ixgbe and igb VF drivers for the
API rte_eth_tx_done_cleanup to force freeing of consumed buffers
on the Tx ring.
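
For reference, a minimal sketch of how an application could invoke this
through the ethdev API (the helper name, port/queue ids and the budget of
64 mbufs below are illustrative only, not part of this series):

#include <stdio.h>
#include <rte_ethdev.h>

static void
reclaim_tx_mbufs(uint16_t port_id, uint16_t queue_id)
{
	/* Ask the PMD to free up to 64 already-transmitted mbufs on this
	 * Tx queue; a free_cnt of 0 requests freeing as many as possible.
	 */
	int nb = rte_eth_tx_done_cleanup(port_id, queue_id, 64);

	if (nb < 0)
		printf("tx_done_cleanup failed or unsupported: %d\n", nb);
	else
		printf("freed %d packets on Tx queue %u\n", nb, queue_id);
}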

---
v2:
added code for igb VF.
v3:
corrected author information.
v4:
reworked the code.
v5:
fixed code and comments.
removed the fm10k changes.

Chenxu Di (4):
  net/i40e: cleanup Tx buffers
  net/ice: cleanup Tx buffers
  net/ixgbe: cleanup Tx buffers
  net/e1000: cleanup Tx buffers

 drivers/net/e1000/igb_ethdev.c    |   1 +
 drivers/net/i40e/i40e_ethdev.c    |   1 +
 drivers/net/i40e/i40e_ethdev_vf.c |   1 +
 drivers/net/i40e/i40e_rxtx.c      | 122 +++++++++++++++++++++++++++++
 drivers/net/i40e/i40e_rxtx.h      |   1 +
 drivers/net/ice/ice_ethdev.c      |   1 +
 drivers/net/ice/ice_rxtx.c        | 123 ++++++++++++++++++++++++++++++
 drivers/net/ice/ice_rxtx.h        |   1 +
 drivers/net/ixgbe/ixgbe_ethdev.c  |   2 +
 drivers/net/ixgbe/ixgbe_rxtx.c    | 121 +++++++++++++++++++++++++++++
 drivers/net/ixgbe/ixgbe_rxtx.h    |   2 +
 11 files changed, 376 insertions(+)

-- 
2.17.1


^ permalink raw reply	[flat|nested] 9+ messages in thread

* [dpdk-dev] [PATCH v5 1/4] net/i40e: cleanup Tx buffers
  2019-12-27  3:45 ` [dpdk-dev] [PATCH v5 0/4] drivers/net: cleanup Tx buffers Chenxu Di
@ 2019-12-27  3:45   ` Chenxu Di
  2019-12-30  3:17     ` Yang, Qiming
  2019-12-27  3:45   ` [dpdk-dev] [PATCH v5 2/4] net/ice: " Chenxu Di
                     ` (3 subsequent siblings)
  4 siblings, 1 reply; 9+ messages in thread
From: Chenxu Di @ 2019-12-27  3:45 UTC (permalink / raw)
  To: dev; +Cc: Yang Qiming, beilei.xing, Chenxu Di

Add support to the i40e driver for the API rte_eth_tx_done_cleanup
to force freeing of consumed buffers on the Tx ring.

Signed-off-by: Chenxu Di <chenxux.di@intel.com>
---
 drivers/net/i40e/i40e_ethdev.c    |   1 +
 drivers/net/i40e/i40e_ethdev_vf.c |   1 +
 drivers/net/i40e/i40e_rxtx.c      | 122 ++++++++++++++++++++++++++++++
 drivers/net/i40e/i40e_rxtx.h      |   1 +
 4 files changed, 125 insertions(+)

diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 5999c964b..fad47a942 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -522,6 +522,7 @@ static const struct eth_dev_ops i40e_eth_dev_ops = {
 	.mac_addr_set                 = i40e_set_default_mac_addr,
 	.mtu_set                      = i40e_dev_mtu_set,
 	.tm_ops_get                   = i40e_tm_ops_get,
+	.tx_done_cleanup              = i40e_tx_done_cleanup,
 };
 
 /* store statistics names and its offset in stats structure */
diff --git a/drivers/net/i40e/i40e_ethdev_vf.c b/drivers/net/i40e/i40e_ethdev_vf.c
index 5dba0928b..0ca5417d7 100644
--- a/drivers/net/i40e/i40e_ethdev_vf.c
+++ b/drivers/net/i40e/i40e_ethdev_vf.c
@@ -215,6 +215,7 @@ static const struct eth_dev_ops i40evf_eth_dev_ops = {
 	.rss_hash_conf_get    = i40evf_dev_rss_hash_conf_get,
 	.mtu_set              = i40evf_dev_mtu_set,
 	.mac_addr_set         = i40evf_set_default_mac_addr,
+	.tx_done_cleanup      = i40e_tx_done_cleanup,
 };
 
 /*
diff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c
index 17dc8c78f..9e4b0b678 100644
--- a/drivers/net/i40e/i40e_rxtx.c
+++ b/drivers/net/i40e/i40e_rxtx.c
@@ -2455,6 +2455,128 @@ i40e_tx_queue_release_mbufs(struct i40e_tx_queue *txq)
 	}
 }
 
+int i40e_tx_done_cleanup(void *q, uint32_t free_cnt)
+{
+	struct i40e_tx_queue *txq = (struct i40e_tx_queue *)q;
+	struct i40e_tx_entry *sw_ring;
+	volatile struct i40e_tx_desc *txr;
+	uint16_t tx_first; /* First segment analyzed. */
+	uint16_t tx_id;    /* Current segment being processed. */
+	uint16_t tx_last;  /* Last segment in the current packet. */
+	uint16_t tx_next;  /* First segment of the next packet. */
+	int count;
+
+	if (txq == NULL)
+		return -ENODEV;
+
+	count = 0;
+	sw_ring = txq->sw_ring;
+	txr = txq->tx_ring;
+
+	/*
+	 * tx_tail is the last sent packet on the sw_ring. Go to the end
+	 * of that packet (the last segment in the packet chain) and
+	 * then the next segment will be the start of the oldest segment
+	 * in the sw_ring. This is the first packet that will be
+	 * attempted to be freed.
+	 */
+
+	/* Get last segment in most recently added packet. */
+	tx_last = sw_ring[txq->tx_tail].last_id;
+
+	/* Get the next segment, which is the oldest segment in ring. */
+	tx_first = sw_ring[tx_last].next_id;
+
+	/* Set the current index to the first. */
+	tx_id = tx_first;
+
+	/*
+	 * Loop through each packet. For each packet, verify that an
+	 * mbuf exists and that the last segment is free. If so, free
+	 * it and move on.
+	 */
+	while (1) {
+		tx_last = sw_ring[tx_id].last_id;
+
+		if (sw_ring[tx_last].mbuf) {
+			if ((txr[tx_last].cmd_type_offset_bsz &
+				rte_cpu_to_le_64(I40E_TXD_QW1_DTYPE_MASK)) ==
+				rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DESC_DONE)) {
+				/* Get the start of the next packet. */
+				tx_next = sw_ring[tx_last].next_id;
+
+				/*
+				 * Loop through all segments in a
+				 * packet.
+				 */
+				do {
+					rte_pktmbuf_free_seg(sw_ring[tx_id].mbuf);
+					sw_ring[tx_id].mbuf = NULL;
+					sw_ring[tx_id].last_id = tx_id;
+
+					/* Move to next segment. */
+					tx_id = sw_ring[tx_id].next_id;
+
+				} while (tx_id != tx_next);
+
+				/*
+				 * Increment the number of packets
+				 * freed.
+				 */
+				count++;
+
+				if (unlikely(count == (int)free_cnt))
+					break;
+			} else {
+				/*
+				 * mbuf still in use, nothing left to
+				 * free.
+				 */
+				break;
+			}
+		} else {
+			/*
+			 * There are multiple reasons to be here:
+			 * 1) All the packets on the ring have been
+			 *    freed - tx_id is equal to tx_first
+			 *    and some packets have been freed.
+			 *    - Done, exit
+			 * 2) The interface has not sent a ring's worth of
+			 *    packets yet, so the segment after tail is
+			 *    still empty. Or a previous call to this
+			 *    function freed some of the segments but
+			 *    not all so there is a hole in the list.
+			 *    Hopefully this is a rare case.
+			 *    - Walk the list and find the next mbuf. If
+			 *      there isn't one, then done.
+			 */
+			if (likely(tx_id == tx_first && count != 0))
+				break;
+
+			/*
+			 * Walk the list and find the next mbuf, if any.
+			 */
+			do {
+				/* Move to next segment. */
+				tx_id = sw_ring[tx_id].next_id;
+
+				if (sw_ring[tx_id].mbuf)
+					break;
+
+			} while (tx_id != tx_first);
+
+			/*
+			 * Determine why previous loop bailed. If there
+			 * is not an mbuf, done.
+			 */
+			if (sw_ring[tx_id].mbuf == NULL)
+				break;
+		}
+	}
+
+	return count;
+}
+
 void
 i40e_reset_tx_queue(struct i40e_tx_queue *txq)
 {
diff --git a/drivers/net/i40e/i40e_rxtx.h b/drivers/net/i40e/i40e_rxtx.h
index 2106bb355..8f11f011a 100644
--- a/drivers/net/i40e/i40e_rxtx.h
+++ b/drivers/net/i40e/i40e_rxtx.h
@@ -212,6 +212,7 @@ void i40e_dev_free_queues(struct rte_eth_dev *dev);
 void i40e_reset_rx_queue(struct i40e_rx_queue *rxq);
 void i40e_reset_tx_queue(struct i40e_tx_queue *txq);
 void i40e_tx_queue_release_mbufs(struct i40e_tx_queue *txq);
+int i40e_tx_done_cleanup(void *txq, uint32_t free_cnt);
 int i40e_alloc_rx_queue_mbufs(struct i40e_rx_queue *rxq);
 void i40e_rx_queue_release_mbufs(struct i40e_rx_queue *rxq);
 
-- 
2.17.1


^ permalink raw reply	[flat|nested] 9+ messages in thread

* [dpdk-dev] [PATCH v5 2/4] net/ice: cleanup Tx buffers
  2019-12-27  3:45 ` [dpdk-dev] [PATCH v5 0/4] drivers/net: cleanup Tx buffers Chenxu Di
  2019-12-27  3:45   ` [dpdk-dev] [PATCH v5 1/4] net/i40e: " Chenxu Di
@ 2019-12-27  3:45   ` Chenxu Di
  2019-12-30  3:29     ` Yang, Qiming
  2019-12-27  3:45   ` [dpdk-dev] [PATCH v5 3/4] net/ixgbe: " Chenxu Di
                     ` (2 subsequent siblings)
  4 siblings, 1 reply; 9+ messages in thread
From: Chenxu Di @ 2019-12-27  3:45 UTC (permalink / raw)
  To: dev; +Cc: Yang Qiming, beilei.xing, Chenxu Di

Add support to the ice driver for the API rte_eth_tx_done_cleanup
to force freeing of consumed buffers on the Tx ring.

Signed-off-by: Chenxu Di <chenxux.di@intel.com>
---
 drivers/net/ice/ice_ethdev.c |   1 +
 drivers/net/ice/ice_rxtx.c   | 123 +++++++++++++++++++++++++++++++++++
 drivers/net/ice/ice_rxtx.h   |   1 +
 3 files changed, 125 insertions(+)

diff --git a/drivers/net/ice/ice_ethdev.c b/drivers/net/ice/ice_ethdev.c
index de189daba..b55cdbf74 100644
--- a/drivers/net/ice/ice_ethdev.c
+++ b/drivers/net/ice/ice_ethdev.c
@@ -220,6 +220,7 @@ static const struct eth_dev_ops ice_eth_dev_ops = {
 	.filter_ctrl                  = ice_dev_filter_ctrl,
 	.udp_tunnel_port_add          = ice_dev_udp_tunnel_port_add,
 	.udp_tunnel_port_del          = ice_dev_udp_tunnel_port_del,
+	.tx_done_cleanup              = ice_tx_done_cleanup,
 };
 
 /* store statistics names and its offset in stats structure */
diff --git a/drivers/net/ice/ice_rxtx.c b/drivers/net/ice/ice_rxtx.c
index 2db174456..7e704d2d5 100644
--- a/drivers/net/ice/ice_rxtx.c
+++ b/drivers/net/ice/ice_rxtx.c
@@ -863,6 +863,129 @@ ice_fdir_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 	return 0;
 }
 
+
+int ice_tx_done_cleanup(void *q, uint32_t free_cnt)
+{
+	struct ice_tx_queue *txq = (struct ice_tx_queue *)q;
+	struct ice_tx_entry *sw_ring;
+	volatile struct ice_tx_desc *txr;
+	uint16_t tx_first; /* First segment analyzed. */
+	uint16_t tx_id;    /* Current segment being processed. */
+	uint16_t tx_last;  /* Last segment in the current packet. */
+	uint16_t tx_next;  /* First segment of the next packet. */
+	int count;
+
+	if (txq == NULL)
+		return -ENODEV;
+
+	count = 0;
+	sw_ring = txq->sw_ring;
+	txr = txq->tx_ring;
+
+	/*
+	 * tx_tail is the last sent packet on the sw_ring. Go to the end
+	 * of that packet (the last segment in the packet chain) and
+	 * then the next segment will be the start of the oldest segment
+	 * in the sw_ring. This is the first packet that will be
+	 * attempted to be freed.
+	 */
+
+	/* Get last segment in most recently added packet. */
+	tx_last = sw_ring[txq->tx_tail].last_id;
+
+	/* Get the next segment, which is the oldest segment in ring. */
+	tx_first = sw_ring[tx_last].next_id;
+
+	/* Set the current index to the first. */
+	tx_id = tx_first;
+
+	/*
+	 * Loop through each packet. For each packet, verify that an
+	 * mbuf exists and that the last segment is free. If so, free
+	 * it and move on.
+	 */
+	while (1) {
+		tx_last = sw_ring[tx_id].last_id;
+
+		if (sw_ring[tx_last].mbuf) {
+			if ((txr[tx_last].cmd_type_offset_bsz &
+				rte_cpu_to_le_64(ICE_TXD_QW1_DTYPE_M)) ==
+				rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE)) {
+				/* Get the start of the next packet. */
+				tx_next = sw_ring[tx_last].next_id;
+
+				/*
+				 * Loop through all segments in a
+				 * packet.
+				 */
+				do {
+					rte_pktmbuf_free_seg(sw_ring[tx_id].mbuf);
+					sw_ring[tx_id].mbuf = NULL;
+					sw_ring[tx_id].last_id = tx_id;
+
+					/* Move to next segment. */
+					tx_id = sw_ring[tx_id].next_id;
+
+				} while (tx_id != tx_next);
+
+				/*
+				 * Increment the number of packets
+				 * freed.
+				 */
+				count++;
+
+				if (unlikely(count == (int)free_cnt))
+					break;
+			} else {
+				/*
+				 * mbuf still in use, nothing left to
+				 * free.
+				 */
+				break;
+			}
+		} else {
+			/*
+			 * There are multiple reasons to be here:
+			 * 1) All the packets on the ring have been
+			 *    freed - tx_id is equal to tx_first
+			 *    and some packets have been freed.
+			 *    - Done, exit
+			 * 2) The interface has not sent a ring's worth of
+			 *    packets yet, so the segment after tail is
+			 *    still empty. Or a previous call to this
+			 *    function freed some of the segments but
+			 *    not all so there is a hole in the list.
+			 *    Hopefully this is a rare case.
+			 *    - Walk the list and find the next mbuf. If
+			 *      there isn't one, then done.
+			 */
+			if (likely(tx_id == tx_first && count != 0))
+				break;
+
+			/*
+			 * Walk the list and find the next mbuf, if any.
+			 */
+			do {
+				/* Move to next segment. */
+				tx_id = sw_ring[tx_id].next_id;
+
+				if (sw_ring[tx_id].mbuf)
+					break;
+
+			} while (tx_id != tx_first);
+
+			/*
+			 * Determine why previous loop bailed. If there
+			 * is not an mbuf, done.
+			 */
+			if (sw_ring[tx_id].mbuf == NULL)
+				break;
+		}
+	}
+
+	return count;
+}
+
 int
 ice_rx_queue_setup(struct rte_eth_dev *dev,
 		   uint16_t queue_idx,
diff --git a/drivers/net/ice/ice_rxtx.h b/drivers/net/ice/ice_rxtx.h
index 9e3d2cd07..8d4232a61 100644
--- a/drivers/net/ice/ice_rxtx.h
+++ b/drivers/net/ice/ice_rxtx.h
@@ -183,6 +183,7 @@ int ice_rx_descriptor_status(void *rx_queue, uint16_t offset);
 int ice_tx_descriptor_status(void *tx_queue, uint16_t offset);
 void ice_set_default_ptype_table(struct rte_eth_dev *dev);
 const uint32_t *ice_dev_supported_ptypes_get(struct rte_eth_dev *dev);
+int ice_tx_done_cleanup(void *txq, uint32_t free_cnt);
 
 int ice_rx_vec_dev_check(struct rte_eth_dev *dev);
 int ice_tx_vec_dev_check(struct rte_eth_dev *dev);
-- 
2.17.1


^ permalink raw reply	[flat|nested] 9+ messages in thread

* [dpdk-dev] [PATCH v5 3/4] net/ixgbe: cleanup Tx buffers
  2019-12-27  3:45 ` [dpdk-dev] [PATCH v5 0/4] drivers/net: cleanup Tx buffers Chenxu Di
  2019-12-27  3:45   ` [dpdk-dev] [PATCH v5 1/4] net/i40e: " Chenxu Di
  2019-12-27  3:45   ` [dpdk-dev] [PATCH v5 2/4] net/ice: " Chenxu Di
@ 2019-12-27  3:45   ` Chenxu Di
  2019-12-30  3:29     ` Yang, Qiming
  2019-12-27  3:45   ` [dpdk-dev] [PATCH v5 4/4] net/e1000: " Chenxu Di
  2019-12-27  8:25   ` [dpdk-dev] [PATCH v5 0/4] drivers/net: " Zhang, Xiao
  4 siblings, 1 reply; 9+ messages in thread
From: Chenxu Di @ 2019-12-27  3:45 UTC (permalink / raw)
  To: dev; +Cc: Yang Qiming, beilei.xing, Chenxu Di

Add support to the ixgbe driver for the API rte_eth_tx_done_cleanup
to force freeing of consumed buffers on the Tx ring.

Signed-off-by: Chenxu Di <chenxux.di@intel.com>
---
 drivers/net/ixgbe/ixgbe_ethdev.c |   2 +
 drivers/net/ixgbe/ixgbe_rxtx.c   | 121 +++++++++++++++++++++++++++++++
 drivers/net/ixgbe/ixgbe_rxtx.h   |   2 +
 3 files changed, 125 insertions(+)

diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index 2c6fd0f13..0091405db 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.c
+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -601,6 +601,7 @@ static const struct eth_dev_ops ixgbe_eth_dev_ops = {
 	.udp_tunnel_port_add  = ixgbe_dev_udp_tunnel_port_add,
 	.udp_tunnel_port_del  = ixgbe_dev_udp_tunnel_port_del,
 	.tm_ops_get           = ixgbe_tm_ops_get,
+	.tx_done_cleanup      = ixgbe_tx_done_cleanup,
 };
 
 /*
@@ -649,6 +650,7 @@ static const struct eth_dev_ops ixgbevf_eth_dev_ops = {
 	.reta_query           = ixgbe_dev_rss_reta_query,
 	.rss_hash_update      = ixgbe_dev_rss_hash_update,
 	.rss_hash_conf_get    = ixgbe_dev_rss_hash_conf_get,
+	.tx_done_cleanup      = ixgbe_tx_done_cleanup,
 };
 
 /* store statistics names and its offset in stats structure */
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c
index fa572d184..8d8e0655c 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx.c
@@ -2306,6 +2306,127 @@ ixgbe_tx_queue_release_mbufs(struct ixgbe_tx_queue *txq)
 	}
 }
 
+int ixgbe_tx_done_cleanup(void *q, uint32_t free_cnt)
+{
+	struct ixgbe_tx_queue *txq = (struct ixgbe_tx_queue *)q;
+	struct ixgbe_tx_entry *sw_ring;
+	volatile union ixgbe_adv_tx_desc *txr;
+	uint16_t tx_first; /* First segment analyzed. */
+	uint16_t tx_id;    /* Current segment being processed. */
+	uint16_t tx_last;  /* Last segment in the current packet. */
+	uint16_t tx_next;  /* First segment of the next packet. */
+	int count;
+
+	if (txq == NULL)
+		return -ENODEV;
+
+	count = 0;
+	sw_ring = txq->sw_ring;
+	txr = txq->tx_ring;
+
+	/*
+	 * tx_tail is the last sent packet on the sw_ring. Go to the end
+	 * of that packet (the last segment in the packet chain) and
+	 * then the next segment will be the start of the oldest segment
+	 * in the sw_ring. This is the first packet that will be
+	 * attempted to be freed.
+	 */
+
+	/* Get last segment in most recently added packet. */
+	tx_last = sw_ring[txq->tx_tail].last_id;
+
+	/* Get the next segment, which is the oldest segment in ring. */
+	tx_first = sw_ring[tx_last].next_id;
+
+	/* Set the current index to the first. */
+	tx_id = tx_first;
+
+	/*
+	 * Loop through each packet. For each packet, verify that an
+	 * mbuf exists and that the last segment is free. If so, free
+	 * it and move on.
+	 */
+	while (1) {
+		tx_last = sw_ring[tx_id].last_id;
+
+		if (sw_ring[tx_last].mbuf) {
+			if (txr[tx_last].wb.status &
+					IXGBE_TXD_STAT_DD) {
+				/* Get the start of the next packet. */
+				tx_next = sw_ring[tx_last].next_id;
+
+				/*
+				 * Loop through all segments in a
+				 * packet.
+				 */
+				do {
+					rte_pktmbuf_free_seg(sw_ring[tx_id].mbuf);
+					sw_ring[tx_id].mbuf = NULL;
+					sw_ring[tx_id].last_id = tx_id;
+
+					/* Move to next segment. */
+					tx_id = sw_ring[tx_id].next_id;
+
+				} while (tx_id != tx_next);
+
+				/*
+				 * Increment the number of packets
+				 * freed.
+				 */
+				count++;
+
+				if (unlikely(count == (int)free_cnt))
+					break;
+			} else {
+				/*
+				 * mbuf still in use, nothing left to
+				 * free.
+				 */
+				break;
+			}
+		} else {
+			/*
+			 * There are multiple reasons to be here:
+			 * 1) All the packets on the ring have been
+			 *    freed - tx_id is equal to tx_first
+			 *    and some packets have been freed.
+			 *    - Done, exit
+			 * 2) The interface has not sent a ring's worth of
+			 *    packets yet, so the segment after tail is
+			 *    still empty. Or a previous call to this
+			 *    function freed some of the segments but
+			 *    not all so there is a hole in the list.
+			 *    Hopefully this is a rare case.
+			 *    - Walk the list and find the next mbuf. If
+			 *      there isn't one, then done.
+			 */
+			if (likely(tx_id == tx_first && count != 0))
+				break;
+
+			/*
+			 * Walk the list and find the next mbuf, if any.
+			 */
+			do {
+				/* Move to next segment. */
+				tx_id = sw_ring[tx_id].next_id;
+
+				if (sw_ring[tx_id].mbuf)
+					break;
+
+			} while (tx_id != tx_first);
+
+			/*
+			 * Determine why previous loop bailed. If there
+			 * is not an mbuf, done.
+			 */
+			if (sw_ring[tx_id].mbuf == NULL)
+				break;
+		}
+	}
+
+	return count;
+}
+
 static void __attribute__((cold))
 ixgbe_tx_free_swring(struct ixgbe_tx_queue *txq)
 {
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.h b/drivers/net/ixgbe/ixgbe_rxtx.h
index 505d344b9..2c3770af6 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.h
+++ b/drivers/net/ixgbe/ixgbe_rxtx.h
@@ -285,6 +285,8 @@ int ixgbe_rx_vec_dev_conf_condition_check(struct rte_eth_dev *dev);
 int ixgbe_rxq_vec_setup(struct ixgbe_rx_queue *rxq);
 void ixgbe_rx_queue_release_mbufs_vec(struct ixgbe_rx_queue *rxq);
 
+int ixgbe_tx_done_cleanup(void *txq, uint32_t free_cnt);
+
 extern const uint32_t ptype_table[IXGBE_PACKET_TYPE_MAX];
 extern const uint32_t ptype_table_tn[IXGBE_PACKET_TYPE_TN_MAX];
 
-- 
2.17.1


^ permalink raw reply	[flat|nested] 9+ messages in thread

* [dpdk-dev] [PATCH v5 4/4] net/e1000: cleanup Tx buffers
  2019-12-27  3:45 ` [dpdk-dev] [PATCH v5 0/4] drivers/net: cleanup Tx buffers Chenxu Di
                     ` (2 preceding siblings ...)
  2019-12-27  3:45   ` [dpdk-dev] [PATCH v5 3/4] net/ixgbe: " Chenxu Di
@ 2019-12-27  3:45   ` Chenxu Di
  2019-12-27  8:25   ` [dpdk-dev] [PATCH v5 0/4] drivers/net: " Zhang, Xiao
  4 siblings, 0 replies; 9+ messages in thread
From: Chenxu Di @ 2019-12-27  3:45 UTC (permalink / raw)
  To: dev; +Cc: Yang Qiming, beilei.xing, Chenxu Di

Add support to the igb VF driver for the API rte_eth_tx_done_cleanup
to force freeing of consumed buffers on the Tx ring.

Signed-off-by: Chenxu Di <chenxux.di@intel.com>
---
 drivers/net/e1000/igb_ethdev.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/drivers/net/e1000/igb_ethdev.c b/drivers/net/e1000/igb_ethdev.c
index a3e30dbe5..647d5504f 100644
--- a/drivers/net/e1000/igb_ethdev.c
+++ b/drivers/net/e1000/igb_ethdev.c
@@ -446,6 +446,7 @@ static const struct eth_dev_ops igbvf_eth_dev_ops = {
 	.tx_descriptor_status = eth_igb_tx_descriptor_status,
 	.tx_queue_setup       = eth_igb_tx_queue_setup,
 	.tx_queue_release     = eth_igb_tx_queue_release,
+	.tx_done_cleanup      = eth_igb_tx_done_cleanup,
 	.set_mc_addr_list     = eth_igb_set_mc_addr_list,
 	.rxq_info_get         = igb_rxq_info_get,
 	.txq_info_get         = igb_txq_info_get,
-- 
2.17.1


^ permalink raw reply	[flat|nested] 9+ messages in thread

* Re: [dpdk-dev] [PATCH v5 0/4] drivers/net: cleanup Tx buffers
  2019-12-27  3:45 ` [dpdk-dev] [PATCH v5 0/4] drivers/net: cleanup Tx buffers Chenxu Di
                     ` (3 preceding siblings ...)
  2019-12-27  3:45   ` [dpdk-dev] [PATCH v5 4/4] net/e1000: " Chenxu Di
@ 2019-12-27  8:25   ` Zhang, Xiao
  4 siblings, 0 replies; 9+ messages in thread
From: Zhang, Xiao @ 2019-12-27  8:25 UTC (permalink / raw)
  To: Di, ChenxuX, dev; +Cc: Yang, Qiming, Xing, Beilei, Di, ChenxuX


> -----Original Message-----
> From: dev [mailto:dev-bounces@dpdk.org] On Behalf Of Chenxu Di
> Sent: Friday, December 27, 2019 11:45 AM
> To: dev@dpdk.org
> Cc: Yang, Qiming <qiming.yang@intel.com>; Xing, Beilei
> <beilei.xing@intel.com>; Di, ChenxuX <chenxux.di@intel.com>
> Subject: [dpdk-dev] [PATCH v5 0/4] drivers/net: cleanup Tx buffers
> 
> Add support to the i40e, ice, ixgbe and igb VF drivers for the API
> rte_eth_tx_done_cleanup to force freeing of consumed buffers on the Tx ring.
> 
> ---
> v2:
> added code for igb VF.
> v3:
> corrected author information.
> v4:
> reworked the code.
> v5:
> fixed code and comments.
> removed the fm10k changes.
> 
> Chenxu Di (4):
>   net/i40e: cleanup Tx buffers
>   net/ice: cleanup Tx buffers
>   net/ixgbe: cleanup Tx buffers
>   net/e1000: cleanup Tx buffers
> 
>  drivers/net/e1000/igb_ethdev.c    |   1 +
>  drivers/net/i40e/i40e_ethdev.c    |   1 +
>  drivers/net/i40e/i40e_ethdev_vf.c |   1 +
>  drivers/net/i40e/i40e_rxtx.c      | 122 +++++++++++++++++++++++++++++
>  drivers/net/i40e/i40e_rxtx.h      |   1 +
>  drivers/net/ice/ice_ethdev.c      |   1 +
>  drivers/net/ice/ice_rxtx.c        | 123 ++++++++++++++++++++++++++++++
>  drivers/net/ice/ice_rxtx.h        |   1 +
>  drivers/net/ixgbe/ixgbe_ethdev.c  |   2 +
>  drivers/net/ixgbe/ixgbe_rxtx.c    | 121 +++++++++++++++++++++++++++++
>  drivers/net/ixgbe/ixgbe_rxtx.h    |   2 +
>  11 files changed, 376 insertions(+)
> 
> --
> 2.17.1

Reviewed-by: Xiao Zhang <xiao.zhang@intel.com>

^ permalink raw reply	[flat|nested] 9+ messages in thread

* Re: [dpdk-dev] [PATCH v5 1/4] net/i40e: cleanup Tx buffers
  2019-12-27  3:45   ` [dpdk-dev] [PATCH v5 1/4] net/i40e: " Chenxu Di
@ 2019-12-30  3:17     ` Yang, Qiming
  0 siblings, 0 replies; 9+ messages in thread
From: Yang, Qiming @ 2019-12-30  3:17 UTC (permalink / raw)
  To: Di, ChenxuX, dev; +Cc: Xing, Beilei



-----Original Message-----
From: Di, ChenxuX 
Sent: Friday, December 27, 2019 11:45
To: dev@dpdk.org
Cc: Yang, Qiming <qiming.yang@intel.com>; Xing, Beilei <beilei.xing@intel.com>; Di, ChenxuX <chenxux.di@intel.com>
Subject: [PATCH v5 1/4] net/i40e: cleanup Tx buffers

Add support to the i40e driver for the API rte_eth_tx_done_cleanup to force freeing of consumed buffers on the Tx ring.

Signed-off-by: Chenxu Di <chenxux.di@intel.com>
---
 drivers/net/i40e/i40e_ethdev.c    |   1 +
 drivers/net/i40e/i40e_ethdev_vf.c |   1 +
 drivers/net/i40e/i40e_rxtx.c      | 122 ++++++++++++++++++++++++++++++
 drivers/net/i40e/i40e_rxtx.h      |   1 +
 4 files changed, 125 insertions(+)

diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 5999c964b..fad47a942 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -522,6 +522,7 @@ static const struct eth_dev_ops i40e_eth_dev_ops = {
 	.mac_addr_set                 = i40e_set_default_mac_addr,
 	.mtu_set                      = i40e_dev_mtu_set,
 	.tm_ops_get                   = i40e_tm_ops_get,
+	.tx_done_cleanup              = i40e_tx_done_cleanup,
 };
 
 /* store statistics names and its offset in stats structure */
diff --git a/drivers/net/i40e/i40e_ethdev_vf.c b/drivers/net/i40e/i40e_ethdev_vf.c
index 5dba0928b..0ca5417d7 100644
--- a/drivers/net/i40e/i40e_ethdev_vf.c
+++ b/drivers/net/i40e/i40e_ethdev_vf.c
@@ -215,6 +215,7 @@ static const struct eth_dev_ops i40evf_eth_dev_ops = {
 	.rss_hash_conf_get    = i40evf_dev_rss_hash_conf_get,
 	.mtu_set              = i40evf_dev_mtu_set,
 	.mac_addr_set         = i40evf_set_default_mac_addr,
+	.tx_done_cleanup      = i40e_tx_done_cleanup,
 };
 
 /*
diff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c
index 17dc8c78f..9e4b0b678 100644
--- a/drivers/net/i40e/i40e_rxtx.c
+++ b/drivers/net/i40e/i40e_rxtx.c
@@ -2455,6 +2455,128 @@ i40e_tx_queue_release_mbufs(struct i40e_tx_queue *txq)
 	}
 }
 
+int i40e_tx_done_cleanup(void *q, uint32_t free_cnt)
+{
+	struct i40e_tx_queue *txq = (struct i40e_tx_queue *)q;
+	struct i40e_tx_entry *sw_ring;
+	volatile struct i40e_tx_desc *txr;
+	uint16_t tx_first; /* First segment analyzed. */
+	uint16_t tx_id;    /* Current segment being processed. */
+	uint16_t tx_last;  /* Last segment in the current packet. */
+	uint16_t tx_next;  /* First segment of the next packet. */
+	int count;
+
+	if (txq == NULL)
+		return -ENODEV;
+
+	count = 0;
+	sw_ring = txq->sw_ring;
+	txr = txq->tx_ring;
+
+	/*
+	 * tx_tail is the last sent packet on the sw_ring. Go to the end
+	 * of that packet (the last segment in the packet chain) and
+	 * then the next segment will be the start of the oldest segment
+	 * in the sw_ring. This is the first packet that will be
+	 * attempted to be freed.
+	 */
+
+	/* Get last segment in most recently added packet. */
+	tx_last = sw_ring[txq->tx_tail].last_id;
+
+	/* Get the next segment, which is the oldest segment in ring. */
+	tx_first = sw_ring[tx_last].next_id;
+
+	/* Set the current index to the first. */
+	tx_id = tx_first;
+
+	/*
+	 * Loop through each packet. For each packet, verify that an
+	 * mbuf exists and that the last segment is free. If so, free
+	 * it and move on.
+	 */
+	while (1) {
+		tx_last = sw_ring[tx_id].last_id;
+
+		if (sw_ring[tx_last].mbuf) {
+			if ((txr[tx_last].cmd_type_offset_bsz &
+				rte_cpu_to_le_64(I40E_TXD_QW1_DTYPE_MASK)) ==
+				rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DESC_DONE)) {
+				/* Get the start of the next packet. */
+				tx_next = sw_ring[tx_last].next_id;
+
+				/*
+				 * Loop through all segments in a
+				 * packet.
+				 */
+				do {
+					rte_pktmbuf_free_seg(sw_ring[tx_id].mbuf);
+					sw_ring[tx_id].mbuf = NULL;
+					sw_ring[tx_id].last_id = tx_id;
+
+					/* Move to next segment. */
+					tx_id = sw_ring[tx_id].next_id;
+
+				} while (tx_id != tx_next);
+
+				/*
+				 * Increment the number of packets
+				 * freed.
+				 */
+				count++;
+
+				if (unlikely(count == (int)free_cnt))
+					break;
+			} else {

If the else branch only contains a break, this if/else can be flattened.
Please follow clean-code practice and remove the unnecessary indentation.
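Something like the following (an untested sketch, shown only to illustrate
the suggestion) inverts the check and drops one level of nesting:

		/* Descriptor not written back yet: mbuf still in use, stop. */
		if ((txr[tx_last].cmd_type_offset_bsz &
		     rte_cpu_to_le_64(I40E_TXD_QW1_DTYPE_MASK)) !=
		    rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DESC_DONE))
			break;

		/* ...free all segments of this packet and count it... */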

+				/*
+				 * mbuf still in use, nothing left to
+				 * free.
+				 */
+				break;
+			}
+		} else {
+			/*
+			 * There are multiple reasons to be here:
+			 * 1) All the packets on the ring have been
+			 *    freed - tx_id is equal to tx_first
+			 *    and some packets have been freed.
+			 *    - Done, exit
+			 * 2) The interface has not sent a ring's worth of
+			 *    packets yet, so the segment after tail is
+			 *    still empty. Or a previous call to this
+			 *    function freed some of the segments but
+			 *    not all so there is a hole in the list.
+			 *    Hopefully this is a rare case.
+			 *    - Walk the list and find the next mbuf. If
+			 *      there isn't one, then done.
+			 */
+			if (likely(tx_id == tx_first && count != 0))
+				break;
+
+			/*
+			 * Walk the list and find the next mbuf, if any.
+			 */
+			do {
+				/* Move to next segment. */
+				tx_id = sw_ring[tx_id].next_id;
+
+				if (sw_ring[tx_id].mbuf)
+					break;
+
+			} while (tx_id != tx_first);
+
+			/*
+			 * Determine why previous loop bailed. If there
+			 * is not an mbuf, done.
+			 */
+			if (sw_ring[tx_id].mbuf == NULL)
+				break;
+		}
+	}
+
+	return count;
+}
+
 void
 i40e_reset_tx_queue(struct i40e_tx_queue *txq)
 {
diff --git a/drivers/net/i40e/i40e_rxtx.h b/drivers/net/i40e/i40e_rxtx.h
index 2106bb355..8f11f011a 100644
--- a/drivers/net/i40e/i40e_rxtx.h
+++ b/drivers/net/i40e/i40e_rxtx.h
@@ -212,6 +212,7 @@ void i40e_dev_free_queues(struct rte_eth_dev *dev);
 void i40e_reset_rx_queue(struct i40e_rx_queue *rxq);
 void i40e_reset_tx_queue(struct i40e_tx_queue *txq);
 void i40e_tx_queue_release_mbufs(struct i40e_tx_queue *txq);
+int i40e_tx_done_cleanup(void *txq, uint32_t free_cnt);
 int i40e_alloc_rx_queue_mbufs(struct i40e_rx_queue *rxq);
 void i40e_rx_queue_release_mbufs(struct i40e_rx_queue *rxq);
 
--
2.17.1


^ permalink raw reply	[flat|nested] 9+ messages in thread

* Re: [dpdk-dev] [PATCH v5 2/4] net/ice: cleanup Tx buffers
  2019-12-27  3:45   ` [dpdk-dev] [PATCH v5 2/4] net/ice: " Chenxu Di
@ 2019-12-30  3:29     ` Yang, Qiming
  0 siblings, 0 replies; 9+ messages in thread
From: Yang, Qiming @ 2019-12-30  3:29 UTC (permalink / raw)
  To: Di, ChenxuX, dev; +Cc: Xing, Beilei



-----Original Message-----
From: Di, ChenxuX 
Sent: Friday, December 27, 2019 11:45
To: dev@dpdk.org
Cc: Yang, Qiming <qiming.yang@intel.com>; Xing, Beilei <beilei.xing@intel.com>; Di, ChenxuX <chenxux.di@intel.com>
Subject: [PATCH v5 2/4] net/ice: cleanup Tx buffers

Add support to the ice driver for the API rte_eth_tx_done_cleanup to force freeing of consumed buffers on the Tx ring.

Signed-off-by: Chenxu Di <chenxux.di@intel.com>
---
 drivers/net/ice/ice_ethdev.c |   1 +
 drivers/net/ice/ice_rxtx.c   | 123 +++++++++++++++++++++++++++++++++++
 drivers/net/ice/ice_rxtx.h   |   1 +
 3 files changed, 125 insertions(+)

.......

+	/*
+	 * Loop through each packet. For each packet, verify that an
+	 * mbuf exists and that the last segment is free. If so, free
+	 * it and move on.
+	 */
+	while (1) {
+		tx_last = sw_ring[tx_id].last_id;
+
+		if (sw_ring[tx_last].mbuf) {
+			if ((txr[tx_last].cmd_type_offset_bsz &
+				rte_cpu_to_le_64(ICE_TXD_QW1_DTYPE_M)) ==
+				rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE)) {
+				/* Get the start of the next packet. */
+				tx_next = sw_ring[tx_last].next_id;
+
+				/*
+				 * Loop through all segments in a
+				 * packet.
+				 */
+				do {
+					rte_pktmbuf_free_seg(sw_ring[tx_id].mbuf);
+					sw_ring[tx_id].mbuf = NULL;
+					sw_ring[tx_id].last_id = tx_id;
+
+					/* Move to next segment. */
+					tx_id = sw_ring[tx_id].next_id;
+
+				} while (tx_id != tx_next);
+
+				/*
+				 * Increment the number of packets
+				 * freed.
+				 */
+				count++;
+
+				if (unlikely(count == (int)free_cnt))
+					break;
+			} else {
+				/*
+				 * mbuf still in use, nothing left to
+				 * free.
+				 */
+				break;

Same comment as patch 1

+			}

.....

^ permalink raw reply	[flat|nested] 9+ messages in thread

* Re: [dpdk-dev] [PATCH v5 3/4] net/ixgbe: cleanup Tx buffers
  2019-12-27  3:45   ` [dpdk-dev] [PATCH v5 3/4] net/ixgbe: " Chenxu Di
@ 2019-12-30  3:29     ` Yang, Qiming
  0 siblings, 0 replies; 9+ messages in thread
From: Yang, Qiming @ 2019-12-30  3:29 UTC (permalink / raw)
  To: Di, ChenxuX, dev; +Cc: Xing, Beilei



-----Original Message-----
From: Di, ChenxuX 
Sent: Friday, December 27, 2019 11:45
To: dev@dpdk.org
Cc: Yang, Qiming <qiming.yang@intel.com>; Xing, Beilei <beilei.xing@intel.com>; Di, ChenxuX <chenxux.di@intel.com>
Subject: [PATCH v5 3/4] net/ixgbe: cleanup Tx buffers

Add support to the ixgbe driver for the API rte_eth_tx_done_cleanup to force freeing of consumed buffers on the Tx ring.

Signed-off-by: Chenxu Di <chenxux.di@intel.com>
---
 drivers/net/ixgbe/ixgbe_ethdev.c |   2 +
 drivers/net/ixgbe/ixgbe_rxtx.c   | 121 +++++++++++++++++++++++++++++++
 drivers/net/ixgbe/ixgbe_rxtx.h   |   2 +
 3 files changed, 125 insertions(+)

diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index 2c6fd0f13..0091405db 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.c
+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -601,6 +601,7 @@ static const struct eth_dev_ops ixgbe_eth_dev_ops = {
 	.udp_tunnel_port_add  = ixgbe_dev_udp_tunnel_port_add,
 	.udp_tunnel_port_del  = ixgbe_dev_udp_tunnel_port_del,
 	.tm_ops_get           = ixgbe_tm_ops_get,
+	.tx_done_cleanup      = ixgbe_tx_done_cleanup,
 };
 
 /*
@@ -649,6 +650,7 @@ static const struct eth_dev_ops ixgbevf_eth_dev_ops = {
 	.reta_query           = ixgbe_dev_rss_reta_query,
 	.rss_hash_update      = ixgbe_dev_rss_hash_update,
 	.rss_hash_conf_get    = ixgbe_dev_rss_hash_conf_get,
+	.tx_done_cleanup      = ixgbe_tx_done_cleanup,
 };
 
 /* store statistics names and its offset in stats structure */
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c
index fa572d184..8d8e0655c 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx.c
@@ -2306,6 +2306,127 @@ ixgbe_tx_queue_release_mbufs(struct ixgbe_tx_queue *txq)
 	}
 }
 
+int ixgbe_tx_done_cleanup(void *q, uint32_t free_cnt)
+{
+	struct ixgbe_tx_queue *txq = (struct ixgbe_tx_queue *)q;
+	struct ixgbe_tx_entry *sw_ring;
+	volatile union ixgbe_adv_tx_desc *txr;
+	uint16_t tx_first; /* First segment analyzed. */
+	uint16_t tx_id;    /* Current segment being processed. */
+	uint16_t tx_last;  /* Last segment in the current packet. */
+	uint16_t tx_next;  /* First segment of the next packet. */
+	int count;
+
+	if (txq == NULL)
+		return -ENODEV;
+
+	count = 0;
+	sw_ring = txq->sw_ring;
+	txr = txq->tx_ring;
+
+	/*
+	 * tx_tail is the last sent packet on the sw_ring. Go to the end
+	 * of that packet (the last segment in the packet chain) and
+	 * then the next segment will be the start of the oldest segment
+	 * in the sw_ring. This is the first packet that will be
+	 * attempted to be freed.
+	 */
+
+	/* Get last segment in most recently added packet. */
+	tx_last = sw_ring[txq->tx_tail].last_id;
+
+	/* Get the next segment, which is the oldest segment in ring. */
+	tx_first = sw_ring[tx_last].next_id;
+
+	/* Set the current index to the first. */
+	tx_id = tx_first;
+
+	/*
+	 * Loop through each packet. For each packet, verify that an
+	 * mbuf exists and that the last segment is free. If so, free
+	 * it and move on.
+	 */
+	while (1) {
+		tx_last = sw_ring[tx_id].last_id;
+
+		if (sw_ring[tx_last].mbuf) {
+			if (txr[tx_last].wb.status &
+					IXGBE_TXD_STAT_DD) {
+				/* Get the start of the next packet. */
+				tx_next = sw_ring[tx_last].next_id;
+
+				/*
+				 * Loop through all segments in a
+				 * packet.
+				 */
+				do {
+					rte_pktmbuf_free_seg(sw_ring[tx_id].mbuf);
+					sw_ring[tx_id].mbuf = NULL;
+					sw_ring[tx_id].last_id = tx_id;
+
+					/* Move to next segment. */
+					tx_id = sw_ring[tx_id].next_id;
+
+				} while (tx_id != tx_next);
+
+				/*
+				 * Increment the number of packets
+				 * freed.
+				 */
+				count++;
+
+				if (unlikely(count == (int)free_cnt))
+					break;
+			} else {
+				/*
+				 * mbuf still in use, nothing left to
+				 * free.
+				 */
+				break;

same
+			}
+		} else {
+			/*
+			 * There are multiple reasons to be here:
+			 * 1) All the packets on the ring have been
+			 *    freed - tx_id is equal to tx_first
+			 *    and some packets have been freed.
+			 *    - Done, exit
+			 * 2) The interface has not sent a ring's worth of
+			 *    packets yet, so the segment after tail is
+			 *    still empty. Or a previous call to this
+			 *    function freed some of the segments but
+			 *    not all so there is a hole in the list.
+			 *    Hopefully this is a rare case.
+			 *    - Walk the list and find the next mbuf. If
+			 *      there isn't one, then done.
+			 */
+			if (likely(tx_id == tx_first && count != 0))
+				break;
+
+			/*
+			 * Walk the list and find the next mbuf, if any.
+			 */
+			do {
+				/* Move to next segment. */
+				tx_id = sw_ring[tx_id].next_id;
+
+				if (sw_ring[tx_id].mbuf)
+					break;
+
+			} while (tx_id != tx_first);
+
+			/*
+			 * Determine why previous loop bailed. If there
+			 * is not an mbuf, done.
+			 */
+			if (sw_ring[tx_id].mbuf == NULL)
+				break;
+		}
+	}
+
+	return count;
+}
+
 static void __attribute__((cold))
 ixgbe_tx_free_swring(struct ixgbe_tx_queue *txq)
 {
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.h b/drivers/net/ixgbe/ixgbe_rxtx.h
index 505d344b9..2c3770af6 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.h
+++ b/drivers/net/ixgbe/ixgbe_rxtx.h
@@ -285,6 +285,8 @@ int ixgbe_rx_vec_dev_conf_condition_check(struct rte_eth_dev *dev);
 int ixgbe_rxq_vec_setup(struct ixgbe_rx_queue *rxq);
 void ixgbe_rx_queue_release_mbufs_vec(struct ixgbe_rx_queue *rxq);
 
+int ixgbe_tx_done_cleanup(void *txq, uint32_t free_cnt);
+
 extern const uint32_t ptype_table[IXGBE_PACKET_TYPE_MAX];
 extern const uint32_t ptype_table_tn[IXGBE_PACKET_TYPE_TN_MAX];
 
--
2.17.1


^ permalink raw reply	[flat|nested] 9+ messages in thread

end of thread, other threads:[~2019-12-30  3:29 UTC | newest]

Thread overview: 9+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
     [not found] <20191203055134.72874-1-chenxux.di@intel.com>
2019-12-27  3:45 ` [dpdk-dev] [PATCH v5 0/4] drivers/net: cleanup Tx buffers Chenxu Di
2019-12-27  3:45   ` [dpdk-dev] [PATCH v5 1/4] net/i40e: " Chenxu Di
2019-12-30  3:17     ` Yang, Qiming
2019-12-27  3:45   ` [dpdk-dev] [PATCH v5 2/4] net/ice: " Chenxu Di
2019-12-30  3:29     ` Yang, Qiming
2019-12-27  3:45   ` [dpdk-dev] [PATCH v5 3/4] net/ixgbe: " Chenxu Di
2019-12-30  3:29     ` Yang, Qiming
2019-12-27  3:45   ` [dpdk-dev] [PATCH v5 4/4] net/e1000: " Chenxu Di
2019-12-27  8:25   ` [dpdk-dev] [PATCH v5 0/4] drivers/net: " Zhang, Xiao
