DPDK patches and discussions
From: "Yang, Qiming" <qiming.yang@intel.com>
To: "Di, ChenxuX" <chenxux.di@intel.com>, "dev@dpdk.org" <dev@dpdk.org>
Cc: "Xing, Beilei" <beilei.xing@intel.com>
Subject: Re: [dpdk-dev] [PATCH v5 1/4] net/i40e: cleanup Tx buffers
Date: Mon, 30 Dec 2019 03:17:29 +0000
Message-ID: <F5DF4F0E3AFEF648ADC1C3C33AD4DBF17A5A46F6@SHSMSX101.ccr.corp.intel.com>
In-Reply-To: <20191227034520.78613-2-chenxux.di@intel.com>



-----Original Message-----
From: Di, ChenxuX 
Sent: Friday, December 27, 2019 11:45
To: dev@dpdk.org
Cc: Yang, Qiming <qiming.yang@intel.com>; Xing, Beilei <beilei.xing@intel.com>; Di, ChenxuX <chenxux.di@intel.com>
Subject: [PATCH v5 1/4] net/i40e: cleanup Tx buffers

Add support in the i40e driver for the rte_eth_tx_done_cleanup API, which forces the freeing of consumed buffers on a Tx ring.

Signed-off-by: Chenxu Di <chenxux.di@intel.com>
---
 drivers/net/i40e/i40e_ethdev.c    |   1 +
 drivers/net/i40e/i40e_ethdev_vf.c |   1 +
 drivers/net/i40e/i40e_rxtx.c      | 122 ++++++++++++++++++++++++++++++
 drivers/net/i40e/i40e_rxtx.h      |   1 +
 4 files changed, 125 insertions(+)
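
For context (not part of the patch itself): a minimal sketch of how an application might call rte_eth_tx_done_cleanup() once this callback is wired up. The port/queue ids and the free count below are illustrative assumptions, not values taken from this series.

    #include <stdio.h>
    #include <rte_ethdev.h>

    /* Illustrative values, not part of the patch. */
    #define TX_PORT_ID  0
    #define TX_QUEUE_ID 0

    static void
    reclaim_tx_mbufs(void)
    {
            /* Ask the PMD to free up to 32 already-transmitted mbufs on
             * this queue. Returns the number of packets freed, or a
             * negative errno (e.g. -ENOTSUP if the driver provides no
             * tx_done_cleanup callback).
             */
            int nb_freed = rte_eth_tx_done_cleanup(TX_PORT_ID, TX_QUEUE_ID, 32);

            if (nb_freed < 0)
                    printf("tx_done_cleanup failed: %d\n", nb_freed);
            else
                    printf("freed %d Tx packets\n", nb_freed);
    }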

diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 5999c964b..fad47a942 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -522,6 +522,7 @@ static const struct eth_dev_ops i40e_eth_dev_ops = {
 	.mac_addr_set                 = i40e_set_default_mac_addr,
 	.mtu_set                      = i40e_dev_mtu_set,
 	.tm_ops_get                   = i40e_tm_ops_get,
+	.tx_done_cleanup              = i40e_tx_done_cleanup,
 };
 
 /* store statistics names and its offset in stats structure */
diff --git a/drivers/net/i40e/i40e_ethdev_vf.c b/drivers/net/i40e/i40e_ethdev_vf.c
index 5dba0928b..0ca5417d7 100644
--- a/drivers/net/i40e/i40e_ethdev_vf.c
+++ b/drivers/net/i40e/i40e_ethdev_vf.c
@@ -215,6 +215,7 @@ static const struct eth_dev_ops i40evf_eth_dev_ops = {
 	.rss_hash_conf_get    = i40evf_dev_rss_hash_conf_get,
 	.mtu_set              = i40evf_dev_mtu_set,
 	.mac_addr_set         = i40evf_set_default_mac_addr,
+	.tx_done_cleanup      = i40e_tx_done_cleanup,
 };
 
 /*
diff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c
index 17dc8c78f..9e4b0b678 100644
--- a/drivers/net/i40e/i40e_rxtx.c
+++ b/drivers/net/i40e/i40e_rxtx.c
@@ -2455,6 +2455,128 @@ i40e_tx_queue_release_mbufs(struct i40e_tx_queue *txq)
 	}
 }
 
+int
+i40e_tx_done_cleanup(void *q, uint32_t free_cnt)
+{
+	struct i40e_tx_queue *txq = (struct i40e_tx_queue *)q;
+	struct i40e_tx_entry *sw_ring;
+	volatile struct i40e_tx_desc *txr;
+	uint16_t tx_first; /* First segment analyzed. */
+	uint16_t tx_id;    /* Current segment being processed. */
+	uint16_t tx_last;  /* Last segment in the current packet. */
+	uint16_t tx_next;  /* First segment of the next packet. */
+	int count;
+
+	if (txq == NULL)
+		return -ENODEV;
+
+	count = 0;
+	sw_ring = txq->sw_ring;
+	txr = txq->tx_ring;
+
+	/*
+	 * tx_tail is the last sent packet on the sw_ring. Go to the end
+	 * of that packet (the last segment in the packet chain); the
+	 * next segment is then the oldest segment in the sw_ring, and
+	 * marks the first packet that will be attempted to be freed.
+	 */
+
+	/* Get last segment in most recently added packet. */
+	tx_last = sw_ring[txq->tx_tail].last_id;
+
+	/* Get the next segment, which is the oldest segment in ring. */
+	tx_first = sw_ring[tx_last].next_id;
+
+	/* Set the current index to the first. */
+	tx_id = tx_first;
+
+	/*
+	 * Loop through each packet. For each packet, verify that an
+	 * mbuf exists and that the last segment is free. If so, free
+	 * it and move on.
+	 */
+	while (1) {
+		tx_last = sw_ring[tx_id].last_id;
+
+		if (sw_ring[tx_last].mbuf) {
+			if ((txr[tx_last].cmd_type_offset_bsz &
+				rte_cpu_to_le_64(I40E_TXD_QW1_DTYPE_MASK)) ==
+				rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DESC_DONE)) {
+				/* Get the start of the next packet. */
+				tx_next = sw_ring[tx_last].next_id;
+
+				/*
+				 * Loop through all segments in a
+				 * packet.
+				 */
+				do {
+					rte_pktmbuf_free_seg(sw_ring[tx_id].mbuf);
+					sw_ring[tx_id].mbuf = NULL;
+					sw_ring[tx_id].last_id = tx_id;
+
+					/* Move to next segment. */
+					tx_id = sw_ring[tx_id].next_id;
+
+				} while (tx_id != tx_next);
+
+				/*
+				 * Increment the number of packets
+				 * freed.
+				 */
+				count++;
+
+				if (unlikely(count == (int)free_cnt))
+					break;
+			} else {

If the else branch only breaks, this if/else can be dropped: invert the
condition and break early instead. Please also keep the code clean and
remove the unnecessary levels of indentation.
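
Roughly something like this (just a sketch of the suggested structure,
not compile-tested):

	if ((txr[tx_last].cmd_type_offset_bsz &
			rte_cpu_to_le_64(I40E_TXD_QW1_DTYPE_MASK)) !=
			rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DESC_DONE))
		/* mbuf still in use, nothing left to free. */
		break;

	/* Get the start of the next packet. */
	tx_next = sw_ring[tx_last].next_id;
	...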

+				/*
+				 * mbuf still in use, nothing left to
+				 * free.
+				 */
+				break;
+			}
+		} else {
+			/*
+			 * There are multiple reasons to be here:
+			 * 1) All the packets on the ring have been
+			 *    freed - tx_id is equal to tx_first
+			 *    and some packets have been freed.
+			 *    - Done, exit
+			 * 2) The interface has not sent a ring's worth of
+			 *    packets yet, so the segment after tail is
+			 *    still empty. Or a previous call to this
+			 *    function freed some of the segments but
+			 *    not all, so there is a hole in the list.
+			 *    Hopefully this is a rare case.
+			 *    - Walk the list and find the next mbuf. If
+			 *      there isn't one, then done.
+			 */
+			if (likely(tx_id == tx_first && count != 0))
+				break;
+
+			/*
+			 * Walk the list and find the next mbuf, if any.
+			 */
+			do {
+				/* Move to next segment. */
+				tx_id = sw_ring[tx_id].next_id;
+
+				if (sw_ring[tx_id].mbuf)
+					break;
+
+			} while (tx_id != tx_first);
+
+			/*
+			 * Determine why previous loop bailed. If there
+			 * is not an mbuf, done.
+			 */
+			if (sw_ring[tx_id].mbuf == NULL)
+				break;
+		}
+	}
+
+	return count;
+}
+
 void
 i40e_reset_tx_queue(struct i40e_tx_queue *txq)
 {
diff --git a/drivers/net/i40e/i40e_rxtx.h b/drivers/net/i40e/i40e_rxtx.h
index 2106bb355..8f11f011a 100644
--- a/drivers/net/i40e/i40e_rxtx.h
+++ b/drivers/net/i40e/i40e_rxtx.h
@@ -212,6 +212,7 @@ void i40e_dev_free_queues(struct rte_eth_dev *dev);
 void i40e_reset_rx_queue(struct i40e_rx_queue *rxq);
 void i40e_reset_tx_queue(struct i40e_tx_queue *txq);
 void i40e_tx_queue_release_mbufs(struct i40e_tx_queue *txq);
+int i40e_tx_done_cleanup(void *txq, uint32_t free_cnt);
 int i40e_alloc_rx_queue_mbufs(struct i40e_rx_queue *rxq);
 void i40e_rx_queue_release_mbufs(struct i40e_rx_queue *rxq);
 
--
2.17.1



Thread overview: 9+ messages
     [not found] <20191203055134.72874-1-chenxux.di@intel.com>
2019-12-27  3:45 ` [dpdk-dev] [PATCH v5 0/4] drivers/net: " Chenxu Di
2019-12-27  3:45   ` [dpdk-dev] [PATCH v5 1/4] net/i40e: " Chenxu Di
2019-12-30  3:17     ` Yang, Qiming [this message]
2019-12-27  3:45   ` [dpdk-dev] [PATCH v5 2/4] net/ice: " Chenxu Di
2019-12-30  3:29     ` Yang, Qiming
2019-12-27  3:45   ` [dpdk-dev] [PATCH v5 3/4] net/ixgbe: " Chenxu Di
2019-12-30  3:29     ` Yang, Qiming
2019-12-27  3:45   ` [dpdk-dev] [PATCH v5 4/4] net/e1000: " Chenxu Di
2019-12-27  8:25   ` [dpdk-dev] [PATCH v5 0/4] drivers/net: " Zhang, Xiao
