DPDK patches and discussions
From: Bruce Richardson <bruce.richardson@intel.com>
To: dev@dpdk.org
Cc: Bruce Richardson <bruce.richardson@intel.com>,
	Anatoly Burakov <anatoly.burakov@intel.com>,
	Konstantin Ananyev <konstantin.v.ananyev@yandex.ru>
Subject: [RFC PATCH 14/21] net/ice: move Tx queue mbuf cleanup fn to common
Date: Fri, 22 Nov 2024 12:54:07 +0000	[thread overview]
Message-ID: <20241122125418.2857301-15-bruce.richardson@intel.com> (raw)
In-Reply-To: <20241122125418.2857301-1-bruce.richardson@intel.com>

The functions to loop over the Tx queue and clean up all the mbufs on
it, e.g. for queue shutdown, are not device specific and so can be
moved into the common/intel_eth driver. The only complication is
ensuring that the correct ring format, either minimal vector or full
structure, is used. The ice driver currently uses two functions and a
function pointer to handle this - and one of those functions performs
a further check internally - so we can simplify this down to a single
common function, with a flag set in the appropriate place. This avoids
checking for the AVX-512-specific Tx functions, which were the only
paths using the smaller entry structure in this driver.
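
As a rough usage sketch (illustration only, not part of the patch: the
example_* function names are hypothetical, while the flags and the
ieth_txq_release_all_mbufs() helper are those added below), a driver
records the ring layout at queue start and the common helper then
walks the correct SW ring type at queue stop or release:

	static void
	example_tx_queue_start(struct ieth_tx_queue *txq, struct ice_adapter *ad)
	{
		/* some vector Tx path is in use for this queue */
		txq->vector_tx = ad->tx_vec_allowed;
		/* AVX-512 paths use the smaller ieth_vec_tx_entry SW ring */
		txq->vector_sw_ring = ad->tx_use_avx512;
	}

	static void
	example_tx_queue_stop(struct ieth_tx_queue *txq)
	{
		/* one common cleanup call replaces the per-driver
		 * tx_rel_mbufs function pointer
		 */
		ieth_txq_release_all_mbufs(txq);
	}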

Signed-off-by: Bruce Richardson <bruce.richardson@intel.com>
---
 drivers/common/intel_eth/ieth_rxtx.h  | 49 ++++++++++++++++++++++++-
 drivers/net/ice/ice_dcf_ethdev.c      |  5 +--
 drivers/net/ice/ice_ethdev.h          |  3 +-
 drivers/net/ice/ice_rxtx.c            | 33 +++++------------
 drivers/net/ice/ice_rxtx_vec_common.h | 51 ---------------------------
 drivers/net/ice/ice_rxtx_vec_sse.c    |  4 +--
 6 files changed, 61 insertions(+), 84 deletions(-)

diff --git a/drivers/common/intel_eth/ieth_rxtx.h b/drivers/common/intel_eth/ieth_rxtx.h
index c336ec81b3..c8e5e1ad76 100644
--- a/drivers/common/intel_eth/ieth_rxtx.h
+++ b/drivers/common/intel_eth/ieth_rxtx.h
@@ -65,6 +65,8 @@ struct ieth_tx_queue {
 	rte_iova_t tx_ring_dma;        /* TX ring DMA address */
 	_Bool tx_deferred_start; /* don't start this queue in dev start */
 	_Bool q_set;             /* indicate if tx queue has been configured */
+	_Bool vector_tx;         /* port is using vector TX */
+	_Bool vector_sw_ring;    /* port is using vectorized SW ring (ieth_vec_tx_entry) */
 	union {                  /* the VSI this queue belongs to */
 		struct i40e_vsi *i40e_vsi;
 		struct iavf_vsi *iavf_vsi;
@@ -74,7 +76,6 @@ struct ieth_tx_queue {
 
 	union {
 		struct { /* ICE driver specific values */
-			ice_tx_release_mbufs_t tx_rel_mbufs;
 			uint32_t q_teid; /* TX schedule node id. */
 		};
 		struct { /* I40E driver specific values */
@@ -102,4 +103,50 @@ struct ieth_tx_queue {
 	};
 };
 
+#define IETH_FREE_BUFS_LOOP(txq, swr, start) do { \
+		uint16_t i = start; \
+		if (txq->tx_tail < i) { \
+			for (; i < txq->nb_tx_desc; i++) { \
+				rte_pktmbuf_free_seg(swr[i].mbuf); \
+				swr[i].mbuf = NULL; \
+			} \
+			i = 0; \
+		} \
+		for (; i < txq->tx_tail; i++) { \
+			rte_pktmbuf_free_seg(swr[i].mbuf); \
+			swr[i].mbuf = NULL; \
+		} \
+} while(0)
+
+static inline void
+ieth_txq_release_all_mbufs(struct ieth_tx_queue *txq)
+{
+	if (unlikely(!txq || !txq->sw_ring))
+		return;
+
+	if (!txq->vector_tx) {
+		for (uint16_t i = 0; i < txq->nb_tx_desc; i++) {
+			if (txq->sw_ring[i].mbuf != NULL) {
+				rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
+				txq->sw_ring[i].mbuf = NULL;
+			}
+		}
+		return;
+	}
+
+	/*
+	 * vPMD Tx does not set the sw_ring mbuf pointers to NULL after
+	 * freeing, so the remaining mbufs must be freed more carefully.
+	 */
+	const uint16_t start = txq->tx_next_dd - txq->tx_rs_thresh + 1;
+
+	if (txq->vector_sw_ring) {
+		struct ieth_vec_tx_entry *swr = txq->sw_ring_v;
+		IETH_FREE_BUFS_LOOP(txq, swr, start);
+	} else {
+		struct ieth_tx_entry *swr = txq->sw_ring;
+		IETH_FREE_BUFS_LOOP(txq, swr, start);
+	}
+}
+
 #endif /* IETH_RXTX_H_ */
diff --git a/drivers/net/ice/ice_dcf_ethdev.c b/drivers/net/ice/ice_dcf_ethdev.c
index b5bab35d77..54d17875bb 100644
--- a/drivers/net/ice/ice_dcf_ethdev.c
+++ b/drivers/net/ice/ice_dcf_ethdev.c
@@ -24,6 +24,7 @@
 #include "ice_generic_flow.h"
 #include "ice_dcf_ethdev.h"
 #include "ice_rxtx.h"
+#include "ieth_rxtx.h"
 
 #define DCF_NUM_MACADDR_MAX      64
 
@@ -500,7 +501,7 @@ ice_dcf_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 	}
 
 	txq = dev->data->tx_queues[tx_queue_id];
-	txq->tx_rel_mbufs(txq);
+	ieth_txq_release_all_mbufs(txq);
 	reset_tx_queue(txq);
 	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
 
@@ -650,7 +651,7 @@ ice_dcf_stop_queues(struct rte_eth_dev *dev)
 		txq = dev->data->tx_queues[i];
 		if (!txq)
 			continue;
-		txq->tx_rel_mbufs(txq);
+		ieth_txq_release_all_mbufs(txq);
 		reset_tx_queue(txq);
 		dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
 	}
diff --git a/drivers/net/ice/ice_ethdev.h b/drivers/net/ice/ice_ethdev.h
index b5d39b3fc6..a99f65c8dc 100644
--- a/drivers/net/ice/ice_ethdev.h
+++ b/drivers/net/ice/ice_ethdev.h
@@ -621,13 +621,12 @@ struct ice_adapter {
 	/* Set bit if the engine is disabled */
 	unsigned long disabled_engine_mask;
 	struct ice_parser *psr;
-#ifdef RTE_ARCH_X86
+	/* used only on x86, zero on other architectures */
 	bool rx_use_avx2;
 	bool rx_use_avx512;
 	bool tx_use_avx2;
 	bool tx_use_avx512;
 	bool rx_vec_offload_support;
-#endif
 };
 
 struct ice_vsi_vlan_pvid_info {
diff --git a/drivers/net/ice/ice_rxtx.c b/drivers/net/ice/ice_rxtx.c
index 9606ac7862..51f82738d5 100644
--- a/drivers/net/ice/ice_rxtx.c
+++ b/drivers/net/ice/ice_rxtx.c
@@ -751,6 +751,7 @@ ice_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 	struct ice_aqc_add_tx_qgrp *txq_elem;
 	struct ice_tlan_ctx tx_ctx;
 	int buf_len;
+	struct ice_adapter *ad = ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
 
 	PMD_INIT_FUNC_TRACE();
 
@@ -822,6 +823,10 @@ ice_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 			return -EIO;
 		}
 
+	/* record what kind of descriptor cleanup we need on teardown */
+	txq->vector_tx = ad->tx_vec_allowed;
+	txq->vector_sw_ring = ad->tx_use_avx512;
+
 	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
 
 	rte_free(txq_elem);
@@ -1006,25 +1011,6 @@ ice_fdir_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 	return 0;
 }
 
-/* Free all mbufs for descriptors in tx queue */
-static void
-_ice_tx_queue_release_mbufs(struct ieth_tx_queue *txq)
-{
-	uint16_t i;
-
-	if (!txq || !txq->sw_ring) {
-		PMD_DRV_LOG(DEBUG, "Pointer to txq or sw_ring is NULL");
-		return;
-	}
-
-	for (i = 0; i < txq->nb_tx_desc; i++) {
-		if (txq->sw_ring[i].mbuf) {
-			rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
-			txq->sw_ring[i].mbuf = NULL;
-		}
-	}
-}
-
 static void
 ice_reset_tx_queue(struct ieth_tx_queue *txq)
 {
@@ -1103,7 +1089,7 @@ ice_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 		return -EINVAL;
 	}
 
-	txq->tx_rel_mbufs(txq);
+	ieth_txq_release_all_mbufs(txq);
 	ice_reset_tx_queue(txq);
 	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
 
@@ -1166,7 +1152,7 @@ ice_fdir_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 		return -EINVAL;
 	}
 
-	txq->tx_rel_mbufs(txq);
+	ieth_txq_release_all_mbufs(txq);
 	txq->qtx_tail = NULL;
 
 	return 0;
@@ -1518,7 +1504,6 @@ ice_tx_queue_setup(struct rte_eth_dev *dev,
 	ice_reset_tx_queue(txq);
 	txq->q_set = true;
 	dev->data->tx_queues[queue_idx] = txq;
-	txq->tx_rel_mbufs = _ice_tx_queue_release_mbufs;
 	ice_set_tx_function_flag(dev, txq);
 
 	return 0;
@@ -1546,8 +1531,7 @@ ice_tx_queue_release(void *txq)
 		return;
 	}
 
-	if (q->tx_rel_mbufs != NULL)
-		q->tx_rel_mbufs(q);
+	ieth_txq_release_all_mbufs(q);
 	rte_free(q->sw_ring);
 	rte_memzone_free(q->mz);
 	rte_free(q);
@@ -2460,7 +2444,6 @@ ice_fdir_setup_tx_resources(struct ice_pf *pf)
 	txq->q_set = true;
 	pf->fdir.txq = txq;
 
-	txq->tx_rel_mbufs = _ice_tx_queue_release_mbufs;
 
 	return ICE_SUCCESS;
 }
diff --git a/drivers/net/ice/ice_rxtx_vec_common.h b/drivers/net/ice/ice_rxtx_vec_common.h
index ef020a9f89..e1493cc28b 100644
--- a/drivers/net/ice/ice_rxtx_vec_common.h
+++ b/drivers/net/ice/ice_rxtx_vec_common.h
@@ -61,57 +61,6 @@ _ice_rx_queue_release_mbufs_vec(struct ice_rx_queue *rxq)
 	memset(rxq->sw_ring, 0, sizeof(rxq->sw_ring[0]) * rxq->nb_rx_desc);
 }
 
-static inline void
-_ice_tx_queue_release_mbufs_vec(struct ieth_tx_queue *txq)
-{
-	uint16_t i;
-
-	if (unlikely(!txq || !txq->sw_ring)) {
-		PMD_DRV_LOG(DEBUG, "Pointer to rxq or sw_ring is NULL");
-		return;
-	}
-
-	/**
-	 *  vPMD tx will not set sw_ring's mbuf to NULL after free,
-	 *  so need to free remains more carefully.
-	 */
-	i = txq->tx_next_dd - txq->tx_rs_thresh + 1;
-
-#ifdef __AVX512VL__
-	struct rte_eth_dev *dev = &rte_eth_devices[txq->ice_vsi->adapter->pf.dev_data->port_id];
-
-	if (dev->tx_pkt_burst == ice_xmit_pkts_vec_avx512 ||
-	    dev->tx_pkt_burst == ice_xmit_pkts_vec_avx512_offload) {
-		struct ieth_vec_tx_entry *swr = (void *)txq->sw_ring;
-
-		if (txq->tx_tail < i) {
-			for (; i < txq->nb_tx_desc; i++) {
-				rte_pktmbuf_free_seg(swr[i].mbuf);
-				swr[i].mbuf = NULL;
-			}
-			i = 0;
-		}
-		for (; i < txq->tx_tail; i++) {
-			rte_pktmbuf_free_seg(swr[i].mbuf);
-			swr[i].mbuf = NULL;
-		}
-	} else
-#endif
-	{
-		if (txq->tx_tail < i) {
-			for (; i < txq->nb_tx_desc; i++) {
-				rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
-				txq->sw_ring[i].mbuf = NULL;
-			}
-			i = 0;
-		}
-		for (; i < txq->tx_tail; i++) {
-			rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
-			txq->sw_ring[i].mbuf = NULL;
-		}
-	}
-}
-
 static inline int
 ice_rxq_vec_setup_default(struct ice_rx_queue *rxq)
 {
diff --git a/drivers/net/ice/ice_rxtx_vec_sse.c b/drivers/net/ice/ice_rxtx_vec_sse.c
index b951d85cfd..c89cbf2b15 100644
--- a/drivers/net/ice/ice_rxtx_vec_sse.c
+++ b/drivers/net/ice/ice_rxtx_vec_sse.c
@@ -793,12 +793,10 @@ ice_rxq_vec_setup(struct ice_rx_queue *rxq)
 }
 
 int __rte_cold
-ice_txq_vec_setup(struct ieth_tx_queue __rte_unused *txq)
+ice_txq_vec_setup(struct ieth_tx_queue *txq)
 {
 	if (!txq)
 		return -1;
-
-	txq->tx_rel_mbufs = _ice_tx_queue_release_mbufs_vec;
 	return 0;
 }
 
-- 
2.43.0



Thread overview: 22+ messages
2024-11-22 12:53 [RFC PATCH 00/21] Reduce code duplication across Intel NIC drivers Bruce Richardson
2024-11-22 12:53 ` [RFC PATCH 01/21] common/intel_eth: add pkt reassembly fn for intel drivers Bruce Richardson
2024-11-22 12:53 ` [RFC PATCH 02/21] common/intel_eth: provide common Tx entry structures Bruce Richardson
2024-11-22 12:53 ` [RFC PATCH 03/21] common/intel_eth: add Tx mbuf ring replenish fn Bruce Richardson
2024-11-22 12:53 ` [RFC PATCH 04/21] drivers/net: align Tx queue struct field names Bruce Richardson
2024-11-22 12:53 ` [RFC PATCH 05/21] drivers/net: add prefix for driver-specific structs Bruce Richardson
2024-11-22 12:53 ` [RFC PATCH 06/21] common/intel_eth: merge ice and i40e Tx queue struct Bruce Richardson
2024-11-22 12:54 ` [RFC PATCH 07/21] net/iavf: use common Tx queue structure Bruce Richardson
2024-11-22 12:54 ` [RFC PATCH 08/21] net/ixgbe: convert Tx queue context cache field to ptr Bruce Richardson
2024-11-22 12:54 ` [RFC PATCH 09/21] net/ixgbe: use common Tx queue structure Bruce Richardson
2024-11-22 12:54 ` [RFC PATCH 10/21] common/intel_eth: pack " Bruce Richardson
2024-11-22 12:54 ` [RFC PATCH 11/21] common/intel_eth: add post-Tx buffer free function Bruce Richardson
2024-11-22 12:54 ` [RFC PATCH 12/21] common/intel_eth: add Tx buffer free fn for AVX-512 Bruce Richardson
2024-11-22 12:54 ` [RFC PATCH 13/21] net/iavf: use common Tx " Bruce Richardson
2024-11-22 12:54 ` Bruce Richardson [this message]
2024-11-22 12:54 ` [RFC PATCH 15/21] net/i40e: use common Tx queue mbuf cleanup fn Bruce Richardson
2024-11-22 12:54 ` [RFC PATCH 16/21] net/ixgbe: " Bruce Richardson
2024-11-22 12:54 ` [RFC PATCH 17/21] net/iavf: " Bruce Richardson
2024-11-22 12:54 ` [RFC PATCH 18/21] net/ice: use vector SW ring for all vector paths Bruce Richardson
2024-11-22 12:54 ` [RFC PATCH 19/21] net/i40e: " Bruce Richardson
2024-11-22 12:54 ` [RFC PATCH 20/21] net/iavf: " Bruce Richardson
2024-11-22 12:54 ` [RFC PATCH 21/21] net/ixgbe: use common Tx backlog entry fn Bruce Richardson
