DPDK patches and discussions
From: Bruce Richardson <bruce.richardson@intel.com>
To: dev@dpdk.org
Cc: Bruce Richardson <bruce.richardson@intel.com>,
	Vladimir Medvedkin <vladimir.medvedkin@intel.com>,
	Ian Stokes <ian.stokes@intel.com>,
	Konstantin Ananyev <konstantin.v.ananyev@yandex.ru>
Subject: [RFC PATCH 07/21] net/iavf: use common Tx queue structure
Date: Fri, 22 Nov 2024 12:54:00 +0000
Message-ID: <20241122125418.2857301-8-bruce.richardson@intel.com>
In-Reply-To: <20241122125418.2857301-1-bruce.richardson@intel.com>

Merge in the few additional fields used by the iavf driver and convert
that driver to use the common Tx queue structure as well.
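
As a rough illustration only (not part of the diff below), after this
change iavf code reaches its driver-specific state through the
anonymous per-driver union of the common struct. The helper below is
hypothetical and assumes the ieth_rxtx.h definitions from this series:

  /* hypothetical helper showing field access after the merge */
  static inline void
  iavf_example_txq_init(struct ieth_tx_queue *txq)
  {
          /* fields shared with the ice and i40e drivers */
          txq->tx_rs_thresh = 32;
          txq->tx_free_thresh = 32;

          /* iavf-specific fields, now in the per-driver union */
          txq->vlan_flag = IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG1;
          txq->use_ctx = 0;

          /* Tx ring pointer selected via the per-driver ring union */
          txq->iavf_tx_ring = NULL;
  }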

Signed-off-by: Bruce Richardson <bruce.richardson@intel.com>
---
 drivers/common/intel_eth/ieth_rxtx.h    | 16 +++++++-
 drivers/net/iavf/iavf.h                 |  2 +-
 drivers/net/iavf/iavf_ethdev.c          |  4 +-
 drivers/net/iavf/iavf_rxtx.c            | 42 ++++++++++-----------
 drivers/net/iavf/iavf_rxtx.h            | 49 +++----------------------
 drivers/net/iavf/iavf_rxtx_vec_avx2.c   |  4 +-
 drivers/net/iavf/iavf_rxtx_vec_avx512.c | 14 +++----
 drivers/net/iavf/iavf_rxtx_vec_common.h |  8 ++--
 drivers/net/iavf/iavf_rxtx_vec_sse.c    |  8 ++--
 drivers/net/iavf/iavf_vchnl.c           |  4 +-
 10 files changed, 63 insertions(+), 88 deletions(-)

diff --git a/drivers/common/intel_eth/ieth_rxtx.h b/drivers/common/intel_eth/ieth_rxtx.h
index 8b12ff59e4..986e0a6d42 100644
--- a/drivers/common/intel_eth/ieth_rxtx.h
+++ b/drivers/common/intel_eth/ieth_rxtx.h
@@ -32,8 +32,9 @@ typedef void (*ice_tx_release_mbufs_t)(struct ieth_tx_queue *txq);
 
 struct ieth_tx_queue {
 	union { /* TX ring virtual address */
-		volatile struct ice_tx_desc *ice_tx_ring;
 		volatile struct i40e_tx_desc *i40e_tx_ring;
+		volatile struct iavf_tx_desc *iavf_tx_ring;
+		volatile struct ice_tx_desc *ice_tx_ring;
 	};
 	volatile uint8_t *qtx_tail;               /* register address of tail */
 	struct ieth_tx_entry *sw_ring; /* virtual address of SW ring */
@@ -64,8 +65,9 @@ struct ieth_tx_queue {
 	_Bool tx_deferred_start; /* don't start this queue in dev start */
 	_Bool q_set;             /* indicate if tx queue has been configured */
 	union {                  /* the VSI this queue belongs to */
-		struct ice_vsi *ice_vsi;
 		struct i40e_vsi *i40e_vsi;
+		struct iavf_vsi *iavf_vsi;
+		struct ice_vsi *ice_vsi;
 	};
 	const struct rte_memzone *mz;
 
@@ -77,6 +79,16 @@ struct ieth_tx_queue {
 		struct { /* I40E driver specific values */
 			uint8_t dcb_tc;
 		};
+		struct { /* iavf driver specific values */
+			uint16_t ipsec_crypto_pkt_md_offset;
+			uint8_t rel_mbufs_type;
+#define IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG1 BIT(0)
+#define IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG2 BIT(1)
+			uint8_t vlan_flag;
+			uint8_t tc;
+			uint8_t use_ctx : 1; /* if use the ctx desc, a packet needs
+					  two descriptors */
+		};
 	};
 };
 
diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index ad526c644c..7f52ca54f1 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -98,7 +98,7 @@
 
 struct iavf_adapter;
 struct iavf_rx_queue;
-struct iavf_tx_queue;
+struct ieth_tx_queue;
 
 
 struct iavf_ipsec_crypto_stats {
diff --git a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c
index 7f80cd6258..3d3803f5e9 100644
--- a/drivers/net/iavf/iavf_ethdev.c
+++ b/drivers/net/iavf/iavf_ethdev.c
@@ -954,7 +954,7 @@ static int
 iavf_start_queues(struct rte_eth_dev *dev)
 {
 	struct iavf_rx_queue *rxq;
-	struct iavf_tx_queue *txq;
+	struct ieth_tx_queue *txq;
 	int i;
 	uint16_t nb_txq, nb_rxq;
 
@@ -1885,7 +1885,7 @@ iavf_dev_update_mbuf_stats(struct rte_eth_dev *ethdev,
 		struct iavf_mbuf_stats *mbuf_stats)
 {
 	uint16_t idx;
-	struct iavf_tx_queue *txq;
+	struct ieth_tx_queue *txq;
 
 	for (idx = 0; idx < ethdev->data->nb_tx_queues; idx++) {
 		txq = ethdev->data->tx_queues[idx];
diff --git a/drivers/net/iavf/iavf_rxtx.c b/drivers/net/iavf/iavf_rxtx.c
index 2d0f8eda79..c0f7d12804 100644
--- a/drivers/net/iavf/iavf_rxtx.c
+++ b/drivers/net/iavf/iavf_rxtx.c
@@ -213,7 +213,7 @@ check_rx_vec_allow(struct iavf_rx_queue *rxq)
 }
 
 static inline bool
-check_tx_vec_allow(struct iavf_tx_queue *txq)
+check_tx_vec_allow(struct ieth_tx_queue *txq)
 {
 	if (!(txq->offloads & IAVF_TX_NO_VECTOR_FLAGS) &&
 	    txq->tx_rs_thresh >= IAVF_VPMD_TX_MAX_BURST &&
@@ -282,7 +282,7 @@ reset_rx_queue(struct iavf_rx_queue *rxq)
 }
 
 static inline void
-reset_tx_queue(struct iavf_tx_queue *txq)
+reset_tx_queue(struct ieth_tx_queue *txq)
 {
 	struct ieth_tx_entry *txe;
 	uint32_t i, size;
@@ -388,7 +388,7 @@ release_rxq_mbufs(struct iavf_rx_queue *rxq)
 }
 
 static inline void
-release_txq_mbufs(struct iavf_tx_queue *txq)
+release_txq_mbufs(struct ieth_tx_queue *txq)
 {
 	uint16_t i;
 
@@ -778,7 +778,7 @@ iavf_dev_tx_queue_setup(struct rte_eth_dev *dev,
 	struct iavf_info *vf =
 		IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
 	struct iavf_vsi *vsi = &vf->vsi;
-	struct iavf_tx_queue *txq;
+	struct ieth_tx_queue *txq;
 	const struct rte_memzone *mz;
 	uint32_t ring_size;
 	uint16_t tx_rs_thresh, tx_free_thresh;
@@ -814,7 +814,7 @@ iavf_dev_tx_queue_setup(struct rte_eth_dev *dev,
 
 	/* Allocate the TX queue data structure. */
 	txq = rte_zmalloc_socket("iavf txq",
-				 sizeof(struct iavf_tx_queue),
+				 sizeof(struct ieth_tx_queue),
 				 RTE_CACHE_LINE_SIZE,
 				 socket_id);
 	if (!txq) {
@@ -979,7 +979,7 @@ iavf_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
 	struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	struct iavf_tx_queue *txq;
+	struct ieth_tx_queue *txq;
 	int err = 0;
 
 	PMD_DRV_FUNC_TRACE();
@@ -1048,7 +1048,7 @@ iavf_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 	struct iavf_adapter *adapter =
 		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
-	struct iavf_tx_queue *txq;
+	struct ieth_tx_queue *txq;
 	int err;
 
 	PMD_DRV_FUNC_TRACE();
@@ -1092,7 +1092,7 @@ iavf_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
 void
 iavf_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
 {
-	struct iavf_tx_queue *q = dev->data->tx_queues[qid];
+	struct ieth_tx_queue *q = dev->data->tx_queues[qid];
 
 	if (!q)
 		return;
@@ -1107,7 +1107,7 @@ static void
 iavf_reset_queues(struct rte_eth_dev *dev)
 {
 	struct iavf_rx_queue *rxq;
-	struct iavf_tx_queue *txq;
+	struct ieth_tx_queue *txq;
 	int i;
 
 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
@@ -2377,7 +2377,7 @@ iavf_recv_pkts_bulk_alloc(void *rx_queue,
 }
 
 static inline int
-iavf_xmit_cleanup(struct iavf_tx_queue *txq)
+iavf_xmit_cleanup(struct ieth_tx_queue *txq)
 {
 	struct ieth_tx_entry *sw_ring = txq->sw_ring;
 	uint16_t last_desc_cleaned = txq->last_desc_cleaned;
@@ -2781,7 +2781,7 @@ iavf_fill_data_desc(volatile struct iavf_tx_desc *desc,
 
 
 static struct iavf_ipsec_crypto_pkt_metadata *
-iavf_ipsec_crypto_get_pkt_metadata(const struct iavf_tx_queue *txq,
+iavf_ipsec_crypto_get_pkt_metadata(const struct ieth_tx_queue *txq,
 		struct rte_mbuf *m)
 {
 	if (m->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD)
@@ -2795,7 +2795,7 @@ iavf_ipsec_crypto_get_pkt_metadata(const struct iavf_tx_queue *txq,
 uint16_t
 iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 {
-	struct iavf_tx_queue *txq = tx_queue;
+	struct ieth_tx_queue *txq = tx_queue;
 	volatile struct iavf_tx_desc *txr = txq->iavf_tx_ring;
 	struct ieth_tx_entry *txe_ring = txq->sw_ring;
 	struct ieth_tx_entry *txe, *txn;
@@ -3027,7 +3027,7 @@ iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
  * correct queue.
  */
 static int
-iavf_check_vlan_up2tc(struct iavf_tx_queue *txq, struct rte_mbuf *m)
+iavf_check_vlan_up2tc(struct ieth_tx_queue *txq, struct rte_mbuf *m)
 {
 	struct rte_eth_dev *dev = &rte_eth_devices[txq->port_id];
 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
@@ -3646,7 +3646,7 @@ iavf_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
 	int i, ret;
 	uint64_t ol_flags;
 	struct rte_mbuf *m;
-	struct iavf_tx_queue *txq = tx_queue;
+	struct ieth_tx_queue *txq = tx_queue;
 	struct rte_eth_dev *dev = &rte_eth_devices[txq->port_id];
 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
 	struct iavf_adapter *adapter = IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
@@ -3800,7 +3800,7 @@ static uint16_t
 iavf_xmit_pkts_no_poll(void *tx_queue, struct rte_mbuf **tx_pkts,
 				uint16_t nb_pkts)
 {
-	struct iavf_tx_queue *txq = tx_queue;
+	struct ieth_tx_queue *txq = tx_queue;
 	enum iavf_tx_burst_type tx_burst_type;
 
 	if (!txq->iavf_vsi || txq->iavf_vsi->adapter->no_poll)
@@ -3823,7 +3823,7 @@ iavf_xmit_pkts_check(void *tx_queue, struct rte_mbuf **tx_pkts,
 	uint16_t good_pkts = nb_pkts;
 	const char *reason = NULL;
 	bool pkt_error = false;
-	struct iavf_tx_queue *txq = tx_queue;
+	struct ieth_tx_queue *txq = tx_queue;
 	struct iavf_adapter *adapter = txq->iavf_vsi->adapter;
 	enum iavf_tx_burst_type tx_burst_type =
 		txq->iavf_vsi->adapter->tx_burst_type;
@@ -4144,7 +4144,7 @@ iavf_set_tx_function(struct rte_eth_dev *dev)
 	int mbuf_check = adapter->devargs.mbuf_check;
 	int no_poll_on_link_down = adapter->devargs.no_poll_on_link_down;
 #ifdef RTE_ARCH_X86
-	struct iavf_tx_queue *txq;
+	struct ieth_tx_queue *txq;
 	int i;
 	int check_ret;
 	bool use_sse = false;
@@ -4265,7 +4265,7 @@ iavf_set_tx_function(struct rte_eth_dev *dev)
 }
 
 static int
-iavf_tx_done_cleanup_full(struct iavf_tx_queue *txq,
+iavf_tx_done_cleanup_full(struct ieth_tx_queue *txq,
 			uint32_t free_cnt)
 {
 	struct ieth_tx_entry *swr_ring = txq->sw_ring;
@@ -4324,7 +4324,7 @@ iavf_tx_done_cleanup_full(struct iavf_tx_queue *txq,
 int
 iavf_dev_tx_done_cleanup(void *txq, uint32_t free_cnt)
 {
-	struct iavf_tx_queue *q = (struct iavf_tx_queue *)txq;
+	struct ieth_tx_queue *q = (struct ieth_tx_queue *)txq;
 
 	return iavf_tx_done_cleanup_full(q, free_cnt);
 }
@@ -4350,7 +4350,7 @@ void
 iavf_dev_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
 		     struct rte_eth_txq_info *qinfo)
 {
-	struct iavf_tx_queue *txq;
+	struct ieth_tx_queue *txq;
 
 	txq = dev->data->tx_queues[queue_id];
 
@@ -4422,7 +4422,7 @@ iavf_dev_rx_desc_status(void *rx_queue, uint16_t offset)
 int
 iavf_dev_tx_desc_status(void *tx_queue, uint16_t offset)
 {
-	struct iavf_tx_queue *txq = tx_queue;
+	struct ieth_tx_queue *txq = tx_queue;
 	volatile uint64_t *status;
 	uint64_t mask, expect;
 	uint32_t desc;
diff --git a/drivers/net/iavf/iavf_rxtx.h b/drivers/net/iavf/iavf_rxtx.h
index cba6d0573b..835fc8f08f 100644
--- a/drivers/net/iavf/iavf_rxtx.h
+++ b/drivers/net/iavf/iavf_rxtx.h
@@ -211,7 +211,7 @@ struct iavf_rxq_ops {
 };
 
 struct iavf_txq_ops {
-	void (*release_mbufs)(struct iavf_tx_queue *txq);
+	void (*release_mbufs)(struct ieth_tx_queue *txq);
 };
 
 
@@ -273,43 +273,6 @@ struct iavf_rx_queue {
 	uint64_t hw_time_update;
 };
 
-/* Structure associated with each TX queue. */
-struct iavf_tx_queue {
-	const struct rte_memzone *mz;  /* memzone for Tx ring */
-	volatile struct iavf_tx_desc *iavf_tx_ring; /* Tx ring virtual address */
-	rte_iova_t tx_ring_dma;    /* Tx ring DMA address */
-	struct ieth_tx_entry *sw_ring;  /* address array of SW ring */
-	uint16_t nb_tx_desc;           /* ring length */
-	uint16_t tx_tail;              /* current value of tail */
-	volatile uint8_t *qtx_tail;    /* register address of tail */
-	/* number of used desc since RS bit set */
-	uint16_t nb_tx_used;
-	uint16_t nb_tx_free;
-	uint16_t last_desc_cleaned;    /* last desc have been cleaned*/
-	uint16_t tx_free_thresh;
-	uint16_t tx_rs_thresh;
-	uint8_t rel_mbufs_type;
-	struct iavf_vsi *iavf_vsi; /**< the VSI this queue belongs to */
-
-	uint16_t port_id;
-	uint16_t queue_id;
-	uint64_t offloads;
-	uint16_t tx_next_dd;              /* next to set RS, for VPMD */
-	uint16_t tx_next_rs;              /* next to check DD,  for VPMD */
-	uint16_t ipsec_crypto_pkt_md_offset;
-
-	uint64_t mbuf_errors;
-
-	bool q_set;                    /* if rx queue has been configured */
-	bool tx_deferred_start;        /* don't start this queue in dev start */
-	const struct iavf_txq_ops *ops;
-#define IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG1	BIT(0)
-#define IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG2	BIT(1)
-	uint8_t vlan_flag;
-	uint8_t tc;
-	uint8_t use_ctx:1;            /* if use the ctx desc, a packet needs two descriptors */
-};
-
 /* Offload features */
 union iavf_tx_offload {
 	uint64_t data;
@@ -724,7 +687,7 @@ int iavf_get_monitor_addr(void *rx_queue, struct rte_power_monitor_cond *pmc);
 int iavf_rx_vec_dev_check(struct rte_eth_dev *dev);
 int iavf_tx_vec_dev_check(struct rte_eth_dev *dev);
 int iavf_rxq_vec_setup(struct iavf_rx_queue *rxq);
-int iavf_txq_vec_setup(struct iavf_tx_queue *txq);
+int iavf_txq_vec_setup(struct ieth_tx_queue *txq);
 uint16_t iavf_recv_pkts_vec_avx512(void *rx_queue, struct rte_mbuf **rx_pkts,
 				   uint16_t nb_pkts);
 uint16_t iavf_recv_pkts_vec_avx512_offload(void *rx_queue,
@@ -757,14 +720,14 @@ uint16_t iavf_xmit_pkts_vec_avx512_ctx_offload(void *tx_queue, struct rte_mbuf *
 				  uint16_t nb_pkts);
 uint16_t iavf_xmit_pkts_vec_avx512_ctx(void *tx_queue, struct rte_mbuf **tx_pkts,
 				  uint16_t nb_pkts);
-int iavf_txq_vec_setup_avx512(struct iavf_tx_queue *txq);
+int iavf_txq_vec_setup_avx512(struct ieth_tx_queue *txq);
 
 uint8_t iavf_proto_xtr_type_to_rxdid(uint8_t xtr_type);
 
 void iavf_set_default_ptype_table(struct rte_eth_dev *dev);
-void iavf_tx_queue_release_mbufs_avx512(struct iavf_tx_queue *txq);
+void iavf_tx_queue_release_mbufs_avx512(struct ieth_tx_queue *txq);
 void iavf_rx_queue_release_mbufs_sse(struct iavf_rx_queue *rxq);
-void iavf_tx_queue_release_mbufs_sse(struct iavf_tx_queue *txq);
+void iavf_tx_queue_release_mbufs_sse(struct ieth_tx_queue *txq);
 
 static inline
 void iavf_dump_rx_descriptor(struct iavf_rx_queue *rxq,
@@ -791,7 +754,7 @@ void iavf_dump_rx_descriptor(struct iavf_rx_queue *rxq,
  * to print the qwords
  */
 static inline
-void iavf_dump_tx_descriptor(const struct iavf_tx_queue *txq,
+void iavf_dump_tx_descriptor(const struct ieth_tx_queue *txq,
 			    const volatile void *desc, uint16_t tx_id)
 {
 	const char *name;
diff --git a/drivers/net/iavf/iavf_rxtx_vec_avx2.c b/drivers/net/iavf/iavf_rxtx_vec_avx2.c
index 94cf9c0038..25dc339303 100644
--- a/drivers/net/iavf/iavf_rxtx_vec_avx2.c
+++ b/drivers/net/iavf/iavf_rxtx_vec_avx2.c
@@ -1734,7 +1734,7 @@ static __rte_always_inline uint16_t
 iavf_xmit_fixed_burst_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts,
 			       uint16_t nb_pkts, bool offload)
 {
-	struct iavf_tx_queue *txq = (struct iavf_tx_queue *)tx_queue;
+	struct ieth_tx_queue *txq = (struct ieth_tx_queue *)tx_queue;
 	volatile struct iavf_tx_desc *txdp;
 	struct ieth_tx_entry *txep;
 	uint16_t n, nb_commit, tx_id;
@@ -1800,7 +1800,7 @@ iavf_xmit_pkts_vec_avx2_common(void *tx_queue, struct rte_mbuf **tx_pkts,
 			       uint16_t nb_pkts, bool offload)
 {
 	uint16_t nb_tx = 0;
-	struct iavf_tx_queue *txq = (struct iavf_tx_queue *)tx_queue;
+	struct ieth_tx_queue *txq = (struct ieth_tx_queue *)tx_queue;
 
 	while (nb_pkts) {
 		uint16_t ret, num;
diff --git a/drivers/net/iavf/iavf_rxtx_vec_avx512.c b/drivers/net/iavf/iavf_rxtx_vec_avx512.c
index dd45bc0fd9..c774c0c365 100644
--- a/drivers/net/iavf/iavf_rxtx_vec_avx512.c
+++ b/drivers/net/iavf/iavf_rxtx_vec_avx512.c
@@ -1845,7 +1845,7 @@ iavf_recv_scattered_pkts_vec_avx512_flex_rxd_offload(void *rx_queue,
 }
 
 static __rte_always_inline int
-iavf_tx_free_bufs_avx512(struct iavf_tx_queue *txq)
+iavf_tx_free_bufs_avx512(struct ieth_tx_queue *txq)
 {
 	struct ieth_vec_tx_entry *txep;
 	uint32_t n;
@@ -2311,7 +2311,7 @@ static __rte_always_inline uint16_t
 iavf_xmit_fixed_burst_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
 				 uint16_t nb_pkts, bool offload)
 {
-	struct iavf_tx_queue *txq = (struct iavf_tx_queue *)tx_queue;
+	struct ieth_tx_queue *txq = (struct ieth_tx_queue *)tx_queue;
 	volatile struct iavf_tx_desc *txdp;
 	struct ieth_vec_tx_entry *txep;
 	uint16_t n, nb_commit, tx_id;
@@ -2378,7 +2378,7 @@ static __rte_always_inline uint16_t
 iavf_xmit_fixed_burst_vec_avx512_ctx(void *tx_queue, struct rte_mbuf **tx_pkts,
 				 uint16_t nb_pkts, bool offload)
 {
-	struct iavf_tx_queue *txq = (struct iavf_tx_queue *)tx_queue;
+	struct ieth_tx_queue *txq = (struct ieth_tx_queue *)tx_queue;
 	volatile struct iavf_tx_desc *txdp;
 	struct ieth_vec_tx_entry *txep;
 	uint16_t n, nb_commit, nb_mbuf, tx_id;
@@ -2446,7 +2446,7 @@ iavf_xmit_pkts_vec_avx512_cmn(void *tx_queue, struct rte_mbuf **tx_pkts,
 			      uint16_t nb_pkts, bool offload)
 {
 	uint16_t nb_tx = 0;
-	struct iavf_tx_queue *txq = (struct iavf_tx_queue *)tx_queue;
+	struct ieth_tx_queue *txq = (struct ieth_tx_queue *)tx_queue;
 
 	while (nb_pkts) {
 		uint16_t ret, num;
@@ -2472,7 +2472,7 @@ iavf_xmit_pkts_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
 }
 
 void __rte_cold
-iavf_tx_queue_release_mbufs_avx512(struct iavf_tx_queue *txq)
+iavf_tx_queue_release_mbufs_avx512(struct ieth_tx_queue *txq)
 {
 	unsigned int i;
 	const uint16_t max_desc = (uint16_t)(txq->nb_tx_desc - 1);
@@ -2493,7 +2493,7 @@ iavf_tx_queue_release_mbufs_avx512(struct iavf_tx_queue *txq)
 }
 
 int __rte_cold
-iavf_txq_vec_setup_avx512(struct iavf_tx_queue *txq)
+iavf_txq_vec_setup_avx512(struct ieth_tx_queue *txq)
 {
 	txq->rel_mbufs_type = IAVF_REL_MBUFS_AVX512_VEC;
 	return 0;
@@ -2511,7 +2511,7 @@ iavf_xmit_pkts_vec_avx512_ctx_cmn(void *tx_queue, struct rte_mbuf **tx_pkts,
 				  uint16_t nb_pkts, bool offload)
 {
 	uint16_t nb_tx = 0;
-	struct iavf_tx_queue *txq = (struct iavf_tx_queue *)tx_queue;
+	struct ieth_tx_queue *txq = (struct ieth_tx_queue *)tx_queue;
 
 	while (nb_pkts) {
 		uint16_t ret, num;
diff --git a/drivers/net/iavf/iavf_rxtx_vec_common.h b/drivers/net/iavf/iavf_rxtx_vec_common.h
index b8b5e74b89..7a31c777f0 100644
--- a/drivers/net/iavf/iavf_rxtx_vec_common.h
+++ b/drivers/net/iavf/iavf_rxtx_vec_common.h
@@ -17,7 +17,7 @@
 #endif
 
 static __rte_always_inline int
-iavf_tx_free_bufs(struct iavf_tx_queue *txq)
+iavf_tx_free_bufs(struct ieth_tx_queue *txq)
 {
 	struct ieth_tx_entry *txep;
 	uint32_t n;
@@ -104,7 +104,7 @@ _iavf_rx_queue_release_mbufs_vec(struct iavf_rx_queue *rxq)
 }
 
 static inline void
-_iavf_tx_queue_release_mbufs_vec(struct iavf_tx_queue *txq)
+_iavf_tx_queue_release_mbufs_vec(struct ieth_tx_queue *txq)
 {
 	unsigned i;
 	const uint16_t max_desc = (uint16_t)(txq->nb_tx_desc - 1);
@@ -164,7 +164,7 @@ iavf_rx_vec_queue_default(struct iavf_rx_queue *rxq)
 }
 
 static inline int
-iavf_tx_vec_queue_default(struct iavf_tx_queue *txq)
+iavf_tx_vec_queue_default(struct ieth_tx_queue *txq)
 {
 	if (!txq)
 		return -1;
@@ -227,7 +227,7 @@ static inline int
 iavf_tx_vec_dev_check_default(struct rte_eth_dev *dev)
 {
 	int i;
-	struct iavf_tx_queue *txq;
+	struct ieth_tx_queue *txq;
 	int ret;
 	int result = 0;
 
diff --git a/drivers/net/iavf/iavf_rxtx_vec_sse.c b/drivers/net/iavf/iavf_rxtx_vec_sse.c
index 0a896a6e6f..de632c6de8 100644
--- a/drivers/net/iavf/iavf_rxtx_vec_sse.c
+++ b/drivers/net/iavf/iavf_rxtx_vec_sse.c
@@ -1366,7 +1366,7 @@ uint16_t
 iavf_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
 			 uint16_t nb_pkts)
 {
-	struct iavf_tx_queue *txq = (struct iavf_tx_queue *)tx_queue;
+	struct ieth_tx_queue *txq = (struct ieth_tx_queue *)tx_queue;
 	volatile struct iavf_tx_desc *txdp;
 	struct ieth_tx_entry *txep;
 	uint16_t n, nb_commit, tx_id;
@@ -1435,7 +1435,7 @@ iavf_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
 		   uint16_t nb_pkts)
 {
 	uint16_t nb_tx = 0;
-	struct iavf_tx_queue *txq = (struct iavf_tx_queue *)tx_queue;
+	struct ieth_tx_queue *txq = (struct ieth_tx_queue *)tx_queue;
 
 	while (nb_pkts) {
 		uint16_t ret, num;
@@ -1459,13 +1459,13 @@ iavf_rx_queue_release_mbufs_sse(struct iavf_rx_queue *rxq)
 }
 
 void __rte_cold
-iavf_tx_queue_release_mbufs_sse(struct iavf_tx_queue *txq)
+iavf_tx_queue_release_mbufs_sse(struct ieth_tx_queue *txq)
 {
 	_iavf_tx_queue_release_mbufs_vec(txq);
 }
 
 int __rte_cold
-iavf_txq_vec_setup(struct iavf_tx_queue *txq)
+iavf_txq_vec_setup(struct ieth_tx_queue *txq)
 {
 	txq->rel_mbufs_type = IAVF_REL_MBUFS_SSE_VEC;
 	return 0;
diff --git a/drivers/net/iavf/iavf_vchnl.c b/drivers/net/iavf/iavf_vchnl.c
index 0646a2f978..3bdea403c0 100644
--- a/drivers/net/iavf/iavf_vchnl.c
+++ b/drivers/net/iavf/iavf_vchnl.c
@@ -1220,8 +1220,8 @@ iavf_configure_queues(struct iavf_adapter *adapter,
 {
 	struct iavf_rx_queue **rxq =
 		(struct iavf_rx_queue **)adapter->dev_data->rx_queues;
-	struct iavf_tx_queue **txq =
-		(struct iavf_tx_queue **)adapter->dev_data->tx_queues;
+	struct ieth_tx_queue **txq =
+		(struct ieth_tx_queue **)adapter->dev_data->tx_queues;
 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
 	struct virtchnl_vsi_queue_config_info *vc_config;
 	struct virtchnl_queue_pair_info *vc_qp;
-- 
2.43.0


Thread overview: 22+ messages
2024-11-22 12:53 [RFC PATCH 00/21] Reduce code duplication across Intel NIC drivers Bruce Richardson
2024-11-22 12:53 ` [RFC PATCH 01/21] common/intel_eth: add pkt reassembly fn for intel drivers Bruce Richardson
2024-11-22 12:53 ` [RFC PATCH 02/21] common/intel_eth: provide common Tx entry structures Bruce Richardson
2024-11-22 12:53 ` [RFC PATCH 03/21] common/intel_eth: add Tx mbuf ring replenish fn Bruce Richardson
2024-11-22 12:53 ` [RFC PATCH 04/21] drivers/net: align Tx queue struct field names Bruce Richardson
2024-11-22 12:53 ` [RFC PATCH 05/21] drivers/net: add prefix for driver-specific structs Bruce Richardson
2024-11-22 12:53 ` [RFC PATCH 06/21] common/intel_eth: merge ice and i40e Tx queue struct Bruce Richardson
2024-11-22 12:54 ` Bruce Richardson [this message]
2024-11-22 12:54 ` [RFC PATCH 08/21] net/ixgbe: convert Tx queue context cache field to ptr Bruce Richardson
2024-11-22 12:54 ` [RFC PATCH 09/21] net/ixgbe: use common Tx queue structure Bruce Richardson
2024-11-22 12:54 ` [RFC PATCH 10/21] common/intel_eth: pack " Bruce Richardson
2024-11-22 12:54 ` [RFC PATCH 11/21] common/intel_eth: add post-Tx buffer free function Bruce Richardson
2024-11-22 12:54 ` [RFC PATCH 12/21] common/intel_eth: add Tx buffer free fn for AVX-512 Bruce Richardson
2024-11-22 12:54 ` [RFC PATCH 13/21] net/iavf: use common Tx " Bruce Richardson
2024-11-22 12:54 ` [RFC PATCH 14/21] net/ice: move Tx queue mbuf cleanup fn to common Bruce Richardson
2024-11-22 12:54 ` [RFC PATCH 15/21] net/i40e: use common Tx queue mbuf cleanup fn Bruce Richardson
2024-11-22 12:54 ` [RFC PATCH 16/21] net/ixgbe: " Bruce Richardson
2024-11-22 12:54 ` [RFC PATCH 17/21] net/iavf: " Bruce Richardson
2024-11-22 12:54 ` [RFC PATCH 18/21] net/ice: use vector SW ring for all vector paths Bruce Richardson
2024-11-22 12:54 ` [RFC PATCH 19/21] net/i40e: " Bruce Richardson
2024-11-22 12:54 ` [RFC PATCH 20/21] net/iavf: " Bruce Richardson
2024-11-22 12:54 ` [RFC PATCH 21/21] net/ixgbe: use common Tx backlog entry fn Bruce Richardson
