From: Bruce Richardson <bruce.richardson@intel.com>
To: dev@dpdk.org
Cc: Bruce Richardson <bruce.richardson@intel.com>,
Ian Stokes <ian.stokes@intel.com>,
David Christensen <drc@linux.ibm.com>,
Konstantin Ananyev <konstantin.v.ananyev@yandex.ru>,
Wathsala Vithanage <wathsala.vithanage@arm.com>,
Vladimir Medvedkin <vladimir.medvedkin@intel.com>,
Anatoly Burakov <anatoly.burakov@intel.com>
Subject: [PATCH v4 02/24] net/_common_intel: provide common Tx entry structures
Date: Fri, 20 Dec 2024 14:38:59 +0000
Message-ID: <20241220143925.609044-3-bruce.richardson@intel.com>
In-Reply-To: <20241220143925.609044-1-bruce.richardson@intel.com>
The Tx entry structures, both vector and scalar, are common across Intel
drivers, so provide a single definition to be used everywhere.
Signed-off-by: Bruce Richardson <bruce.richardson@intel.com>
---
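For context, a minimal usage sketch of the new common entry types follows.
The queue struct, function name and allocation parameters below are
illustrative stand-ins only, not code taken from any of the drivers touched
by this patch; the real allocations and casts are in the diff itself.

#include <stdint.h>
#include <errno.h>
#include <rte_common.h>
#include <rte_malloc.h>
#include <_common_intel/tx.h>

/* hypothetical queue struct, for illustration only */
struct example_tx_queue {
	struct ci_tx_entry *sw_ring; /* scalar entries: mbuf, next_id, last_id */
	uint16_t nb_tx_desc;
};

/* allocate one software-ring entry per Tx descriptor */
static int
example_tx_sw_ring_alloc(struct example_tx_queue *txq, uint16_t nb_desc,
		int socket_id)
{
	txq->nb_tx_desc = nb_desc;
	txq->sw_ring = rte_zmalloc_socket("example tx sw ring",
			sizeof(struct ci_tx_entry) * nb_desc,
			RTE_CACHE_LINE_SIZE, socket_id);
	if (txq->sw_ring == NULL)
		return -ENOMEM;

	/* vector Tx paths reuse the same memory, viewing each slot as the
	 * smaller ci_tx_entry_vec (mbuf pointer only), e.g.:
	 *   struct ci_tx_entry_vec *swr = (void *)txq->sw_ring;
	 */
	return 0;
}
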
drivers/net/_common_intel/tx.h | 27 +++++++++++++++++++
.../net/i40e/i40e_recycle_mbufs_vec_common.c | 2 +-
drivers/net/i40e/i40e_rxtx.c | 18 ++++++-------
drivers/net/i40e/i40e_rxtx.h | 14 +++-------
drivers/net/i40e/i40e_rxtx_vec_altivec.c | 2 +-
drivers/net/i40e/i40e_rxtx_vec_avx2.c | 2 +-
drivers/net/i40e/i40e_rxtx_vec_avx512.c | 6 ++---
drivers/net/i40e/i40e_rxtx_vec_common.h | 4 +--
drivers/net/i40e/i40e_rxtx_vec_neon.c | 2 +-
drivers/net/i40e/i40e_rxtx_vec_sse.c | 2 +-
drivers/net/iavf/iavf_rxtx.c | 12 ++++-----
drivers/net/iavf/iavf_rxtx.h | 14 +++-------
drivers/net/iavf/iavf_rxtx_vec_avx2.c | 2 +-
drivers/net/iavf/iavf_rxtx_vec_avx512.c | 10 +++----
drivers/net/iavf/iavf_rxtx_vec_common.h | 4 +--
drivers/net/iavf/iavf_rxtx_vec_sse.c | 2 +-
drivers/net/ice/ice_dcf_ethdev.c | 2 +-
drivers/net/ice/ice_rxtx.c | 16 +++++------
drivers/net/ice/ice_rxtx.h | 13 ++-------
drivers/net/ice/ice_rxtx_vec_avx2.c | 2 +-
drivers/net/ice/ice_rxtx_vec_avx512.c | 6 ++---
drivers/net/ice/ice_rxtx_vec_common.h | 6 ++---
drivers/net/ice/ice_rxtx_vec_sse.c | 2 +-
.../ixgbe/ixgbe_recycle_mbufs_vec_common.c | 2 +-
drivers/net/ixgbe/ixgbe_rxtx.c | 16 +++++------
drivers/net/ixgbe/ixgbe_rxtx.h | 22 +++------------
drivers/net/ixgbe/ixgbe_rxtx_vec_common.h | 8 +++---
drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c | 2 +-
drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c | 2 +-
29 files changed, 105 insertions(+), 117 deletions(-)
create mode 100644 drivers/net/_common_intel/tx.h
diff --git a/drivers/net/_common_intel/tx.h b/drivers/net/_common_intel/tx.h
new file mode 100644
index 0000000000..384352b9db
--- /dev/null
+++ b/drivers/net/_common_intel/tx.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2024 Intel Corporation
+ */
+
+#ifndef _COMMON_INTEL_TX_H_
+#define _COMMON_INTEL_TX_H_
+
+#include <stdint.h>
+#include <rte_mbuf.h>
+
+/**
+ * Structure associated with each descriptor of the TX ring of a TX queue.
+ */
+struct ci_tx_entry {
+ struct rte_mbuf *mbuf; /* mbuf associated with TX desc, if any. */
+ uint16_t next_id; /* Index of next descriptor in ring. */
+ uint16_t last_id; /* Index of last scattered descriptor. */
+};
+
+/**
+ * Structure associated with each descriptor of the TX ring of a TX queue in vector Tx.
+ */
+struct ci_tx_entry_vec {
+ struct rte_mbuf *mbuf; /* mbuf associated with TX desc, if any. */
+};
+
+#endif /* _COMMON_INTEL_TX_H_ */
diff --git a/drivers/net/i40e/i40e_recycle_mbufs_vec_common.c b/drivers/net/i40e/i40e_recycle_mbufs_vec_common.c
index 14424c9921..260d238ce4 100644
--- a/drivers/net/i40e/i40e_recycle_mbufs_vec_common.c
+++ b/drivers/net/i40e/i40e_recycle_mbufs_vec_common.c
@@ -56,7 +56,7 @@ i40e_recycle_tx_mbufs_reuse_vec(void *tx_queue,
struct rte_eth_recycle_rxq_info *recycle_rxq_info)
{
struct i40e_tx_queue *txq = tx_queue;
- struct i40e_tx_entry *txep;
+ struct ci_tx_entry *txep;
struct rte_mbuf **rxep;
int i, n;
uint16_t nb_recycle_mbufs;
diff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c
index 839c8a5442..2e1f07d2a1 100644
--- a/drivers/net/i40e/i40e_rxtx.c
+++ b/drivers/net/i40e/i40e_rxtx.c
@@ -378,7 +378,7 @@ i40e_build_ctob(uint32_t td_cmd,
static inline int
i40e_xmit_cleanup(struct i40e_tx_queue *txq)
{
- struct i40e_tx_entry *sw_ring = txq->sw_ring;
+ struct ci_tx_entry *sw_ring = txq->sw_ring;
volatile struct i40e_tx_desc *txd = txq->tx_ring;
uint16_t last_desc_cleaned = txq->last_desc_cleaned;
uint16_t nb_tx_desc = txq->nb_tx_desc;
@@ -1081,8 +1081,8 @@ uint16_t
i40e_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
struct i40e_tx_queue *txq;
- struct i40e_tx_entry *sw_ring;
- struct i40e_tx_entry *txe, *txn;
+ struct ci_tx_entry *sw_ring;
+ struct ci_tx_entry *txe, *txn;
volatile struct i40e_tx_desc *txd;
volatile struct i40e_tx_desc *txr;
struct rte_mbuf *tx_pkt;
@@ -1331,7 +1331,7 @@ i40e_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
static __rte_always_inline int
i40e_tx_free_bufs(struct i40e_tx_queue *txq)
{
- struct i40e_tx_entry *txep;
+ struct ci_tx_entry *txep;
uint16_t tx_rs_thresh = txq->tx_rs_thresh;
uint16_t i = 0, j = 0;
struct rte_mbuf *free[RTE_I40E_TX_MAX_FREE_BUF_SZ];
@@ -1418,7 +1418,7 @@ i40e_tx_fill_hw_ring(struct i40e_tx_queue *txq,
uint16_t nb_pkts)
{
volatile struct i40e_tx_desc *txdp = &(txq->tx_ring[txq->tx_tail]);
- struct i40e_tx_entry *txep = &(txq->sw_ring[txq->tx_tail]);
+ struct ci_tx_entry *txep = &txq->sw_ring[txq->tx_tail];
const int N_PER_LOOP = 4;
const int N_PER_LOOP_MASK = N_PER_LOOP - 1;
int mainpart, leftover;
@@ -2555,7 +2555,7 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
/* Allocate software ring */
txq->sw_ring =
rte_zmalloc_socket("i40e tx sw ring",
- sizeof(struct i40e_tx_entry) * nb_desc,
+ sizeof(struct ci_tx_entry) * nb_desc,
RTE_CACHE_LINE_SIZE,
socket_id);
if (!txq->sw_ring) {
@@ -2723,7 +2723,7 @@ i40e_tx_queue_release_mbufs(struct i40e_tx_queue *txq)
*/
#ifdef CC_AVX512_SUPPORT
if (dev->tx_pkt_burst == i40e_xmit_pkts_vec_avx512) {
- struct i40e_vec_tx_entry *swr = (void *)txq->sw_ring;
+ struct ci_tx_entry_vec *swr = (void *)txq->sw_ring;
i = txq->tx_next_dd - txq->tx_rs_thresh + 1;
if (txq->tx_tail < i) {
@@ -2768,7 +2768,7 @@ static int
i40e_tx_done_cleanup_full(struct i40e_tx_queue *txq,
uint32_t free_cnt)
{
- struct i40e_tx_entry *swr_ring = txq->sw_ring;
+ struct ci_tx_entry *swr_ring = txq->sw_ring;
uint16_t i, tx_last, tx_id;
uint16_t nb_tx_free_last;
uint16_t nb_tx_to_clean;
@@ -2874,7 +2874,7 @@ i40e_tx_done_cleanup(void *txq, uint32_t free_cnt)
void
i40e_reset_tx_queue(struct i40e_tx_queue *txq)
{
- struct i40e_tx_entry *txe;
+ struct ci_tx_entry *txe;
uint16_t i, prev, size;
if (!txq) {
diff --git a/drivers/net/i40e/i40e_rxtx.h b/drivers/net/i40e/i40e_rxtx.h
index 33fc9770d9..0f5d3cb0b7 100644
--- a/drivers/net/i40e/i40e_rxtx.h
+++ b/drivers/net/i40e/i40e_rxtx.h
@@ -5,6 +5,8 @@
#ifndef _I40E_RXTX_H_
#define _I40E_RXTX_H_
+#include <_common_intel/tx.h>
+
#define RTE_PMD_I40E_RX_MAX_BURST 32
#define RTE_PMD_I40E_TX_MAX_BURST 32
@@ -122,16 +124,6 @@ struct i40e_rx_queue {
const struct rte_memzone *mz;
};
-struct i40e_tx_entry {
- struct rte_mbuf *mbuf;
- uint16_t next_id;
- uint16_t last_id;
-};
-
-struct i40e_vec_tx_entry {
- struct rte_mbuf *mbuf;
-};
-
/*
* Structure associated with each TX queue.
*/
@@ -139,7 +131,7 @@ struct i40e_tx_queue {
uint16_t nb_tx_desc; /**< number of TX descriptors */
uint64_t tx_ring_phys_addr; /**< TX ring DMA address */
volatile struct i40e_tx_desc *tx_ring; /**< TX ring virtual address */
- struct i40e_tx_entry *sw_ring; /**< virtual address of SW ring */
+ struct ci_tx_entry *sw_ring; /**< virtual address of SW ring */
uint16_t tx_tail; /**< current value of tail register */
volatile uint8_t *qtx_tail; /**< register address of tail */
uint16_t nb_tx_used; /**< number of TX desc used since RS bit set */
diff --git a/drivers/net/i40e/i40e_rxtx_vec_altivec.c b/drivers/net/i40e/i40e_rxtx_vec_altivec.c
index 95829f65d5..ca1038eaa6 100644
--- a/drivers/net/i40e/i40e_rxtx_vec_altivec.c
+++ b/drivers/net/i40e/i40e_rxtx_vec_altivec.c
@@ -553,7 +553,7 @@ i40e_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
{
struct i40e_tx_queue *txq = (struct i40e_tx_queue *)tx_queue;
volatile struct i40e_tx_desc *txdp;
- struct i40e_tx_entry *txep;
+ struct ci_tx_entry *txep;
uint16_t n, nb_commit, tx_id;
uint64_t flags = I40E_TD_CMD;
uint64_t rs = I40E_TX_DESC_CMD_RS | I40E_TD_CMD;
diff --git a/drivers/net/i40e/i40e_rxtx_vec_avx2.c b/drivers/net/i40e/i40e_rxtx_vec_avx2.c
index 6dd6e55d9c..e8441de759 100644
--- a/drivers/net/i40e/i40e_rxtx_vec_avx2.c
+++ b/drivers/net/i40e/i40e_rxtx_vec_avx2.c
@@ -745,7 +745,7 @@ i40e_xmit_fixed_burst_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts,
{
struct i40e_tx_queue *txq = (struct i40e_tx_queue *)tx_queue;
volatile struct i40e_tx_desc *txdp;
- struct i40e_tx_entry *txep;
+ struct ci_tx_entry *txep;
uint16_t n, nb_commit, tx_id;
uint64_t flags = I40E_TD_CMD;
uint64_t rs = I40E_TX_DESC_CMD_RS | I40E_TD_CMD;
diff --git a/drivers/net/i40e/i40e_rxtx_vec_avx512.c b/drivers/net/i40e/i40e_rxtx_vec_avx512.c
index 506f1b5878..8b8a16daa8 100644
--- a/drivers/net/i40e/i40e_rxtx_vec_avx512.c
+++ b/drivers/net/i40e/i40e_rxtx_vec_avx512.c
@@ -757,7 +757,7 @@ i40e_recv_scattered_pkts_vec_avx512(void *rx_queue,
static __rte_always_inline int
i40e_tx_free_bufs_avx512(struct i40e_tx_queue *txq)
{
- struct i40e_vec_tx_entry *txep;
+ struct ci_tx_entry_vec *txep;
uint32_t n;
uint32_t i;
int nb_free = 0;
@@ -920,7 +920,7 @@ vtx(volatile struct i40e_tx_desc *txdp,
}
static __rte_always_inline void
-tx_backlog_entry_avx512(struct i40e_vec_tx_entry *txep,
+tx_backlog_entry_avx512(struct ci_tx_entry_vec *txep,
struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
int i;
@@ -935,7 +935,7 @@ i40e_xmit_fixed_burst_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
{
struct i40e_tx_queue *txq = (struct i40e_tx_queue *)tx_queue;
volatile struct i40e_tx_desc *txdp;
- struct i40e_vec_tx_entry *txep;
+ struct ci_tx_entry_vec *txep;
uint16_t n, nb_commit, tx_id;
uint64_t flags = I40E_TD_CMD;
uint64_t rs = I40E_TX_DESC_CMD_RS | I40E_TD_CMD;
diff --git a/drivers/net/i40e/i40e_rxtx_vec_common.h b/drivers/net/i40e/i40e_rxtx_vec_common.h
index 1248cecacd..619fb89110 100644
--- a/drivers/net/i40e/i40e_rxtx_vec_common.h
+++ b/drivers/net/i40e/i40e_rxtx_vec_common.h
@@ -19,7 +19,7 @@
static __rte_always_inline int
i40e_tx_free_bufs(struct i40e_tx_queue *txq)
{
- struct i40e_tx_entry *txep;
+ struct ci_tx_entry *txep;
uint32_t n;
uint32_t i;
int nb_free = 0;
@@ -85,7 +85,7 @@ i40e_tx_free_bufs(struct i40e_tx_queue *txq)
}
static __rte_always_inline void
-tx_backlog_entry(struct i40e_tx_entry *txep,
+tx_backlog_entry(struct ci_tx_entry *txep,
struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
int i;
diff --git a/drivers/net/i40e/i40e_rxtx_vec_neon.c b/drivers/net/i40e/i40e_rxtx_vec_neon.c
index 159d971796..9b90a32e28 100644
--- a/drivers/net/i40e/i40e_rxtx_vec_neon.c
+++ b/drivers/net/i40e/i40e_rxtx_vec_neon.c
@@ -681,7 +681,7 @@ i40e_xmit_fixed_burst_vec(void *__rte_restrict tx_queue,
{
struct i40e_tx_queue *txq = (struct i40e_tx_queue *)tx_queue;
volatile struct i40e_tx_desc *txdp;
- struct i40e_tx_entry *txep;
+ struct ci_tx_entry *txep;
uint16_t n, nb_commit, tx_id;
uint64_t flags = I40E_TD_CMD;
uint64_t rs = I40E_TX_DESC_CMD_RS | I40E_TD_CMD;
diff --git a/drivers/net/i40e/i40e_rxtx_vec_sse.c b/drivers/net/i40e/i40e_rxtx_vec_sse.c
index 3a8128e014..e1fa2ed543 100644
--- a/drivers/net/i40e/i40e_rxtx_vec_sse.c
+++ b/drivers/net/i40e/i40e_rxtx_vec_sse.c
@@ -700,7 +700,7 @@ i40e_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
{
struct i40e_tx_queue *txq = (struct i40e_tx_queue *)tx_queue;
volatile struct i40e_tx_desc *txdp;
- struct i40e_tx_entry *txep;
+ struct ci_tx_entry *txep;
uint16_t n, nb_commit, tx_id;
uint64_t flags = I40E_TD_CMD;
uint64_t rs = I40E_TX_DESC_CMD_RS | I40E_TD_CMD;
diff --git a/drivers/net/iavf/iavf_rxtx.c b/drivers/net/iavf/iavf_rxtx.c
index 6a093c6746..e337f20073 100644
--- a/drivers/net/iavf/iavf_rxtx.c
+++ b/drivers/net/iavf/iavf_rxtx.c
@@ -284,7 +284,7 @@ reset_rx_queue(struct iavf_rx_queue *rxq)
static inline void
reset_tx_queue(struct iavf_tx_queue *txq)
{
- struct iavf_tx_entry *txe;
+ struct ci_tx_entry *txe;
uint32_t i, size;
uint16_t prev;
@@ -860,7 +860,7 @@ iavf_dev_tx_queue_setup(struct rte_eth_dev *dev,
/* Allocate software ring */
txq->sw_ring =
rte_zmalloc_socket("iavf tx sw ring",
- sizeof(struct iavf_tx_entry) * nb_desc,
+ sizeof(struct ci_tx_entry) * nb_desc,
RTE_CACHE_LINE_SIZE,
socket_id);
if (!txq->sw_ring) {
@@ -2379,7 +2379,7 @@ iavf_recv_pkts_bulk_alloc(void *rx_queue,
static inline int
iavf_xmit_cleanup(struct iavf_tx_queue *txq)
{
- struct iavf_tx_entry *sw_ring = txq->sw_ring;
+ struct ci_tx_entry *sw_ring = txq->sw_ring;
uint16_t last_desc_cleaned = txq->last_desc_cleaned;
uint16_t nb_tx_desc = txq->nb_tx_desc;
uint16_t desc_to_clean_to;
@@ -2797,8 +2797,8 @@ iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
struct iavf_tx_queue *txq = tx_queue;
volatile struct iavf_tx_desc *txr = txq->tx_ring;
- struct iavf_tx_entry *txe_ring = txq->sw_ring;
- struct iavf_tx_entry *txe, *txn;
+ struct ci_tx_entry *txe_ring = txq->sw_ring;
+ struct ci_tx_entry *txe, *txn;
struct rte_mbuf *mb, *mb_seg;
uint64_t buf_dma_addr;
uint16_t desc_idx, desc_idx_last;
@@ -4268,7 +4268,7 @@ static int
iavf_tx_done_cleanup_full(struct iavf_tx_queue *txq,
uint32_t free_cnt)
{
- struct iavf_tx_entry *swr_ring = txq->sw_ring;
+ struct ci_tx_entry *swr_ring = txq->sw_ring;
uint16_t tx_last, tx_id;
uint16_t nb_tx_free_last;
uint16_t nb_tx_to_clean;
diff --git a/drivers/net/iavf/iavf_rxtx.h b/drivers/net/iavf/iavf_rxtx.h
index 7b56076d32..1a191f2c89 100644
--- a/drivers/net/iavf/iavf_rxtx.h
+++ b/drivers/net/iavf/iavf_rxtx.h
@@ -5,6 +5,8 @@
#ifndef _IAVF_RXTX_H_
#define _IAVF_RXTX_H_
+#include <_common_intel/tx.h>
+
/* In QLEN must be whole number of 32 descriptors. */
#define IAVF_ALIGN_RING_DESC 32
#define IAVF_MIN_RING_DESC 64
@@ -271,22 +273,12 @@ struct iavf_rx_queue {
uint64_t hw_time_update;
};
-struct iavf_tx_entry {
- struct rte_mbuf *mbuf;
- uint16_t next_id;
- uint16_t last_id;
-};
-
-struct iavf_tx_vec_entry {
- struct rte_mbuf *mbuf;
-};
-
/* Structure associated with each TX queue. */
struct iavf_tx_queue {
const struct rte_memzone *mz; /* memzone for Tx ring */
volatile struct iavf_tx_desc *tx_ring; /* Tx ring virtual address */
uint64_t tx_ring_phys_addr; /* Tx ring DMA address */
- struct iavf_tx_entry *sw_ring; /* address array of SW ring */
+ struct ci_tx_entry *sw_ring; /* address array of SW ring */
uint16_t nb_tx_desc; /* ring length */
uint16_t tx_tail; /* current value of tail */
volatile uint8_t *qtx_tail; /* register address of tail */
diff --git a/drivers/net/iavf/iavf_rxtx_vec_avx2.c b/drivers/net/iavf/iavf_rxtx_vec_avx2.c
index 0baf5045c8..e7d3d52655 100644
--- a/drivers/net/iavf/iavf_rxtx_vec_avx2.c
+++ b/drivers/net/iavf/iavf_rxtx_vec_avx2.c
@@ -1736,7 +1736,7 @@ iavf_xmit_fixed_burst_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts,
{
struct iavf_tx_queue *txq = (struct iavf_tx_queue *)tx_queue;
volatile struct iavf_tx_desc *txdp;
- struct iavf_tx_entry *txep;
+ struct ci_tx_entry *txep;
uint16_t n, nb_commit, tx_id;
/* bit2 is reserved and must be set to 1 according to Spec */
uint64_t flags = IAVF_TX_DESC_CMD_EOP | IAVF_TX_DESC_CMD_ICRC;
diff --git a/drivers/net/iavf/iavf_rxtx_vec_avx512.c b/drivers/net/iavf/iavf_rxtx_vec_avx512.c
index 5a88007096..a899309f94 100644
--- a/drivers/net/iavf/iavf_rxtx_vec_avx512.c
+++ b/drivers/net/iavf/iavf_rxtx_vec_avx512.c
@@ -1847,7 +1847,7 @@ iavf_recv_scattered_pkts_vec_avx512_flex_rxd_offload(void *rx_queue,
static __rte_always_inline int
iavf_tx_free_bufs_avx512(struct iavf_tx_queue *txq)
{
- struct iavf_tx_vec_entry *txep;
+ struct ci_tx_entry_vec *txep;
uint32_t n;
uint32_t i;
int nb_free = 0;
@@ -1960,7 +1960,7 @@ iavf_tx_free_bufs_avx512(struct iavf_tx_queue *txq)
}
static __rte_always_inline void
-tx_backlog_entry_avx512(struct iavf_tx_vec_entry *txep,
+tx_backlog_entry_avx512(struct ci_tx_entry_vec *txep,
struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
int i;
@@ -2313,7 +2313,7 @@ iavf_xmit_fixed_burst_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
{
struct iavf_tx_queue *txq = (struct iavf_tx_queue *)tx_queue;
volatile struct iavf_tx_desc *txdp;
- struct iavf_tx_vec_entry *txep;
+ struct ci_tx_entry_vec *txep;
uint16_t n, nb_commit, tx_id;
/* bit2 is reserved and must be set to 1 according to Spec */
uint64_t flags = IAVF_TX_DESC_CMD_EOP | IAVF_TX_DESC_CMD_ICRC;
@@ -2380,7 +2380,7 @@ iavf_xmit_fixed_burst_vec_avx512_ctx(void *tx_queue, struct rte_mbuf **tx_pkts,
{
struct iavf_tx_queue *txq = (struct iavf_tx_queue *)tx_queue;
volatile struct iavf_tx_desc *txdp;
- struct iavf_tx_vec_entry *txep;
+ struct ci_tx_entry_vec *txep;
uint16_t n, nb_commit, nb_mbuf, tx_id;
/* bit2 is reserved and must be set to 1 according to Spec */
uint64_t flags = IAVF_TX_DESC_CMD_EOP | IAVF_TX_DESC_CMD_ICRC;
@@ -2478,7 +2478,7 @@ iavf_tx_queue_release_mbufs_avx512(struct iavf_tx_queue *txq)
const uint16_t max_desc = (uint16_t)(txq->nb_tx_desc - 1);
const uint16_t end_desc = txq->tx_tail >> txq->use_ctx; /* next empty slot */
const uint16_t wrap_point = txq->nb_tx_desc >> txq->use_ctx; /* end of SW ring */
- struct iavf_tx_vec_entry *swr = (void *)txq->sw_ring;
+ struct ci_tx_entry_vec *swr = (void *)txq->sw_ring;
if (!txq->sw_ring || txq->nb_free == max_desc)
return;
diff --git a/drivers/net/iavf/iavf_rxtx_vec_common.h b/drivers/net/iavf/iavf_rxtx_vec_common.h
index 26b6f07614..df40857218 100644
--- a/drivers/net/iavf/iavf_rxtx_vec_common.h
+++ b/drivers/net/iavf/iavf_rxtx_vec_common.h
@@ -19,7 +19,7 @@
static __rte_always_inline int
iavf_tx_free_bufs(struct iavf_tx_queue *txq)
{
- struct iavf_tx_entry *txep;
+ struct ci_tx_entry *txep;
uint32_t n;
uint32_t i;
int nb_free = 0;
@@ -74,7 +74,7 @@ iavf_tx_free_bufs(struct iavf_tx_queue *txq)
}
static __rte_always_inline void
-tx_backlog_entry(struct iavf_tx_entry *txep,
+tx_backlog_entry(struct ci_tx_entry *txep,
struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
int i;
diff --git a/drivers/net/iavf/iavf_rxtx_vec_sse.c b/drivers/net/iavf/iavf_rxtx_vec_sse.c
index 48b01462ea..0a30b1ef64 100644
--- a/drivers/net/iavf/iavf_rxtx_vec_sse.c
+++ b/drivers/net/iavf/iavf_rxtx_vec_sse.c
@@ -1368,7 +1368,7 @@ iavf_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
{
struct iavf_tx_queue *txq = (struct iavf_tx_queue *)tx_queue;
volatile struct iavf_tx_desc *txdp;
- struct iavf_tx_entry *txep;
+ struct ci_tx_entry *txep;
uint16_t n, nb_commit, tx_id;
uint64_t flags = IAVF_TX_DESC_CMD_EOP | 0x04; /* bit 2 must be set */
uint64_t rs = IAVF_TX_DESC_CMD_RS | flags;
diff --git a/drivers/net/ice/ice_dcf_ethdev.c b/drivers/net/ice/ice_dcf_ethdev.c
index 91f4943a11..4b98e4066b 100644
--- a/drivers/net/ice/ice_dcf_ethdev.c
+++ b/drivers/net/ice/ice_dcf_ethdev.c
@@ -389,7 +389,7 @@ reset_rx_queue(struct ice_rx_queue *rxq)
static inline void
reset_tx_queue(struct ice_tx_queue *txq)
{
- struct ice_tx_entry *txe;
+ struct ci_tx_entry *txe;
uint32_t i, size;
uint16_t prev;
diff --git a/drivers/net/ice/ice_rxtx.c b/drivers/net/ice/ice_rxtx.c
index 0c7106c7e0..d584086a36 100644
--- a/drivers/net/ice/ice_rxtx.c
+++ b/drivers/net/ice/ice_rxtx.c
@@ -1028,7 +1028,7 @@ _ice_tx_queue_release_mbufs(struct ice_tx_queue *txq)
static void
ice_reset_tx_queue(struct ice_tx_queue *txq)
{
- struct ice_tx_entry *txe;
+ struct ci_tx_entry *txe;
uint16_t i, prev, size;
if (!txq) {
@@ -1509,7 +1509,7 @@ ice_tx_queue_setup(struct rte_eth_dev *dev,
/* Allocate software ring */
txq->sw_ring =
rte_zmalloc_socket(NULL,
- sizeof(struct ice_tx_entry) * nb_desc,
+ sizeof(struct ci_tx_entry) * nb_desc,
RTE_CACHE_LINE_SIZE,
socket_id);
if (!txq->sw_ring) {
@@ -2837,7 +2837,7 @@ ice_txd_enable_checksum(uint64_t ol_flags,
static inline int
ice_xmit_cleanup(struct ice_tx_queue *txq)
{
- struct ice_tx_entry *sw_ring = txq->sw_ring;
+ struct ci_tx_entry *sw_ring = txq->sw_ring;
volatile struct ice_tx_desc *txd = txq->tx_ring;
uint16_t last_desc_cleaned = txq->last_desc_cleaned;
uint16_t nb_tx_desc = txq->nb_tx_desc;
@@ -2961,8 +2961,8 @@ ice_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
struct ice_tx_queue *txq;
volatile struct ice_tx_desc *tx_ring;
volatile struct ice_tx_desc *txd;
- struct ice_tx_entry *sw_ring;
- struct ice_tx_entry *txe, *txn;
+ struct ci_tx_entry *sw_ring;
+ struct ci_tx_entry *txe, *txn;
struct rte_mbuf *tx_pkt;
struct rte_mbuf *m_seg;
uint32_t cd_tunneling_params;
@@ -3184,7 +3184,7 @@ ice_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
static __rte_always_inline int
ice_tx_free_bufs(struct ice_tx_queue *txq)
{
- struct ice_tx_entry *txep;
+ struct ci_tx_entry *txep;
uint16_t i;
if ((txq->tx_ring[txq->tx_next_dd].cmd_type_offset_bsz &
@@ -3221,7 +3221,7 @@ static int
ice_tx_done_cleanup_full(struct ice_tx_queue *txq,
uint32_t free_cnt)
{
- struct ice_tx_entry *swr_ring = txq->sw_ring;
+ struct ci_tx_entry *swr_ring = txq->sw_ring;
uint16_t i, tx_last, tx_id;
uint16_t nb_tx_free_last;
uint16_t nb_tx_to_clean;
@@ -3361,7 +3361,7 @@ ice_tx_fill_hw_ring(struct ice_tx_queue *txq, struct rte_mbuf **pkts,
uint16_t nb_pkts)
{
volatile struct ice_tx_desc *txdp = &txq->tx_ring[txq->tx_tail];
- struct ice_tx_entry *txep = &txq->sw_ring[txq->tx_tail];
+ struct ci_tx_entry *txep = &txq->sw_ring[txq->tx_tail];
const int N_PER_LOOP = 4;
const int N_PER_LOOP_MASK = N_PER_LOOP - 1;
int mainpart, leftover;
diff --git a/drivers/net/ice/ice_rxtx.h b/drivers/net/ice/ice_rxtx.h
index 45f25b3609..8d1a1a8676 100644
--- a/drivers/net/ice/ice_rxtx.h
+++ b/drivers/net/ice/ice_rxtx.h
@@ -5,6 +5,7 @@
#ifndef _ICE_RXTX_H_
#define _ICE_RXTX_H_
+#include <_common_intel/tx.h>
#include "ice_ethdev.h"
#define ICE_ALIGN_RING_DESC 32
@@ -144,21 +145,11 @@ struct ice_rx_queue {
bool ts_enable; /* if rxq timestamp is enabled */
};
-struct ice_tx_entry {
- struct rte_mbuf *mbuf;
- uint16_t next_id;
- uint16_t last_id;
-};
-
-struct ice_vec_tx_entry {
- struct rte_mbuf *mbuf;
-};
-
struct ice_tx_queue {
uint16_t nb_tx_desc; /* number of TX descriptors */
rte_iova_t tx_ring_dma; /* TX ring DMA address */
volatile struct ice_tx_desc *tx_ring; /* TX ring virtual address */
- struct ice_tx_entry *sw_ring; /* virtual address of SW ring */
+ struct ci_tx_entry *sw_ring; /* virtual address of SW ring */
uint16_t tx_tail; /* current value of tail register */
volatile uint8_t *qtx_tail; /* register address of tail */
uint16_t nb_tx_used; /* number of TX desc used since RS bit set */
diff --git a/drivers/net/ice/ice_rxtx_vec_avx2.c b/drivers/net/ice/ice_rxtx_vec_avx2.c
index ca247b155c..cf1862263a 100644
--- a/drivers/net/ice/ice_rxtx_vec_avx2.c
+++ b/drivers/net/ice/ice_rxtx_vec_avx2.c
@@ -858,7 +858,7 @@ ice_xmit_fixed_burst_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts,
{
struct ice_tx_queue *txq = (struct ice_tx_queue *)tx_queue;
volatile struct ice_tx_desc *txdp;
- struct ice_tx_entry *txep;
+ struct ci_tx_entry *txep;
uint16_t n, nb_commit, tx_id;
uint64_t flags = ICE_TD_CMD;
uint64_t rs = ICE_TX_DESC_CMD_RS | ICE_TD_CMD;
diff --git a/drivers/net/ice/ice_rxtx_vec_avx512.c b/drivers/net/ice/ice_rxtx_vec_avx512.c
index 1e603d5d8f..6b6aa3f1fe 100644
--- a/drivers/net/ice/ice_rxtx_vec_avx512.c
+++ b/drivers/net/ice/ice_rxtx_vec_avx512.c
@@ -862,7 +862,7 @@ ice_recv_scattered_pkts_vec_avx512_offload(void *rx_queue,
static __rte_always_inline int
ice_tx_free_bufs_avx512(struct ice_tx_queue *txq)
{
- struct ice_vec_tx_entry *txep;
+ struct ci_tx_entry_vec *txep;
uint32_t n;
uint32_t i;
int nb_free = 0;
@@ -1040,7 +1040,7 @@ ice_vtx(volatile struct ice_tx_desc *txdp, struct rte_mbuf **pkt,
}
static __rte_always_inline void
-ice_tx_backlog_entry_avx512(struct ice_vec_tx_entry *txep,
+ice_tx_backlog_entry_avx512(struct ci_tx_entry_vec *txep,
struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
int i;
@@ -1055,7 +1055,7 @@ ice_xmit_fixed_burst_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
{
struct ice_tx_queue *txq = (struct ice_tx_queue *)tx_queue;
volatile struct ice_tx_desc *txdp;
- struct ice_vec_tx_entry *txep;
+ struct ci_tx_entry_vec *txep;
uint16_t n, nb_commit, tx_id;
uint64_t flags = ICE_TD_CMD;
uint64_t rs = ICE_TX_DESC_CMD_RS | ICE_TD_CMD;
diff --git a/drivers/net/ice/ice_rxtx_vec_common.h b/drivers/net/ice/ice_rxtx_vec_common.h
index dd7da4761f..3dc6061e84 100644
--- a/drivers/net/ice/ice_rxtx_vec_common.h
+++ b/drivers/net/ice/ice_rxtx_vec_common.h
@@ -15,7 +15,7 @@
static __rte_always_inline int
ice_tx_free_bufs_vec(struct ice_tx_queue *txq)
{
- struct ice_tx_entry *txep;
+ struct ci_tx_entry *txep;
uint32_t n;
uint32_t i;
int nb_free = 0;
@@ -70,7 +70,7 @@ ice_tx_free_bufs_vec(struct ice_tx_queue *txq)
}
static __rte_always_inline void
-ice_tx_backlog_entry(struct ice_tx_entry *txep,
+ice_tx_backlog_entry(struct ci_tx_entry *txep,
struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
int i;
@@ -135,7 +135,7 @@ _ice_tx_queue_release_mbufs_vec(struct ice_tx_queue *txq)
if (dev->tx_pkt_burst == ice_xmit_pkts_vec_avx512 ||
dev->tx_pkt_burst == ice_xmit_pkts_vec_avx512_offload) {
- struct ice_vec_tx_entry *swr = (void *)txq->sw_ring;
+ struct ci_tx_entry_vec *swr = (void *)txq->sw_ring;
if (txq->tx_tail < i) {
for (; i < txq->nb_tx_desc; i++) {
diff --git a/drivers/net/ice/ice_rxtx_vec_sse.c b/drivers/net/ice/ice_rxtx_vec_sse.c
index 01533454ba..889b754cc1 100644
--- a/drivers/net/ice/ice_rxtx_vec_sse.c
+++ b/drivers/net/ice/ice_rxtx_vec_sse.c
@@ -699,7 +699,7 @@ ice_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
{
struct ice_tx_queue *txq = (struct ice_tx_queue *)tx_queue;
volatile struct ice_tx_desc *txdp;
- struct ice_tx_entry *txep;
+ struct ci_tx_entry *txep;
uint16_t n, nb_commit, tx_id;
uint64_t flags = ICE_TD_CMD;
uint64_t rs = ICE_TX_DESC_CMD_RS | ICE_TD_CMD;
diff --git a/drivers/net/ixgbe/ixgbe_recycle_mbufs_vec_common.c b/drivers/net/ixgbe/ixgbe_recycle_mbufs_vec_common.c
index d451562269..2241726ad8 100644
--- a/drivers/net/ixgbe/ixgbe_recycle_mbufs_vec_common.c
+++ b/drivers/net/ixgbe/ixgbe_recycle_mbufs_vec_common.c
@@ -52,7 +52,7 @@ ixgbe_recycle_tx_mbufs_reuse_vec(void *tx_queue,
struct rte_eth_recycle_rxq_info *recycle_rxq_info)
{
struct ixgbe_tx_queue *txq = tx_queue;
- struct ixgbe_tx_entry *txep;
+ struct ci_tx_entry *txep;
struct rte_mbuf **rxep;
int i, n;
uint32_t status;
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c
index 7d16eb9df7..db4b993ebc 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx.c
@@ -100,7 +100,7 @@
static __rte_always_inline int
ixgbe_tx_free_bufs(struct ixgbe_tx_queue *txq)
{
- struct ixgbe_tx_entry *txep;
+ struct ci_tx_entry *txep;
uint32_t status;
int i, nb_free = 0;
struct rte_mbuf *m, *free[RTE_IXGBE_TX_MAX_FREE_BUF_SZ];
@@ -199,7 +199,7 @@ ixgbe_tx_fill_hw_ring(struct ixgbe_tx_queue *txq, struct rte_mbuf **pkts,
uint16_t nb_pkts)
{
volatile union ixgbe_adv_tx_desc *txdp = &(txq->tx_ring[txq->tx_tail]);
- struct ixgbe_tx_entry *txep = &(txq->sw_ring[txq->tx_tail]);
+ struct ci_tx_entry *txep = &txq->sw_ring[txq->tx_tail];
const int N_PER_LOOP = 4;
const int N_PER_LOOP_MASK = N_PER_LOOP-1;
int mainpart, leftover;
@@ -563,7 +563,7 @@ tx_desc_ol_flags_to_cmdtype(uint64_t ol_flags)
static inline int
ixgbe_xmit_cleanup(struct ixgbe_tx_queue *txq)
{
- struct ixgbe_tx_entry *sw_ring = txq->sw_ring;
+ struct ci_tx_entry *sw_ring = txq->sw_ring;
volatile union ixgbe_adv_tx_desc *txr = txq->tx_ring;
uint16_t last_desc_cleaned = txq->last_desc_cleaned;
uint16_t nb_tx_desc = txq->nb_tx_desc;
@@ -624,8 +624,8 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts)
{
struct ixgbe_tx_queue *txq;
- struct ixgbe_tx_entry *sw_ring;
- struct ixgbe_tx_entry *txe, *txn;
+ struct ci_tx_entry *sw_ring;
+ struct ci_tx_entry *txe, *txn;
volatile union ixgbe_adv_tx_desc *txr;
volatile union ixgbe_adv_tx_desc *txd, *txp;
struct rte_mbuf *tx_pkt;
@@ -2352,7 +2352,7 @@ ixgbe_tx_queue_release_mbufs(struct ixgbe_tx_queue *txq)
static int
ixgbe_tx_done_cleanup_full(struct ixgbe_tx_queue *txq, uint32_t free_cnt)
{
- struct ixgbe_tx_entry *swr_ring = txq->sw_ring;
+ struct ci_tx_entry *swr_ring = txq->sw_ring;
uint16_t i, tx_last, tx_id;
uint16_t nb_tx_free_last;
uint16_t nb_tx_to_clean;
@@ -2490,7 +2490,7 @@ static void __rte_cold
ixgbe_reset_tx_queue(struct ixgbe_tx_queue *txq)
{
static const union ixgbe_adv_tx_desc zeroed_desc = {{0}};
- struct ixgbe_tx_entry *txe = txq->sw_ring;
+ struct ci_tx_entry *txe = txq->sw_ring;
uint16_t prev, i;
/* Zero out HW ring memory */
@@ -2795,7 +2795,7 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
/* Allocate software ring */
txq->sw_ring = rte_zmalloc_socket("txq->sw_ring",
- sizeof(struct ixgbe_tx_entry) * nb_desc,
+ sizeof(struct ci_tx_entry) * nb_desc,
RTE_CACHE_LINE_SIZE, socket_id);
if (txq->sw_ring == NULL) {
ixgbe_tx_queue_release(txq);
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.h b/drivers/net/ixgbe/ixgbe_rxtx.h
index 0550c1da60..1647396419 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.h
+++ b/drivers/net/ixgbe/ixgbe_rxtx.h
@@ -5,6 +5,8 @@
#ifndef _IXGBE_RXTX_H_
#define _IXGBE_RXTX_H_
+#include <_common_intel/tx.h>
+
/*
* Rings setup and release.
*
@@ -75,22 +77,6 @@ struct ixgbe_scattered_rx_entry {
struct rte_mbuf *fbuf; /**< First segment of the fragmented packet. */
};
-/**
- * Structure associated with each descriptor of the TX ring of a TX queue.
- */
-struct ixgbe_tx_entry {
- struct rte_mbuf *mbuf; /**< mbuf associated with TX desc, if any. */
- uint16_t next_id; /**< Index of next descriptor in ring. */
- uint16_t last_id; /**< Index of last scattered descriptor. */
-};
-
-/**
- * Structure associated with each descriptor of the TX ring of a TX queue.
- */
-struct ixgbe_tx_entry_v {
- struct rte_mbuf *mbuf; /**< mbuf associated with TX desc, if any. */
-};
-
/**
* Structure associated with each RX queue.
*/
@@ -202,8 +188,8 @@ struct ixgbe_tx_queue {
volatile union ixgbe_adv_tx_desc *tx_ring;
uint64_t tx_ring_phys_addr; /**< TX ring DMA address. */
union {
- struct ixgbe_tx_entry *sw_ring; /**< address of SW ring for scalar PMD. */
- struct ixgbe_tx_entry_v *sw_ring_v; /**< address of SW ring for vector PMD */
+ struct ci_tx_entry *sw_ring; /**< address of SW ring for scalar PMD. */
+ struct ci_tx_entry_vec *sw_ring_v; /**< address of SW ring for vector PMD */
};
volatile uint32_t *tdt_reg_addr; /**< Address of TDT register. */
uint16_t nb_tx_desc; /**< number of TX descriptors. */
diff --git a/drivers/net/ixgbe/ixgbe_rxtx_vec_common.h b/drivers/net/ixgbe/ixgbe_rxtx_vec_common.h
index 2bab17c934..e9592c0d08 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx_vec_common.h
+++ b/drivers/net/ixgbe/ixgbe_rxtx_vec_common.h
@@ -14,7 +14,7 @@
static __rte_always_inline int
ixgbe_tx_free_bufs(struct ixgbe_tx_queue *txq)
{
- struct ixgbe_tx_entry_v *txep;
+ struct ci_tx_entry_vec *txep;
uint32_t status;
uint32_t n;
uint32_t i;
@@ -69,7 +69,7 @@ ixgbe_tx_free_bufs(struct ixgbe_tx_queue *txq)
}
static __rte_always_inline void
-tx_backlog_entry(struct ixgbe_tx_entry_v *txep,
+tx_backlog_entry(struct ci_tx_entry_vec *txep,
struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
int i;
@@ -82,7 +82,7 @@ static inline void
_ixgbe_tx_queue_release_mbufs_vec(struct ixgbe_tx_queue *txq)
{
unsigned int i;
- struct ixgbe_tx_entry_v *txe;
+ struct ci_tx_entry_vec *txe;
const uint16_t max_desc = (uint16_t)(txq->nb_tx_desc - 1);
if (txq->sw_ring == NULL || txq->nb_tx_free == max_desc)
@@ -149,7 +149,7 @@ static inline void
_ixgbe_reset_tx_queue_vec(struct ixgbe_tx_queue *txq)
{
static const union ixgbe_adv_tx_desc zeroed_desc = { { 0 } };
- struct ixgbe_tx_entry_v *txe = txq->sw_ring_v;
+ struct ci_tx_entry_vec *txe = txq->sw_ring_v;
uint16_t i;
/* Zero out HW ring memory */
diff --git a/drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c b/drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c
index 7b35093075..02b53c008e 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c
@@ -573,7 +573,7 @@ ixgbe_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
{
struct ixgbe_tx_queue *txq = (struct ixgbe_tx_queue *)tx_queue;
volatile union ixgbe_adv_tx_desc *txdp;
- struct ixgbe_tx_entry_v *txep;
+ struct ci_tx_entry_vec *txep;
uint16_t n, nb_commit, tx_id;
uint64_t flags = DCMD_DTYP_FLAGS;
uint64_t rs = IXGBE_ADVTXD_DCMD_RS | DCMD_DTYP_FLAGS;
diff --git a/drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c b/drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c
index a709bf8c7f..c8b5377c9f 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c
@@ -695,7 +695,7 @@ ixgbe_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
{
struct ixgbe_tx_queue *txq = (struct ixgbe_tx_queue *)tx_queue;
volatile union ixgbe_adv_tx_desc *txdp;
- struct ixgbe_tx_entry_v *txep;
+ struct ci_tx_entry_vec *txep;
uint16_t n, nb_commit, tx_id;
uint64_t flags = DCMD_DTYP_FLAGS;
uint64_t rs = IXGBE_ADVTXD_DCMD_RS|DCMD_DTYP_FLAGS;
--
2.43.0