From: Bruce Richardson <bruce.richardson@intel.com>
To: dev@dpdk.org
Cc: Bruce Richardson <bruce.richardson@intel.com>,
Anatoly Burakov <anatoly.burakov@intel.com>,
Vladimir Medvedkin <vladimir.medvedkin@intel.com>,
Wathsala Vithanage <wathsala.vithanage@arm.com>,
Konstantin Ananyev <konstantin.v.ananyev@yandex.ru>
Subject: [PATCH v4 09/24] net/ixgbe: use common Tx queue structure
Date: Fri, 20 Dec 2024 14:39:06 +0000
Message-ID: <20241220143925.609044-10-bruce.richardson@intel.com>
In-Reply-To: <20241220143925.609044-1-bruce.richardson@intel.com>

Merge in the additional fields used by the ixgbe driver, then convert the
driver over to using the common Tx queue structure.

Signed-off-by: Bruce Richardson <bruce.richardson@intel.com>
---
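As a reviewer aid (not part of the applied patch), here is a condensed
sketch of struct ci_tx_queue as it stands after this change; only the
fields this patch adds or touches are shown, and the authoritative
definition is in drivers/net/_common_intel/tx.h:

struct ci_tx_queue {
        union { /* descriptor ring: one member per PMD, overlapping */
                volatile struct i40e_tx_desc *i40e_tx_ring;
                volatile struct iavf_tx_desc *iavf_tx_ring;
                volatile struct ice_tx_desc *ice_tx_ring;
                volatile union ixgbe_adv_tx_desc *ixgbe_tx_ring;
        };
        union { /* SW ring: scalar vs vector entry layouts */
                struct ci_tx_entry *sw_ring;
                struct ci_tx_entry_vec *sw_ring_vec;
        };
        /* ... fields common to all drivers elided ... */
        union { /* trailing driver-specific fields */
                struct { /* ixgbe specific values */
                        const struct ixgbe_txq_ops *ops;
                        struct ixgbe_advctx_info *ctx_cache;
                        uint32_t ctx_curr;
                        /* plus using_ipsec when RTE_LIB_SECURITY is set */
                };
                /* ... other drivers' blocks elided ... */
        };
};

The anonymous unions keep the per-driver members overlapping, so each
PMD pays only for the largest variant rather than the sum of all of them.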
drivers/net/_common_intel/tx.h | 14 +++-
drivers/net/ixgbe/ixgbe_ethdev.c | 4 +-
.../ixgbe/ixgbe_recycle_mbufs_vec_common.c | 2 +-
drivers/net/ixgbe/ixgbe_rxtx.c | 64 +++++++++----------
drivers/net/ixgbe/ixgbe_rxtx.h | 56 ++--------------
drivers/net/ixgbe/ixgbe_rxtx_vec_common.h | 26 ++++----
drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c | 14 ++--
drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c | 14 ++--
8 files changed, 80 insertions(+), 114 deletions(-)
diff --git a/drivers/net/_common_intel/tx.h b/drivers/net/_common_intel/tx.h
index c4a1a0c816..51ae3b051d 100644
--- a/drivers/net/_common_intel/tx.h
+++ b/drivers/net/_common_intel/tx.h
@@ -34,9 +34,13 @@ struct ci_tx_queue {
volatile struct i40e_tx_desc *i40e_tx_ring;
volatile struct iavf_tx_desc *iavf_tx_ring;
volatile struct ice_tx_desc *ice_tx_ring;
+ volatile union ixgbe_adv_tx_desc *ixgbe_tx_ring;
};
volatile uint8_t *qtx_tail; /* register address of tail */
- struct ci_tx_entry *sw_ring; /* virtual address of SW ring */
+ union {
+ struct ci_tx_entry *sw_ring; /* virtual address of SW ring */
+ struct ci_tx_entry_vec *sw_ring_vec;
+ };
rte_iova_t tx_ring_dma; /* TX ring DMA address */
uint16_t nb_tx_desc; /* number of TX descriptors */
uint16_t tx_tail; /* current value of tail register */
@@ -87,6 +91,14 @@ struct ci_tx_queue {
uint8_t tc;
bool use_ctx; /* with ctx info, each pkt needs two descriptors */
};
+ struct { /* ixgbe specific values */
+ const struct ixgbe_txq_ops *ops;
+ struct ixgbe_advctx_info *ctx_cache;
+ uint32_t ctx_curr;
+#ifdef RTE_LIB_SECURITY
+ uint8_t using_ipsec; /**< indicates that IPsec TX feature is in use */
+#endif
+ };
};
};
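To illustrate how driver code reaches its own descriptor type through the
merged structure, a minimal sketch; the helper name here is hypothetical
and the usual DPDK headers (e.g. rte_byteorder.h) are assumed:

/* Hypothetical helper, not part of this patch: ixgbe paths read the
 * hardware write-back status through their own union member; every
 * other driver does the same through its member over the same storage.
 */
static inline uint32_t
example_ixgbe_desc_status(struct ci_tx_queue *txq, uint16_t idx)
{
        return rte_le_to_cpu_32(txq->ixgbe_tx_ring[idx].wb.status);
}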
diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index 8bee97d191..5f18fbaad5 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.c
+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -1118,7 +1118,7 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
* RX and TX function.
*/
if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
- struct ixgbe_tx_queue *txq;
+ struct ci_tx_queue *txq;
/* TX queue function in primary, set by last queue initialized
* Tx queue may not initialized by primary process
*/
@@ -1623,7 +1623,7 @@ eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev)
* RX function
*/
if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
- struct ixgbe_tx_queue *txq;
+ struct ci_tx_queue *txq;
/* TX queue function in primary, set by last queue initialized
* Tx queue may not initialized by primary process
*/
diff --git a/drivers/net/ixgbe/ixgbe_recycle_mbufs_vec_common.c b/drivers/net/ixgbe/ixgbe_recycle_mbufs_vec_common.c
index a878db3150..3fd05ed5eb 100644
--- a/drivers/net/ixgbe/ixgbe_recycle_mbufs_vec_common.c
+++ b/drivers/net/ixgbe/ixgbe_recycle_mbufs_vec_common.c
@@ -51,7 +51,7 @@ uint16_t
ixgbe_recycle_tx_mbufs_reuse_vec(void *tx_queue,
struct rte_eth_recycle_rxq_info *recycle_rxq_info)
{
- struct ixgbe_tx_queue *txq = tx_queue;
+ struct ci_tx_queue *txq = tx_queue;
struct ci_tx_entry *txep;
struct rte_mbuf **rxep;
int i, n;
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c
index 2ca26cd132..344ef85685 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx.c
@@ -98,7 +98,7 @@
* Return the total number of buffers freed.
*/
static __rte_always_inline int
-ixgbe_tx_free_bufs(struct ixgbe_tx_queue *txq)
+ixgbe_tx_free_bufs(struct ci_tx_queue *txq)
{
struct ci_tx_entry *txep;
uint32_t status;
@@ -195,7 +195,7 @@ tx1(volatile union ixgbe_adv_tx_desc *txdp, struct rte_mbuf **pkts)
* Copy mbuf pointers to the S/W ring.
*/
static inline void
-ixgbe_tx_fill_hw_ring(struct ixgbe_tx_queue *txq, struct rte_mbuf **pkts,
+ixgbe_tx_fill_hw_ring(struct ci_tx_queue *txq, struct rte_mbuf **pkts,
uint16_t nb_pkts)
{
volatile union ixgbe_adv_tx_desc *txdp = &txq->ixgbe_tx_ring[txq->tx_tail];
@@ -231,7 +231,7 @@ static inline uint16_t
tx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts)
{
- struct ixgbe_tx_queue *txq = (struct ixgbe_tx_queue *)tx_queue;
+ struct ci_tx_queue *txq = (struct ci_tx_queue *)tx_queue;
volatile union ixgbe_adv_tx_desc *tx_r = txq->ixgbe_tx_ring;
uint16_t n = 0;
@@ -344,7 +344,7 @@ ixgbe_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts)
{
uint16_t nb_tx = 0;
- struct ixgbe_tx_queue *txq = (struct ixgbe_tx_queue *)tx_queue;
+ struct ci_tx_queue *txq = (struct ci_tx_queue *)tx_queue;
while (nb_pkts) {
uint16_t ret, num;
@@ -362,7 +362,7 @@ ixgbe_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
}
static inline void
-ixgbe_set_xmit_ctx(struct ixgbe_tx_queue *txq,
+ixgbe_set_xmit_ctx(struct ci_tx_queue *txq,
volatile struct ixgbe_adv_tx_context_desc *ctx_txd,
uint64_t ol_flags, union ixgbe_tx_offload tx_offload,
__rte_unused uint64_t *mdata)
@@ -493,7 +493,7 @@ ixgbe_set_xmit_ctx(struct ixgbe_tx_queue *txq,
* or create a new context descriptor.
*/
static inline uint32_t
-what_advctx_update(struct ixgbe_tx_queue *txq, uint64_t flags,
+what_advctx_update(struct ci_tx_queue *txq, uint64_t flags,
union ixgbe_tx_offload tx_offload)
{
/* If match with the current used context */
@@ -561,7 +561,7 @@ tx_desc_ol_flags_to_cmdtype(uint64_t ol_flags)
/* Reset transmit descriptors after they have been used */
static inline int
-ixgbe_xmit_cleanup(struct ixgbe_tx_queue *txq)
+ixgbe_xmit_cleanup(struct ci_tx_queue *txq)
{
struct ci_tx_entry *sw_ring = txq->sw_ring;
volatile union ixgbe_adv_tx_desc *txr = txq->ixgbe_tx_ring;
@@ -623,7 +623,7 @@ uint16_t
ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts)
{
- struct ixgbe_tx_queue *txq;
+ struct ci_tx_queue *txq;
struct ci_tx_entry *sw_ring;
struct ci_tx_entry *txe, *txn;
volatile union ixgbe_adv_tx_desc *txr;
@@ -963,7 +963,7 @@ ixgbe_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
int i, ret;
uint64_t ol_flags;
struct rte_mbuf *m;
- struct ixgbe_tx_queue *txq = (struct ixgbe_tx_queue *)tx_queue;
+ struct ci_tx_queue *txq = (struct ci_tx_queue *)tx_queue;
for (i = 0; i < nb_pkts; i++) {
m = tx_pkts[i];
@@ -2335,7 +2335,7 @@ ixgbe_recv_pkts_lro_bulk_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
**********************************************************************/
static void __rte_cold
-ixgbe_tx_queue_release_mbufs(struct ixgbe_tx_queue *txq)
+ixgbe_tx_queue_release_mbufs(struct ci_tx_queue *txq)
{
unsigned i;
@@ -2350,7 +2350,7 @@ ixgbe_tx_queue_release_mbufs(struct ixgbe_tx_queue *txq)
}
static int
-ixgbe_tx_done_cleanup_full(struct ixgbe_tx_queue *txq, uint32_t free_cnt)
+ixgbe_tx_done_cleanup_full(struct ci_tx_queue *txq, uint32_t free_cnt)
{
struct ci_tx_entry *swr_ring = txq->sw_ring;
uint16_t i, tx_last, tx_id;
@@ -2408,7 +2408,7 @@ ixgbe_tx_done_cleanup_full(struct ixgbe_tx_queue *txq, uint32_t free_cnt)
}
static int
-ixgbe_tx_done_cleanup_simple(struct ixgbe_tx_queue *txq,
+ixgbe_tx_done_cleanup_simple(struct ci_tx_queue *txq,
uint32_t free_cnt)
{
int i, n, cnt;
@@ -2432,7 +2432,7 @@ ixgbe_tx_done_cleanup_simple(struct ixgbe_tx_queue *txq,
}
static int
-ixgbe_tx_done_cleanup_vec(struct ixgbe_tx_queue *txq __rte_unused,
+ixgbe_tx_done_cleanup_vec(struct ci_tx_queue *txq __rte_unused,
uint32_t free_cnt __rte_unused)
{
return -ENOTSUP;
@@ -2441,7 +2441,7 @@ ixgbe_tx_done_cleanup_vec(struct ixgbe_tx_queue *txq __rte_unused,
int
ixgbe_dev_tx_done_cleanup(void *tx_queue, uint32_t free_cnt)
{
- struct ixgbe_tx_queue *txq = (struct ixgbe_tx_queue *)tx_queue;
+ struct ci_tx_queue *txq = (struct ci_tx_queue *)tx_queue;
if (txq->offloads == 0 &&
#ifdef RTE_LIB_SECURITY
!(txq->using_ipsec) &&
@@ -2450,7 +2450,7 @@ ixgbe_dev_tx_done_cleanup(void *tx_queue, uint32_t free_cnt)
if (txq->tx_rs_thresh <= RTE_IXGBE_TX_MAX_FREE_BUF_SZ &&
rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128 &&
(rte_eal_process_type() != RTE_PROC_PRIMARY ||
- txq->sw_ring_v != NULL)) {
+ txq->sw_ring_vec != NULL)) {
return ixgbe_tx_done_cleanup_vec(txq, free_cnt);
} else {
return ixgbe_tx_done_cleanup_simple(txq, free_cnt);
@@ -2461,7 +2461,7 @@ ixgbe_dev_tx_done_cleanup(void *tx_queue, uint32_t free_cnt)
}
static void __rte_cold
-ixgbe_tx_free_swring(struct ixgbe_tx_queue *txq)
+ixgbe_tx_free_swring(struct ci_tx_queue *txq)
{
if (txq != NULL &&
txq->sw_ring != NULL)
@@ -2469,7 +2469,7 @@ ixgbe_tx_free_swring(struct ixgbe_tx_queue *txq)
}
static void __rte_cold
-ixgbe_tx_queue_release(struct ixgbe_tx_queue *txq)
+ixgbe_tx_queue_release(struct ci_tx_queue *txq)
{
if (txq != NULL && txq->ops != NULL) {
txq->ops->release_mbufs(txq);
@@ -2487,7 +2487,7 @@ ixgbe_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
/* (Re)set dynamic ixgbe_tx_queue fields to defaults */
static void __rte_cold
-ixgbe_reset_tx_queue(struct ixgbe_tx_queue *txq)
+ixgbe_reset_tx_queue(struct ci_tx_queue *txq)
{
static const union ixgbe_adv_tx_desc zeroed_desc = {{0}};
struct ci_tx_entry *txe = txq->sw_ring;
@@ -2536,7 +2536,7 @@ static const struct ixgbe_txq_ops def_txq_ops = {
* in dev_init by secondary process when attaching to an existing ethdev.
*/
void __rte_cold
-ixgbe_set_tx_function(struct rte_eth_dev *dev, struct ixgbe_tx_queue *txq)
+ixgbe_set_tx_function(struct rte_eth_dev *dev, struct ci_tx_queue *txq)
{
/* Use a simple Tx queue (no offloads, no multi segs) if possible */
if ((txq->offloads == 0) &&
@@ -2618,7 +2618,7 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
const struct rte_eth_txconf *tx_conf)
{
const struct rte_memzone *tz;
- struct ixgbe_tx_queue *txq;
+ struct ci_tx_queue *txq;
struct ixgbe_hw *hw;
uint16_t tx_rs_thresh, tx_free_thresh;
uint64_t offloads;
@@ -2740,12 +2740,12 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
}
/* First allocate the tx queue data structure */
- txq = rte_zmalloc_socket("ethdev TX queue", sizeof(struct ixgbe_tx_queue) +
+ txq = rte_zmalloc_socket("ethdev TX queue", sizeof(struct ci_tx_queue) +
sizeof(struct ixgbe_advctx_info) * IXGBE_CTX_NUM,
RTE_CACHE_LINE_SIZE, socket_id);
if (txq == NULL)
return -ENOMEM;
- txq->ctx_cache = RTE_PTR_ADD(txq, sizeof(struct ixgbe_tx_queue));
+ txq->ctx_cache = RTE_PTR_ADD(txq, sizeof(struct ci_tx_queue));
/*
* Allocate TX ring hardware descriptors. A memzone large enough to
@@ -3312,7 +3312,7 @@ ixgbe_dev_rx_descriptor_status(void *rx_queue, uint16_t offset)
int
ixgbe_dev_tx_descriptor_status(void *tx_queue, uint16_t offset)
{
- struct ixgbe_tx_queue *txq = tx_queue;
+ struct ci_tx_queue *txq = tx_queue;
volatile uint32_t *status;
uint32_t desc;
@@ -3377,7 +3377,7 @@ ixgbe_dev_clear_queues(struct rte_eth_dev *dev)
PMD_INIT_FUNC_TRACE();
for (i = 0; i < dev->data->nb_tx_queues; i++) {
- struct ixgbe_tx_queue *txq = dev->data->tx_queues[i];
+ struct ci_tx_queue *txq = dev->data->tx_queues[i];
if (txq != NULL) {
txq->ops->release_mbufs(txq);
@@ -5284,7 +5284,7 @@ void __rte_cold
ixgbe_dev_tx_init(struct rte_eth_dev *dev)
{
struct ixgbe_hw *hw;
- struct ixgbe_tx_queue *txq;
+ struct ci_tx_queue *txq;
uint64_t bus_addr;
uint32_t hlreg0;
uint32_t txctrl;
@@ -5402,7 +5402,7 @@ int __rte_cold
ixgbe_dev_rxtx_start(struct rte_eth_dev *dev)
{
struct ixgbe_hw *hw;
- struct ixgbe_tx_queue *txq;
+ struct ci_tx_queue *txq;
struct ixgbe_rx_queue *rxq;
uint32_t txdctl;
uint32_t dmatxctl;
@@ -5572,7 +5572,7 @@ int __rte_cold
ixgbe_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
struct ixgbe_hw *hw;
- struct ixgbe_tx_queue *txq;
+ struct ci_tx_queue *txq;
uint32_t txdctl;
int poll_ms;
@@ -5611,7 +5611,7 @@ int __rte_cold
ixgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
struct ixgbe_hw *hw;
- struct ixgbe_tx_queue *txq;
+ struct ci_tx_queue *txq;
uint32_t txdctl;
uint32_t txtdh, txtdt;
int poll_ms;
@@ -5685,7 +5685,7 @@ void
ixgbe_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
struct rte_eth_txq_info *qinfo)
{
- struct ixgbe_tx_queue *txq;
+ struct ci_tx_queue *txq;
txq = dev->data->tx_queues[queue_id];
@@ -5877,7 +5877,7 @@ void __rte_cold
ixgbevf_dev_tx_init(struct rte_eth_dev *dev)
{
struct ixgbe_hw *hw;
- struct ixgbe_tx_queue *txq;
+ struct ci_tx_queue *txq;
uint64_t bus_addr;
uint32_t txctrl;
uint16_t i;
@@ -5918,7 +5918,7 @@ void __rte_cold
ixgbevf_dev_rxtx_start(struct rte_eth_dev *dev)
{
struct ixgbe_hw *hw;
- struct ixgbe_tx_queue *txq;
+ struct ci_tx_queue *txq;
struct ixgbe_rx_queue *rxq;
uint32_t txdctl;
uint32_t rxdctl;
@@ -6127,7 +6127,7 @@ ixgbe_xmit_fixed_burst_vec(void __rte_unused *tx_queue,
}
int
-ixgbe_txq_vec_setup(struct ixgbe_tx_queue __rte_unused *txq)
+ixgbe_txq_vec_setup(struct ci_tx_queue *txq __rte_unused)
{
return -1;
}
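Worth noting from the ixgbe_dev_tx_queue_setup() hunk above: the context
cache stays co-located with the queue structure in a single allocation.
Condensed, the pattern is:

/* One allocation covers the queue struct plus its trailing
 * context-cache array; ctx_cache then points just past the
 * fixed-size struct.
 */
txq = rte_zmalloc_socket("ethdev TX queue",
                sizeof(struct ci_tx_queue) +
                sizeof(struct ixgbe_advctx_info) * IXGBE_CTX_NUM,
                RTE_CACHE_LINE_SIZE, socket_id);
if (txq == NULL)
        return -ENOMEM;
txq->ctx_cache = RTE_PTR_ADD(txq, sizeof(struct ci_tx_queue));

A single allocation also means the release path needs only one
rte_free() to cover both the struct and the cache.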
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.h b/drivers/net/ixgbe/ixgbe_rxtx.h
index 847cacf7b5..4333e5bf2f 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.h
+++ b/drivers/net/ixgbe/ixgbe_rxtx.h
@@ -180,56 +180,10 @@ struct ixgbe_advctx_info {
union ixgbe_tx_offload tx_offload_mask;
};
-/**
- * Structure associated with each TX queue.
- */
-struct ixgbe_tx_queue {
- /** TX ring virtual address. */
- volatile union ixgbe_adv_tx_desc *ixgbe_tx_ring;
- rte_iova_t tx_ring_dma; /**< TX ring DMA address. */
- union {
- struct ci_tx_entry *sw_ring; /**< address of SW ring for scalar PMD. */
- struct ci_tx_entry_vec *sw_ring_v; /**< address of SW ring for vector PMD */
- };
- volatile uint8_t *qtx_tail; /**< Address of TDT register. */
- uint16_t nb_tx_desc; /**< number of TX descriptors. */
- uint16_t tx_tail; /**< current value of TDT reg. */
- /**< Start freeing TX buffers if there are less free descriptors than
- this value. */
- uint16_t tx_free_thresh;
- /** Number of TX descriptors to use before RS bit is set. */
- uint16_t tx_rs_thresh;
- /** Number of TX descriptors used since RS bit was set. */
- uint16_t nb_tx_used;
- /** Index to last TX descriptor to have been cleaned. */
- uint16_t last_desc_cleaned;
- /** Total number of TX descriptors ready to be allocated. */
- uint16_t nb_tx_free;
- uint16_t tx_next_dd; /**< next desc to scan for DD bit */
- uint16_t tx_next_rs; /**< next desc to set RS bit */
- uint16_t queue_id; /**< TX queue index. */
- uint16_t reg_idx; /**< TX queue register index. */
- uint16_t port_id; /**< Device port identifier. */
- uint8_t pthresh; /**< Prefetch threshold register. */
- uint8_t hthresh; /**< Host threshold register. */
- uint8_t wthresh; /**< Write-back threshold reg. */
- uint64_t offloads; /**< Tx offload flags of RTE_ETH_TX_OFFLOAD_* */
- uint32_t ctx_curr; /**< Hardware context states. */
- /** Hardware context history. */
- struct ixgbe_advctx_info *ctx_cache;
- const struct ixgbe_txq_ops *ops; /**< txq ops */
- bool tx_deferred_start; /**< not in global dev start. */
-#ifdef RTE_LIB_SECURITY
- uint8_t using_ipsec;
- /**< indicates that IPsec TX feature is in use */
-#endif
- const struct rte_memzone *mz;
-};
-
struct ixgbe_txq_ops {
- void (*release_mbufs)(struct ixgbe_tx_queue *txq);
- void (*free_swring)(struct ixgbe_tx_queue *txq);
- void (*reset)(struct ixgbe_tx_queue *txq);
+ void (*release_mbufs)(struct ci_tx_queue *txq);
+ void (*free_swring)(struct ci_tx_queue *txq);
+ void (*reset)(struct ci_tx_queue *txq);
};
/*
@@ -250,7 +204,7 @@ struct ixgbe_txq_ops {
* the queue parameters. Used in tx_queue_setup by primary process and then
* in dev_init by secondary process when attaching to an existing ethdev.
*/
-void ixgbe_set_tx_function(struct rte_eth_dev *dev, struct ixgbe_tx_queue *txq);
+void ixgbe_set_tx_function(struct rte_eth_dev *dev, struct ci_tx_queue *txq);
/**
* Sets the rx_pkt_burst callback in the ixgbe rte_eth_dev instance.
@@ -287,7 +241,7 @@ void ixgbe_recycle_rx_descriptors_refill_vec(void *rx_queue, uint16_t nb_mbufs);
uint16_t ixgbe_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts);
-int ixgbe_txq_vec_setup(struct ixgbe_tx_queue *txq);
+int ixgbe_txq_vec_setup(struct ci_tx_queue *txq);
uint64_t ixgbe_get_tx_port_offloads(struct rte_eth_dev *dev);
uint64_t ixgbe_get_rx_queue_offloads(struct rte_eth_dev *dev);
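Because the ops table now takes the common queue type, call sites change
only in the pointer type they hold. A minimal sketch of the dispatch,
mirroring ixgbe_tx_queue_release() in ixgbe_rxtx.c:

/* The per-queue vtable lets scalar and vector builds plug in their
 * own release/reset behaviour behind the common queue type.
 */
struct ci_tx_queue *txq = dev->data->tx_queues[queue_id];

if (txq != NULL && txq->ops != NULL) {
        txq->ops->release_mbufs(txq); /* scalar or vector variant */
        txq->ops->free_swring(txq);
}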
diff --git a/drivers/net/ixgbe/ixgbe_rxtx_vec_common.h b/drivers/net/ixgbe/ixgbe_rxtx_vec_common.h
index ec334b5f65..06e760867c 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx_vec_common.h
+++ b/drivers/net/ixgbe/ixgbe_rxtx_vec_common.h
@@ -12,7 +12,7 @@
#include "ixgbe_rxtx.h"
static __rte_always_inline int
-ixgbe_tx_free_bufs(struct ixgbe_tx_queue *txq)
+ixgbe_tx_free_bufs(struct ci_tx_queue *txq)
{
struct ci_tx_entry_vec *txep;
uint32_t status;
@@ -32,7 +32,7 @@ ixgbe_tx_free_bufs(struct ixgbe_tx_queue *txq)
* first buffer to free from S/W ring is at index
* tx_next_dd - (tx_rs_thresh-1)
*/
- txep = &txq->sw_ring_v[txq->tx_next_dd - (n - 1)];
+ txep = &txq->sw_ring_vec[txq->tx_next_dd - (n - 1)];
m = rte_pktmbuf_prefree_seg(txep[0].mbuf);
if (likely(m != NULL)) {
free[0] = m;
@@ -79,7 +79,7 @@ tx_backlog_entry(struct ci_tx_entry_vec *txep,
}
static inline void
-_ixgbe_tx_queue_release_mbufs_vec(struct ixgbe_tx_queue *txq)
+_ixgbe_tx_queue_release_mbufs_vec(struct ci_tx_queue *txq)
{
unsigned int i;
struct ci_tx_entry_vec *txe;
@@ -92,14 +92,14 @@ _ixgbe_tx_queue_release_mbufs_vec(struct ixgbe_tx_queue *txq)
for (i = txq->tx_next_dd - (txq->tx_rs_thresh - 1);
i != txq->tx_tail;
i = (i + 1) % txq->nb_tx_desc) {
- txe = &txq->sw_ring_v[i];
+ txe = &txq->sw_ring_vec[i];
rte_pktmbuf_free_seg(txe->mbuf);
}
txq->nb_tx_free = max_desc;
/* reset tx_entry */
for (i = 0; i < txq->nb_tx_desc; i++) {
- txe = &txq->sw_ring_v[i];
+ txe = &txq->sw_ring_vec[i];
txe->mbuf = NULL;
}
}
@@ -134,22 +134,22 @@ _ixgbe_rx_queue_release_mbufs_vec(struct ixgbe_rx_queue *rxq)
}
static inline void
-_ixgbe_tx_free_swring_vec(struct ixgbe_tx_queue *txq)
+_ixgbe_tx_free_swring_vec(struct ci_tx_queue *txq)
{
if (txq == NULL)
return;
if (txq->sw_ring != NULL) {
- rte_free(txq->sw_ring_v - 1);
- txq->sw_ring_v = NULL;
+ rte_free(txq->sw_ring_vec - 1);
+ txq->sw_ring_vec = NULL;
}
}
static inline void
-_ixgbe_reset_tx_queue_vec(struct ixgbe_tx_queue *txq)
+_ixgbe_reset_tx_queue_vec(struct ci_tx_queue *txq)
{
static const union ixgbe_adv_tx_desc zeroed_desc = { { 0 } };
- struct ci_tx_entry_vec *txe = txq->sw_ring_v;
+ struct ci_tx_entry_vec *txe = txq->sw_ring_vec;
uint16_t i;
/* Zero out HW ring memory */
@@ -198,14 +198,14 @@ ixgbe_rxq_vec_setup_default(struct ixgbe_rx_queue *rxq)
}
static inline int
-ixgbe_txq_vec_setup_default(struct ixgbe_tx_queue *txq,
+ixgbe_txq_vec_setup_default(struct ci_tx_queue *txq,
const struct ixgbe_txq_ops *txq_ops)
{
- if (txq->sw_ring_v == NULL)
+ if (txq->sw_ring_vec == NULL)
return -1;
/* leave the first one for overflow */
- txq->sw_ring_v = txq->sw_ring_v + 1;
+ txq->sw_ring_vec = txq->sw_ring_vec + 1;
txq->ops = txq_ops;
return 0;
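The free/setup pair above encodes an easy-to-miss invariant:
ixgbe_txq_vec_setup_default() advances sw_ring_vec by one entry to leave
the first slot for overflow, so the free path must step the pointer back
before handing it to rte_free(). Condensed from the two hunks:

/* Setup: leave the first entry as an overflow slot, so the usable
 * ring starts one element into the allocation.
 */
txq->sw_ring_vec = txq->sw_ring_vec + 1;

/* Teardown: undo the offset, otherwise rte_free() would be handed
 * a pointer that does not match the original allocation.
 */
rte_free(txq->sw_ring_vec - 1);
txq->sw_ring_vec = NULL;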
diff --git a/drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c b/drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c
index 06be7ec82a..cb749a3760 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c
@@ -571,7 +571,7 @@ uint16_t
ixgbe_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts)
{
- struct ixgbe_tx_queue *txq = (struct ixgbe_tx_queue *)tx_queue;
+ struct ci_tx_queue *txq = (struct ci_tx_queue *)tx_queue;
volatile union ixgbe_adv_tx_desc *txdp;
struct ci_tx_entry_vec *txep;
uint16_t n, nb_commit, tx_id;
@@ -591,7 +591,7 @@ ixgbe_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
tx_id = txq->tx_tail;
txdp = &txq->ixgbe_tx_ring[tx_id];
- txep = &txq->sw_ring_v[tx_id];
+ txep = &txq->sw_ring_vec[tx_id];
txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);
@@ -611,7 +611,7 @@ ixgbe_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
/* avoid reach the end of ring */
txdp = &txq->ixgbe_tx_ring[tx_id];
- txep = &txq->sw_ring_v[tx_id];
+ txep = &txq->sw_ring_vec[tx_id];
}
tx_backlog_entry(txep, tx_pkts, nb_commit);
@@ -634,7 +634,7 @@ ixgbe_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
}
static void __rte_cold
-ixgbe_tx_queue_release_mbufs_vec(struct ixgbe_tx_queue *txq)
+ixgbe_tx_queue_release_mbufs_vec(struct ci_tx_queue *txq)
{
_ixgbe_tx_queue_release_mbufs_vec(txq);
}
@@ -646,13 +646,13 @@ ixgbe_rx_queue_release_mbufs_vec(struct ixgbe_rx_queue *rxq)
}
static void __rte_cold
-ixgbe_tx_free_swring(struct ixgbe_tx_queue *txq)
+ixgbe_tx_free_swring(struct ci_tx_queue *txq)
{
_ixgbe_tx_free_swring_vec(txq);
}
static void __rte_cold
-ixgbe_reset_tx_queue(struct ixgbe_tx_queue *txq)
+ixgbe_reset_tx_queue(struct ci_tx_queue *txq)
{
_ixgbe_reset_tx_queue_vec(txq);
}
@@ -670,7 +670,7 @@ ixgbe_rxq_vec_setup(struct ixgbe_rx_queue *rxq)
}
int __rte_cold
-ixgbe_txq_vec_setup(struct ixgbe_tx_queue *txq)
+ixgbe_txq_vec_setup(struct ci_tx_queue *txq)
{
return ixgbe_txq_vec_setup_default(txq, &vec_txq_ops);
}
diff --git a/drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c b/drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c
index a21a57bd55..e46550f76a 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c
@@ -693,7 +693,7 @@ uint16_t
ixgbe_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts)
{
- struct ixgbe_tx_queue *txq = (struct ixgbe_tx_queue *)tx_queue;
+ struct ci_tx_queue *txq = (struct ci_tx_queue *)tx_queue;
volatile union ixgbe_adv_tx_desc *txdp;
struct ci_tx_entry_vec *txep;
uint16_t n, nb_commit, tx_id;
@@ -713,7 +713,7 @@ ixgbe_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
tx_id = txq->tx_tail;
txdp = &txq->ixgbe_tx_ring[tx_id];
- txep = &txq->sw_ring_v[tx_id];
+ txep = &txq->sw_ring_vec[tx_id];
txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);
@@ -734,7 +734,7 @@ ixgbe_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
/* avoid reach the end of ring */
txdp = &txq->ixgbe_tx_ring[tx_id];
- txep = &txq->sw_ring_v[tx_id];
+ txep = &txq->sw_ring_vec[tx_id];
}
tx_backlog_entry(txep, tx_pkts, nb_commit);
@@ -757,7 +757,7 @@ ixgbe_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
}
static void __rte_cold
-ixgbe_tx_queue_release_mbufs_vec(struct ixgbe_tx_queue *txq)
+ixgbe_tx_queue_release_mbufs_vec(struct ci_tx_queue *txq)
{
_ixgbe_tx_queue_release_mbufs_vec(txq);
}
@@ -769,13 +769,13 @@ ixgbe_rx_queue_release_mbufs_vec(struct ixgbe_rx_queue *rxq)
}
static void __rte_cold
-ixgbe_tx_free_swring(struct ixgbe_tx_queue *txq)
+ixgbe_tx_free_swring(struct ci_tx_queue *txq)
{
_ixgbe_tx_free_swring_vec(txq);
}
static void __rte_cold
-ixgbe_reset_tx_queue(struct ixgbe_tx_queue *txq)
+ixgbe_reset_tx_queue(struct ci_tx_queue *txq)
{
_ixgbe_reset_tx_queue_vec(txq);
}
@@ -793,7 +793,7 @@ ixgbe_rxq_vec_setup(struct ixgbe_rx_queue *rxq)
}
int __rte_cold
-ixgbe_txq_vec_setup(struct ixgbe_tx_queue *txq)
+ixgbe_txq_vec_setup(struct ci_tx_queue *txq)
{
return ixgbe_txq_vec_setup_default(txq, &vec_txq_ops);
}
--
2.43.0