From: Bruce Richardson <bruce.richardson@intel.com>
To: dev@dpdk.org
Cc: Bruce Richardson <bruce.richardson@intel.com>,
Ian Stokes <ian.stokes@intel.com>,
David Christensen <drc@linux.ibm.com>,
Konstantin Ananyev <konstantin.v.ananyev@yandex.ru>,
Wathsala Vithanage <wathsala.vithanage@arm.com>,
Vladimir Medvedkin <vladimir.medvedkin@intel.com>,
Anatoly Burakov <anatoly.burakov@intel.com>
Subject: [PATCH v4 05/24] drivers/net: add prefix for driver-specific structs
Date: Fri, 20 Dec 2024 14:39:02 +0000 [thread overview]
Message-ID: <20241220143925.609044-6-bruce.richardson@intel.com> (raw)
In-Reply-To: <20241220143925.609044-1-bruce.richardson@intel.com>
In preparation for merging the Tx structs for multiple drivers into a
single struct, rename the driver-specific pointers in each struct to
carry a driver prefix, so that the fields do not conflict when the
structs are later combined.
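
As a sketch of the pattern (taken from the i40e hunks below): the queue
fields keep their types and meaning, only the field names gain the
driver prefix, and every use of those fields is updated to match.

    /* drivers/net/i40e/i40e_rxtx.h, struct i40e_tx_queue, before */
    volatile struct i40e_tx_desc *tx_ring;  /**< TX ring virtual address */
    struct i40e_vsi *vsi;                   /**< the VSI this queue belongs to */

    /* after: the same fields, carrying the driver prefix */
    volatile struct i40e_tx_desc *i40e_tx_ring;  /**< TX ring virtual address */
    struct i40e_vsi *i40e_vsi;                   /**< the VSI this queue belongs to */

    /* uses are renamed accordingly, for example: */
    status = &txq->i40e_tx_ring[desc].cmd_type_offset_bsz;  /* was txq->tx_ring[...] */

The iavf, ice and ixgbe queue structs get the equivalent iavf_/ice_/
ixgbe_ prefixes in the hunks that follow.
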
Signed-off-by: Bruce Richardson <bruce.richardson@intel.com>
---
drivers/net/i40e/i40e_fdir.c | 6 +--
.../net/i40e/i40e_recycle_mbufs_vec_common.c | 2 +-
drivers/net/i40e/i40e_rxtx.c | 30 ++++++------
drivers/net/i40e/i40e_rxtx.h | 4 +-
drivers/net/i40e/i40e_rxtx_vec_altivec.c | 6 +--
drivers/net/i40e/i40e_rxtx_vec_avx2.c | 6 +--
drivers/net/i40e/i40e_rxtx_vec_avx512.c | 8 ++--
drivers/net/i40e/i40e_rxtx_vec_common.h | 2 +-
drivers/net/i40e/i40e_rxtx_vec_neon.c | 6 +--
drivers/net/i40e/i40e_rxtx_vec_sse.c | 6 +--
drivers/net/iavf/iavf_rxtx.c | 24 +++++-----
drivers/net/iavf/iavf_rxtx.h | 4 +-
drivers/net/iavf/iavf_rxtx_vec_avx2.c | 6 +--
drivers/net/iavf/iavf_rxtx_vec_avx512.c | 14 +++---
drivers/net/iavf/iavf_rxtx_vec_common.h | 2 +-
drivers/net/iavf/iavf_rxtx_vec_sse.c | 6 +--
drivers/net/ice/ice_dcf_ethdev.c | 4 +-
drivers/net/ice/ice_rxtx.c | 48 +++++++++----------
drivers/net/ice/ice_rxtx.h | 4 +-
drivers/net/ice/ice_rxtx_vec_avx2.c | 6 +--
drivers/net/ice/ice_rxtx_vec_avx512.c | 8 ++--
drivers/net/ice/ice_rxtx_vec_common.h | 4 +-
drivers/net/ice/ice_rxtx_vec_sse.c | 6 +--
.../ixgbe/ixgbe_recycle_mbufs_vec_common.c | 2 +-
drivers/net/ixgbe/ixgbe_rxtx.c | 22 ++++-----
drivers/net/ixgbe/ixgbe_rxtx.h | 2 +-
drivers/net/ixgbe/ixgbe_rxtx_vec_common.h | 6 +--
drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c | 6 +--
drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c | 6 +--
29 files changed, 128 insertions(+), 128 deletions(-)
diff --git a/drivers/net/i40e/i40e_fdir.c b/drivers/net/i40e/i40e_fdir.c
index 47f79ecf11..c600167634 100644
--- a/drivers/net/i40e/i40e_fdir.c
+++ b/drivers/net/i40e/i40e_fdir.c
@@ -1383,7 +1383,7 @@ i40e_find_available_buffer(struct rte_eth_dev *dev)
volatile struct i40e_tx_desc *tmp_txdp;
tmp_tail = txq->tx_tail;
- tmp_txdp = &txq->tx_ring[tmp_tail + 1];
+ tmp_txdp = &txq->i40e_tx_ring[tmp_tail + 1];
do {
if ((tmp_txdp->cmd_type_offset_bsz &
@@ -1640,7 +1640,7 @@ i40e_flow_fdir_filter_programming(struct i40e_pf *pf,
PMD_DRV_LOG(INFO, "filling filter programming descriptor.");
fdirdp = (volatile struct i40e_filter_program_desc *)
- (&txq->tx_ring[txq->tx_tail]);
+ (&txq->i40e_tx_ring[txq->tx_tail]);
fdirdp->qindex_flex_ptype_vsi =
rte_cpu_to_le_32((fdir_action->rx_queue <<
@@ -1710,7 +1710,7 @@ i40e_flow_fdir_filter_programming(struct i40e_pf *pf,
fdirdp->fd_id = rte_cpu_to_le_32(filter->soft_id);
PMD_DRV_LOG(INFO, "filling transmit descriptor.");
- txdp = &txq->tx_ring[txq->tx_tail + 1];
+ txdp = &txq->i40e_tx_ring[txq->tx_tail + 1];
txdp->buffer_addr = rte_cpu_to_le_64(pf->fdir.dma_addr[txq->tx_tail >> 1]);
td_cmd = I40E_TX_DESC_CMD_EOP |
diff --git a/drivers/net/i40e/i40e_recycle_mbufs_vec_common.c b/drivers/net/i40e/i40e_recycle_mbufs_vec_common.c
index 260d238ce4..8679e5c1fd 100644
--- a/drivers/net/i40e/i40e_recycle_mbufs_vec_common.c
+++ b/drivers/net/i40e/i40e_recycle_mbufs_vec_common.c
@@ -75,7 +75,7 @@ i40e_recycle_tx_mbufs_reuse_vec(void *tx_queue,
return 0;
/* check DD bits on threshold descriptor */
- if ((txq->tx_ring[txq->tx_next_dd].cmd_type_offset_bsz &
+ if ((txq->i40e_tx_ring[txq->tx_next_dd].cmd_type_offset_bsz &
rte_cpu_to_le_64(I40E_TXD_QW1_DTYPE_MASK)) !=
rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DESC_DONE))
return 0;
diff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c
index b0bb20fe9a..34ef931859 100644
--- a/drivers/net/i40e/i40e_rxtx.c
+++ b/drivers/net/i40e/i40e_rxtx.c
@@ -379,7 +379,7 @@ static inline int
i40e_xmit_cleanup(struct i40e_tx_queue *txq)
{
struct ci_tx_entry *sw_ring = txq->sw_ring;
- volatile struct i40e_tx_desc *txd = txq->tx_ring;
+ volatile struct i40e_tx_desc *txd = txq->i40e_tx_ring;
uint16_t last_desc_cleaned = txq->last_desc_cleaned;
uint16_t nb_tx_desc = txq->nb_tx_desc;
uint16_t desc_to_clean_to;
@@ -1103,7 +1103,7 @@ i40e_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
txq = tx_queue;
sw_ring = txq->sw_ring;
- txr = txq->tx_ring;
+ txr = txq->i40e_tx_ring;
tx_id = txq->tx_tail;
txe = &sw_ring[tx_id];
@@ -1338,7 +1338,7 @@ i40e_tx_free_bufs(struct i40e_tx_queue *txq)
const uint16_t k = RTE_ALIGN_FLOOR(tx_rs_thresh, RTE_I40E_TX_MAX_FREE_BUF_SZ);
const uint16_t m = tx_rs_thresh % RTE_I40E_TX_MAX_FREE_BUF_SZ;
- if ((txq->tx_ring[txq->tx_next_dd].cmd_type_offset_bsz &
+ if ((txq->i40e_tx_ring[txq->tx_next_dd].cmd_type_offset_bsz &
rte_cpu_to_le_64(I40E_TXD_QW1_DTYPE_MASK)) !=
rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DESC_DONE))
return 0;
@@ -1417,7 +1417,7 @@ i40e_tx_fill_hw_ring(struct i40e_tx_queue *txq,
struct rte_mbuf **pkts,
uint16_t nb_pkts)
{
- volatile struct i40e_tx_desc *txdp = &(txq->tx_ring[txq->tx_tail]);
+ volatile struct i40e_tx_desc *txdp = &txq->i40e_tx_ring[txq->tx_tail];
struct ci_tx_entry *txep = &txq->sw_ring[txq->tx_tail];
const int N_PER_LOOP = 4;
const int N_PER_LOOP_MASK = N_PER_LOOP - 1;
@@ -1445,7 +1445,7 @@ tx_xmit_pkts(struct i40e_tx_queue *txq,
struct rte_mbuf **tx_pkts,
uint16_t nb_pkts)
{
- volatile struct i40e_tx_desc *txr = txq->tx_ring;
+ volatile struct i40e_tx_desc *txr = txq->i40e_tx_ring;
uint16_t n = 0;
/**
@@ -1556,7 +1556,7 @@ i40e_xmit_pkts_check(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts
bool pkt_error = false;
const char *reason = NULL;
uint16_t good_pkts = nb_pkts;
- struct i40e_adapter *adapter = txq->vsi->adapter;
+ struct i40e_adapter *adapter = txq->i40e_vsi->adapter;
for (idx = 0; idx < nb_pkts; idx++) {
mb = tx_pkts[idx];
@@ -2329,7 +2329,7 @@ i40e_dev_tx_descriptor_status(void *tx_queue, uint16_t offset)
desc -= txq->nb_tx_desc;
}
- status = &txq->tx_ring[desc].cmd_type_offset_bsz;
+ status = &txq->i40e_tx_ring[desc].cmd_type_offset_bsz;
mask = rte_le_to_cpu_64(I40E_TXD_QW1_DTYPE_MASK);
expect = rte_cpu_to_le_64(
I40E_TX_DESC_DTYPE_DESC_DONE << I40E_TXD_QW1_DTYPE_SHIFT);
@@ -2527,7 +2527,7 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
/* Allocate TX hardware ring descriptors. */
ring_size = sizeof(struct i40e_tx_desc) * I40E_MAX_RING_DESC;
ring_size = RTE_ALIGN(ring_size, I40E_DMA_MEM_ALIGN);
- tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
+ tz = rte_eth_dma_zone_reserve(dev, "i40e_tx_ring", queue_idx,
ring_size, I40E_RING_BASE_ALIGN, socket_id);
if (!tz) {
i40e_tx_queue_release(txq);
@@ -2546,11 +2546,11 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
txq->reg_idx = reg_idx;
txq->port_id = dev->data->port_id;
txq->offloads = offloads;
- txq->vsi = vsi;
+ txq->i40e_vsi = vsi;
txq->tx_deferred_start = tx_conf->tx_deferred_start;
txq->tx_ring_dma = tz->iova;
- txq->tx_ring = (struct i40e_tx_desc *)tz->addr;
+ txq->i40e_tx_ring = (struct i40e_tx_desc *)tz->addr;
/* Allocate software ring */
txq->sw_ring =
@@ -2885,11 +2885,11 @@ i40e_reset_tx_queue(struct i40e_tx_queue *txq)
txe = txq->sw_ring;
size = sizeof(struct i40e_tx_desc) * txq->nb_tx_desc;
for (i = 0; i < size; i++)
- ((volatile char *)txq->tx_ring)[i] = 0;
+ ((volatile char *)txq->i40e_tx_ring)[i] = 0;
prev = (uint16_t)(txq->nb_tx_desc - 1);
for (i = 0; i < txq->nb_tx_desc; i++) {
- volatile struct i40e_tx_desc *txd = &txq->tx_ring[i];
+ volatile struct i40e_tx_desc *txd = &txq->i40e_tx_ring[i];
txd->cmd_type_offset_bsz =
rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DESC_DONE);
@@ -2914,7 +2914,7 @@ int
i40e_tx_queue_init(struct i40e_tx_queue *txq)
{
enum i40e_status_code err = I40E_SUCCESS;
- struct i40e_vsi *vsi = txq->vsi;
+ struct i40e_vsi *vsi = txq->i40e_vsi;
struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
uint16_t pf_q = txq->reg_idx;
struct i40e_hmc_obj_txq tx_ctx;
@@ -3207,10 +3207,10 @@ i40e_fdir_setup_tx_resources(struct i40e_pf *pf)
txq->nb_tx_desc = I40E_FDIR_NUM_TX_DESC;
txq->queue_id = I40E_FDIR_QUEUE_ID;
txq->reg_idx = pf->fdir.fdir_vsi->base_queue;
- txq->vsi = pf->fdir.fdir_vsi;
+ txq->i40e_vsi = pf->fdir.fdir_vsi;
txq->tx_ring_dma = tz->iova;
- txq->tx_ring = (struct i40e_tx_desc *)tz->addr;
+ txq->i40e_tx_ring = (struct i40e_tx_desc *)tz->addr;
/*
* don't need to allocate software ring and reset for the fdir
diff --git a/drivers/net/i40e/i40e_rxtx.h b/drivers/net/i40e/i40e_rxtx.h
index f420c98687..8315ee2f59 100644
--- a/drivers/net/i40e/i40e_rxtx.h
+++ b/drivers/net/i40e/i40e_rxtx.h
@@ -130,7 +130,7 @@ struct i40e_rx_queue {
struct i40e_tx_queue {
uint16_t nb_tx_desc; /**< number of TX descriptors */
rte_iova_t tx_ring_dma; /**< TX ring DMA address */
- volatile struct i40e_tx_desc *tx_ring; /**< TX ring virtual address */
+ volatile struct i40e_tx_desc *i40e_tx_ring; /**< TX ring virtual address */
struct ci_tx_entry *sw_ring; /**< virtual address of SW ring */
uint16_t tx_tail; /**< current value of tail register */
volatile uint8_t *qtx_tail; /**< register address of tail */
@@ -150,7 +150,7 @@ struct i40e_tx_queue {
uint16_t port_id; /**< Device port identifier. */
uint16_t queue_id; /**< TX queue index. */
uint16_t reg_idx;
- struct i40e_vsi *vsi; /**< the VSI this queue belongs to */
+ struct i40e_vsi *i40e_vsi; /**< the VSI this queue belongs to */
uint16_t tx_next_dd;
uint16_t tx_next_rs;
bool q_set; /**< indicate if tx queue has been configured */
diff --git a/drivers/net/i40e/i40e_rxtx_vec_altivec.c b/drivers/net/i40e/i40e_rxtx_vec_altivec.c
index 80f07a3e10..bf0e9ebd71 100644
--- a/drivers/net/i40e/i40e_rxtx_vec_altivec.c
+++ b/drivers/net/i40e/i40e_rxtx_vec_altivec.c
@@ -568,7 +568,7 @@ i40e_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
return 0;
tx_id = txq->tx_tail;
- txdp = &txq->tx_ring[tx_id];
+ txdp = &txq->i40e_tx_ring[tx_id];
txep = &txq->sw_ring[tx_id];
txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);
@@ -588,7 +588,7 @@ i40e_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
/* avoid reach the end of ring */
- txdp = &txq->tx_ring[tx_id];
+ txdp = &txq->i40e_tx_ring[tx_id];
txep = &txq->sw_ring[tx_id];
}
@@ -598,7 +598,7 @@ i40e_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
tx_id = (uint16_t)(tx_id + nb_commit);
if (tx_id > txq->tx_next_rs) {
- txq->tx_ring[txq->tx_next_rs].cmd_type_offset_bsz |=
+ txq->i40e_tx_ring[txq->tx_next_rs].cmd_type_offset_bsz |=
rte_cpu_to_le_64(((uint64_t)I40E_TX_DESC_CMD_RS) <<
I40E_TXD_QW1_CMD_SHIFT);
txq->tx_next_rs =
diff --git a/drivers/net/i40e/i40e_rxtx_vec_avx2.c b/drivers/net/i40e/i40e_rxtx_vec_avx2.c
index b26bae4757..5042e348db 100644
--- a/drivers/net/i40e/i40e_rxtx_vec_avx2.c
+++ b/drivers/net/i40e/i40e_rxtx_vec_avx2.c
@@ -758,7 +758,7 @@ i40e_xmit_fixed_burst_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts,
return 0;
tx_id = txq->tx_tail;
- txdp = &txq->tx_ring[tx_id];
+ txdp = &txq->i40e_tx_ring[tx_id];
txep = &txq->sw_ring[tx_id];
txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);
@@ -779,7 +779,7 @@ i40e_xmit_fixed_burst_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts,
txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
/* avoid reach the end of ring */
- txdp = &txq->tx_ring[tx_id];
+ txdp = &txq->i40e_tx_ring[tx_id];
txep = &txq->sw_ring[tx_id];
}
@@ -789,7 +789,7 @@ i40e_xmit_fixed_burst_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts,
tx_id = (uint16_t)(tx_id + nb_commit);
if (tx_id > txq->tx_next_rs) {
- txq->tx_ring[txq->tx_next_rs].cmd_type_offset_bsz |=
+ txq->i40e_tx_ring[txq->tx_next_rs].cmd_type_offset_bsz |=
rte_cpu_to_le_64(((uint64_t)I40E_TX_DESC_CMD_RS) <<
I40E_TXD_QW1_CMD_SHIFT);
txq->tx_next_rs =
diff --git a/drivers/net/i40e/i40e_rxtx_vec_avx512.c b/drivers/net/i40e/i40e_rxtx_vec_avx512.c
index 8b8a16daa8..04fbe3b2e3 100644
--- a/drivers/net/i40e/i40e_rxtx_vec_avx512.c
+++ b/drivers/net/i40e/i40e_rxtx_vec_avx512.c
@@ -764,7 +764,7 @@ i40e_tx_free_bufs_avx512(struct i40e_tx_queue *txq)
struct rte_mbuf *m, *free[RTE_I40E_TX_MAX_FREE_BUF_SZ];
/* check DD bits on threshold descriptor */
- if ((txq->tx_ring[txq->tx_next_dd].cmd_type_offset_bsz &
+ if ((txq->i40e_tx_ring[txq->tx_next_dd].cmd_type_offset_bsz &
rte_cpu_to_le_64(I40E_TXD_QW1_DTYPE_MASK)) !=
rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DESC_DONE))
return 0;
@@ -948,7 +948,7 @@ i40e_xmit_fixed_burst_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
return 0;
tx_id = txq->tx_tail;
- txdp = &txq->tx_ring[tx_id];
+ txdp = &txq->i40e_tx_ring[tx_id];
txep = (void *)txq->sw_ring;
txep += tx_id;
@@ -970,7 +970,7 @@ i40e_xmit_fixed_burst_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
/* avoid reach the end of ring */
- txdp = txq->tx_ring;
+ txdp = txq->i40e_tx_ring;
txep = (void *)txq->sw_ring;
}
@@ -980,7 +980,7 @@ i40e_xmit_fixed_burst_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
tx_id = (uint16_t)(tx_id + nb_commit);
if (tx_id > txq->tx_next_rs) {
- txq->tx_ring[txq->tx_next_rs].cmd_type_offset_bsz |=
+ txq->i40e_tx_ring[txq->tx_next_rs].cmd_type_offset_bsz |=
rte_cpu_to_le_64(((uint64_t)I40E_TX_DESC_CMD_RS) <<
I40E_TXD_QW1_CMD_SHIFT);
txq->tx_next_rs =
diff --git a/drivers/net/i40e/i40e_rxtx_vec_common.h b/drivers/net/i40e/i40e_rxtx_vec_common.h
index 325e99c1a4..e81f958361 100644
--- a/drivers/net/i40e/i40e_rxtx_vec_common.h
+++ b/drivers/net/i40e/i40e_rxtx_vec_common.h
@@ -26,7 +26,7 @@ i40e_tx_free_bufs(struct i40e_tx_queue *txq)
struct rte_mbuf *m, *free[RTE_I40E_TX_MAX_FREE_BUF_SZ];
/* check DD bits on threshold descriptor */
- if ((txq->tx_ring[txq->tx_next_dd].cmd_type_offset_bsz &
+ if ((txq->i40e_tx_ring[txq->tx_next_dd].cmd_type_offset_bsz &
rte_cpu_to_le_64(I40E_TXD_QW1_DTYPE_MASK)) !=
rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DESC_DONE))
return 0;
diff --git a/drivers/net/i40e/i40e_rxtx_vec_neon.c b/drivers/net/i40e/i40e_rxtx_vec_neon.c
index 26bc345a0a..05191e4884 100644
--- a/drivers/net/i40e/i40e_rxtx_vec_neon.c
+++ b/drivers/net/i40e/i40e_rxtx_vec_neon.c
@@ -695,7 +695,7 @@ i40e_xmit_fixed_burst_vec(void *__rte_restrict tx_queue,
return 0;
tx_id = txq->tx_tail;
- txdp = &txq->tx_ring[tx_id];
+ txdp = &txq->i40e_tx_ring[tx_id];
txep = &txq->sw_ring[tx_id];
txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);
@@ -715,7 +715,7 @@ i40e_xmit_fixed_burst_vec(void *__rte_restrict tx_queue,
txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
/* avoid reach the end of ring */
- txdp = &txq->tx_ring[tx_id];
+ txdp = &txq->i40e_tx_ring[tx_id];
txep = &txq->sw_ring[tx_id];
}
@@ -725,7 +725,7 @@ i40e_xmit_fixed_burst_vec(void *__rte_restrict tx_queue,
tx_id = (uint16_t)(tx_id + nb_commit);
if (tx_id > txq->tx_next_rs) {
- txq->tx_ring[txq->tx_next_rs].cmd_type_offset_bsz |=
+ txq->i40e_tx_ring[txq->tx_next_rs].cmd_type_offset_bsz |=
rte_cpu_to_le_64(((uint64_t)I40E_TX_DESC_CMD_RS) <<
I40E_TXD_QW1_CMD_SHIFT);
txq->tx_next_rs =
diff --git a/drivers/net/i40e/i40e_rxtx_vec_sse.c b/drivers/net/i40e/i40e_rxtx_vec_sse.c
index ebc32b0d27..d81b553842 100644
--- a/drivers/net/i40e/i40e_rxtx_vec_sse.c
+++ b/drivers/net/i40e/i40e_rxtx_vec_sse.c
@@ -714,7 +714,7 @@ i40e_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
return 0;
tx_id = txq->tx_tail;
- txdp = &txq->tx_ring[tx_id];
+ txdp = &txq->i40e_tx_ring[tx_id];
txep = &txq->sw_ring[tx_id];
txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);
@@ -734,7 +734,7 @@ i40e_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
/* avoid reach the end of ring */
- txdp = &txq->tx_ring[tx_id];
+ txdp = &txq->i40e_tx_ring[tx_id];
txep = &txq->sw_ring[tx_id];
}
@@ -744,7 +744,7 @@ i40e_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
tx_id = (uint16_t)(tx_id + nb_commit);
if (tx_id > txq->tx_next_rs) {
- txq->tx_ring[txq->tx_next_rs].cmd_type_offset_bsz |=
+ txq->i40e_tx_ring[txq->tx_next_rs].cmd_type_offset_bsz |=
rte_cpu_to_le_64(((uint64_t)I40E_TX_DESC_CMD_RS) <<
I40E_TXD_QW1_CMD_SHIFT);
txq->tx_next_rs =
diff --git a/drivers/net/iavf/iavf_rxtx.c b/drivers/net/iavf/iavf_rxtx.c
index adaaeb4625..6eda91e76b 100644
--- a/drivers/net/iavf/iavf_rxtx.c
+++ b/drivers/net/iavf/iavf_rxtx.c
@@ -296,11 +296,11 @@ reset_tx_queue(struct iavf_tx_queue *txq)
txe = txq->sw_ring;
size = sizeof(struct iavf_tx_desc) * txq->nb_tx_desc;
for (i = 0; i < size; i++)
- ((volatile char *)txq->tx_ring)[i] = 0;
+ ((volatile char *)txq->iavf_tx_ring)[i] = 0;
prev = (uint16_t)(txq->nb_tx_desc - 1);
for (i = 0; i < txq->nb_tx_desc; i++) {
- txq->tx_ring[i].cmd_type_offset_bsz =
+ txq->iavf_tx_ring[i].cmd_type_offset_bsz =
rte_cpu_to_le_64(IAVF_TX_DESC_DTYPE_DESC_DONE);
txe[i].mbuf = NULL;
txe[i].last_id = i;
@@ -851,7 +851,7 @@ iavf_dev_tx_queue_setup(struct rte_eth_dev *dev,
txq->port_id = dev->data->port_id;
txq->offloads = offloads;
txq->tx_deferred_start = tx_conf->tx_deferred_start;
- txq->vsi = vsi;
+ txq->iavf_vsi = vsi;
if (iavf_ipsec_crypto_supported(adapter))
txq->ipsec_crypto_pkt_md_offset =
@@ -872,7 +872,7 @@ iavf_dev_tx_queue_setup(struct rte_eth_dev *dev,
/* Allocate TX hardware ring descriptors. */
ring_size = sizeof(struct iavf_tx_desc) * IAVF_MAX_RING_DESC;
ring_size = RTE_ALIGN(ring_size, IAVF_DMA_MEM_ALIGN);
- mz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
+ mz = rte_eth_dma_zone_reserve(dev, "iavf_tx_ring", queue_idx,
ring_size, IAVF_RING_BASE_ALIGN,
socket_id);
if (!mz) {
@@ -882,7 +882,7 @@ iavf_dev_tx_queue_setup(struct rte_eth_dev *dev,
return -ENOMEM;
}
txq->tx_ring_dma = mz->iova;
- txq->tx_ring = (struct iavf_tx_desc *)mz->addr;
+ txq->iavf_tx_ring = (struct iavf_tx_desc *)mz->addr;
txq->mz = mz;
reset_tx_queue(txq);
@@ -2385,7 +2385,7 @@ iavf_xmit_cleanup(struct iavf_tx_queue *txq)
uint16_t desc_to_clean_to;
uint16_t nb_tx_to_clean;
- volatile struct iavf_tx_desc *txd = txq->tx_ring;
+ volatile struct iavf_tx_desc *txd = txq->iavf_tx_ring;
desc_to_clean_to = (uint16_t)(last_desc_cleaned + txq->tx_rs_thresh);
if (desc_to_clean_to >= nb_tx_desc)
@@ -2796,7 +2796,7 @@ uint16_t
iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
struct iavf_tx_queue *txq = tx_queue;
- volatile struct iavf_tx_desc *txr = txq->tx_ring;
+ volatile struct iavf_tx_desc *txr = txq->iavf_tx_ring;
struct ci_tx_entry *txe_ring = txq->sw_ring;
struct ci_tx_entry *txe, *txn;
struct rte_mbuf *mb, *mb_seg;
@@ -3803,10 +3803,10 @@ iavf_xmit_pkts_no_poll(void *tx_queue, struct rte_mbuf **tx_pkts,
struct iavf_tx_queue *txq = tx_queue;
enum iavf_tx_burst_type tx_burst_type;
- if (!txq->vsi || txq->vsi->adapter->no_poll)
+ if (!txq->iavf_vsi || txq->iavf_vsi->adapter->no_poll)
return 0;
- tx_burst_type = txq->vsi->adapter->tx_burst_type;
+ tx_burst_type = txq->iavf_vsi->adapter->tx_burst_type;
return iavf_tx_pkt_burst_ops[tx_burst_type](tx_queue,
tx_pkts, nb_pkts);
@@ -3824,9 +3824,9 @@ iavf_xmit_pkts_check(void *tx_queue, struct rte_mbuf **tx_pkts,
const char *reason = NULL;
bool pkt_error = false;
struct iavf_tx_queue *txq = tx_queue;
- struct iavf_adapter *adapter = txq->vsi->adapter;
+ struct iavf_adapter *adapter = txq->iavf_vsi->adapter;
enum iavf_tx_burst_type tx_burst_type =
- txq->vsi->adapter->tx_burst_type;
+ txq->iavf_vsi->adapter->tx_burst_type;
for (idx = 0; idx < nb_pkts; idx++) {
mb = tx_pkts[idx];
@@ -4440,7 +4440,7 @@ iavf_dev_tx_desc_status(void *tx_queue, uint16_t offset)
desc -= txq->nb_tx_desc;
}
- status = &txq->tx_ring[desc].cmd_type_offset_bsz;
+ status = &txq->iavf_tx_ring[desc].cmd_type_offset_bsz;
mask = rte_le_to_cpu_64(IAVF_TXD_QW1_DTYPE_MASK);
expect = rte_cpu_to_le_64(
IAVF_TX_DESC_DTYPE_DESC_DONE << IAVF_TXD_QW1_DTYPE_SHIFT);
diff --git a/drivers/net/iavf/iavf_rxtx.h b/drivers/net/iavf/iavf_rxtx.h
index 44e2de731c..cc1eaaf54c 100644
--- a/drivers/net/iavf/iavf_rxtx.h
+++ b/drivers/net/iavf/iavf_rxtx.h
@@ -276,7 +276,7 @@ struct iavf_rx_queue {
/* Structure associated with each TX queue. */
struct iavf_tx_queue {
const struct rte_memzone *mz; /* memzone for Tx ring */
- volatile struct iavf_tx_desc *tx_ring; /* Tx ring virtual address */
+ volatile struct iavf_tx_desc *iavf_tx_ring; /* Tx ring virtual address */
rte_iova_t tx_ring_dma; /* Tx ring DMA address */
struct ci_tx_entry *sw_ring; /* address array of SW ring */
uint16_t nb_tx_desc; /* ring length */
@@ -289,7 +289,7 @@ struct iavf_tx_queue {
uint16_t tx_free_thresh;
uint16_t tx_rs_thresh;
uint8_t rel_mbufs_type;
- struct iavf_vsi *vsi; /**< the VSI this queue belongs to */
+ struct iavf_vsi *iavf_vsi; /**< the VSI this queue belongs to */
uint16_t port_id;
uint16_t queue_id;
diff --git a/drivers/net/iavf/iavf_rxtx_vec_avx2.c b/drivers/net/iavf/iavf_rxtx_vec_avx2.c
index 42e09a2adf..f33ceceee1 100644
--- a/drivers/net/iavf/iavf_rxtx_vec_avx2.c
+++ b/drivers/net/iavf/iavf_rxtx_vec_avx2.c
@@ -1751,7 +1751,7 @@ iavf_xmit_fixed_burst_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts,
nb_commit = nb_pkts;
tx_id = txq->tx_tail;
- txdp = &txq->tx_ring[tx_id];
+ txdp = &txq->iavf_tx_ring[tx_id];
txep = &txq->sw_ring[tx_id];
txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);
@@ -1772,7 +1772,7 @@ iavf_xmit_fixed_burst_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts,
txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
/* avoid reach the end of ring */
- txdp = &txq->tx_ring[tx_id];
+ txdp = &txq->iavf_tx_ring[tx_id];
txep = &txq->sw_ring[tx_id];
}
@@ -1782,7 +1782,7 @@ iavf_xmit_fixed_burst_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts,
tx_id = (uint16_t)(tx_id + nb_commit);
if (tx_id > txq->tx_next_rs) {
- txq->tx_ring[txq->tx_next_rs].cmd_type_offset_bsz |=
+ txq->iavf_tx_ring[txq->tx_next_rs].cmd_type_offset_bsz |=
rte_cpu_to_le_64(((uint64_t)IAVF_TX_DESC_CMD_RS) <<
IAVF_TXD_QW1_CMD_SHIFT);
txq->tx_next_rs =
diff --git a/drivers/net/iavf/iavf_rxtx_vec_avx512.c b/drivers/net/iavf/iavf_rxtx_vec_avx512.c
index dc1fef24f0..97420a75fd 100644
--- a/drivers/net/iavf/iavf_rxtx_vec_avx512.c
+++ b/drivers/net/iavf/iavf_rxtx_vec_avx512.c
@@ -1854,7 +1854,7 @@ iavf_tx_free_bufs_avx512(struct iavf_tx_queue *txq)
struct rte_mbuf *m, *free[IAVF_VPMD_TX_MAX_FREE_BUF];
/* check DD bits on threshold descriptor */
- if ((txq->tx_ring[txq->tx_next_dd].cmd_type_offset_bsz &
+ if ((txq->iavf_tx_ring[txq->tx_next_dd].cmd_type_offset_bsz &
rte_cpu_to_le_64(IAVF_TXD_QW1_DTYPE_MASK)) !=
rte_cpu_to_le_64(IAVF_TX_DESC_DTYPE_DESC_DONE))
return 0;
@@ -2328,7 +2328,7 @@ iavf_xmit_fixed_burst_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
nb_commit = nb_pkts;
tx_id = txq->tx_tail;
- txdp = &txq->tx_ring[tx_id];
+ txdp = &txq->iavf_tx_ring[tx_id];
txep = (void *)txq->sw_ring;
txep += tx_id;
@@ -2350,7 +2350,7 @@ iavf_xmit_fixed_burst_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
/* avoid reach the end of ring */
- txdp = &txq->tx_ring[tx_id];
+ txdp = &txq->iavf_tx_ring[tx_id];
txep = (void *)txq->sw_ring;
txep += tx_id;
}
@@ -2361,7 +2361,7 @@ iavf_xmit_fixed_burst_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
tx_id = (uint16_t)(tx_id + nb_commit);
if (tx_id > txq->tx_next_rs) {
- txq->tx_ring[txq->tx_next_rs].cmd_type_offset_bsz |=
+ txq->iavf_tx_ring[txq->tx_next_rs].cmd_type_offset_bsz |=
rte_cpu_to_le_64(((uint64_t)IAVF_TX_DESC_CMD_RS) <<
IAVF_TXD_QW1_CMD_SHIFT);
txq->tx_next_rs =
@@ -2397,7 +2397,7 @@ iavf_xmit_fixed_burst_vec_avx512_ctx(void *tx_queue, struct rte_mbuf **tx_pkts,
nb_pkts = nb_commit >> 1;
tx_id = txq->tx_tail;
- txdp = &txq->tx_ring[tx_id];
+ txdp = &txq->iavf_tx_ring[tx_id];
txep = (void *)txq->sw_ring;
txep += (tx_id >> 1);
@@ -2418,7 +2418,7 @@ iavf_xmit_fixed_burst_vec_avx512_ctx(void *tx_queue, struct rte_mbuf **tx_pkts,
txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
tx_id = 0;
/* avoid reach the end of ring */
- txdp = txq->tx_ring;
+ txdp = txq->iavf_tx_ring;
txep = (void *)txq->sw_ring;
}
@@ -2429,7 +2429,7 @@ iavf_xmit_fixed_burst_vec_avx512_ctx(void *tx_queue, struct rte_mbuf **tx_pkts,
tx_id = (uint16_t)(tx_id + nb_commit);
if (tx_id > txq->tx_next_rs) {
- txq->tx_ring[txq->tx_next_rs].cmd_type_offset_bsz |=
+ txq->iavf_tx_ring[txq->tx_next_rs].cmd_type_offset_bsz |=
rte_cpu_to_le_64(((uint64_t)IAVF_TX_DESC_CMD_RS) <<
IAVF_TXD_QW1_CMD_SHIFT);
txq->tx_next_rs =
diff --git a/drivers/net/iavf/iavf_rxtx_vec_common.h b/drivers/net/iavf/iavf_rxtx_vec_common.h
index ff24055c34..6305c8cdd6 100644
--- a/drivers/net/iavf/iavf_rxtx_vec_common.h
+++ b/drivers/net/iavf/iavf_rxtx_vec_common.h
@@ -26,7 +26,7 @@ iavf_tx_free_bufs(struct iavf_tx_queue *txq)
struct rte_mbuf *m, *free[IAVF_VPMD_TX_MAX_FREE_BUF];
/* check DD bits on threshold descriptor */
- if ((txq->tx_ring[txq->tx_next_dd].cmd_type_offset_bsz &
+ if ((txq->iavf_tx_ring[txq->tx_next_dd].cmd_type_offset_bsz &
rte_cpu_to_le_64(IAVF_TXD_QW1_DTYPE_MASK)) !=
rte_cpu_to_le_64(IAVF_TX_DESC_DTYPE_DESC_DONE))
return 0;
diff --git a/drivers/net/iavf/iavf_rxtx_vec_sse.c b/drivers/net/iavf/iavf_rxtx_vec_sse.c
index ed8455d669..64c3bf0eaa 100644
--- a/drivers/net/iavf/iavf_rxtx_vec_sse.c
+++ b/drivers/net/iavf/iavf_rxtx_vec_sse.c
@@ -1383,7 +1383,7 @@ iavf_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
nb_commit = nb_pkts;
tx_id = txq->tx_tail;
- txdp = &txq->tx_ring[tx_id];
+ txdp = &txq->iavf_tx_ring[tx_id];
txep = &txq->sw_ring[tx_id];
txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);
@@ -1403,7 +1403,7 @@ iavf_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
/* avoid reach the end of ring */
- txdp = &txq->tx_ring[tx_id];
+ txdp = &txq->iavf_tx_ring[tx_id];
txep = &txq->sw_ring[tx_id];
}
@@ -1413,7 +1413,7 @@ iavf_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
tx_id = (uint16_t)(tx_id + nb_commit);
if (tx_id > txq->tx_next_rs) {
- txq->tx_ring[txq->tx_next_rs].cmd_type_offset_bsz |=
+ txq->iavf_tx_ring[txq->tx_next_rs].cmd_type_offset_bsz |=
rte_cpu_to_le_64(((uint64_t)IAVF_TX_DESC_CMD_RS) <<
IAVF_TXD_QW1_CMD_SHIFT);
txq->tx_next_rs =
diff --git a/drivers/net/ice/ice_dcf_ethdev.c b/drivers/net/ice/ice_dcf_ethdev.c
index 4b98e4066b..4ffd1f5567 100644
--- a/drivers/net/ice/ice_dcf_ethdev.c
+++ b/drivers/net/ice/ice_dcf_ethdev.c
@@ -401,11 +401,11 @@ reset_tx_queue(struct ice_tx_queue *txq)
txe = txq->sw_ring;
size = sizeof(struct ice_tx_desc) * txq->nb_tx_desc;
for (i = 0; i < size; i++)
- ((volatile char *)txq->tx_ring)[i] = 0;
+ ((volatile char *)txq->ice_tx_ring)[i] = 0;
prev = (uint16_t)(txq->nb_tx_desc - 1);
for (i = 0; i < txq->nb_tx_desc; i++) {
- txq->tx_ring[i].cmd_type_offset_bsz =
+ txq->ice_tx_ring[i].cmd_type_offset_bsz =
rte_cpu_to_le_64(IAVF_TX_DESC_DTYPE_DESC_DONE);
txe[i].mbuf = NULL;
txe[i].last_id = i;
diff --git a/drivers/net/ice/ice_rxtx.c b/drivers/net/ice/ice_rxtx.c
index d584086a36..5ec92f6d0c 100644
--- a/drivers/net/ice/ice_rxtx.c
+++ b/drivers/net/ice/ice_rxtx.c
@@ -776,7 +776,7 @@ ice_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
if (!txq_elem)
return -ENOMEM;
- vsi = txq->vsi;
+ vsi = txq->ice_vsi;
hw = ICE_VSI_TO_HW(vsi);
pf = ICE_VSI_TO_PF(vsi);
@@ -966,7 +966,7 @@ ice_fdir_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
if (!txq_elem)
return -ENOMEM;
- vsi = txq->vsi;
+ vsi = txq->ice_vsi;
hw = ICE_VSI_TO_HW(vsi);
memset(&tx_ctx, 0, sizeof(tx_ctx));
@@ -1039,11 +1039,11 @@ ice_reset_tx_queue(struct ice_tx_queue *txq)
txe = txq->sw_ring;
size = sizeof(struct ice_tx_desc) * txq->nb_tx_desc;
for (i = 0; i < size; i++)
- ((volatile char *)txq->tx_ring)[i] = 0;
+ ((volatile char *)txq->ice_tx_ring)[i] = 0;
prev = (uint16_t)(txq->nb_tx_desc - 1);
for (i = 0; i < txq->nb_tx_desc; i++) {
- volatile struct ice_tx_desc *txd = &txq->tx_ring[i];
+ volatile struct ice_tx_desc *txd = &txq->ice_tx_ring[i];
txd->cmd_type_offset_bsz =
rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE);
@@ -1153,7 +1153,7 @@ ice_fdir_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
PMD_DRV_LOG(INFO, "TX queue %u not started", tx_queue_id);
return 0;
}
- vsi = txq->vsi;
+ vsi = txq->ice_vsi;
q_ids[0] = txq->reg_idx;
q_teids[0] = txq->q_teid;
@@ -1479,7 +1479,7 @@ ice_tx_queue_setup(struct rte_eth_dev *dev,
/* Allocate TX hardware ring descriptors. */
ring_size = sizeof(struct ice_tx_desc) * ICE_MAX_RING_DESC;
ring_size = RTE_ALIGN(ring_size, ICE_DMA_MEM_ALIGN);
- tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
+ tz = rte_eth_dma_zone_reserve(dev, "ice_tx_ring", queue_idx,
ring_size, ICE_RING_BASE_ALIGN,
socket_id);
if (!tz) {
@@ -1500,11 +1500,11 @@ ice_tx_queue_setup(struct rte_eth_dev *dev,
txq->reg_idx = vsi->base_queue + queue_idx;
txq->port_id = dev->data->port_id;
txq->offloads = offloads;
- txq->vsi = vsi;
+ txq->ice_vsi = vsi;
txq->tx_deferred_start = tx_conf->tx_deferred_start;
txq->tx_ring_dma = tz->iova;
- txq->tx_ring = tz->addr;
+ txq->ice_tx_ring = tz->addr;
/* Allocate software ring */
txq->sw_ring =
@@ -2372,7 +2372,7 @@ ice_tx_descriptor_status(void *tx_queue, uint16_t offset)
desc -= txq->nb_tx_desc;
}
- status = &txq->tx_ring[desc].cmd_type_offset_bsz;
+ status = &txq->ice_tx_ring[desc].cmd_type_offset_bsz;
mask = rte_cpu_to_le_64(ICE_TXD_QW1_DTYPE_M);
expect = rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE <<
ICE_TXD_QW1_DTYPE_S);
@@ -2452,10 +2452,10 @@ ice_fdir_setup_tx_resources(struct ice_pf *pf)
txq->nb_tx_desc = ICE_FDIR_NUM_TX_DESC;
txq->queue_id = ICE_FDIR_QUEUE_ID;
txq->reg_idx = pf->fdir.fdir_vsi->base_queue;
- txq->vsi = pf->fdir.fdir_vsi;
+ txq->ice_vsi = pf->fdir.fdir_vsi;
txq->tx_ring_dma = tz->iova;
- txq->tx_ring = (struct ice_tx_desc *)tz->addr;
+ txq->ice_tx_ring = (struct ice_tx_desc *)tz->addr;
/*
* don't need to allocate software ring and reset for the fdir
* program queue just set the queue has been configured.
@@ -2838,7 +2838,7 @@ static inline int
ice_xmit_cleanup(struct ice_tx_queue *txq)
{
struct ci_tx_entry *sw_ring = txq->sw_ring;
- volatile struct ice_tx_desc *txd = txq->tx_ring;
+ volatile struct ice_tx_desc *txd = txq->ice_tx_ring;
uint16_t last_desc_cleaned = txq->last_desc_cleaned;
uint16_t nb_tx_desc = txq->nb_tx_desc;
uint16_t desc_to_clean_to;
@@ -2959,7 +2959,7 @@ uint16_t
ice_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
struct ice_tx_queue *txq;
- volatile struct ice_tx_desc *tx_ring;
+ volatile struct ice_tx_desc *ice_tx_ring;
volatile struct ice_tx_desc *txd;
struct ci_tx_entry *sw_ring;
struct ci_tx_entry *txe, *txn;
@@ -2981,7 +2981,7 @@ ice_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
txq = tx_queue;
sw_ring = txq->sw_ring;
- tx_ring = txq->tx_ring;
+ ice_tx_ring = txq->ice_tx_ring;
tx_id = txq->tx_tail;
txe = &sw_ring[tx_id];
@@ -3064,7 +3064,7 @@ ice_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
/* Setup TX context descriptor if required */
volatile struct ice_tx_ctx_desc *ctx_txd =
(volatile struct ice_tx_ctx_desc *)
- &tx_ring[tx_id];
+ &ice_tx_ring[tx_id];
uint16_t cd_l2tag2 = 0;
uint64_t cd_type_cmd_tso_mss = ICE_TX_DESC_DTYPE_CTX;
@@ -3082,7 +3082,7 @@ ice_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
cd_type_cmd_tso_mss |=
((uint64_t)ICE_TX_CTX_DESC_TSYN <<
ICE_TXD_CTX_QW1_CMD_S) |
- (((uint64_t)txq->vsi->adapter->ptp_tx_index <<
+ (((uint64_t)txq->ice_vsi->adapter->ptp_tx_index <<
ICE_TXD_CTX_QW1_TSYN_S) & ICE_TXD_CTX_QW1_TSYN_M);
ctx_txd->tunneling_params =
@@ -3106,7 +3106,7 @@ ice_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
m_seg = tx_pkt;
do {
- txd = &tx_ring[tx_id];
+ txd = &ice_tx_ring[tx_id];
txn = &sw_ring[txe->next_id];
if (txe->mbuf)
@@ -3134,7 +3134,7 @@ ice_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
txe->last_id = tx_last;
tx_id = txe->next_id;
txe = txn;
- txd = &tx_ring[tx_id];
+ txd = &ice_tx_ring[tx_id];
txn = &sw_ring[txe->next_id];
}
@@ -3187,7 +3187,7 @@ ice_tx_free_bufs(struct ice_tx_queue *txq)
struct ci_tx_entry *txep;
uint16_t i;
- if ((txq->tx_ring[txq->tx_next_dd].cmd_type_offset_bsz &
+ if ((txq->ice_tx_ring[txq->tx_next_dd].cmd_type_offset_bsz &
rte_cpu_to_le_64(ICE_TXD_QW1_DTYPE_M)) !=
rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE))
return 0;
@@ -3360,7 +3360,7 @@ static inline void
ice_tx_fill_hw_ring(struct ice_tx_queue *txq, struct rte_mbuf **pkts,
uint16_t nb_pkts)
{
- volatile struct ice_tx_desc *txdp = &txq->tx_ring[txq->tx_tail];
+ volatile struct ice_tx_desc *txdp = &txq->ice_tx_ring[txq->tx_tail];
struct ci_tx_entry *txep = &txq->sw_ring[txq->tx_tail];
const int N_PER_LOOP = 4;
const int N_PER_LOOP_MASK = N_PER_LOOP - 1;
@@ -3393,7 +3393,7 @@ tx_xmit_pkts(struct ice_tx_queue *txq,
struct rte_mbuf **tx_pkts,
uint16_t nb_pkts)
{
- volatile struct ice_tx_desc *txr = txq->tx_ring;
+ volatile struct ice_tx_desc *txr = txq->ice_tx_ring;
uint16_t n = 0;
/**
@@ -3722,7 +3722,7 @@ ice_xmit_pkts_check(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
bool pkt_error = false;
uint16_t good_pkts = nb_pkts;
const char *reason = NULL;
- struct ice_adapter *adapter = txq->vsi->adapter;
+ struct ice_adapter *adapter = txq->ice_vsi->adapter;
uint64_t ol_flags;
for (idx = 0; idx < nb_pkts; idx++) {
@@ -4701,11 +4701,11 @@ ice_fdir_programming(struct ice_pf *pf, struct ice_fltr_desc *fdir_desc)
uint16_t i;
fdirdp = (volatile struct ice_fltr_desc *)
- (&txq->tx_ring[txq->tx_tail]);
+ (&txq->ice_tx_ring[txq->tx_tail]);
fdirdp->qidx_compq_space_stat = fdir_desc->qidx_compq_space_stat;
fdirdp->dtype_cmd_vsi_fdid = fdir_desc->dtype_cmd_vsi_fdid;
- txdp = &txq->tx_ring[txq->tx_tail + 1];
+ txdp = &txq->ice_tx_ring[txq->tx_tail + 1];
txdp->buf_addr = rte_cpu_to_le_64(pf->fdir.dma_addr);
td_cmd = ICE_TX_DESC_CMD_EOP |
ICE_TX_DESC_CMD_RS |
diff --git a/drivers/net/ice/ice_rxtx.h b/drivers/net/ice/ice_rxtx.h
index 8d1a1a8676..3257f449f5 100644
--- a/drivers/net/ice/ice_rxtx.h
+++ b/drivers/net/ice/ice_rxtx.h
@@ -148,7 +148,7 @@ struct ice_rx_queue {
struct ice_tx_queue {
uint16_t nb_tx_desc; /* number of TX descriptors */
rte_iova_t tx_ring_dma; /* TX ring DMA address */
- volatile struct ice_tx_desc *tx_ring; /* TX ring virtual address */
+ volatile struct ice_tx_desc *ice_tx_ring; /* TX ring virtual address */
struct ci_tx_entry *sw_ring; /* virtual address of SW ring */
uint16_t tx_tail; /* current value of tail register */
volatile uint8_t *qtx_tail; /* register address of tail */
@@ -171,7 +171,7 @@ struct ice_tx_queue {
uint32_t q_teid; /* TX schedule node id. */
uint16_t reg_idx;
uint64_t offloads;
- struct ice_vsi *vsi; /* the VSI this queue belongs to */
+ struct ice_vsi *ice_vsi; /* the VSI this queue belongs to */
uint16_t tx_next_dd;
uint16_t tx_next_rs;
uint64_t mbuf_errors;
diff --git a/drivers/net/ice/ice_rxtx_vec_avx2.c b/drivers/net/ice/ice_rxtx_vec_avx2.c
index 336697e72d..dde07ac99e 100644
--- a/drivers/net/ice/ice_rxtx_vec_avx2.c
+++ b/drivers/net/ice/ice_rxtx_vec_avx2.c
@@ -874,7 +874,7 @@ ice_xmit_fixed_burst_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts,
return 0;
tx_id = txq->tx_tail;
- txdp = &txq->tx_ring[tx_id];
+ txdp = &txq->ice_tx_ring[tx_id];
txep = &txq->sw_ring[tx_id];
txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);
@@ -895,7 +895,7 @@ ice_xmit_fixed_burst_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts,
txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
/* avoid reach the end of ring */
- txdp = &txq->tx_ring[tx_id];
+ txdp = &txq->ice_tx_ring[tx_id];
txep = &txq->sw_ring[tx_id];
}
@@ -905,7 +905,7 @@ ice_xmit_fixed_burst_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts,
tx_id = (uint16_t)(tx_id + nb_commit);
if (tx_id > txq->tx_next_rs) {
- txq->tx_ring[txq->tx_next_rs].cmd_type_offset_bsz |=
+ txq->ice_tx_ring[txq->tx_next_rs].cmd_type_offset_bsz |=
rte_cpu_to_le_64(((uint64_t)ICE_TX_DESC_CMD_RS) <<
ICE_TXD_QW1_CMD_S);
txq->tx_next_rs =
diff --git a/drivers/net/ice/ice_rxtx_vec_avx512.c b/drivers/net/ice/ice_rxtx_vec_avx512.c
index 6b6aa3f1fe..e4d0270176 100644
--- a/drivers/net/ice/ice_rxtx_vec_avx512.c
+++ b/drivers/net/ice/ice_rxtx_vec_avx512.c
@@ -869,7 +869,7 @@ ice_tx_free_bufs_avx512(struct ice_tx_queue *txq)
struct rte_mbuf *m, *free[ICE_TX_MAX_FREE_BUF_SZ];
/* check DD bits on threshold descriptor */
- if ((txq->tx_ring[txq->tx_next_dd].cmd_type_offset_bsz &
+ if ((txq->ice_tx_ring[txq->tx_next_dd].cmd_type_offset_bsz &
rte_cpu_to_le_64(ICE_TXD_QW1_DTYPE_M)) !=
rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE))
return 0;
@@ -1071,7 +1071,7 @@ ice_xmit_fixed_burst_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
return 0;
tx_id = txq->tx_tail;
- txdp = &txq->tx_ring[tx_id];
+ txdp = &txq->ice_tx_ring[tx_id];
txep = (void *)txq->sw_ring;
txep += tx_id;
@@ -1093,7 +1093,7 @@ ice_xmit_fixed_burst_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
/* avoid reach the end of ring */
- txdp = txq->tx_ring;
+ txdp = txq->ice_tx_ring;
txep = (void *)txq->sw_ring;
}
@@ -1103,7 +1103,7 @@ ice_xmit_fixed_burst_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
tx_id = (uint16_t)(tx_id + nb_commit);
if (tx_id > txq->tx_next_rs) {
- txq->tx_ring[txq->tx_next_rs].cmd_type_offset_bsz |=
+ txq->ice_tx_ring[txq->tx_next_rs].cmd_type_offset_bsz |=
rte_cpu_to_le_64(((uint64_t)ICE_TX_DESC_CMD_RS) <<
ICE_TXD_QW1_CMD_S);
txq->tx_next_rs =
diff --git a/drivers/net/ice/ice_rxtx_vec_common.h b/drivers/net/ice/ice_rxtx_vec_common.h
index 32e4541267..7b865b53ad 100644
--- a/drivers/net/ice/ice_rxtx_vec_common.h
+++ b/drivers/net/ice/ice_rxtx_vec_common.h
@@ -22,7 +22,7 @@ ice_tx_free_bufs_vec(struct ice_tx_queue *txq)
struct rte_mbuf *m, *free[ICE_TX_MAX_FREE_BUF_SZ];
/* check DD bits on threshold descriptor */
- if ((txq->tx_ring[txq->tx_next_dd].cmd_type_offset_bsz &
+ if ((txq->ice_tx_ring[txq->tx_next_dd].cmd_type_offset_bsz &
rte_cpu_to_le_64(ICE_TXD_QW1_DTYPE_M)) !=
rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE))
return 0;
@@ -121,7 +121,7 @@ _ice_tx_queue_release_mbufs_vec(struct ice_tx_queue *txq)
i = txq->tx_next_dd - txq->tx_rs_thresh + 1;
#ifdef __AVX512VL__
- struct rte_eth_dev *dev = &rte_eth_devices[txq->vsi->adapter->pf.dev_data->port_id];
+ struct rte_eth_dev *dev = &rte_eth_devices[txq->ice_vsi->adapter->pf.dev_data->port_id];
if (dev->tx_pkt_burst == ice_xmit_pkts_vec_avx512 ||
dev->tx_pkt_burst == ice_xmit_pkts_vec_avx512_offload) {
diff --git a/drivers/net/ice/ice_rxtx_vec_sse.c b/drivers/net/ice/ice_rxtx_vec_sse.c
index debdd8f6a2..364207e8a8 100644
--- a/drivers/net/ice/ice_rxtx_vec_sse.c
+++ b/drivers/net/ice/ice_rxtx_vec_sse.c
@@ -717,7 +717,7 @@ ice_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
return 0;
tx_id = txq->tx_tail;
- txdp = &txq->tx_ring[tx_id];
+ txdp = &txq->ice_tx_ring[tx_id];
txep = &txq->sw_ring[tx_id];
txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);
@@ -737,7 +737,7 @@ ice_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
/* avoid reach the end of ring */
- txdp = &txq->tx_ring[tx_id];
+ txdp = &txq->ice_tx_ring[tx_id];
txep = &txq->sw_ring[tx_id];
}
@@ -747,7 +747,7 @@ ice_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
tx_id = (uint16_t)(tx_id + nb_commit);
if (tx_id > txq->tx_next_rs) {
- txq->tx_ring[txq->tx_next_rs].cmd_type_offset_bsz |=
+ txq->ice_tx_ring[txq->tx_next_rs].cmd_type_offset_bsz |=
rte_cpu_to_le_64(((uint64_t)ICE_TX_DESC_CMD_RS) <<
ICE_TXD_QW1_CMD_S);
txq->tx_next_rs =
diff --git a/drivers/net/ixgbe/ixgbe_recycle_mbufs_vec_common.c b/drivers/net/ixgbe/ixgbe_recycle_mbufs_vec_common.c
index 2241726ad8..a878db3150 100644
--- a/drivers/net/ixgbe/ixgbe_recycle_mbufs_vec_common.c
+++ b/drivers/net/ixgbe/ixgbe_recycle_mbufs_vec_common.c
@@ -72,7 +72,7 @@ ixgbe_recycle_tx_mbufs_reuse_vec(void *tx_queue,
return 0;
/* check DD bits on threshold descriptor */
- status = txq->tx_ring[txq->tx_next_dd].wb.status;
+ status = txq->ixgbe_tx_ring[txq->tx_next_dd].wb.status;
if (!(status & IXGBE_ADVTXD_STAT_DD))
return 0;
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c
index 0a80b944f0..f7ddbba1b6 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx.c
@@ -106,7 +106,7 @@ ixgbe_tx_free_bufs(struct ixgbe_tx_queue *txq)
struct rte_mbuf *m, *free[RTE_IXGBE_TX_MAX_FREE_BUF_SZ];
/* check DD bit on threshold descriptor */
- status = txq->tx_ring[txq->tx_next_dd].wb.status;
+ status = txq->ixgbe_tx_ring[txq->tx_next_dd].wb.status;
if (!(status & rte_cpu_to_le_32(IXGBE_ADVTXD_STAT_DD)))
return 0;
@@ -198,7 +198,7 @@ static inline void
ixgbe_tx_fill_hw_ring(struct ixgbe_tx_queue *txq, struct rte_mbuf **pkts,
uint16_t nb_pkts)
{
- volatile union ixgbe_adv_tx_desc *txdp = &(txq->tx_ring[txq->tx_tail]);
+ volatile union ixgbe_adv_tx_desc *txdp = &txq->ixgbe_tx_ring[txq->tx_tail];
struct ci_tx_entry *txep = &txq->sw_ring[txq->tx_tail];
const int N_PER_LOOP = 4;
const int N_PER_LOOP_MASK = N_PER_LOOP-1;
@@ -232,7 +232,7 @@ tx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts)
{
struct ixgbe_tx_queue *txq = (struct ixgbe_tx_queue *)tx_queue;
- volatile union ixgbe_adv_tx_desc *tx_r = txq->tx_ring;
+ volatile union ixgbe_adv_tx_desc *tx_r = txq->ixgbe_tx_ring;
uint16_t n = 0;
/*
@@ -564,7 +564,7 @@ static inline int
ixgbe_xmit_cleanup(struct ixgbe_tx_queue *txq)
{
struct ci_tx_entry *sw_ring = txq->sw_ring;
- volatile union ixgbe_adv_tx_desc *txr = txq->tx_ring;
+ volatile union ixgbe_adv_tx_desc *txr = txq->ixgbe_tx_ring;
uint16_t last_desc_cleaned = txq->last_desc_cleaned;
uint16_t nb_tx_desc = txq->nb_tx_desc;
uint16_t desc_to_clean_to;
@@ -652,7 +652,7 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
tx_offload.data[1] = 0;
txq = tx_queue;
sw_ring = txq->sw_ring;
- txr = txq->tx_ring;
+ txr = txq->ixgbe_tx_ring;
tx_id = txq->tx_tail;
txe = &sw_ring[tx_id];
txp = NULL;
@@ -2495,13 +2495,13 @@ ixgbe_reset_tx_queue(struct ixgbe_tx_queue *txq)
/* Zero out HW ring memory */
for (i = 0; i < txq->nb_tx_desc; i++) {
- txq->tx_ring[i] = zeroed_desc;
+ txq->ixgbe_tx_ring[i] = zeroed_desc;
}
/* Initialize SW ring entries */
prev = (uint16_t) (txq->nb_tx_desc - 1);
for (i = 0; i < txq->nb_tx_desc; i++) {
- volatile union ixgbe_adv_tx_desc *txd = &txq->tx_ring[i];
+ volatile union ixgbe_adv_tx_desc *txd = &txq->ixgbe_tx_ring[i];
txd->wb.status = rte_cpu_to_le_32(IXGBE_TXD_STAT_DD);
txe[i].mbuf = NULL;
@@ -2751,7 +2751,7 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
* handle the maximum ring size is allocated in order to allow for
* resizing in later calls to the queue setup function.
*/
- tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
+ tz = rte_eth_dma_zone_reserve(dev, "ixgbe_tx_ring", queue_idx,
sizeof(union ixgbe_adv_tx_desc) * IXGBE_MAX_RING_DESC,
IXGBE_ALIGN, socket_id);
if (tz == NULL) {
@@ -2791,7 +2791,7 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
txq->qtx_tail = IXGBE_PCI_REG_ADDR(hw, IXGBE_TDT(txq->reg_idx));
txq->tx_ring_dma = tz->iova;
- txq->tx_ring = (union ixgbe_adv_tx_desc *) tz->addr;
+ txq->ixgbe_tx_ring = (union ixgbe_adv_tx_desc *)tz->addr;
/* Allocate software ring */
txq->sw_ring = rte_zmalloc_socket("txq->sw_ring",
@@ -2802,7 +2802,7 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
return -ENOMEM;
}
PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
- txq->sw_ring, txq->tx_ring, txq->tx_ring_dma);
+ txq->sw_ring, txq->ixgbe_tx_ring, txq->tx_ring_dma);
/* set up vector or scalar TX function as appropriate */
ixgbe_set_tx_function(dev, txq);
@@ -3328,7 +3328,7 @@ ixgbe_dev_tx_descriptor_status(void *tx_queue, uint16_t offset)
desc -= txq->nb_tx_desc;
}
- status = &txq->tx_ring[desc].wb.status;
+ status = &txq->ixgbe_tx_ring[desc].wb.status;
if (*status & rte_cpu_to_le_32(IXGBE_ADVTXD_STAT_DD))
return RTE_ETH_TX_DESC_DONE;
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.h b/drivers/net/ixgbe/ixgbe_rxtx.h
index 00e2009b3e..f6bae37cf3 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.h
+++ b/drivers/net/ixgbe/ixgbe_rxtx.h
@@ -185,7 +185,7 @@ struct ixgbe_advctx_info {
*/
struct ixgbe_tx_queue {
/** TX ring virtual address. */
- volatile union ixgbe_adv_tx_desc *tx_ring;
+ volatile union ixgbe_adv_tx_desc *ixgbe_tx_ring;
rte_iova_t tx_ring_dma; /**< TX ring DMA address. */
union {
struct ci_tx_entry *sw_ring; /**< address of SW ring for scalar PMD. */
diff --git a/drivers/net/ixgbe/ixgbe_rxtx_vec_common.h b/drivers/net/ixgbe/ixgbe_rxtx_vec_common.h
index e9592c0d08..cc51bf6eed 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx_vec_common.h
+++ b/drivers/net/ixgbe/ixgbe_rxtx_vec_common.h
@@ -22,7 +22,7 @@ ixgbe_tx_free_bufs(struct ixgbe_tx_queue *txq)
struct rte_mbuf *m, *free[RTE_IXGBE_TX_MAX_FREE_BUF_SZ];
/* check DD bit on threshold descriptor */
- status = txq->tx_ring[txq->tx_next_dd].wb.status;
+ status = txq->ixgbe_tx_ring[txq->tx_next_dd].wb.status;
if (!(status & IXGBE_ADVTXD_STAT_DD))
return 0;
@@ -154,11 +154,11 @@ _ixgbe_reset_tx_queue_vec(struct ixgbe_tx_queue *txq)
/* Zero out HW ring memory */
for (i = 0; i < txq->nb_tx_desc; i++)
- txq->tx_ring[i] = zeroed_desc;
+ txq->ixgbe_tx_ring[i] = zeroed_desc;
/* Initialize SW ring entries */
for (i = 0; i < txq->nb_tx_desc; i++) {
- volatile union ixgbe_adv_tx_desc *txd = &txq->tx_ring[i];
+ volatile union ixgbe_adv_tx_desc *txd = &txq->ixgbe_tx_ring[i];
txd->wb.status = IXGBE_TXD_STAT_DD;
txe[i].mbuf = NULL;
diff --git a/drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c b/drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c
index 871c1a7cd2..06be7ec82a 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c
@@ -590,7 +590,7 @@ ixgbe_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
return 0;
tx_id = txq->tx_tail;
- txdp = &txq->tx_ring[tx_id];
+ txdp = &txq->ixgbe_tx_ring[tx_id];
txep = &txq->sw_ring_v[tx_id];
txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);
@@ -610,7 +610,7 @@ ixgbe_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
/* avoid reach the end of ring */
- txdp = &txq->tx_ring[tx_id];
+ txdp = &txq->ixgbe_tx_ring[tx_id];
txep = &txq->sw_ring_v[tx_id];
}
@@ -620,7 +620,7 @@ ixgbe_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
tx_id = (uint16_t)(tx_id + nb_commit);
if (tx_id > txq->tx_next_rs) {
- txq->tx_ring[txq->tx_next_rs].read.cmd_type_len |=
+ txq->ixgbe_tx_ring[txq->tx_next_rs].read.cmd_type_len |=
rte_cpu_to_le_32(IXGBE_ADVTXD_DCMD_RS);
txq->tx_next_rs = (uint16_t)(txq->tx_next_rs +
txq->tx_rs_thresh);
diff --git a/drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c b/drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c
index 37f2079519..a21a57bd55 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c
@@ -712,7 +712,7 @@ ixgbe_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
return 0;
tx_id = txq->tx_tail;
- txdp = &txq->tx_ring[tx_id];
+ txdp = &txq->ixgbe_tx_ring[tx_id];
txep = &txq->sw_ring_v[tx_id];
txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);
@@ -733,7 +733,7 @@ ixgbe_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
/* avoid reach the end of ring */
- txdp = &(txq->tx_ring[tx_id]);
+ txdp = &txq->ixgbe_tx_ring[tx_id];
txep = &txq->sw_ring_v[tx_id];
}
@@ -743,7 +743,7 @@ ixgbe_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
tx_id = (uint16_t)(tx_id + nb_commit);
if (tx_id > txq->tx_next_rs) {
- txq->tx_ring[txq->tx_next_rs].read.cmd_type_len |=
+ txq->ixgbe_tx_ring[txq->tx_next_rs].read.cmd_type_len |=
rte_cpu_to_le_32(IXGBE_ADVTXD_DCMD_RS);
txq->tx_next_rs = (uint16_t)(txq->tx_next_rs +
txq->tx_rs_thresh);
--
2.43.0
Thread overview: 127+ messages
2024-11-22 12:53 [RFC PATCH 00/21] Reduce code duplication across Intel NIC drivers Bruce Richardson
2024-11-22 12:53 ` [RFC PATCH 01/21] common/intel_eth: add pkt reassembly fn for intel drivers Bruce Richardson
2024-11-22 12:53 ` [RFC PATCH 02/21] common/intel_eth: provide common Tx entry structures Bruce Richardson
2024-11-22 12:53 ` [RFC PATCH 03/21] common/intel_eth: add Tx mbuf ring replenish fn Bruce Richardson
2024-11-22 12:53 ` [RFC PATCH 04/21] drivers/net: align Tx queue struct field names Bruce Richardson
2024-11-22 12:53 ` [RFC PATCH 05/21] drivers/net: add prefix for driver-specific structs Bruce Richardson
2024-11-22 12:53 ` [RFC PATCH 06/21] common/intel_eth: merge ice and i40e Tx queue struct Bruce Richardson
2024-11-22 12:54 ` [RFC PATCH 07/21] net/iavf: use common Tx queue structure Bruce Richardson
2024-11-22 12:54 ` [RFC PATCH 08/21] net/ixgbe: convert Tx queue context cache field to ptr Bruce Richardson
2024-11-22 12:54 ` [RFC PATCH 09/21] net/ixgbe: use common Tx queue structure Bruce Richardson
2024-11-22 12:54 ` [RFC PATCH 10/21] common/intel_eth: pack " Bruce Richardson
2024-11-22 12:54 ` [RFC PATCH 11/21] common/intel_eth: add post-Tx buffer free function Bruce Richardson
2024-11-22 12:54 ` [RFC PATCH 12/21] common/intel_eth: add Tx buffer free fn for AVX-512 Bruce Richardson
2024-11-22 12:54 ` [RFC PATCH 13/21] net/iavf: use common Tx " Bruce Richardson
2024-11-22 12:54 ` [RFC PATCH 14/21] net/ice: move Tx queue mbuf cleanup fn to common Bruce Richardson
2024-11-22 12:54 ` [RFC PATCH 15/21] net/i40e: use common Tx queue mbuf cleanup fn Bruce Richardson
2024-11-22 12:54 ` [RFC PATCH 16/21] net/ixgbe: " Bruce Richardson
2024-11-22 12:54 ` [RFC PATCH 17/21] net/iavf: " Bruce Richardson
2024-11-22 12:54 ` [RFC PATCH 18/21] net/ice: use vector SW ring for all vector paths Bruce Richardson
2024-11-22 12:54 ` [RFC PATCH 19/21] net/i40e: " Bruce Richardson
2024-11-22 12:54 ` [RFC PATCH 20/21] net/iavf: " Bruce Richardson
2024-11-22 12:54 ` [RFC PATCH 21/21] net/ixgbe: use common Tx backlog entry fn Bruce Richardson
2024-11-25 16:25 ` [RFC PATCH 00/21] Reduce code duplication across Intel NIC drivers David Marchand
2024-11-25 16:31 ` Bruce Richardson
2024-11-26 14:57 ` Thomas Monjalon
2024-11-26 15:27 ` Bruce Richardson
2024-12-02 11:24 ` [PATCH v1 " Bruce Richardson
2024-12-02 11:24 ` [PATCH v1 01/21] net/_common_intel: add pkt reassembly fn for intel drivers Bruce Richardson
2024-12-02 11:24 ` [PATCH v1 02/21] net/_common_intel: provide common Tx entry structures Bruce Richardson
2024-12-02 11:24 ` [PATCH v1 03/21] net/_common_intel: add Tx mbuf ring replenish fn Bruce Richardson
2024-12-02 11:24 ` [PATCH v1 04/21] drivers/net: align Tx queue struct field names Bruce Richardson
2024-12-02 11:24 ` [PATCH v1 05/21] drivers/net: add prefix for driver-specific structs Bruce Richardson
2024-12-02 11:24 ` [PATCH v1 06/21] net/_common_intel: merge ice and i40e Tx queue struct Bruce Richardson
2024-12-02 11:24 ` [PATCH v1 07/21] net/iavf: use common Tx queue structure Bruce Richardson
2024-12-02 11:24 ` [PATCH v1 08/21] net/ixgbe: convert Tx queue context cache field to ptr Bruce Richardson
2024-12-02 11:24 ` [PATCH v1 09/21] net/ixgbe: use common Tx queue structure Bruce Richardson
2024-12-02 13:51 ` Medvedkin, Vladimir
2024-12-02 14:09 ` Bruce Richardson
2024-12-02 15:15 ` Bruce Richardson
2024-12-02 11:24 ` [PATCH v1 10/21] net/_common_intel: pack " Bruce Richardson
2024-12-02 11:24 ` [PATCH v1 11/21] net/_common_intel: add post-Tx buffer free function Bruce Richardson
2024-12-02 12:59 ` David Marchand
2024-12-02 13:12 ` Bruce Richardson
2024-12-02 13:24 ` Bruce Richardson
2024-12-02 13:55 ` David Marchand
2024-12-02 11:24 ` [PATCH v1 12/21] net/_common_intel: add Tx buffer free fn for AVX-512 Bruce Richardson
2024-12-02 11:24 ` [PATCH v1 13/21] net/iavf: use common Tx " Bruce Richardson
2024-12-02 11:24 ` [PATCH v1 14/21] net/ice: move Tx queue mbuf cleanup fn to common Bruce Richardson
2024-12-02 11:24 ` [PATCH v1 15/21] net/i40e: use common Tx queue mbuf cleanup fn Bruce Richardson
2024-12-02 11:24 ` [PATCH v1 16/21] net/ixgbe: " Bruce Richardson
2024-12-02 11:24 ` [PATCH v1 17/21] net/iavf: " Bruce Richardson
2024-12-02 11:24 ` [PATCH v1 18/21] net/ice: use vector SW ring for all vector paths Bruce Richardson
2024-12-02 11:24 ` [PATCH v1 19/21] net/i40e: " Bruce Richardson
2024-12-02 11:24 ` [PATCH v1 20/21] net/iavf: " Bruce Richardson
2024-12-02 11:24 ` [PATCH v1 21/21] net/ixgbe: use common Tx backlog entry fn Bruce Richardson
2024-12-03 16:41 ` [PATCH v2 00/22] Reduce code duplication across Intel NIC drivers Bruce Richardson
2024-12-03 16:41 ` [PATCH v2 01/22] net/_common_intel: add pkt reassembly fn for intel drivers Bruce Richardson
2024-12-03 16:41 ` [PATCH v2 02/22] net/_common_intel: provide common Tx entry structures Bruce Richardson
2024-12-03 16:41 ` [PATCH v2 03/22] net/_common_intel: add Tx mbuf ring replenish fn Bruce Richardson
2024-12-03 16:41 ` [PATCH v2 04/22] drivers/net: align Tx queue struct field names Bruce Richardson
2024-12-03 16:41 ` [PATCH v2 05/22] drivers/net: add prefix for driver-specific structs Bruce Richardson
2024-12-03 16:41 ` [PATCH v2 06/22] net/_common_intel: merge ice and i40e Tx queue struct Bruce Richardson
2024-12-03 16:41 ` [PATCH v2 07/22] net/iavf: use common Tx queue structure Bruce Richardson
2024-12-03 16:41 ` [PATCH v2 08/22] net/ixgbe: convert Tx queue context cache field to ptr Bruce Richardson
2024-12-03 16:41 ` [PATCH v2 09/22] net/ixgbe: use common Tx queue structure Bruce Richardson
2024-12-03 16:41 ` [PATCH v2 10/22] net/_common_intel: pack " Bruce Richardson
2024-12-03 16:41 ` [PATCH v2 11/22] net/_common_intel: add post-Tx buffer free function Bruce Richardson
2024-12-03 16:41 ` [PATCH v2 12/22] net/_common_intel: add Tx buffer free fn for AVX-512 Bruce Richardson
2024-12-03 16:41 ` [PATCH v2 13/22] net/iavf: use common Tx " Bruce Richardson
2024-12-03 16:41 ` [PATCH v2 14/22] net/ice: move Tx queue mbuf cleanup fn to common Bruce Richardson
2024-12-03 16:41 ` [PATCH v2 15/22] net/i40e: use common Tx queue mbuf cleanup fn Bruce Richardson
2024-12-03 16:41 ` [PATCH v2 16/22] net/ixgbe: " Bruce Richardson
2024-12-03 16:41 ` [PATCH v2 17/22] net/iavf: " Bruce Richardson
2024-12-03 16:41 ` [PATCH v2 18/22] net/ice: use vector SW ring for all vector paths Bruce Richardson
2024-12-03 16:41 ` [PATCH v2 19/22] net/i40e: " Bruce Richardson
2024-12-03 16:41 ` [PATCH v2 20/22] net/iavf: " Bruce Richardson
2024-12-03 16:41 ` [PATCH v2 21/22] net/_common_intel: remove unneeded code Bruce Richardson
2024-12-03 16:41 ` [PATCH v2 22/22] net/ixgbe: use common Tx backlog entry fn Bruce Richardson
2024-12-11 17:33 ` [PATCH v3 00/22] Reduce code duplication across Intel NIC drivers Bruce Richardson
2024-12-11 17:33 ` [PATCH v3 01/22] net/_common_intel: add pkt reassembly fn for intel drivers Bruce Richardson
2024-12-11 17:33 ` [PATCH v3 02/22] net/_common_intel: provide common Tx entry structures Bruce Richardson
2024-12-11 17:33 ` [PATCH v3 03/22] net/_common_intel: add Tx mbuf ring replenish fn Bruce Richardson
2024-12-11 17:33 ` [PATCH v3 04/22] drivers/net: align Tx queue struct field names Bruce Richardson
2024-12-11 17:33 ` [PATCH v3 05/22] drivers/net: add prefix for driver-specific structs Bruce Richardson
2024-12-11 17:33 ` [PATCH v3 06/22] net/_common_intel: merge ice and i40e Tx queue struct Bruce Richardson
2024-12-11 17:33 ` [PATCH v3 07/22] net/iavf: use common Tx queue structure Bruce Richardson
2024-12-11 17:33 ` [PATCH v3 08/22] net/ixgbe: convert Tx queue context cache field to ptr Bruce Richardson
2024-12-11 17:33 ` [PATCH v3 09/22] net/ixgbe: use common Tx queue structure Bruce Richardson
2024-12-11 17:33 ` [PATCH v3 10/22] net/_common_intel: pack " Bruce Richardson
2024-12-11 17:33 ` [PATCH v3 11/22] net/_common_intel: add post-Tx buffer free function Bruce Richardson
2024-12-11 17:33 ` [PATCH v3 12/22] net/_common_intel: add Tx buffer free fn for AVX-512 Bruce Richardson
2024-12-11 17:33 ` [PATCH v3 13/22] net/iavf: use common Tx " Bruce Richardson
2024-12-11 17:33 ` [PATCH v3 14/22] net/ice: move Tx queue mbuf cleanup fn to common Bruce Richardson
2024-12-11 17:33 ` [PATCH v3 15/22] net/i40e: use common Tx queue mbuf cleanup fn Bruce Richardson
2024-12-11 17:33 ` [PATCH v3 16/22] net/ixgbe: " Bruce Richardson
2024-12-11 17:33 ` [PATCH v3 17/22] net/iavf: " Bruce Richardson
2024-12-11 17:33 ` [PATCH v3 18/22] net/ice: use vector SW ring for all vector paths Bruce Richardson
2024-12-11 17:33 ` [PATCH v3 19/22] net/i40e: " Bruce Richardson
2024-12-11 17:33 ` [PATCH v3 20/22] net/iavf: " Bruce Richardson
2024-12-11 17:33 ` [PATCH v3 21/22] net/_common_intel: remove unneeded code Bruce Richardson
2024-12-11 17:33 ` [PATCH v3 22/22] net/ixgbe: use common Tx backlog entry fn Bruce Richardson
2024-12-20 14:38 ` [PATCH v4 00/24] Reduce code duplication across Intel NIC drivers Bruce Richardson
2024-12-20 14:38 ` [PATCH v4 01/24] net/_common_intel: add pkt reassembly fn for intel drivers Bruce Richardson
2024-12-20 16:15 ` Stephen Hemminger
2024-12-20 14:38 ` [PATCH v4 02/24] net/_common_intel: provide common Tx entry structures Bruce Richardson
2024-12-20 14:39 ` [PATCH v4 03/24] net/_common_intel: add Tx mbuf ring replenish fn Bruce Richardson
2024-12-20 14:39 ` [PATCH v4 04/24] drivers/net: align Tx queue struct field names Bruce Richardson
2024-12-20 14:39 ` Bruce Richardson [this message]
2024-12-20 14:39 ` [PATCH v4 06/24] net/_common_intel: merge ice and i40e Tx queue struct Bruce Richardson
2024-12-20 14:39 ` [PATCH v4 07/24] net/iavf: use common Tx queue structure Bruce Richardson
2024-12-20 14:39 ` [PATCH v4 08/24] net/ixgbe: convert Tx queue context cache field to ptr Bruce Richardson
2024-12-20 14:39 ` [PATCH v4 09/24] net/ixgbe: use common Tx queue structure Bruce Richardson
2024-12-20 14:39 ` [PATCH v4 10/24] net/_common_intel: pack " Bruce Richardson
2024-12-20 14:39 ` [PATCH v4 11/24] net/_common_intel: add post-Tx buffer free function Bruce Richardson
2024-12-20 14:39 ` [PATCH v4 12/24] net/_common_intel: add Tx buffer free fn for AVX-512 Bruce Richardson
2024-12-20 14:39 ` [PATCH v4 13/24] net/iavf: use common Tx " Bruce Richardson
2024-12-20 14:39 ` [PATCH v4 14/24] net/ice: move Tx queue mbuf cleanup fn to common Bruce Richardson
2024-12-20 14:39 ` [PATCH v4 15/24] net/i40e: use common Tx queue mbuf cleanup fn Bruce Richardson
2024-12-20 14:39 ` [PATCH v4 16/24] net/ixgbe: " Bruce Richardson
2024-12-20 14:39 ` [PATCH v4 17/24] net/iavf: " Bruce Richardson
2024-12-20 14:39 ` [PATCH v4 18/24] net/ice: use vector SW ring for all vector paths Bruce Richardson
2024-12-20 14:39 ` [PATCH v4 19/24] net/i40e: " Bruce Richardson
2024-12-20 14:39 ` [PATCH v4 20/24] net/iavf: " Bruce Richardson
2024-12-20 14:39 ` [PATCH v4 21/24] net/_common_intel: remove unneeded code Bruce Richardson
2024-12-20 14:39 ` [PATCH v4 22/24] net/ixgbe: use common Tx backlog entry fn Bruce Richardson
2024-12-20 14:39 ` [PATCH v4 23/24] net/_common_intel: create common mbuf initializer fn Bruce Richardson
2024-12-20 14:39 ` [PATCH v4 24/24] net/_common_intel: extract common Rx vector criteria Bruce Richardson