From: Bruce Richardson <bruce.richardson@intel.com>
To: dev@dpdk.org
Cc: Bruce Richardson <bruce.richardson@intel.com>,
Ian Stokes <ian.stokes@intel.com>,
David Christensen <drc@linux.ibm.com>,
Konstantin Ananyev <konstantin.v.ananyev@yandex.ru>,
Wathsala Vithanage <wathsala.vithanage@arm.com>,
Vladimir Medvedkin <vladimir.medvedkin@intel.com>,
Anatoly Burakov <anatoly.burakov@intel.com>
Subject: [RFC PATCH 05/21] drivers/net: add prefix for driver-specific structs
Date: Fri, 22 Nov 2024 12:53:58 +0000 [thread overview]
Message-ID: <20241122125418.2857301-6-bruce.richardson@intel.com> (raw)
In-Reply-To: <20241122125418.2857301-1-bruce.richardson@intel.com>
In preparation for merging the Tx structs for multiple drivers into a
single struct, rename the driver-specific pointers in each struct to
have a prefix on them, to avoid conflicts.
Signed-off-by: Bruce Richardson <bruce.richardson@intel.com>
---
drivers/net/i40e/i40e_fdir.c | 6 +--
.../net/i40e/i40e_recycle_mbufs_vec_common.c | 2 +-
drivers/net/i40e/i40e_rxtx.c | 30 ++++++------
drivers/net/i40e/i40e_rxtx.h | 4 +-
drivers/net/i40e/i40e_rxtx_vec_altivec.c | 6 +--
drivers/net/i40e/i40e_rxtx_vec_avx2.c | 6 +--
drivers/net/i40e/i40e_rxtx_vec_avx512.c | 8 ++--
drivers/net/i40e/i40e_rxtx_vec_common.h | 2 +-
drivers/net/i40e/i40e_rxtx_vec_neon.c | 6 +--
drivers/net/i40e/i40e_rxtx_vec_sse.c | 6 +--
drivers/net/iavf/iavf_rxtx.c | 24 +++++-----
drivers/net/iavf/iavf_rxtx.h | 4 +-
drivers/net/iavf/iavf_rxtx_vec_avx2.c | 6 +--
drivers/net/iavf/iavf_rxtx_vec_avx512.c | 14 +++---
drivers/net/iavf/iavf_rxtx_vec_common.h | 2 +-
drivers/net/iavf/iavf_rxtx_vec_sse.c | 6 +--
drivers/net/ice/ice_dcf_ethdev.c | 4 +-
drivers/net/ice/ice_rxtx.c | 48 +++++++++----------
drivers/net/ice/ice_rxtx.h | 4 +-
drivers/net/ice/ice_rxtx_vec_avx2.c | 6 +--
drivers/net/ice/ice_rxtx_vec_avx512.c | 8 ++--
drivers/net/ice/ice_rxtx_vec_common.h | 4 +-
drivers/net/ice/ice_rxtx_vec_sse.c | 6 +--
.../ixgbe/ixgbe_recycle_mbufs_vec_common.c | 2 +-
drivers/net/ixgbe/ixgbe_rxtx.c | 22 ++++-----
drivers/net/ixgbe/ixgbe_rxtx.h | 2 +-
drivers/net/ixgbe/ixgbe_rxtx_vec_common.h | 6 +--
drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c | 6 +--
drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c | 6 +--
29 files changed, 128 insertions(+), 128 deletions(-)
diff --git a/drivers/net/i40e/i40e_fdir.c b/drivers/net/i40e/i40e_fdir.c
index 47f79ecf11..c600167634 100644
--- a/drivers/net/i40e/i40e_fdir.c
+++ b/drivers/net/i40e/i40e_fdir.c
@@ -1383,7 +1383,7 @@ i40e_find_available_buffer(struct rte_eth_dev *dev)
volatile struct i40e_tx_desc *tmp_txdp;
tmp_tail = txq->tx_tail;
- tmp_txdp = &txq->tx_ring[tmp_tail + 1];
+ tmp_txdp = &txq->i40e_tx_ring[tmp_tail + 1];
do {
if ((tmp_txdp->cmd_type_offset_bsz &
@@ -1640,7 +1640,7 @@ i40e_flow_fdir_filter_programming(struct i40e_pf *pf,
PMD_DRV_LOG(INFO, "filling filter programming descriptor.");
fdirdp = (volatile struct i40e_filter_program_desc *)
- (&txq->tx_ring[txq->tx_tail]);
+ (&txq->i40e_tx_ring[txq->tx_tail]);
fdirdp->qindex_flex_ptype_vsi =
rte_cpu_to_le_32((fdir_action->rx_queue <<
@@ -1710,7 +1710,7 @@ i40e_flow_fdir_filter_programming(struct i40e_pf *pf,
fdirdp->fd_id = rte_cpu_to_le_32(filter->soft_id);
PMD_DRV_LOG(INFO, "filling transmit descriptor.");
- txdp = &txq->tx_ring[txq->tx_tail + 1];
+ txdp = &txq->i40e_tx_ring[txq->tx_tail + 1];
txdp->buffer_addr = rte_cpu_to_le_64(pf->fdir.dma_addr[txq->tx_tail >> 1]);
td_cmd = I40E_TX_DESC_CMD_EOP |
diff --git a/drivers/net/i40e/i40e_recycle_mbufs_vec_common.c b/drivers/net/i40e/i40e_recycle_mbufs_vec_common.c
index 5a23adc6a4..167ee8d428 100644
--- a/drivers/net/i40e/i40e_recycle_mbufs_vec_common.c
+++ b/drivers/net/i40e/i40e_recycle_mbufs_vec_common.c
@@ -75,7 +75,7 @@ i40e_recycle_tx_mbufs_reuse_vec(void *tx_queue,
return 0;
/* check DD bits on threshold descriptor */
- if ((txq->tx_ring[txq->tx_next_dd].cmd_type_offset_bsz &
+ if ((txq->i40e_tx_ring[txq->tx_next_dd].cmd_type_offset_bsz &
rte_cpu_to_le_64(I40E_TXD_QW1_DTYPE_MASK)) !=
rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DESC_DONE))
return 0;
diff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c
index 20e72cac54..5b8edac3b2 100644
--- a/drivers/net/i40e/i40e_rxtx.c
+++ b/drivers/net/i40e/i40e_rxtx.c
@@ -379,7 +379,7 @@ static inline int
i40e_xmit_cleanup(struct i40e_tx_queue *txq)
{
struct ieth_tx_entry *sw_ring = txq->sw_ring;
- volatile struct i40e_tx_desc *txd = txq->tx_ring;
+ volatile struct i40e_tx_desc *txd = txq->i40e_tx_ring;
uint16_t last_desc_cleaned = txq->last_desc_cleaned;
uint16_t nb_tx_desc = txq->nb_tx_desc;
uint16_t desc_to_clean_to;
@@ -1103,7 +1103,7 @@ i40e_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
txq = tx_queue;
sw_ring = txq->sw_ring;
- txr = txq->tx_ring;
+ txr = txq->i40e_tx_ring;
tx_id = txq->tx_tail;
txe = &sw_ring[tx_id];
@@ -1338,7 +1338,7 @@ i40e_tx_free_bufs(struct i40e_tx_queue *txq)
const uint16_t k = RTE_ALIGN_FLOOR(tx_rs_thresh, RTE_I40E_TX_MAX_FREE_BUF_SZ);
const uint16_t m = tx_rs_thresh % RTE_I40E_TX_MAX_FREE_BUF_SZ;
- if ((txq->tx_ring[txq->tx_next_dd].cmd_type_offset_bsz &
+ if ((txq->i40e_tx_ring[txq->tx_next_dd].cmd_type_offset_bsz &
rte_cpu_to_le_64(I40E_TXD_QW1_DTYPE_MASK)) !=
rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DESC_DONE))
return 0;
@@ -1417,7 +1417,7 @@ i40e_tx_fill_hw_ring(struct i40e_tx_queue *txq,
struct rte_mbuf **pkts,
uint16_t nb_pkts)
{
- volatile struct i40e_tx_desc *txdp = &(txq->tx_ring[txq->tx_tail]);
+ volatile struct i40e_tx_desc *txdp = &(txq->i40e_tx_ring[txq->tx_tail]);
struct ieth_tx_entry *txep = &(txq->sw_ring[txq->tx_tail]);
const int N_PER_LOOP = 4;
const int N_PER_LOOP_MASK = N_PER_LOOP - 1;
@@ -1445,7 +1445,7 @@ tx_xmit_pkts(struct i40e_tx_queue *txq,
struct rte_mbuf **tx_pkts,
uint16_t nb_pkts)
{
- volatile struct i40e_tx_desc *txr = txq->tx_ring;
+ volatile struct i40e_tx_desc *txr = txq->i40e_tx_ring;
uint16_t n = 0;
/**
@@ -1556,7 +1556,7 @@ i40e_xmit_pkts_check(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts
bool pkt_error = false;
const char *reason = NULL;
uint16_t good_pkts = nb_pkts;
- struct i40e_adapter *adapter = txq->vsi->adapter;
+ struct i40e_adapter *adapter = txq->i40e_vsi->adapter;
for (idx = 0; idx < nb_pkts; idx++) {
mb = tx_pkts[idx];
@@ -2329,7 +2329,7 @@ i40e_dev_tx_descriptor_status(void *tx_queue, uint16_t offset)
desc -= txq->nb_tx_desc;
}
- status = &txq->tx_ring[desc].cmd_type_offset_bsz;
+ status = &txq->i40e_tx_ring[desc].cmd_type_offset_bsz;
mask = rte_le_to_cpu_64(I40E_TXD_QW1_DTYPE_MASK);
expect = rte_cpu_to_le_64(
I40E_TX_DESC_DTYPE_DESC_DONE << I40E_TXD_QW1_DTYPE_SHIFT);
@@ -2527,7 +2527,7 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
/* Allocate TX hardware ring descriptors. */
ring_size = sizeof(struct i40e_tx_desc) * I40E_MAX_RING_DESC;
ring_size = RTE_ALIGN(ring_size, I40E_DMA_MEM_ALIGN);
- tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
+ tz = rte_eth_dma_zone_reserve(dev, "i40e_tx_ring", queue_idx,
ring_size, I40E_RING_BASE_ALIGN, socket_id);
if (!tz) {
i40e_tx_queue_release(txq);
@@ -2546,11 +2546,11 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
txq->reg_idx = reg_idx;
txq->port_id = dev->data->port_id;
txq->offloads = offloads;
- txq->vsi = vsi;
+ txq->i40e_vsi = vsi;
txq->tx_deferred_start = tx_conf->tx_deferred_start;
txq->tx_ring_dma = tz->iova;
- txq->tx_ring = (struct i40e_tx_desc *)tz->addr;
+ txq->i40e_tx_ring = (struct i40e_tx_desc *)tz->addr;
/* Allocate software ring */
txq->sw_ring =
@@ -2885,11 +2885,11 @@ i40e_reset_tx_queue(struct i40e_tx_queue *txq)
txe = txq->sw_ring;
size = sizeof(struct i40e_tx_desc) * txq->nb_tx_desc;
for (i = 0; i < size; i++)
- ((volatile char *)txq->tx_ring)[i] = 0;
+ ((volatile char *)txq->i40e_tx_ring)[i] = 0;
prev = (uint16_t)(txq->nb_tx_desc - 1);
for (i = 0; i < txq->nb_tx_desc; i++) {
- volatile struct i40e_tx_desc *txd = &txq->tx_ring[i];
+ volatile struct i40e_tx_desc *txd = &txq->i40e_tx_ring[i];
txd->cmd_type_offset_bsz =
rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DESC_DONE);
@@ -2914,7 +2914,7 @@ int
i40e_tx_queue_init(struct i40e_tx_queue *txq)
{
enum i40e_status_code err = I40E_SUCCESS;
- struct i40e_vsi *vsi = txq->vsi;
+ struct i40e_vsi *vsi = txq->i40e_vsi;
struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
uint16_t pf_q = txq->reg_idx;
struct i40e_hmc_obj_txq tx_ctx;
@@ -3207,10 +3207,10 @@ i40e_fdir_setup_tx_resources(struct i40e_pf *pf)
txq->nb_tx_desc = I40E_FDIR_NUM_TX_DESC;
txq->queue_id = I40E_FDIR_QUEUE_ID;
txq->reg_idx = pf->fdir.fdir_vsi->base_queue;
- txq->vsi = pf->fdir.fdir_vsi;
+ txq->i40e_vsi = pf->fdir.fdir_vsi;
txq->tx_ring_dma = tz->iova;
- txq->tx_ring = (struct i40e_tx_desc *)tz->addr;
+ txq->i40e_tx_ring = (struct i40e_tx_desc *)tz->addr;
/*
* don't need to allocate software ring and reset for the fdir
diff --git a/drivers/net/i40e/i40e_rxtx.h b/drivers/net/i40e/i40e_rxtx.h
index c5fbadc9e2..030c381e0c 100644
--- a/drivers/net/i40e/i40e_rxtx.h
+++ b/drivers/net/i40e/i40e_rxtx.h
@@ -130,7 +130,7 @@ struct i40e_rx_queue {
struct i40e_tx_queue {
uint16_t nb_tx_desc; /**< number of TX descriptors */
rte_iova_t tx_ring_dma; /**< TX ring DMA address */
- volatile struct i40e_tx_desc *tx_ring; /**< TX ring virtual address */
+ volatile struct i40e_tx_desc *i40e_tx_ring; /**< TX ring virtual address */
struct ieth_tx_entry *sw_ring; /**< virtual address of SW ring */
uint16_t tx_tail; /**< current value of tail register */
volatile uint8_t *qtx_tail; /**< register address of tail */
@@ -150,7 +150,7 @@ struct i40e_tx_queue {
uint16_t port_id; /**< Device port identifier. */
uint16_t queue_id; /**< TX queue index. */
uint16_t reg_idx;
- struct i40e_vsi *vsi; /**< the VSI this queue belongs to */
+ struct i40e_vsi *i40e_vsi; /**< the VSI this queue belongs to */
uint16_t tx_next_dd;
uint16_t tx_next_rs;
bool q_set; /**< indicate if tx queue has been configured */
diff --git a/drivers/net/i40e/i40e_rxtx_vec_altivec.c b/drivers/net/i40e/i40e_rxtx_vec_altivec.c
index 614af752b8..aed78e4a1a 100644
--- a/drivers/net/i40e/i40e_rxtx_vec_altivec.c
+++ b/drivers/net/i40e/i40e_rxtx_vec_altivec.c
@@ -568,7 +568,7 @@ i40e_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
return 0;
tx_id = txq->tx_tail;
- txdp = &txq->tx_ring[tx_id];
+ txdp = &txq->i40e_tx_ring[tx_id];
txep = &txq->sw_ring[tx_id];
txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);
@@ -588,7 +588,7 @@ i40e_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
/* avoid reach the end of ring */
- txdp = &txq->tx_ring[tx_id];
+ txdp = &txq->i40e_tx_ring[tx_id];
txep = &txq->sw_ring[tx_id];
}
@@ -598,7 +598,7 @@ i40e_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
tx_id = (uint16_t)(tx_id + nb_commit);
if (tx_id > txq->tx_next_rs) {
- txq->tx_ring[txq->tx_next_rs].cmd_type_offset_bsz |=
+ txq->i40e_tx_ring[txq->tx_next_rs].cmd_type_offset_bsz |=
rte_cpu_to_le_64(((uint64_t)I40E_TX_DESC_CMD_RS) <<
I40E_TXD_QW1_CMD_SHIFT);
txq->tx_next_rs =
diff --git a/drivers/net/i40e/i40e_rxtx_vec_avx2.c b/drivers/net/i40e/i40e_rxtx_vec_avx2.c
index 2b0a774d47..6b7c96c683 100644
--- a/drivers/net/i40e/i40e_rxtx_vec_avx2.c
+++ b/drivers/net/i40e/i40e_rxtx_vec_avx2.c
@@ -758,7 +758,7 @@ i40e_xmit_fixed_burst_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts,
return 0;
tx_id = txq->tx_tail;
- txdp = &txq->tx_ring[tx_id];
+ txdp = &txq->i40e_tx_ring[tx_id];
txep = &txq->sw_ring[tx_id];
txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);
@@ -779,7 +779,7 @@ i40e_xmit_fixed_burst_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts,
txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
/* avoid reach the end of ring */
- txdp = &txq->tx_ring[tx_id];
+ txdp = &txq->i40e_tx_ring[tx_id];
txep = &txq->sw_ring[tx_id];
}
@@ -789,7 +789,7 @@ i40e_xmit_fixed_burst_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts,
tx_id = (uint16_t)(tx_id + nb_commit);
if (tx_id > txq->tx_next_rs) {
- txq->tx_ring[txq->tx_next_rs].cmd_type_offset_bsz |=
+ txq->i40e_tx_ring[txq->tx_next_rs].cmd_type_offset_bsz |=
rte_cpu_to_le_64(((uint64_t)I40E_TX_DESC_CMD_RS) <<
I40E_TXD_QW1_CMD_SHIFT);
txq->tx_next_rs =
diff --git a/drivers/net/i40e/i40e_rxtx_vec_avx512.c b/drivers/net/i40e/i40e_rxtx_vec_avx512.c
index 25ed4c78a7..33c1655c9a 100644
--- a/drivers/net/i40e/i40e_rxtx_vec_avx512.c
+++ b/drivers/net/i40e/i40e_rxtx_vec_avx512.c
@@ -764,7 +764,7 @@ i40e_tx_free_bufs_avx512(struct i40e_tx_queue *txq)
struct rte_mbuf *m, *free[RTE_I40E_TX_MAX_FREE_BUF_SZ];
/* check DD bits on threshold descriptor */
- if ((txq->tx_ring[txq->tx_next_dd].cmd_type_offset_bsz &
+ if ((txq->i40e_tx_ring[txq->tx_next_dd].cmd_type_offset_bsz &
rte_cpu_to_le_64(I40E_TXD_QW1_DTYPE_MASK)) !=
rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DESC_DONE))
return 0;
@@ -948,7 +948,7 @@ i40e_xmit_fixed_burst_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
return 0;
tx_id = txq->tx_tail;
- txdp = &txq->tx_ring[tx_id];
+ txdp = &txq->i40e_tx_ring[tx_id];
txep = (void *)txq->sw_ring;
txep += tx_id;
@@ -970,7 +970,7 @@ i40e_xmit_fixed_burst_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
/* avoid reach the end of ring */
- txdp = txq->tx_ring;
+ txdp = txq->i40e_tx_ring;
txep = (void *)txq->sw_ring;
}
@@ -980,7 +980,7 @@ i40e_xmit_fixed_burst_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
tx_id = (uint16_t)(tx_id + nb_commit);
if (tx_id > txq->tx_next_rs) {
- txq->tx_ring[txq->tx_next_rs].cmd_type_offset_bsz |=
+ txq->i40e_tx_ring[txq->tx_next_rs].cmd_type_offset_bsz |=
rte_cpu_to_le_64(((uint64_t)I40E_TX_DESC_CMD_RS) <<
I40E_TXD_QW1_CMD_SHIFT);
txq->tx_next_rs =
diff --git a/drivers/net/i40e/i40e_rxtx_vec_common.h b/drivers/net/i40e/i40e_rxtx_vec_common.h
index 676c3b1034..a70d9fce78 100644
--- a/drivers/net/i40e/i40e_rxtx_vec_common.h
+++ b/drivers/net/i40e/i40e_rxtx_vec_common.h
@@ -26,7 +26,7 @@ i40e_tx_free_bufs(struct i40e_tx_queue *txq)
struct rte_mbuf *m, *free[RTE_I40E_TX_MAX_FREE_BUF_SZ];
/* check DD bits on threshold descriptor */
- if ((txq->tx_ring[txq->tx_next_dd].cmd_type_offset_bsz &
+ if ((txq->i40e_tx_ring[txq->tx_next_dd].cmd_type_offset_bsz &
rte_cpu_to_le_64(I40E_TXD_QW1_DTYPE_MASK)) !=
rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DESC_DONE))
return 0;
diff --git a/drivers/net/i40e/i40e_rxtx_vec_neon.c b/drivers/net/i40e/i40e_rxtx_vec_neon.c
index 2df7f3fed2..23aaf3a739 100644
--- a/drivers/net/i40e/i40e_rxtx_vec_neon.c
+++ b/drivers/net/i40e/i40e_rxtx_vec_neon.c
@@ -695,7 +695,7 @@ i40e_xmit_fixed_burst_vec(void *__rte_restrict tx_queue,
return 0;
tx_id = txq->tx_tail;
- txdp = &txq->tx_ring[tx_id];
+ txdp = &txq->i40e_tx_ring[tx_id];
txep = &txq->sw_ring[tx_id];
txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);
@@ -715,7 +715,7 @@ i40e_xmit_fixed_burst_vec(void *__rte_restrict tx_queue,
txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
/* avoid reach the end of ring */
- txdp = &txq->tx_ring[tx_id];
+ txdp = &txq->i40e_tx_ring[tx_id];
txep = &txq->sw_ring[tx_id];
}
@@ -725,7 +725,7 @@ i40e_xmit_fixed_burst_vec(void *__rte_restrict tx_queue,
tx_id = (uint16_t)(tx_id + nb_commit);
if (tx_id > txq->tx_next_rs) {
- txq->tx_ring[txq->tx_next_rs].cmd_type_offset_bsz |=
+ txq->i40e_tx_ring[txq->tx_next_rs].cmd_type_offset_bsz |=
rte_cpu_to_le_64(((uint64_t)I40E_TX_DESC_CMD_RS) <<
I40E_TXD_QW1_CMD_SHIFT);
txq->tx_next_rs =
diff --git a/drivers/net/i40e/i40e_rxtx_vec_sse.c b/drivers/net/i40e/i40e_rxtx_vec_sse.c
index 23fbd9f852..499b6e6ff7 100644
--- a/drivers/net/i40e/i40e_rxtx_vec_sse.c
+++ b/drivers/net/i40e/i40e_rxtx_vec_sse.c
@@ -714,7 +714,7 @@ i40e_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
return 0;
tx_id = txq->tx_tail;
- txdp = &txq->tx_ring[tx_id];
+ txdp = &txq->i40e_tx_ring[tx_id];
txep = &txq->sw_ring[tx_id];
txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);
@@ -734,7 +734,7 @@ i40e_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
/* avoid reach the end of ring */
- txdp = &txq->tx_ring[tx_id];
+ txdp = &txq->i40e_tx_ring[tx_id];
txep = &txq->sw_ring[tx_id];
}
@@ -744,7 +744,7 @@ i40e_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
tx_id = (uint16_t)(tx_id + nb_commit);
if (tx_id > txq->tx_next_rs) {
- txq->tx_ring[txq->tx_next_rs].cmd_type_offset_bsz |=
+ txq->i40e_tx_ring[txq->tx_next_rs].cmd_type_offset_bsz |=
rte_cpu_to_le_64(((uint64_t)I40E_TX_DESC_CMD_RS) <<
I40E_TXD_QW1_CMD_SHIFT);
txq->tx_next_rs =
diff --git a/drivers/net/iavf/iavf_rxtx.c b/drivers/net/iavf/iavf_rxtx.c
index b6d287245f..2d0f8eda79 100644
--- a/drivers/net/iavf/iavf_rxtx.c
+++ b/drivers/net/iavf/iavf_rxtx.c
@@ -296,11 +296,11 @@ reset_tx_queue(struct iavf_tx_queue *txq)
txe = txq->sw_ring;
size = sizeof(struct iavf_tx_desc) * txq->nb_tx_desc;
for (i = 0; i < size; i++)
- ((volatile char *)txq->tx_ring)[i] = 0;
+ ((volatile char *)txq->iavf_tx_ring)[i] = 0;
prev = (uint16_t)(txq->nb_tx_desc - 1);
for (i = 0; i < txq->nb_tx_desc; i++) {
- txq->tx_ring[i].cmd_type_offset_bsz =
+ txq->iavf_tx_ring[i].cmd_type_offset_bsz =
rte_cpu_to_le_64(IAVF_TX_DESC_DTYPE_DESC_DONE);
txe[i].mbuf = NULL;
txe[i].last_id = i;
@@ -851,7 +851,7 @@ iavf_dev_tx_queue_setup(struct rte_eth_dev *dev,
txq->port_id = dev->data->port_id;
txq->offloads = offloads;
txq->tx_deferred_start = tx_conf->tx_deferred_start;
- txq->vsi = vsi;
+ txq->iavf_vsi = vsi;
if (iavf_ipsec_crypto_supported(adapter))
txq->ipsec_crypto_pkt_md_offset =
@@ -872,7 +872,7 @@ iavf_dev_tx_queue_setup(struct rte_eth_dev *dev,
/* Allocate TX hardware ring descriptors. */
ring_size = sizeof(struct iavf_tx_desc) * IAVF_MAX_RING_DESC;
ring_size = RTE_ALIGN(ring_size, IAVF_DMA_MEM_ALIGN);
- mz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
+ mz = rte_eth_dma_zone_reserve(dev, "iavf_tx_ring", queue_idx,
ring_size, IAVF_RING_BASE_ALIGN,
socket_id);
if (!mz) {
@@ -882,7 +882,7 @@ iavf_dev_tx_queue_setup(struct rte_eth_dev *dev,
return -ENOMEM;
}
txq->tx_ring_dma = mz->iova;
- txq->tx_ring = (struct iavf_tx_desc *)mz->addr;
+ txq->iavf_tx_ring = (struct iavf_tx_desc *)mz->addr;
txq->mz = mz;
reset_tx_queue(txq);
@@ -2385,7 +2385,7 @@ iavf_xmit_cleanup(struct iavf_tx_queue *txq)
uint16_t desc_to_clean_to;
uint16_t nb_tx_to_clean;
- volatile struct iavf_tx_desc *txd = txq->tx_ring;
+ volatile struct iavf_tx_desc *txd = txq->iavf_tx_ring;
desc_to_clean_to = (uint16_t)(last_desc_cleaned + txq->tx_rs_thresh);
if (desc_to_clean_to >= nb_tx_desc)
@@ -2796,7 +2796,7 @@ uint16_t
iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
struct iavf_tx_queue *txq = tx_queue;
- volatile struct iavf_tx_desc *txr = txq->tx_ring;
+ volatile struct iavf_tx_desc *txr = txq->iavf_tx_ring;
struct ieth_tx_entry *txe_ring = txq->sw_ring;
struct ieth_tx_entry *txe, *txn;
struct rte_mbuf *mb, *mb_seg;
@@ -3803,10 +3803,10 @@ iavf_xmit_pkts_no_poll(void *tx_queue, struct rte_mbuf **tx_pkts,
struct iavf_tx_queue *txq = tx_queue;
enum iavf_tx_burst_type tx_burst_type;
- if (!txq->vsi || txq->vsi->adapter->no_poll)
+ if (!txq->iavf_vsi || txq->iavf_vsi->adapter->no_poll)
return 0;
- tx_burst_type = txq->vsi->adapter->tx_burst_type;
+ tx_burst_type = txq->iavf_vsi->adapter->tx_burst_type;
return iavf_tx_pkt_burst_ops[tx_burst_type](tx_queue,
tx_pkts, nb_pkts);
@@ -3824,9 +3824,9 @@ iavf_xmit_pkts_check(void *tx_queue, struct rte_mbuf **tx_pkts,
const char *reason = NULL;
bool pkt_error = false;
struct iavf_tx_queue *txq = tx_queue;
- struct iavf_adapter *adapter = txq->vsi->adapter;
+ struct iavf_adapter *adapter = txq->iavf_vsi->adapter;
enum iavf_tx_burst_type tx_burst_type =
- txq->vsi->adapter->tx_burst_type;
+ txq->iavf_vsi->adapter->tx_burst_type;
for (idx = 0; idx < nb_pkts; idx++) {
mb = tx_pkts[idx];
@@ -4440,7 +4440,7 @@ iavf_dev_tx_desc_status(void *tx_queue, uint16_t offset)
desc -= txq->nb_tx_desc;
}
- status = &txq->tx_ring[desc].cmd_type_offset_bsz;
+ status = &txq->iavf_tx_ring[desc].cmd_type_offset_bsz;
mask = rte_le_to_cpu_64(IAVF_TXD_QW1_DTYPE_MASK);
expect = rte_cpu_to_le_64(
IAVF_TX_DESC_DTYPE_DESC_DONE << IAVF_TXD_QW1_DTYPE_SHIFT);
diff --git a/drivers/net/iavf/iavf_rxtx.h b/drivers/net/iavf/iavf_rxtx.h
index 759f1759a7..cba6d0573b 100644
--- a/drivers/net/iavf/iavf_rxtx.h
+++ b/drivers/net/iavf/iavf_rxtx.h
@@ -276,7 +276,7 @@ struct iavf_rx_queue {
/* Structure associated with each TX queue. */
struct iavf_tx_queue {
const struct rte_memzone *mz; /* memzone for Tx ring */
- volatile struct iavf_tx_desc *tx_ring; /* Tx ring virtual address */
+ volatile struct iavf_tx_desc *iavf_tx_ring; /* Tx ring virtual address */
rte_iova_t tx_ring_dma; /* Tx ring DMA address */
struct ieth_tx_entry *sw_ring; /* address array of SW ring */
uint16_t nb_tx_desc; /* ring length */
@@ -289,7 +289,7 @@ struct iavf_tx_queue {
uint16_t tx_free_thresh;
uint16_t tx_rs_thresh;
uint8_t rel_mbufs_type;
- struct iavf_vsi *vsi; /**< the VSI this queue belongs to */
+ struct iavf_vsi *iavf_vsi; /**< the VSI this queue belongs to */
uint16_t port_id;
uint16_t queue_id;
diff --git a/drivers/net/iavf/iavf_rxtx_vec_avx2.c b/drivers/net/iavf/iavf_rxtx_vec_avx2.c
index a63763cdec..94cf9c0038 100644
--- a/drivers/net/iavf/iavf_rxtx_vec_avx2.c
+++ b/drivers/net/iavf/iavf_rxtx_vec_avx2.c
@@ -1750,7 +1750,7 @@ iavf_xmit_fixed_burst_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts,
return 0;
tx_id = txq->tx_tail;
- txdp = &txq->tx_ring[tx_id];
+ txdp = &txq->iavf_tx_ring[tx_id];
txep = &txq->sw_ring[tx_id];
txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);
@@ -1771,7 +1771,7 @@ iavf_xmit_fixed_burst_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts,
txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
/* avoid reach the end of ring */
- txdp = &txq->tx_ring[tx_id];
+ txdp = &txq->iavf_tx_ring[tx_id];
txep = &txq->sw_ring[tx_id];
}
@@ -1781,7 +1781,7 @@ iavf_xmit_fixed_burst_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts,
tx_id = (uint16_t)(tx_id + nb_commit);
if (tx_id > txq->tx_next_rs) {
- txq->tx_ring[txq->tx_next_rs].cmd_type_offset_bsz |=
+ txq->iavf_tx_ring[txq->tx_next_rs].cmd_type_offset_bsz |=
rte_cpu_to_le_64(((uint64_t)IAVF_TX_DESC_CMD_RS) <<
IAVF_TXD_QW1_CMD_SHIFT);
txq->tx_next_rs =
diff --git a/drivers/net/iavf/iavf_rxtx_vec_avx512.c b/drivers/net/iavf/iavf_rxtx_vec_avx512.c
index e04d66d757..dd45bc0fd9 100644
--- a/drivers/net/iavf/iavf_rxtx_vec_avx512.c
+++ b/drivers/net/iavf/iavf_rxtx_vec_avx512.c
@@ -1854,7 +1854,7 @@ iavf_tx_free_bufs_avx512(struct iavf_tx_queue *txq)
struct rte_mbuf *m, *free[IAVF_VPMD_TX_MAX_FREE_BUF];
/* check DD bits on threshold descriptor */
- if ((txq->tx_ring[txq->tx_next_dd].cmd_type_offset_bsz &
+ if ((txq->iavf_tx_ring[txq->tx_next_dd].cmd_type_offset_bsz &
rte_cpu_to_le_64(IAVF_TXD_QW1_DTYPE_MASK)) !=
rte_cpu_to_le_64(IAVF_TX_DESC_DTYPE_DESC_DONE))
return 0;
@@ -2327,7 +2327,7 @@ iavf_xmit_fixed_burst_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
return 0;
tx_id = txq->tx_tail;
- txdp = &txq->tx_ring[tx_id];
+ txdp = &txq->iavf_tx_ring[tx_id];
txep = (void *)txq->sw_ring;
txep += tx_id;
@@ -2349,7 +2349,7 @@ iavf_xmit_fixed_burst_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
/* avoid reach the end of ring */
- txdp = &txq->tx_ring[tx_id];
+ txdp = &txq->iavf_tx_ring[tx_id];
txep = (void *)txq->sw_ring;
txep += tx_id;
}
@@ -2360,7 +2360,7 @@ iavf_xmit_fixed_burst_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
tx_id = (uint16_t)(tx_id + nb_commit);
if (tx_id > txq->tx_next_rs) {
- txq->tx_ring[txq->tx_next_rs].cmd_type_offset_bsz |=
+ txq->iavf_tx_ring[txq->tx_next_rs].cmd_type_offset_bsz |=
rte_cpu_to_le_64(((uint64_t)IAVF_TX_DESC_CMD_RS) <<
IAVF_TXD_QW1_CMD_SHIFT);
txq->tx_next_rs =
@@ -2396,7 +2396,7 @@ iavf_xmit_fixed_burst_vec_avx512_ctx(void *tx_queue, struct rte_mbuf **tx_pkts,
nb_pkts = nb_commit >> 1;
tx_id = txq->tx_tail;
- txdp = &txq->tx_ring[tx_id];
+ txdp = &txq->iavf_tx_ring[tx_id];
txep = (void *)txq->sw_ring;
txep += (tx_id >> 1);
@@ -2417,7 +2417,7 @@ iavf_xmit_fixed_burst_vec_avx512_ctx(void *tx_queue, struct rte_mbuf **tx_pkts,
txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
tx_id = 0;
/* avoid reach the end of ring */
- txdp = txq->tx_ring;
+ txdp = txq->iavf_tx_ring;
txep = (void *)txq->sw_ring;
}
@@ -2428,7 +2428,7 @@ iavf_xmit_fixed_burst_vec_avx512_ctx(void *tx_queue, struct rte_mbuf **tx_pkts,
tx_id = (uint16_t)(tx_id + nb_commit);
if (tx_id > txq->tx_next_rs) {
- txq->tx_ring[txq->tx_next_rs].cmd_type_offset_bsz |=
+ txq->iavf_tx_ring[txq->tx_next_rs].cmd_type_offset_bsz |=
rte_cpu_to_le_64(((uint64_t)IAVF_TX_DESC_CMD_RS) <<
IAVF_TXD_QW1_CMD_SHIFT);
txq->tx_next_rs =
diff --git a/drivers/net/iavf/iavf_rxtx_vec_common.h b/drivers/net/iavf/iavf_rxtx_vec_common.h
index 0a9243a684..b8b5e74b89 100644
--- a/drivers/net/iavf/iavf_rxtx_vec_common.h
+++ b/drivers/net/iavf/iavf_rxtx_vec_common.h
@@ -26,7 +26,7 @@ iavf_tx_free_bufs(struct iavf_tx_queue *txq)
struct rte_mbuf *m, *free[IAVF_VPMD_TX_MAX_FREE_BUF];
/* check DD bits on threshold descriptor */
- if ((txq->tx_ring[txq->tx_next_dd].cmd_type_offset_bsz &
+ if ((txq->iavf_tx_ring[txq->tx_next_dd].cmd_type_offset_bsz &
rte_cpu_to_le_64(IAVF_TXD_QW1_DTYPE_MASK)) !=
rte_cpu_to_le_64(IAVF_TX_DESC_DTYPE_DESC_DONE))
return 0;
diff --git a/drivers/net/iavf/iavf_rxtx_vec_sse.c b/drivers/net/iavf/iavf_rxtx_vec_sse.c
index e9d19525ae..0a896a6e6f 100644
--- a/drivers/net/iavf/iavf_rxtx_vec_sse.c
+++ b/drivers/net/iavf/iavf_rxtx_vec_sse.c
@@ -1383,7 +1383,7 @@ iavf_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
nb_commit = nb_pkts;
tx_id = txq->tx_tail;
- txdp = &txq->tx_ring[tx_id];
+ txdp = &txq->iavf_tx_ring[tx_id];
txep = &txq->sw_ring[tx_id];
txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);
@@ -1403,7 +1403,7 @@ iavf_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
/* avoid reach the end of ring */
- txdp = &txq->tx_ring[tx_id];
+ txdp = &txq->iavf_tx_ring[tx_id];
txep = &txq->sw_ring[tx_id];
}
@@ -1413,7 +1413,7 @@ iavf_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
tx_id = (uint16_t)(tx_id + nb_commit);
if (tx_id > txq->tx_next_rs) {
- txq->tx_ring[txq->tx_next_rs].cmd_type_offset_bsz |=
+ txq->iavf_tx_ring[txq->tx_next_rs].cmd_type_offset_bsz |=
rte_cpu_to_le_64(((uint64_t)IAVF_TX_DESC_CMD_RS) <<
IAVF_TXD_QW1_CMD_SHIFT);
txq->tx_next_rs =
diff --git a/drivers/net/ice/ice_dcf_ethdev.c b/drivers/net/ice/ice_dcf_ethdev.c
index f37dd2fdc1..9485494f86 100644
--- a/drivers/net/ice/ice_dcf_ethdev.c
+++ b/drivers/net/ice/ice_dcf_ethdev.c
@@ -401,11 +401,11 @@ reset_tx_queue(struct ice_tx_queue *txq)
txe = txq->sw_ring;
size = sizeof(struct ice_tx_desc) * txq->nb_tx_desc;
for (i = 0; i < size; i++)
- ((volatile char *)txq->tx_ring)[i] = 0;
+ ((volatile char *)txq->ice_tx_ring)[i] = 0;
prev = (uint16_t)(txq->nb_tx_desc - 1);
for (i = 0; i < txq->nb_tx_desc; i++) {
- txq->tx_ring[i].cmd_type_offset_bsz =
+ txq->ice_tx_ring[i].cmd_type_offset_bsz =
rte_cpu_to_le_64(IAVF_TX_DESC_DTYPE_DESC_DONE);
txe[i].mbuf = NULL;
txe[i].last_id = i;
diff --git a/drivers/net/ice/ice_rxtx.c b/drivers/net/ice/ice_rxtx.c
index 9faa878caf..df9b09ae0c 100644
--- a/drivers/net/ice/ice_rxtx.c
+++ b/drivers/net/ice/ice_rxtx.c
@@ -776,7 +776,7 @@ ice_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
if (!txq_elem)
return -ENOMEM;
- vsi = txq->vsi;
+ vsi = txq->ice_vsi;
hw = ICE_VSI_TO_HW(vsi);
pf = ICE_VSI_TO_PF(vsi);
@@ -966,7 +966,7 @@ ice_fdir_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
if (!txq_elem)
return -ENOMEM;
- vsi = txq->vsi;
+ vsi = txq->ice_vsi;
hw = ICE_VSI_TO_HW(vsi);
memset(&tx_ctx, 0, sizeof(tx_ctx));
@@ -1039,11 +1039,11 @@ ice_reset_tx_queue(struct ice_tx_queue *txq)
txe = txq->sw_ring;
size = sizeof(struct ice_tx_desc) * txq->nb_tx_desc;
for (i = 0; i < size; i++)
- ((volatile char *)txq->tx_ring)[i] = 0;
+ ((volatile char *)txq->ice_tx_ring)[i] = 0;
prev = (uint16_t)(txq->nb_tx_desc - 1);
for (i = 0; i < txq->nb_tx_desc; i++) {
- volatile struct ice_tx_desc *txd = &txq->tx_ring[i];
+ volatile struct ice_tx_desc *txd = &txq->ice_tx_ring[i];
txd->cmd_type_offset_bsz =
rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE);
@@ -1153,7 +1153,7 @@ ice_fdir_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
PMD_DRV_LOG(INFO, "TX queue %u not started", tx_queue_id);
return 0;
}
- vsi = txq->vsi;
+ vsi = txq->ice_vsi;
q_ids[0] = txq->reg_idx;
q_teids[0] = txq->q_teid;
@@ -1479,7 +1479,7 @@ ice_tx_queue_setup(struct rte_eth_dev *dev,
/* Allocate TX hardware ring descriptors. */
ring_size = sizeof(struct ice_tx_desc) * ICE_MAX_RING_DESC;
ring_size = RTE_ALIGN(ring_size, ICE_DMA_MEM_ALIGN);
- tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
+ tz = rte_eth_dma_zone_reserve(dev, "ice_tx_ring", queue_idx,
ring_size, ICE_RING_BASE_ALIGN,
socket_id);
if (!tz) {
@@ -1500,11 +1500,11 @@ ice_tx_queue_setup(struct rte_eth_dev *dev,
txq->reg_idx = vsi->base_queue + queue_idx;
txq->port_id = dev->data->port_id;
txq->offloads = offloads;
- txq->vsi = vsi;
+ txq->ice_vsi = vsi;
txq->tx_deferred_start = tx_conf->tx_deferred_start;
txq->tx_ring_dma = tz->iova;
- txq->tx_ring = tz->addr;
+ txq->ice_tx_ring = tz->addr;
/* Allocate software ring */
txq->sw_ring =
@@ -2372,7 +2372,7 @@ ice_tx_descriptor_status(void *tx_queue, uint16_t offset)
desc -= txq->nb_tx_desc;
}
- status = &txq->tx_ring[desc].cmd_type_offset_bsz;
+ status = &txq->ice_tx_ring[desc].cmd_type_offset_bsz;
mask = rte_cpu_to_le_64(ICE_TXD_QW1_DTYPE_M);
expect = rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE <<
ICE_TXD_QW1_DTYPE_S);
@@ -2452,10 +2452,10 @@ ice_fdir_setup_tx_resources(struct ice_pf *pf)
txq->nb_tx_desc = ICE_FDIR_NUM_TX_DESC;
txq->queue_id = ICE_FDIR_QUEUE_ID;
txq->reg_idx = pf->fdir.fdir_vsi->base_queue;
- txq->vsi = pf->fdir.fdir_vsi;
+ txq->ice_vsi = pf->fdir.fdir_vsi;
txq->tx_ring_dma = tz->iova;
- txq->tx_ring = (struct ice_tx_desc *)tz->addr;
+ txq->ice_tx_ring = (struct ice_tx_desc *)tz->addr;
/*
* don't need to allocate software ring and reset for the fdir
* program queue just set the queue has been configured.
@@ -2838,7 +2838,7 @@ static inline int
ice_xmit_cleanup(struct ice_tx_queue *txq)
{
struct ieth_tx_entry *sw_ring = txq->sw_ring;
- volatile struct ice_tx_desc *txd = txq->tx_ring;
+ volatile struct ice_tx_desc *txd = txq->ice_tx_ring;
uint16_t last_desc_cleaned = txq->last_desc_cleaned;
uint16_t nb_tx_desc = txq->nb_tx_desc;
uint16_t desc_to_clean_to;
@@ -2959,7 +2959,7 @@ uint16_t
ice_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
struct ice_tx_queue *txq;
- volatile struct ice_tx_desc *tx_ring;
+ volatile struct ice_tx_desc *ice_tx_ring;
volatile struct ice_tx_desc *txd;
struct ieth_tx_entry *sw_ring;
struct ieth_tx_entry *txe, *txn;
@@ -2981,7 +2981,7 @@ ice_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
txq = tx_queue;
sw_ring = txq->sw_ring;
- tx_ring = txq->tx_ring;
+ ice_tx_ring = txq->ice_tx_ring;
tx_id = txq->tx_tail;
txe = &sw_ring[tx_id];
@@ -3064,7 +3064,7 @@ ice_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
/* Setup TX context descriptor if required */
volatile struct ice_tx_ctx_desc *ctx_txd =
(volatile struct ice_tx_ctx_desc *)
- &tx_ring[tx_id];
+ &ice_tx_ring[tx_id];
uint16_t cd_l2tag2 = 0;
uint64_t cd_type_cmd_tso_mss = ICE_TX_DESC_DTYPE_CTX;
@@ -3082,7 +3082,7 @@ ice_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
cd_type_cmd_tso_mss |=
((uint64_t)ICE_TX_CTX_DESC_TSYN <<
ICE_TXD_CTX_QW1_CMD_S) |
- (((uint64_t)txq->vsi->adapter->ptp_tx_index <<
+ (((uint64_t)txq->ice_vsi->adapter->ptp_tx_index <<
ICE_TXD_CTX_QW1_TSYN_S) & ICE_TXD_CTX_QW1_TSYN_M);
ctx_txd->tunneling_params =
@@ -3106,7 +3106,7 @@ ice_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
m_seg = tx_pkt;
do {
- txd = &tx_ring[tx_id];
+ txd = &ice_tx_ring[tx_id];
txn = &sw_ring[txe->next_id];
if (txe->mbuf)
@@ -3134,7 +3134,7 @@ ice_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
txe->last_id = tx_last;
tx_id = txe->next_id;
txe = txn;
- txd = &tx_ring[tx_id];
+ txd = &ice_tx_ring[tx_id];
txn = &sw_ring[txe->next_id];
}
@@ -3187,7 +3187,7 @@ ice_tx_free_bufs(struct ice_tx_queue *txq)
struct ieth_tx_entry *txep;
uint16_t i;
- if ((txq->tx_ring[txq->tx_next_dd].cmd_type_offset_bsz &
+ if ((txq->ice_tx_ring[txq->tx_next_dd].cmd_type_offset_bsz &
rte_cpu_to_le_64(ICE_TXD_QW1_DTYPE_M)) !=
rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE))
return 0;
@@ -3360,7 +3360,7 @@ static inline void
ice_tx_fill_hw_ring(struct ice_tx_queue *txq, struct rte_mbuf **pkts,
uint16_t nb_pkts)
{
- volatile struct ice_tx_desc *txdp = &txq->tx_ring[txq->tx_tail];
+ volatile struct ice_tx_desc *txdp = &txq->ice_tx_ring[txq->tx_tail];
struct ieth_tx_entry *txep = &txq->sw_ring[txq->tx_tail];
const int N_PER_LOOP = 4;
const int N_PER_LOOP_MASK = N_PER_LOOP - 1;
@@ -3393,7 +3393,7 @@ tx_xmit_pkts(struct ice_tx_queue *txq,
struct rte_mbuf **tx_pkts,
uint16_t nb_pkts)
{
- volatile struct ice_tx_desc *txr = txq->tx_ring;
+ volatile struct ice_tx_desc *txr = txq->ice_tx_ring;
uint16_t n = 0;
/**
@@ -3722,7 +3722,7 @@ ice_xmit_pkts_check(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
bool pkt_error = false;
uint16_t good_pkts = nb_pkts;
const char *reason = NULL;
- struct ice_adapter *adapter = txq->vsi->adapter;
+ struct ice_adapter *adapter = txq->ice_vsi->adapter;
uint64_t ol_flags;
for (idx = 0; idx < nb_pkts; idx++) {
@@ -4701,11 +4701,11 @@ ice_fdir_programming(struct ice_pf *pf, struct ice_fltr_desc *fdir_desc)
uint16_t i;
fdirdp = (volatile struct ice_fltr_desc *)
- (&txq->tx_ring[txq->tx_tail]);
+ (&txq->ice_tx_ring[txq->tx_tail]);
fdirdp->qidx_compq_space_stat = fdir_desc->qidx_compq_space_stat;
fdirdp->dtype_cmd_vsi_fdid = fdir_desc->dtype_cmd_vsi_fdid;
- txdp = &txq->tx_ring[txq->tx_tail + 1];
+ txdp = &txq->ice_tx_ring[txq->tx_tail + 1];
txdp->buf_addr = rte_cpu_to_le_64(pf->fdir.dma_addr);
td_cmd = ICE_TX_DESC_CMD_EOP |
ICE_TX_DESC_CMD_RS |
diff --git a/drivers/net/ice/ice_rxtx.h b/drivers/net/ice/ice_rxtx.h
index 615bed8a60..91f8ed2036 100644
--- a/drivers/net/ice/ice_rxtx.h
+++ b/drivers/net/ice/ice_rxtx.h
@@ -148,7 +148,7 @@ struct ice_rx_queue {
struct ice_tx_queue {
uint16_t nb_tx_desc; /* number of TX descriptors */
rte_iova_t tx_ring_dma; /* TX ring DMA address */
- volatile struct ice_tx_desc *tx_ring; /* TX ring virtual address */
+ volatile struct ice_tx_desc *ice_tx_ring; /* TX ring virtual address */
struct ieth_tx_entry *sw_ring; /* virtual address of SW ring */
uint16_t tx_tail; /* current value of tail register */
volatile uint8_t *qtx_tail; /* register address of tail */
@@ -171,7 +171,7 @@ struct ice_tx_queue {
uint32_t q_teid; /* TX schedule node id. */
uint16_t reg_idx;
uint64_t offloads;
- struct ice_vsi *vsi; /* the VSI this queue belongs to */
+ struct ice_vsi *ice_vsi; /* the VSI this queue belongs to */
uint16_t tx_next_dd;
uint16_t tx_next_rs;
uint64_t mbuf_errors;
diff --git a/drivers/net/ice/ice_rxtx_vec_avx2.c b/drivers/net/ice/ice_rxtx_vec_avx2.c
index 657b40858b..d4c76686f7 100644
--- a/drivers/net/ice/ice_rxtx_vec_avx2.c
+++ b/drivers/net/ice/ice_rxtx_vec_avx2.c
@@ -874,7 +874,7 @@ ice_xmit_fixed_burst_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts,
return 0;
tx_id = txq->tx_tail;
- txdp = &txq->tx_ring[tx_id];
+ txdp = &txq->ice_tx_ring[tx_id];
txep = &txq->sw_ring[tx_id];
txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);
@@ -895,7 +895,7 @@ ice_xmit_fixed_burst_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts,
txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
/* avoid reach the end of ring */
- txdp = &txq->tx_ring[tx_id];
+ txdp = &txq->ice_tx_ring[tx_id];
txep = &txq->sw_ring[tx_id];
}
@@ -905,7 +905,7 @@ ice_xmit_fixed_burst_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts,
tx_id = (uint16_t)(tx_id + nb_commit);
if (tx_id > txq->tx_next_rs) {
- txq->tx_ring[txq->tx_next_rs].cmd_type_offset_bsz |=
+ txq->ice_tx_ring[txq->tx_next_rs].cmd_type_offset_bsz |=
rte_cpu_to_le_64(((uint64_t)ICE_TX_DESC_CMD_RS) <<
ICE_TXD_QW1_CMD_S);
txq->tx_next_rs =
diff --git a/drivers/net/ice/ice_rxtx_vec_avx512.c b/drivers/net/ice/ice_rxtx_vec_avx512.c
index 5ba6d15ef0..1126a30bf8 100644
--- a/drivers/net/ice/ice_rxtx_vec_avx512.c
+++ b/drivers/net/ice/ice_rxtx_vec_avx512.c
@@ -869,7 +869,7 @@ ice_tx_free_bufs_avx512(struct ice_tx_queue *txq)
struct rte_mbuf *m, *free[ICE_TX_MAX_FREE_BUF_SZ];
/* check DD bits on threshold descriptor */
- if ((txq->tx_ring[txq->tx_next_dd].cmd_type_offset_bsz &
+ if ((txq->ice_tx_ring[txq->tx_next_dd].cmd_type_offset_bsz &
rte_cpu_to_le_64(ICE_TXD_QW1_DTYPE_M)) !=
rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE))
return 0;
@@ -1071,7 +1071,7 @@ ice_xmit_fixed_burst_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
return 0;
tx_id = txq->tx_tail;
- txdp = &txq->tx_ring[tx_id];
+ txdp = &txq->ice_tx_ring[tx_id];
txep = (void *)txq->sw_ring;
txep += tx_id;
@@ -1093,7 +1093,7 @@ ice_xmit_fixed_burst_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
/* avoid reach the end of ring */
- txdp = txq->tx_ring;
+ txdp = txq->ice_tx_ring;
txep = (void *)txq->sw_ring;
}
@@ -1103,7 +1103,7 @@ ice_xmit_fixed_burst_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
tx_id = (uint16_t)(tx_id + nb_commit);
if (tx_id > txq->tx_next_rs) {
- txq->tx_ring[txq->tx_next_rs].cmd_type_offset_bsz |=
+ txq->ice_tx_ring[txq->tx_next_rs].cmd_type_offset_bsz |=
rte_cpu_to_le_64(((uint64_t)ICE_TX_DESC_CMD_RS) <<
ICE_TXD_QW1_CMD_S);
txq->tx_next_rs =
diff --git a/drivers/net/ice/ice_rxtx_vec_common.h b/drivers/net/ice/ice_rxtx_vec_common.h
index 5266ba2d53..b2e3c0f6b7 100644
--- a/drivers/net/ice/ice_rxtx_vec_common.h
+++ b/drivers/net/ice/ice_rxtx_vec_common.h
@@ -22,7 +22,7 @@ ice_tx_free_bufs_vec(struct ice_tx_queue *txq)
struct rte_mbuf *m, *free[ICE_TX_MAX_FREE_BUF_SZ];
/* check DD bits on threshold descriptor */
- if ((txq->tx_ring[txq->tx_next_dd].cmd_type_offset_bsz &
+ if ((txq->ice_tx_ring[txq->tx_next_dd].cmd_type_offset_bsz &
rte_cpu_to_le_64(ICE_TXD_QW1_DTYPE_M)) !=
rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE))
return 0;
@@ -121,7 +121,7 @@ _ice_tx_queue_release_mbufs_vec(struct ice_tx_queue *txq)
i = txq->tx_next_dd - txq->tx_rs_thresh + 1;
#ifdef __AVX512VL__
- struct rte_eth_dev *dev = &rte_eth_devices[txq->vsi->adapter->pf.dev_data->port_id];
+ struct rte_eth_dev *dev = &rte_eth_devices[txq->ice_vsi->adapter->pf.dev_data->port_id];
if (dev->tx_pkt_burst == ice_xmit_pkts_vec_avx512 ||
dev->tx_pkt_burst == ice_xmit_pkts_vec_avx512_offload) {
diff --git a/drivers/net/ice/ice_rxtx_vec_sse.c b/drivers/net/ice/ice_rxtx_vec_sse.c
index 4f603976c5..5db66f3c6a 100644
--- a/drivers/net/ice/ice_rxtx_vec_sse.c
+++ b/drivers/net/ice/ice_rxtx_vec_sse.c
@@ -717,7 +717,7 @@ ice_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
return 0;
tx_id = txq->tx_tail;
- txdp = &txq->tx_ring[tx_id];
+ txdp = &txq->ice_tx_ring[tx_id];
txep = &txq->sw_ring[tx_id];
txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);
@@ -737,7 +737,7 @@ ice_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
/* avoid reach the end of ring */
- txdp = &txq->tx_ring[tx_id];
+ txdp = &txq->ice_tx_ring[tx_id];
txep = &txq->sw_ring[tx_id];
}
@@ -747,7 +747,7 @@ ice_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
tx_id = (uint16_t)(tx_id + nb_commit);
if (tx_id > txq->tx_next_rs) {
- txq->tx_ring[txq->tx_next_rs].cmd_type_offset_bsz |=
+ txq->ice_tx_ring[txq->tx_next_rs].cmd_type_offset_bsz |=
rte_cpu_to_le_64(((uint64_t)ICE_TX_DESC_CMD_RS) <<
ICE_TXD_QW1_CMD_S);
txq->tx_next_rs =
diff --git a/drivers/net/ixgbe/ixgbe_recycle_mbufs_vec_common.c b/drivers/net/ixgbe/ixgbe_recycle_mbufs_vec_common.c
index 4c8f6b7b64..546825f334 100644
--- a/drivers/net/ixgbe/ixgbe_recycle_mbufs_vec_common.c
+++ b/drivers/net/ixgbe/ixgbe_recycle_mbufs_vec_common.c
@@ -72,7 +72,7 @@ ixgbe_recycle_tx_mbufs_reuse_vec(void *tx_queue,
return 0;
/* check DD bits on threshold descriptor */
- status = txq->tx_ring[txq->tx_next_dd].wb.status;
+ status = txq->ixgbe_tx_ring[txq->tx_next_dd].wb.status;
if (!(status & IXGBE_ADVTXD_STAT_DD))
return 0;
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c
index 96a1021e48..c3b704c201 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx.c
@@ -106,7 +106,7 @@ ixgbe_tx_free_bufs(struct ixgbe_tx_queue *txq)
struct rte_mbuf *m, *free[RTE_IXGBE_TX_MAX_FREE_BUF_SZ];
/* check DD bit on threshold descriptor */
- status = txq->tx_ring[txq->tx_next_dd].wb.status;
+ status = txq->ixgbe_tx_ring[txq->tx_next_dd].wb.status;
if (!(status & rte_cpu_to_le_32(IXGBE_ADVTXD_STAT_DD)))
return 0;
@@ -198,7 +198,7 @@ static inline void
ixgbe_tx_fill_hw_ring(struct ixgbe_tx_queue *txq, struct rte_mbuf **pkts,
uint16_t nb_pkts)
{
- volatile union ixgbe_adv_tx_desc *txdp = &(txq->tx_ring[txq->tx_tail]);
+ volatile union ixgbe_adv_tx_desc *txdp = &(txq->ixgbe_tx_ring[txq->tx_tail]);
struct ieth_tx_entry *txep = &(txq->sw_ring[txq->tx_tail]);
const int N_PER_LOOP = 4;
const int N_PER_LOOP_MASK = N_PER_LOOP-1;
@@ -232,7 +232,7 @@ tx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts)
{
struct ixgbe_tx_queue *txq = (struct ixgbe_tx_queue *)tx_queue;
- volatile union ixgbe_adv_tx_desc *tx_r = txq->tx_ring;
+ volatile union ixgbe_adv_tx_desc *tx_r = txq->ixgbe_tx_ring;
uint16_t n = 0;
/*
@@ -564,7 +564,7 @@ static inline int
ixgbe_xmit_cleanup(struct ixgbe_tx_queue *txq)
{
struct ieth_tx_entry *sw_ring = txq->sw_ring;
- volatile union ixgbe_adv_tx_desc *txr = txq->tx_ring;
+ volatile union ixgbe_adv_tx_desc *txr = txq->ixgbe_tx_ring;
uint16_t last_desc_cleaned = txq->last_desc_cleaned;
uint16_t nb_tx_desc = txq->nb_tx_desc;
uint16_t desc_to_clean_to;
@@ -652,7 +652,7 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
tx_offload.data[1] = 0;
txq = tx_queue;
sw_ring = txq->sw_ring;
- txr = txq->tx_ring;
+ txr = txq->ixgbe_tx_ring;
tx_id = txq->tx_tail;
txe = &sw_ring[tx_id];
txp = NULL;
@@ -2495,13 +2495,13 @@ ixgbe_reset_tx_queue(struct ixgbe_tx_queue *txq)
/* Zero out HW ring memory */
for (i = 0; i < txq->nb_tx_desc; i++) {
- txq->tx_ring[i] = zeroed_desc;
+ txq->ixgbe_tx_ring[i] = zeroed_desc;
}
/* Initialize SW ring entries */
prev = (uint16_t) (txq->nb_tx_desc - 1);
for (i = 0; i < txq->nb_tx_desc; i++) {
- volatile union ixgbe_adv_tx_desc *txd = &txq->tx_ring[i];
+ volatile union ixgbe_adv_tx_desc *txd = &txq->ixgbe_tx_ring[i];
txd->wb.status = rte_cpu_to_le_32(IXGBE_TXD_STAT_DD);
txe[i].mbuf = NULL;
@@ -2751,7 +2751,7 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
* handle the maximum ring size is allocated in order to allow for
* resizing in later calls to the queue setup function.
*/
- tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
+ tz = rte_eth_dma_zone_reserve(dev, "ixgbe_tx_ring", queue_idx,
sizeof(union ixgbe_adv_tx_desc) * IXGBE_MAX_RING_DESC,
IXGBE_ALIGN, socket_id);
if (tz == NULL) {
@@ -2791,7 +2791,7 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
txq->qtx_tail = IXGBE_PCI_REG_ADDR(hw, IXGBE_TDT(txq->reg_idx));
txq->tx_ring_dma = tz->iova;
- txq->tx_ring = (union ixgbe_adv_tx_desc *) tz->addr;
+ txq->ixgbe_tx_ring = (union ixgbe_adv_tx_desc *) tz->addr;
/* Allocate software ring */
txq->sw_ring = rte_zmalloc_socket("txq->sw_ring",
@@ -2802,7 +2802,7 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
return -ENOMEM;
}
PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
- txq->sw_ring, txq->tx_ring, txq->tx_ring_dma);
+ txq->sw_ring, txq->ixgbe_tx_ring, txq->tx_ring_dma);
/* set up vector or scalar TX function as appropriate */
ixgbe_set_tx_function(dev, txq);
@@ -3328,7 +3328,7 @@ ixgbe_dev_tx_descriptor_status(void *tx_queue, uint16_t offset)
desc -= txq->nb_tx_desc;
}
- status = &txq->tx_ring[desc].wb.status;
+ status = &txq->ixgbe_tx_ring[desc].wb.status;
if (*status & rte_cpu_to_le_32(IXGBE_ADVTXD_STAT_DD))
return RTE_ETH_TX_DESC_DONE;
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.h b/drivers/net/ixgbe/ixgbe_rxtx.h
index e3e6ebb9e8..4e437f95e3 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.h
+++ b/drivers/net/ixgbe/ixgbe_rxtx.h
@@ -185,7 +185,7 @@ struct ixgbe_advctx_info {
*/
struct ixgbe_tx_queue {
/** TX ring virtual address. */
- volatile union ixgbe_adv_tx_desc *tx_ring;
+ volatile union ixgbe_adv_tx_desc *ixgbe_tx_ring;
rte_iova_t tx_ring_dma; /**< TX ring DMA address. */
union {
struct ieth_tx_entry *sw_ring; /**< address of SW ring for scalar PMD. */
diff --git a/drivers/net/ixgbe/ixgbe_rxtx_vec_common.h b/drivers/net/ixgbe/ixgbe_rxtx_vec_common.h
index d25875935e..fc254ef3d3 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx_vec_common.h
+++ b/drivers/net/ixgbe/ixgbe_rxtx_vec_common.h
@@ -22,7 +22,7 @@ ixgbe_tx_free_bufs(struct ixgbe_tx_queue *txq)
struct rte_mbuf *m, *free[RTE_IXGBE_TX_MAX_FREE_BUF_SZ];
/* check DD bit on threshold descriptor */
- status = txq->tx_ring[txq->tx_next_dd].wb.status;
+ status = txq->ixgbe_tx_ring[txq->tx_next_dd].wb.status;
if (!(status & IXGBE_ADVTXD_STAT_DD))
return 0;
@@ -154,11 +154,11 @@ _ixgbe_reset_tx_queue_vec(struct ixgbe_tx_queue *txq)
/* Zero out HW ring memory */
for (i = 0; i < txq->nb_tx_desc; i++)
- txq->tx_ring[i] = zeroed_desc;
+ txq->ixgbe_tx_ring[i] = zeroed_desc;
/* Initialize SW ring entries */
for (i = 0; i < txq->nb_tx_desc; i++) {
- volatile union ixgbe_adv_tx_desc *txd = &txq->tx_ring[i];
+ volatile union ixgbe_adv_tx_desc *txd = &txq->ixgbe_tx_ring[i];
txd->wb.status = IXGBE_TXD_STAT_DD;
txe[i].mbuf = NULL;
diff --git a/drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c b/drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c
index 100f77cea6..e4381802c8 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c
@@ -590,7 +590,7 @@ ixgbe_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
return 0;
tx_id = txq->tx_tail;
- txdp = &txq->tx_ring[tx_id];
+ txdp = &txq->ixgbe_tx_ring[tx_id];
txep = &txq->sw_ring_v[tx_id];
txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);
@@ -610,7 +610,7 @@ ixgbe_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
/* avoid reach the end of ring */
- txdp = &txq->tx_ring[tx_id];
+ txdp = &txq->ixgbe_tx_ring[tx_id];
txep = &txq->sw_ring_v[tx_id];
}
@@ -620,7 +620,7 @@ ixgbe_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
tx_id = (uint16_t)(tx_id + nb_commit);
if (tx_id > txq->tx_next_rs) {
- txq->tx_ring[txq->tx_next_rs].read.cmd_type_len |=
+ txq->ixgbe_tx_ring[txq->tx_next_rs].read.cmd_type_len |=
rte_cpu_to_le_32(IXGBE_ADVTXD_DCMD_RS);
txq->tx_next_rs = (uint16_t)(txq->tx_next_rs +
txq->tx_rs_thresh);
diff --git a/drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c b/drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c
index 017e3d6674..4c8cc22f59 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c
@@ -712,7 +712,7 @@ ixgbe_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
return 0;
tx_id = txq->tx_tail;
- txdp = &txq->tx_ring[tx_id];
+ txdp = &txq->ixgbe_tx_ring[tx_id];
txep = &txq->sw_ring_v[tx_id];
txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);
@@ -733,7 +733,7 @@ ixgbe_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
/* avoid reach the end of ring */
- txdp = &(txq->tx_ring[tx_id]);
+ txdp = &(txq->ixgbe_tx_ring[tx_id]);
txep = &txq->sw_ring_v[tx_id];
}
@@ -743,7 +743,7 @@ ixgbe_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
tx_id = (uint16_t)(tx_id + nb_commit);
if (tx_id > txq->tx_next_rs) {
- txq->tx_ring[txq->tx_next_rs].read.cmd_type_len |=
+ txq->ixgbe_tx_ring[txq->tx_next_rs].read.cmd_type_len |=
rte_cpu_to_le_32(IXGBE_ADVTXD_DCMD_RS);
txq->tx_next_rs = (uint16_t)(txq->tx_next_rs +
txq->tx_rs_thresh);
--
2.43.0
next prev parent reply other threads:[~2024-11-22 12:55 UTC|newest]
Thread overview: 22+ messages / expand[flat|nested] mbox.gz Atom feed top
2024-11-22 12:53 [RFC PATCH 00/21] Reduce code duplication across Intel NIC drivers Bruce Richardson
2024-11-22 12:53 ` [RFC PATCH 01/21] common/intel_eth: add pkt reassembly fn for intel drivers Bruce Richardson
2024-11-22 12:53 ` [RFC PATCH 02/21] common/intel_eth: provide common Tx entry structures Bruce Richardson
2024-11-22 12:53 ` [RFC PATCH 03/21] common/intel_eth: add Tx mbuf ring replenish fn Bruce Richardson
2024-11-22 12:53 ` [RFC PATCH 04/21] drivers/net: align Tx queue struct field names Bruce Richardson
2024-11-22 12:53 ` Bruce Richardson [this message]
2024-11-22 12:53 ` [RFC PATCH 06/21] common/intel_eth: merge ice and i40e Tx queue struct Bruce Richardson
2024-11-22 12:54 ` [RFC PATCH 07/21] net/iavf: use common Tx queue structure Bruce Richardson
2024-11-22 12:54 ` [RFC PATCH 08/21] net/ixgbe: convert Tx queue context cache field to ptr Bruce Richardson
2024-11-22 12:54 ` [RFC PATCH 09/21] net/ixgbe: use common Tx queue structure Bruce Richardson
2024-11-22 12:54 ` [RFC PATCH 10/21] common/intel_eth: pack " Bruce Richardson
2024-11-22 12:54 ` [RFC PATCH 11/21] common/intel_eth: add post-Tx buffer free function Bruce Richardson
2024-11-22 12:54 ` [RFC PATCH 12/21] common/intel_eth: add Tx buffer free fn for AVX-512 Bruce Richardson
2024-11-22 12:54 ` [RFC PATCH 13/21] net/iavf: use common Tx " Bruce Richardson
2024-11-22 12:54 ` [RFC PATCH 14/21] net/ice: move Tx queue mbuf cleanup fn to common Bruce Richardson
2024-11-22 12:54 ` [RFC PATCH 15/21] net/i40e: use common Tx queue mbuf cleanup fn Bruce Richardson
2024-11-22 12:54 ` [RFC PATCH 16/21] net/ixgbe: " Bruce Richardson
2024-11-22 12:54 ` [RFC PATCH 17/21] net/iavf: " Bruce Richardson
2024-11-22 12:54 ` [RFC PATCH 18/21] net/ice: use vector SW ring for all vector paths Bruce Richardson
2024-11-22 12:54 ` [RFC PATCH 19/21] net/i40e: " Bruce Richardson
2024-11-22 12:54 ` [RFC PATCH 20/21] net/iavf: " Bruce Richardson
2024-11-22 12:54 ` [RFC PATCH 21/21] net/ixgbe: use common Tx backlog entry fn Bruce Richardson
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20241122125418.2857301-6-bruce.richardson@intel.com \
--to=bruce.richardson@intel.com \
--cc=anatoly.burakov@intel.com \
--cc=dev@dpdk.org \
--cc=drc@linux.ibm.com \
--cc=ian.stokes@intel.com \
--cc=konstantin.v.ananyev@yandex.ru \
--cc=vladimir.medvedkin@intel.com \
--cc=wathsala.vithanage@arm.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).