From: Bruce Richardson <bruce.richardson@intel.com>
To: dev@dpdk.org
Cc: Bruce Richardson <bruce.richardson@intel.com>,
Vladimir Medvedkin <vladimir.medvedkin@intel.com>,
Ian Stokes <ian.stokes@intel.com>,
Konstantin Ananyev <konstantin.v.ananyev@yandex.ru>
Subject: [RFC PATCH 20/21] net/iavf: use vector SW ring for all vector paths
Date: Fri, 22 Nov 2024 12:54:13 +0000
Message-ID: <20241122125418.2857301-21-bruce.richardson@intel.com>
In-Reply-To: <20241122125418.2857301-1-bruce.richardson@intel.com>
The AVX-512 code path used a smaller SW ring structure containing only
the mbuf pointer and no other fields. Since the other fields are needed
only by the scalar code path, update the remaining vector driver code
paths (AVX2, SSE) to use the smaller, faster structure.
Signed-off-by: Bruce Richardson <bruce.richardson@intel.com>
---
drivers/net/iavf/iavf_rxtx.c | 7 -------
drivers/net/iavf/iavf_rxtx_vec_avx2.c | 12 ++++++------
drivers/net/iavf/iavf_rxtx_vec_avx512.c | 8 --------
drivers/net/iavf/iavf_rxtx_vec_common.h | 6 ------
drivers/net/iavf/iavf_rxtx_vec_sse.c | 14 +++++++-------
5 files changed, 13 insertions(+), 34 deletions(-)
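
Note: for reference, the difference between the two SW ring entry layouts
is roughly the following. This is a sketch modelled on the pre-existing
i40e/ice entry definitions; the field names beyond "mbuf" are assumptions
and may not match the common ieth_* structures introduced earlier in this
series exactly.

	#include <stdint.h>

	struct rte_mbuf; /* opaque here; provided by <rte_mbuf.h> in the driver */

	/* Scalar-path entry: 16 bytes per descriptor on 64-bit systems.
	 * (next_id/last_id names assumed from the i40e/ice equivalents.)
	 */
	struct ieth_tx_entry {
		struct rte_mbuf *mbuf;  /* mbuf associated with the Tx descriptor */
		uint16_t next_id;       /* index of the next descriptor in the ring */
		uint16_t last_id;       /* index of the last descriptor of the packet */
	};

	/* Vector-path entry: only the mbuf pointer, 8 bytes per descriptor. */
	struct ieth_vec_tx_entry {
		struct rte_mbuf *mbuf;
	};

Halving the per-descriptor entry size doubles the number of entries per
cache line, which is why the vector free and backlog helpers used below
(ieth_tx_free_bufs_vector() and ieth_tx_backlog_entry_vec()) operate on
the smaller layout.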
diff --git a/drivers/net/iavf/iavf_rxtx.c b/drivers/net/iavf/iavf_rxtx.c
index c574b23f34..869fce00eb 100644
--- a/drivers/net/iavf/iavf_rxtx.c
+++ b/drivers/net/iavf/iavf_rxtx.c
@@ -4193,14 +4193,7 @@ iavf_set_tx_function(struct rte_eth_dev *dev)
txq = dev->data->tx_queues[i];
if (!txq)
continue;
-#ifdef CC_AVX512_SUPPORT
- if (use_avx512)
- iavf_txq_vec_setup_avx512(txq);
- else
- iavf_txq_vec_setup(txq);
-#else
iavf_txq_vec_setup(txq);
-#endif
}
if (no_poll_on_link_down) {
diff --git a/drivers/net/iavf/iavf_rxtx_vec_avx2.c b/drivers/net/iavf/iavf_rxtx_vec_avx2.c
index 25dc339303..e0c7146c9b 100644
--- a/drivers/net/iavf/iavf_rxtx_vec_avx2.c
+++ b/drivers/net/iavf/iavf_rxtx_vec_avx2.c
@@ -1736,14 +1736,14 @@ iavf_xmit_fixed_burst_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts,
{
struct ieth_tx_queue *txq = (struct ieth_tx_queue *)tx_queue;
volatile struct iavf_tx_desc *txdp;
- struct ieth_tx_entry *txep;
+ struct ieth_vec_tx_entry *txep;
uint16_t n, nb_commit, tx_id;
/* bit2 is reserved and must be set to 1 according to Spec */
uint64_t flags = IAVF_TX_DESC_CMD_EOP | IAVF_TX_DESC_CMD_ICRC;
uint64_t rs = IAVF_TX_DESC_CMD_RS | flags;
if (txq->nb_tx_free < txq->tx_free_thresh)
- iavf_tx_free_bufs(txq);
+ ieth_tx_free_bufs_vector(txq, iavf_tx_desc_done, false);
nb_commit = nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
if (unlikely(nb_pkts == 0))
@@ -1751,13 +1751,13 @@ iavf_xmit_fixed_burst_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts,
tx_id = txq->tx_tail;
txdp = &txq->iavf_tx_ring[tx_id];
- txep = &txq->sw_ring[tx_id];
+ txep = &txq->sw_ring_v[tx_id];
txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);
n = (uint16_t)(txq->nb_tx_desc - tx_id);
if (nb_commit >= n) {
- ieth_tx_backlog_entry(txep, tx_pkts, n);
+ ieth_tx_backlog_entry_vec(txep, tx_pkts, n);
iavf_vtx(txdp, tx_pkts, n - 1, flags, offload);
tx_pkts += (n - 1);
@@ -1772,10 +1772,10 @@ iavf_xmit_fixed_burst_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts,
/* avoid reach the end of ring */
txdp = &txq->iavf_tx_ring[tx_id];
- txep = &txq->sw_ring[tx_id];
+ txep = &txq->sw_ring_v[tx_id];
}
- ieth_tx_backlog_entry(txep, tx_pkts, nb_commit);
+ ieth_tx_backlog_entry_vec(txep, tx_pkts, nb_commit);
iavf_vtx(txdp, tx_pkts, nb_commit, flags, offload);
diff --git a/drivers/net/iavf/iavf_rxtx_vec_avx512.c b/drivers/net/iavf/iavf_rxtx_vec_avx512.c
index 16cfd6a5b3..bda5fb3b22 100644
--- a/drivers/net/iavf/iavf_rxtx_vec_avx512.c
+++ b/drivers/net/iavf/iavf_rxtx_vec_avx512.c
@@ -2356,14 +2356,6 @@ iavf_xmit_pkts_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
return iavf_xmit_pkts_vec_avx512_cmn(tx_queue, tx_pkts, nb_pkts, false);
}
-int __rte_cold
-iavf_txq_vec_setup_avx512(struct ieth_tx_queue *txq)
-{
- txq->vector_tx = true;
- txq->vector_sw_ring = true;
- return 0;
-}
-
uint16_t
iavf_xmit_pkts_vec_avx512_offload(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts)
diff --git a/drivers/net/iavf/iavf_rxtx_vec_common.h b/drivers/net/iavf/iavf_rxtx_vec_common.h
index 20d8262e7f..14569e9e3b 100644
--- a/drivers/net/iavf/iavf_rxtx_vec_common.h
+++ b/drivers/net/iavf/iavf_rxtx_vec_common.h
@@ -24,12 +24,6 @@ iavf_tx_desc_done(struct ieth_tx_queue *txq, uint16_t idx)
rte_cpu_to_le_64(IAVF_TX_DESC_DTYPE_DESC_DONE);
}
-static __rte_always_inline int
-iavf_tx_free_bufs(struct ieth_tx_queue *txq)
-{
- return ieth_tx_free_bufs(txq, iavf_tx_desc_done);
-}
-
static inline void
_iavf_rx_queue_release_mbufs_vec(struct iavf_rx_queue *rxq)
{
diff --git a/drivers/net/iavf/iavf_rxtx_vec_sse.c b/drivers/net/iavf/iavf_rxtx_vec_sse.c
index 21ad685ff1..89f4a22271 100644
--- a/drivers/net/iavf/iavf_rxtx_vec_sse.c
+++ b/drivers/net/iavf/iavf_rxtx_vec_sse.c
@@ -1368,14 +1368,14 @@ iavf_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
{
struct ieth_tx_queue *txq = (struct ieth_tx_queue *)tx_queue;
volatile struct iavf_tx_desc *txdp;
- struct ieth_tx_entry *txep;
+ struct ieth_vec_tx_entry *txep;
uint16_t n, nb_commit, tx_id;
uint64_t flags = IAVF_TX_DESC_CMD_EOP | 0x04; /* bit 2 must be set */
uint64_t rs = IAVF_TX_DESC_CMD_RS | flags;
int i;
if (txq->nb_tx_free < txq->tx_free_thresh)
- iavf_tx_free_bufs(txq);
+ ieth_tx_free_bufs_vector(txq, iavf_tx_desc_done, false);
nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
if (unlikely(nb_pkts == 0))
@@ -1384,13 +1384,13 @@ iavf_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
tx_id = txq->tx_tail;
txdp = &txq->iavf_tx_ring[tx_id];
- txep = &txq->sw_ring[tx_id];
+ txep = &txq->sw_ring_v[tx_id];
txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);
n = (uint16_t)(txq->nb_tx_desc - tx_id);
if (nb_commit >= n) {
- ieth_tx_backlog_entry(txep, tx_pkts, n);
+ ieth_tx_backlog_entry_vec(txep, tx_pkts, n);
for (i = 0; i < n - 1; ++i, ++tx_pkts, ++txdp)
vtx1(txdp, *tx_pkts, flags);
@@ -1404,10 +1404,10 @@ iavf_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
/* avoid reach the end of ring */
txdp = &txq->iavf_tx_ring[tx_id];
- txep = &txq->sw_ring[tx_id];
+ txep = &txq->sw_ring_v[tx_id];
}
- ieth_tx_backlog_entry(txep, tx_pkts, nb_commit);
+ ieth_tx_backlog_entry_vec(txep, tx_pkts, nb_commit);
iavf_vtx(txdp, tx_pkts, nb_commit, flags);
@@ -1462,7 +1462,7 @@ int __rte_cold
iavf_txq_vec_setup(struct ieth_tx_queue *txq)
{
txq->vector_tx = true;
- txq->vector_sw_ring = false;
+ txq->vector_sw_ring = txq->vector_tx;
return 0;
}
--
2.43.0