* [PATCH 1/4] net/idpf: use the new common vector capability function
2025-09-11 14:31 [PATCH 0/4] idpf and cpfl rx path selection simplification Ciara Loftus
@ 2025-09-11 14:31 ` Ciara Loftus
2025-09-11 14:35 ` Bruce Richardson
2025-09-11 14:31 ` [PATCH 2/4] net/idpf: use the common Rx path selection infrastructure Ciara Loftus
` (2 subsequent siblings)
3 siblings, 1 reply; 7+ messages in thread
From: Ciara Loftus @ 2025-09-11 14:31 UTC (permalink / raw)
To: dev; +Cc: Ciara Loftus
Use the new function for determining the maximum simd bitwidth in
the idpf driver. An additional check is required for the AVX512DQ flag
which is not performed in the common function. Since no other drivers
require this flag, the check will remain outside of the common function.
Signed-off-by: Ciara Loftus <ciara.loftus@intel.com>
---
drivers/net/intel/idpf/idpf_rxtx.c | 50 ++++++-------------
drivers/net/intel/idpf/idpf_rxtx_vec_common.h | 19 +++++++
2 files changed, 33 insertions(+), 36 deletions(-)
diff --git a/drivers/net/intel/idpf/idpf_rxtx.c b/drivers/net/intel/idpf/idpf_rxtx.c
index 5510cbd30a..c9eb7f66d2 100644
--- a/drivers/net/intel/idpf/idpf_rxtx.c
+++ b/drivers/net/intel/idpf/idpf_rxtx.c
@@ -762,26 +762,13 @@ idpf_set_rx_function(struct rte_eth_dev *dev)
struct idpf_vport *vport = dev->data->dev_private;
#ifdef RTE_ARCH_X86
struct idpf_rx_queue *rxq;
+ enum rte_vect_max_simd rx_simd_width = RTE_VECT_SIMD_DISABLED;
int i;
if (idpf_rx_vec_dev_check_default(dev) == IDPF_VECTOR_PATH &&
rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) {
vport->rx_vec_allowed = true;
-
- if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 &&
- rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_256)
- vport->rx_use_avx2 = true;
-
- if (rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_512)
-#ifdef CC_AVX512_SUPPORT
- if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1 &&
- rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512BW) == 1 &&
- rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512DQ))
- vport->rx_use_avx512 = true;
-#else
- PMD_DRV_LOG(NOTICE,
- "AVX512 is not supported in build env");
-#endif /* CC_AVX512_SUPPORT */
+ rx_simd_width = idpf_get_max_simd_bitwidth();
} else {
vport->rx_vec_allowed = false;
}
@@ -795,7 +782,7 @@ idpf_set_rx_function(struct rte_eth_dev *dev)
(void)idpf_qc_splitq_rx_vec_setup(rxq);
}
#ifdef CC_AVX512_SUPPORT
- if (vport->rx_use_avx512) {
+ if (rx_simd_width == RTE_VECT_SIMD_512) {
PMD_DRV_LOG(NOTICE,
"Using Split AVX512 Vector Rx (port %d).",
dev->data->port_id);
@@ -815,7 +802,7 @@ idpf_set_rx_function(struct rte_eth_dev *dev)
(void)idpf_qc_singleq_rx_vec_setup(rxq);
}
#ifdef CC_AVX512_SUPPORT
- if (vport->rx_use_avx512) {
+ if (rx_simd_width == RTE_VECT_SIMD_512) {
PMD_DRV_LOG(NOTICE,
"Using Single AVX512 Vector Rx (port %d).",
dev->data->port_id);
@@ -823,7 +810,7 @@ idpf_set_rx_function(struct rte_eth_dev *dev)
return;
}
#endif /* CC_AVX512_SUPPORT */
- if (vport->rx_use_avx2) {
+ if (rx_simd_width == RTE_VECT_SIMD_256) {
PMD_DRV_LOG(NOTICE,
"Using Single AVX2 Vector Rx (port %d).",
dev->data->port_id);
@@ -871,6 +858,7 @@ idpf_set_tx_function(struct rte_eth_dev *dev)
{
struct idpf_vport *vport = dev->data->dev_private;
#ifdef RTE_ARCH_X86
+ enum rte_vect_max_simd tx_simd_width = RTE_VECT_SIMD_DISABLED;
#ifdef CC_AVX512_SUPPORT
struct ci_tx_queue *txq;
int i;
@@ -879,22 +867,12 @@ idpf_set_tx_function(struct rte_eth_dev *dev)
if (idpf_tx_vec_dev_check_default(dev) == IDPF_VECTOR_PATH &&
rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) {
vport->tx_vec_allowed = true;
-
- if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 &&
- rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_256)
- vport->tx_use_avx2 = true;
-
- if (rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_512)
+ tx_simd_width = idpf_get_max_simd_bitwidth();
#ifdef CC_AVX512_SUPPORT
- {
- if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1 &&
- rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512BW) == 1)
- vport->tx_use_avx512 = true;
- if (vport->tx_use_avx512) {
- for (i = 0; i < dev->data->nb_tx_queues; i++) {
- txq = dev->data->tx_queues[i];
- idpf_qc_tx_vec_avx512_setup(txq);
- }
+ if (tx_simd_width == RTE_VECT_SIMD_512) {
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ txq = dev->data->tx_queues[i];
+ idpf_qc_tx_vec_avx512_setup(txq);
}
}
#else
@@ -910,7 +888,7 @@ idpf_set_tx_function(struct rte_eth_dev *dev)
if (vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT) {
if (vport->tx_vec_allowed) {
#ifdef CC_AVX512_SUPPORT
- if (vport->tx_use_avx512) {
+ if (tx_simd_width == RTE_VECT_SIMD_512) {
PMD_DRV_LOG(NOTICE,
"Using Split AVX512 Vector Tx (port %d).",
dev->data->port_id);
@@ -928,7 +906,7 @@ idpf_set_tx_function(struct rte_eth_dev *dev)
} else {
if (vport->tx_vec_allowed) {
#ifdef CC_AVX512_SUPPORT
- if (vport->tx_use_avx512) {
+ if (tx_simd_width == RTE_VECT_SIMD_512) {
for (i = 0; i < dev->data->nb_tx_queues; i++) {
txq = dev->data->tx_queues[i];
if (txq == NULL)
@@ -943,7 +921,7 @@ idpf_set_tx_function(struct rte_eth_dev *dev)
return;
}
#endif /* CC_AVX512_SUPPORT */
- if (vport->tx_use_avx2) {
+ if (tx_simd_width == RTE_VECT_SIMD_256) {
PMD_DRV_LOG(NOTICE,
"Using Single AVX2 Vector Tx (port %d).",
dev->data->port_id);
diff --git a/drivers/net/intel/idpf/idpf_rxtx_vec_common.h b/drivers/net/intel/idpf/idpf_rxtx_vec_common.h
index ff3ae56baf..50992b7989 100644
--- a/drivers/net/intel/idpf/idpf_rxtx_vec_common.h
+++ b/drivers/net/intel/idpf/idpf_rxtx_vec_common.h
@@ -11,6 +11,9 @@
#include "idpf_ethdev.h"
#include "idpf_rxtx.h"
#include "../common/rx.h"
+#ifdef RTE_ARCH_X86
+#include "../common/rx_vec_x86.h"
+#endif
#define IDPF_SCALAR_PATH 0
#define IDPF_VECTOR_PATH 1
@@ -129,4 +132,20 @@ idpf_tx_vec_dev_check_default(struct rte_eth_dev *dev)
return IDPF_VECTOR_PATH;
}
+#ifdef RTE_ARCH_X86
+static inline enum rte_vect_max_simd
+idpf_get_max_simd_bitwidth(void)
+{
+ if (rte_vect_get_max_simd_bitwidth() == 512 &&
+ rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512DQ) == 0) {
+ if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2))
+ return RTE_VECT_SIMD_256;
+ else
+ return RTE_VECT_SIMD_DISABLED;
+ }
+
+ return ci_get_x86_max_simd_bitwidth();
+}
+#endif
+
#endif /*_IDPF_RXTX_VEC_COMMON_H_*/
--
2.34.1
^ permalink raw reply [flat|nested] 7+ messages in thread
* [PATCH 2/4] net/idpf: use the common Rx path selection infrastructure
2025-09-11 14:31 [PATCH 0/4] idpf and cpfl rx path selection simplification Ciara Loftus
2025-09-11 14:31 ` [PATCH 1/4] net/idpf: use the new common vector capability function Ciara Loftus
@ 2025-09-11 14:31 ` Ciara Loftus
2025-09-11 16:21 ` Bruce Richardson
2025-09-11 14:31 ` [PATCH 3/4] net/cpfl: use the new common vector capability function Ciara Loftus
2025-09-11 14:31 ` [PATCH 4/4] net/cpfl: use the common Rx path selection infrastructure Ciara Loftus
3 siblings, 1 reply; 7+ messages in thread
From: Ciara Loftus @ 2025-09-11 14:31 UTC (permalink / raw)
To: dev; +Cc: Ciara Loftus
Update the common rx path selection infrastructure to include the
feature "single queue" which is relevant for the idpf and cpfl drivers.
Replace the existing complicated logic in the idpf driver with the use
of the common function.
Signed-off-by: Ciara Loftus <ciara.loftus@intel.com>
---
drivers/net/intel/common/rx.h | 5 +
drivers/net/intel/idpf/idpf_common_device.h | 12 +++
drivers/net/intel/idpf/idpf_common_rxtx.c | 24 +++++
drivers/net/intel/idpf/idpf_common_rxtx.h | 12 +++
drivers/net/intel/idpf/idpf_ethdev.c | 2 +
drivers/net/intel/idpf/idpf_rxtx.c | 103 ++++++--------------
6 files changed, 83 insertions(+), 75 deletions(-)
diff --git a/drivers/net/intel/common/rx.h b/drivers/net/intel/common/rx.h
index 770284f7ab..741808f573 100644
--- a/drivers/net/intel/common/rx.h
+++ b/drivers/net/intel/common/rx.h
@@ -131,6 +131,7 @@ struct ci_rx_path_features_extra {
bool flex_desc;
bool bulk_alloc;
bool disabled;
+ bool single_queue;
};
struct ci_rx_path_features {
@@ -278,6 +279,10 @@ ci_rx_path_select(struct ci_rx_path_features req_features,
if (path_features->extra.flex_desc != req_features.extra.flex_desc)
continue;
+ /* If requested, ensure the path supports single queue RX. */
+ if (path_features->extra.single_queue != req_features.extra.single_queue)
+ continue;
+
/* If requested, ensure the path supports scattered RX. */
if (path_features->extra.scattered != req_features.extra.scattered)
continue;
diff --git a/drivers/net/intel/idpf/idpf_common_device.h b/drivers/net/intel/idpf/idpf_common_device.h
index 5f3e4a4fcf..62665ad286 100644
--- a/drivers/net/intel/idpf/idpf_common_device.h
+++ b/drivers/net/intel/idpf/idpf_common_device.h
@@ -44,6 +44,16 @@
(sizeof(struct virtchnl2_ptype) + \
(((p)->proto_id_count ? ((p)->proto_id_count - 1) : 0) * sizeof((p)->proto_id[0])))
+enum idpf_rx_func_type {
+ IDPF_RX_DEFAULT,
+ IDPF_RX_SINGLEQ,
+ IDPF_RX_SINGLEQ_SCATTERED,
+ IDPF_RX_SINGLEQ_AVX2,
+ IDPF_RX_AVX512,
+ IDPF_RX_SINGLQ_AVX512,
+ IDPF_RX_MAX
+};
+
struct idpf_adapter {
struct idpf_hw hw;
struct virtchnl2_version_info virtchnl_version;
@@ -59,6 +69,8 @@ struct idpf_adapter {
/* For timestamp */
uint64_t time_hw;
+
+ enum idpf_rx_func_type rx_func_type;
};
struct idpf_chunks_info {
diff --git a/drivers/net/intel/idpf/idpf_common_rxtx.c b/drivers/net/intel/idpf/idpf_common_rxtx.c
index eb25b091d8..97a5ce9b87 100644
--- a/drivers/net/intel/idpf/idpf_common_rxtx.c
+++ b/drivers/net/intel/idpf/idpf_common_rxtx.c
@@ -7,6 +7,8 @@
#include <rte_errno.h>
#include "idpf_common_rxtx.h"
+#include "idpf_common_device.h"
+#include "../common/rx.h"
int idpf_timestamp_dynfield_offset = -1;
uint64_t idpf_timestamp_dynflag;
@@ -1622,3 +1624,25 @@ idpf_qc_splitq_rx_vec_setup(struct idpf_rx_queue *rxq)
rxq->bufq2->idpf_ops = &def_rx_ops_vec;
return idpf_rxq_vec_setup_default(rxq->bufq2);
}
+
+RTE_EXPORT_INTERNAL_SYMBOL(idpf_rx_path_infos)
+const struct ci_rx_path_info idpf_rx_path_infos[] = {
+ [IDPF_RX_DEFAULT] = {idpf_dp_splitq_recv_pkts, "Scalar Split",
+ {IDPF_RX_SCALAR_OFFLOADS, RTE_VECT_SIMD_DISABLED}},
+ [IDPF_RX_SINGLEQ] = {idpf_dp_singleq_recv_pkts, "Scalar Single Queue",
+ {IDPF_RX_SCALAR_OFFLOADS, RTE_VECT_SIMD_DISABLED, {.single_queue = true}}},
+ [IDPF_RX_SINGLEQ_SCATTERED] = {
+ idpf_dp_singleq_recv_scatter_pkts, "Scalar Single Queue Scattered",
+ {IDPF_RX_SCALAR_OFFLOADS, RTE_VECT_SIMD_DISABLED,
+ {.scattered = true, .single_queue = true}}},
+#ifdef RTE_ARCH_X86
+ [IDPF_RX_SINGLEQ_AVX2] = {idpf_dp_singleq_recv_pkts_avx2, "AVX2 Single Queue",
+ {IDPF_RX_VECTOR_OFFLOADS, RTE_VECT_SIMD_256, {.single_queue = true}}},
+#ifdef CC_AVX512_SUPPORT
+ [IDPF_RX_AVX512] = {idpf_dp_splitq_recv_pkts_avx512, "AVX-512 Split",
+ {IDPF_RX_VECTOR_OFFLOADS, RTE_VECT_SIMD_512}},
+ [IDPF_RX_SINGLQ_AVX512] = {idpf_dp_singleq_recv_pkts_avx512, "AVX-512 Single Queue",
+ {IDPF_RX_VECTOR_OFFLOADS, RTE_VECT_SIMD_512, {.single_queue = true}}},
+#endif /* CC_AVX512_SUPPORT */
+#endif /* RTE_ARCH_X86 */
+};
diff --git a/drivers/net/intel/idpf/idpf_common_rxtx.h b/drivers/net/intel/idpf/idpf_common_rxtx.h
index f84a760334..3bc3323af4 100644
--- a/drivers/net/intel/idpf/idpf_common_rxtx.h
+++ b/drivers/net/intel/idpf/idpf_common_rxtx.h
@@ -11,6 +11,7 @@
#include "idpf_common_device.h"
#include "../common/tx.h"
+#include "../common/rx.h"
#define IDPF_RX_MAX_BURST 32
@@ -96,6 +97,15 @@
#define IDPF_RX_SPLIT_BUFQ1_ID 1
#define IDPF_RX_SPLIT_BUFQ2_ID 2
+#define IDPF_RX_SCALAR_OFFLOADS ( \
+ RTE_ETH_RX_OFFLOAD_IPV4_CKSUM | \
+ RTE_ETH_RX_OFFLOAD_UDP_CKSUM | \
+ RTE_ETH_RX_OFFLOAD_TCP_CKSUM | \
+ RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM | \
+ RTE_ETH_RX_OFFLOAD_TIMESTAMP | \
+ RTE_ETH_RX_OFFLOAD_SCATTER)
+#define IDPF_RX_VECTOR_OFFLOADS 0
+
struct idpf_rx_stats {
RTE_ATOMIC(uint64_t) mbuf_alloc_failed;
};
@@ -253,4 +263,6 @@ uint16_t idpf_dp_singleq_xmit_pkts_avx2(void *tx_queue,
struct rte_mbuf **tx_pkts,
uint16_t nb_pkts);
+extern const struct ci_rx_path_info idpf_rx_path_infos[IDPF_RX_MAX];
+
#endif /* _IDPF_COMMON_RXTX_H_ */
diff --git a/drivers/net/intel/idpf/idpf_ethdev.c b/drivers/net/intel/idpf/idpf_ethdev.c
index 90720909bf..c04842c9df 100644
--- a/drivers/net/intel/idpf/idpf_ethdev.c
+++ b/drivers/net/intel/idpf/idpf_ethdev.c
@@ -694,6 +694,8 @@ idpf_dev_configure(struct rte_eth_dev *dev)
(dev->data->mtu == 0) ? IDPF_DEFAULT_MTU : dev->data->mtu +
IDPF_ETH_OVERHEAD;
+ vport->adapter->rx_func_type = IDPF_RX_DEFAULT;
+
return 0;
}
diff --git a/drivers/net/intel/idpf/idpf_rxtx.c b/drivers/net/intel/idpf/idpf_rxtx.c
index c9eb7f66d2..76fab1d400 100644
--- a/drivers/net/intel/idpf/idpf_rxtx.c
+++ b/drivers/net/intel/idpf/idpf_rxtx.c
@@ -760,97 +760,50 @@ void
idpf_set_rx_function(struct rte_eth_dev *dev)
{
struct idpf_vport *vport = dev->data->dev_private;
+ struct idpf_adapter *ad = vport->adapter;
+ struct ci_rx_path_features req_features = {
+ .rx_offloads = dev->data->dev_conf.rxmode.offloads,
+ .simd_width = RTE_VECT_SIMD_DISABLED,
+ };
#ifdef RTE_ARCH_X86
struct idpf_rx_queue *rxq;
- enum rte_vect_max_simd rx_simd_width = RTE_VECT_SIMD_DISABLED;
int i;
if (idpf_rx_vec_dev_check_default(dev) == IDPF_VECTOR_PATH &&
- rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) {
- vport->rx_vec_allowed = true;
- rx_simd_width = idpf_get_max_simd_bitwidth();
- } else {
- vport->rx_vec_allowed = false;
- }
+ rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_256)
+ req_features.simd_width = idpf_get_max_simd_bitwidth();
#endif /* RTE_ARCH_X86 */
+ req_features.extra.single_queue = (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE);
+ req_features.extra.scattered = dev->data->scattered_rx;
+
+ ad->rx_func_type = ci_rx_path_select(req_features,
+ &idpf_rx_path_infos[0],
+ IDPF_RX_MAX,
+ IDPF_RX_DEFAULT);
+
#ifdef RTE_ARCH_X86
- if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT) {
- if (vport->rx_vec_allowed) {
+ if (idpf_rx_path_infos[ad->rx_func_type].features.simd_width >= RTE_VECT_SIMD_256) {
+ /* Vector function selected. Prepare the rxq accordingly. */
+ if (idpf_rx_path_infos[ad->rx_func_type].features.extra.single_queue) {
for (i = 0; i < dev->data->nb_rx_queues; i++) {
- rxq = dev->data->rx_queues[i];
- (void)idpf_qc_splitq_rx_vec_setup(rxq);
- }
-#ifdef CC_AVX512_SUPPORT
- if (rx_simd_width == RTE_VECT_SIMD_512) {
- PMD_DRV_LOG(NOTICE,
- "Using Split AVX512 Vector Rx (port %d).",
- dev->data->port_id);
- dev->rx_pkt_burst = idpf_dp_splitq_recv_pkts_avx512;
- return;
- }
-#endif /* CC_AVX512_SUPPORT */
- }
- PMD_DRV_LOG(NOTICE,
- "Using Split Scalar Rx (port %d).",
- dev->data->port_id);
- dev->rx_pkt_burst = idpf_dp_splitq_recv_pkts;
- } else {
- if (vport->rx_vec_allowed) {
- for (i = 0; i < dev->data->nb_tx_queues; i++) {
rxq = dev->data->rx_queues[i];
(void)idpf_qc_singleq_rx_vec_setup(rxq);
}
-#ifdef CC_AVX512_SUPPORT
- if (rx_simd_width == RTE_VECT_SIMD_512) {
- PMD_DRV_LOG(NOTICE,
- "Using Single AVX512 Vector Rx (port %d).",
- dev->data->port_id);
- dev->rx_pkt_burst = idpf_dp_singleq_recv_pkts_avx512;
- return;
- }
-#endif /* CC_AVX512_SUPPORT */
- if (rx_simd_width == RTE_VECT_SIMD_256) {
- PMD_DRV_LOG(NOTICE,
- "Using Single AVX2 Vector Rx (port %d).",
- dev->data->port_id);
- dev->rx_pkt_burst = idpf_dp_singleq_recv_pkts_avx2;
- return;
+ } else {
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ rxq = dev->data->rx_queues[i];
+ (void)idpf_qc_splitq_rx_vec_setup(rxq);
}
}
- if (dev->data->scattered_rx) {
- PMD_DRV_LOG(NOTICE,
- "Using Single Scalar Scatterd Rx (port %d).",
- dev->data->port_id);
- dev->rx_pkt_burst = idpf_dp_singleq_recv_scatter_pkts;
- return;
- }
- PMD_DRV_LOG(NOTICE,
- "Using Single Scalar Rx (port %d).",
- dev->data->port_id);
- dev->rx_pkt_burst = idpf_dp_singleq_recv_pkts;
}
-#else
- if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT) {
- PMD_DRV_LOG(NOTICE,
- "Using Split Scalar Rx (port %d).",
- dev->data->port_id);
- dev->rx_pkt_burst = idpf_dp_splitq_recv_pkts;
- } else {
- if (dev->data->scattered_rx) {
- PMD_DRV_LOG(NOTICE,
- "Using Single Scalar Scatterd Rx (port %d).",
- dev->data->port_id);
- dev->rx_pkt_burst = idpf_dp_singleq_recv_scatter_pkts;
- return;
- }
- PMD_DRV_LOG(NOTICE,
- "Using Single Scalar Rx (port %d).",
- dev->data->port_id);
- dev->rx_pkt_burst = idpf_dp_singleq_recv_pkts;
- }
-#endif /* RTE_ARCH_X86 */
+#endif
+
+ dev->rx_pkt_burst = idpf_rx_path_infos[ad->rx_func_type].pkt_burst;
+ PMD_DRV_LOG(NOTICE, "Using %s (port %d).",
+ idpf_rx_path_infos[ad->rx_func_type].info, dev->data->port_id);
+
}
void
--
2.34.1
^ permalink raw reply [flat|nested] 7+ messages in thread
* Re: [PATCH 2/4] net/idpf: use the common Rx path selection infrastructure
2025-09-11 14:31 ` [PATCH 2/4] net/idpf: use the common Rx path selection infrastructure Ciara Loftus
@ 2025-09-11 16:21 ` Bruce Richardson
0 siblings, 0 replies; 7+ messages in thread
From: Bruce Richardson @ 2025-09-11 16:21 UTC (permalink / raw)
To: Ciara Loftus; +Cc: dev
On Thu, Sep 11, 2025 at 02:31:43PM +0000, Ciara Loftus wrote:
> Update the common rx path selection infrastructure to include the
> feature "single queue" which is relevant for the idpf and cpfl drivers.
> Replace the existing complicated logic in the idpf driver with the use
> of the common function.
>
> Signed-off-by: Ciara Loftus <ciara.loftus@intel.com>
> ---
> drivers/net/intel/common/rx.h | 5 +
> drivers/net/intel/idpf/idpf_common_device.h | 12 +++
> drivers/net/intel/idpf/idpf_common_rxtx.c | 24 +++++
> drivers/net/intel/idpf/idpf_common_rxtx.h | 12 +++
> drivers/net/intel/idpf/idpf_ethdev.c | 2 +
> drivers/net/intel/idpf/idpf_rxtx.c | 103 ++++++--------------
> 6 files changed, 83 insertions(+), 75 deletions(-)
>
> diff --git a/drivers/net/intel/common/rx.h b/drivers/net/intel/common/rx.h
> index 770284f7ab..741808f573 100644
> --- a/drivers/net/intel/common/rx.h
> +++ b/drivers/net/intel/common/rx.h
> @@ -131,6 +131,7 @@ struct ci_rx_path_features_extra {
> bool flex_desc;
> bool bulk_alloc;
> bool disabled;
> + bool single_queue;
> };
>
> struct ci_rx_path_features {
> @@ -278,6 +279,10 @@ ci_rx_path_select(struct ci_rx_path_features req_features,
> if (path_features->extra.flex_desc != req_features.extra.flex_desc)
> continue;
>
> + /* If requested, ensure the path supports single queue RX. */
> + if (path_features->extra.single_queue != req_features.extra.single_queue)
> + continue;
> +
> /* If requested, ensure the path supports scattered RX. */
> if (path_features->extra.scattered != req_features.extra.scattered)
> continue;
> diff --git a/drivers/net/intel/idpf/idpf_common_device.h b/drivers/net/intel/idpf/idpf_common_device.h
> index 5f3e4a4fcf..62665ad286 100644
> --- a/drivers/net/intel/idpf/idpf_common_device.h
> +++ b/drivers/net/intel/idpf/idpf_common_device.h
> @@ -44,6 +44,16 @@
> (sizeof(struct virtchnl2_ptype) + \
> (((p)->proto_id_count ? ((p)->proto_id_count - 1) : 0) * sizeof((p)->proto_id[0])))
>
> +enum idpf_rx_func_type {
> + IDPF_RX_DEFAULT,
> + IDPF_RX_SINGLEQ,
> + IDPF_RX_SINGLEQ_SCATTERED,
> + IDPF_RX_SINGLEQ_AVX2,
> + IDPF_RX_AVX512,
> + IDPF_RX_SINGLQ_AVX512,
> + IDPF_RX_MAX
> +};
> +
> struct idpf_adapter {
> struct idpf_hw hw;
> struct virtchnl2_version_info virtchnl_version;
> @@ -59,6 +69,8 @@ struct idpf_adapter {
>
> /* For timestamp */
> uint64_t time_hw;
> +
> + enum idpf_rx_func_type rx_func_type;
> };
>
> struct idpf_chunks_info {
> diff --git a/drivers/net/intel/idpf/idpf_common_rxtx.c b/drivers/net/intel/idpf/idpf_common_rxtx.c
> index eb25b091d8..97a5ce9b87 100644
> --- a/drivers/net/intel/idpf/idpf_common_rxtx.c
> +++ b/drivers/net/intel/idpf/idpf_common_rxtx.c
> @@ -7,6 +7,8 @@
> #include <rte_errno.h>
>
> #include "idpf_common_rxtx.h"
> +#include "idpf_common_device.h"
> +#include "../common/rx.h"
>
> int idpf_timestamp_dynfield_offset = -1;
> uint64_t idpf_timestamp_dynflag;
> @@ -1622,3 +1624,25 @@ idpf_qc_splitq_rx_vec_setup(struct idpf_rx_queue *rxq)
> rxq->bufq2->idpf_ops = &def_rx_ops_vec;
> return idpf_rxq_vec_setup_default(rxq->bufq2);
> }
> +
> +RTE_EXPORT_INTERNAL_SYMBOL(idpf_rx_path_infos)
> +const struct ci_rx_path_info idpf_rx_path_infos[] = {
> + [IDPF_RX_DEFAULT] = {idpf_dp_splitq_recv_pkts, "Scalar Split",
> + {IDPF_RX_SCALAR_OFFLOADS, RTE_VECT_SIMD_DISABLED}},
> + [IDPF_RX_SINGLEQ] = {idpf_dp_singleq_recv_pkts, "Scalar Single Queue",
> + {IDPF_RX_SCALAR_OFFLOADS, RTE_VECT_SIMD_DISABLED, {.single_queue = true}}},
> + [IDPF_RX_SINGLEQ_SCATTERED] = {
> + idpf_dp_singleq_recv_scatter_pkts, "Scalar Single Queue Scattered",
> + {IDPF_RX_SCALAR_OFFLOADS, RTE_VECT_SIMD_DISABLED,
> + {.scattered = true, .single_queue = true}}},
> +#ifdef RTE_ARCH_X86
> + [IDPF_RX_SINGLEQ_AVX2] = {idpf_dp_singleq_recv_pkts_avx2, "AVX2 Single Queue",
> + {IDPF_RX_VECTOR_OFFLOADS, RTE_VECT_SIMD_256, {.single_queue = true}}},
> +#ifdef CC_AVX512_SUPPORT
> + [IDPF_RX_AVX512] = {idpf_dp_splitq_recv_pkts_avx512, "AVX-512 Split",
> + {IDPF_RX_VECTOR_OFFLOADS, RTE_VECT_SIMD_512}},
> + [IDPF_RX_SINGLQ_AVX512] = {idpf_dp_singleq_recv_pkts_avx512, "AVX-512 Single Queue",
> + {IDPF_RX_VECTOR_OFFLOADS, RTE_VECT_SIMD_512, {.single_queue = true}}},
> +#endif /* CC_AVX512_SUPPORT */
> +#endif /* RTE_ARCH_X86 */
A minor point, but I don't particularly like this form of init for the
array. If it was all in table form, with each entry on a single line, then
it would be ok, since we could have neat columns. However, since we are
word-wrapping the lines, I'd tend towards having one element per line and
using named initializers.
^ permalink raw reply [flat|nested] 7+ messages in thread
* [PATCH 3/4] net/cpfl: use the new common vector capability function
2025-09-11 14:31 [PATCH 0/4] idpf and cpfl rx path selection simplification Ciara Loftus
2025-09-11 14:31 ` [PATCH 1/4] net/idpf: use the new common vector capability function Ciara Loftus
2025-09-11 14:31 ` [PATCH 2/4] net/idpf: use the common Rx path selection infrastructure Ciara Loftus
@ 2025-09-11 14:31 ` Ciara Loftus
2025-09-11 14:31 ` [PATCH 4/4] net/cpfl: use the common Rx path selection infrastructure Ciara Loftus
3 siblings, 0 replies; 7+ messages in thread
From: Ciara Loftus @ 2025-09-11 14:31 UTC (permalink / raw)
To: dev; +Cc: Ciara Loftus
Use the new function for determining the maximum simd bitwidth in
the cpfl driver. An additional check is added for the AVX512DQ CPU flag
as it is not present in the common function since it is not a common
requirement for all drivers. Also, remove unused elements related to AVX
capability from the idpf_vport structure, now that neither the idpf nor
cpfl drivers use them.
Signed-off-by: Ciara Loftus <ciara.loftus@intel.com>
---
drivers/net/intel/cpfl/cpfl_rxtx.c | 50 ++++++-------------
drivers/net/intel/cpfl/cpfl_rxtx_vec_common.h | 20 ++++++++
drivers/net/intel/idpf/idpf_common_device.h | 4 --
3 files changed, 34 insertions(+), 40 deletions(-)
diff --git a/drivers/net/intel/cpfl/cpfl_rxtx.c b/drivers/net/intel/cpfl/cpfl_rxtx.c
index 02e81f7f34..0f5b645f89 100644
--- a/drivers/net/intel/cpfl/cpfl_rxtx.c
+++ b/drivers/net/intel/cpfl/cpfl_rxtx.c
@@ -1411,26 +1411,13 @@ cpfl_set_rx_function(struct rte_eth_dev *dev)
struct idpf_vport *vport = &cpfl_vport->base;
#ifdef RTE_ARCH_X86
struct cpfl_rx_queue *cpfl_rxq;
+ enum rte_vect_max_simd rx_simd_width = RTE_VECT_SIMD_DISABLED;
int i;
if (cpfl_rx_vec_dev_check_default(dev) == CPFL_VECTOR_PATH &&
rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) {
vport->rx_vec_allowed = true;
-
- if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 &&
- rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_256)
- vport->rx_use_avx2 = true;
-
- if (rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_512)
-#ifdef CC_AVX512_SUPPORT
- if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1 &&
- rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512BW) == 1 &&
- rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512DQ))
- vport->rx_use_avx512 = true;
-#else
- PMD_DRV_LOG(NOTICE,
- "AVX512 is not supported in build env");
-#endif /* CC_AVX512_SUPPORT */
+ rx_simd_width = cpfl_get_max_simd_bitwidth();
} else {
vport->rx_vec_allowed = false;
}
@@ -1446,7 +1433,7 @@ cpfl_set_rx_function(struct rte_eth_dev *dev)
(void)idpf_qc_splitq_rx_vec_setup(&cpfl_rxq->base);
}
#ifdef CC_AVX512_SUPPORT
- if (vport->rx_use_avx512) {
+ if (rx_simd_width == RTE_VECT_SIMD_512) {
PMD_DRV_LOG(NOTICE,
"Using Split AVX512 Vector Rx (port %d).",
dev->data->port_id);
@@ -1466,7 +1453,7 @@ cpfl_set_rx_function(struct rte_eth_dev *dev)
(void)idpf_qc_singleq_rx_vec_setup(&cpfl_rxq->base);
}
#ifdef CC_AVX512_SUPPORT
- if (vport->rx_use_avx512) {
+ if (rx_simd_width == RTE_VECT_SIMD_512) {
PMD_DRV_LOG(NOTICE,
"Using Single AVX512 Vector Rx (port %d).",
dev->data->port_id);
@@ -1474,7 +1461,7 @@ cpfl_set_rx_function(struct rte_eth_dev *dev)
return;
}
#endif /* CC_AVX512_SUPPORT */
- if (vport->rx_use_avx2) {
+ if (rx_simd_width == RTE_VECT_SIMD_256) {
PMD_DRV_LOG(NOTICE,
"Using Single AVX2 Vector Rx (port %d).",
dev->data->port_id);
@@ -1522,6 +1509,7 @@ cpfl_set_tx_function(struct rte_eth_dev *dev)
struct cpfl_vport *cpfl_vport = dev->data->dev_private;
struct idpf_vport *vport = &cpfl_vport->base;
#ifdef RTE_ARCH_X86
+ enum rte_vect_max_simd tx_simd_width = RTE_VECT_SIMD_DISABLED;
#ifdef CC_AVX512_SUPPORT
struct cpfl_tx_queue *cpfl_txq;
int i;
@@ -1530,22 +1518,12 @@ cpfl_set_tx_function(struct rte_eth_dev *dev)
if (cpfl_tx_vec_dev_check_default(dev) == CPFL_VECTOR_PATH &&
rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) {
vport->tx_vec_allowed = true;
-
- if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 &&
- rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_256)
- vport->tx_use_avx2 = true;
-
- if (rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_512)
+ tx_simd_width = cpfl_get_max_simd_bitwidth();
#ifdef CC_AVX512_SUPPORT
- {
- if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1 &&
- rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512BW) == 1)
- vport->tx_use_avx512 = true;
- if (vport->tx_use_avx512) {
- for (i = 0; i < dev->data->nb_tx_queues; i++) {
- cpfl_txq = dev->data->tx_queues[i];
- idpf_qc_tx_vec_avx512_setup(&cpfl_txq->base);
- }
+ if (tx_simd_width == RTE_VECT_SIMD_512) {
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ cpfl_txq = dev->data->tx_queues[i];
+ idpf_qc_tx_vec_avx512_setup(&cpfl_txq->base);
}
}
#else
@@ -1561,7 +1539,7 @@ cpfl_set_tx_function(struct rte_eth_dev *dev)
if (vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT) {
if (vport->tx_vec_allowed) {
#ifdef CC_AVX512_SUPPORT
- if (vport->tx_use_avx512) {
+ if (tx_simd_width == RTE_VECT_SIMD_512) {
PMD_DRV_LOG(NOTICE,
"Using Split AVX512 Vector Tx (port %d).",
dev->data->port_id);
@@ -1579,7 +1557,7 @@ cpfl_set_tx_function(struct rte_eth_dev *dev)
} else {
if (vport->tx_vec_allowed) {
#ifdef CC_AVX512_SUPPORT
- if (vport->tx_use_avx512) {
+ if (tx_simd_width == RTE_VECT_SIMD_512) {
for (i = 0; i < dev->data->nb_tx_queues; i++) {
cpfl_txq = dev->data->tx_queues[i];
if (cpfl_txq == NULL)
@@ -1594,7 +1572,7 @@ cpfl_set_tx_function(struct rte_eth_dev *dev)
return;
}
#endif /* CC_AVX512_SUPPORT */
- if (vport->tx_use_avx2) {
+ if (tx_simd_width == RTE_VECT_SIMD_256) {
PMD_DRV_LOG(NOTICE,
"Using Single AVX2 Vector Tx (port %d).",
dev->data->port_id);
diff --git a/drivers/net/intel/cpfl/cpfl_rxtx_vec_common.h b/drivers/net/intel/cpfl/cpfl_rxtx_vec_common.h
index f1e555b5f8..e2131e4d4d 100644
--- a/drivers/net/intel/cpfl/cpfl_rxtx_vec_common.h
+++ b/drivers/net/intel/cpfl/cpfl_rxtx_vec_common.h
@@ -11,6 +11,10 @@
#include "cpfl_ethdev.h"
#include "cpfl_rxtx.h"
+#ifdef RTE_ARCH_X86
+#include "../common/rx_vec_x86.h"
+#endif
+
#define CPFL_SCALAR_PATH 0
#define CPFL_VECTOR_PATH 1
#define CPFL_RX_NO_VECTOR_FLAGS ( \
@@ -121,4 +125,20 @@ cpfl_tx_vec_dev_check_default(struct rte_eth_dev *dev)
return CPFL_VECTOR_PATH;
}
+#ifdef RTE_ARCH_X86
+static inline enum rte_vect_max_simd
+cpfl_get_max_simd_bitwidth(void)
+{
+ if (rte_vect_get_max_simd_bitwidth() == 512 &&
+ rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512DQ) == 0) {
+ if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2))
+ return RTE_VECT_SIMD_256;
+ else
+ return RTE_VECT_SIMD_DISABLED;
+ }
+
+ return ci_get_x86_max_simd_bitwidth();
+}
+#endif
+
#endif /*_CPFL_RXTX_VEC_COMMON_H_*/
diff --git a/drivers/net/intel/idpf/idpf_common_device.h b/drivers/net/intel/idpf/idpf_common_device.h
index 62665ad286..11baa195e5 100644
--- a/drivers/net/intel/idpf/idpf_common_device.h
+++ b/drivers/net/intel/idpf/idpf_common_device.h
@@ -135,10 +135,6 @@ struct idpf_vport {
bool rx_vec_allowed;
bool tx_vec_allowed;
- bool rx_use_avx2;
- bool tx_use_avx2;
- bool rx_use_avx512;
- bool tx_use_avx512;
struct virtchnl2_vport_stats eth_stats_offset;
--
2.34.1
^ permalink raw reply [flat|nested] 7+ messages in thread
* [PATCH 4/4] net/cpfl: use the common Rx path selection infrastructure
2025-09-11 14:31 [PATCH 0/4] idpf and cpfl rx path selection simplification Ciara Loftus
` (2 preceding siblings ...)
2025-09-11 14:31 ` [PATCH 3/4] net/cpfl: use the new common vector capability function Ciara Loftus
@ 2025-09-11 14:31 ` Ciara Loftus
3 siblings, 0 replies; 7+ messages in thread
From: Ciara Loftus @ 2025-09-11 14:31 UTC (permalink / raw)
To: dev; +Cc: Ciara Loftus
Replace the existing complicated logic with the use of the common
function.
Signed-off-by: Ciara Loftus <ciara.loftus@intel.com>
---
drivers/net/intel/cpfl/cpfl_rxtx.c | 105 ++++++--------------
drivers/net/intel/idpf/idpf_common_device.h | 1 -
2 files changed, 29 insertions(+), 77 deletions(-)
diff --git a/drivers/net/intel/cpfl/cpfl_rxtx.c b/drivers/net/intel/cpfl/cpfl_rxtx.c
index 0f5b645f89..c9e2a48417 100644
--- a/drivers/net/intel/cpfl/cpfl_rxtx.c
+++ b/drivers/net/intel/cpfl/cpfl_rxtx.c
@@ -1409,98 +1409,51 @@ cpfl_set_rx_function(struct rte_eth_dev *dev)
{
struct cpfl_vport *cpfl_vport = dev->data->dev_private;
struct idpf_vport *vport = &cpfl_vport->base;
+ struct idpf_adapter *ad = vport->adapter;
+ struct ci_rx_path_features req_features = {
+ .rx_offloads = dev->data->dev_conf.rxmode.offloads,
+ .simd_width = RTE_VECT_SIMD_DISABLED,
+ };
#ifdef RTE_ARCH_X86
struct cpfl_rx_queue *cpfl_rxq;
- enum rte_vect_max_simd rx_simd_width = RTE_VECT_SIMD_DISABLED;
int i;
if (cpfl_rx_vec_dev_check_default(dev) == CPFL_VECTOR_PATH &&
- rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) {
- vport->rx_vec_allowed = true;
- rx_simd_width = cpfl_get_max_simd_bitwidth();
- } else {
- vport->rx_vec_allowed = false;
- }
+ rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_256)
+ req_features.simd_width = cpfl_get_max_simd_bitwidth();
#endif /* RTE_ARCH_X86 */
+ req_features.extra.single_queue = (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE);
+ req_features.extra.scattered = dev->data->scattered_rx;
+
+ ad->rx_func_type = ci_rx_path_select(req_features,
+ &idpf_rx_path_infos[0],
+ IDPF_RX_MAX,
+ IDPF_RX_DEFAULT);
+
#ifdef RTE_ARCH_X86
- if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT) {
- if (vport->rx_vec_allowed) {
+ if (idpf_rx_path_infos[ad->rx_func_type].features.simd_width >= RTE_VECT_SIMD_256) {
+ /* Vector function selected. Prepare the rxq accordingly. */
+ if (idpf_rx_path_infos[ad->rx_func_type].features.extra.single_queue) {
for (i = 0; i < dev->data->nb_rx_queues; i++) {
cpfl_rxq = dev->data->rx_queues[i];
- if (cpfl_rxq->hairpin_info.hairpin_q)
- continue;
- (void)idpf_qc_splitq_rx_vec_setup(&cpfl_rxq->base);
- }
-#ifdef CC_AVX512_SUPPORT
- if (rx_simd_width == RTE_VECT_SIMD_512) {
- PMD_DRV_LOG(NOTICE,
- "Using Split AVX512 Vector Rx (port %d).",
- dev->data->port_id);
- dev->rx_pkt_burst = idpf_dp_splitq_recv_pkts_avx512;
- return;
+ (void)idpf_qc_singleq_rx_vec_setup(&cpfl_rxq->base);
}
-#endif /* CC_AVX512_SUPPORT */
- }
- PMD_DRV_LOG(NOTICE,
- "Using Split Scalar Rx (port %d).",
- dev->data->port_id);
- dev->rx_pkt_burst = idpf_dp_splitq_recv_pkts;
- } else {
- if (vport->rx_vec_allowed) {
+ } else {
for (i = 0; i < dev->data->nb_rx_queues; i++) {
cpfl_rxq = dev->data->rx_queues[i];
- (void)idpf_qc_singleq_rx_vec_setup(&cpfl_rxq->base);
- }
-#ifdef CC_AVX512_SUPPORT
- if (rx_simd_width == RTE_VECT_SIMD_512) {
- PMD_DRV_LOG(NOTICE,
- "Using Single AVX512 Vector Rx (port %d).",
- dev->data->port_id);
- dev->rx_pkt_burst = idpf_dp_singleq_recv_pkts_avx512;
- return;
- }
-#endif /* CC_AVX512_SUPPORT */
- if (rx_simd_width == RTE_VECT_SIMD_256) {
- PMD_DRV_LOG(NOTICE,
- "Using Single AVX2 Vector Rx (port %d).",
- dev->data->port_id);
- dev->rx_pkt_burst = idpf_dp_singleq_recv_pkts_avx2;
- return;
+ if (cpfl_rxq->hairpin_info.hairpin_q)
+ continue;
+ (void)idpf_qc_splitq_rx_vec_setup(&cpfl_rxq->base);
}
}
- if (dev->data->scattered_rx) {
- PMD_DRV_LOG(NOTICE,
- "Using Single Scalar Scatterd Rx (port %d).",
- dev->data->port_id);
- dev->rx_pkt_burst = idpf_dp_singleq_recv_scatter_pkts;
- return;
- }
- PMD_DRV_LOG(NOTICE,
- "Using Single Scalar Rx (port %d).",
- dev->data->port_id);
- dev->rx_pkt_burst = idpf_dp_singleq_recv_pkts;
- }
-#else
- if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT) {
- PMD_DRV_LOG(NOTICE,
- "Using Split Scalar Rx (port %d).",
- dev->data->port_id);
- dev->rx_pkt_burst = idpf_dp_splitq_recv_pkts;
- } else {
- if (dev->data->scattered_rx) {
- PMD_DRV_LOG(NOTICE,
- "Using Single Scalar Scatterd Rx (port %d).",
- dev->data->port_id);
- dev->rx_pkt_burst = idpf_dp_singleq_recv_scatter_pkts;
- return;
- }
- PMD_DRV_LOG(NOTICE,
- "Using Single Scalar Rx (port %d).",
- dev->data->port_id);
- dev->rx_pkt_burst = idpf_dp_singleq_recv_pkts;
}
-#endif /* RTE_ARCH_X86 */
+#endif
+
+ dev->rx_pkt_burst = idpf_rx_path_infos[ad->rx_func_type].pkt_burst;
+ PMD_DRV_LOG(NOTICE, "Using %s (port %d).",
+ idpf_rx_path_infos[ad->rx_func_type].info, dev->data->port_id);
+
}
void
diff --git a/drivers/net/intel/idpf/idpf_common_device.h b/drivers/net/intel/idpf/idpf_common_device.h
index 11baa195e5..3b95d519c6 100644
--- a/drivers/net/intel/idpf/idpf_common_device.h
+++ b/drivers/net/intel/idpf/idpf_common_device.h
@@ -133,7 +133,6 @@ struct idpf_vport {
uint16_t devarg_id;
- bool rx_vec_allowed;
bool tx_vec_allowed;
struct virtchnl2_vport_stats eth_stats_offset;
--
2.34.1
^ permalink raw reply [flat|nested] 7+ messages in thread