automatic DPDK test reports
From: dpdklab@iol.unh.edu
To: test-report@dpdk.org
Cc: dpdk-test-reports@iol.unh.edu
Subject: |WARNING| pw120860 [PATCH] [v1] common/idpf: add AVX512 data path for split queue model
Date: Wed, 14 Dec 2022 01:18:02 -0500 (EST)
Message-ID: <20221214061802.5F37110618C@noxus.dpdklab.iol.unh.edu>

Test-Label: iol-testing
Test-Status: WARNING
http://dpdk.org/patch/120860

_apply patch failure_

Submitter: Wenjun Wu <wenjun1.wu@intel.com>
Date: Wednesday, December 14, 2022 06:07:06
Applied on: CommitID:c581c49cd3fcaff596fbe566e270b442e6326c79
Apply patch set 120860 failed:

.git/rebase-apply/patch:1052: space before tab in indent.
 	idpf_singleq_xmit_pkts;
.git/rebase-apply/patch:1053: space before tab in indent.
 	idpf_prep_pkts;
.git/rebase-apply/patch:1054: space before tab in indent.
 	idpf_singleq_rx_vec_setup;
.git/rebase-apply/patch:1058: space before tab in indent.
 	idpf_singleq_recv_pkts_avx512;
.git/rebase-apply/patch:1060: space before tab in indent.
 	idpf_singleq_xmit_pkts_avx512;
Checking patch drivers/common/idpf/idpf_common_rxtx.c...
error: drivers/common/idpf/idpf_common_rxtx.c: does not exist in index
Checking patch drivers/common/idpf/idpf_common_rxtx.h...
error: drivers/common/idpf/idpf_common_rxtx.h: does not exist in index
Checking patch drivers/common/idpf/idpf_common_rxtx_avx512.c...
error: drivers/common/idpf/idpf_common_rxtx_avx512.c: does not exist in index
Checking patch drivers/common/idpf/version.map...
error: while searching for:
	idpf_singleq_xmit_pkts;
	idpf_prep_pkts;
	idpf_singleq_rx_vec_setup;
	idpf_singleq_tx_vec_setup_avx512;
	idpf_singleq_recv_pkts_avx512;
	idpf_singleq_xmit_pkts_avx512;

	local: *;
};

error: patch failed: drivers/common/idpf/version.map:44
Checking patch drivers/common/idpf/version.map.rej...
Checking patch drivers/net/idpf/idpf_rxtx.c...
error: while searching for:
		if (rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_512)
#ifdef CC_AVX512_SUPPORT
			if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1 &&
			    rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512BW) == 1)
				vport->rx_use_avx512 = true;
#else
		PMD_DRV_LOG(NOTICE,

error: patch failed: drivers/net/idpf/idpf_rxtx.c:761
error: while searching for:

#ifdef RTE_ARCH_X86
	if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT) {
		dev->rx_pkt_burst = idpf_splitq_recv_pkts;
	} else {
		if (vport->rx_vec_allowed) {

error: patch failed: drivers/net/idpf/idpf_rxtx.c:774
error: while searching for:
	int i;
#endif /* CC_AVX512_SUPPORT */

	if (idpf_rx_vec_dev_check_default(dev) == IDPF_VECTOR_PATH &&
	    rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) {
		vport->tx_vec_allowed = true;
		if (rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_512)
#ifdef CC_AVX512_SUPPORT
			if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1 &&
			    rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512BW) == 1)
				vport->tx_use_avx512 = true;
#else
		PMD_DRV_LOG(NOTICE,
			    "AVX512 is not supported in build env");

error: patch failed: drivers/net/idpf/idpf_rxtx.c:809
Hunk #4 succeeded at 2290 (offset 1440 lines).
error: while searching for:
		if (vport->tx_vec_allowed) {
#ifdef CC_AVX512_SUPPORT
			if (vport->tx_use_avx512) {
				for (i = 0; i < dev->data->nb_tx_queues; i++) {
					txq = dev->data->tx_queues[i];
					if (txq == NULL)
						continue;
					idpf_singleq_tx_vec_setup_avx512(txq);
				}
				dev->tx_pkt_burst = idpf_singleq_xmit_pkts_avx512;
				dev->tx_pkt_prepare = idpf_prep_pkts;
				return;

error: patch failed: drivers/net/idpf/idpf_rxtx.c:834
Checking patch drivers/net/idpf/idpf_rxtx_vec_common.h...
Applying patch drivers/common/idpf/version.map with 1 reject...
Rejected hunk #1.
Applied patch drivers/common/idpf/version.map.rej cleanly.
Applying patch drivers/net/idpf/idpf_rxtx.c with 4 rejects...
Rejected hunk #1.
Rejected hunk #2.
Rejected hunk #3.
Hunk #4 applied cleanly.
Rejected hunk #5.
Applied patch drivers/net/idpf/idpf_rxtx_vec_common.h cleanly.
diff a/drivers/common/idpf/version.map b/drivers/common/idpf/version.map	(rejected hunks)
@@ -45,9 +45,12 @@ INTERNAL {
 	idpf_singleq_xmit_pkts;
 	idpf_prep_pkts;
 	idpf_singleq_rx_vec_setup;
-	idpf_singleq_tx_vec_setup_avx512;
+	idpf_splitq_rx_vec_setup;
+	idpf_tx_vec_setup_avx512;
 	idpf_singleq_recv_pkts_avx512;
+	idpf_splitq_recv_pkts_avx512;
 	idpf_singleq_xmit_pkts_avx512;
+	idpf_splitq_xmit_pkts_avx512;
 	idpf_update_stats;
 	idpf_query_stats;
 	idpf_vc_set_rss_key;
diff a/drivers/net/idpf/idpf_rxtx.c b/drivers/net/idpf/idpf_rxtx.c	(rejected hunks)
@@ -761,7 +761,8 @@ idpf_set_rx_function(struct rte_eth_dev *dev)
 		if (rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_512)
 #ifdef CC_AVX512_SUPPORT
 			if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1 &&
-			    rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512BW) == 1)
+			    rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512BW) == 1 &&
+			    rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512DQ))
 				vport->rx_use_avx512 = true;
 #else
 		PMD_DRV_LOG(NOTICE,
@@ -774,6 +775,20 @@ idpf_set_rx_function(struct rte_eth_dev *dev)
 
 #ifdef RTE_ARCH_X86
 	if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT) {
+#ifdef RTE_ARCH_X86
+		if (vport->rx_vec_allowed) {
+			for (i = 0; i < dev->data->nb_tx_queues; i++) {
+				rxq = dev->data->rx_queues[i];
+				(void)idpf_splitq_rx_vec_setup(rxq);
+			}
+#ifdef CC_AVX512_SUPPORT
+			if (vport->rx_use_avx512) {
+				dev->rx_pkt_burst = idpf_splitq_recv_pkts_avx512;
+				return;
+			}
+#endif
+		}
+#endif
 		dev->rx_pkt_burst = idpf_splitq_recv_pkts;
 	} else {
 		if (vport->rx_vec_allowed) {
@@ -809,14 +824,22 @@ idpf_set_tx_function(struct rte_eth_dev *dev)
 	int i;
 #endif /* CC_AVX512_SUPPORT */
 
-	if (idpf_rx_vec_dev_check_default(dev) == IDPF_VECTOR_PATH &&
+	if (idpf_tx_vec_dev_check_default(dev) == IDPF_VECTOR_PATH &&
 	    rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) {
 		vport->tx_vec_allowed = true;
 		if (rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_512)
 #ifdef CC_AVX512_SUPPORT
+		{
 			if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1 &&
 			    rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512BW) == 1)
 				vport->tx_use_avx512 = true;
+			if (vport->tx_use_avx512) {
+				for (i = 0; i < dev->data->nb_tx_queues; i++) {
+					txq = dev->data->tx_queues[i];
+					idpf_tx_vec_setup_avx512(txq);
+				}
+			}
+		}
 #else
 		PMD_DRV_LOG(NOTICE,
 			    "AVX512 is not supported in build env");
@@ -834,12 +868,6 @@ idpf_set_tx_function(struct rte_eth_dev *dev)
 		if (vport->tx_vec_allowed) {
 #ifdef CC_AVX512_SUPPORT
 			if (vport->tx_use_avx512) {
-				for (i = 0; i < dev->data->nb_tx_queues; i++) {
-					txq = dev->data->tx_queues[i];
-					if (txq == NULL)
-						continue;
-					idpf_singleq_tx_vec_setup_avx512(txq);
-				}
 				dev->tx_pkt_burst = idpf_singleq_xmit_pkts_avx512;
 				dev->tx_pkt_prepare = idpf_prep_pkts;
 				return;

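For reference, a minimal C sketch of what rejected hunk #2 against drivers/net/idpf/idpf_rxtx.c appears to intend: selecting the split-queue AVX512 RX burst function inside idpf_set_rx_function(). This is reconstructed from the rejected diff above, not from the applied tree; the idpf_splitq_* symbols come from the patch itself, the vport fields are assumptions, and the loop iterates nb_rx_queues where the hunk iterates nb_tx_queues.

/*
 * Hedged sketch only -- reconstructed from the rejected hunk, not the code
 * that was actually applied.  The idpf_splitq_* functions and the vport
 * fields are taken from the diff above and are assumptions here.
 */
#include <rte_ethdev.h>
#include <rte_vect.h>

static void
idpf_set_rx_function_sketch(struct rte_eth_dev *dev, struct idpf_vport *vport)
{
#ifdef RTE_ARCH_X86
	uint16_t i;

	if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT) {
		if (vport->rx_vec_allowed) {
			/* Prepare every RX queue for the vector path. */
			for (i = 0; i < dev->data->nb_rx_queues; i++)
				(void)idpf_splitq_rx_vec_setup(dev->data->rx_queues[i]);
#ifdef CC_AVX512_SUPPORT
			/* Prefer the AVX512 split-queue burst when available. */
			if (vport->rx_use_avx512) {
				dev->rx_pkt_burst = idpf_splitq_recv_pkts_avx512;
				return;
			}
#endif
		}
		/* Otherwise fall back to the scalar split-queue receive path. */
		dev->rx_pkt_burst = idpf_splitq_recv_pkts;
	}
#endif /* RTE_ARCH_X86 */
}
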
https://lab.dpdk.org/results/dashboard/patchsets/24724/

UNH-IOL DPDK Community Lab
