automatic DPDK test reports
* |WARNING| pw123210 [PATCH] [v11, 1/1] common/idpf: add AVX512 data path for split queue model
@ 2023-02-07  9:08 dpdklab
  0 siblings, 0 replies; 3+ messages in thread
From: dpdklab @ 2023-02-07  9:08 UTC
  To: test-report; +Cc: dpdk-test-reports


Test-Label: iol-testing
Test-Status: WARNING
http://dpdk.org/patch/123210

_apply patch failure_

Submitter: Wenjun Wu <wenjun1.wu@intel.com>
Date: Tuesday, February 07 2023 08:45:49 
Applied on: CommitID:56ee8af9fd80493a9dd888ccc01ef4f1db02e491
Apply patch set 123210 failed:

Checking patch drivers/common/idpf/idpf_common_rxtx.c...
error: drivers/common/idpf/idpf_common_rxtx.c: does not exist in index
Checking patch drivers/common/idpf/idpf_common_rxtx.h...
error: drivers/common/idpf/idpf_common_rxtx.h: does not exist in index
Checking patch drivers/common/idpf/idpf_common_rxtx_avx512.c...
error: drivers/common/idpf/idpf_common_rxtx_avx512.c: does not exist in index
Checking patch drivers/common/idpf/version.map...
error: while searching for:
	idpf_dp_singleq_xmit_pkts;
	idpf_dp_singleq_xmit_pkts_avx512;
	idpf_dp_splitq_recv_pkts;
	idpf_dp_splitq_xmit_pkts;

	idpf_qc_rx_thresh_check;
	idpf_qc_rx_queue_release;

error: patch failed: drivers/common/idpf/version.map:10
error: while searching for:
	idpf_qc_single_rxq_mbufs_alloc;
	idpf_qc_single_tx_queue_reset;
	idpf_qc_singleq_rx_vec_setup;
	idpf_qc_singleq_tx_vec_avx512_setup;
	idpf_qc_split_rx_bufq_reset;
	idpf_qc_split_rx_descq_reset;
	idpf_qc_split_rx_queue_reset;
	idpf_qc_split_rxq_mbufs_alloc;
	idpf_qc_split_tx_complq_reset;
	idpf_qc_split_tx_descq_reset;
	idpf_qc_ts_mbuf_register;
	idpf_qc_tx_queue_release;
	idpf_qc_tx_thresh_check;
	idpf_qc_txq_mbufs_release;

	idpf_vc_api_version_check;

error: patch failed: drivers/common/idpf/version.map:19
Checking patch drivers/net/idpf/idpf_rxtx.c...
error: while searching for:
		if (rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_512)
#ifdef CC_AVX512_SUPPORT
			if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1 &&
			    rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512BW) == 1)
				vport->rx_use_avx512 = true;
#else
		PMD_DRV_LOG(NOTICE,

error: patch failed: drivers/net/idpf/idpf_rxtx.c:758
error: while searching for:

#ifdef RTE_ARCH_X86
	if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT) {
		dev->rx_pkt_burst = idpf_dp_splitq_recv_pkts;
	} else {
		if (vport->rx_vec_allowed) {

error: patch failed: drivers/net/idpf/idpf_rxtx.c:771
error: while searching for:
			}
#ifdef CC_AVX512_SUPPORT
			if (vport->rx_use_avx512) {
				dev->rx_pkt_burst = idpf_dp_singleq_recv_pkts_avx512;
				return;
			}
#endif /* CC_AVX512_SUPPORT */
		}

		dev->rx_pkt_burst = idpf_dp_singleq_recv_pkts;
	}
#else
	if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT)
		dev->rx_pkt_burst = idpf_dp_splitq_recv_pkts;
	else
		dev->rx_pkt_burst = idpf_dp_singleq_recv_pkts;
#endif /* RTE_ARCH_X86 */
}


error: patch failed: drivers/net/idpf/idpf_rxtx.c:780
error: while searching for:
	int i;
#endif /* CC_AVX512_SUPPORT */

	if (idpf_rx_vec_dev_check_default(dev) == IDPF_VECTOR_PATH &&
	    rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) {
		vport->tx_vec_allowed = true;
		if (rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_512)
#ifdef CC_AVX512_SUPPORT
			if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1 &&
			    rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512BW) == 1)
				vport->tx_use_avx512 = true;
#else
		PMD_DRV_LOG(NOTICE,
			    "AVX512 is not supported in build env");

error: patch failed: drivers/net/idpf/idpf_rxtx.c:806
error: while searching for:
	}
#endif /* RTE_ARCH_X86 */

	if (vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT) {
		dev->tx_pkt_burst = idpf_dp_splitq_xmit_pkts;
		dev->tx_pkt_prepare = idpf_dp_prep_pkts;
	} else {
#ifdef RTE_ARCH_X86
		if (vport->tx_vec_allowed) {
#ifdef CC_AVX512_SUPPORT
			if (vport->tx_use_avx512) {

error: patch failed: drivers/net/idpf/idpf_rxtx.c:823
error: while searching for:
					txq = dev->data->tx_queues[i];
					if (txq == NULL)
						continue;
					idpf_qc_singleq_tx_vec_avx512_setup(txq);
				}
				dev->tx_pkt_burst = idpf_dp_singleq_xmit_pkts_avx512;
				dev->tx_pkt_prepare = idpf_dp_prep_pkts;
				return;
			}
#endif /* CC_AVX512_SUPPORT */
		}
#endif /* RTE_ARCH_X86 */
		dev->tx_pkt_burst = idpf_dp_singleq_xmit_pkts;
		dev->tx_pkt_prepare = idpf_dp_prep_pkts;
	}
}

error: patch failed: drivers/net/idpf/idpf_rxtx.c:835
Checking patch drivers/net/idpf/idpf_rxtx_vec_common.h...
Applying patch drivers/common/idpf/version.map with 2 rejects...
Rejected hunk #1.
Rejected hunk #2.
Applying patch drivers/net/idpf/idpf_rxtx.c with 6 rejects...
Rejected hunk #1.
Rejected hunk #2.
Rejected hunk #3.
Rejected hunk #4.
Rejected hunk #5.
Rejected hunk #6.
Applied patch drivers/net/idpf/idpf_rxtx_vec_common.h cleanly.
diff a/drivers/common/idpf/version.map b/drivers/common/idpf/version.map	(rejected hunks)
@@ -10,7 +10,9 @@ INTERNAL {
 	idpf_dp_singleq_xmit_pkts;
 	idpf_dp_singleq_xmit_pkts_avx512;
 	idpf_dp_splitq_recv_pkts;
+	idpf_dp_splitq_recv_pkts_avx512;
 	idpf_dp_splitq_xmit_pkts;
+	idpf_dp_splitq_xmit_pkts_avx512;
 
 	idpf_qc_rx_thresh_check;
 	idpf_qc_rx_queue_release;
@@ -19,16 +21,17 @@ INTERNAL {
 	idpf_qc_single_rxq_mbufs_alloc;
 	idpf_qc_single_tx_queue_reset;
 	idpf_qc_singleq_rx_vec_setup;
-	idpf_qc_singleq_tx_vec_avx512_setup;
 	idpf_qc_split_rx_bufq_reset;
 	idpf_qc_split_rx_descq_reset;
 	idpf_qc_split_rx_queue_reset;
 	idpf_qc_split_rxq_mbufs_alloc;
 	idpf_qc_split_tx_complq_reset;
 	idpf_qc_split_tx_descq_reset;
+	idpf_qc_splitq_rx_vec_setup;
 	idpf_qc_ts_mbuf_register;
 	idpf_qc_tx_queue_release;
 	idpf_qc_tx_thresh_check;
+	idpf_qc_tx_vec_avx512_setup;
 	idpf_qc_txq_mbufs_release;
 
 	idpf_vc_api_version_check;
diff a/drivers/net/idpf/idpf_rxtx.c b/drivers/net/idpf/idpf_rxtx.c	(rejected hunks)
@@ -758,7 +758,8 @@ idpf_set_rx_function(struct rte_eth_dev *dev)
 		if (rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_512)
 #ifdef CC_AVX512_SUPPORT
 			if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1 &&
-			    rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512BW) == 1)
+			    rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512BW) == 1 &&
+			    rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512DQ))
 				vport->rx_use_avx512 = true;
 #else
 		PMD_DRV_LOG(NOTICE,
@@ -771,6 +772,24 @@ idpf_set_rx_function(struct rte_eth_dev *dev)
 
 #ifdef RTE_ARCH_X86
 	if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT) {
+		if (vport->rx_vec_allowed) {
+			for (i = 0; i < dev->data->nb_rx_queues; i++) {
+				rxq = dev->data->rx_queues[i];
+				(void)idpf_qc_splitq_rx_vec_setup(rxq);
+			}
+#ifdef CC_AVX512_SUPPORT
+			if (vport->rx_use_avx512) {
+				PMD_DRV_LOG(NOTICE,
+					    "Using Split AVX512 Vector Rx (port %d).",
+					    dev->data->port_id);
+				dev->rx_pkt_burst = idpf_dp_splitq_recv_pkts_avx512;
+				return;
+			}
+#endif /* CC_AVX512_SUPPORT */
+		}
+		PMD_DRV_LOG(NOTICE,
+			    "Using Split Scalar Rx (port %d).",
+			    dev->data->port_id);
 		dev->rx_pkt_burst = idpf_dp_splitq_recv_pkts;
 	} else {
 		if (vport->rx_vec_allowed) {
@@ -780,19 +799,31 @@ idpf_set_rx_function(struct rte_eth_dev *dev)
 			}
 #ifdef CC_AVX512_SUPPORT
 			if (vport->rx_use_avx512) {
+				PMD_DRV_LOG(NOTICE,
+					    "Using Single AVX512 Vector Rx (port %d).",
+					    dev->data->port_id);
 				dev->rx_pkt_burst = idpf_dp_singleq_recv_pkts_avx512;
 				return;
 			}
 #endif /* CC_AVX512_SUPPORT */
 		}
-
+		PMD_DRV_LOG(NOTICE,
+			    "Using Single Scalar Rx (port %d).",
+			    dev->data->port_id);
 		dev->rx_pkt_burst = idpf_dp_singleq_recv_pkts;
 	}
 #else
-	if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT)
+	if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT) {
+		PMD_DRV_LOG(NOTICE,
+			    "Using Split Scalar Rx (port %d).",
+			    dev->data->port_id);
 		dev->rx_pkt_burst = idpf_dp_splitq_recv_pkts;
-	else
+	} else {
+		PMD_DRV_LOG(NOTICE,
+			    "Using Single Scalar Rx (port %d).",
+			    dev->data->port_id);
 		dev->rx_pkt_burst = idpf_dp_singleq_recv_pkts;
+	}
 #endif /* RTE_ARCH_X86 */
 }
 
@@ -806,14 +837,22 @@ idpf_set_tx_function(struct rte_eth_dev *dev)
 	int i;
 #endif /* CC_AVX512_SUPPORT */
 
-	if (idpf_rx_vec_dev_check_default(dev) == IDPF_VECTOR_PATH &&
+	if (idpf_tx_vec_dev_check_default(dev) == IDPF_VECTOR_PATH &&
 	    rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) {
 		vport->tx_vec_allowed = true;
 		if (rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_512)
 #ifdef CC_AVX512_SUPPORT
+		{
 			if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1 &&
 			    rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512BW) == 1)
 				vport->tx_use_avx512 = true;
+			if (vport->tx_use_avx512) {
+				for (i = 0; i < dev->data->nb_tx_queues; i++) {
+					txq = dev->data->tx_queues[i];
+					idpf_qc_tx_vec_avx512_setup(txq);
+				}
+			}
+		}
 #else
 		PMD_DRV_LOG(NOTICE,
 			    "AVX512 is not supported in build env");
@@ -823,11 +862,26 @@ idpf_set_tx_function(struct rte_eth_dev *dev)
 	}
 #endif /* RTE_ARCH_X86 */
 
+#ifdef RTE_ARCH_X86
 	if (vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT) {
+		if (vport->tx_vec_allowed) {
+#ifdef CC_AVX512_SUPPORT
+			if (vport->tx_use_avx512) {
+				PMD_DRV_LOG(NOTICE,
+					    "Using Split AVX512 Vector Tx (port %d).",
+					    dev->data->port_id);
+				dev->tx_pkt_burst = idpf_dp_splitq_xmit_pkts_avx512;
+				dev->tx_pkt_prepare = idpf_dp_prep_pkts;
+				return;
+			}
+#endif /* CC_AVX512_SUPPORT */
+		}
+		PMD_DRV_LOG(NOTICE,
+			    "Using Split Scalar Tx (port %d).",
+			    dev->data->port_id);
 		dev->tx_pkt_burst = idpf_dp_splitq_xmit_pkts;
 		dev->tx_pkt_prepare = idpf_dp_prep_pkts;
 	} else {
-#ifdef RTE_ARCH_X86
 		if (vport->tx_vec_allowed) {
 #ifdef CC_AVX512_SUPPORT
 			if (vport->tx_use_avx512) {
@@ -835,16 +889,36 @@ idpf_set_tx_function(struct rte_eth_dev *dev)
 					txq = dev->data->tx_queues[i];
 					if (txq == NULL)
 						continue;
-					idpf_qc_singleq_tx_vec_avx512_setup(txq);
+					idpf_qc_tx_vec_avx512_setup(txq);
 				}
+				PMD_DRV_LOG(NOTICE,
+					    "Using Single AVX512 Vector Tx (port %d).",
+					    dev->data->port_id);
 				dev->tx_pkt_burst = idpf_dp_singleq_xmit_pkts_avx512;
 				dev->tx_pkt_prepare = idpf_dp_prep_pkts;
 				return;
 			}
 #endif /* CC_AVX512_SUPPORT */
 		}
-#endif /* RTE_ARCH_X86 */
+		PMD_DRV_LOG(NOTICE,
+			    "Using Single Scalar Tx (port %d).",
+			    dev->data->port_id);
+		dev->tx_pkt_burst = idpf_dp_singleq_xmit_pkts;
+		dev->tx_pkt_prepare = idpf_dp_prep_pkts;
+	}
+#else
+	if (vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT) {
+		PMD_DRV_LOG(NOTICE,
+			    "Using Split Scalar Tx (port %d).",
+			    dev->data->port_id);
+		dev->tx_pkt_burst = idpf_dp_splitq_xmit_pkts;
+		dev->tx_pkt_prepare = idpf_dp_prep_pkts;
+	} else {
+		PMD_DRV_LOG(NOTICE,
+			    "Using Single Scalar Tx (port %d).",
+			    dev->data->port_id);
 		dev->tx_pkt_burst = idpf_dp_singleq_xmit_pkts;
 		dev->tx_pkt_prepare = idpf_dp_prep_pkts;
 	}
+#endif /* RTE_ARCH_X86 */
 }
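
The "does not exist in index" and "patch failed" errors above indicate that the
base commit listed under "Applied on:" does not contain the files and context
this v11 patch was generated against, most likely because it depends on idpf
common-code changes that were not yet in that tree. A minimal sketch of how the
failure could be reproduced locally; the clone and mbox URL forms below are
assumptions, not taken from this report:

    # check out the base commit named in the report (clone URL assumed)
    git clone https://dpdk.org/git/dpdk && cd dpdk
    git checkout 56ee8af9fd80493a9dd888ccc01ef4f1db02e491
    # fetch the patch from patchwork (mbox URL form assumed) and dry-run it
    curl -L -o pw123210.mbox http://dpdk.org/patch/123210/mbox/
    git apply --check --verbose pw123210.mbox   # fails on the missing drivers/common/idpf files
    git am -3 pw123210.mbox                     # 3-way fallback; still cannot apply without the prerequisite changes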

https://lab.dpdk.org/results/dashboard/patchsets/25246/

UNH-IOL DPDK Community Lab

Thread overview: 3+ messages
     [not found] <20230207084549.2225214-2-wenjun1.wu@intel.com>
2023-02-07  8:30 ` |WARNING| pw123210 [PATCH v11 1/1] common/idpf: add AVX512 data path for split queue model qemudev
2023-02-07  8:39 ` |SUCCESS| " checkpatch
2023-02-07  9:08 |WARNING| pw123210 [PATCH] [v11, " dpdklab
