DPDK patches and discussions
From: Tyler Retzlaff <roretzla@linux.microsoft.com>
To: dev@dpdk.org
Cc: "Mattias Rönnblom" <mattias.ronnblom@ericsson.com>,
	"Morten Brørup" <mb@smartsharesystems.com>,
	"Abdullah Sevincer" <abdullah.sevincer@intel.com>,
	"Ajit Khaparde" <ajit.khaparde@broadcom.com>,
	"Alok Prasad" <palok@marvell.com>,
	"Anatoly Burakov" <anatoly.burakov@intel.com>,
	"Andrew Rybchenko" <andrew.rybchenko@oktetlabs.ru>,
	"Anoob Joseph" <anoobj@marvell.com>,
	"Bruce Richardson" <bruce.richardson@intel.com>,
	"Byron Marohn" <byron.marohn@intel.com>,
	"Chenbo Xia" <chenbox@nvidia.com>,
	"Chengwen Feng" <fengchengwen@huawei.com>,
	"Ciara Loftus" <ciara.loftus@intel.com>,
	"Ciara Power" <ciara.power@intel.com>,
	"Dariusz Sosnowski" <dsosnowski@nvidia.com>,
	"David Hunt" <david.hunt@intel.com>,
	"Devendra Singh Rawat" <dsinghrawat@marvell.com>,
	"Erik Gabriel Carrillo" <erik.g.carrillo@intel.com>,
	"Guoyang Zhou" <zhouguoyang@huawei.com>,
	"Harman Kalra" <hkalra@marvell.com>,
	"Harry van Haaren" <harry.van.haaren@intel.com>,
	"Honnappa Nagarahalli" <honnappa.nagarahalli@arm.com>,
	"Jakub Grajciar" <jgrajcia@cisco.com>,
	"Jerin Jacob" <jerinj@marvell.com>,
	"Jeroen de Borst" <jeroendb@google.com>,
	"Jian Wang" <jianwang@trustnetic.com>,
	"Jiawen Wu" <jiawenwu@trustnetic.com>,
	"Jie Hai" <haijie1@huawei.com>,
	"Jingjing Wu" <jingjing.wu@intel.com>,
	"Joshua Washington" <joshwash@google.com>,
	"Joyce Kong" <joyce.kong@arm.com>,
	"Junfeng Guo" <junfeng.guo@intel.com>,
	"Kevin Laatz" <kevin.laatz@intel.com>,
	"Konstantin Ananyev" <konstantin.v.ananyev@yandex.ru>,
	"Liang Ma" <liangma@liangbit.com>,
	"Long Li" <longli@microsoft.com>,
	"Maciej Czekaj" <mczekaj@marvell.com>,
	"Matan Azrad" <matan@nvidia.com>,
	"Maxime Coquelin" <maxime.coquelin@redhat.com>,
	"Nicolas Chautru" <nicolas.chautru@intel.com>,
	"Ori Kam" <orika@nvidia.com>,
	"Pavan Nikhilesh" <pbhagavatula@marvell.com>,
	"Peter Mccarthy" <peter.mccarthy@intel.com>,
	"Rahul Lakkireddy" <rahul.lakkireddy@chelsio.com>,
	"Reshma Pattan" <reshma.pattan@intel.com>,
	"Rosen Xu" <rosen.xu@intel.com>,
	"Ruifeng Wang" <ruifeng.wang@arm.com>,
	"Rushil Gupta" <rushilg@google.com>,
	"Sameh Gobriel" <sameh.gobriel@intel.com>,
	"Sivaprasad Tummala" <sivaprasad.tummala@amd.com>,
	"Somnath Kotur" <somnath.kotur@broadcom.com>,
	"Stephen Hemminger" <stephen@networkplumber.org>,
	"Suanming Mou" <suanmingm@nvidia.com>,
	"Sunil Kumar Kori" <skori@marvell.com>,
	"Sunil Uttarwar" <sunilprakashrao.uttarwar@amd.com>,
	"Tetsuya Mukawa" <mtetsuyah@gmail.com>,
	"Vamsi Attunuru" <vattunuru@marvell.com>,
	"Viacheslav Ovsiienko" <viacheslavo@nvidia.com>,
	"Vladimir Medvedkin" <vladimir.medvedkin@intel.com>,
	"Xiaoyun Wang" <cloud.wangxiaoyun@huawei.com>,
	"Yipeng Wang" <yipeng1.wang@intel.com>,
	"Yisen Zhuang" <yisen.zhuang@huawei.com>,
	"Yuying Zhang" <Yuying.Zhang@intel.com>,
	"Yuying Zhang" <yuying.zhang@intel.com>,
	"Ziyang Xuan" <xuanziyang2@huawei.com>,
	"Tyler Retzlaff" <roretzla@linux.microsoft.com>
Subject: [PATCH v3 45/45] app/test-bbdev: use rte stdatomic API
Date: Wed, 27 Mar 2024 15:37:58 -0700
Message-ID: <1711579078-10624-46-git-send-email-roretzla@linux.microsoft.com>
In-Reply-To: <1711579078-10624-1-git-send-email-roretzla@linux.microsoft.com>

Replace the use of the gcc builtin __atomic_xxx intrinsics with the
corresponding rte_atomic_xxx calls from the optional rte stdatomic API.
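
A minimal sketch of the conversion pattern applied throughout the patch
(illustrative only, not part of the change; the struct and function names
here are made up, while the field name, calls and memory order are taken
from the diff below):

    #include <stdint.h>
    #include <rte_stdatomic.h>

    struct example_params {
            /* was: uint16_t nb_dequeued; */
            RTE_ATOMIC(uint16_t) nb_dequeued;
    };

    static void
    example_update(struct example_params *tp, uint16_t deq)
    {
            /* was: __atomic_store_n(&tp->nb_dequeued, 0, __ATOMIC_RELAXED); */
            rte_atomic_store_explicit(&tp->nb_dequeued, 0, rte_memory_order_relaxed);

            /* was: __atomic_fetch_add(&tp->nb_dequeued, deq, __ATOMIC_RELAXED); */
            rte_atomic_fetch_add_explicit(&tp->nb_dequeued, deq, rte_memory_order_relaxed);

            /* was: __atomic_load_n(&tp->nb_dequeued, __ATOMIC_RELAXED); */
            (void)rte_atomic_load_explicit(&tp->nb_dequeued, rte_memory_order_relaxed);
    }

The casts added at the rte_wait_until_equal_16() call sites strip the
RTE_ATOMIC() qualification, as that helper expects a plain uint16_t
pointer rather than an atomic-qualified one.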

Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
---
 app/test-bbdev/test_bbdev_perf.c | 183 +++++++++++++++++++++++----------------
 1 file changed, 110 insertions(+), 73 deletions(-)

diff --git a/app/test-bbdev/test_bbdev_perf.c b/app/test-bbdev/test_bbdev_perf.c
index dcce00a..9694ed3 100644
--- a/app/test-bbdev/test_bbdev_perf.c
+++ b/app/test-bbdev/test_bbdev_perf.c
@@ -144,7 +144,7 @@ struct test_op_params {
 	uint16_t num_to_process;
 	uint16_t num_lcores;
 	int vector_mask;
-	uint16_t sync;
+	RTE_ATOMIC(uint16_t) sync;
 	struct test_buffers q_bufs[RTE_MAX_NUMA_NODES][MAX_QUEUES];
 };
 
@@ -159,9 +159,9 @@ struct thread_params {
 	uint8_t iter_count;
 	double iter_average;
 	double bler;
-	uint16_t nb_dequeued;
-	int16_t processing_status;
-	uint16_t burst_sz;
+	RTE_ATOMIC(uint16_t) nb_dequeued;
+	RTE_ATOMIC(int16_t) processing_status;
+	RTE_ATOMIC(uint16_t) burst_sz;
 	struct test_op_params *op_params;
 	struct rte_bbdev_dec_op *dec_ops[MAX_BURST];
 	struct rte_bbdev_enc_op *enc_ops[MAX_BURST];
@@ -3195,56 +3195,64 @@ typedef int (test_case_function)(struct active_device *ad,
 	}
 
 	if (unlikely(event != RTE_BBDEV_EVENT_DEQUEUE)) {
-		__atomic_store_n(&tp->processing_status, TEST_FAILED, __ATOMIC_RELAXED);
+		rte_atomic_store_explicit(&tp->processing_status, TEST_FAILED,
+		    rte_memory_order_relaxed);
 		printf(
 			"Dequeue interrupt handler called for incorrect event!\n");
 		return;
 	}
 
-	burst_sz = __atomic_load_n(&tp->burst_sz, __ATOMIC_RELAXED);
+	burst_sz = rte_atomic_load_explicit(&tp->burst_sz, rte_memory_order_relaxed);
 	num_ops = tp->op_params->num_to_process;
 
 	if (test_vector.op_type == RTE_BBDEV_OP_TURBO_DEC)
 		deq = rte_bbdev_dequeue_dec_ops(dev_id, queue_id,
 				&tp->dec_ops[
-					__atomic_load_n(&tp->nb_dequeued, __ATOMIC_RELAXED)],
+					rte_atomic_load_explicit(&tp->nb_dequeued,
+					    rte_memory_order_relaxed)],
 				burst_sz);
 	else if (test_vector.op_type == RTE_BBDEV_OP_LDPC_DEC)
 		deq = rte_bbdev_dequeue_ldpc_dec_ops(dev_id, queue_id,
 				&tp->dec_ops[
-					__atomic_load_n(&tp->nb_dequeued, __ATOMIC_RELAXED)],
+					rte_atomic_load_explicit(&tp->nb_dequeued,
+					    rte_memory_order_relaxed)],
 				burst_sz);
 	else if (test_vector.op_type == RTE_BBDEV_OP_LDPC_ENC)
 		deq = rte_bbdev_dequeue_ldpc_enc_ops(dev_id, queue_id,
 				&tp->enc_ops[
-					__atomic_load_n(&tp->nb_dequeued, __ATOMIC_RELAXED)],
+					rte_atomic_load_explicit(&tp->nb_dequeued,
+					    rte_memory_order_relaxed)],
 				burst_sz);
 	else if (test_vector.op_type == RTE_BBDEV_OP_FFT)
 		deq = rte_bbdev_dequeue_fft_ops(dev_id, queue_id,
 				&tp->fft_ops[
-					__atomic_load_n(&tp->nb_dequeued, __ATOMIC_RELAXED)],
+					rte_atomic_load_explicit(&tp->nb_dequeued,
+					    rte_memory_order_relaxed)],
 				burst_sz);
 	else if (test_vector.op_type == RTE_BBDEV_OP_MLDTS)
 		deq = rte_bbdev_dequeue_mldts_ops(dev_id, queue_id,
 				&tp->mldts_ops[
-					__atomic_load_n(&tp->nb_dequeued, __ATOMIC_RELAXED)],
+					rte_atomic_load_explicit(&tp->nb_dequeued,
+					    rte_memory_order_relaxed)],
 				burst_sz);
 	else /*RTE_BBDEV_OP_TURBO_ENC*/
 		deq = rte_bbdev_dequeue_enc_ops(dev_id, queue_id,
 				&tp->enc_ops[
-					__atomic_load_n(&tp->nb_dequeued, __ATOMIC_RELAXED)],
+					rte_atomic_load_explicit(&tp->nb_dequeued,
+					    rte_memory_order_relaxed)],
 				burst_sz);
 
 	if (deq < burst_sz) {
 		printf(
 			"After receiving the interrupt all operations should be dequeued. Expected: %u, got: %u\n",
 			burst_sz, deq);
-		__atomic_store_n(&tp->processing_status, TEST_FAILED, __ATOMIC_RELAXED);
+		rte_atomic_store_explicit(&tp->processing_status, TEST_FAILED,
+		    rte_memory_order_relaxed);
 		return;
 	}
 
-	if (__atomic_load_n(&tp->nb_dequeued, __ATOMIC_RELAXED) + deq < num_ops) {
-		__atomic_fetch_add(&tp->nb_dequeued, deq, __ATOMIC_RELAXED);
+	if (rte_atomic_load_explicit(&tp->nb_dequeued, rte_memory_order_relaxed) + deq < num_ops) {
+		rte_atomic_fetch_add_explicit(&tp->nb_dequeued, deq, rte_memory_order_relaxed);
 		return;
 	}
 
@@ -3288,7 +3296,8 @@ typedef int (test_case_function)(struct active_device *ad,
 
 	if (ret) {
 		printf("Buffers validation failed\n");
-		__atomic_store_n(&tp->processing_status, TEST_FAILED, __ATOMIC_RELAXED);
+		rte_atomic_store_explicit(&tp->processing_status, TEST_FAILED,
+		    rte_memory_order_relaxed);
 	}
 
 	switch (test_vector.op_type) {
@@ -3315,7 +3324,8 @@ typedef int (test_case_function)(struct active_device *ad,
 		break;
 	default:
 		printf("Unknown op type: %d\n", test_vector.op_type);
-		__atomic_store_n(&tp->processing_status, TEST_FAILED, __ATOMIC_RELAXED);
+		rte_atomic_store_explicit(&tp->processing_status, TEST_FAILED,
+		    rte_memory_order_relaxed);
 		return;
 	}
 
@@ -3324,7 +3334,7 @@ typedef int (test_case_function)(struct active_device *ad,
 	tp->mbps += (((double)(num_ops * tb_len_bits)) / 1000000.0) /
 			((double)total_time / (double)rte_get_tsc_hz());
 
-	__atomic_fetch_add(&tp->nb_dequeued, deq, __ATOMIC_RELAXED);
+	rte_atomic_fetch_add_explicit(&tp->nb_dequeued, deq, rte_memory_order_relaxed);
 }
 
 static int
@@ -3362,10 +3372,11 @@ typedef int (test_case_function)(struct active_device *ad,
 
 	bufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];
 
-	__atomic_store_n(&tp->processing_status, 0, __ATOMIC_RELAXED);
-	__atomic_store_n(&tp->nb_dequeued, 0, __ATOMIC_RELAXED);
+	rte_atomic_store_explicit(&tp->processing_status, 0, rte_memory_order_relaxed);
+	rte_atomic_store_explicit(&tp->nb_dequeued, 0, rte_memory_order_relaxed);
 
-	rte_wait_until_equal_16(&tp->op_params->sync, SYNC_START, __ATOMIC_RELAXED);
+	rte_wait_until_equal_16((uint16_t *)(uintptr_t)&tp->op_params->sync, SYNC_START,
+	    rte_memory_order_relaxed);
 
 	ret = rte_bbdev_dec_op_alloc_bulk(tp->op_params->mp, ops,
 				num_to_process);
@@ -3415,15 +3426,17 @@ typedef int (test_case_function)(struct active_device *ad,
 			 * the number of operations is not a multiple of
 			 * burst size.
 			 */
-			__atomic_store_n(&tp->burst_sz, num_to_enq, __ATOMIC_RELAXED);
+			rte_atomic_store_explicit(&tp->burst_sz, num_to_enq,
+			    rte_memory_order_relaxed);
 
 			/* Wait until processing of previous batch is
 			 * completed
 			 */
-			rte_wait_until_equal_16(&tp->nb_dequeued, enqueued, __ATOMIC_RELAXED);
+			rte_wait_until_equal_16((uint16_t *)(uintptr_t)&tp->nb_dequeued, enqueued,
+			    rte_memory_order_relaxed);
 		}
 		if (j != TEST_REPETITIONS - 1)
-			__atomic_store_n(&tp->nb_dequeued, 0, __ATOMIC_RELAXED);
+			rte_atomic_store_explicit(&tp->nb_dequeued, 0, rte_memory_order_relaxed);
 	}
 
 	return TEST_SUCCESS;
@@ -3459,10 +3472,11 @@ typedef int (test_case_function)(struct active_device *ad,
 
 	bufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];
 
-	__atomic_store_n(&tp->processing_status, 0, __ATOMIC_RELAXED);
-	__atomic_store_n(&tp->nb_dequeued, 0, __ATOMIC_RELAXED);
+	rte_atomic_store_explicit(&tp->processing_status, 0, rte_memory_order_relaxed);
+	rte_atomic_store_explicit(&tp->nb_dequeued, 0, rte_memory_order_relaxed);
 
-	rte_wait_until_equal_16(&tp->op_params->sync, SYNC_START, __ATOMIC_RELAXED);
+	rte_wait_until_equal_16((uint16_t *)(uintptr_t)&tp->op_params->sync, SYNC_START,
+	    rte_memory_order_relaxed);
 
 	ret = rte_bbdev_dec_op_alloc_bulk(tp->op_params->mp, ops,
 				num_to_process);
@@ -3506,15 +3520,17 @@ typedef int (test_case_function)(struct active_device *ad,
 			 * the number of operations is not a multiple of
 			 * burst size.
 			 */
-			__atomic_store_n(&tp->burst_sz, num_to_enq, __ATOMIC_RELAXED);
+			rte_atomic_store_explicit(&tp->burst_sz, num_to_enq,
+			    rte_memory_order_relaxed);
 
 			/* Wait until processing of previous batch is
 			 * completed
 			 */
-			rte_wait_until_equal_16(&tp->nb_dequeued, enqueued, __ATOMIC_RELAXED);
+			rte_wait_until_equal_16((uint16_t *)(uintptr_t)&tp->nb_dequeued, enqueued,
+			    rte_memory_order_relaxed);
 		}
 		if (j != TEST_REPETITIONS - 1)
-			__atomic_store_n(&tp->nb_dequeued, 0, __ATOMIC_RELAXED);
+			rte_atomic_store_explicit(&tp->nb_dequeued, 0, rte_memory_order_relaxed);
 	}
 
 	return TEST_SUCCESS;
@@ -3549,10 +3565,11 @@ typedef int (test_case_function)(struct active_device *ad,
 
 	bufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];
 
-	__atomic_store_n(&tp->processing_status, 0, __ATOMIC_RELAXED);
-	__atomic_store_n(&tp->nb_dequeued, 0, __ATOMIC_RELAXED);
+	rte_atomic_store_explicit(&tp->processing_status, 0, rte_memory_order_relaxed);
+	rte_atomic_store_explicit(&tp->nb_dequeued, 0, rte_memory_order_relaxed);
 
-	rte_wait_until_equal_16(&tp->op_params->sync, SYNC_START, __ATOMIC_RELAXED);
+	rte_wait_until_equal_16((uint16_t *)(uintptr_t)&tp->op_params->sync, SYNC_START,
+	    rte_memory_order_relaxed);
 
 	ret = rte_bbdev_enc_op_alloc_bulk(tp->op_params->mp, ops,
 			num_to_process);
@@ -3592,15 +3609,17 @@ typedef int (test_case_function)(struct active_device *ad,
 			 * the number of operations is not a multiple of
 			 * burst size.
 			 */
-			__atomic_store_n(&tp->burst_sz, num_to_enq, __ATOMIC_RELAXED);
+			rte_atomic_store_explicit(&tp->burst_sz, num_to_enq,
+			    rte_memory_order_relaxed);
 
 			/* Wait until processing of previous batch is
 			 * completed
 			 */
-			rte_wait_until_equal_16(&tp->nb_dequeued, enqueued, __ATOMIC_RELAXED);
+			rte_wait_until_equal_16((uint16_t *)(uintptr_t)&tp->nb_dequeued, enqueued,
+			    rte_memory_order_relaxed);
 		}
 		if (j != TEST_REPETITIONS - 1)
-			__atomic_store_n(&tp->nb_dequeued, 0, __ATOMIC_RELAXED);
+			rte_atomic_store_explicit(&tp->nb_dequeued, 0, rte_memory_order_relaxed);
 	}
 
 	return TEST_SUCCESS;
@@ -3636,10 +3655,11 @@ typedef int (test_case_function)(struct active_device *ad,
 
 	bufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];
 
-	__atomic_store_n(&tp->processing_status, 0, __ATOMIC_RELAXED);
-	__atomic_store_n(&tp->nb_dequeued, 0, __ATOMIC_RELAXED);
+	rte_atomic_store_explicit(&tp->processing_status, 0, rte_memory_order_relaxed);
+	rte_atomic_store_explicit(&tp->nb_dequeued, 0, rte_memory_order_relaxed);
 
-	rte_wait_until_equal_16(&tp->op_params->sync, SYNC_START, __ATOMIC_RELAXED);
+	rte_wait_until_equal_16((uint16_t *)(uintptr_t)&tp->op_params->sync, SYNC_START,
+	    rte_memory_order_relaxed);
 
 	ret = rte_bbdev_enc_op_alloc_bulk(tp->op_params->mp, ops,
 			num_to_process);
@@ -3681,15 +3701,17 @@ typedef int (test_case_function)(struct active_device *ad,
 			 * the number of operations is not a multiple of
 			 * burst size.
 			 */
-			__atomic_store_n(&tp->burst_sz, num_to_enq, __ATOMIC_RELAXED);
+			rte_atomic_store_explicit(&tp->burst_sz, num_to_enq,
+			    rte_memory_order_relaxed);
 
 			/* Wait until processing of previous batch is
 			 * completed
 			 */
-			rte_wait_until_equal_16(&tp->nb_dequeued, enqueued, __ATOMIC_RELAXED);
+			rte_wait_until_equal_16((uint16_t *)(uintptr_t)&tp->nb_dequeued, enqueued,
+			    rte_memory_order_relaxed);
 		}
 		if (j != TEST_REPETITIONS - 1)
-			__atomic_store_n(&tp->nb_dequeued, 0, __ATOMIC_RELAXED);
+			rte_atomic_store_explicit(&tp->nb_dequeued, 0, rte_memory_order_relaxed);
 	}
 
 	return TEST_SUCCESS;
@@ -3725,10 +3747,11 @@ typedef int (test_case_function)(struct active_device *ad,
 
 	bufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];
 
-	__atomic_store_n(&tp->processing_status, 0, __ATOMIC_RELAXED);
-	__atomic_store_n(&tp->nb_dequeued, 0, __ATOMIC_RELAXED);
+	rte_atomic_store_explicit(&tp->processing_status, 0, rte_memory_order_relaxed);
+	rte_atomic_store_explicit(&tp->nb_dequeued, 0, rte_memory_order_relaxed);
 
-	rte_wait_until_equal_16(&tp->op_params->sync, SYNC_START, __ATOMIC_RELAXED);
+	rte_wait_until_equal_16((uint16_t *)(uintptr_t)&tp->op_params->sync, SYNC_START,
+	    rte_memory_order_relaxed);
 
 	ret = rte_bbdev_fft_op_alloc_bulk(tp->op_params->mp, ops,
 			num_to_process);
@@ -3769,15 +3792,17 @@ typedef int (test_case_function)(struct active_device *ad,
 			 * the number of operations is not a multiple of
 			 * burst size.
 			 */
-			__atomic_store_n(&tp->burst_sz, num_to_enq, __ATOMIC_RELAXED);
+			rte_atomic_store_explicit(&tp->burst_sz, num_to_enq,
+			    rte_memory_order_relaxed);
 
 			/* Wait until processing of previous batch is
 			 * completed
 			 */
-			rte_wait_until_equal_16(&tp->nb_dequeued, enqueued, __ATOMIC_RELAXED);
+			rte_wait_until_equal_16((uint16_t *)(uintptr_t)&tp->nb_dequeued, enqueued,
+			    rte_memory_order_relaxed);
 		}
 		if (j != TEST_REPETITIONS - 1)
-			__atomic_store_n(&tp->nb_dequeued, 0, __ATOMIC_RELAXED);
+			rte_atomic_store_explicit(&tp->nb_dequeued, 0, rte_memory_order_relaxed);
 	}
 
 	return TEST_SUCCESS;
@@ -3811,10 +3836,11 @@ typedef int (test_case_function)(struct active_device *ad,
 
 	bufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];
 
-	__atomic_store_n(&tp->processing_status, 0, __ATOMIC_RELAXED);
-	__atomic_store_n(&tp->nb_dequeued, 0, __ATOMIC_RELAXED);
+	rte_atomic_store_explicit(&tp->processing_status, 0, rte_memory_order_relaxed);
+	rte_atomic_store_explicit(&tp->nb_dequeued, 0, rte_memory_order_relaxed);
 
-	rte_wait_until_equal_16(&tp->op_params->sync, SYNC_START, __ATOMIC_RELAXED);
+	rte_wait_until_equal_16((uint16_t *)(uintptr_t)&tp->op_params->sync, SYNC_START,
+	    rte_memory_order_relaxed);
 
 	ret = rte_bbdev_mldts_op_alloc_bulk(tp->op_params->mp, ops, num_to_process);
 	TEST_ASSERT_SUCCESS(ret, "Allocation failed for %d ops", num_to_process);
@@ -3851,15 +3877,17 @@ typedef int (test_case_function)(struct active_device *ad,
 			 * the number of operations is not a multiple of
 			 * burst size.
 			 */
-			__atomic_store_n(&tp->burst_sz, num_to_enq, __ATOMIC_RELAXED);
+			rte_atomic_store_explicit(&tp->burst_sz, num_to_enq,
+			    rte_memory_order_relaxed);
 
 			/* Wait until processing of previous batch is
 			 * completed
 			 */
-			rte_wait_until_equal_16(&tp->nb_dequeued, enqueued, __ATOMIC_RELAXED);
+			rte_wait_until_equal_16((uint16_t *)(uintptr_t)&tp->nb_dequeued, enqueued,
+			    rte_memory_order_relaxed);
 		}
 		if (j != TEST_REPETITIONS - 1)
-			__atomic_store_n(&tp->nb_dequeued, 0, __ATOMIC_RELAXED);
+			rte_atomic_store_explicit(&tp->nb_dequeued, 0, rte_memory_order_relaxed);
 	}
 
 	return TEST_SUCCESS;
@@ -3894,7 +3922,8 @@ typedef int (test_case_function)(struct active_device *ad,
 
 	bufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];
 
-	rte_wait_until_equal_16(&tp->op_params->sync, SYNC_START, __ATOMIC_RELAXED);
+	rte_wait_until_equal_16((uint16_t *)(uintptr_t)&tp->op_params->sync, SYNC_START,
+	    rte_memory_order_relaxed);
 
 	ret = rte_bbdev_dec_op_alloc_bulk(tp->op_params->mp, ops_enq, num_ops);
 	TEST_ASSERT_SUCCESS(ret, "Allocation failed for %d ops", num_ops);
@@ -4013,7 +4042,8 @@ typedef int (test_case_function)(struct active_device *ad,
 
 	bufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];
 
-	rte_wait_until_equal_16(&tp->op_params->sync, SYNC_START, __ATOMIC_RELAXED);
+	rte_wait_until_equal_16((uint16_t *)(uintptr_t)&tp->op_params->sync, SYNC_START,
+	    rte_memory_order_relaxed);
 
 	ret = rte_bbdev_dec_op_alloc_bulk(tp->op_params->mp, ops_enq, num_ops);
 	TEST_ASSERT_SUCCESS(ret, "Allocation failed for %d ops", num_ops);
@@ -4148,7 +4178,8 @@ typedef int (test_case_function)(struct active_device *ad,
 
 	bufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];
 
-	rte_wait_until_equal_16(&tp->op_params->sync, SYNC_START, __ATOMIC_RELAXED);
+	rte_wait_until_equal_16((uint16_t *)(uintptr_t)&tp->op_params->sync, SYNC_START,
+	    rte_memory_order_relaxed);
 
 	ret = rte_bbdev_dec_op_alloc_bulk(tp->op_params->mp, ops_enq, num_ops);
 	TEST_ASSERT_SUCCESS(ret, "Allocation failed for %d ops", num_ops);
@@ -4271,7 +4302,8 @@ typedef int (test_case_function)(struct active_device *ad,
 
 	bufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];
 
-	rte_wait_until_equal_16(&tp->op_params->sync, SYNC_START, __ATOMIC_RELAXED);
+	rte_wait_until_equal_16((uint16_t *)(uintptr_t)&tp->op_params->sync, SYNC_START,
+	    rte_memory_order_relaxed);
 
 	ret = rte_bbdev_dec_op_alloc_bulk(tp->op_params->mp, ops_enq, num_ops);
 	TEST_ASSERT_SUCCESS(ret, "Allocation failed for %d ops", num_ops);
@@ -4402,7 +4434,8 @@ typedef int (test_case_function)(struct active_device *ad,
 
 	bufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];
 
-	rte_wait_until_equal_16(&tp->op_params->sync, SYNC_START, __ATOMIC_RELAXED);
+	rte_wait_until_equal_16((uint16_t *)(uintptr_t)&tp->op_params->sync, SYNC_START,
+	    rte_memory_order_relaxed);
 
 	ret = rte_bbdev_enc_op_alloc_bulk(tp->op_params->mp, ops_enq,
 			num_ops);
@@ -4503,7 +4536,8 @@ typedef int (test_case_function)(struct active_device *ad,
 
 	bufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];
 
-	rte_wait_until_equal_16(&tp->op_params->sync, SYNC_START, __ATOMIC_RELAXED);
+	rte_wait_until_equal_16((uint16_t *)(uintptr_t)&tp->op_params->sync, SYNC_START,
+	    rte_memory_order_relaxed);
 
 	ret = rte_bbdev_enc_op_alloc_bulk(tp->op_params->mp, ops_enq,
 			num_ops);
@@ -4604,7 +4638,8 @@ typedef int (test_case_function)(struct active_device *ad,
 
 	bufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];
 
-	rte_wait_until_equal_16(&tp->op_params->sync, SYNC_START, __ATOMIC_RELAXED);
+	rte_wait_until_equal_16((uint16_t *)(uintptr_t)&tp->op_params->sync, SYNC_START,
+	    rte_memory_order_relaxed);
 
 	ret = rte_bbdev_fft_op_alloc_bulk(tp->op_params->mp, ops_enq, num_ops);
 	TEST_ASSERT_SUCCESS(ret, "Allocation failed for %d ops", num_ops);
@@ -4702,7 +4737,8 @@ typedef int (test_case_function)(struct active_device *ad,
 
 	bufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];
 
-	rte_wait_until_equal_16(&tp->op_params->sync, SYNC_START, __ATOMIC_RELAXED);
+	rte_wait_until_equal_16((uint16_t *)(uintptr_t)&tp->op_params->sync, SYNC_START,
+	    rte_memory_order_relaxed);
 
 	ret = rte_bbdev_mldts_op_alloc_bulk(tp->op_params->mp, ops_enq, num_ops);
 	TEST_ASSERT_SUCCESS(ret, "Allocation failed for %d ops", num_ops);
@@ -4898,7 +4934,7 @@ typedef int (test_case_function)(struct active_device *ad,
 	else
 		return TEST_SKIPPED;
 
-	__atomic_store_n(&op_params->sync, SYNC_WAIT, __ATOMIC_RELAXED);
+	rte_atomic_store_explicit(&op_params->sync, SYNC_WAIT, rte_memory_order_relaxed);
 
 	/* Main core is set at first entry */
 	t_params[0].dev_id = ad->dev_id;
@@ -4921,7 +4957,7 @@ typedef int (test_case_function)(struct active_device *ad,
 				&t_params[used_cores++], lcore_id);
 	}
 
-	__atomic_store_n(&op_params->sync, SYNC_START, __ATOMIC_RELAXED);
+	rte_atomic_store_explicit(&op_params->sync, SYNC_START, rte_memory_order_relaxed);
 	ret = bler_function(&t_params[0]);
 
 	/* Main core is always used */
@@ -5024,7 +5060,7 @@ typedef int (test_case_function)(struct active_device *ad,
 			throughput_function = throughput_pmd_lcore_enc;
 	}
 
-	__atomic_store_n(&op_params->sync, SYNC_WAIT, __ATOMIC_RELAXED);
+	rte_atomic_store_explicit(&op_params->sync, SYNC_WAIT, rte_memory_order_relaxed);
 
 	/* Main core is set at first entry */
 	t_params[0].dev_id = ad->dev_id;
@@ -5047,7 +5083,7 @@ typedef int (test_case_function)(struct active_device *ad,
 				&t_params[used_cores++], lcore_id);
 	}
 
-	__atomic_store_n(&op_params->sync, SYNC_START, __ATOMIC_RELAXED);
+	rte_atomic_store_explicit(&op_params->sync, SYNC_START, rte_memory_order_relaxed);
 	ret = throughput_function(&t_params[0]);
 
 	/* Main core is always used */
@@ -5077,29 +5113,30 @@ typedef int (test_case_function)(struct active_device *ad,
 	 * Wait for main lcore operations.
 	 */
 	tp = &t_params[0];
-	while ((__atomic_load_n(&tp->nb_dequeued, __ATOMIC_RELAXED) <
+	while ((rte_atomic_load_explicit(&tp->nb_dequeued, rte_memory_order_relaxed) <
 		op_params->num_to_process) &&
-		(__atomic_load_n(&tp->processing_status, __ATOMIC_RELAXED) !=
+		(rte_atomic_load_explicit(&tp->processing_status, rte_memory_order_relaxed) !=
 		TEST_FAILED))
 		rte_pause();
 
 	tp->ops_per_sec /= TEST_REPETITIONS;
 	tp->mbps /= TEST_REPETITIONS;
-	ret |= (int)__atomic_load_n(&tp->processing_status, __ATOMIC_RELAXED);
+	ret |= (int)rte_atomic_load_explicit(&tp->processing_status, rte_memory_order_relaxed);
 
 	/* Wait for worker lcores operations */
 	for (used_cores = 1; used_cores < num_lcores; used_cores++) {
 		tp = &t_params[used_cores];
 
-		while ((__atomic_load_n(&tp->nb_dequeued, __ATOMIC_RELAXED) <
+		while ((rte_atomic_load_explicit(&tp->nb_dequeued, rte_memory_order_relaxed) <
 			op_params->num_to_process) &&
-			(__atomic_load_n(&tp->processing_status, __ATOMIC_RELAXED) !=
-			TEST_FAILED))
+			(rte_atomic_load_explicit(&tp->processing_status,
+			    rte_memory_order_relaxed) != TEST_FAILED))
 			rte_pause();
 
 		tp->ops_per_sec /= TEST_REPETITIONS;
 		tp->mbps /= TEST_REPETITIONS;
-		ret |= (int)__atomic_load_n(&tp->processing_status, __ATOMIC_RELAXED);
+		ret |= (int)rte_atomic_load_explicit(&tp->processing_status,
+		    rte_memory_order_relaxed);
 	}
 
 	/* Print throughput if test passed */
-- 
1.8.3.1



Thread overview: 200+ messages
2024-03-20 20:50 [PATCH 00/46] use " Tyler Retzlaff
2024-03-20 20:50 ` [PATCH 01/46] net/mlx5: use rte " Tyler Retzlaff
2024-03-20 20:50 ` [PATCH 02/46] net/ixgbe: " Tyler Retzlaff
2024-03-20 20:50 ` [PATCH 03/46] net/iavf: " Tyler Retzlaff
2024-03-20 20:50 ` [PATCH 04/46] net/ice: " Tyler Retzlaff
2024-03-20 20:50 ` [PATCH 05/46] net/i40e: " Tyler Retzlaff
2024-03-20 20:50 ` [PATCH 06/46] net/hns3: " Tyler Retzlaff
2024-03-20 20:50 ` [PATCH 07/46] net/bnxt: " Tyler Retzlaff
2024-03-20 20:50 ` [PATCH 08/46] net/cpfl: " Tyler Retzlaff
2024-03-20 20:50 ` [PATCH 09/46] net/af_xdp: " Tyler Retzlaff
2024-03-20 20:50 ` [PATCH 10/46] net/octeon_ep: " Tyler Retzlaff
2024-03-20 20:50 ` [PATCH 11/46] net/octeontx: " Tyler Retzlaff
2024-03-20 20:50 ` [PATCH 12/46] net/cxgbe: " Tyler Retzlaff
2024-03-20 20:50 ` [PATCH 13/46] net/gve: " Tyler Retzlaff
2024-03-20 20:51 ` [PATCH 14/46] net/memif: " Tyler Retzlaff
2024-03-20 20:51 ` [PATCH 15/46] net/sfc: " Tyler Retzlaff
2024-03-21 18:11   ` Aaron Conole
2024-03-21 18:15     ` Tyler Retzlaff
2024-03-20 20:51 ` [PATCH 16/46] net/thunderx: " Tyler Retzlaff
2024-03-20 20:51 ` [PATCH 17/46] net/virtio: " Tyler Retzlaff
2024-03-20 20:51 ` [PATCH 18/46] net/hinic: " Tyler Retzlaff
2024-03-20 20:51 ` [PATCH 19/46] net/idpf: " Tyler Retzlaff
2024-03-20 20:51 ` [PATCH 20/46] net/qede: " Tyler Retzlaff
2024-03-20 20:51 ` [PATCH 21/46] net/ring: " Tyler Retzlaff
2024-03-20 20:51 ` [PATCH 22/46] vdpa/mlx5: " Tyler Retzlaff
2024-03-20 20:51 ` [PATCH 23/46] raw/ifpga: " Tyler Retzlaff
2024-03-20 20:51 ` [PATCH 24/46] event/opdl: " Tyler Retzlaff
2024-03-20 20:51 ` [PATCH 25/46] event/octeontx: " Tyler Retzlaff
2024-03-20 20:51 ` [PATCH 26/46] event/dsw: " Tyler Retzlaff
2024-03-20 20:51 ` [PATCH 27/46] dma/skeleton: " Tyler Retzlaff
2024-03-20 20:51 ` [PATCH 28/46] crypto/octeontx: " Tyler Retzlaff
2024-03-20 20:51 ` [PATCH 29/46] common/mlx5: " Tyler Retzlaff
2024-03-20 20:51 ` [PATCH 30/46] common/idpf: " Tyler Retzlaff
2024-03-20 20:51 ` [PATCH 31/46] common/iavf: " Tyler Retzlaff
2024-03-20 20:51 ` [PATCH 32/46] baseband/acc: " Tyler Retzlaff
2024-03-20 20:51 ` [PATCH 33/46] net/txgbe: " Tyler Retzlaff
2024-03-20 20:51 ` [PATCH 34/46] net/null: " Tyler Retzlaff
2024-03-20 20:51 ` [PATCH 35/46] event/dlb2: " Tyler Retzlaff
2024-03-20 20:51 ` [PATCH 36/46] dma/idxd: " Tyler Retzlaff
2024-03-20 20:51 ` [PATCH 37/46] crypto/ccp: " Tyler Retzlaff
2024-03-20 20:51 ` [PATCH 38/46] common/cpt: " Tyler Retzlaff
2024-03-20 20:51 ` [PATCH 39/46] bus/vmbus: " Tyler Retzlaff
2024-03-20 20:51 ` [PATCH 40/46] examples: " Tyler Retzlaff
2024-03-20 20:51 ` [PATCH 41/46] app/dumpcap: " Tyler Retzlaff
2024-03-20 20:51 ` [PATCH 42/46] app/test: " Tyler Retzlaff
2024-03-20 20:51 ` [PATCH 43/46] app/test-eventdev: " Tyler Retzlaff
2024-03-20 20:51 ` [PATCH 44/46] app/test-crypto-perf: " Tyler Retzlaff
2024-03-20 20:51 ` [PATCH 45/46] app/test-compress-perf: " Tyler Retzlaff
2024-03-20 20:51 ` [PATCH 46/46] app/test-bbdev: " Tyler Retzlaff
2024-03-21 15:33 ` [PATCH 00/46] use " Stephen Hemminger
2024-03-21 16:22   ` Tyler Retzlaff
2024-03-21 19:16 ` [PATCH v2 00/45] " Tyler Retzlaff
2024-03-21 19:16   ` [PATCH v2 01/45] net/mlx5: use rte " Tyler Retzlaff
2024-03-21 19:16   ` [PATCH v2 02/45] net/ixgbe: " Tyler Retzlaff
2024-03-21 19:16   ` [PATCH v2 03/45] net/iavf: " Tyler Retzlaff
2024-03-21 19:16   ` [PATCH v2 04/45] net/ice: " Tyler Retzlaff
2024-03-21 19:16   ` [PATCH v2 05/45] net/i40e: " Tyler Retzlaff
2024-03-21 19:16   ` [PATCH v2 06/45] net/hns3: " Tyler Retzlaff
2024-03-21 19:16   ` [PATCH v2 07/45] net/bnxt: " Tyler Retzlaff
2024-03-21 19:16   ` [PATCH v2 08/45] net/cpfl: " Tyler Retzlaff
2024-03-21 19:16   ` [PATCH v2 09/45] net/af_xdp: " Tyler Retzlaff
2024-03-21 19:16   ` [PATCH v2 10/45] net/octeon_ep: " Tyler Retzlaff
2024-03-21 19:16   ` [PATCH v2 11/45] net/octeontx: " Tyler Retzlaff
2024-03-21 19:16   ` [PATCH v2 12/45] net/cxgbe: " Tyler Retzlaff
2024-03-21 19:17   ` [PATCH v2 13/45] net/gve: " Tyler Retzlaff
2024-03-21 19:17   ` [PATCH v2 14/45] net/memif: " Tyler Retzlaff
2024-03-21 19:17   ` [PATCH v2 15/45] net/thunderx: " Tyler Retzlaff
2024-03-21 19:17   ` [PATCH v2 16/45] net/virtio: " Tyler Retzlaff
2024-03-21 19:17   ` [PATCH v2 17/45] net/hinic: " Tyler Retzlaff
2024-03-21 19:17   ` [PATCH v2 18/45] net/idpf: " Tyler Retzlaff
2024-03-21 19:17   ` [PATCH v2 19/45] net/qede: " Tyler Retzlaff
2024-03-21 19:17   ` [PATCH v2 20/45] net/ring: " Tyler Retzlaff
2024-03-21 19:17   ` [PATCH v2 21/45] vdpa/mlx5: " Tyler Retzlaff
2024-03-21 19:17   ` [PATCH v2 22/45] raw/ifpga: " Tyler Retzlaff
2024-03-21 19:17   ` [PATCH v2 23/45] event/opdl: " Tyler Retzlaff
2024-03-21 19:17   ` [PATCH v2 24/45] event/octeontx: " Tyler Retzlaff
2024-03-21 19:17   ` [PATCH v2 25/45] event/dsw: " Tyler Retzlaff
2024-03-21 20:51     ` Mattias Rönnblom
2024-03-21 19:17   ` [PATCH v2 26/45] dma/skeleton: " Tyler Retzlaff
2024-03-21 19:17   ` [PATCH v2 27/45] crypto/octeontx: " Tyler Retzlaff
2024-03-21 19:17   ` [PATCH v2 28/45] common/mlx5: " Tyler Retzlaff
2024-03-21 19:17   ` [PATCH v2 29/45] common/idpf: " Tyler Retzlaff
2024-03-21 19:17   ` [PATCH v2 30/45] common/iavf: " Tyler Retzlaff
2024-03-21 19:17   ` [PATCH v2 31/45] baseband/acc: " Tyler Retzlaff
2024-03-21 19:17   ` [PATCH v2 32/45] net/txgbe: " Tyler Retzlaff
2024-03-21 19:17   ` [PATCH v2 33/45] net/null: " Tyler Retzlaff
2024-03-21 19:17   ` [PATCH v2 34/45] event/dlb2: " Tyler Retzlaff
2024-03-21 21:03     ` Mattias Rönnblom
2024-04-09 19:31       ` Sevincer, Abdullah
2024-03-21 19:17   ` [PATCH v2 35/45] dma/idxd: " Tyler Retzlaff
2024-03-21 19:17   ` [PATCH v2 36/45] crypto/ccp: " Tyler Retzlaff
2024-03-21 19:17   ` [PATCH v2 37/45] common/cpt: " Tyler Retzlaff
2024-03-21 19:17   ` [PATCH v2 38/45] bus/vmbus: " Tyler Retzlaff
2024-03-21 21:12     ` Mattias Rönnblom
2024-03-21 21:34       ` Long Li
2024-03-22  7:04         ` Mattias Rönnblom
2024-03-22 19:32           ` Long Li
2024-03-22 19:34     ` Long Li
2024-03-25 16:41       ` Tyler Retzlaff
2024-03-21 19:17   ` [PATCH v2 39/45] examples: " Tyler Retzlaff
2024-03-21 19:17   ` [PATCH v2 40/45] app/dumpcap: " Tyler Retzlaff
2024-03-21 19:17   ` [PATCH v2 41/45] app/test: " Tyler Retzlaff
2024-03-21 19:17   ` [PATCH v2 42/45] app/test-eventdev: " Tyler Retzlaff
2024-03-21 19:17   ` [PATCH v2 43/45] app/test-crypto-perf: " Tyler Retzlaff
2024-03-21 19:17   ` [PATCH v2 44/45] app/test-compress-perf: " Tyler Retzlaff
2024-03-21 19:17   ` [PATCH v2 45/45] app/test-bbdev: " Tyler Retzlaff
2024-03-27 22:37 ` [PATCH v3 00/45] use " Tyler Retzlaff
2024-03-27 22:37   ` [PATCH v3 01/45] net/mlx5: use rte " Tyler Retzlaff
2024-03-27 22:37   ` [PATCH v3 02/45] net/ixgbe: " Tyler Retzlaff
2024-03-27 22:37   ` [PATCH v3 03/45] net/iavf: " Tyler Retzlaff
2024-03-27 22:37   ` [PATCH v3 04/45] net/ice: " Tyler Retzlaff
2024-03-27 22:37   ` [PATCH v3 05/45] net/i40e: " Tyler Retzlaff
2024-03-27 22:37   ` [PATCH v3 06/45] net/hns3: " Tyler Retzlaff
2024-03-27 22:37   ` [PATCH v3 07/45] net/bnxt: " Tyler Retzlaff
2024-03-27 22:37   ` [PATCH v3 08/45] net/cpfl: " Tyler Retzlaff
2024-03-27 22:37   ` [PATCH v3 09/45] net/af_xdp: " Tyler Retzlaff
2024-03-27 22:37   ` [PATCH v3 10/45] net/octeon_ep: " Tyler Retzlaff
2024-03-27 22:37   ` [PATCH v3 11/45] net/octeontx: " Tyler Retzlaff
2024-03-27 22:37   ` [PATCH v3 12/45] net/cxgbe: " Tyler Retzlaff
2024-03-27 22:37   ` [PATCH v3 13/45] net/gve: " Tyler Retzlaff
2024-03-27 22:37   ` [PATCH v3 14/45] net/memif: " Tyler Retzlaff
2024-03-27 22:37   ` [PATCH v3 15/45] net/thunderx: " Tyler Retzlaff
2024-03-27 22:37   ` [PATCH v3 16/45] net/virtio: " Tyler Retzlaff
2024-03-27 22:37   ` [PATCH v3 17/45] net/hinic: " Tyler Retzlaff
2024-03-27 22:37   ` [PATCH v3 18/45] net/idpf: " Tyler Retzlaff
2024-03-27 22:37   ` [PATCH v3 19/45] net/qede: " Tyler Retzlaff
2024-03-27 22:37   ` [PATCH v3 20/45] net/ring: " Tyler Retzlaff
2024-03-27 22:37   ` [PATCH v3 21/45] vdpa/mlx5: " Tyler Retzlaff
2024-03-27 22:37   ` [PATCH v3 22/45] raw/ifpga: " Tyler Retzlaff
2024-03-27 22:37   ` [PATCH v3 23/45] event/opdl: " Tyler Retzlaff
2024-03-27 22:37   ` [PATCH v3 24/45] event/octeontx: " Tyler Retzlaff
2024-03-27 22:37   ` [PATCH v3 25/45] event/dsw: " Tyler Retzlaff
2024-03-27 22:37   ` [PATCH v3 26/45] dma/skeleton: " Tyler Retzlaff
2024-03-27 22:37   ` [PATCH v3 27/45] crypto/octeontx: " Tyler Retzlaff
2024-03-27 22:37   ` [PATCH v3 28/45] common/mlx5: " Tyler Retzlaff
2024-03-27 22:37   ` [PATCH v3 29/45] common/idpf: " Tyler Retzlaff
2024-03-27 22:37   ` [PATCH v3 30/45] common/iavf: " Tyler Retzlaff
2024-03-27 22:37   ` [PATCH v3 31/45] baseband/acc: " Tyler Retzlaff
2024-03-27 22:37   ` [PATCH v3 32/45] net/txgbe: " Tyler Retzlaff
2024-03-27 22:37   ` [PATCH v3 33/45] net/null: " Tyler Retzlaff
2024-03-27 22:37   ` [PATCH v3 34/45] event/dlb2: " Tyler Retzlaff
2024-03-27 22:37   ` [PATCH v3 35/45] dma/idxd: " Tyler Retzlaff
2024-03-27 22:37   ` [PATCH v3 36/45] crypto/ccp: " Tyler Retzlaff
2024-03-27 22:37   ` [PATCH v3 37/45] common/cpt: " Tyler Retzlaff
2024-03-27 22:37   ` [PATCH v3 38/45] bus/vmbus: " Tyler Retzlaff
2024-03-27 22:37   ` [PATCH v3 39/45] examples: " Tyler Retzlaff
2024-03-27 22:37   ` [PATCH v3 40/45] app/dumpcap: " Tyler Retzlaff
2024-03-27 22:37   ` [PATCH v3 41/45] app/test: " Tyler Retzlaff
2024-03-27 22:37   ` [PATCH v3 42/45] app/test-eventdev: " Tyler Retzlaff
2024-03-27 22:37   ` [PATCH v3 43/45] app/test-crypto-perf: " Tyler Retzlaff
2024-03-27 22:37   ` [PATCH v3 44/45] app/test-compress-perf: " Tyler Retzlaff
2024-03-27 22:37   ` [PATCH v3 45/45] app/test-bbdev: " Tyler Retzlaff [this message]
2024-03-29  2:07   ` [PATCH v3 00/45] use " Tyler Retzlaff
2024-04-19 23:05 ` [PATCH v4 " Tyler Retzlaff
2024-04-19 23:05   ` [PATCH v4 01/45] net/mlx5: use rte " Tyler Retzlaff
2024-04-20  8:03     ` Morten Brørup
2024-04-19 23:06   ` [PATCH v4 02/45] net/ixgbe: " Tyler Retzlaff
2024-04-19 23:06   ` [PATCH v4 03/45] net/iavf: " Tyler Retzlaff
2024-04-19 23:06   ` [PATCH v4 04/45] net/ice: " Tyler Retzlaff
2024-04-19 23:06   ` [PATCH v4 05/45] net/i40e: " Tyler Retzlaff
2024-04-19 23:06   ` [PATCH v4 06/45] net/hns3: " Tyler Retzlaff
2024-04-19 23:06   ` [PATCH v4 07/45] net/bnxt: " Tyler Retzlaff
2024-04-19 23:06   ` [PATCH v4 08/45] net/cpfl: " Tyler Retzlaff
2024-04-19 23:06   ` [PATCH v4 09/45] net/af_xdp: " Tyler Retzlaff
2024-04-19 23:06   ` [PATCH v4 10/45] net/octeon_ep: " Tyler Retzlaff
2024-04-19 23:06   ` [PATCH v4 11/45] net/octeontx: " Tyler Retzlaff
2024-04-19 23:06   ` [PATCH v4 12/45] net/cxgbe: " Tyler Retzlaff
2024-04-19 23:06   ` [PATCH v4 13/45] net/gve: " Tyler Retzlaff
2024-04-19 23:06   ` [PATCH v4 14/45] net/memif: " Tyler Retzlaff
2024-04-19 23:06   ` [PATCH v4 15/45] net/thunderx: " Tyler Retzlaff
2024-04-19 23:06   ` [PATCH v4 16/45] net/virtio: " Tyler Retzlaff
2024-04-19 23:06   ` [PATCH v4 17/45] net/hinic: " Tyler Retzlaff
2024-04-19 23:06   ` [PATCH v4 18/45] net/idpf: " Tyler Retzlaff
2024-04-19 23:06   ` [PATCH v4 19/45] net/qede: " Tyler Retzlaff
2024-04-19 23:06   ` [PATCH v4 20/45] net/ring: " Tyler Retzlaff
2024-04-19 23:06   ` [PATCH v4 21/45] vdpa/mlx5: " Tyler Retzlaff
2024-04-19 23:06   ` [PATCH v4 22/45] raw/ifpga: " Tyler Retzlaff
2024-04-19 23:06   ` [PATCH v4 23/45] event/opdl: " Tyler Retzlaff
2024-04-19 23:06   ` [PATCH v4 24/45] event/octeontx: " Tyler Retzlaff
2024-04-19 23:06   ` [PATCH v4 25/45] event/dsw: " Tyler Retzlaff
2024-04-19 23:06   ` [PATCH v4 26/45] dma/skeleton: " Tyler Retzlaff
2024-04-19 23:06   ` [PATCH v4 27/45] crypto/octeontx: " Tyler Retzlaff
2024-04-19 23:06   ` [PATCH v4 28/45] common/mlx5: " Tyler Retzlaff
2024-04-19 23:06   ` [PATCH v4 29/45] common/idpf: " Tyler Retzlaff
2024-04-19 23:06   ` [PATCH v4 30/45] common/iavf: " Tyler Retzlaff
2024-04-19 23:06   ` [PATCH v4 31/45] baseband/acc: " Tyler Retzlaff
2024-04-19 23:06   ` [PATCH v4 32/45] net/txgbe: " Tyler Retzlaff
2024-04-19 23:06   ` [PATCH v4 33/45] net/null: " Tyler Retzlaff
2024-04-19 23:06   ` [PATCH v4 34/45] event/dlb2: " Tyler Retzlaff
2024-04-19 23:06   ` [PATCH v4 35/45] dma/idxd: " Tyler Retzlaff
2024-04-19 23:06   ` [PATCH v4 36/45] crypto/ccp: " Tyler Retzlaff
2024-04-19 23:06   ` [PATCH v4 37/45] common/cpt: " Tyler Retzlaff
2024-04-19 23:06   ` [PATCH v4 38/45] bus/vmbus: " Tyler Retzlaff
2024-04-19 23:06   ` [PATCH v4 39/45] examples: " Tyler Retzlaff
2024-04-19 23:06   ` [PATCH v4 40/45] app/dumpcap: " Tyler Retzlaff
2024-04-19 23:06   ` [PATCH v4 41/45] app/test: " Tyler Retzlaff
2024-04-19 23:06   ` [PATCH v4 42/45] app/test-eventdev: " Tyler Retzlaff
2024-04-19 23:06   ` [PATCH v4 43/45] app/test-crypto-perf: " Tyler Retzlaff
2024-04-19 23:06   ` [PATCH v4 44/45] app/test-compress-perf: " Tyler Retzlaff
2024-04-19 23:06   ` [PATCH v4 45/45] app/test-bbdev: " Tyler Retzlaff
