DPDK patches and discussions
From: Tyler Retzlaff <roretzla@linux.microsoft.com>
To: dev@dpdk.org
Cc: Akhil Goyal <gakhil@marvell.com>,
	Anatoly Burakov <anatoly.burakov@intel.com>,
	Andrew Rybchenko <andrew.rybchenko@oktetlabs.ru>,
	Bruce Richardson <bruce.richardson@intel.com>,
	Chenbo Xia <chenbo.xia@intel.com>,
	Ciara Power <ciara.power@intel.com>,
	David Christensen <drc@linux.vnet.ibm.com>,
	David Hunt <david.hunt@intel.com>,
	Dmitry Kozlyuk <dmitry.kozliuk@gmail.com>,
	Dmitry Malloy <dmitrym@microsoft.com>,
	Elena Agostini <eagostini@nvidia.com>,
	Erik Gabriel Carrillo <erik.g.carrillo@intel.com>,
	Fan Zhang <fanzhang.oss@gmail.com>,
	Ferruh Yigit <ferruh.yigit@amd.com>,
	Harman Kalra <hkalra@marvell.com>,
	Harry van Haaren <harry.van.haaren@intel.com>,
	Honnappa Nagarahalli <honnappa.nagarahalli@arm.com>,
	Jerin Jacob <jerinj@marvell.com>,
	Konstantin Ananyev <konstantin.v.ananyev@yandex.ru>,
	Matan Azrad <matan@nvidia.com>,
	Maxime Coquelin <maxime.coquelin@redhat.com>,
	Narcisa Ana Maria Vasile <navasile@linux.microsoft.com>,
	Nicolas Chautru <nicolas.chautru@intel.com>,
	Olivier Matz <olivier.matz@6wind.com>, Ori Kam <orika@nvidia.com>,
	Pallavi Kadam <pallavi.kadam@intel.com>,
	Pavan Nikhilesh <pbhagavatula@marvell.com>,
	Reshma Pattan <reshma.pattan@intel.com>,
	Sameh Gobriel <sameh.gobriel@intel.com>,
	Shijith Thotton <sthotton@marvell.com>,
	Sivaprasad Tummala <sivaprasad.tummala@amd.com>,
	Stephen Hemminger <stephen@networkplumber.org>,
	Suanming Mou <suanmingm@nvidia.com>,
	Sunil Kumar Kori <skori@marvell.com>,
	Thomas Monjalon <thomas@monjalon.net>,
	Viacheslav Ovsiienko <viacheslavo@nvidia.com>,
	Vladimir Medvedkin <vladimir.medvedkin@intel.com>,
	Yipeng Wang <yipeng1.wang@intel.com>,
	Tyler Retzlaff <roretzla@linux.microsoft.com>
Subject: [PATCH v3 09/19] rcu: use rte optional stdatomic API
Date: Wed, 25 Oct 2023 17:31:44 -0700
Message-ID: <1698280314-25861-10-git-send-email-roretzla@linux.microsoft.com>
In-Reply-To: <1698280314-25861-1-git-send-email-roretzla@linux.microsoft.com>

Replace the use of the gcc builtin __atomic_xxx intrinsics with the
corresponding rte_atomic_xxx optional stdatomic API.
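
For illustration, the typical conversion pattern (a sketch, not new
code added by this patch; each rte_memory_order_* value maps one for
one onto the corresponding __ATOMIC_* ordering):

  /* before: gcc builtin operating on a plain uint64_t */
  t = __atomic_load_n(&v->token, __ATOMIC_ACQUIRE);

  /* after: optional stdatomic API; the operand is declared
   * RTE_ATOMIC(uint64_t) (or qualified __rte_atomic) so it is
   * _Atomic when the stdatomic path is enabled
   */
  t = rte_atomic_load_explicit(&v->token, rte_memory_order_acquire);

One call-site detail: __atomic_compare_exchange() takes the desired
value by pointer plus a weak flag, while the replacement
rte_atomic_compare_exchange_strong_explicit() takes the desired value
directly; hence the &new_bmap -> new_bmap change at the CAS call
sites below.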

Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
---
 lib/rcu/rte_rcu_qsbr.c | 48 +++++++++++++++++------------------
 lib/rcu/rte_rcu_qsbr.h | 68 +++++++++++++++++++++++++-------------------------
 2 files changed, 58 insertions(+), 58 deletions(-)
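
Note (illustrative, based on the rte_stdatomic.h wrappers): the
RTE_ATOMIC(type) macro and the __rte_atomic qualifier used below
reduce to _Atomic specifiers when the optional stdatomic API is
enabled, and to the plain type otherwise, so a declaration such as

  struct rte_rcu_qsbr_cnt {
  	RTE_ATOMIC(uint64_t) cnt;      /* quiescent state counter */
  	RTE_ATOMIC(uint32_t) lock_cnt; /* debug lock counter */
  } __rte_cache_aligned;

is intended to keep the same size and layout in both configurations.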

diff --git a/lib/rcu/rte_rcu_qsbr.c b/lib/rcu/rte_rcu_qsbr.c
index 17be93e..4dc7714 100644
--- a/lib/rcu/rte_rcu_qsbr.c
+++ b/lib/rcu/rte_rcu_qsbr.c
@@ -102,21 +102,21 @@
 	 * go out of sync. Hence, additional checks are required.
 	 */
 	/* Check if the thread is already registered */
-	old_bmap = __atomic_load_n(__RTE_QSBR_THRID_ARRAY_ELM(v, i),
-					__ATOMIC_RELAXED);
+	old_bmap = rte_atomic_load_explicit(__RTE_QSBR_THRID_ARRAY_ELM(v, i),
+					rte_memory_order_relaxed);
 	if (old_bmap & 1UL << id)
 		return 0;
 
 	do {
 		new_bmap = old_bmap | (1UL << id);
-		success = __atomic_compare_exchange(
+		success = rte_atomic_compare_exchange_strong_explicit(
 					__RTE_QSBR_THRID_ARRAY_ELM(v, i),
-					&old_bmap, &new_bmap, 0,
-					__ATOMIC_RELEASE, __ATOMIC_RELAXED);
+					&old_bmap, new_bmap,
+					rte_memory_order_release, rte_memory_order_relaxed);
 
 		if (success)
-			__atomic_fetch_add(&v->num_threads,
-						1, __ATOMIC_RELAXED);
+			rte_atomic_fetch_add_explicit(&v->num_threads,
+						1, rte_memory_order_relaxed);
 		else if (old_bmap & (1UL << id))
 			/* Someone else registered this thread.
 			 * Counter should not be incremented.
@@ -154,8 +154,8 @@
 	 * go out of sync. Hence, additional checks are required.
 	 */
 	/* Check if the thread is already unregistered */
-	old_bmap = __atomic_load_n(__RTE_QSBR_THRID_ARRAY_ELM(v, i),
-					__ATOMIC_RELAXED);
+	old_bmap = rte_atomic_load_explicit(__RTE_QSBR_THRID_ARRAY_ELM(v, i),
+					rte_memory_order_relaxed);
 	if (!(old_bmap & (1UL << id)))
 		return 0;
 
@@ -165,14 +165,14 @@
 		 * completed before removal of the thread from the list of
 		 * reporting threads.
 		 */
-		success = __atomic_compare_exchange(
+		success = rte_atomic_compare_exchange_strong_explicit(
 					__RTE_QSBR_THRID_ARRAY_ELM(v, i),
-					&old_bmap, &new_bmap, 0,
-					__ATOMIC_RELEASE, __ATOMIC_RELAXED);
+					&old_bmap, new_bmap,
+					rte_memory_order_release, rte_memory_order_relaxed);
 
 		if (success)
-			__atomic_fetch_sub(&v->num_threads,
-						1, __ATOMIC_RELAXED);
+			rte_atomic_fetch_sub_explicit(&v->num_threads,
+						1, rte_memory_order_relaxed);
 		else if (!(old_bmap & (1UL << id)))
 			/* Someone else unregistered this thread.
 			 * Counter should not be incremented.
@@ -227,8 +227,8 @@
 
 	fprintf(f, "  Registered thread IDs = ");
 	for (i = 0; i < v->num_elems; i++) {
-		bmap = __atomic_load_n(__RTE_QSBR_THRID_ARRAY_ELM(v, i),
-					__ATOMIC_ACQUIRE);
+		bmap = rte_atomic_load_explicit(__RTE_QSBR_THRID_ARRAY_ELM(v, i),
+					rte_memory_order_acquire);
 		id = i << __RTE_QSBR_THRID_INDEX_SHIFT;
 		while (bmap) {
 			t = __builtin_ctzl(bmap);
@@ -241,26 +241,26 @@
 	fprintf(f, "\n");
 
 	fprintf(f, "  Token = %" PRIu64 "\n",
-			__atomic_load_n(&v->token, __ATOMIC_ACQUIRE));
+			rte_atomic_load_explicit(&v->token, rte_memory_order_acquire));
 
 	fprintf(f, "  Least Acknowledged Token = %" PRIu64 "\n",
-			__atomic_load_n(&v->acked_token, __ATOMIC_ACQUIRE));
+			rte_atomic_load_explicit(&v->acked_token, rte_memory_order_acquire));
 
 	fprintf(f, "Quiescent State Counts for readers:\n");
 	for (i = 0; i < v->num_elems; i++) {
-		bmap = __atomic_load_n(__RTE_QSBR_THRID_ARRAY_ELM(v, i),
-					__ATOMIC_ACQUIRE);
+		bmap = rte_atomic_load_explicit(__RTE_QSBR_THRID_ARRAY_ELM(v, i),
+					rte_memory_order_acquire);
 		id = i << __RTE_QSBR_THRID_INDEX_SHIFT;
 		while (bmap) {
 			t = __builtin_ctzl(bmap);
 			fprintf(f, "thread ID = %u, count = %" PRIu64 ", lock count = %u\n",
 				id + t,
-				__atomic_load_n(
+				rte_atomic_load_explicit(
 					&v->qsbr_cnt[id + t].cnt,
-					__ATOMIC_RELAXED),
-				__atomic_load_n(
+					rte_memory_order_relaxed),
+				rte_atomic_load_explicit(
 					&v->qsbr_cnt[id + t].lock_cnt,
-					__ATOMIC_RELAXED));
+					rte_memory_order_relaxed));
 			bmap &= ~(1UL << t);
 		}
 	}
diff --git a/lib/rcu/rte_rcu_qsbr.h b/lib/rcu/rte_rcu_qsbr.h
index 87e1b55..9f4aed2 100644
--- a/lib/rcu/rte_rcu_qsbr.h
+++ b/lib/rcu/rte_rcu_qsbr.h
@@ -63,11 +63,11 @@
  * Given thread id needs to be converted to index into the array and
  * the id within the array element.
  */
-#define __RTE_QSBR_THRID_ARRAY_ELM_SIZE (sizeof(uint64_t) * 8)
+#define __RTE_QSBR_THRID_ARRAY_ELM_SIZE (sizeof(RTE_ATOMIC(uint64_t)) * 8)
 #define __RTE_QSBR_THRID_ARRAY_SIZE(max_threads) \
 	RTE_ALIGN(RTE_ALIGN_MUL_CEIL(max_threads, \
 		__RTE_QSBR_THRID_ARRAY_ELM_SIZE) >> 3, RTE_CACHE_LINE_SIZE)
-#define __RTE_QSBR_THRID_ARRAY_ELM(v, i) ((uint64_t *) \
+#define __RTE_QSBR_THRID_ARRAY_ELM(v, i) ((uint64_t __rte_atomic *) \
 	((struct rte_rcu_qsbr_cnt *)(v + 1) + v->max_threads) + i)
 #define __RTE_QSBR_THRID_INDEX_SHIFT 6
 #define __RTE_QSBR_THRID_MASK 0x3f
@@ -75,13 +75,13 @@
 
 /* Worker thread counter */
 struct rte_rcu_qsbr_cnt {
-	uint64_t cnt;
+	RTE_ATOMIC(uint64_t) cnt;
 	/**< Quiescent state counter. Value 0 indicates the thread is offline
 	 *   64b counter is used to avoid adding more code to address
 	 *   counter overflow. Changing this to 32b would require additional
 	 *   changes to various APIs.
 	 */
-	uint32_t lock_cnt;
+	RTE_ATOMIC(uint32_t) lock_cnt;
 	/**< Lock counter. Used when RTE_LIBRTE_RCU_DEBUG is enabled */
 } __rte_cache_aligned;
 
@@ -97,16 +97,16 @@ struct rte_rcu_qsbr_cnt {
  * 2) Register thread ID array
  */
 struct rte_rcu_qsbr {
-	uint64_t token __rte_cache_aligned;
+	RTE_ATOMIC(uint64_t) token __rte_cache_aligned;
 	/**< Counter to allow for multiple concurrent quiescent state queries */
-	uint64_t acked_token;
+	RTE_ATOMIC(uint64_t) acked_token;
 	/**< Least token acked by all the threads in the last call to
 	 *   rte_rcu_qsbr_check API.
 	 */
 
 	uint32_t num_elems __rte_cache_aligned;
 	/**< Number of elements in the thread ID array */
-	uint32_t num_threads;
+	RTE_ATOMIC(uint32_t) num_threads;
 	/**< Number of threads currently using this QS variable */
 	uint32_t max_threads;
 	/**< Maximum number of threads using this QS variable */
@@ -311,13 +311,13 @@ struct rte_rcu_qsbr_dq_parameters {
 	 * the following will not move down after the load of any shared
 	 * data structure.
 	 */
-	t = __atomic_load_n(&v->token, __ATOMIC_RELAXED);
+	t = rte_atomic_load_explicit(&v->token, rte_memory_order_relaxed);
 
-	/* __atomic_store_n(cnt, __ATOMIC_RELAXED) is used to ensure
+	/* rte_atomic_store_explicit(cnt, rte_memory_order_relaxed) is used to ensure
 	 * 'cnt' (64b) is accessed atomically.
 	 */
-	__atomic_store_n(&v->qsbr_cnt[thread_id].cnt,
-		t, __ATOMIC_RELAXED);
+	rte_atomic_store_explicit(&v->qsbr_cnt[thread_id].cnt,
+		t, rte_memory_order_relaxed);
 
 	/* The subsequent load of the data structure should not
 	 * move above the store. Hence a store-load barrier
@@ -326,7 +326,7 @@ struct rte_rcu_qsbr_dq_parameters {
 	 * writer might not see that the reader is online, even though
 	 * the reader is referencing the shared data structure.
 	 */
-	rte_atomic_thread_fence(__ATOMIC_SEQ_CST);
+	rte_atomic_thread_fence(rte_memory_order_seq_cst);
 }
 
 /**
@@ -362,8 +362,8 @@ struct rte_rcu_qsbr_dq_parameters {
 	 * data structure can not move after this store.
 	 */
 
-	__atomic_store_n(&v->qsbr_cnt[thread_id].cnt,
-		__RTE_QSBR_CNT_THR_OFFLINE, __ATOMIC_RELEASE);
+	rte_atomic_store_explicit(&v->qsbr_cnt[thread_id].cnt,
+		__RTE_QSBR_CNT_THR_OFFLINE, rte_memory_order_release);
 }
 
 /**
@@ -394,8 +394,8 @@ struct rte_rcu_qsbr_dq_parameters {
 
 #if defined(RTE_LIBRTE_RCU_DEBUG)
 	/* Increment the lock counter */
-	__atomic_fetch_add(&v->qsbr_cnt[thread_id].lock_cnt,
-				1, __ATOMIC_ACQUIRE);
+	rte_atomic_fetch_add_explicit(&v->qsbr_cnt[thread_id].lock_cnt,
+				1, rte_memory_order_acquire);
 #endif
 }
 
@@ -427,8 +427,8 @@ struct rte_rcu_qsbr_dq_parameters {
 
 #if defined(RTE_LIBRTE_RCU_DEBUG)
 	/* Decrement the lock counter */
-	__atomic_fetch_sub(&v->qsbr_cnt[thread_id].lock_cnt,
-				1, __ATOMIC_RELEASE);
+	rte_atomic_fetch_sub_explicit(&v->qsbr_cnt[thread_id].lock_cnt,
+				1, rte_memory_order_release);
 
 	__RTE_RCU_IS_LOCK_CNT_ZERO(v, thread_id, WARNING,
 				"Lock counter %u. Nested locks?\n",
@@ -461,7 +461,7 @@ struct rte_rcu_qsbr_dq_parameters {
 	 * structure are visible to the workers before the token
 	 * update is visible.
 	 */
-	t = __atomic_fetch_add(&v->token, 1, __ATOMIC_RELEASE) + 1;
+	t = rte_atomic_fetch_add_explicit(&v->token, 1, rte_memory_order_release) + 1;
 
 	return t;
 }
@@ -493,16 +493,16 @@ struct rte_rcu_qsbr_dq_parameters {
 	 * Later loads of the shared data structure should not move
 	 * above this load. Hence, use load-acquire.
 	 */
-	t = __atomic_load_n(&v->token, __ATOMIC_ACQUIRE);
+	t = rte_atomic_load_explicit(&v->token, rte_memory_order_acquire);
 
 	/* Check if there are updates available from the writer.
 	 * Inform the writer that updates are visible to this reader.
 	 * Prior loads of the shared data structure should not move
 	 * beyond this store. Hence use store-release.
 	 */
-	if (t != __atomic_load_n(&v->qsbr_cnt[thread_id].cnt, __ATOMIC_RELAXED))
-		__atomic_store_n(&v->qsbr_cnt[thread_id].cnt,
-					 t, __ATOMIC_RELEASE);
+	if (t != rte_atomic_load_explicit(&v->qsbr_cnt[thread_id].cnt, rte_memory_order_relaxed))
+		rte_atomic_store_explicit(&v->qsbr_cnt[thread_id].cnt,
+					 t, rte_memory_order_release);
 
 	__RTE_RCU_DP_LOG(DEBUG, "%s: update: token = %" PRIu64 ", Thread ID = %d",
 		__func__, t, thread_id);
@@ -517,7 +517,7 @@ struct rte_rcu_qsbr_dq_parameters {
 	uint32_t i, j, id;
 	uint64_t bmap;
 	uint64_t c;
-	uint64_t *reg_thread_id;
+	RTE_ATOMIC(uint64_t) *reg_thread_id;
 	uint64_t acked_token = __RTE_QSBR_CNT_MAX;
 
 	for (i = 0, reg_thread_id = __RTE_QSBR_THRID_ARRAY_ELM(v, 0);
@@ -526,7 +526,7 @@ struct rte_rcu_qsbr_dq_parameters {
 		/* Load the current registered thread bit map before
 		 * loading the reader thread quiescent state counters.
 		 */
-		bmap = __atomic_load_n(reg_thread_id, __ATOMIC_ACQUIRE);
+		bmap = rte_atomic_load_explicit(reg_thread_id, rte_memory_order_acquire);
 		id = i << __RTE_QSBR_THRID_INDEX_SHIFT;
 
 		while (bmap) {
@@ -534,9 +534,9 @@ struct rte_rcu_qsbr_dq_parameters {
 			__RTE_RCU_DP_LOG(DEBUG,
 				"%s: check: token = %" PRIu64 ", wait = %d, Bit Map = 0x%" PRIx64 ", Thread ID = %d",
 				__func__, t, wait, bmap, id + j);
-			c = __atomic_load_n(
+			c = rte_atomic_load_explicit(
 					&v->qsbr_cnt[id + j].cnt,
-					__ATOMIC_ACQUIRE);
+					rte_memory_order_acquire);
 			__RTE_RCU_DP_LOG(DEBUG,
 				"%s: status: token = %" PRIu64 ", wait = %d, Thread QS cnt = %" PRIu64 ", Thread ID = %d",
 				__func__, t, wait, c, id+j);
@@ -554,8 +554,8 @@ struct rte_rcu_qsbr_dq_parameters {
 				/* This thread might have unregistered.
 				 * Re-read the bitmap.
 				 */
-				bmap = __atomic_load_n(reg_thread_id,
-						__ATOMIC_ACQUIRE);
+				bmap = rte_atomic_load_explicit(reg_thread_id,
+						rte_memory_order_acquire);
 
 				continue;
 			}
@@ -576,8 +576,8 @@ struct rte_rcu_qsbr_dq_parameters {
 	 * no need to update this very accurately using compare-and-swap.
 	 */
 	if (acked_token != __RTE_QSBR_CNT_MAX)
-		__atomic_store_n(&v->acked_token, acked_token,
-			__ATOMIC_RELAXED);
+		rte_atomic_store_explicit(&v->acked_token, acked_token,
+			rte_memory_order_relaxed);
 
 	return 1;
 }
@@ -598,7 +598,7 @@ struct rte_rcu_qsbr_dq_parameters {
 			"%s: check: token = %" PRIu64 ", wait = %d, Thread ID = %d",
 			__func__, t, wait, i);
 		while (1) {
-			c = __atomic_load_n(&cnt->cnt, __ATOMIC_ACQUIRE);
+			c = rte_atomic_load_explicit(&cnt->cnt, rte_memory_order_acquire);
 			__RTE_RCU_DP_LOG(DEBUG,
 				"%s: status: token = %" PRIu64 ", wait = %d, Thread QS cnt = %" PRIu64 ", Thread ID = %d",
 				__func__, t, wait, c, i);
@@ -628,8 +628,8 @@ struct rte_rcu_qsbr_dq_parameters {
 	 * no need to update this very accurately using compare-and-swap.
 	 */
 	if (acked_token != __RTE_QSBR_CNT_MAX)
-		__atomic_store_n(&v->acked_token, acked_token,
-			__ATOMIC_RELAXED);
+		rte_atomic_store_explicit(&v->acked_token, acked_token,
+			rte_memory_order_relaxed);
 
 	return 1;
 }
-- 
1.8.3.1


Thread overview: 91+ messages
2023-10-16 23:08 [PATCH 00/21] " Tyler Retzlaff
2023-10-16 23:08 ` [PATCH 01/21] power: fix use of rte stdatomic Tyler Retzlaff
2023-10-16 23:08 ` [PATCH 02/21] event/cnxk: remove single " Tyler Retzlaff
2023-10-16 23:08 ` [PATCH 03/21] power: use rte optional stdatomic API Tyler Retzlaff
2023-10-16 23:08 ` [PATCH 04/21] bbdev: " Tyler Retzlaff
2023-10-16 23:08 ` [PATCH 05/21] eal: " Tyler Retzlaff
2023-10-16 23:08 ` [PATCH 06/21] eventdev: " Tyler Retzlaff
2023-10-16 23:08 ` [PATCH 07/21] gpudev: " Tyler Retzlaff
2023-10-16 23:08 ` [PATCH 08/21] ipsec: " Tyler Retzlaff
2023-10-16 23:08 ` [PATCH 09/21] mbuf: " Tyler Retzlaff
2023-10-16 23:08 ` [PATCH 10/21] mempool: " Tyler Retzlaff
2023-10-16 23:08 ` [PATCH 11/21] rcu: " Tyler Retzlaff
2023-10-16 23:08 ` [PATCH 12/21] pdump: " Tyler Retzlaff
2023-10-16 23:08 ` [PATCH 13/21] stack: " Tyler Retzlaff
2023-10-16 23:08 ` [PATCH 14/21] telemetry: " Tyler Retzlaff
2023-10-16 23:08 ` [PATCH 15/21] vhost: " Tyler Retzlaff
2023-10-16 23:09 ` [PATCH 16/21] cryptodev: " Tyler Retzlaff
2023-10-16 23:09 ` [PATCH 17/21] distributor: " Tyler Retzlaff
2023-10-16 23:09 ` [PATCH 18/21] ethdev: " Tyler Retzlaff
2023-10-16 23:09 ` [PATCH 19/21] hash: " Tyler Retzlaff
2023-10-16 23:09 ` [PATCH 20/21] timer: " Tyler Retzlaff
2023-10-16 23:09 ` [PATCH 21/21] ring: " Tyler Retzlaff
2023-10-17 20:30 ` [PATCH v2 00/19] " Tyler Retzlaff
2023-10-17 20:30   ` [PATCH v2 01/19] power: " Tyler Retzlaff
2023-10-17 20:31   ` [PATCH v2 02/19] bbdev: " Tyler Retzlaff
2023-10-17 20:31   ` [PATCH v2 03/19] eal: " Tyler Retzlaff
2023-10-17 20:31   ` [PATCH v2 04/19] eventdev: " Tyler Retzlaff
2023-10-17 20:31   ` [PATCH v2 05/19] gpudev: " Tyler Retzlaff
2023-10-17 20:31   ` [PATCH v2 06/19] ipsec: " Tyler Retzlaff
2023-10-24  8:45     ` Konstantin Ananyev
2023-10-17 20:31   ` [PATCH v2 07/19] mbuf: " Tyler Retzlaff
2023-10-24  8:46     ` Konstantin Ananyev
2023-10-17 20:31   ` [PATCH v2 08/19] mempool: " Tyler Retzlaff
2023-10-24  8:47     ` Konstantin Ananyev
2023-10-17 20:31   ` [PATCH v2 09/19] rcu: " Tyler Retzlaff
2023-10-25  9:41     ` Ruifeng Wang
2023-10-25 22:38       ` Tyler Retzlaff
2023-10-26  4:24         ` Ruifeng Wang
2023-10-26 16:36           ` Tyler Retzlaff
2023-10-17 20:31   ` [PATCH v2 10/19] pdump: " Tyler Retzlaff
2023-10-17 20:31   ` [PATCH v2 11/19] stack: " Tyler Retzlaff
2023-10-24  8:48     ` Konstantin Ananyev
2023-10-17 20:31   ` [PATCH v2 12/19] telemetry: " Tyler Retzlaff
2023-10-17 20:31   ` [PATCH v2 13/19] vhost: " Tyler Retzlaff
2023-10-17 20:31   ` [PATCH v2 14/19] cryptodev: " Tyler Retzlaff
2023-10-17 20:31   ` [PATCH v2 15/19] distributor: " Tyler Retzlaff
2023-10-17 20:31   ` [PATCH v2 16/19] ethdev: " Tyler Retzlaff
2023-10-17 20:31   ` [PATCH v2 17/19] hash: " Tyler Retzlaff
2023-10-17 20:31   ` [PATCH v2 18/19] timer: " Tyler Retzlaff
2023-10-17 20:31   ` [PATCH v2 19/19] ring: " Tyler Retzlaff
2023-10-24  8:43     ` Konstantin Ananyev
2023-10-24  9:56       ` Morten Brørup
2023-10-24 15:58         ` Tyler Retzlaff
2023-10-24 16:36           ` Morten Brørup
2023-10-24 16:29       ` Tyler Retzlaff
2023-10-25 10:06         ` Konstantin Ananyev
2023-10-25 22:49           ` Tyler Retzlaff
2023-10-25 23:22             ` Tyler Retzlaff
2023-10-17 23:55   ` [PATCH v2 00/19] " Stephen Hemminger
2023-10-26  0:31 ` [PATCH v3 " Tyler Retzlaff
2023-10-26  0:31   ` [PATCH v3 01/19] power: " Tyler Retzlaff
2023-10-26  0:31   ` [PATCH v3 02/19] bbdev: " Tyler Retzlaff
2023-10-26 11:57     ` Maxime Coquelin
2023-10-26  0:31   ` [PATCH v3 03/19] eal: " Tyler Retzlaff
2023-10-26  0:31   ` [PATCH v3 04/19] eventdev: " Tyler Retzlaff
2023-10-26  0:31   ` [PATCH v3 05/19] gpudev: " Tyler Retzlaff
2023-10-26  0:31   ` [PATCH v3 06/19] ipsec: " Tyler Retzlaff
2023-10-26 15:54     ` [EXT] " Akhil Goyal
2023-10-27 12:59     ` Konstantin Ananyev
2023-10-26  0:31   ` [PATCH v3 07/19] mbuf: " Tyler Retzlaff
2023-10-27 13:03     ` Konstantin Ananyev
2023-10-26  0:31   ` [PATCH v3 08/19] mempool: " Tyler Retzlaff
2023-10-27 13:01     ` Konstantin Ananyev
2023-10-26  0:31   ` Tyler Retzlaff [this message]
2023-10-26  0:31   ` [PATCH v3 10/19] pdump: " Tyler Retzlaff
2023-10-26  0:31   ` [PATCH v3 11/19] stack: " Tyler Retzlaff
2023-10-26  0:31   ` [PATCH v3 12/19] telemetry: " Tyler Retzlaff
2023-10-26  0:31   ` [PATCH v3 13/19] vhost: " Tyler Retzlaff
2023-10-26 11:57     ` Maxime Coquelin
2023-10-26  0:31   ` [PATCH v3 14/19] cryptodev: " Tyler Retzlaff
2023-10-26 15:53     ` [EXT] " Akhil Goyal
2023-10-27 13:05     ` Konstantin Ananyev
2023-10-26  0:31   ` [PATCH v3 15/19] distributor: " Tyler Retzlaff
2023-10-26  0:31   ` [PATCH v3 16/19] ethdev: " Tyler Retzlaff
2023-10-27 13:04     ` Konstantin Ananyev
2023-10-26  0:31   ` [PATCH v3 17/19] hash: " Tyler Retzlaff
2023-10-26  0:31   ` [PATCH v3 18/19] timer: " Tyler Retzlaff
2023-10-26  0:31   ` [PATCH v3 19/19] ring: " Tyler Retzlaff
2023-10-27 12:58     ` Konstantin Ananyev
2023-10-26 13:47   ` [PATCH v3 00/19] " David Marchand
2023-10-30 15:34   ` David Marchand
