DPDK patches and discussions
From: Tyler Retzlaff <roretzla@linux.microsoft.com>
To: dev@dpdk.org
Cc: Akhil Goyal <gakhil@marvell.com>,
	Anatoly Burakov <anatoly.burakov@intel.com>,
	Andrew Rybchenko <andrew.rybchenko@oktetlabs.ru>,
	Bruce Richardson <bruce.richardson@intel.com>,
	Chenbo Xia <chenbo.xia@intel.com>,
	Ciara Power <ciara.power@intel.com>,
	David Christensen <drc@linux.vnet.ibm.com>,
	David Hunt <david.hunt@intel.com>,
	Dmitry Kozlyuk <dmitry.kozliuk@gmail.com>,
	Dmitry Malloy <dmitrym@microsoft.com>,
	Elena Agostini <eagostini@nvidia.com>,
	Erik Gabriel Carrillo <erik.g.carrillo@intel.com>,
	Fan Zhang <fanzhang.oss@gmail.com>,
	Ferruh Yigit <ferruh.yigit@amd.com>,
	Harman Kalra <hkalra@marvell.com>,
	Harry van Haaren <harry.van.haaren@intel.com>,
	Honnappa Nagarahalli <honnappa.nagarahalli@arm.com>,
	Jerin Jacob <jerinj@marvell.com>,
	Konstantin Ananyev <konstantin.v.ananyev@yandex.ru>,
	Matan Azrad <matan@nvidia.com>,
	Maxime Coquelin <maxime.coquelin@redhat.com>,
	Narcisa Ana Maria Vasile <navasile@linux.microsoft.com>,
	Nicolas Chautru <nicolas.chautru@intel.com>,
	Olivier Matz <olivier.matz@6wind.com>, Ori Kam <orika@nvidia.com>,
	Pallavi Kadam <pallavi.kadam@intel.com>,
	Pavan Nikhilesh <pbhagavatula@marvell.com>,
	Reshma Pattan <reshma.pattan@intel.com>,
	Sameh Gobriel <sameh.gobriel@intel.com>,
	Shijith Thotton <sthotton@marvell.com>,
	Sivaprasad Tummala <sivaprasad.tummala@amd.com>,
	Stephen Hemminger <stephen@networkplumber.org>,
	Suanming Mou <suanmingm@nvidia.com>,
	Sunil Kumar Kori <skori@marvell.com>,
	Thomas Monjalon <thomas@monjalon.net>,
	Viacheslav Ovsiienko <viacheslavo@nvidia.com>,
	Vladimir Medvedkin <vladimir.medvedkin@intel.com>,
	Yipeng Wang <yipeng1.wang@intel.com>,
	Tyler Retzlaff <roretzla@linux.microsoft.com>
Subject: [PATCH 19/21] hash: use rte optional stdatomic API
Date: Mon, 16 Oct 2023 16:09:03 -0700
Message-ID: <1697497745-20664-20-git-send-email-roretzla@linux.microsoft.com>
In-Reply-To: <1697497745-20664-1-git-send-email-roretzla@linux.microsoft.com>

Replace the use of the gcc builtin __atomic_xxx intrinsics with the
corresponding rte_atomic_xxx optional stdatomic API.
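
For readers new to the rte_stdatomic wrappers, a minimal sketch of the
conversion pattern applied throughout this patch (illustrative only, not
part of the change; the variable and function names are hypothetical):

    #include <stdint.h>
    #include <rte_stdatomic.h>

    /* RTE_ATOMIC() marks an object as atomic when DPDK is built with
     * stdatomic support enabled; otherwise it expands to the plain type
     * and the wrappers below fall back to the gcc __atomic builtins.
     */
    static RTE_ATOMIC(uint32_t) tbl_chng_cnt_example;

    static void writer_bump(void)
    {
        /* was: __atomic_store_n(&cnt, *cnt + 1, __ATOMIC_RELEASE); */
        rte_atomic_store_explicit(&tbl_chng_cnt_example,
                tbl_chng_cnt_example + 1, rte_memory_order_release);
    }

    static uint32_t reader_snapshot(void)
    {
        /* was: __atomic_load_n(&cnt, __ATOMIC_ACQUIRE); */
        return rte_atomic_load_explicit(&tbl_chng_cnt_example,
                rte_memory_order_acquire);
    }

The API is "optional" in that it only maps to C11 stdatomic when DPDK is
configured with the enable_stdatomic build option (assuming the meson
option name from the earlier stdatomic series); the default build keeps
the existing __atomic code generation.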

Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
---
 lib/hash/rte_cuckoo_hash.c | 116 ++++++++++++++++++++++-----------------------
 lib/hash/rte_cuckoo_hash.h |   6 +--
 2 files changed, 61 insertions(+), 61 deletions(-)

diff --git a/lib/hash/rte_cuckoo_hash.c b/lib/hash/rte_cuckoo_hash.c
index 19b23f2..b2cf60d 100644
--- a/lib/hash/rte_cuckoo_hash.c
+++ b/lib/hash/rte_cuckoo_hash.c
@@ -149,7 +149,7 @@ struct rte_hash *
 	unsigned int writer_takes_lock = 0;
 	unsigned int no_free_on_del = 0;
 	uint32_t *ext_bkt_to_free = NULL;
-	uint32_t *tbl_chng_cnt = NULL;
+	RTE_ATOMIC(uint32_t) *tbl_chng_cnt = NULL;
 	struct lcore_cache *local_free_slots = NULL;
 	unsigned int readwrite_concur_lf_support = 0;
 	uint32_t i;
@@ -713,9 +713,9 @@ struct rte_hash *
 				 * variable. Release the application data
 				 * to the readers.
 				 */
-				__atomic_store_n(&k->pdata,
+				rte_atomic_store_explicit(&k->pdata,
 					data,
-					__ATOMIC_RELEASE);
+					rte_memory_order_release);
 				/*
 				 * Return index where key is stored,
 				 * subtracting the first dummy index
@@ -776,9 +776,9 @@ struct rte_hash *
 			 * key_idx is the guard variable for signature
 			 * and key.
 			 */
-			__atomic_store_n(&prim_bkt->key_idx[i],
+			rte_atomic_store_explicit(&prim_bkt->key_idx[i],
 					 new_idx,
-					 __ATOMIC_RELEASE);
+					 rte_memory_order_release);
 			break;
 		}
 	}
@@ -851,9 +851,9 @@ struct rte_hash *
 		if (unlikely(&h->buckets[prev_alt_bkt_idx]
 				!= curr_bkt)) {
 			/* revert it to empty, otherwise duplicated keys */
-			__atomic_store_n(&curr_bkt->key_idx[curr_slot],
+			rte_atomic_store_explicit(&curr_bkt->key_idx[curr_slot],
 				EMPTY_SLOT,
-				__ATOMIC_RELEASE);
+				rte_memory_order_release);
 			__hash_rw_writer_unlock(h);
 			return -1;
 		}
@@ -865,13 +865,13 @@ struct rte_hash *
 			 * Since there is one writer, load acquires on
 			 * tbl_chng_cnt are not required.
 			 */
-			__atomic_store_n(h->tbl_chng_cnt,
+			rte_atomic_store_explicit(h->tbl_chng_cnt,
 					 *h->tbl_chng_cnt + 1,
-					 __ATOMIC_RELEASE);
+					 rte_memory_order_release);
 			/* The store to sig_current should not
 			 * move above the store to tbl_chng_cnt.
 			 */
-			__atomic_thread_fence(__ATOMIC_RELEASE);
+			__atomic_thread_fence(rte_memory_order_release);
 		}
 
 		/* Need to swap current/alt sig to allow later
@@ -881,9 +881,9 @@ struct rte_hash *
 		curr_bkt->sig_current[curr_slot] =
 			prev_bkt->sig_current[prev_slot];
 		/* Release the updated bucket entry */
-		__atomic_store_n(&curr_bkt->key_idx[curr_slot],
+		rte_atomic_store_explicit(&curr_bkt->key_idx[curr_slot],
 			prev_bkt->key_idx[prev_slot],
-			__ATOMIC_RELEASE);
+			rte_memory_order_release);
 
 		curr_slot = prev_slot;
 		curr_node = prev_node;
@@ -897,20 +897,20 @@ struct rte_hash *
 		 * Since there is one writer, load acquires on
 		 * tbl_chng_cnt are not required.
 		 */
-		__atomic_store_n(h->tbl_chng_cnt,
+		rte_atomic_store_explicit(h->tbl_chng_cnt,
 				 *h->tbl_chng_cnt + 1,
-				 __ATOMIC_RELEASE);
+				 rte_memory_order_release);
 		/* The store to sig_current should not
 		 * move above the store to tbl_chng_cnt.
 		 */
-		__atomic_thread_fence(__ATOMIC_RELEASE);
+		__atomic_thread_fence(rte_memory_order_release);
 	}
 
 	curr_bkt->sig_current[curr_slot] = sig;
 	/* Release the new bucket entry */
-	__atomic_store_n(&curr_bkt->key_idx[curr_slot],
+	rte_atomic_store_explicit(&curr_bkt->key_idx[curr_slot],
 			 new_idx,
-			 __ATOMIC_RELEASE);
+			 rte_memory_order_release);
 
 	__hash_rw_writer_unlock(h);
 
@@ -1076,9 +1076,9 @@ struct rte_hash *
 	 * not leak after the store of pdata in the key store. i.e. pdata is
 	 * the guard variable. Release the application data to the readers.
 	 */
-	__atomic_store_n(&new_k->pdata,
+	rte_atomic_store_explicit(&new_k->pdata,
 		data,
-		__ATOMIC_RELEASE);
+		rte_memory_order_release);
 	/* Copy key */
 	memcpy(new_k->key, key, h->key_len);
 
@@ -1149,9 +1149,9 @@ struct rte_hash *
 				 * key_idx is the guard variable for signature
 				 * and key.
 				 */
-				__atomic_store_n(&cur_bkt->key_idx[i],
+				rte_atomic_store_explicit(&cur_bkt->key_idx[i],
 						 slot_id,
-						 __ATOMIC_RELEASE);
+						 rte_memory_order_release);
 				__hash_rw_writer_unlock(h);
 				return slot_id - 1;
 			}
@@ -1185,9 +1185,9 @@ struct rte_hash *
 	 * the store to key_idx. i.e. key_idx is the guard variable
 	 * for signature and key.
 	 */
-	__atomic_store_n(&(h->buckets_ext[ext_bkt_id - 1]).key_idx[0],
+	rte_atomic_store_explicit(&(h->buckets_ext[ext_bkt_id - 1]).key_idx[0],
 			 slot_id,
-			 __ATOMIC_RELEASE);
+			 rte_memory_order_release);
 	/* Link the new bucket to sec bucket linked list */
 	last = rte_hash_get_last_bkt(sec_bkt);
 	last->next = &h->buckets_ext[ext_bkt_id - 1];
@@ -1290,17 +1290,17 @@ struct rte_hash *
 		 * key comparison will ensure that the lookup fails.
 		 */
 		if (bkt->sig_current[i] == sig) {
-			key_idx = __atomic_load_n(&bkt->key_idx[i],
-					  __ATOMIC_ACQUIRE);
+			key_idx = rte_atomic_load_explicit(&bkt->key_idx[i],
+					  rte_memory_order_acquire);
 			if (key_idx != EMPTY_SLOT) {
 				k = (struct rte_hash_key *) ((char *)keys +
 						key_idx * h->key_entry_size);
 
 				if (rte_hash_cmp_eq(key, k->key, h) == 0) {
 					if (data != NULL) {
-						*data = __atomic_load_n(
+						*data = rte_atomic_load_explicit(
 							&k->pdata,
-							__ATOMIC_ACQUIRE);
+							rte_memory_order_acquire);
 					}
 					/*
 					 * Return index where key is stored,
@@ -1374,8 +1374,8 @@ struct rte_hash *
 		 * starts. Acquire semantics will make sure that
 		 * loads in search_one_bucket are not hoisted.
 		 */
-		cnt_b = __atomic_load_n(h->tbl_chng_cnt,
-				__ATOMIC_ACQUIRE);
+		cnt_b = rte_atomic_load_explicit(h->tbl_chng_cnt,
+				rte_memory_order_acquire);
 
 		/* Check if key is in primary location */
 		bkt = &h->buckets[prim_bucket_idx];
@@ -1396,7 +1396,7 @@ struct rte_hash *
 		/* The loads of sig_current in search_one_bucket
 		 * should not move below the load from tbl_chng_cnt.
 		 */
-		__atomic_thread_fence(__ATOMIC_ACQUIRE);
+		__atomic_thread_fence(rte_memory_order_acquire);
 		/* Re-read the table change counter to check if the
 		 * table has changed during search. If yes, re-do
 		 * the search.
@@ -1405,8 +1405,8 @@ struct rte_hash *
 		 * and key index in secondary bucket will make sure
 		 * that it does not get hoisted.
 		 */
-		cnt_a = __atomic_load_n(h->tbl_chng_cnt,
-					__ATOMIC_ACQUIRE);
+		cnt_a = rte_atomic_load_explicit(h->tbl_chng_cnt,
+					rte_memory_order_acquire);
 	} while (cnt_b != cnt_a);
 
 	return -ENOENT;
@@ -1611,26 +1611,26 @@ struct rte_hash *
 	for (i = RTE_HASH_BUCKET_ENTRIES - 1; i >= 0; i--) {
 		if (last_bkt->key_idx[i] != EMPTY_SLOT) {
 			cur_bkt->sig_current[pos] = last_bkt->sig_current[i];
-			__atomic_store_n(&cur_bkt->key_idx[pos],
+			rte_atomic_store_explicit(&cur_bkt->key_idx[pos],
 					 last_bkt->key_idx[i],
-					 __ATOMIC_RELEASE);
+					 rte_memory_order_release);
 			if (h->readwrite_concur_lf_support) {
 				/* Inform the readers that the table has changed
 				 * Since there is one writer, load acquire on
 				 * tbl_chng_cnt is not required.
 				 */
-				__atomic_store_n(h->tbl_chng_cnt,
+				rte_atomic_store_explicit(h->tbl_chng_cnt,
 					 *h->tbl_chng_cnt + 1,
-					 __ATOMIC_RELEASE);
+					 rte_memory_order_release);
 				/* The store to sig_current should
 				 * not move above the store to tbl_chng_cnt.
 				 */
-				__atomic_thread_fence(__ATOMIC_RELEASE);
+				__atomic_thread_fence(rte_memory_order_release);
 			}
 			last_bkt->sig_current[i] = NULL_SIGNATURE;
-			__atomic_store_n(&last_bkt->key_idx[i],
+			rte_atomic_store_explicit(&last_bkt->key_idx[i],
 					 EMPTY_SLOT,
-					 __ATOMIC_RELEASE);
+					 rte_memory_order_release);
 			return;
 		}
 	}
@@ -1650,8 +1650,8 @@ struct rte_hash *
 
 	/* Check if key is in bucket */
 	for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
-		key_idx = __atomic_load_n(&bkt->key_idx[i],
-					  __ATOMIC_ACQUIRE);
+		key_idx = rte_atomic_load_explicit(&bkt->key_idx[i],
+					  rte_memory_order_acquire);
 		if (bkt->sig_current[i] == sig && key_idx != EMPTY_SLOT) {
 			k = (struct rte_hash_key *) ((char *)keys +
 					key_idx * h->key_entry_size);
@@ -1663,9 +1663,9 @@ struct rte_hash *
 				if (!h->no_free_on_del)
 					remove_entry(h, bkt, i);
 
-				__atomic_store_n(&bkt->key_idx[i],
+				rte_atomic_store_explicit(&bkt->key_idx[i],
 						 EMPTY_SLOT,
-						 __ATOMIC_RELEASE);
+						 rte_memory_order_release);
 
 				*pos = i;
 				/*
@@ -2077,8 +2077,8 @@ struct rte_hash *
 		 * starts. Acquire semantics will make sure that
 		 * loads in compare_signatures are not hoisted.
 		 */
-		cnt_b = __atomic_load_n(h->tbl_chng_cnt,
-					__ATOMIC_ACQUIRE);
+		cnt_b = rte_atomic_load_explicit(h->tbl_chng_cnt,
+					rte_memory_order_acquire);
 
 		/* Compare signatures and prefetch key slot of first hit */
 		for (i = 0; i < num_keys; i++) {
@@ -2121,9 +2121,9 @@ struct rte_hash *
 						__builtin_ctzl(prim_hitmask[i])
 						>> 1;
 				uint32_t key_idx =
-				__atomic_load_n(
+				rte_atomic_load_explicit(
 					&primary_bkt[i]->key_idx[hit_index],
-					__ATOMIC_ACQUIRE);
+					rte_memory_order_acquire);
 				const struct rte_hash_key *key_slot =
 					(const struct rte_hash_key *)(
 					(const char *)h->key_store +
@@ -2137,9 +2137,9 @@ struct rte_hash *
 					!rte_hash_cmp_eq(
 						key_slot->key, keys[i], h)) {
 					if (data != NULL)
-						data[i] = __atomic_load_n(
+						data[i] = rte_atomic_load_explicit(
 							&key_slot->pdata,
-							__ATOMIC_ACQUIRE);
+							rte_memory_order_acquire);
 
 					hits |= 1ULL << i;
 					positions[i] = key_idx - 1;
@@ -2153,9 +2153,9 @@ struct rte_hash *
 						__builtin_ctzl(sec_hitmask[i])
 						>> 1;
 				uint32_t key_idx =
-				__atomic_load_n(
+				rte_atomic_load_explicit(
 					&secondary_bkt[i]->key_idx[hit_index],
-					__ATOMIC_ACQUIRE);
+					rte_memory_order_acquire);
 				const struct rte_hash_key *key_slot =
 					(const struct rte_hash_key *)(
 					(const char *)h->key_store +
@@ -2170,9 +2170,9 @@ struct rte_hash *
 					!rte_hash_cmp_eq(
 						key_slot->key, keys[i], h)) {
 					if (data != NULL)
-						data[i] = __atomic_load_n(
+						data[i] = rte_atomic_load_explicit(
 							&key_slot->pdata,
-							__ATOMIC_ACQUIRE);
+							rte_memory_order_acquire);
 
 					hits |= 1ULL << i;
 					positions[i] = key_idx - 1;
@@ -2216,7 +2216,7 @@ struct rte_hash *
 		/* The loads of sig_current in compare_signatures
 		 * should not move below the load from tbl_chng_cnt.
 		 */
-		__atomic_thread_fence(__ATOMIC_ACQUIRE);
+		__atomic_thread_fence(rte_memory_order_acquire);
 		/* Re-read the table change counter to check if the
 		 * table has changed during search. If yes, re-do
 		 * the search.
@@ -2225,8 +2225,8 @@ struct rte_hash *
 		 * key index will make sure that it does not get
 		 * hoisted.
 		 */
-		cnt_a = __atomic_load_n(h->tbl_chng_cnt,
-					__ATOMIC_ACQUIRE);
+		cnt_a = rte_atomic_load_explicit(h->tbl_chng_cnt,
+					rte_memory_order_acquire);
 	} while (cnt_b != cnt_a);
 
 	if (hit_mask != NULL)
@@ -2498,8 +2498,8 @@ struct rte_hash *
 	idx = *next % RTE_HASH_BUCKET_ENTRIES;
 
 	/* If current position is empty, go to the next one */
-	while ((position = __atomic_load_n(&h->buckets[bucket_idx].key_idx[idx],
-					__ATOMIC_ACQUIRE)) == EMPTY_SLOT) {
+	while ((position = rte_atomic_load_explicit(&h->buckets[bucket_idx].key_idx[idx],
+					rte_memory_order_acquire)) == EMPTY_SLOT) {
 		(*next)++;
 		/* End of table */
 		if (*next == total_entries_main)
diff --git a/lib/hash/rte_cuckoo_hash.h b/lib/hash/rte_cuckoo_hash.h
index eb2644f..f7afc4d 100644
--- a/lib/hash/rte_cuckoo_hash.h
+++ b/lib/hash/rte_cuckoo_hash.h
@@ -137,7 +137,7 @@ struct lcore_cache {
 struct rte_hash_key {
 	union {
 		uintptr_t idata;
-		void *pdata;
+		RTE_ATOMIC(void *) pdata;
 	};
 	/* Variable key size */
 	char key[0];
@@ -155,7 +155,7 @@ enum rte_hash_sig_compare_function {
 struct rte_hash_bucket {
 	uint16_t sig_current[RTE_HASH_BUCKET_ENTRIES];
 
-	uint32_t key_idx[RTE_HASH_BUCKET_ENTRIES];
+	RTE_ATOMIC(uint32_t) key_idx[RTE_HASH_BUCKET_ENTRIES];
 
 	uint8_t flag[RTE_HASH_BUCKET_ENTRIES];
 
@@ -229,7 +229,7 @@ struct rte_hash {
 	 * is piggy-backed to freeing of the key index.
 	 */
 	uint32_t *ext_bkt_to_free;
-	uint32_t *tbl_chng_cnt;
+	RTE_ATOMIC(uint32_t) *tbl_chng_cnt;
 	/**< Indicates if the hash table changed from last read. */
 } __rte_cache_aligned;
 
-- 
1.8.3.1


