From: Tyler Retzlaff <roretzla@linux.microsoft.com>
To: dev@dpdk.org
Cc: Akhil Goyal <gakhil@marvell.com>,
Anatoly Burakov <anatoly.burakov@intel.com>,
Andrew Rybchenko <andrew.rybchenko@oktetlabs.ru>,
Bruce Richardson <bruce.richardson@intel.com>,
Chenbo Xia <chenbo.xia@intel.com>,
Ciara Power <ciara.power@intel.com>,
David Christensen <drc@linux.vnet.ibm.com>,
David Hunt <david.hunt@intel.com>,
Dmitry Kozlyuk <dmitry.kozliuk@gmail.com>,
Dmitry Malloy <dmitrym@microsoft.com>,
Elena Agostini <eagostini@nvidia.com>,
Erik Gabriel Carrillo <erik.g.carrillo@intel.com>,
Fan Zhang <fanzhang.oss@gmail.com>,
Ferruh Yigit <ferruh.yigit@amd.com>,
Harman Kalra <hkalra@marvell.com>,
Harry van Haaren <harry.van.haaren@intel.com>,
Honnappa Nagarahalli <honnappa.nagarahalli@arm.com>,
Jerin Jacob <jerinj@marvell.com>,
Konstantin Ananyev <konstantin.v.ananyev@yandex.ru>,
Matan Azrad <matan@nvidia.com>,
Maxime Coquelin <maxime.coquelin@redhat.com>,
Narcisa Ana Maria Vasile <navasile@linux.microsoft.com>,
Nicolas Chautru <nicolas.chautru@intel.com>,
Olivier Matz <olivier.matz@6wind.com>, Ori Kam <orika@nvidia.com>,
Pallavi Kadam <pallavi.kadam@intel.com>,
Pavan Nikhilesh <pbhagavatula@marvell.com>,
Reshma Pattan <reshma.pattan@intel.com>,
Sameh Gobriel <sameh.gobriel@intel.com>,
Shijith Thotton <sthotton@marvell.com>,
Sivaprasad Tummala <sivaprasad.tummala@amd.com>,
Stephen Hemminger <stephen@networkplumber.org>,
Suanming Mou <suanmingm@nvidia.com>,
Sunil Kumar Kori <skori@marvell.com>,
Thomas Monjalon <thomas@monjalon.net>,
Viacheslav Ovsiienko <viacheslavo@nvidia.com>,
Vladimir Medvedkin <vladimir.medvedkin@intel.com>,
Yipeng Wang <yipeng1.wang@intel.com>,
Tyler Retzlaff <roretzla@linux.microsoft.com>
Subject: [PATCH 18/21] ethdev: use rte optional stdatomic API
Date: Mon, 16 Oct 2023 16:09:02 -0700 [thread overview]
Message-ID: <1697497745-20664-19-git-send-email-roretzla@linux.microsoft.com> (raw)
In-Reply-To: <1697497745-20664-1-git-send-email-roretzla@linux.microsoft.com>
Replace the use of gcc builtin __atomic_xxx intrinsics with the
corresponding rte_atomic_xxx optional stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
---
lib/ethdev/ethdev_driver.h | 16 ++++++++--------
lib/ethdev/ethdev_private.c | 6 +++---
lib/ethdev/rte_ethdev.c | 24 ++++++++++++------------
lib/ethdev/rte_ethdev.h | 16 ++++++++--------
lib/ethdev/rte_ethdev_core.h | 2 +-
5 files changed, 32 insertions(+), 32 deletions(-)
diff --git a/lib/ethdev/ethdev_driver.h b/lib/ethdev/ethdev_driver.h
index deb23ad..b482cd1 100644
--- a/lib/ethdev/ethdev_driver.h
+++ b/lib/ethdev/ethdev_driver.h
@@ -30,7 +30,7 @@
* queue on Rx and Tx.
*/
struct rte_eth_rxtx_callback {
- struct rte_eth_rxtx_callback *next;
+ RTE_ATOMIC(struct rte_eth_rxtx_callback *) next;
union{
rte_rx_callback_fn rx;
rte_tx_callback_fn tx;
@@ -80,12 +80,12 @@ struct rte_eth_dev {
* User-supplied functions called from rx_burst to post-process
* received packets before passing them to the user
*/
- struct rte_eth_rxtx_callback *post_rx_burst_cbs[RTE_MAX_QUEUES_PER_PORT];
+ RTE_ATOMIC(struct rte_eth_rxtx_callback *) post_rx_burst_cbs[RTE_MAX_QUEUES_PER_PORT];
/**
* User-supplied functions called from tx_burst to pre-process
* received packets before passing them to the driver for transmission
*/
- struct rte_eth_rxtx_callback *pre_tx_burst_cbs[RTE_MAX_QUEUES_PER_PORT];
+ RTE_ATOMIC(struct rte_eth_rxtx_callback *) pre_tx_burst_cbs[RTE_MAX_QUEUES_PER_PORT];
enum rte_eth_dev_state state; /**< Flag indicating the port state */
void *security_ctx; /**< Context for security ops */
@@ -1655,7 +1655,7 @@ int rte_eth_dev_callback_process(struct rte_eth_dev *dev,
rte_eth_linkstatus_set(struct rte_eth_dev *dev,
const struct rte_eth_link *new_link)
{
- uint64_t *dev_link = (uint64_t *)&(dev->data->dev_link);
+ RTE_ATOMIC(uint64_t) *dev_link = (uint64_t __rte_atomic *)&(dev->data->dev_link);
union {
uint64_t val64;
struct rte_eth_link link;
@@ -1663,8 +1663,8 @@ int rte_eth_dev_callback_process(struct rte_eth_dev *dev,
RTE_BUILD_BUG_ON(sizeof(*new_link) != sizeof(uint64_t));
- orig.val64 = __atomic_exchange_n(dev_link, *(const uint64_t *)new_link,
- __ATOMIC_SEQ_CST);
+ orig.val64 = rte_atomic_exchange_explicit(dev_link, *(const uint64_t *)new_link,
+ rte_memory_order_seq_cst);
return (orig.link.link_status == new_link->link_status) ? -1 : 0;
}
@@ -1682,12 +1682,12 @@ int rte_eth_dev_callback_process(struct rte_eth_dev *dev,
rte_eth_linkstatus_get(const struct rte_eth_dev *dev,
struct rte_eth_link *link)
{
- uint64_t *src = (uint64_t *)&(dev->data->dev_link);
+ RTE_ATOMIC(uint64_t) *src = (uint64_t __rte_atomic *)&(dev->data->dev_link);
uint64_t *dst = (uint64_t *)link;
RTE_BUILD_BUG_ON(sizeof(*link) != sizeof(uint64_t));
- *dst = __atomic_load_n(src, __ATOMIC_SEQ_CST);
+ *dst = rte_atomic_load_explicit(src, rte_memory_order_seq_cst);
}
/**
diff --git a/lib/ethdev/ethdev_private.c b/lib/ethdev/ethdev_private.c
index 7cc7f28..82e2568 100644
--- a/lib/ethdev/ethdev_private.c
+++ b/lib/ethdev/ethdev_private.c
@@ -245,7 +245,7 @@ struct dummy_queue {
void
eth_dev_fp_ops_reset(struct rte_eth_fp_ops *fpo)
{
- static void *dummy_data[RTE_MAX_QUEUES_PER_PORT];
+ static RTE_ATOMIC(void *) dummy_data[RTE_MAX_QUEUES_PER_PORT];
uintptr_t port_id = fpo - rte_eth_fp_ops;
per_port_queues[port_id].rx_warn_once = false;
@@ -278,10 +278,10 @@ struct dummy_queue {
fpo->recycle_rx_descriptors_refill = dev->recycle_rx_descriptors_refill;
fpo->rxq.data = dev->data->rx_queues;
- fpo->rxq.clbk = (void **)(uintptr_t)dev->post_rx_burst_cbs;
+ fpo->rxq.clbk = (void * __rte_atomic *)(uintptr_t)dev->post_rx_burst_cbs;
fpo->txq.data = dev->data->tx_queues;
- fpo->txq.clbk = (void **)(uintptr_t)dev->pre_tx_burst_cbs;
+ fpo->txq.clbk = (void * __rte_atomic *)(uintptr_t)dev->pre_tx_burst_cbs;
}
uint16_t
diff --git a/lib/ethdev/rte_ethdev.c b/lib/ethdev/rte_ethdev.c
index 9dabcb5..af23ac0 100644
--- a/lib/ethdev/rte_ethdev.c
+++ b/lib/ethdev/rte_ethdev.c
@@ -5654,9 +5654,9 @@ int rte_eth_rx_avail_thresh_query(uint16_t port_id, uint16_t *queue_id,
/* Stores to cb->fn and cb->param should complete before
* cb is visible to data plane.
*/
- __atomic_store_n(
+ rte_atomic_store_explicit(
&rte_eth_devices[port_id].post_rx_burst_cbs[queue_id],
- cb, __ATOMIC_RELEASE);
+ cb, rte_memory_order_release);
} else {
while (tail->next)
@@ -5664,7 +5664,7 @@ int rte_eth_rx_avail_thresh_query(uint16_t port_id, uint16_t *queue_id,
/* Stores to cb->fn and cb->param should complete before
* cb is visible to data plane.
*/
- __atomic_store_n(&tail->next, cb, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&tail->next, cb, rte_memory_order_release);
}
rte_spinlock_unlock(&eth_dev_rx_cb_lock);
@@ -5704,9 +5704,9 @@ int rte_eth_rx_avail_thresh_query(uint16_t port_id, uint16_t *queue_id,
/* Stores to cb->fn, cb->param and cb->next should complete before
* cb is visible to data plane threads.
*/
- __atomic_store_n(
+ rte_atomic_store_explicit(
&rte_eth_devices[port_id].post_rx_burst_cbs[queue_id],
- cb, __ATOMIC_RELEASE);
+ cb, rte_memory_order_release);
rte_spinlock_unlock(&eth_dev_rx_cb_lock);
rte_eth_trace_add_first_rx_callback(port_id, queue_id, fn, user_param,
@@ -5757,9 +5757,9 @@ int rte_eth_rx_avail_thresh_query(uint16_t port_id, uint16_t *queue_id,
/* Stores to cb->fn and cb->param should complete before
* cb is visible to data plane.
*/
- __atomic_store_n(
+ rte_atomic_store_explicit(
&rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id],
- cb, __ATOMIC_RELEASE);
+ cb, rte_memory_order_release);
} else {
while (tail->next)
@@ -5767,7 +5767,7 @@ int rte_eth_rx_avail_thresh_query(uint16_t port_id, uint16_t *queue_id,
/* Stores to cb->fn and cb->param should complete before
* cb is visible to data plane.
*/
- __atomic_store_n(&tail->next, cb, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&tail->next, cb, rte_memory_order_release);
}
rte_spinlock_unlock(&eth_dev_tx_cb_lock);
@@ -5791,7 +5791,7 @@ int rte_eth_rx_avail_thresh_query(uint16_t port_id, uint16_t *queue_id,
struct rte_eth_dev *dev = &rte_eth_devices[port_id];
struct rte_eth_rxtx_callback *cb;
- struct rte_eth_rxtx_callback **prev_cb;
+ RTE_ATOMIC(struct rte_eth_rxtx_callback *) *prev_cb;
int ret = -EINVAL;
rte_spinlock_lock(&eth_dev_rx_cb_lock);
@@ -5800,7 +5800,7 @@ int rte_eth_rx_avail_thresh_query(uint16_t port_id, uint16_t *queue_id,
cb = *prev_cb;
if (cb == user_cb) {
/* Remove the user cb from the callback list. */
- __atomic_store_n(prev_cb, cb->next, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(prev_cb, cb->next, rte_memory_order_relaxed);
ret = 0;
break;
}
@@ -5828,7 +5828,7 @@ int rte_eth_rx_avail_thresh_query(uint16_t port_id, uint16_t *queue_id,
struct rte_eth_dev *dev = &rte_eth_devices[port_id];
int ret = -EINVAL;
struct rte_eth_rxtx_callback *cb;
- struct rte_eth_rxtx_callback **prev_cb;
+ RTE_ATOMIC(struct rte_eth_rxtx_callback *) *prev_cb;
rte_spinlock_lock(&eth_dev_tx_cb_lock);
prev_cb = &dev->pre_tx_burst_cbs[queue_id];
@@ -5836,7 +5836,7 @@ int rte_eth_rx_avail_thresh_query(uint16_t port_id, uint16_t *queue_id,
cb = *prev_cb;
if (cb == user_cb) {
/* Remove the user cb from the callback list. */
- __atomic_store_n(prev_cb, cb->next, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(prev_cb, cb->next, rte_memory_order_relaxed);
ret = 0;
break;
}
diff --git a/lib/ethdev/rte_ethdev.h b/lib/ethdev/rte_ethdev.h
index f949dfc..ec48b24 100644
--- a/lib/ethdev/rte_ethdev.h
+++ b/lib/ethdev/rte_ethdev.h
@@ -6018,14 +6018,14 @@ uint16_t rte_eth_call_rx_callbacks(uint16_t port_id, uint16_t queue_id,
{
void *cb;
- /* __ATOMIC_RELEASE memory order was used when the
+ /* rte_memory_order_release memory order was used when the
* call back was inserted into the list.
* Since there is a clear dependency between loading
- * cb and cb->fn/cb->next, __ATOMIC_ACQUIRE memory order is
+ * cb and cb->fn/cb->next, rte_memory_order_acquire memory order is
* not required.
*/
- cb = __atomic_load_n((void **)&p->rxq.clbk[queue_id],
- __ATOMIC_RELAXED);
+ cb = rte_atomic_load_explicit(&p->rxq.clbk[queue_id],
+ rte_memory_order_relaxed);
if (unlikely(cb != NULL))
nb_rx = rte_eth_call_rx_callbacks(port_id, queue_id,
rx_pkts, nb_rx, nb_pkts, cb);
@@ -6355,14 +6355,14 @@ uint16_t rte_eth_call_tx_callbacks(uint16_t port_id, uint16_t queue_id,
{
void *cb;
- /* __ATOMIC_RELEASE memory order was used when the
+ /* rte_memory_order_release memory order was used when the
* call back was inserted into the list.
* Since there is a clear dependency between loading
- * cb and cb->fn/cb->next, __ATOMIC_ACQUIRE memory order is
+ * cb and cb->fn/cb->next, rte_memory_order_acquire memory order is
* not required.
*/
- cb = __atomic_load_n((void **)&p->txq.clbk[queue_id],
- __ATOMIC_RELAXED);
+ cb = rte_atomic_load_explicit(&p->txq.clbk[queue_id],
+ rte_memory_order_relaxed);
if (unlikely(cb != NULL))
nb_pkts = rte_eth_call_tx_callbacks(port_id, queue_id,
tx_pkts, nb_pkts, cb);
diff --git a/lib/ethdev/rte_ethdev_core.h b/lib/ethdev/rte_ethdev_core.h
index 32f5f73..4bfaf79 100644
--- a/lib/ethdev/rte_ethdev_core.h
+++ b/lib/ethdev/rte_ethdev_core.h
@@ -71,7 +71,7 @@ struct rte_ethdev_qdata {
/** points to array of internal queue data pointers */
void **data;
/** points to array of queue callback data pointers */
- void **clbk;
+ RTE_ATOMIC(void *) *clbk;
};
/**
--
1.8.3.1
next prev parent reply other threads:[~2023-10-16 23:10 UTC|newest]
Thread overview: 91+ messages / expand[flat|nested] mbox.gz Atom feed top
2023-10-16 23:08 [PATCH 00/21] " Tyler Retzlaff
2023-10-16 23:08 ` [PATCH 01/21] power: fix use of rte stdatomic Tyler Retzlaff
2023-10-16 23:08 ` [PATCH 02/21] event/cnxk: remove single " Tyler Retzlaff
2023-10-16 23:08 ` [PATCH 03/21] power: use rte optional stdatomic API Tyler Retzlaff
2023-10-16 23:08 ` [PATCH 04/21] bbdev: " Tyler Retzlaff
2023-10-16 23:08 ` [PATCH 05/21] eal: " Tyler Retzlaff
2023-10-16 23:08 ` [PATCH 06/21] eventdev: " Tyler Retzlaff
2023-10-16 23:08 ` [PATCH 07/21] gpudev: " Tyler Retzlaff
2023-10-16 23:08 ` [PATCH 08/21] ipsec: " Tyler Retzlaff
2023-10-16 23:08 ` [PATCH 09/21] mbuf: " Tyler Retzlaff
2023-10-16 23:08 ` [PATCH 10/21] mempool: " Tyler Retzlaff
2023-10-16 23:08 ` [PATCH 11/21] rcu: " Tyler Retzlaff
2023-10-16 23:08 ` [PATCH 12/21] pdump: " Tyler Retzlaff
2023-10-16 23:08 ` [PATCH 13/21] stack: " Tyler Retzlaff
2023-10-16 23:08 ` [PATCH 14/21] telemetry: " Tyler Retzlaff
2023-10-16 23:08 ` [PATCH 15/21] vhost: " Tyler Retzlaff
2023-10-16 23:09 ` [PATCH 16/21] cryptodev: " Tyler Retzlaff
2023-10-16 23:09 ` [PATCH 17/21] distributor: " Tyler Retzlaff
2023-10-16 23:09 ` Tyler Retzlaff [this message]
2023-10-16 23:09 ` [PATCH 19/21] hash: " Tyler Retzlaff
2023-10-16 23:09 ` [PATCH 20/21] timer: " Tyler Retzlaff
2023-10-16 23:09 ` [PATCH 21/21] ring: " Tyler Retzlaff
2023-10-17 20:30 ` [PATCH v2 00/19] " Tyler Retzlaff
2023-10-17 20:30 ` [PATCH v2 01/19] power: " Tyler Retzlaff
2023-10-17 20:31 ` [PATCH v2 02/19] bbdev: " Tyler Retzlaff
2023-10-17 20:31 ` [PATCH v2 03/19] eal: " Tyler Retzlaff
2023-10-17 20:31 ` [PATCH v2 04/19] eventdev: " Tyler Retzlaff
2023-10-17 20:31 ` [PATCH v2 05/19] gpudev: " Tyler Retzlaff
2023-10-17 20:31 ` [PATCH v2 06/19] ipsec: " Tyler Retzlaff
2023-10-24 8:45 ` Konstantin Ananyev
2023-10-17 20:31 ` [PATCH v2 07/19] mbuf: " Tyler Retzlaff
2023-10-24 8:46 ` Konstantin Ananyev
2023-10-17 20:31 ` [PATCH v2 08/19] mempool: " Tyler Retzlaff
2023-10-24 8:47 ` Konstantin Ananyev
2023-10-17 20:31 ` [PATCH v2 09/19] rcu: " Tyler Retzlaff
2023-10-25 9:41 ` Ruifeng Wang
2023-10-25 22:38 ` Tyler Retzlaff
2023-10-26 4:24 ` Ruifeng Wang
2023-10-26 16:36 ` Tyler Retzlaff
2023-10-17 20:31 ` [PATCH v2 10/19] pdump: " Tyler Retzlaff
2023-10-17 20:31 ` [PATCH v2 11/19] stack: " Tyler Retzlaff
2023-10-24 8:48 ` Konstantin Ananyev
2023-10-17 20:31 ` [PATCH v2 12/19] telemetry: " Tyler Retzlaff
2023-10-17 20:31 ` [PATCH v2 13/19] vhost: " Tyler Retzlaff
2023-10-17 20:31 ` [PATCH v2 14/19] cryptodev: " Tyler Retzlaff
2023-10-17 20:31 ` [PATCH v2 15/19] distributor: " Tyler Retzlaff
2023-10-17 20:31 ` [PATCH v2 16/19] ethdev: " Tyler Retzlaff
2023-10-17 20:31 ` [PATCH v2 17/19] hash: " Tyler Retzlaff
2023-10-17 20:31 ` [PATCH v2 18/19] timer: " Tyler Retzlaff
2023-10-17 20:31 ` [PATCH v2 19/19] ring: " Tyler Retzlaff
2023-10-24 8:43 ` Konstantin Ananyev
2023-10-24 9:56 ` Morten Brørup
2023-10-24 15:58 ` Tyler Retzlaff
2023-10-24 16:36 ` Morten Brørup
2023-10-24 16:29 ` Tyler Retzlaff
2023-10-25 10:06 ` Konstantin Ananyev
2023-10-25 22:49 ` Tyler Retzlaff
2023-10-25 23:22 ` Tyler Retzlaff
2023-10-17 23:55 ` [PATCH v2 00/19] " Stephen Hemminger
2023-10-26 0:31 ` [PATCH v3 " Tyler Retzlaff
2023-10-26 0:31 ` [PATCH v3 01/19] power: " Tyler Retzlaff
2023-10-26 0:31 ` [PATCH v3 02/19] bbdev: " Tyler Retzlaff
2023-10-26 11:57 ` Maxime Coquelin
2023-10-26 0:31 ` [PATCH v3 03/19] eal: " Tyler Retzlaff
2023-10-26 0:31 ` [PATCH v3 04/19] eventdev: " Tyler Retzlaff
2023-10-26 0:31 ` [PATCH v3 05/19] gpudev: " Tyler Retzlaff
2023-10-26 0:31 ` [PATCH v3 06/19] ipsec: " Tyler Retzlaff
2023-10-26 15:54 ` [EXT] " Akhil Goyal
2023-10-27 12:59 ` Konstantin Ananyev
2023-10-26 0:31 ` [PATCH v3 07/19] mbuf: " Tyler Retzlaff
2023-10-27 13:03 ` Konstantin Ananyev
2023-10-26 0:31 ` [PATCH v3 08/19] mempool: " Tyler Retzlaff
2023-10-27 13:01 ` Konstantin Ananyev
2023-10-26 0:31 ` [PATCH v3 09/19] rcu: " Tyler Retzlaff
2023-10-26 0:31 ` [PATCH v3 10/19] pdump: " Tyler Retzlaff
2023-10-26 0:31 ` [PATCH v3 11/19] stack: " Tyler Retzlaff
2023-10-26 0:31 ` [PATCH v3 12/19] telemetry: " Tyler Retzlaff
2023-10-26 0:31 ` [PATCH v3 13/19] vhost: " Tyler Retzlaff
2023-10-26 11:57 ` Maxime Coquelin
2023-10-26 0:31 ` [PATCH v3 14/19] cryptodev: " Tyler Retzlaff
2023-10-26 15:53 ` [EXT] " Akhil Goyal
2023-10-27 13:05 ` Konstantin Ananyev
2023-10-26 0:31 ` [PATCH v3 15/19] distributor: " Tyler Retzlaff
2023-10-26 0:31 ` [PATCH v3 16/19] ethdev: " Tyler Retzlaff
2023-10-27 13:04 ` Konstantin Ananyev
2023-10-26 0:31 ` [PATCH v3 17/19] hash: " Tyler Retzlaff
2023-10-26 0:31 ` [PATCH v3 18/19] timer: " Tyler Retzlaff
2023-10-26 0:31 ` [PATCH v3 19/19] ring: " Tyler Retzlaff
2023-10-27 12:58 ` Konstantin Ananyev
2023-10-26 13:47 ` [PATCH v3 00/19] " David Marchand
2023-10-30 15:34 ` David Marchand
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=1697497745-20664-19-git-send-email-roretzla@linux.microsoft.com \
--to=roretzla@linux.microsoft.com \
--cc=anatoly.burakov@intel.com \
--cc=andrew.rybchenko@oktetlabs.ru \
--cc=bruce.richardson@intel.com \
--cc=chenbo.xia@intel.com \
--cc=ciara.power@intel.com \
--cc=david.hunt@intel.com \
--cc=dev@dpdk.org \
--cc=dmitry.kozliuk@gmail.com \
--cc=dmitrym@microsoft.com \
--cc=drc@linux.vnet.ibm.com \
--cc=eagostini@nvidia.com \
--cc=erik.g.carrillo@intel.com \
--cc=fanzhang.oss@gmail.com \
--cc=ferruh.yigit@amd.com \
--cc=gakhil@marvell.com \
--cc=harry.van.haaren@intel.com \
--cc=hkalra@marvell.com \
--cc=honnappa.nagarahalli@arm.com \
--cc=jerinj@marvell.com \
--cc=konstantin.v.ananyev@yandex.ru \
--cc=matan@nvidia.com \
--cc=maxime.coquelin@redhat.com \
--cc=navasile@linux.microsoft.com \
--cc=nicolas.chautru@intel.com \
--cc=olivier.matz@6wind.com \
--cc=orika@nvidia.com \
--cc=pallavi.kadam@intel.com \
--cc=pbhagavatula@marvell.com \
--cc=reshma.pattan@intel.com \
--cc=sameh.gobriel@intel.com \
--cc=sivaprasad.tummala@amd.com \
--cc=skori@marvell.com \
--cc=stephen@networkplumber.org \
--cc=sthotton@marvell.com \
--cc=suanmingm@nvidia.com \
--cc=thomas@monjalon.net \
--cc=viacheslavo@nvidia.com \
--cc=vladimir.medvedkin@intel.com \
--cc=yipeng1.wang@intel.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).