DPDK patches and discussions
 help / color / mirror / Atom feed
From: Thomas Monjalon <thomas@monjalon.net>
To: dev@dpdk.org
Cc: Stephen Hemminger <stephen@networkplumber.org>,
	David Marchand <david.marchand@redhat.com>,
	Andrew Rybchenko <andrew.rybchenko@oktetlabs.ru>,
	Haiyue Wang <haiyue.wang@intel.com>,
	Honnappa Nagarahalli <honnappa.nagarahalli@arm.com>,
	Jerin Jacob <jerinj@marvell.com>,
	Ferruh Yigit <ferruh.yigit@intel.com>,
	Elena Agostini <eagostini@nvidia.com>,
	Ray Kinsella <mdr@ashroe.eu>,
	Anatoly Burakov <anatoly.burakov@intel.com>
Subject: [dpdk-dev] [RFC PATCH v2 4/7] hcdev: support multi-process
Date: Fri, 30 Jul 2021 15:55:30 +0200	[thread overview]
Message-ID: <20210730135533.417611-5-thomas@monjalon.net> (raw)
In-Reply-To: <20210730135533.417611-1-thomas@monjalon.net>

The device data shared between processes is moved into a struct
allocated in shared memory (a new memzone for all hcdevs).
The main struct rte_hcdev references the shared memory
via the pointer mpshared.

The API function rte_hcdev_attach() is added to attach a device
from a secondary process.
The function rte_hcdev_allocate() can be used only by the primary process.

Signed-off-by: Thomas Monjalon <thomas@monjalon.net>
---
 lib/hcdev/hcdev.c        | 114 ++++++++++++++++++++++++++++++++-------
 lib/hcdev/hcdev_driver.h |  23 ++++++--
 lib/hcdev/rte_hcdev.h    |   3 +-
 lib/hcdev/version.map    |   1 +
 4 files changed, 115 insertions(+), 26 deletions(-)

diff --git a/lib/hcdev/hcdev.c b/lib/hcdev/hcdev.c
index d40010749a..a7badd122b 100644
--- a/lib/hcdev/hcdev.c
+++ b/lib/hcdev/hcdev.c
@@ -5,6 +5,7 @@
 #include <rte_eal.h>
 #include <rte_tailq.h>
 #include <rte_string_fns.h>
+#include <rte_memzone.h>
 #include <rte_errno.h>
 #include <rte_log.h>
 
@@ -28,6 +29,12 @@ static int16_t hcdev_max;
 /* Number of currently valid devices */
 static int16_t hcdev_count;
 
+/* Shared memory between processes. */
+static const char *HCDEV_MEMZONE = "rte_hcdev_shared";
+static struct {
+	__extension__ struct rte_hcdev_mpshared hcdevs[0];
+} *hcdev_shared_mem;
+
 /* Event callback object */
 struct rte_hcdev_callback {
 	TAILQ_ENTRY(rte_hcdev_callback) next;
@@ -40,6 +47,8 @@ static void hcdev_free_callbacks(struct rte_hcdev *dev);
 int
 rte_hcdev_init(size_t dev_max)
 {
+	const struct rte_memzone *memzone;
+
 	if (dev_max == 0 || dev_max > INT16_MAX) {
 		HCDEV_LOG(ERR, "invalid array size");
 		rte_errno = EINVAL;
@@ -60,6 +69,23 @@ rte_hcdev_init(size_t dev_max)
 		return -rte_errno;
 	}
 
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+		memzone = rte_memzone_reserve(HCDEV_MEMZONE,
+				sizeof(*hcdev_shared_mem) +
+				sizeof(*hcdev_shared_mem->hcdevs) * dev_max,
+				SOCKET_ID_ANY, 0);
+	} else {
+		memzone = rte_memzone_lookup(HCDEV_MEMZONE);
+	}
+	if (memzone == NULL) {
+		HCDEV_LOG(ERR, "cannot initialize shared memory");
+		free(hcdevs);
+		hcdevs = NULL;
+		rte_errno = ENOMEM;
+		return -rte_errno;
+	}
+	hcdev_shared_mem = memzone->addr;
+
 	hcdev_max = dev_max;
 	return 0;
 }
@@ -74,7 +100,7 @@ bool
 rte_hcdev_is_valid(int16_t dev_id)
 {
 	if (dev_id >= 0 && dev_id < hcdev_max &&
-		hcdevs[dev_id].state == RTE_HCDEV_STATE_INITIALIZED)
+		hcdevs[dev_id].process_state == RTE_HCDEV_STATE_INITIALIZED)
 		return true;
 	return false;
 }
@@ -84,7 +110,7 @@ hcdev_match_parent(int16_t dev_id, int16_t parent)
 {
 	if (parent == RTE_HCDEV_ID_ANY)
 		return true;
-	return hcdevs[dev_id].info.parent == parent;
+	return hcdevs[dev_id].mpshared->info.parent == parent;
 }
 
 int16_t
@@ -93,7 +119,7 @@ rte_hcdev_find_next(int16_t dev_id, int16_t parent)
 	if (dev_id < 0)
 		dev_id = 0;
 	while (dev_id < hcdev_max &&
-			(hcdevs[dev_id].state == RTE_HCDEV_STATE_UNUSED ||
+			(hcdevs[dev_id].process_state == RTE_HCDEV_STATE_UNUSED ||
 			!hcdev_match_parent(dev_id, parent)))
 		dev_id++;
 
@@ -108,7 +134,7 @@ hcdev_find_free_id(void)
 	int16_t dev_id;
 
 	for (dev_id = 0; dev_id < hcdev_max; dev_id++) {
-		if (hcdevs[dev_id].state == RTE_HCDEV_STATE_UNUSED)
+		if (hcdevs[dev_id].process_state == RTE_HCDEV_STATE_UNUSED)
 			return dev_id;
 	}
 	return RTE_HCDEV_ID_NONE;
@@ -135,7 +161,7 @@ rte_hcdev_get_by_name(const char *name)
 
 	RTE_HCDEV_FOREACH(dev_id) {
 		dev = &hcdevs[dev_id];
-		if (strncmp(name, dev->name, RTE_DEV_NAME_MAX_LEN) == 0)
+		if (strncmp(name, dev->mpshared->name, RTE_DEV_NAME_MAX_LEN) == 0)
 			return dev;
 	}
 	return NULL;
@@ -177,16 +203,20 @@ rte_hcdev_allocate(const char *name)
 	dev = &hcdevs[dev_id];
 	memset(dev, 0, sizeof(*dev));
 
-	if (rte_strscpy(dev->name, name, RTE_DEV_NAME_MAX_LEN) < 0) {
+	dev->mpshared = &hcdev_shared_mem->hcdevs[dev_id];
+	memset(dev->mpshared, 0, sizeof(*dev->mpshared));
+
+	if (rte_strscpy(dev->mpshared->name, name, RTE_DEV_NAME_MAX_LEN) < 0) {
 		HCDEV_LOG(ERR, "device name too long: %s", name);
 		rte_errno = ENAMETOOLONG;
 		return NULL;
 	}
-	dev->info.name = dev->name;
-	dev->info.dev_id = dev_id;
-	dev->info.numa_node = -1;
-	dev->info.parent = RTE_HCDEV_ID_NONE;
+	dev->mpshared->info.name = dev->mpshared->name;
+	dev->mpshared->info.dev_id = dev_id;
+	dev->mpshared->info.numa_node = -1;
+	dev->mpshared->info.parent = RTE_HCDEV_ID_NONE;
 	TAILQ_INIT(&dev->callbacks);
+	__atomic_fetch_add(&dev->mpshared->process_refcnt, 1, __ATOMIC_RELAXED);
 
 	hcdev_count++;
 	HCDEV_LOG(DEBUG, "new device %s (id %d) of total %d",
@@ -194,6 +224,51 @@ rte_hcdev_allocate(const char *name)
 	return dev;
 }
 
+struct rte_hcdev *
+rte_hcdev_attach(const char *name)
+{
+	int16_t dev_id;
+	struct rte_hcdev *dev;
+	struct rte_hcdev_mpshared *shared_dev;
+
+	if (rte_eal_process_type() != RTE_PROC_SECONDARY) {
+		HCDEV_LOG(ERR, "only secondary process can attach device");
+		rte_errno = EPERM;
+		return NULL;
+	}
+	if (name == NULL) {
+		HCDEV_LOG(ERR, "attach device without a name");
+		rte_errno = EINVAL;
+		return NULL;
+	}
+
+	/* implicit initialization of library before adding first device */
+	if (hcdevs == NULL && rte_hcdev_init(RTE_HCDEV_DEFAULT_MAX) < 0)
+		return NULL;
+
+	for (dev_id = 0; dev_id < hcdev_max; dev_id++) {
+		shared_dev = &hcdev_shared_mem->hcdevs[dev_id];
+		if (strncmp(name, shared_dev->name, RTE_DEV_NAME_MAX_LEN) == 0)
+			break;
+	}
+	if (dev_id >= hcdev_max) {
+		HCDEV_LOG(ERR, "device with name %s not found", name);
+		rte_errno = ENOENT;
+		return NULL;
+	}
+	dev = &hcdevs[dev_id];
+	memset(dev, 0, sizeof(*dev));
+
+	TAILQ_INIT(&dev->callbacks);
+	dev->mpshared = shared_dev;
+	__atomic_fetch_add(&dev->mpshared->process_refcnt, 1, __ATOMIC_RELAXED);
+
+	hcdev_count++;
+	HCDEV_LOG(DEBUG, "attached device %s (id %d) of total %d",
+			name, dev_id, hcdev_count);
+	return dev;
+}
+
 int16_t
 rte_hcdev_add_child(const char *name, int16_t parent, uint64_t child_context)
 {
@@ -209,11 +284,11 @@ rte_hcdev_add_child(const char *name, int16_t parent, uint64_t child_context)
 	if (dev == NULL)
 		return -rte_errno;
 
-	dev->info.parent = parent;
-	dev->info.context = child_context;
+	dev->mpshared->info.parent = parent;
+	dev->mpshared->info.context = child_context;
 
 	rte_hcdev_complete_new(dev);
-	return dev->info.dev_id;
+	return dev->mpshared->info.dev_id;
 }
 
 void
@@ -222,7 +297,7 @@ rte_hcdev_complete_new(struct rte_hcdev *dev)
 	if (dev == NULL)
 		return;
 
-	dev->state = RTE_HCDEV_STATE_INITIALIZED;
+	dev->process_state = RTE_HCDEV_STATE_INITIALIZED;
 	rte_hcdev_notify(dev, RTE_HCDEV_EVENT_NEW);
 }
 
@@ -235,7 +310,7 @@ rte_hcdev_release(struct rte_hcdev *dev)
 		rte_errno = ENODEV;
 		return -rte_errno;
 	}
-	dev_id = dev->info.dev_id;
+	dev_id = dev->mpshared->info.dev_id;
 	RTE_HCDEV_FOREACH_CHILD(child, dev_id) {
 		HCDEV_LOG(ERR, "cannot release device %d with child %d",
 				dev_id, child);
@@ -244,11 +319,12 @@ rte_hcdev_release(struct rte_hcdev *dev)
 	}
 
 	HCDEV_LOG(DEBUG, "free device %s (id %d)",
-			dev->info.name, dev->info.dev_id);
+			dev->mpshared->info.name, dev->mpshared->info.dev_id);
 	rte_hcdev_notify(dev, RTE_HCDEV_EVENT_DEL);
 
 	hcdev_free_callbacks(dev);
-	dev->state = RTE_HCDEV_STATE_UNUSED;
+	dev->process_state = RTE_HCDEV_STATE_UNUSED;
+	__atomic_fetch_sub(&dev->mpshared->process_refcnt, 1, __ATOMIC_RELAXED);
 	hcdev_count--;
 
 	return 0;
@@ -394,7 +470,7 @@ rte_hcdev_notify(struct rte_hcdev *dev, enum rte_hcdev_event event)
 	int16_t dev_id;
 	struct rte_hcdev_callback *callback;
 
-	dev_id = dev->info.dev_id;
+	dev_id = dev->mpshared->info.dev_id;
 	TAILQ_FOREACH(callback, &dev->callbacks, next) {
 		if (callback->event != event || callback->function == NULL)
 			continue;
@@ -420,7 +496,7 @@ rte_hcdev_info_get(int16_t dev_id, struct rte_hcdev_info *info)
 	}
 
 	if (dev->ops.dev_info_get == NULL) {
-		*info = dev->info;
+		*info = dev->mpshared->info;
 		return 0;
 	}
 	return HCDEV_DRV_RET(dev->ops.dev_info_get(dev, info));
diff --git a/lib/hcdev/hcdev_driver.h b/lib/hcdev/hcdev_driver.h
index 39f6fc57ab..f33b56947b 100644
--- a/lib/hcdev/hcdev_driver.h
+++ b/lib/hcdev/hcdev_driver.h
@@ -35,19 +35,28 @@ struct rte_hcdev_ops {
 	rte_hcdev_close_t *dev_close;
 };
 
-struct rte_hcdev {
-	/* Backing device. */
-	struct rte_device *device;
+struct rte_hcdev_mpshared {
 	/* Unique identifier name. */
 	char name[RTE_DEV_NAME_MAX_LEN]; /* Updated by this library. */
+	/* Driver-specific private data shared in multi-process. */
+	void *dev_private;
 	/* Device info structure. */
 	struct rte_hcdev_info info;
+	/* Counter of processes using the device. */
+	uint16_t process_refcnt; /* Updated by this library. */
+};
+
+struct rte_hcdev {
+	/* Backing device. */
+	struct rte_device *device;
+	/* Data shared between processes. */
+	struct rte_hcdev_mpshared *mpshared;
 	/* Driver functions. */
 	struct rte_hcdev_ops ops;
 	/* Event callback list. */
 	TAILQ_HEAD(rte_hcdev_callback_list, rte_hcdev_callback) callbacks;
 	/* Current state (used or not) in the running process. */
-	enum rte_hcdev_state state; /* Updated by this library. */
+	enum rte_hcdev_state process_state; /* Updated by this library. */
 	/* Driver-specific private data for the running process. */
 	void *process_private;
 } __rte_cache_aligned;
@@ -55,10 +64,14 @@ struct rte_hcdev {
 __rte_internal
 struct rte_hcdev *rte_hcdev_get_by_name(const char *name);
 
-/* First step of initialization */
+/* First step of initialization in primary process. */
 __rte_internal
 struct rte_hcdev *rte_hcdev_allocate(const char *name);
 
+/* First step of initialization in secondary process. */
+__rte_internal
+struct rte_hcdev *rte_hcdev_attach(const char *name);
+
 /* Last step of initialization. */
 __rte_internal
 void rte_hcdev_complete_new(struct rte_hcdev *dev);
diff --git a/lib/hcdev/rte_hcdev.h b/lib/hcdev/rte_hcdev.h
index 518020fd2f..c95f37063d 100644
--- a/lib/hcdev/rte_hcdev.h
+++ b/lib/hcdev/rte_hcdev.h
@@ -15,9 +15,8 @@
  * @file
  * Generic library to interact with heterogeneous computing device.
  *
- * The API is not thread-safe.
- * Device management must be done by a single thread.
  * TODO device rwlock for callback list
+ * TODO mp shared rwlock for device array
  *
  * @warning
  * @b EXPERIMENTAL: this API may change without prior notice.
diff --git a/lib/hcdev/version.map b/lib/hcdev/version.map
index 6d1a1ab1c9..450c256527 100644
--- a/lib/hcdev/version.map
+++ b/lib/hcdev/version.map
@@ -17,6 +17,7 @@ INTERNAL {
 	global:
 
 	rte_hcdev_allocate;
+	rte_hcdev_attach;
 	rte_hcdev_complete_new;
 	rte_hcdev_get_by_name;
 	rte_hcdev_notify;
-- 
2.31.1


  parent reply	other threads:[~2021-07-30 13:56 UTC|newest]

Thread overview: 128+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2021-06-02 20:35 [dpdk-dev] [PATCH] gpudev: introduce memory API Thomas Monjalon
2021-06-02 20:46 ` Stephen Hemminger
2021-06-02 20:48   ` Thomas Monjalon
2021-06-03  7:06 ` Andrew Rybchenko
2021-06-03  7:26   ` Thomas Monjalon
2021-06-03  7:49     ` Andrew Rybchenko
2021-06-03  8:26       ` Thomas Monjalon
2021-06-03  8:57         ` Andrew Rybchenko
2021-06-03  7:18 ` David Marchand
2021-06-03  7:30   ` Thomas Monjalon
2021-06-03  7:47 ` Jerin Jacob
2021-06-03  8:28   ` Thomas Monjalon
2021-06-03  8:41     ` Jerin Jacob
2021-06-03  8:43       ` Thomas Monjalon
2021-06-03  8:47         ` Jerin Jacob
2021-06-03  8:53           ` Thomas Monjalon
2021-06-03  9:20             ` Jerin Jacob
2021-06-03  9:36               ` Thomas Monjalon
2021-06-03 10:04                 ` Jerin Jacob
2021-06-03 10:30                   ` Thomas Monjalon
2021-06-03 11:38                     ` Jerin Jacob
2021-06-04 12:55                       ` Thomas Monjalon
2021-06-04 15:05                         ` Jerin Jacob
2021-06-03  9:33   ` Ferruh Yigit
2021-06-04 10:28     ` Thomas Monjalon
2021-06-04 11:09       ` Jerin Jacob
2021-06-04 12:46         ` Thomas Monjalon
2021-06-04 13:05           ` Andrew Rybchenko
2021-06-04 13:18             ` Thomas Monjalon
2021-06-04 13:59               ` Andrew Rybchenko
2021-06-04 14:09                 ` Thomas Monjalon
2021-06-04 15:20                   ` Jerin Jacob
2021-06-04 15:51                     ` Thomas Monjalon
2021-06-04 18:20                       ` Wang, Haiyue
2021-06-05  5:09                         ` Jerin Jacob
2021-06-06  1:13                           ` Honnappa Nagarahalli
2021-06-06  5:28                             ` Jerin Jacob
2021-06-07 10:29                               ` Thomas Monjalon
2021-06-07  7:20                             ` Wang, Haiyue
2021-06-07 10:43                               ` Thomas Monjalon
2021-06-07 13:54                                 ` Jerin Jacob
2021-06-07 16:47                                   ` Thomas Monjalon
2021-06-08  4:10                                     ` Jerin Jacob
2021-06-08  6:34                                       ` Thomas Monjalon
2021-06-08  7:09                                         ` Jerin Jacob
2021-06-08  7:32                                           ` Thomas Monjalon
2021-06-15 18:24                                         ` Ferruh Yigit
2021-06-15 18:54                                           ` Thomas Monjalon
2021-06-07 23:31                                   ` Honnappa Nagarahalli
2021-06-04  5:51 ` Wang, Haiyue
2021-06-04  8:15   ` Thomas Monjalon
2021-06-04 11:07 ` Wang, Haiyue
2021-06-04 12:43   ` Thomas Monjalon
2021-06-04 13:25     ` Wang, Haiyue
2021-06-04 14:06       ` Thomas Monjalon
2021-06-04 18:04         ` Wang, Haiyue
2021-06-05  7:49           ` Thomas Monjalon
2021-06-05 11:09             ` Wang, Haiyue
2021-06-06  1:10 ` Honnappa Nagarahalli
2021-06-07 10:50   ` Thomas Monjalon
2021-07-30 13:55 ` [dpdk-dev] [RFC PATCH v2 0/7] heterogeneous computing library Thomas Monjalon
2021-07-30 13:55   ` [dpdk-dev] [RFC PATCH v2 1/7] hcdev: introduce heterogeneous computing device library Thomas Monjalon
2021-07-30 13:55   ` [dpdk-dev] [RFC PATCH v2 2/7] hcdev: add event notification Thomas Monjalon
2021-07-30 13:55   ` [dpdk-dev] [RFC PATCH v2 3/7] hcdev: add child device representing a device context Thomas Monjalon
2021-07-30 13:55   ` Thomas Monjalon [this message]
2021-07-30 13:55   ` [dpdk-dev] [RFC PATCH v2 5/7] hcdev: add memory API Thomas Monjalon
2021-07-30 13:55   ` [dpdk-dev] [RFC PATCH v2 6/7] hcdev: add communication flag Thomas Monjalon
2021-07-30 13:55   ` [dpdk-dev] [RFC PATCH v2 7/7] hcdev: add communication list Thomas Monjalon
2021-07-31  7:06   ` [dpdk-dev] [RFC PATCH v2 0/7] heterogeneous computing library Jerin Jacob
2021-07-31  8:21     ` Thomas Monjalon
2021-07-31 13:42       ` Jerin Jacob
2021-08-27  9:44         ` Thomas Monjalon
2021-08-27 12:19           ` Jerin Jacob
2021-08-29  5:32             ` Wang, Haiyue
2021-09-01 15:35               ` Elena Agostini
2021-09-02 13:12                 ` Jerin Jacob
2021-09-06 16:11                   ` Elena Agostini
2021-09-06 17:15                     ` Wang, Haiyue
2021-09-06 17:22                       ` Elena Agostini
2021-09-07  0:55                         ` Wang, Haiyue
2021-10-09  1:53 ` [dpdk-dev] [PATCH v3 0/9] GPU library eagostini
2021-10-09  1:53   ` [dpdk-dev] [PATCH v3 1/9] gpudev: introduce GPU device class library eagostini
2021-10-09  1:53   ` [dpdk-dev] [PATCH v3 2/9] gpudev: add event notification eagostini
2021-10-09  1:53   ` [dpdk-dev] [PATCH v3 3/9] gpudev: add child device representing a device context eagostini
2021-10-09  1:53   ` [dpdk-dev] [PATCH v3 4/9] gpudev: support multi-process eagostini
2021-10-09  1:53   ` [dpdk-dev] [PATCH v3 5/9] gpudev: add memory API eagostini
2021-10-08 20:18     ` Thomas Monjalon
2021-10-29 19:38     ` Mattias Rönnblom
2021-11-08 15:16       ` Elena Agostini
2021-10-09  1:53   ` [dpdk-dev] [PATCH v3 6/9] gpudev: add memory barrier eagostini
2021-10-08 20:16     ` Thomas Monjalon
2021-10-09  1:53   ` [dpdk-dev] [PATCH v3 7/9] gpudev: add communication flag eagostini
2021-10-09  1:53   ` [dpdk-dev] [PATCH v3 8/9] gpudev: add communication list eagostini
2021-10-09  1:53   ` [dpdk-dev] [PATCH v3 9/9] doc: add CUDA example in GPU guide eagostini
2021-10-10 10:16   ` [dpdk-dev] [PATCH v3 0/9] GPU library Jerin Jacob
2021-10-11  8:18     ` Thomas Monjalon
2021-10-11  8:43       ` Jerin Jacob
2021-10-11  9:12         ` Thomas Monjalon
2021-10-11  9:29           ` Jerin Jacob
2021-10-11 10:27             ` Thomas Monjalon
2021-10-11 11:41               ` Jerin Jacob
2021-10-11 12:44                 ` Thomas Monjalon
2021-10-11 13:30                   ` Jerin Jacob
2021-10-19 10:00                     ` Elena Agostini
2021-10-19 18:47                       ` Jerin Jacob
2021-10-19 19:11                         ` Thomas Monjalon
2021-10-19 19:56                           ` [dpdk-dev] [EXT] " Jerin Jacob Kollanukkaran
2021-11-03 19:15 ` [dpdk-dev] [PATCH v4 " eagostini
2021-11-03 19:15   ` [dpdk-dev] [PATCH v4 1/9] gpudev: introduce GPU device class library eagostini
2021-11-03 19:15   ` [dpdk-dev] [PATCH v4 2/9] gpudev: add event notification eagostini
2021-11-03 19:15   ` [dpdk-dev] [PATCH v4 3/9] gpudev: add child device representing a device context eagostini
2021-11-03 19:15   ` [dpdk-dev] [PATCH v4 4/9] gpudev: support multi-process eagostini
2021-11-03 19:15   ` [dpdk-dev] [PATCH v4 5/9] gpudev: add memory API eagostini
2021-11-03 19:15   ` [dpdk-dev] [PATCH v4 6/9] gpudev: add memory barrier eagostini
2021-11-03 19:15   ` [dpdk-dev] [PATCH v4 7/9] gpudev: add communication flag eagostini
2021-11-03 19:15   ` [dpdk-dev] [PATCH v4 8/9] gpudev: add communication list eagostini
2021-11-03 19:15   ` [dpdk-dev] [PATCH v4 9/9] doc: add CUDA example in GPU guide eagostini
2021-11-08 18:57 ` [dpdk-dev] [PATCH v5 0/9] GPU library eagostini
2021-11-08 16:25   ` Thomas Monjalon
2021-11-08 18:57   ` [dpdk-dev] [PATCH v5 1/9] gpudev: introduce GPU device class library eagostini
2021-11-08 18:57   ` [dpdk-dev] [PATCH v5 2/9] gpudev: add event notification eagostini
2021-11-08 18:57   ` [dpdk-dev] [PATCH v5 3/9] gpudev: add child device representing a device context eagostini
2021-11-08 18:58   ` [dpdk-dev] [PATCH v5 4/9] gpudev: support multi-process eagostini
2021-11-08 18:58   ` [dpdk-dev] [PATCH v5 5/9] gpudev: add memory API eagostini
2021-11-08 18:58   ` [dpdk-dev] [PATCH v5 6/9] gpudev: add memory barrier eagostini
2021-11-08 18:58   ` [dpdk-dev] [PATCH v5 7/9] gpudev: add communication flag eagostini
2021-11-08 18:58   ` [dpdk-dev] [PATCH v5 8/9] gpudev: add communication list eagostini
2021-11-08 18:58   ` [dpdk-dev] [PATCH v5 9/9] doc: add CUDA example in GPU guide eagostini

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20210730135533.417611-5-thomas@monjalon.net \
    --to=thomas@monjalon.net \
    --cc=anatoly.burakov@intel.com \
    --cc=andrew.rybchenko@oktetlabs.ru \
    --cc=david.marchand@redhat.com \
    --cc=dev@dpdk.org \
    --cc=eagostini@nvidia.com \
    --cc=ferruh.yigit@intel.com \
    --cc=haiyue.wang@intel.com \
    --cc=honnappa.nagarahalli@arm.com \
    --cc=jerinj@marvell.com \
    --cc=mdr@ashroe.eu \
    --cc=stephen@networkplumber.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).