DPDK patches and discussions
From: <eagostini@nvidia.com>
To: <dev@dpdk.org>
Cc: Elena Agostini <eagostini@nvidia.com>
Subject: [PATCH v1 1/1] gpu/cuda: expose GPU memory with GDRCopy
Date: Tue, 11 Jan 2022 17:39:29 +0000
Message-ID: <20220111173929.28746-2-eagostini@nvidia.com>
In-Reply-To: <20220111173929.28746-1-eagostini@nvidia.com>

From: Elena Agostini <eagostini@nvidia.com>

GPU CUDA implementation of the new gpudev functions
to expose GPU memory to the CPU.

Today, the GDRCopy library is required to pin and DMA map the
GPU memory through the GPU BAR1 and expose it to the CPU.

The goal here is to hide the technical details of the GDRCopy
library and expose the functionality through the generic
gpudev layer.

GDRCopy can be found here: https://github.com/NVIDIA/gdrcopy

To build the GPU CUDA driver with GDRCopy support, build DPDK
pointing to the gdrapi.h header file with
-Dc_args="-I/path/to/gdrapi/".

At runtime, indicate the path to the libgdrapi.so library with
the environment variable
GDRCOPY_PATH_L=/path/to/gdrcopy/lib/
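
As a rough usage sketch, an application drives this feature through
the generic gpudev layer. The rte_gpu_mem_expose() and
rte_gpu_mem_unexpose() names and signatures below are assumed from
the dependency gpudev patch (see the link below); only
rte_gpu_mem_alloc() and rte_gpu_mem_free() are existing gpudev
functions:

	#include <rte_gpudev.h>
	#include <rte_errno.h>

	/* Hypothetical example: expose a GPU buffer to the CPU. */
	static int
	gpu_expose_example(int16_t dev_id, size_t size)
	{
		void *gpu_ptr, *cpu_ptr;

		/* Allocate memory on the GPU. */
		gpu_ptr = rte_gpu_mem_alloc(dev_id, size);
		if (gpu_ptr == NULL)
			return -rte_errno;

		/* Pin and map the GPU buffer through BAR1 for CPU access. */
		cpu_ptr = rte_gpu_mem_expose(dev_id, size, gpu_ptr);
		if (cpu_ptr == NULL) {
			/* e.g. rte_errno == ENOTSUP if built without GDRCopy */
			rte_gpu_mem_free(dev_id, gpu_ptr);
			return -rte_errno;
		}

		/* ... CPU accesses cpu_ptr, GPU kernels use gpu_ptr ... */

		rte_gpu_mem_unexpose(dev_id, gpu_ptr);
		rte_gpu_mem_free(dev_id, gpu_ptr);
		return 0;
	}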

If the GPU CUDA driver is built without GDRCopy, the GPU memory
expose functionality is not supported by the driver.

This is an independent feature: all other GPU CUDA driver
capabilities are unaffected if GDRCopy is not built in.

Signed-off-by: Elena Agostini <eagostini@nvidia.com>

---
Dependency on https://patches.dpdk.org/project/dpdk/patch/20220108000457.31104-1-eagostini@nvidia.com/
---
 drivers/gpu/cuda/cuda.c      | 101 +++++++++++++++++++++++++
 drivers/gpu/cuda/gdrcopy.c   | 139 +++++++++++++++++++++++++++++++++++
 drivers/gpu/cuda/gdrcopy.h   |  29 ++++++++
 drivers/gpu/cuda/meson.build |   6 +-
 4 files changed, 274 insertions(+), 1 deletion(-)
 create mode 100644 drivers/gpu/cuda/gdrcopy.c
 create mode 100644 drivers/gpu/cuda/gdrcopy.h

diff --git a/drivers/gpu/cuda/cuda.c b/drivers/gpu/cuda/cuda.c
index 882df08e56..d66d6b76b9 100644
--- a/drivers/gpu/cuda/cuda.c
+++ b/drivers/gpu/cuda/cuda.c
@@ -17,6 +17,8 @@
 #include <cuda.h>
 #include <cudaTypedefs.h>
 
+#include "gdrcopy.h"
+
 #define CUDA_DRIVER_MIN_VERSION 11040
 #define CUDA_API_MIN_VERSION 3020
 
@@ -52,6 +54,8 @@ static void *cudalib;
 static unsigned int cuda_api_version;
 static int cuda_driver_version;
 
+static gdr_t gdrc_h;
+
 /* NVIDIA GPU vendor */
 #define NVIDIA_GPU_VENDOR_ID (0x10de)
 
@@ -144,6 +148,7 @@ struct mem_entry {
 	struct rte_gpu *dev;
 	CUcontext ctx;
 	cuda_ptr_key pkey;
+	gdr_mh_t mh;
 	enum mem_type mtype;
 	struct mem_entry *prev;
 	struct mem_entry *next;
@@ -943,6 +948,87 @@ cuda_wmb(struct rte_gpu *dev)
 	return 0;
 }
 
+static int
+cuda_mem_expose(struct rte_gpu *dev, size_t size, void *ptr_in, void **ptr_out)
+{
+	struct mem_entry *mem_item;
+	cuda_ptr_key hk;
+
+	if (dev == NULL)
+		return -ENODEV;
+
+	if (gdrc_h == NULL) {
+		rte_cuda_log(ERR, "GDRCopy not built or loaded. Can't expose GPU memory.");
+		rte_errno = ENOTSUP;
+		return -rte_errno;
+	}
+
+	hk = get_hash_from_ptr((void *)ptr_in);
+
+	mem_item = mem_list_find_item(hk);
+	if (mem_item == NULL) {
+		rte_cuda_log(ERR, "Memory address 0x%p not found in driver memory.", ptr_in);
+		rte_errno = EPERM;
+		return -rte_errno;
+	}
+
+	if (mem_item->mtype != GPU_MEM) {
+		rte_cuda_log(ERR, "Memory address 0x%p is not GPU memory type.", ptr_in);
+		rte_errno = EPERM;
+		return -rte_errno;
+	}
+
+	if (mem_item->size != size)
+		rte_cuda_log(WARNING,
+				"Requested size (%zu) differs from original allocation size (%zu); exposing the whole allocated area.",
+				size, mem_item->size);
+
+	if (gdrcopy_pin(gdrc_h, &(mem_item->mh), (uint64_t)mem_item->ptr_d,
+			mem_item->size, &(mem_item->ptr_h))) {
+		rte_cuda_log(ERR, "Error exposing GPU memory address 0x%p.", ptr_in);
+		rte_errno = EPERM;
+		return -rte_errno;
+	}
+
+	*ptr_out = mem_item->ptr_h;
+
+	return 0;
+}
+
+static int
+cuda_mem_unexpose(struct rte_gpu *dev, void *ptr_in)
+{
+	struct mem_entry *mem_item;
+	cuda_ptr_key hk;
+
+	if (dev == NULL)
+		return -ENODEV;
+
+	if (gdrc_h == NULL) {
+		rte_cuda_log(ERR, "GDRCopy not built or loaded. Can't unexpose GPU memory.");
+		rte_errno = ENOTSUP;
+		return -rte_errno;
+	}
+
+	hk = get_hash_from_ptr((void *)ptr_in);
+
+	mem_item = mem_list_find_item(hk);
+	if (mem_item == NULL) {
+		rte_cuda_log(ERR, "Memory address 0x%p not found in driver memory.", ptr_in);
+		rte_errno = EPERM;
+		return -rte_errno;
+	}
+
+	if (gdrcopy_unpin(gdrc_h, mem_item->mh, (void *)mem_item->ptr_d,
+			mem_item->size)) {
+		rte_cuda_log(ERR, "Error unexposing GPU memory address 0x%p.", ptr_in);
+		rte_errno = EPERM;
+		return -rte_errno;
+	}
+
+	return 0;
+}
+
 static int
 cuda_gpu_probe(__rte_unused struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
 {
@@ -1018,6 +1104,19 @@ cuda_gpu_probe(__rte_unused struct rte_pci_driver *pci_drv, struct rte_pci_devic
 			rte_errno = ENOTSUP;
 			return -rte_errno;
 		}
+
+		gdrc_h = NULL;
+
+#ifdef DRIVERS_GPU_CUDA_GDRCOPY_H
+		if (gdrcopy_loader())
+			rte_cuda_log(ERR, "GDRCopy shared library not found.");
+		else if (gdrcopy_open(&gdrc_h))
+			rte_cuda_log(ERR,
+					"GDRCopy handle can't be created. Is the gdrdrv kernel module installed and loaded?");
+#endif
 	}
 
 	/* Fill HW specific part of device structure */
@@ -1160,6 +1259,8 @@ cuda_gpu_probe(__rte_unused struct rte_pci_driver *pci_drv, struct rte_pci_devic
 	dev->ops.mem_free = cuda_mem_free;
 	dev->ops.mem_register = cuda_mem_register;
 	dev->ops.mem_unregister = cuda_mem_unregister;
+	dev->ops.mem_expose = cuda_mem_expose;
+	dev->ops.mem_unexpose = cuda_mem_unexpose;
 	dev->ops.wmb = cuda_wmb;
 
 	rte_gpu_complete_new(dev);
diff --git a/drivers/gpu/cuda/gdrcopy.c b/drivers/gpu/cuda/gdrcopy.c
new file mode 100644
index 0000000000..1dc6b676e5
--- /dev/null
+++ b/drivers/gpu/cuda/gdrcopy.c
@@ -0,0 +1,139 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2022 NVIDIA Corporation & Affiliates
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "gdrcopy.h"
+
+static void *gdrclib;
+
+static gdr_t (*sym_gdr_open)(void);
+static int (*sym_gdr_close)(gdr_t g);
+static int (*sym_gdr_pin_buffer)(gdr_t g, unsigned long addr, size_t size, uint64_t p2p_token, uint32_t va_space, gdr_mh_t *handle);
+static int (*sym_gdr_unpin_buffer)(gdr_t g, gdr_mh_t handle);
+static int (*sym_gdr_map)(gdr_t g, gdr_mh_t handle, void **va, size_t size);
+static int (*sym_gdr_unmap)(gdr_t g, gdr_mh_t handle, void *va, size_t size);
+
+int
+gdrcopy_loader(void)
+{
+	char gdrcopy_path[1024];
+	const char *gdrcopy_dir = getenv("GDRCOPY_PATH_L");
+
+	if (gdrcopy_dir == NULL)
+		snprintf(gdrcopy_path, sizeof(gdrcopy_path), "%s", "libgdrapi.so");
+	else
+		snprintf(gdrcopy_path, sizeof(gdrcopy_path), "%s%s", gdrcopy_dir, "libgdrapi.so");
+
+	gdrclib = dlopen(gdrcopy_path, RTLD_LAZY);
+	if (gdrclib == NULL) {
+		fprintf(stderr, "Failed to load GDRCopy library %s (GDRCOPY_PATH_L=%s)\n",
+				gdrcopy_path, gdrcopy_dir == NULL ? "unset" : gdrcopy_dir);
+		return -1;
+	}
+
+	sym_gdr_open = dlsym(gdrclib, "gdr_open");
+	if (sym_gdr_open == NULL) {
+		fprintf(stderr, "Failed to load GDRCopy symbols\n");
+		return -1;
+	}
+
+	sym_gdr_close = dlsym(gdrclib, "gdr_close");
+	if (sym_gdr_close == NULL) {
+		fprintf(stderr, "Failed to load GDRCopy symbols\n");
+		return -1;
+	}
+
+	sym_gdr_pin_buffer = dlsym(gdrclib, "gdr_pin_buffer");
+	if (sym_gdr_pin_buffer == NULL) {
+		fprintf(stderr, "Failed to load GDRCopy symbols\n");
+		return -1;
+	}
+
+	sym_gdr_unpin_buffer = dlsym(gdrclib, "gdr_unpin_buffer");
+	if (sym_gdr_unpin_buffer == NULL) {
+		fprintf(stderr, "Failed to load GDRCopy symbols\n");
+		return -1;
+	}
+
+	sym_gdr_map = dlsym(gdrclib, "gdr_map");
+	if (sym_gdr_map == NULL) {
+		fprintf(stderr, "Failed to load GDRCopy symbols\n");
+		return -1;
+	}
+
+	sym_gdr_unmap = dlsym(gdrclib, "gdr_unmap");
+	if (sym_gdr_unmap == NULL) {
+		fprintf(stderr, "Failed to load GDRCopy symbols\n");
+		return -1;
+	}
+
+	return 0;
+}
+
+int
+gdrcopy_open(gdr_t *g)
+{
+#ifdef DRIVERS_GPU_CUDA_GDRCOPY_H
+	gdr_t g_;
+
+	g_ = sym_gdr_open();
+	if (!g_)
+		return -1;
+
+	*g = g_;
+#else
+	*g = NULL;
+#endif
+	return 0;
+}
+
+int
+gdrcopy_close(__rte_unused gdr_t *g)
+{
+#ifdef DRIVERS_GPU_CUDA_GDRCOPY_H
+	sym_gdr_close(*g);
+#endif
+	return 0;
+}
+
+int
+gdrcopy_pin(gdr_t g, __rte_unused gdr_mh_t *mh, uint64_t d_addr, size_t size, void **h_addr)
+{
+	if (g == NULL)
+		return -ENOTSUP;
+
+#ifdef DRIVERS_GPU_CUDA_GDRCOPY_H
+	/* Pin the device buffer */
+	if (sym_gdr_pin_buffer(g, d_addr, size, 0, 0, mh) != 0) {
+		fprintf(stderr, "gdr_pin_buffer failed\n");
+		return -1;
+	}
+
+	/* Map the buffer to user space */
+	if (sym_gdr_map(g, *mh, h_addr, size) != 0) {
+		fprintf(stderr, "gdr_map failed\n");
+		sym_gdr_unpin_buffer(g, *mh);
+		return -1;
+	}
+#endif
+	return 0;
+}
+
+int
+gdrcopy_unpin(gdr_t g, __rte_unused gdr_mh_t mh, void *d_addr, size_t size)
+{
+	if (g == NULL)
+		return -ENOTSUP;
+
+#ifdef DRIVERS_GPU_CUDA_GDRCOPY_H
+	/* Unmap the buffer from user space */
+	if (sym_gdr_unmap(g, mh, d_addr, size) != 0)
+		fprintf(stderr, "gdr_unmap failed\n");
+
+	/* Unpin the device buffer */
+	if (sym_gdr_unpin_buffer(g, mh) != 0) {
+		fprintf(stderr, "gdr_unpin_buffer failed\n");
+		return -1;
+	}
+#endif
+	return 0;
+}
diff --git a/drivers/gpu/cuda/gdrcopy.h b/drivers/gpu/cuda/gdrcopy.h
new file mode 100644
index 0000000000..e5c1997731
--- /dev/null
+++ b/drivers/gpu/cuda/gdrcopy.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2022 NVIDIA Corporation & Affiliates
+ */
+
+#ifndef _CUDA_GDRCOPY_H_
+#define _CUDA_GDRCOPY_H_
+
+#include <dlfcn.h>
+
+#include <rte_common.h>
+#include <rte_log.h>
+#include <rte_errno.h>
+
+#ifdef DRIVERS_GPU_CUDA_GDRCOPY_H
+	#include <gdrapi.h>
+#else
+	struct gdr;
+	typedef struct gdr *gdr_t;
+	/* gdr_mh_t must be a complete type: it is embedded by value in struct mem_entry. */
+	typedef struct gdr_mh_s {
+		unsigned long h;
+	} gdr_mh_t;
+#endif
+
+int gdrcopy_loader(void);
+int gdrcopy_open(gdr_t *g);
+int gdrcopy_close(gdr_t *g);
+int gdrcopy_pin(gdr_t g, gdr_mh_t *mh, uint64_t d_addr, size_t size, void **h_addr);
+int gdrcopy_unpin(gdr_t g, gdr_mh_t mh, void *d_addr, size_t size);
+
+#endif
diff --git a/drivers/gpu/cuda/meson.build b/drivers/gpu/cuda/meson.build
index 3fe20929fa..784fa8bf0d 100644
--- a/drivers/gpu/cuda/meson.build
+++ b/drivers/gpu/cuda/meson.build
@@ -17,5 +17,9 @@ if not cc.has_header('cudaTypedefs.h')
         subdir_done()
 endif
 
+if cc.has_header('gdrapi.h')
+        dpdk_conf.set('DRIVERS_GPU_CUDA_GDRCOPY_H', 1)
+endif
+
 deps += ['gpudev', 'pci', 'bus_pci']
-sources = files('cuda.c')
+sources = files('cuda.c', 'gdrcopy.c')
-- 
2.17.1


Thread overview: 8+ messages
2022-01-11 17:39 [PATCH v1 0/1] " eagostini
2022-01-11 17:39 ` eagostini [this message]
2022-02-21 22:44 ` [PATCH v2] gpu/cuda: CPU map " eagostini
2022-02-23 19:44   ` [PATCH v3] " eagostini
2022-02-25  3:12   ` [PATCH v4 1/2] doc/gpus: add cuda.ini into features eagostini
2022-02-25  3:12     ` [PATCH v4 2/2] gpu/cuda: CPU map GPU memory with GDRCopy eagostini
2022-02-27 16:49       ` Thomas Monjalon
2022-02-27 16:48     ` [PATCH v4 1/2] doc/gpus: add cuda.ini into features Thomas Monjalon
