From: <eagostini@nvidia.com>
To: <dev@dpdk.org>
Cc: Elena Agostini <eagostini@nvidia.com>
Subject: [PATCH v2] gpu/cuda: set rte_errno
Date: Wed, 24 Nov 2021 18:34:09 +0000
Message-ID: <20211124183409.11181-1-eagostini@nvidia.com>
In-Reply-To: <20211118204527.26524-1-eagostini@nvidia.com>

From: Elena Agostini <eagostini@nvidia.com>

Set the correct rte_errno variable in gpu/cuda and return
-rte_errno in case of error.

The rte_errno values are compliant with the gpudev library
documentation.
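
As a caller-side illustration only (not part of this patch; the helper
name, its gpu_id parameter and the log text are placeholders), an
application can decode a failed gpudev call through rte_errno:

  #include <stdio.h>

  #include <rte_errno.h>
  #include <rte_gpudev.h>

  /* gpudev calls return a negative value on failure and store the
   * cause (e.g. ENODEV, EPERM, ENOTSUP) in rte_errno.
   */
  static void
  print_gpu_info_error(int16_t gpu_id)
  {
  	struct rte_gpu_info info;

  	if (rte_gpu_info_get(gpu_id, &info) < 0)
  		printf("rte_gpu_info_get() failed: %s\n",
  				rte_strerror(rte_errno));
  }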

Fixes: 1306a73b1958 ("gpu/cuda: introduce CUDA driver")

Signed-off-by: Elena Agostini <eagostini@nvidia.com>
---
 drivers/gpu/cuda/cuda.c | 184 +++++++++++++++++++++++++++-------------
 1 file changed, 123 insertions(+), 61 deletions(-)

diff --git a/drivers/gpu/cuda/cuda.c b/drivers/gpu/cuda/cuda.c
index a4869da186..882df08e56 100644
--- a/drivers/gpu/cuda/cuda.c
+++ b/drivers/gpu/cuda/cuda.c
@@ -464,8 +464,10 @@ cuda_dev_info_get(struct rte_gpu *dev, struct rte_gpu_info *info)
 	CUcontext current_ctx;
 	CUcontext input_ctx;
 
-	if (dev == NULL)
-		return -ENODEV;
+	if (dev == NULL) {
+		rte_errno = ENODEV;
+		return -rte_errno;
+	}
 
 	/* Child initialization time probably called by rte_gpu_add_child() */
 	if (dev->mpshared->info.parent != RTE_GPU_ID_NONE &&
@@ -476,7 +478,8 @@ cuda_dev_info_get(struct rte_gpu *dev, struct rte_gpu_info *info)
 			pfn_cuGetErrorString(res, &(err_string));
 			rte_cuda_log(ERR, "cuCtxGetCurrent failed with %s",
 					err_string);
-			return -EPERM;
+			rte_errno = EPERM;
+			return -rte_errno;
 		}
 
 		/* Set child ctx as current ctx */
@@ -486,7 +489,8 @@ cuda_dev_info_get(struct rte_gpu *dev, struct rte_gpu_info *info)
 			pfn_cuGetErrorString(res, &(err_string));
 			rte_cuda_log(ERR, "cuCtxSetCurrent input failed with %s",
 					err_string);
-			return -EPERM;
+			rte_errno = EPERM;
+			return -rte_errno;
 		}
 
 		/*
@@ -505,8 +509,10 @@ cuda_dev_info_get(struct rte_gpu *dev, struct rte_gpu_info *info)
 				(uint32_t)affinityPrm.param.smCount.val;
 
 		ret = rte_gpu_info_get(dev->mpshared->info.parent, &parent_info);
-		if (ret)
-			return -ENODEV;
+		if (ret) {
+			rte_errno = ENODEV;
+			return -rte_errno;
+		}
 		dev->mpshared->info.total_memory = parent_info.total_memory;
 
 		/*
@@ -517,7 +523,8 @@ cuda_dev_info_get(struct rte_gpu *dev, struct rte_gpu_info *info)
 				RTE_CACHE_LINE_SIZE);
 		if (dev->mpshared->dev_private == NULL) {
 			rte_cuda_log(ERR, "Failed to allocate memory for GPU process private");
-			return -EPERM;
+			rte_errno = EPERM;
+			return -rte_errno;
 		}
 
 		private = (struct cuda_info *)dev->mpshared->dev_private;
@@ -527,7 +534,8 @@ cuda_dev_info_get(struct rte_gpu *dev, struct rte_gpu_info *info)
 			pfn_cuGetErrorString(res, &(err_string));
 			rte_cuda_log(ERR, "cuCtxGetDevice failed with %s",
 					err_string);
-			return -EPERM;
+			rte_errno = EPERM;
+			return -rte_errno;
 		}
 
 		res = pfn_cuDeviceGetName(private->gpu_name,
@@ -536,7 +544,8 @@ cuda_dev_info_get(struct rte_gpu *dev, struct rte_gpu_info *info)
 			pfn_cuGetErrorString(res, &(err_string));
 			rte_cuda_log(ERR, "cuDeviceGetName failed with %s",
 					err_string);
-			return -EPERM;
+			rte_errno = EPERM;
+			return -rte_errno;
 		}
 
 		/* Restore original ctx as current ctx */
@@ -545,7 +554,8 @@ cuda_dev_info_get(struct rte_gpu *dev, struct rte_gpu_info *info)
 			pfn_cuGetErrorString(res, &(err_string));
 			rte_cuda_log(ERR, "cuCtxSetCurrent current failed with %s",
 					err_string);
-			return -EPERM;
+			rte_errno = EPERM;
+			return -rte_errno;
 		}
 	}
 
@@ -576,7 +586,8 @@ cuda_mem_alloc(struct rte_gpu *dev, size_t size, void **ptr)
 		pfn_cuGetErrorString(res, &(err_string));
 		rte_cuda_log(ERR, "cuCtxGetCurrent failed with %s",
 				err_string);
-		return -EPERM;
+		rte_errno = EPERM;
+		return -rte_errno;
 	}
 
 	/* Set child ctx as current ctx */
@@ -586,13 +597,16 @@ cuda_mem_alloc(struct rte_gpu *dev, size_t size, void **ptr)
 		pfn_cuGetErrorString(res, &(err_string));
 		rte_cuda_log(ERR, "cuCtxSetCurrent input failed with %s",
 				err_string);
-		return -EPERM;
+		rte_errno = EPERM;
+		return -rte_errno;
 	}
 
 	/* Get next memory list item */
 	mem_alloc_list_tail = mem_list_add_item();
-	if (mem_alloc_list_tail == NULL)
-		return -ENOMEM;
+	if (mem_alloc_list_tail == NULL) {
+		rte_errno = EPERM;
+		return -rte_errno;
+	}
 
 	/* Allocate memory */
 	mem_alloc_list_tail->size = size;
@@ -602,7 +616,8 @@ cuda_mem_alloc(struct rte_gpu *dev, size_t size, void **ptr)
 		pfn_cuGetErrorString(res, &(err_string));
 		rte_cuda_log(ERR, "cuCtxSetCurrent current failed with %s",
 				err_string);
-		return -EPERM;
+		rte_errno = EPERM;
+		return -rte_errno;
 	}
 
 	/* GPUDirect RDMA attribute required */
@@ -613,7 +628,8 @@ cuda_mem_alloc(struct rte_gpu *dev, size_t size, void **ptr)
 		rte_cuda_log(ERR, "Could not set SYNC MEMOP attribute for "
 				"GPU memory at  %"PRIu32", err %d",
 				(uint32_t)mem_alloc_list_tail->ptr_d, res);
-		return -EPERM;
+		rte_errno = EPERM;
+		return -rte_errno;
 	}
 
 	mem_alloc_list_tail->pkey = get_hash_from_ptr((void *)mem_alloc_list_tail->ptr_d);
@@ -629,7 +645,8 @@ cuda_mem_alloc(struct rte_gpu *dev, size_t size, void **ptr)
 		pfn_cuGetErrorString(res, &(err_string));
 		rte_cuda_log(ERR, "cuCtxSetCurrent current failed with %s",
 				err_string);
-		return -EPERM;
+		rte_errno = EPERM;
+		return -rte_errno;
 	}
 
 	*ptr = (void *)mem_alloc_list_tail->ptr_d;
@@ -656,7 +673,8 @@ cuda_mem_register(struct rte_gpu *dev, size_t size, void *ptr)
 		pfn_cuGetErrorString(res, &(err_string));
 		rte_cuda_log(ERR, "cuCtxGetCurrent failed with %s",
 				err_string);
-		return -EPERM;
+		rte_errno = EPERM;
+		return -rte_errno;
 	}
 
 	/* Set child ctx as current ctx */
@@ -666,13 +684,16 @@ cuda_mem_register(struct rte_gpu *dev, size_t size, void *ptr)
 		pfn_cuGetErrorString(res, &(err_string));
 		rte_cuda_log(ERR, "cuCtxSetCurrent input failed with %s",
 				err_string);
-		return -EPERM;
+		rte_errno = EPERM;
+		return -rte_errno;
 	}
 
 	/* Get next memory list item */
 	mem_alloc_list_tail = mem_list_add_item();
-	if (mem_alloc_list_tail == NULL)
-		return -ENOMEM;
+	if (mem_alloc_list_tail == NULL) {
+		rte_errno = EPERM;
+		return -rte_errno;
+	}
 
 	/* Allocate memory */
 	mem_alloc_list_tail->size = size;
@@ -688,7 +709,8 @@ cuda_mem_register(struct rte_gpu *dev, size_t size, void *ptr)
 				err_string,
 				mem_alloc_list_tail->ptr_h,
 				mem_alloc_list_tail->size);
-		return -EPERM;
+		rte_errno = EPERM;
+		return -rte_errno;
 	}
 
 	res = pfn_cuDeviceGetAttribute(&(use_ptr_h),
@@ -698,7 +720,8 @@ cuda_mem_register(struct rte_gpu *dev, size_t size, void *ptr)
 		pfn_cuGetErrorString(res, &(err_string));
 		rte_cuda_log(ERR, "cuDeviceGetAttribute failed with %s",
 				err_string);
-		return -EPERM;
+		rte_errno = EPERM;
+		return -rte_errno;
 	}
 
 	if (use_ptr_h == 0) {
@@ -708,13 +731,15 @@ cuda_mem_register(struct rte_gpu *dev, size_t size, void *ptr)
 			pfn_cuGetErrorString(res, &(err_string));
 			rte_cuda_log(ERR, "cuMemHostGetDevicePointer failed with %s",
 					err_string);
-			return -EPERM;
+			rte_errno = EPERM;
+			return -rte_errno;
 		}
 
 		if ((uintptr_t)mem_alloc_list_tail->ptr_d !=
 				(uintptr_t)mem_alloc_list_tail->ptr_h) {
 			rte_cuda_log(ERR, "Host input pointer is different wrt GPU registered pointer");
-			return -ENOTSUP;
+			rte_errno = ENOTSUP;
+			return -rte_errno;
 		}
 	} else {
 		mem_alloc_list_tail->ptr_d = (CUdeviceptr)mem_alloc_list_tail->ptr_h;
@@ -727,7 +752,8 @@ cuda_mem_register(struct rte_gpu *dev, size_t size, void *ptr)
 	if (res != 0) {
 		rte_cuda_log(ERR, "Could not set SYNC MEMOP attribute for GPU memory at %"PRIu32
 				", err %d", (uint32_t)mem_alloc_list_tail->ptr_d, res);
-		return -EPERM;
+		rte_errno = EPERM;
+		return -rte_errno;
 	}
 
 	mem_alloc_list_tail->pkey = get_hash_from_ptr((void *)mem_alloc_list_tail->ptr_h);
@@ -742,7 +768,8 @@ cuda_mem_register(struct rte_gpu *dev, size_t size, void *ptr)
 		pfn_cuGetErrorString(res, &(err_string));
 		rte_cuda_log(ERR, "cuCtxSetCurrent current failed with %s",
 				err_string);
-		return -EPERM;
+		rte_errno = EPERM;
+		return -rte_errno;
 	}
 
 	return 0;
@@ -764,7 +791,8 @@ cuda_mem_free(struct rte_gpu *dev, void *ptr)
 	mem_item = mem_list_find_item(hk);
 	if (mem_item == NULL) {
 		rte_cuda_log(ERR, "Memory address 0x%p not found in driver memory", ptr);
-		return -EPERM;
+		rte_errno = EPERM;
+		return -rte_errno;
 	}
 
 	if (mem_item->mtype == GPU_MEM) {
@@ -773,7 +801,8 @@ cuda_mem_free(struct rte_gpu *dev, void *ptr)
 			pfn_cuGetErrorString(res, &(err_string));
 			rte_cuda_log(ERR, "cuMemFree current failed with %s",
 					err_string);
-			return -EPERM;
+			rte_errno = EPERM;
+			return -rte_errno;
 		}
 
 		return mem_list_del_item(hk);
@@ -800,7 +829,8 @@ cuda_mem_unregister(struct rte_gpu *dev, void *ptr)
 	mem_item = mem_list_find_item(hk);
 	if (mem_item == NULL) {
 		rte_cuda_log(ERR, "Memory address 0x%p not found in driver memory", ptr);
-		return -EPERM;
+		rte_errno = EPERM;
+		return -rte_errno;
 	}
 
 	if (mem_item->mtype == CPU_REGISTERED) {
@@ -809,7 +839,8 @@ cuda_mem_unregister(struct rte_gpu *dev, void *ptr)
 			pfn_cuGetErrorString(res, &(err_string));
 			rte_cuda_log(ERR, "cuMemHostUnregister current failed with %s",
 					err_string);
-			return -EPERM;
+			rte_errno = EPERM;
+			return -rte_errno;
 		}
 
 		return mem_list_del_item(hk);
@@ -817,7 +848,8 @@ cuda_mem_unregister(struct rte_gpu *dev, void *ptr)
 
 	rte_cuda_log(ERR, "Memory type %d not supported", mem_item->mtype);
 
-	return -EPERM;
+	rte_errno = EPERM;
+	return -rte_errno;
 }
 
 static int
@@ -840,8 +872,10 @@ cuda_wmb(struct rte_gpu *dev)
 	CUcontext input_ctx;
 	struct cuda_info *private;
 
-	if (dev == NULL)
-		return -ENODEV;
+	if (dev == NULL) {
+		rte_errno = ENODEV;
+		return -rte_errno;
+	}
 
 	private = (struct cuda_info *)dev->mpshared->dev_private;
 
@@ -860,7 +894,9 @@ cuda_wmb(struct rte_gpu *dev)
 		 */
 		rte_cuda_log(WARNING, "Can't flush GDR writes with cuFlushGPUDirectRDMAWrites CUDA function."
 				"Application needs to use alternative methods.");
-		return -ENOTSUP;
+
+		rte_errno = ENOTSUP;
+		return -rte_errno;
 	}
 
 	/* Store current ctx */
@@ -869,7 +905,8 @@ cuda_wmb(struct rte_gpu *dev)
 		pfn_cuGetErrorString(res, &(err_string));
 		rte_cuda_log(ERR, "cuCtxGetCurrent failed with %s",
 				err_string);
-		return -EPERM;
+		rte_errno = EPERM;
+		return -rte_errno;
 	}
 
 	/* Set child ctx as current ctx */
@@ -879,7 +916,8 @@ cuda_wmb(struct rte_gpu *dev)
 		pfn_cuGetErrorString(res, &(err_string));
 		rte_cuda_log(ERR, "cuCtxSetCurrent input failed with %s",
 				err_string);
-		return -EPERM;
+		rte_errno = EPERM;
+		return -rte_errno;
 	}
 
 	res = pfn_cuFlushGPUDirectRDMAWrites(CU_FLUSH_GPU_DIRECT_RDMA_WRITES_TARGET_CURRENT_CTX,
@@ -888,7 +926,8 @@ cuda_wmb(struct rte_gpu *dev)
 		pfn_cuGetErrorString(res, &(err_string));
 		rte_cuda_log(ERR, "cuFlushGPUDirectRDMAWrites current failed with %s",
 				err_string);
-		return -EPERM;
+		rte_errno = EPERM;
+		return -rte_errno;
 	}
 
 	/* Restore original ctx as current ctx */
@@ -897,7 +936,8 @@ cuda_wmb(struct rte_gpu *dev)
 		pfn_cuGetErrorString(res, &(err_string));
 		rte_cuda_log(ERR, "cuCtxSetCurrent current failed with %s",
 				err_string);
-		return -EPERM;
+		rte_errno = EPERM;
+		return -rte_errno;
 	}
 
 	return 0;
@@ -917,15 +957,18 @@ cuda_gpu_probe(__rte_unused struct rte_pci_driver *pci_drv, struct rte_pci_devic
 
 	if (pci_dev == NULL) {
 		rte_cuda_log(ERR, "NULL PCI device");
-		return -EINVAL;
+		rte_errno = ENODEV;
+		return -rte_errno;
 	}
 
 	rte_pci_device_name(&pci_dev->addr, dev_name, sizeof(dev_name));
 
 	/* Allocate memory to be used privately by drivers */
 	dev = rte_gpu_allocate(pci_dev->device.name);
-	if (dev == NULL)
-		return -ENODEV;
+	if (dev == NULL) {
+		rte_errno = ENODEV;
+		return -rte_errno;
+	}
 
 	/* Initialize values only for the first CUDA driver call */
 	if (dev->mpshared->info.dev_id == 0) {
@@ -936,13 +979,15 @@ cuda_gpu_probe(__rte_unused struct rte_pci_driver *pci_drv, struct rte_pci_devic
 		/* Load libcuda.so library */
 		if (cuda_loader()) {
 			rte_cuda_log(ERR, "CUDA Driver library not found");
-			return -ENOTSUP;
+			rte_errno = ENOTSUP;
+			return -rte_errno;
 		}
 
 		/* Load initial CUDA functions */
 		if (cuda_sym_func_loader()) {
 			rte_cuda_log(ERR, "CUDA functions not found in library");
-			return -ENOTSUP;
+			rte_errno = ENOTSUP;
+			return -rte_errno;
 		}
 
 		/*
@@ -955,7 +1000,8 @@ cuda_gpu_probe(__rte_unused struct rte_pci_driver *pci_drv, struct rte_pci_devic
 		res = sym_cuDriverGetVersion(&cuda_driver_version);
 		if (res != 0) {
 			rte_cuda_log(ERR, "cuDriverGetVersion failed with %d", res);
-			return -ENOTSUP;
+			rte_errno = ENOTSUP;
+			return -rte_errno;
 		}
 
 		if (cuda_driver_version < CUDA_DRIVER_MIN_VERSION) {
@@ -963,12 +1009,14 @@ cuda_gpu_probe(__rte_unused struct rte_pci_driver *pci_drv, struct rte_pci_devic
 					"Minimum requirement is %d",
 					cuda_driver_version,
 					CUDA_DRIVER_MIN_VERSION);
-			return -ENOTSUP;
+			rte_errno = ENOTSUP;
+			return -rte_errno;
 		}
 
 		if (cuda_pfn_func_loader()) {
 			rte_cuda_log(ERR, "CUDA PFN functions not found in library");
-			return -ENOTSUP;
+			rte_errno = ENOTSUP;
+			return -rte_errno;
 		}
 	}
 
@@ -982,7 +1030,8 @@ cuda_gpu_probe(__rte_unused struct rte_pci_driver *pci_drv, struct rte_pci_devic
 		pfn_cuGetErrorString(res, &(err_string));
 		rte_cuda_log(ERR, "cuDeviceGetByPCIBusId name %s failed with %d: %s",
 				dev->device->name, res, err_string);
-		return -EPERM;
+		rte_errno = EPERM;
+		return -rte_errno;
 	}
 
 	res = pfn_cuDevicePrimaryCtxRetain(&pctx, cu_dev_id);
@@ -990,19 +1039,22 @@ cuda_gpu_probe(__rte_unused struct rte_pci_driver *pci_drv, struct rte_pci_devic
 		pfn_cuGetErrorString(res, &(err_string));
 		rte_cuda_log(ERR, "cuDevicePrimaryCtxRetain name %s failed with %d: %s",
 				dev->device->name, res, err_string);
-		return -EPERM;
+		rte_errno = EPERM;
+		return -rte_errno;
 	}
 
 	res = pfn_cuCtxGetApiVersion(pctx, &cuda_api_version);
 	if (res != 0) {
 		rte_cuda_log(ERR, "cuCtxGetApiVersion failed with %d", res);
-		return -ENOTSUP;
+		rte_errno = ENOTSUP;
+		return -rte_errno;
 	}
 
 	if (cuda_api_version < CUDA_API_MIN_VERSION) {
 		rte_cuda_log(ERR, "CUDA API version found is %d Minimum requirement is %d",
 				cuda_api_version, CUDA_API_MIN_VERSION);
-		return -ENOTSUP;
+		rte_errno = ENOTSUP;
+		return -rte_errno;
 	}
 
 	dev->mpshared->info.context = (uint64_t)pctx;
@@ -1019,7 +1071,8 @@ cuda_gpu_probe(__rte_unused struct rte_pci_driver *pci_drv, struct rte_pci_devic
 		pfn_cuGetErrorString(res, &(err_string));
 		rte_cuda_log(ERR, "cuDeviceGetAttribute failed with %s",
 				err_string);
-		return -EPERM;
+		rte_errno = EPERM;
+		return -rte_errno;
 	}
 	dev->mpshared->info.processor_count = (uint32_t)processor_count;
 
@@ -1029,7 +1082,8 @@ cuda_gpu_probe(__rte_unused struct rte_pci_driver *pci_drv, struct rte_pci_devic
 		pfn_cuGetErrorString(res, &(err_string));
 		rte_cuda_log(ERR, "cuDeviceTotalMem failed with %s",
 				err_string);
-		return -EPERM;
+		rte_errno = EPERM;
+		return -rte_errno;
 	}
 
 	/*
@@ -1040,7 +1094,8 @@ cuda_gpu_probe(__rte_unused struct rte_pci_driver *pci_drv, struct rte_pci_devic
 			RTE_CACHE_LINE_SIZE);
 	if (dev->mpshared->dev_private == NULL) {
 		rte_cuda_log(ERR, "Failed to allocate memory for GPU process private");
-		return -ENOMEM;
+		rte_errno = EPERM;
+		return -rte_errno;
 	}
 
 	private = (struct cuda_info *)dev->mpshared->dev_private;
@@ -1052,7 +1107,8 @@ cuda_gpu_probe(__rte_unused struct rte_pci_driver *pci_drv, struct rte_pci_devic
 		pfn_cuGetErrorString(res, &(err_string));
 		rte_cuda_log(ERR, "cuDeviceGetName failed with %s",
 				err_string);
-		return -EPERM;
+		rte_errno = EPERM;
+		return -rte_errno;
 	}
 
 	res = pfn_cuDeviceGetAttribute(&(private->gdr_supported),
@@ -1062,7 +1118,8 @@ cuda_gpu_probe(__rte_unused struct rte_pci_driver *pci_drv, struct rte_pci_devic
 		pfn_cuGetErrorString(res, &(err_string));
 		rte_cuda_log(ERR, "cuDeviceGetAttribute failed with %s",
 				err_string);
-		return -EPERM;
+		rte_errno = EPERM;
+		return -rte_errno;
 	}
 
 	if (private->gdr_supported == 0)
@@ -1077,7 +1134,8 @@ cuda_gpu_probe(__rte_unused struct rte_pci_driver *pci_drv, struct rte_pci_devic
 		rte_cuda_log(ERR,
 				"cuDeviceGetAttribute failed with %s",
 				err_string);
-		return -EPERM;
+		rte_errno = EPERM;
+		return -rte_errno;
 	}
 
 	if (private->gdr_write_ordering == CU_GPU_DIRECT_RDMA_WRITES_ORDERING_NONE) {
@@ -1088,7 +1146,8 @@ cuda_gpu_probe(__rte_unused struct rte_pci_driver *pci_drv, struct rte_pci_devic
 			pfn_cuGetErrorString(res, &(err_string));
 			rte_cuda_log(ERR, "cuDeviceGetAttribute failed with %s",
 					err_string);
-			return -EPERM;
+			rte_errno = EPERM;
+			return -rte_errno;
 		}
 
 		if (private->gdr_flush_type != CU_FLUSH_GPU_DIRECT_RDMA_WRITES_OPTION_HOST)
@@ -1118,14 +1177,17 @@ cuda_gpu_remove(struct rte_pci_device *pci_dev)
 	int ret;
 	uint8_t gpu_id;
 
-	if (pci_dev == NULL)
-		return -EINVAL;
+	if (pci_dev == NULL) {
+		rte_errno = ENODEV;
+		return -rte_errno;
+	}
 
 	dev = rte_gpu_get_by_name(pci_dev->device.name);
 	if (dev == NULL) {
 		rte_cuda_log(ERR, "Couldn't find HW dev \"%s\" to uninitialise it",
 				pci_dev->device.name);
-		return -ENODEV;
+		rte_errno = ENODEV;
+		return -rte_errno;
 	}
 	gpu_id = dev->mpshared->info.dev_id;
 
-- 
2.17.1



Thread overview: 3+ messages
2021-11-18 20:45 [PATCH v1] gpu/cuda: properly set rte_errno  eagostini
2021-11-24 18:34 ` [PATCH v2] gpu/cuda: set rte_errno  eagostini [this message]
2021-11-24 11:03   ` [PATCH v2] gpu/cuda: set rte_errno  Thomas Monjalon
