* [dpdk-dev] [PATCH] app/testpmd: add GPU memory option in iofwd engine
@ 2021-10-29 20:49 eagostini
2021-11-11 21:41 ` [PATCH v2 0/1] " eagostini
` (2 more replies)
0 siblings, 3 replies; 31+ messages in thread
From: eagostini @ 2021-10-29 20:49 UTC (permalink / raw)
To: dev; +Cc: ferruh.yigit, xiaoyun.li, eagostini
From: eagostini <eagostini@nvidia.com>
This patch introduces GPU memory in testpmd through the gpudev library.
Testpmd can be used for network benchmarks when using GPU memory
instead of regular CPU memory to send and receive packets.
This option is currently limited to iofwd engine.
Signed-off-by: Elena Agostini <eagostini@nvidia.com>
---
Depends-on: series-19465 ("GPU library")
---
---
app/test-pmd/cmdline.c | 14 +++
app/test-pmd/meson.build | 2 +-
app/test-pmd/parameters.c | 13 ++-
app/test-pmd/testpmd.c | 136 +++++++++++++++++++++++---
app/test-pmd/testpmd.h | 16 ++-
doc/guides/testpmd_app_ug/run_app.rst | 3 +
6 files changed, 167 insertions(+), 17 deletions(-)
diff --git a/app/test-pmd/cmdline.c b/app/test-pmd/cmdline.c
index ebea13f86a..f9a332458f 100644
--- a/app/test-pmd/cmdline.c
+++ b/app/test-pmd/cmdline.c
@@ -3614,6 +3614,7 @@ parse_item_list(const char *str, const char *item_name, unsigned int max_items,
unsigned int j;
int value_ok;
char c;
+ int gpu_mbuf = 0;
/*
* First parse all items in the list and store their value.
@@ -3628,6 +3629,14 @@ parse_item_list(const char *str, const char *item_name, unsigned int max_items,
value_ok = 1;
continue;
}
+ if (c == 'g') {
+ /*
+ * When this flag is set, mbufs for this segment
+ * will be created on GPU memory.
+ */
+ gpu_mbuf = 1;
+ continue;
+ }
if (c != ',') {
fprintf(stderr, "character %c is not a decimal digit\n", c);
return 0;
@@ -3640,6 +3649,8 @@ parse_item_list(const char *str, const char *item_name, unsigned int max_items,
parsed_items[nb_item] = value;
value_ok = 0;
value = 0;
+ mbuf_mem_types[nb_item] = gpu_mbuf ? MBUF_MEM_GPU : MBUF_MEM_CPU;
+ gpu_mbuf = 0;
}
nb_item++;
}
@@ -3648,6 +3659,9 @@ parse_item_list(const char *str, const char *item_name, unsigned int max_items,
item_name, nb_item + 1, max_items);
return 0;
}
+
+ mbuf_mem_types[nb_item] = gpu_mbuf ? MBUF_MEM_GPU : MBUF_MEM_CPU;
+
parsed_items[nb_item++] = value;
if (! check_unique_values)
return nb_item;
diff --git a/app/test-pmd/meson.build b/app/test-pmd/meson.build
index b5a0f7b620..5328f79eab 100644
--- a/app/test-pmd/meson.build
+++ b/app/test-pmd/meson.build
@@ -28,7 +28,7 @@ sources = files(
'util.c',
)
-deps += ['ethdev', 'gro', 'gso', 'cmdline', 'metrics', 'meter', 'bus_pci']
+deps += ['ethdev', 'gro', 'gso', 'cmdline', 'metrics', 'meter', 'bus_pci', 'gpudev']
if dpdk_conf.has('RTE_LIB_BITRATESTATS')
deps += 'bitratestats'
endif
diff --git a/app/test-pmd/parameters.c b/app/test-pmd/parameters.c
index cb40917077..4a2639cd75 100644
--- a/app/test-pmd/parameters.c
+++ b/app/test-pmd/parameters.c
@@ -90,7 +90,10 @@ usage(char* progname)
"in NUMA mode.\n");
printf(" --mbuf-size=N,[N1[,..Nn]: set the data size of mbuf to "
"N bytes. If multiple numbers are specified the extra pools "
- "will be created to receive with packet split features\n");
+ "will be created to receive with packet split features\n"
+ "Use 'g' suffix for GPU memory.\n"
+ "If no or an unrecognized suffix is provided, CPU is assumed\n");
+
printf(" --total-num-mbufs=N: set the number of mbufs to be allocated "
"in mbuf pools.\n");
printf(" --max-pkt-len=N: set the maximum size of packet to N bytes.\n");
@@ -598,6 +601,7 @@ launch_args_parse(int argc, char** argv)
struct rte_eth_dev_info dev_info;
uint16_t rec_nb_pkts;
int ret;
+ uint32_t idx = 0;
static struct option lgopts[] = {
{ "help", 0, 0, 0 },
@@ -1541,4 +1545,11 @@ launch_args_parse(int argc, char** argv)
"ignored\n");
mempool_flags = 0;
}
+
+ for (idx = 0; idx < mbuf_data_size_n; idx++) {
+ if (mbuf_mem_types[idx] == MBUF_MEM_GPU && strcmp(cur_fwd_eng->fwd_mode_name, "io") != 0) {
+ fprintf(stderr, "GPU memory mbufs can be used with iofwd engine only\n");
+ rte_exit(EXIT_FAILURE, "Command line is incorrect\n");
+ }
+ }
}
diff --git a/app/test-pmd/testpmd.c b/app/test-pmd/testpmd.c
index a66dfb297c..58638bf230 100644
--- a/app/test-pmd/testpmd.c
+++ b/app/test-pmd/testpmd.c
@@ -205,6 +205,12 @@ uint32_t mbuf_data_size_n = 1; /* Number of specified mbuf sizes. */
uint16_t mbuf_data_size[MAX_SEGS_BUFFER_SPLIT] = {
DEFAULT_MBUF_DATA_SIZE
}; /**< Mbuf data space size. */
+
+/* Mbuf memory types. */
+enum mbuf_mem_type mbuf_mem_types[MAX_SEGS_BUFFER_SPLIT];
+/* Pointers to external memory allocated for mempools. */
+uintptr_t mempools_ext_ptr[MAX_SEGS_BUFFER_SPLIT];
+
uint32_t param_total_num_mbufs = 0; /**< number of mbufs in all pools - if
* specified on command-line. */
uint16_t stats_period; /**< Period to show statistics (disabled by default) */
@@ -543,6 +549,12 @@ int proc_id;
*/
unsigned int num_procs = 1;
+/*
+ * In case of GPU memory external mbufs use, for simplicity,
+ * the first GPU device in the list.
+ */
+int gpu_id = 0;
+
static void
eth_rx_metadata_negotiate_mp(uint16_t port_id)
{
@@ -1103,6 +1115,81 @@ setup_extbuf(uint32_t nb_mbufs, uint16_t mbuf_sz, unsigned int socket_id,
return ext_num;
}
+static struct rte_mempool *
+gpu_mbuf_pool_create(uint16_t mbuf_seg_size, unsigned int nb_mbuf,
+ unsigned int socket_id, uint16_t port_id,
+ int gpu_id, uintptr_t * mp_addr)
+{
+ int ret = 0;
+ char pool_name[RTE_MEMPOOL_NAMESIZE];
+ struct rte_eth_dev_info dev_info;
+ struct rte_mempool *rte_mp = NULL;
+ struct rte_pktmbuf_extmem gpu_mem;
+ struct rte_gpu_info ginfo;
+ uint8_t gpu_page_shift = 16;
+ uint32_t gpu_page_size = (1UL << gpu_page_shift);
+
+ ret = eth_dev_info_get_print_err(port_id, &dev_info);
+ if (ret != 0)
+ rte_exit(EXIT_FAILURE,
+ "Failed to get device info for port %d\n",
+ port_id);
+
+ mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name), port_id, MBUF_MEM_GPU);
+ if (!is_proc_primary()) {
+ rte_mp = rte_mempool_lookup(pool_name);
+ if (rte_mp == NULL)
+ rte_exit(EXIT_FAILURE,
+ "Get mbuf pool for socket %u failed: %s\n",
+ socket_id, rte_strerror(rte_errno));
+ return rte_mp;
+ }
+
+ if (rte_gpu_info_get(gpu_id, &ginfo))
+ rte_exit(EXIT_FAILURE, "rte_gpu_info_get error - bye\n");
+
+ TESTPMD_LOG(INFO,
+ "create a new mbuf pool <%s>: n=%u, size=%u, socket=%u GPU device=%s\n",
+ pool_name, nb_mbuf, mbuf_seg_size, socket_id, ginfo.name);
+
+ /* Create an external memory mempool using memory allocated on the GPU. */
+
+ gpu_mem.elt_size = RTE_MBUF_DEFAULT_BUF_SIZE;
+ gpu_mem.buf_len = RTE_ALIGN_CEIL(nb_mbuf * gpu_mem.elt_size, gpu_page_size);
+ gpu_mem.buf_iova = RTE_BAD_IOVA;
+
+ gpu_mem.buf_ptr = rte_gpu_malloc(gpu_id, gpu_mem.buf_len);
+ if (gpu_mem.buf_ptr == NULL) {
+ rte_exit(EXIT_FAILURE, "Could not allocate GPU device memory\n");
+ }
+
+ ret = rte_extmem_register(gpu_mem.buf_ptr, gpu_mem.buf_len, NULL, gpu_mem.buf_iova, gpu_page_size);
+ if (ret) {
+ rte_exit(EXIT_FAILURE, "Unable to register addr 0x%p, ret %d\n", gpu_mem.buf_ptr, ret);
+ }
+
+ uint16_t pid = 0;
+
+ RTE_ETH_FOREACH_DEV(pid)
+ {
+ ret = rte_dev_dma_map(dev_info.device, gpu_mem.buf_ptr,
+ gpu_mem.buf_iova, gpu_mem.buf_len);
+ if (ret) {
+ rte_exit(EXIT_FAILURE, "Unable to DMA map addr 0x%p for device %s\n",
+ gpu_mem.buf_ptr, dev_info.device->name);
+ }
+ }
+
+ rte_mp = rte_pktmbuf_pool_create_extbuf(pool_name, nb_mbuf, mb_mempool_cache, 0, mbuf_seg_size, socket_id, &gpu_mem, 1);
+ if (rte_mp == NULL) {
+ rte_exit(EXIT_FAILURE, "Creation of GPU mempool <%s> failed\n", pool_name);
+ }
+
+ *mp_addr = (uintptr_t) gpu_mem.buf_ptr;
+
+ return rte_mp;
+}
+
/*
* Configuration initialisation done once at init time.
*/
@@ -1117,7 +1204,7 @@ mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
#endif
- mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name), size_idx);
+ mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name), size_idx, MBUF_MEM_CPU);
if (!is_proc_primary()) {
rte_mp = rte_mempool_lookup(pool_name);
if (rte_mp == NULL)
@@ -1700,19 +1787,42 @@ init_config(void)
for (i = 0; i < num_sockets; i++)
for (j = 0; j < mbuf_data_size_n; j++)
- mempools[i * MAX_SEGS_BUFFER_SPLIT + j] =
- mbuf_pool_create(mbuf_data_size[j],
- nb_mbuf_per_pool,
- socket_ids[i], j);
+ {
+ if (mbuf_mem_types[j] == MBUF_MEM_GPU) {
+ if(rte_gpu_count_avail() == 0)
+ rte_exit(EXIT_FAILURE, "No GPU device available.\n");
+
+ mempools[i * MAX_SEGS_BUFFER_SPLIT + j] =
+ gpu_mbuf_pool_create(mbuf_data_size[j],
+ nb_mbuf_per_pool,
+ socket_ids[i], j, gpu_id,
+ &(mempools_ext_ptr[j]));
+ } else {
+ mempools[i * MAX_SEGS_BUFFER_SPLIT + j] =
+ mbuf_pool_create(mbuf_data_size[j],
+ nb_mbuf_per_pool,
+ socket_ids[i], j);
+ }
+ }
} else {
uint8_t i;
for (i = 0; i < mbuf_data_size_n; i++)
- mempools[i] = mbuf_pool_create
- (mbuf_data_size[i],
- nb_mbuf_per_pool,
- socket_num == UMA_NO_CONFIG ?
- 0 : socket_num, i);
+ {
+ if (mbuf_mem_types[i] == MBUF_MEM_GPU) {
+ mempools[i] = gpu_mbuf_pool_create(mbuf_data_size[i],
+ nb_mbuf_per_pool,
+ socket_num == UMA_NO_CONFIG ? 0 : socket_num,
+ i, gpu_id,
+ &(mempools_ext_ptr[i]));
+ } else {
+ mempools[i] = mbuf_pool_create(mbuf_data_size[i],
+ nb_mbuf_per_pool,
+ socket_num == UMA_NO_CONFIG ?
+ 0 : socket_num, i);
+ }
+ }
+
}
init_port_config();
@@ -3415,8 +3525,12 @@ pmd_test_exit(void)
}
}
for (i = 0 ; i < RTE_DIM(mempools) ; i++) {
- if (mempools[i])
+ if (mempools[i]) {
mempool_free_mp(mempools[i]);
+ if (mbuf_mem_types[i] == MBUF_MEM_GPU) {
+ rte_gpu_free(gpu_id, (void*)mempools_ext_ptr[i]);
+ }
+ }
}
free(xstats_display);
diff --git a/app/test-pmd/testpmd.h b/app/test-pmd/testpmd.h
index 669ce1e87d..9919044372 100644
--- a/app/test-pmd/testpmd.h
+++ b/app/test-pmd/testpmd.h
@@ -12,6 +12,7 @@
#include <rte_gro.h>
#include <rte_gso.h>
#include <rte_os_shim.h>
+#include <rte_gpudev.h>
#include <cmdline.h>
#include <sys/queue.h>
#ifdef RTE_HAS_JANSSON
@@ -474,6 +475,11 @@ extern uint8_t dcb_config;
extern uint32_t mbuf_data_size_n;
extern uint16_t mbuf_data_size[MAX_SEGS_BUFFER_SPLIT];
/**< Mbuf data space size. */
+enum mbuf_mem_type {
+ MBUF_MEM_CPU,
+ MBUF_MEM_GPU
+};
+extern enum mbuf_mem_type mbuf_mem_types[MAX_SEGS_BUFFER_SPLIT];
extern uint32_t param_total_num_mbufs;
extern uint16_t stats_period;
@@ -717,14 +723,16 @@ current_fwd_lcore(void)
/* Mbuf Pools */
static inline void
mbuf_poolname_build(unsigned int sock_id, char *mp_name,
- int name_size, uint16_t idx)
+ int name_size, uint16_t idx, enum mbuf_mem_type mem_type)
{
+ const char *suffix = mem_type == MBUF_MEM_GPU ? "_gpu" : "";
+
if (!idx)
snprintf(mp_name, name_size,
- MBUF_POOL_NAME_PFX "_%u", sock_id);
+ MBUF_POOL_NAME_PFX "_%u%s", sock_id, suffix);
else
snprintf(mp_name, name_size,
- MBUF_POOL_NAME_PFX "_%hu_%hu", (uint16_t)sock_id, idx);
+ MBUF_POOL_NAME_PFX "_%hu_%hu%s", (uint16_t)sock_id, idx, suffix);
}
static inline struct rte_mempool *
@@ -732,7 +740,7 @@ mbuf_pool_find(unsigned int sock_id, uint16_t idx)
{
char pool_name[RTE_MEMPOOL_NAMESIZE];
- mbuf_poolname_build(sock_id, pool_name, sizeof(pool_name), idx);
+ mbuf_poolname_build(sock_id, pool_name, sizeof(pool_name), idx, mbuf_mem_types[idx]);
return rte_mempool_lookup((const char *)pool_name);
}
diff --git a/doc/guides/testpmd_app_ug/run_app.rst b/doc/guides/testpmd_app_ug/run_app.rst
index 30edef07ea..ede7b79abb 100644
--- a/doc/guides/testpmd_app_ug/run_app.rst
+++ b/doc/guides/testpmd_app_ug/run_app.rst
@@ -119,6 +119,9 @@ The command line options are:
The default value is 2048. If multiple mbuf-size values are specified the
extra memory pools will be created for allocating mbufs to receive packets
with buffer splitting features.
+ Providing mbuf size with a 'g' suffix (e.g. ``--mbuf-size=2048g``),
+ will cause the mempool to be created on a GPU memory area allocated.
+ This option is currently limited to iofwd engine with the first GPU.
* ``--total-num-mbufs=N``
--
2.17.1
* [PATCH v2 0/1] app/testpmd: add GPU memory option in iofwd engine
2021-10-29 20:49 [dpdk-dev] [PATCH] app/testpmd: add GPU memory option in iofwd engine eagostini
@ 2021-11-11 21:41 ` eagostini
2021-11-11 21:41 ` [PATCH v2 1/1] " eagostini
2021-11-17 3:04 ` [PATCH v3 0/1] app/testpmd: add GPU memory option for mbuf pools eagostini
2021-11-17 21:49 ` [PATCH v4 0/1] " eagostini
2 siblings, 1 reply; 31+ messages in thread
From: eagostini @ 2021-11-11 21:41 UTC (permalink / raw)
To: dev; +Cc: Elena Agostini
From: Elena Agostini <eagostini@nvidia.com>
This patch introduces GPU memory in testpmd through the gpudev library.
Testpmd can be used for network benchmarks when using GPU memory
instead of regular CPU memory to send and receive packets.
This option is currently limited to iofwd engine.
Changelog:
- Updated to latest gpudev API
eagostini (1):
app/testpmd: add GPU memory option in iofwd engine
app/test-pmd/cmdline.c | 14 +++
app/test-pmd/meson.build | 2 +-
app/test-pmd/parameters.c | 13 ++-
app/test-pmd/testpmd.c | 133 +++++++++++++++++++++++---
app/test-pmd/testpmd.h | 16 +++-
doc/guides/testpmd_app_ug/run_app.rst | 3 +
6 files changed, 164 insertions(+), 17 deletions(-)
--
2.17.1
* [PATCH v2 1/1] app/testpmd: add GPU memory option in iofwd engine
2021-11-11 21:41 ` [PATCH v2 0/1] " eagostini
@ 2021-11-11 21:41 ` eagostini
2021-11-16 16:28 ` Slava Ovsiienko
2021-11-16 17:55 ` Ferruh Yigit
0 siblings, 2 replies; 31+ messages in thread
From: eagostini @ 2021-11-11 21:41 UTC (permalink / raw)
To: dev; +Cc: eagostini
From: eagostini <eagostini@nvidia.com>
This patch introduces GPU memory in testpmd through the gpudev library.
Testpmd can be used for network benchmarks when using GPU memory
instead of regular CPU memory to send and receive packets.
This option is currently limited to iofwd engine.
Signed-off-by: Elena Agostini <eagostini@nvidia.com>
Depends-on: series-19465 ("GPU library")
Depends-on: series-20422 ("common/mlx5: fix external memory pool registration")
---
app/test-pmd/cmdline.c | 14 +++
app/test-pmd/meson.build | 2 +-
app/test-pmd/parameters.c | 13 ++-
app/test-pmd/testpmd.c | 133 +++++++++++++++++++++++---
app/test-pmd/testpmd.h | 16 +++-
doc/guides/testpmd_app_ug/run_app.rst | 3 +
6 files changed, 164 insertions(+), 17 deletions(-)
diff --git a/app/test-pmd/cmdline.c b/app/test-pmd/cmdline.c
index 4f51b259fe..36193bc566 100644
--- a/app/test-pmd/cmdline.c
+++ b/app/test-pmd/cmdline.c
@@ -3614,6 +3614,7 @@ parse_item_list(const char *str, const char *item_name, unsigned int max_items,
unsigned int j;
int value_ok;
char c;
+ int gpu_mbuf = 0;
/*
* First parse all items in the list and store their value.
@@ -3628,6 +3629,14 @@ parse_item_list(const char *str, const char *item_name, unsigned int max_items,
value_ok = 1;
continue;
}
+ if (c == 'g') {
+ /*
+ * When this flag is set, mbufs for this segment
+ * will be created on GPU memory.
+ */
+ gpu_mbuf = 1;
+ continue;
+ }
if (c != ',') {
fprintf(stderr, "character %c is not a decimal digit\n", c);
return 0;
@@ -3640,6 +3649,8 @@ parse_item_list(const char *str, const char *item_name, unsigned int max_items,
parsed_items[nb_item] = value;
value_ok = 0;
value = 0;
+ mbuf_mem_types[nb_item] = gpu_mbuf ? MBUF_MEM_GPU : MBUF_MEM_CPU;
+ gpu_mbuf = 0;
}
nb_item++;
}
@@ -3648,6 +3659,9 @@ parse_item_list(const char *str, const char *item_name, unsigned int max_items,
item_name, nb_item + 1, max_items);
return 0;
}
+
+ mbuf_mem_types[nb_item] = gpu_mbuf ? MBUF_MEM_GPU : MBUF_MEM_CPU;
+
parsed_items[nb_item++] = value;
if (! check_unique_values)
return nb_item;
diff --git a/app/test-pmd/meson.build b/app/test-pmd/meson.build
index d5df52c470..5c8ca68c9d 100644
--- a/app/test-pmd/meson.build
+++ b/app/test-pmd/meson.build
@@ -32,7 +32,7 @@ if dpdk_conf.has('RTE_HAS_JANSSON')
ext_deps += jansson_dep
endif
-deps += ['ethdev', 'gro', 'gso', 'cmdline', 'metrics', 'bus_pci']
+deps += ['ethdev', 'gro', 'gso', 'cmdline', 'metrics', 'bus_pci', 'gpudev']
if dpdk_conf.has('RTE_CRYPTO_SCHEDULER')
deps += 'crypto_scheduler'
endif
diff --git a/app/test-pmd/parameters.c b/app/test-pmd/parameters.c
index 0974b0a38f..d41f7f220b 100644
--- a/app/test-pmd/parameters.c
+++ b/app/test-pmd/parameters.c
@@ -87,7 +87,10 @@ usage(char* progname)
"in NUMA mode.\n");
printf(" --mbuf-size=N,[N1[,..Nn]: set the data size of mbuf to "
"N bytes. If multiple numbers are specified the extra pools "
- "will be created to receive with packet split features\n");
+ "will be created to receive with packet split features\n"
+ "Use 'g' suffix for GPU memory.\n"
+ "If no or an unrecognized suffix is provided, CPU is assumed\n");
+
printf(" --total-num-mbufs=N: set the number of mbufs to be allocated "
"in mbuf pools.\n");
printf(" --max-pkt-len=N: set the maximum size of packet to N bytes.\n");
@@ -595,6 +598,7 @@ launch_args_parse(int argc, char** argv)
struct rte_eth_dev_info dev_info;
uint16_t rec_nb_pkts;
int ret;
+ uint32_t idx = 0;
static struct option lgopts[] = {
{ "help", 0, 0, 0 },
@@ -1538,4 +1542,11 @@ launch_args_parse(int argc, char** argv)
"ignored\n");
mempool_flags = 0;
}
+
+ for (idx = 0; idx < mbuf_data_size_n; idx++) {
+ if (mbuf_mem_types[idx] == MBUF_MEM_GPU && strcmp(cur_fwd_eng->fwd_mode_name, "io") != 0) {
+ fprintf(stderr, "GPU memory mbufs can be used with iofwd engine only\n");
+ rte_exit(EXIT_FAILURE, "Command line is incorrect\n");
+ }
+ }
}
diff --git a/app/test-pmd/testpmd.c b/app/test-pmd/testpmd.c
index a66dfb297c..1af235c4d8 100644
--- a/app/test-pmd/testpmd.c
+++ b/app/test-pmd/testpmd.c
@@ -205,6 +205,12 @@ uint32_t mbuf_data_size_n = 1; /* Number of specified mbuf sizes. */
uint16_t mbuf_data_size[MAX_SEGS_BUFFER_SPLIT] = {
DEFAULT_MBUF_DATA_SIZE
}; /**< Mbuf data space size. */
+
+/* Mbuf memory types. */
+enum mbuf_mem_type mbuf_mem_types[MAX_SEGS_BUFFER_SPLIT];
+/* Pointers to external memory allocated for mempools. */
+uintptr_t mempools_ext_ptr[MAX_SEGS_BUFFER_SPLIT];
+
uint32_t param_total_num_mbufs = 0; /**< number of mbufs in all pools - if
* specified on command-line. */
uint16_t stats_period; /**< Period to show statistics (disabled by default) */
@@ -543,6 +549,12 @@ int proc_id;
*/
unsigned int num_procs = 1;
+/*
+ * In case of GPU memory external mbufs use, for simplicity,
+ * the first GPU device in the list.
+ */
+int gpu_id = 0;
+
static void
eth_rx_metadata_negotiate_mp(uint16_t port_id)
{
@@ -1103,6 +1115,79 @@ setup_extbuf(uint32_t nb_mbufs, uint16_t mbuf_sz, unsigned int socket_id,
return ext_num;
}
+static struct rte_mempool *
+gpu_mbuf_pool_create(uint16_t mbuf_seg_size, unsigned int nb_mbuf,
+ unsigned int socket_id, uint16_t port_id,
+ int gpu_id, uintptr_t * mp_addr)
+{
+ int ret = 0;
+ char pool_name[RTE_MEMPOOL_NAMESIZE];
+ struct rte_eth_dev_info dev_info;
+ struct rte_mempool *rte_mp = NULL;
+ struct rte_pktmbuf_extmem gpu_mem;
+ struct rte_gpu_info ginfo;
+ uint8_t gpu_page_shift = 16;
+ uint32_t gpu_page_size = (1UL << gpu_page_shift);
+
+ ret = eth_dev_info_get_print_err(port_id, &dev_info);
+ if (ret != 0)
+ rte_exit(EXIT_FAILURE,
+ "Failed to get device info for port %d\n",
+ port_id);
+
+ mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name), port_id, MBUF_MEM_GPU);
+ if (!is_proc_primary()) {
+ rte_mp = rte_mempool_lookup(pool_name);
+ if (rte_mp == NULL)
+ rte_exit(EXIT_FAILURE,
+ "Get mbuf pool for socket %u failed: %s\n",
+ socket_id, rte_strerror(rte_errno));
+ return rte_mp;
+ }
+
+ if (rte_gpu_info_get(gpu_id, &ginfo))
+ rte_exit(EXIT_FAILURE, "Can't retrieve info about GPU %d - bye\n", gpu_id);
+
+ TESTPMD_LOG(INFO,
+ "create a new mbuf pool <%s>: n=%u, size=%u, socket=%u GPU device=%s\n",
+ pool_name, nb_mbuf, mbuf_seg_size, socket_id, ginfo.name);
+
+ /* Create an external memory mempool using memory allocated on the GPU. */
+
+ gpu_mem.elt_size = RTE_MBUF_DEFAULT_BUF_SIZE;
+ gpu_mem.buf_len = RTE_ALIGN_CEIL(nb_mbuf * gpu_mem.elt_size, gpu_page_size);
+ gpu_mem.buf_iova = RTE_BAD_IOVA;
+
+ gpu_mem.buf_ptr = rte_gpu_mem_alloc(gpu_id, gpu_mem.buf_len);
+ if (gpu_mem.buf_ptr == NULL)
+ rte_exit(EXIT_FAILURE, "Could not allocate GPU device memory\n");
+
+ ret = rte_extmem_register(gpu_mem.buf_ptr, gpu_mem.buf_len, NULL, gpu_mem.buf_iova, gpu_page_size);
+ if (ret)
+ rte_exit(EXIT_FAILURE, "Unable to register addr 0x%p, ret %d\n", gpu_mem.buf_ptr, ret);
+
+ uint16_t pid = 0;
+
+ RTE_ETH_FOREACH_DEV(pid)
+ {
+ ret = rte_dev_dma_map(dev_info.device, gpu_mem.buf_ptr,
+ gpu_mem.buf_iova, gpu_mem.buf_len);
+ if (ret) {
+ rte_exit(EXIT_FAILURE, "Unable to DMA map addr 0x%p for device %s\n",
+ gpu_mem.buf_ptr, dev_info.device->name);
+ }
+ }
+
+ rte_mp = rte_pktmbuf_pool_create_extbuf(pool_name, nb_mbuf, mb_mempool_cache, 0, mbuf_seg_size, socket_id, &gpu_mem, 1);
+ if (rte_mp == NULL) {
+ rte_exit(EXIT_FAILURE, "Creation of GPU mempool <%s> failed\n", pool_name);
+ }
+
+ *mp_addr = (uintptr_t) gpu_mem.buf_ptr;
+
+ return rte_mp;
+}
+
/*
* Configuration initialisation done once at init time.
*/
@@ -1117,7 +1202,7 @@ mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
#endif
- mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name), size_idx);
+ mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name), size_idx, MBUF_MEM_CPU);
if (!is_proc_primary()) {
rte_mp = rte_mempool_lookup(pool_name);
if (rte_mp == NULL)
@@ -1700,19 +1785,42 @@ init_config(void)
for (i = 0; i < num_sockets; i++)
for (j = 0; j < mbuf_data_size_n; j++)
- mempools[i * MAX_SEGS_BUFFER_SPLIT + j] =
- mbuf_pool_create(mbuf_data_size[j],
- nb_mbuf_per_pool,
- socket_ids[i], j);
+ {
+ if (mbuf_mem_types[j] == MBUF_MEM_GPU) {
+ if (rte_gpu_count_avail() == 0)
+ rte_exit(EXIT_FAILURE, "No GPU device available.\n");
+
+ mempools[i * MAX_SEGS_BUFFER_SPLIT + j] =
+ gpu_mbuf_pool_create(mbuf_data_size[j],
+ nb_mbuf_per_pool,
+ socket_ids[i], j, gpu_id,
+ &(mempools_ext_ptr[j]));
+ } else {
+ mempools[i * MAX_SEGS_BUFFER_SPLIT + j] =
+ mbuf_pool_create(mbuf_data_size[j],
+ nb_mbuf_per_pool,
+ socket_ids[i], j);
+ }
+ }
} else {
uint8_t i;
for (i = 0; i < mbuf_data_size_n; i++)
- mempools[i] = mbuf_pool_create
- (mbuf_data_size[i],
- nb_mbuf_per_pool,
- socket_num == UMA_NO_CONFIG ?
- 0 : socket_num, i);
+ {
+ if (mbuf_mem_types[i] == MBUF_MEM_GPU) {
+ mempools[i] = gpu_mbuf_pool_create(mbuf_data_size[i],
+ nb_mbuf_per_pool,
+ socket_num == UMA_NO_CONFIG ? 0 : socket_num,
+ i, gpu_id,
+ &(mempools_ext_ptr[i]));
+ } else {
+ mempools[i] = mbuf_pool_create(mbuf_data_size[i],
+ nb_mbuf_per_pool,
+ socket_num == UMA_NO_CONFIG ?
+ 0 : socket_num, i);
+ }
+ }
+
}
init_port_config();
@@ -3415,8 +3523,11 @@ pmd_test_exit(void)
}
}
for (i = 0 ; i < RTE_DIM(mempools) ; i++) {
- if (mempools[i])
+ if (mempools[i]) {
mempool_free_mp(mempools[i]);
+ if (mbuf_mem_types[i] == MBUF_MEM_GPU)
+ rte_gpu_mem_free(gpu_id, (void *)mempools_ext_ptr[i]);
+ }
}
free(xstats_display);
diff --git a/app/test-pmd/testpmd.h b/app/test-pmd/testpmd.h
index 669ce1e87d..9919044372 100644
--- a/app/test-pmd/testpmd.h
+++ b/app/test-pmd/testpmd.h
@@ -12,6 +12,7 @@
#include <rte_gro.h>
#include <rte_gso.h>
#include <rte_os_shim.h>
+#include <rte_gpudev.h>
#include <cmdline.h>
#include <sys/queue.h>
#ifdef RTE_HAS_JANSSON
@@ -474,6 +475,11 @@ extern uint8_t dcb_config;
extern uint32_t mbuf_data_size_n;
extern uint16_t mbuf_data_size[MAX_SEGS_BUFFER_SPLIT];
/**< Mbuf data space size. */
+enum mbuf_mem_type {
+ MBUF_MEM_CPU,
+ MBUF_MEM_GPU
+};
+extern enum mbuf_mem_type mbuf_mem_types[MAX_SEGS_BUFFER_SPLIT];
extern uint32_t param_total_num_mbufs;
extern uint16_t stats_period;
@@ -717,14 +723,16 @@ current_fwd_lcore(void)
/* Mbuf Pools */
static inline void
mbuf_poolname_build(unsigned int sock_id, char *mp_name,
- int name_size, uint16_t idx)
+ int name_size, uint16_t idx, enum mbuf_mem_type mem_type)
{
+ const char *suffix = mem_type == MBUF_MEM_GPU ? "_gpu" : "";
+
if (!idx)
snprintf(mp_name, name_size,
- MBUF_POOL_NAME_PFX "_%u", sock_id);
+ MBUF_POOL_NAME_PFX "_%u%s", sock_id, suffix);
else
snprintf(mp_name, name_size,
- MBUF_POOL_NAME_PFX "_%hu_%hu", (uint16_t)sock_id, idx);
+ MBUF_POOL_NAME_PFX "_%hu_%hu%s", (uint16_t)sock_id, idx, suffix);
}
static inline struct rte_mempool *
@@ -732,7 +740,7 @@ mbuf_pool_find(unsigned int sock_id, uint16_t idx)
{
char pool_name[RTE_MEMPOOL_NAMESIZE];
- mbuf_poolname_build(sock_id, pool_name, sizeof(pool_name), idx);
+ mbuf_poolname_build(sock_id, pool_name, sizeof(pool_name), idx, mbuf_mem_types[idx]);
return rte_mempool_lookup((const char *)pool_name);
}
diff --git a/doc/guides/testpmd_app_ug/run_app.rst b/doc/guides/testpmd_app_ug/run_app.rst
index 30edef07ea..ede7b79abb 100644
--- a/doc/guides/testpmd_app_ug/run_app.rst
+++ b/doc/guides/testpmd_app_ug/run_app.rst
@@ -119,6 +119,9 @@ The command line options are:
The default value is 2048. If multiple mbuf-size values are specified the
extra memory pools will be created for allocating mbufs to receive packets
with buffer splitting features.
+ Providing mbuf size with a 'g' suffix (e.g. ``--mbuf-size=2048g``),
+ will cause the mempool to be created on a GPU memory area allocated.
+ This option is currently limited to iofwd engine with the first GPU.
* ``--total-num-mbufs=N``
--
2.17.1
* RE: [PATCH v2 1/1] app/testpmd: add GPU memory option in iofwd engine
2021-11-11 21:41 ` [PATCH v2 1/1] " eagostini
@ 2021-11-16 16:28 ` Slava Ovsiienko
2021-11-16 17:16 ` Ananyev, Konstantin
2021-11-16 17:55 ` Ferruh Yigit
1 sibling, 1 reply; 31+ messages in thread
From: Slava Ovsiienko @ 2021-11-16 16:28 UTC (permalink / raw)
To: Elena Agostini, dev
Hi, Elena
> -----Original Message-----
> From: eagostini@nvidia.com <eagostini@nvidia.com>
> Sent: Thursday, November 11, 2021 23:42
> To: dev@dpdk.org
> Cc: Elena Agostini <eagostini@nvidia.com>
> Subject: [PATCH v2 1/1] app/testpmd: add GPU memory option in iofwd
> engine
>
> From: eagostini <eagostini@nvidia.com>
Could you please set the "Author" field correctly, i.e. "Elena Agostini <eagostini@nvidia.com>"?
Otherwise, we see in the git log:
"Author: eagostini <eagostini@nvidia.com>"
Compare with:
"Author: Bing Zhao <bingz@nvidia.com>"
"Author: Viacheslav Ovsiienko <viacheslavo@nvidia.com>"
Also, please see the codestyle issues: too many code lines go far beyond 100 chars.
Lines like this:
+ if (mbuf_mem_types[idx] == MBUF_MEM_GPU && strcmp(cur_fwd_eng->fwd_mode_name, "io") != 0) {
can easily be split.
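For instance, a split along these lines (illustrative only) keeps every line within the limit:

	if (mbuf_mem_types[idx] == MBUF_MEM_GPU &&
	    strcmp(cur_fwd_eng->fwd_mode_name, "io") != 0) {
		...
	}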
>>app/testpmd: add GPU memory option in iofwd
As I see from the patch, it adds a new mbuf pool type (residing in GPU memory).
Maybe we should reword the title?
" app/testpmd: add GPU memory option for mbuf pools"
>
> This patch introduces GPU memory in testpmd through the gpudev library.
> Testpmd can be used for network benchmarks when using GPU memory
> instead of regular CPU memory to send and receive packets.
> This option is currently limited to iofwd engine.
Why? Because iofwd is the only mode not touching the mbuf data?
Is it critical for functionality? Is the GPU mbuf pool memory accessible from the CPU side?
I would explain the reasons (for supporting iofwd mode only) in the commit message.
>
> Signed-off-by: Elena Agostini <eagostini@nvidia.com>
>
> Depends-on: series-19465 ("GPU library")
> Depends-on: series-20422 ("common/mlx5: fix external memory pool
> registration")
> ---
> app/test-pmd/cmdline.c | 14 +++
> app/test-pmd/meson.build | 2 +-
> app/test-pmd/parameters.c | 13 ++-
> app/test-pmd/testpmd.c | 133 +++++++++++++++++++++++---
> app/test-pmd/testpmd.h | 16 +++-
> doc/guides/testpmd_app_ug/run_app.rst | 3 +
> 6 files changed, 164 insertions(+), 17 deletions(-)
>
> diff --git a/app/test-pmd/cmdline.c b/app/test-pmd/cmdline.c index
> 4f51b259fe..36193bc566 100644
> --- a/app/test-pmd/cmdline.c
> +++ b/app/test-pmd/cmdline.c
> @@ -3614,6 +3614,7 @@ parse_item_list(const char *str, const char
The "parse_item_list()" is used to parse the list looking like "number0, number1, ...., numberN"
and invoked for "core","port", "txtimes", etc. Not sure all of these params need to handle "g"
suffix. We should allow "g" processing only for "mbuf-size". We have "item_name" argument
to check whether we are invoked on "mbuf-size".
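Something along these lines would do (just a sketch; it assumes the mbuf-size invocation passes "mbuf-size" as item_name):

	if (c == 'g') {
		/* the 'g' suffix only makes sense for the mbuf-size list */
		if (strcmp(item_name, "mbuf-size") != 0) {
			fprintf(stderr, "'g' suffix is not valid for %s\n", item_name);
			return 0;
		}
		gpu_mbuf = 1;
		continue;
	}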
> *item_name, unsigned int max_items,
> unsigned int j;
> int value_ok;
> char c;
> + int gpu_mbuf = 0;
>
> /*
> * First parse all items in the list and store their value.
> @@ -3628,6 +3629,14 @@ parse_item_list(const char *str, const char
> *item_name, unsigned int max_items,
> value_ok = 1;
> continue;
> }
> + if (c == 'g') {
We should check whether "g" is the single-char suffix (the last char).
Otherwise, "20g48" and "g20gggg48" would also be valid values.
> + /*
> + * When this flag is set, mbufs for this segment
> + * will be created on GPU memory.
> + */
> + gpu_mbuf = 1;
> + continue;
> + }
> if (c != ',') {
> fprintf(stderr, "character %c is not a decimal digit\n",
> c);
> return 0;
> @@ -3640,6 +3649,8 @@ parse_item_list(const char *str, const char
> *item_name, unsigned int max_items,
> parsed_items[nb_item] = value;
> value_ok = 0;
> value = 0;
> + mbuf_mem_types[nb_item] = gpu_mbuf ?
> MBUF_MEM_GPU : MBUF_MEM_CPU;
> + gpu_mbuf = 0;
> }
> nb_item++;
> }
> @@ -3648,6 +3659,9 @@ parse_item_list(const char *str, const char
> *item_name, unsigned int max_items,
> item_name, nb_item + 1, max_items);
> return 0;
> }
> +
> + mbuf_mem_types[nb_item] = gpu_mbuf ? MBUF_MEM_GPU :
> MBUF_MEM_CPU;
> +
> parsed_items[nb_item++] = value;
> if (! check_unique_values)
> return nb_item;
> diff --git a/app/test-pmd/meson.build b/app/test-pmd/meson.build index
> d5df52c470..5c8ca68c9d 100644
> --- a/app/test-pmd/meson.build
> +++ b/app/test-pmd/meson.build
> @@ -32,7 +32,7 @@ if dpdk_conf.has('RTE_HAS_JANSSON')
> ext_deps += jansson_dep
> endif
>
> -deps += ['ethdev', 'gro', 'gso', 'cmdline', 'metrics', 'bus_pci']
> +deps += ['ethdev', 'gro', 'gso', 'cmdline', 'metrics', 'bus_pci',
> +'gpudev']
> if dpdk_conf.has('RTE_CRYPTO_SCHEDULER')
> deps += 'crypto_scheduler'
> endif
> diff --git a/app/test-pmd/parameters.c b/app/test-pmd/parameters.c index
> 0974b0a38f..d41f7f220b 100644
> --- a/app/test-pmd/parameters.c
> +++ b/app/test-pmd/parameters.c
> @@ -87,7 +87,10 @@ usage(char* progname)
> "in NUMA mode.\n");
> printf(" --mbuf-size=N,[N1[,..Nn]: set the data size of mbuf to "
> "N bytes. If multiple numbers are specified the extra pools "
> - "will be created to receive with packet split features\n");
> + "will be created to receive with packet split features\n"
> + "Use 'g' suffix for GPU memory.\n"
> + "If no or an unrecognized suffix is provided, CPU is
> assumed\n");
Unrecognized suffix? I would emit an error and abort the launch.
> +
> printf(" --total-num-mbufs=N: set the number of mbufs to be
> allocated "
> "in mbuf pools.\n");
> printf(" --max-pkt-len=N: set the maximum size of packet to N
> bytes.\n"); @@ -595,6 +598,7 @@ launch_args_parse(int argc, char** argv)
> struct rte_eth_dev_info dev_info;
> uint16_t rec_nb_pkts;
> int ret;
> + uint32_t idx = 0;
>
> static struct option lgopts[] = {
> { "help", 0, 0, 0 },
> @@ -1538,4 +1542,11 @@ launch_args_parse(int argc, char** argv)
> "ignored\n");
> mempool_flags = 0;
> }
> +
> + for (idx = 0; idx < mbuf_data_size_n; idx++) {
> + if (mbuf_mem_types[idx] == MBUF_MEM_GPU &&
> strcmp(cur_fwd_eng->fwd_mode_name, "io") != 0) {
> + fprintf(stderr, "GPU memory mbufs can be used with
> iofwd engine only\n");
> + rte_exit(EXIT_FAILURE, "Command line is
> incorrect\n");
> + }
> + }
Please note, the forwarding mode can be changed from the interactive prompt with the "set fwd <mode>" command.
If iofwd mode is crucial for GPU functionality, we should prevent switching to other modes when GPU pools are engaged.
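A guard roughly like this in the forwarding-mode setter would do (sketch only; "fwd_mode_name" stands for whatever name the setter receives, the arrays are the ones added by this patch):

	uint32_t i;

	for (i = 0; i < mbuf_data_size_n; i++) {
		if (mbuf_mem_types[i] == MBUF_MEM_GPU &&
		    strcmp(fwd_mode_name, "io") != 0) {
			fprintf(stderr, "GPU mbuf pools require the iofwd engine\n");
			return;
		}
	}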
> }
> diff --git a/app/test-pmd/testpmd.c b/app/test-pmd/testpmd.c index
> a66dfb297c..1af235c4d8 100644
> --- a/app/test-pmd/testpmd.c
> +++ b/app/test-pmd/testpmd.c
> @@ -205,6 +205,12 @@ uint32_t mbuf_data_size_n = 1; /* Number of
> specified mbuf sizes. */ uint16_t mbuf_data_size[MAX_SEGS_BUFFER_SPLIT]
> = {
> DEFAULT_MBUF_DATA_SIZE
> }; /**< Mbuf data space size. */
> +
> +/* Mbuf memory types. */
> +enum mbuf_mem_type mbuf_mem_types[MAX_SEGS_BUFFER_SPLIT];
> +/* Pointers to external memory allocated for mempools. */ uintptr_t
> +mempools_ext_ptr[MAX_SEGS_BUFFER_SPLIT];
> +
> uint32_t param_total_num_mbufs = 0; /**< number of mbufs in all pools - if
> * specified on command-line. */ uint16_t
> stats_period; /**< Period to show statistics (disabled by default) */ @@ -
> 543,6 +549,12 @@ int proc_id;
> */
> unsigned int num_procs = 1;
>
> +/*
> + * In case of GPU memory external mbufs use, for simplicity,
> + * the first GPU device in the list.
> + */
> +int gpu_id = 0;
It is assigned zero and never changes. Do we support only the first GPU?
This limitation should be mentioned in the documentation.
> +
> static void
> eth_rx_metadata_negotiate_mp(uint16_t port_id) { @@ -1103,6 +1115,79
> @@ setup_extbuf(uint32_t nb_mbufs, uint16_t mbuf_sz, unsigned int
> socket_id,
> return ext_num;
> }
>
> +static struct rte_mempool *
> +gpu_mbuf_pool_create(uint16_t mbuf_seg_size, unsigned int nb_mbuf,
> + unsigned int socket_id, uint16_t port_id,
> + int gpu_id, uintptr_t * mp_addr)
> +{
> + int ret = 0;
> + char pool_name[RTE_MEMPOOL_NAMESIZE];
> + struct rte_eth_dev_info dev_info;
> + struct rte_mempool *rte_mp = NULL;
> + struct rte_pktmbuf_extmem gpu_mem;
> + struct rte_gpu_info ginfo;
> + uint8_t gpu_page_shift = 16;
> + uint32_t gpu_page_size = (1UL << gpu_page_shift);
> +
> + ret = eth_dev_info_get_print_err(port_id, &dev_info);
> + if (ret != 0)
> + rte_exit(EXIT_FAILURE,
> + "Failed to get device info for port %d\n",
> + port_id);
> +
> + mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name),
> port_id, MBUF_MEM_GPU);
> + if (!is_proc_primary()) {
> + rte_mp = rte_mempool_lookup(pool_name);
> + if (rte_mp == NULL)
> + rte_exit(EXIT_FAILURE,
> + "Get mbuf pool for socket %u failed: %s\n",
> + socket_id, rte_strerror(rte_errno));
> + return rte_mp;
> + }
> +
> + if (rte_gpu_info_get(gpu_id, &ginfo))
> + rte_exit(EXIT_FAILURE, "Can't retrieve info about GPU %d -
> bye\n",
> +gpu_id);
> +
> + TESTPMD_LOG(INFO,
> + "create a new mbuf pool <%s>: n=%u, size=%u, socket=%u
> GPU device=%s\n",
> + pool_name, nb_mbuf, mbuf_seg_size, socket_id, ginfo.name);
> +
> + /* Create an external memory mempool using memory allocated on
> the
> +GPU. */
> +
> + gpu_mem.elt_size = RTE_MBUF_DEFAULT_BUF_SIZE;
> + gpu_mem.buf_len = RTE_ALIGN_CEIL(nb_mbuf * gpu_mem.elt_size,
> gpu_page_size);
> + gpu_mem.buf_iova = RTE_BAD_IOVA;
> +
> + gpu_mem.buf_ptr = rte_gpu_mem_alloc(gpu_id, gpu_mem.buf_len);
> + if (gpu_mem.buf_ptr == NULL)
> + rte_exit(EXIT_FAILURE, "Could not allocate GPU device
> memory\n");
> +
> + ret = rte_extmem_register(gpu_mem.buf_ptr, gpu_mem.buf_len,
> NULL, gpu_mem.buf_iova, gpu_page_size);
> + if (ret)
> + rte_exit(EXIT_FAILURE, "Unable to register addr 0x%p, ret
> %d\n",
> +gpu_mem.buf_ptr, ret);
> +
> + uint16_t pid = 0;
> +
> + RTE_ETH_FOREACH_DEV(pid)
> + {
> + ret = rte_dev_dma_map(dev_info.device, gpu_mem.buf_ptr,
> + gpu_mem.buf_iova,
> gpu_mem.buf_len);
> + if (ret) {
> + rte_exit(EXIT_FAILURE, "Unable to DMA map addr
> 0x%p for device %s\n",
> + gpu_mem.buf_ptr, dev_info.device-
> >name);
> + }
> + }
> +
> + rte_mp = rte_pktmbuf_pool_create_extbuf(pool_name, nb_mbuf,
> mb_mempool_cache, 0, mbuf_seg_size, socket_id, &gpu_mem, 1);
> + if (rte_mp == NULL) {
> + rte_exit(EXIT_FAILURE, "Creation of GPU mempool <%s>
> failed\n", pool_name);
> + }
> +
> + *mp_addr = (uintptr_t) gpu_mem.buf_ptr;
> +
> + return rte_mp;
> +}
> +
> /*
> * Configuration initialisation done once at init time.
> */
> @@ -1117,7 +1202,7 @@ mbuf_pool_create(uint16_t mbuf_seg_size,
> unsigned nb_mbuf,
>
> mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size; #endif
> - mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name),
> size_idx);
> + mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name),
> size_idx,
> +MBUF_MEM_CPU);
> if (!is_proc_primary()) {
> rte_mp = rte_mempool_lookup(pool_name);
> if (rte_mp == NULL)
> @@ -1700,19 +1785,42 @@ init_config(void)
>
> for (i = 0; i < num_sockets; i++)
> for (j = 0; j < mbuf_data_size_n; j++)
> - mempools[i * MAX_SEGS_BUFFER_SPLIT + j]
> =
> - mbuf_pool_create(mbuf_data_size[j],
> - nb_mbuf_per_pool,
> - socket_ids[i], j);
> + {
> + if (mbuf_mem_types[j] == MBUF_MEM_GPU)
> {
> + if (rte_gpu_count_avail() == 0)
> + rte_exit(EXIT_FAILURE, "No
> GPU device available.\n");
> +
> + mempools[i *
> MAX_SEGS_BUFFER_SPLIT + j] =
> +
> gpu_mbuf_pool_create(mbuf_data_size[j],
What about GPU/CPU adherence? Do we create one GPU pool per CPU socket,
disregarding hardware topology entirely?
We can mitigate the issue with the "--socket-mem/--socket-num" parameters, though.
> +
> nb_mbuf_per_pool,
> +
> socket_ids[i], j, gpu_id,
> +
> &(mempools_ext_ptr[j]));
> + } else {
> + mempools[i *
> MAX_SEGS_BUFFER_SPLIT + j] =
> +
> mbuf_pool_create(mbuf_data_size[j],
> +
> nb_mbuf_per_pool,
> + socket_ids[i],
> j);
> + }
> + }
> } else {
> uint8_t i;
>
> for (i = 0; i < mbuf_data_size_n; i++)
> - mempools[i] = mbuf_pool_create
> - (mbuf_data_size[i],
> - nb_mbuf_per_pool,
> - socket_num == UMA_NO_CONFIG ?
> - 0 : socket_num, i);
> + {
> + if (mbuf_mem_types[i] == MBUF_MEM_GPU) {
> + mempools[i] =
> gpu_mbuf_pool_create(mbuf_data_size[i],
> +
> nb_mbuf_per_pool,
> +
> socket_num == UMA_NO_CONFIG ? 0 : socket_num,
> +
> i, gpu_id,
> +
> &(mempools_ext_ptr[i]));
> + } else {
> + mempools[i] =
> mbuf_pool_create(mbuf_data_size[i],
> + nb_mbuf_per_pool,
> + socket_num ==
> UMA_NO_CONFIG ?
> + 0 : socket_num, i);
> + }
> + }
> +
> }
>
> init_port_config();
> @@ -3415,8 +3523,11 @@ pmd_test_exit(void)
> }
> }
> for (i = 0 ; i < RTE_DIM(mempools) ; i++) {
> - if (mempools[i])
> + if (mempools[i]) {
> mempool_free_mp(mempools[i]);
> + if (mbuf_mem_types[i] == MBUF_MEM_GPU)
> + rte_gpu_mem_free(gpu_id, (void
> *)mempools_ext_ptr[i]);
> + }
> }
> free(xstats_display);
>
> diff --git a/app/test-pmd/testpmd.h b/app/test-pmd/testpmd.h index
> 669ce1e87d..9919044372 100644
> --- a/app/test-pmd/testpmd.h
> +++ b/app/test-pmd/testpmd.h
> @@ -12,6 +12,7 @@
> #include <rte_gro.h>
> #include <rte_gso.h>
> #include <rte_os_shim.h>
> +#include <rte_gpudev.h>
> #include <cmdline.h>
> #include <sys/queue.h>
> #ifdef RTE_HAS_JANSSON
> @@ -474,6 +475,11 @@ extern uint8_t dcb_config; extern uint32_t
> mbuf_data_size_n; extern uint16_t
> mbuf_data_size[MAX_SEGS_BUFFER_SPLIT];
> /**< Mbuf data space size. */
> +enum mbuf_mem_type {
> + MBUF_MEM_CPU,
> + MBUF_MEM_GPU
> +};
> +extern enum mbuf_mem_type
> mbuf_mem_types[MAX_SEGS_BUFFER_SPLIT];
> extern uint32_t param_total_num_mbufs;
>
> extern uint16_t stats_period;
> @@ -717,14 +723,16 @@ current_fwd_lcore(void)
> /* Mbuf Pools */
> static inline void
> mbuf_poolname_build(unsigned int sock_id, char *mp_name,
> - int name_size, uint16_t idx)
> + int name_size, uint16_t idx, enum mbuf_mem_type
> mem_type)
> {
> + const char *suffix = mem_type == MBUF_MEM_GPU ? "_gpu" : "";
> +
> if (!idx)
> snprintf(mp_name, name_size,
> - MBUF_POOL_NAME_PFX "_%u", sock_id);
> + MBUF_POOL_NAME_PFX "_%u%s", sock_id, suffix);
> else
> snprintf(mp_name, name_size,
> - MBUF_POOL_NAME_PFX "_%hu_%hu",
> (uint16_t)sock_id, idx);
> + MBUF_POOL_NAME_PFX "_%hu_%hu%s",
> (uint16_t)sock_id, idx, suffix);
> }
>
> static inline struct rte_mempool *
> @@ -732,7 +740,7 @@ mbuf_pool_find(unsigned int sock_id, uint16_t idx) {
> char pool_name[RTE_MEMPOOL_NAMESIZE];
>
> - mbuf_poolname_build(sock_id, pool_name, sizeof(pool_name), idx);
> + mbuf_poolname_build(sock_id, pool_name, sizeof(pool_name), idx,
> +mbuf_mem_types[idx]);
> return rte_mempool_lookup((const char *)pool_name); }
>
> diff --git a/doc/guides/testpmd_app_ug/run_app.rst
> b/doc/guides/testpmd_app_ug/run_app.rst
> index 30edef07ea..ede7b79abb 100644
> --- a/doc/guides/testpmd_app_ug/run_app.rst
> +++ b/doc/guides/testpmd_app_ug/run_app.rst
> @@ -119,6 +119,9 @@ The command line options are:
> The default value is 2048. If multiple mbuf-size values are specified the
> extra memory pools will be created for allocating mbufs to receive packets
> with buffer splitting features.
> + Providing mbuf size with a 'g' suffix (e.g. ``--mbuf-size=2048g``),
> + will cause the mempool to be created on a GPU memory area allocated.
> + This option is currently limited to iofwd engine with the first GPU.
Mmm, if we have multiple GPUs, we have no way to specify on which one the pool should be allocated.
The syntax is not complete ☹. Possibly we should specify the GPU port id after the suffix? Like 2048g2?
If the port index is omitted, we should assume there is only one GPU and check this.
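Parsing-wise it could look something like this (sketch only; the scan index and the per-item gpu_idx storage are assumptions):

	if (c == 'g') {
		char *end;
		long idx = strtol(&str[i + 1], &end, 10);

		gpu_mbuf = 1;
		/* "2048g" -> GPU 0, "2048g2" -> GPU 2 */
		gpu_idx = (end == &str[i + 1]) ? 0 : (int)idx;
		i = (unsigned int)(end - str) - 1; /* resume after the consumed digits */
		continue;
	}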
With best regards,
Slava
* RE: [PATCH v2 1/1] app/testpmd: add GPU memory option in iofwd engine
2021-11-16 16:28 ` Slava Ovsiienko
@ 2021-11-16 17:16 ` Ananyev, Konstantin
2021-11-16 18:15 ` Elena Agostini
0 siblings, 1 reply; 31+ messages in thread
From: Ananyev, Konstantin @ 2021-11-16 17:16 UTC (permalink / raw)
To: Slava Ovsiienko, Elena Agostini, dev
> Could you, please, set "Author" correctly - "Elena Agostini <eagostini@nvidia.com>"?
> Otherwise, we see in the git log:
>
> "Author: eagostini <eagostini@nvidia.com>"
>
> Compare with:
> "Author: Bing Zhao <bingz@nvidia.com>"
> "Author: Viacheslav Ovsiienko <viacheslavo@nvidia.com>"
>
> Also, please, see the codestyle issues, too many code lines far beyond 100 chars.
> Lines like this:
> + if (mbuf_mem_types[idx] == MBUF_MEM_GPU && strcmp(cur_fwd_eng->fwd_mode_name, "io") != 0) {
> can be easily be splitted.
>
> >>app/testpmd: add GPU memory option in iofwd
> As I see from the patch - it adds the new mbuf pool type (residing on GPU memory).
> May be, we should reword the title?
> " app/testpmd: add GPU memory option for mbuf pools"
>
> >
> > This patch introduces GPU memory in testpmd through the gpudev library.
> > Testpmd can be used for network benchmarks when using GPU memory
> > instead of regular CPU memory to send and receive packets.
> > This option is currently limited to iofwd engine.
> Why? Because iofwd the only mode not touching the mbuf data?
> Is it critical for functionality? Is GPU mbuf pool memory accessible from CPU side?
> I would explain the reasons (for supporting iofwd mode only) in commit message.
>
> >
> > Signed-off-by: Elena Agostini <eagostini@nvidia.com>
> >
> > Depends-on: series-19465 ("GPU library")
> > Depends-on: series-20422 ("common/mlx5: fix external memory pool
> > registration")
> > ---
> > app/test-pmd/cmdline.c | 14 +++
> > app/test-pmd/meson.build | 2 +-
> > app/test-pmd/parameters.c | 13 ++-
> > app/test-pmd/testpmd.c | 133 +++++++++++++++++++++++---
> > app/test-pmd/testpmd.h | 16 +++-
> > doc/guides/testpmd_app_ug/run_app.rst | 3 +
> > 6 files changed, 164 insertions(+), 17 deletions(-)
> >
> > diff --git a/app/test-pmd/cmdline.c b/app/test-pmd/cmdline.c index
> > 4f51b259fe..36193bc566 100644
> > --- a/app/test-pmd/cmdline.c
> > +++ b/app/test-pmd/cmdline.c
> > @@ -3614,6 +3614,7 @@ parse_item_list(const char *str, const char
>
> The "parse_item_list()" is used to parse the list looking like "number0, number1, ...., numberN"
> and invoked for "core","port", "txtimes", etc. Not sure all of these params need to handle "g"
> suffix. We should allow "g" processing only for "mbuf-size". We have "item_name" argument
> to check whether we are invoked on "mbuf-size".
>
> > *item_name, unsigned int max_items,
> > unsigned int j;
> > int value_ok;
> > char c;
> > + int gpu_mbuf = 0;
> >
> > /*
> > * First parse all items in the list and store their value.
> > @@ -3628,6 +3629,14 @@ parse_item_list(const char *str, const char
> > *item_name, unsigned int max_items,
> > value_ok = 1;
> > continue;
> > }
> > + if (c == 'g') {
> We should check whether "g" is the single char suffix (last char).
> Otherwise, "20g48" and "g20gggg48" would be also valid values.
>
> > + /*
> > + * When this flag is set, mbufs for this segment
> > + * will be created on GPU memory.
> > + */
> > + gpu_mbuf = 1;
> > + continue;
> > + }
> > if (c != ',') {
> > fprintf(stderr, "character %c is not a decimal digit\n",
> > c);
> > return 0;
> > @@ -3640,6 +3649,8 @@ parse_item_list(const char *str, const char
> > *item_name, unsigned int max_items,
> > parsed_items[nb_item] = value;
> > value_ok = 0;
> > value = 0;
> > + mbuf_mem_types[nb_item] = gpu_mbuf ?
> > MBUF_MEM_GPU : MBUF_MEM_CPU;
> > + gpu_mbuf = 0;
> > }
> > nb_item++;
> > }
> > @@ -3648,6 +3659,9 @@ parse_item_list(const char *str, const char
> > *item_name, unsigned int max_items,
> > item_name, nb_item + 1, max_items);
> > return 0;
> > }
> > +
> > + mbuf_mem_types[nb_item] = gpu_mbuf ? MBUF_MEM_GPU :
> > MBUF_MEM_CPU;
> > +
> > parsed_items[nb_item++] = value;
> > if (! check_unique_values)
> > return nb_item;
> > diff --git a/app/test-pmd/meson.build b/app/test-pmd/meson.build index
> > d5df52c470..5c8ca68c9d 100644
> > --- a/app/test-pmd/meson.build
> > +++ b/app/test-pmd/meson.build
> > @@ -32,7 +32,7 @@ if dpdk_conf.has('RTE_HAS_JANSSON')
> > ext_deps += jansson_dep
> > endif
> >
> > -deps += ['ethdev', 'gro', 'gso', 'cmdline', 'metrics', 'bus_pci']
> > +deps += ['ethdev', 'gro', 'gso', 'cmdline', 'metrics', 'bus_pci',
> > +'gpudev']
> > if dpdk_conf.has('RTE_CRYPTO_SCHEDULER')
> > deps += 'crypto_scheduler'
> > endif
> > diff --git a/app/test-pmd/parameters.c b/app/test-pmd/parameters.c index
> > 0974b0a38f..d41f7f220b 100644
> > --- a/app/test-pmd/parameters.c
> > +++ b/app/test-pmd/parameters.c
> > @@ -87,7 +87,10 @@ usage(char* progname)
> > "in NUMA mode.\n");
> > printf(" --mbuf-size=N,[N1[,..Nn]: set the data size of mbuf to "
> > "N bytes. If multiple numbers are specified the extra pools "
> > - "will be created to receive with packet split features\n");
> > + "will be created to receive with packet split features\n"
> > + "Use 'g' suffix for GPU memory.\n"
> > + "If no or an unrecognized suffix is provided, CPU is
> > assumed\n");
> Unrecognized suffix? I would emit an error and abort the launch.
>
> > +
> > printf(" --total-num-mbufs=N: set the number of mbufs to be
> > allocated "
> > "in mbuf pools.\n");
> > printf(" --max-pkt-len=N: set the maximum size of packet to N
> > bytes.\n"); @@ -595,6 +598,7 @@ launch_args_parse(int argc, char** argv)
> > struct rte_eth_dev_info dev_info;
> > uint16_t rec_nb_pkts;
> > int ret;
> > + uint32_t idx = 0;
> >
> > static struct option lgopts[] = {
> > { "help", 0, 0, 0 },
> > @@ -1538,4 +1542,11 @@ launch_args_parse(int argc, char** argv)
> > "ignored\n");
> > mempool_flags = 0;
> > }
> > +
> > + for (idx = 0; idx < mbuf_data_size_n; idx++) {
> > + if (mbuf_mem_types[idx] == MBUF_MEM_GPU &&
> > strcmp(cur_fwd_eng->fwd_mode_name, "io") != 0) {
> > + fprintf(stderr, "GPU memory mbufs can be used with
> > iofwd engine only\n");
> > + rte_exit(EXIT_FAILURE, "Command line is
> > incorrect\n");
> > + }
> > + }
> Please, note, the forwarding mode can be changed from interactive prompt with "set fwd <mode>" command.
> If iofwd mode is crucial for GPU functionality - we should prevent switching to other modes if GPU pools are engaged.
>
>
> > }
> > diff --git a/app/test-pmd/testpmd.c b/app/test-pmd/testpmd.c index
> > a66dfb297c..1af235c4d8 100644
> > --- a/app/test-pmd/testpmd.c
> > +++ b/app/test-pmd/testpmd.c
> > @@ -205,6 +205,12 @@ uint32_t mbuf_data_size_n = 1; /* Number of
> > specified mbuf sizes. */ uint16_t mbuf_data_size[MAX_SEGS_BUFFER_SPLIT]
> > = {
> > DEFAULT_MBUF_DATA_SIZE
> > }; /**< Mbuf data space size. */
> > +
> > +/* Mbuf memory types. */
> > +enum mbuf_mem_type mbuf_mem_types[MAX_SEGS_BUFFER_SPLIT];
> > +/* Pointers to external memory allocated for mempools. */ uintptr_t
> > +mempools_ext_ptr[MAX_SEGS_BUFFER_SPLIT];
> > +
> > uint32_t param_total_num_mbufs = 0; /**< number of mbufs in all pools - if
> > * specified on command-line. */ uint16_t
> > stats_period; /**< Period to show statistics (disabled by default) */ @@ -
> > 543,6 +549,12 @@ int proc_id;
> > */
> > unsigned int num_procs = 1;
> >
> > +/*
> > + * In case of GPU memory external mbufs use, for simplicity,
> > + * the first GPU device in the list.
> > + */
> > +int gpu_id = 0;
> It is assigned with zero and never changes. Support the first GPU only?
> This limitation should be mentioned in documentation.
>
> > +
> > static void
> > eth_rx_metadata_negotiate_mp(uint16_t port_id) { @@ -1103,6 +1115,79
> > @@ setup_extbuf(uint32_t nb_mbufs, uint16_t mbuf_sz, unsigned int
> > socket_id,
> > return ext_num;
> > }
> >
> > +static struct rte_mempool *
> > +gpu_mbuf_pool_create(uint16_t mbuf_seg_size, unsigned int nb_mbuf,
> > + unsigned int socket_id, uint16_t port_id,
> > + int gpu_id, uintptr_t * mp_addr)
> > +{
> > + int ret = 0;
> > + char pool_name[RTE_MEMPOOL_NAMESIZE];
> > + struct rte_eth_dev_info dev_info;
> > + struct rte_mempool *rte_mp = NULL;
> > + struct rte_pktmbuf_extmem gpu_mem;
> > + struct rte_gpu_info ginfo;
> > + uint8_t gpu_page_shift = 16;
> > + uint32_t gpu_page_size = (1UL << gpu_page_shift);
> > +
> > + ret = eth_dev_info_get_print_err(port_id, &dev_info);
> > + if (ret != 0)
> > + rte_exit(EXIT_FAILURE,
> > + "Failed to get device info for port %d\n",
> > + port_id);
> > +
> > + mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name),
> > port_id, MBUF_MEM_GPU);
> > + if (!is_proc_primary()) {
> > + rte_mp = rte_mempool_lookup(pool_name);
> > + if (rte_mp == NULL)
> > + rte_exit(EXIT_FAILURE,
> > + "Get mbuf pool for socket %u failed: %s\n",
> > + socket_id, rte_strerror(rte_errno));
> > + return rte_mp;
> > + }
> > +
> > + if (rte_gpu_info_get(gpu_id, &ginfo))
> > + rte_exit(EXIT_FAILURE, "Can't retrieve info about GPU %d -
> > bye\n",
> > +gpu_id);
> > +
> > + TESTPMD_LOG(INFO,
> > + "create a new mbuf pool <%s>: n=%u, size=%u, socket=%u
> > GPU device=%s\n",
> > + pool_name, nb_mbuf, mbuf_seg_size, socket_id, ginfo.name);
> > +
> > + /* Create an external memory mempool using memory allocated on
> > the
> > +GPU. */
> > +
> > + gpu_mem.elt_size = RTE_MBUF_DEFAULT_BUF_SIZE;
> > + gpu_mem.buf_len = RTE_ALIGN_CEIL(nb_mbuf * gpu_mem.elt_size,
> > gpu_page_size);
> > + gpu_mem.buf_iova = RTE_BAD_IOVA;
> > +
> > + gpu_mem.buf_ptr = rte_gpu_mem_alloc(gpu_id, gpu_mem.buf_len);
> > + if (gpu_mem.buf_ptr == NULL)
> > + rte_exit(EXIT_FAILURE, "Could not allocate GPU device
> > memory\n");
> > +
> > + ret = rte_extmem_register(gpu_mem.buf_ptr, gpu_mem.buf_len,
> > NULL, gpu_mem.buf_iova, gpu_page_size);
> > + if (ret)
> > + rte_exit(EXIT_FAILURE, "Unable to register addr 0x%p, ret
> > %d\n",
> > +gpu_mem.buf_ptr, ret);
> > +
> > + uint16_t pid = 0;
> > +
> > + RTE_ETH_FOREACH_DEV(pid)
> > + {
> > + ret = rte_dev_dma_map(dev_info.device, gpu_mem.buf_ptr,
> > + gpu_mem.buf_iova,
> > gpu_mem.buf_len);
> > + if (ret) {
> > + rte_exit(EXIT_FAILURE, "Unable to DMA map addr
> > 0x%p for device %s\n",
> > + gpu_mem.buf_ptr, dev_info.device-
> > >name);
> > + }
> > + }
> > +
> > + rte_mp = rte_pktmbuf_pool_create_extbuf(pool_name, nb_mbuf,
> > mb_mempool_cache, 0, mbuf_seg_size, socket_id, &gpu_mem, 1);
> > + if (rte_mp == NULL) {
> > + rte_exit(EXIT_FAILURE, "Creation of GPU mempool <%s>
> > failed\n", pool_name);
> > + }
> > +
> > + *mp_addr = (uintptr_t) gpu_mem.buf_ptr;
> > +
> > + return rte_mp;
> > +}
> > +
> > /*
> > * Configuration initialisation done once at init time.
> > */
> > @@ -1117,7 +1202,7 @@ mbuf_pool_create(uint16_t mbuf_seg_size,
> > unsigned nb_mbuf,
> >
> > mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size; #endif
> > - mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name),
> > size_idx);
> > + mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name),
> > size_idx,
> > +MBUF_MEM_CPU);
> > if (!is_proc_primary()) {
> > rte_mp = rte_mempool_lookup(pool_name);
> > if (rte_mp == NULL)
> > @@ -1700,19 +1785,42 @@ init_config(void)
> >
> > for (i = 0; i < num_sockets; i++)
> > for (j = 0; j < mbuf_data_size_n; j++)
> > - mempools[i * MAX_SEGS_BUFFER_SPLIT + j]
> > =
> > - mbuf_pool_create(mbuf_data_size[j],
> > - nb_mbuf_per_pool,
> > - socket_ids[i], j);
> > + {
> > + if (mbuf_mem_types[j] == MBUF_MEM_GPU)
> > {
> > + if (rte_gpu_count_avail() == 0)
> > + rte_exit(EXIT_FAILURE, "No
> > GPU device available.\n");
> > +
> > + mempools[i *
> > MAX_SEGS_BUFFER_SPLIT + j] =
> > +
> > gpu_mbuf_pool_create(mbuf_data_size[j],
> What about GPU/CPU adherence ? Do we create one GPU pool per CPU socket?
> Disregarding hardware topology at all?
> We can mitigate the issue by "--socket-mem/--socket-num" parameter though.
>
> > +
> > nb_mbuf_per_pool,
> > +
> > socket_ids[i], j, gpu_id,
> > +
> > &(mempools_ext_ptr[j]));
> > + } else {
> > + mempools[i *
> > MAX_SEGS_BUFFER_SPLIT + j] =
> > +
> > mbuf_pool_create(mbuf_data_size[j],
> > +
> > nb_mbuf_per_pool,
> > + socket_ids[i],
> > j);
> > + }
> > + }
> > } else {
> > uint8_t i;
> >
> > for (i = 0; i < mbuf_data_size_n; i++)
> > - mempools[i] = mbuf_pool_create
> > - (mbuf_data_size[i],
> > - nb_mbuf_per_pool,
> > - socket_num == UMA_NO_CONFIG ?
> > - 0 : socket_num, i);
> > + {
> > + if (mbuf_mem_types[i] == MBUF_MEM_GPU) {
> > + mempools[i] =
> > gpu_mbuf_pool_create(mbuf_data_size[i],
> > +
> > nb_mbuf_per_pool,
> > +
> > socket_num == UMA_NO_CONFIG ? 0 : socket_num,
> > +
> > i, gpu_id,
> > +
> > &(mempools_ext_ptr[i]));
> > + } else {
> > + mempools[i] =
> > mbuf_pool_create(mbuf_data_size[i],
> > + nb_mbuf_per_pool,
> > + socket_num ==
> > UMA_NO_CONFIG ?
> > + 0 : socket_num, i);
> > + }
> > + }
> > +
> > }
> >
> > init_port_config();
> > @@ -3415,8 +3523,11 @@ pmd_test_exit(void)
> > }
> > }
> > for (i = 0 ; i < RTE_DIM(mempools) ; i++) {
> > - if (mempools[i])
> > + if (mempools[i]) {
> > mempool_free_mp(mempools[i]);
> > + if (mbuf_mem_types[i] == MBUF_MEM_GPU)
> > + rte_gpu_mem_free(gpu_id, (void
> > *)mempools_ext_ptr[i]);
> > + }
> > }
> > free(xstats_display);
> >
> > diff --git a/app/test-pmd/testpmd.h b/app/test-pmd/testpmd.h index
> > 669ce1e87d..9919044372 100644
> > --- a/app/test-pmd/testpmd.h
> > +++ b/app/test-pmd/testpmd.h
> > @@ -12,6 +12,7 @@
> > #include <rte_gro.h>
> > #include <rte_gso.h>
> > #include <rte_os_shim.h>
> > +#include <rte_gpudev.h>
> > #include <cmdline.h>
> > #include <sys/queue.h>
> > #ifdef RTE_HAS_JANSSON
> > @@ -474,6 +475,11 @@ extern uint8_t dcb_config; extern uint32_t
> > mbuf_data_size_n; extern uint16_t
> > mbuf_data_size[MAX_SEGS_BUFFER_SPLIT];
> > /**< Mbuf data space size. */
> > +enum mbuf_mem_type {
> > + MBUF_MEM_CPU,
> > + MBUF_MEM_GPU
> > +};
> > +extern enum mbuf_mem_type
> > mbuf_mem_types[MAX_SEGS_BUFFER_SPLIT];
> > extern uint32_t param_total_num_mbufs;
> >
> > extern uint16_t stats_period;
> > @@ -717,14 +723,16 @@ current_fwd_lcore(void)
> > /* Mbuf Pools */
> > static inline void
> > mbuf_poolname_build(unsigned int sock_id, char *mp_name,
> > - int name_size, uint16_t idx)
> > + int name_size, uint16_t idx, enum mbuf_mem_type
> > mem_type)
> > {
> > + const char *suffix = mem_type == MBUF_MEM_GPU ? "_gpu" : "";
> > +
> > if (!idx)
> > snprintf(mp_name, name_size,
> > - MBUF_POOL_NAME_PFX "_%u", sock_id);
> > + MBUF_POOL_NAME_PFX "_%u%s", sock_id, suffix);
> > else
> > snprintf(mp_name, name_size,
> > - MBUF_POOL_NAME_PFX "_%hu_%hu",
> > (uint16_t)sock_id, idx);
> > + MBUF_POOL_NAME_PFX "_%hu_%hu%s",
> > (uint16_t)sock_id, idx, suffix);
> > }
> >
> > static inline struct rte_mempool *
> > @@ -732,7 +740,7 @@ mbuf_pool_find(unsigned int sock_id, uint16_t idx) {
> > char pool_name[RTE_MEMPOOL_NAMESIZE];
> >
> > - mbuf_poolname_build(sock_id, pool_name, sizeof(pool_name), idx);
> > + mbuf_poolname_build(sock_id, pool_name, sizeof(pool_name), idx,
> > +mbuf_mem_types[idx]);
> > return rte_mempool_lookup((const char *)pool_name); }
> >
> > diff --git a/doc/guides/testpmd_app_ug/run_app.rst
> > b/doc/guides/testpmd_app_ug/run_app.rst
> > index 30edef07ea..ede7b79abb 100644
> > --- a/doc/guides/testpmd_app_ug/run_app.rst
> > +++ b/doc/guides/testpmd_app_ug/run_app.rst
> > @@ -119,6 +119,9 @@ The command line options are:
> > The default value is 2048. If multiple mbuf-size values are specified the
> > extra memory pools will be created for allocating mbufs to receive packets
> > with buffer splitting features.
> > + Providing mbuf size with a 'g' suffix (e.g. ``--mbuf-size=2048g``),
> > + will cause the mempool to be created on a GPU memory area allocated.
> > + This option is currently limited to iofwd engine with the first GPU.
> Mmm, if we have multiple GPUs - we have no way to specify on which one we should allocate the pool.
> Syntax is not complete☹. Possible we should specify GPU port id after the suffix ? Like 2048g2 ?
> If port index is omitted we should consider we have the only GPU and check this.
Do we need to overload the --mbuf-size parameter here?
Why not add a 'gpu' option to the --mp-alloc parameter?
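E.g. something like (hypothetical syntax):

	--mp-alloc=gpu --mbuf-size=2048 --forward-mode=io

so the size list stays purely numeric and the allocation type is selected the same way as the existing xmem/xmemhuge choices.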
* Re: [PATCH v2 1/1] app/testpmd: add GPU memory option in iofwd engine
2021-11-11 21:41 ` [PATCH v2 1/1] " eagostini
2021-11-16 16:28 ` Slava Ovsiienko
@ 2021-11-16 17:55 ` Ferruh Yigit
2021-11-16 18:06 ` Elena Agostini
1 sibling, 1 reply; 31+ messages in thread
From: Ferruh Yigit @ 2021-11-16 17:55 UTC (permalink / raw)
To: eagostini; +Cc: dev, Slava Ovsiienko
On 11/11/2021 9:41 PM, eagostini@nvidia.com wrote:
> --- a/app/test-pmd/meson.build
> +++ b/app/test-pmd/meson.build
> @@ -32,7 +32,7 @@ if dpdk_conf.has('RTE_HAS_JANSSON')
> ext_deps += jansson_dep
> endif
>
> -deps += ['ethdev', 'gro', 'gso', 'cmdline', 'metrics', 'bus_pci']
> +deps += ['ethdev', 'gro', 'gso', 'cmdline', 'metrics', 'bus_pci', 'gpudev']
I didn't review the set, but in a very high level do we want to add
'gpudev' as dependency? Isn't this like adding 'rawdev' as dependency.
^ permalink raw reply [flat|nested] 31+ messages in thread
* Re: [PATCH v2 1/1] app/testpmd: add GPU memory option in iofwd engine
2021-11-16 17:55 ` Ferruh Yigit
@ 2021-11-16 18:06 ` Elena Agostini
2021-11-16 18:11 ` Ferruh Yigit
0 siblings, 1 reply; 31+ messages in thread
From: Elena Agostini @ 2021-11-16 18:06 UTC (permalink / raw)
To: Ferruh Yigit; +Cc: dev, Slava Ovsiienko
> From: Ferruh Yigit <ferruh.yigit@intel.com>
> Date: Tuesday, 16 November 2021 at 19:00
> To: Elena Agostini <eagostini@nvidia.com>
> Cc: dev@dpdk.org <dev@dpdk.org>, Slava Ovsiienko <viacheslavo@nvidia.com>
> Subject: Re: [PATCH v2 1/1] app/testpmd: add GPU memory option in iofwd engine
> External email: Use caution opening links or attachments>
>
> On 11/11/2021 9:41 PM, eagostini@nvidia.com wrote:
> > --- a/app/test-pmd/meson.build
> > +++ b/app/test-pmd/meson.build
> > @@ -32,7 +32,7 @@ if dpdk_conf.has('RTE_HAS_JANSSON')
> > ext_deps += jansson_dep
> > endif
> >
> > -deps += ['ethdev', 'gro', 'gso', 'cmdline', 'metrics', 'bus_pci']
> > +deps += ['ethdev', 'gro', 'gso', 'cmdline', 'metrics', 'bus_pci', 'gpudev']>
> I didn't review the set, but in a very high level do we want to add
> 'gpudev' as dependency? Isn't this like adding 'rawdev' as dependency.
gpudev is a library that can be built without a gpu driver as all the other libraries
and it is actually used only in case of GPU memory mempool.
Reasons for this patch are:
- Have an upstreamed benchmark tool to measure network metrics using GPU memory
- Test some DPDK features not really tested anywhere like the external memory mempool feature
^ permalink raw reply [flat|nested] 31+ messages in thread
* Re: [PATCH v2 1/1] app/testpmd: add GPU memory option in iofwd engine
2021-11-16 18:06 ` Elena Agostini
@ 2021-11-16 18:11 ` Ferruh Yigit
2021-11-16 19:09 ` Jerin Jacob
0 siblings, 1 reply; 31+ messages in thread
From: Ferruh Yigit @ 2021-11-16 18:11 UTC (permalink / raw)
To: Elena Agostini; +Cc: dev, Slava Ovsiienko
On 11/16/2021 6:06 PM, Elena Agostini wrote:
> > From: Ferruh Yigit <ferruh.yigit@intel.com>
>
> > Date: Tuesday, 16 November 2021 at 19:00
>
> > To: Elena Agostini <eagostini@nvidia.com>
>
> > Cc: dev@dpdk.org <dev@dpdk.org>, Slava Ovsiienko <viacheslavo@nvidia.com>
>
> > Subject: Re: [PATCH v2 1/1] app/testpmd: add GPU memory option in iofwd engine
>
> > External email: Use caution opening links or attachments>
>
> >
>
> > On 11/11/2021 9:41 PM, eagostini@nvidia.com wrote:
>
> > > --- a/app/test-pmd/meson.build
>
> > > +++ b/app/test-pmd/meson.build
>
> > > @@ -32,7 +32,7 @@ if dpdk_conf.has('RTE_HAS_JANSSON')
>
> > > ext_deps += jansson_dep
>
> > > endif
>
> > >
>
> > > -deps += ['ethdev', 'gro', 'gso', 'cmdline', 'metrics', 'bus_pci']
>
> > > +deps += ['ethdev', 'gro', 'gso', 'cmdline', 'metrics', 'bus_pci', 'gpudev']>
>
> > I didn't review the set, but in a very high level do we want to add
>
> > 'gpudev' as dependency? Isn't this like adding 'rawdev' as dependency.
>
> gpudev is a library that can be built without a gpu driver as all the other libraries
>
> > and it is actually used only in case of GPU memory mempool.
>
> Reasons for this patch are:
>
> - Have an upstreamed benchmark tool to measure network metrics using GPU memory
>
> - Test some DPDK features not really tested anywhere like the external memory mempool feature
>
I can see the reason, that is obvious, yet again why we are not adding rawdev
testing to the testpmd? But adding gpudev.
It is easier to add it to the testpmd, and for some testing perspective it
makes sense, but still I am not quite sure about this new dependency, I would
like to get more feedback.
^ permalink raw reply [flat|nested] 31+ messages in thread
* Re: [PATCH v2 1/1] app/testpmd: add GPU memory option in iofwd engine
2021-11-16 17:16 ` Ananyev, Konstantin
@ 2021-11-16 18:15 ` Elena Agostini
0 siblings, 0 replies; 31+ messages in thread
From: Elena Agostini @ 2021-11-16 18:15 UTC (permalink / raw)
To: Ananyev, Konstantin, Slava Ovsiienko, dev
> > Could you, please, set "Author" correctly - "Elena Agostini <eagostini@nvidia.com>"?
> > Otherwise, we see in the git log:
> >
> > "Author: eagostini <eagostini@nvidia.com>"
> >
> > Compare with:
> > "Author: Bing Zhao <bingz@nvidia.com>"
> > "Author: Viacheslav Ovsiienko <viacheslavo@nvidia.com>"
> >
> > Also, please, see the codestyle issues, too many code lines far beyond 100 chars.
> > Lines like this:
> > + if (mbuf_mem_types[idx] == MBUF_MEM_GPU && strcmp(cur_fwd_eng->fwd_mode_name, "io") != 0) {
> > can be easily be splitted.
> >
> > >>app/testpmd: add GPU memory option in iofwd
> > As I see from the patch - it adds the new mbuf pool type (residing on GPU memory).
> > Maybe we should reword the title?
> > " app/testpmd: add GPU memory option for mbuf pools"
> >
> > >
> > > This patch introduces GPU memory in testpmd through the gpudev library.
> > > Testpmd can be used for network benchmarks when using GPU memory
> > > instead of regular CPU memory to send and receive packets.
> > > This option is currently limited to iofwd engine.
> > Why? Because iofwd is the only mode not touching the mbuf data?
> > Is it critical for functionality? Is GPU mbuf pool memory accessible from CPU side?
> > I would explain the reasons (for supporting iofwd mode only) in commit message.
> >
> > >
> > > Signed-off-by: Elena Agostini <eagostini@nvidia.com>
> > >
> > > Depends-on: series-19465 ("GPU library")
> > > Depends-on: series-20422 ("common/mlx5: fix external memory pool
> > > registration")
> > > ---
> > > app/test-pmd/cmdline.c | 14 +++
> > > app/test-pmd/meson.build | 2 +-
> > > app/test-pmd/parameters.c | 13 ++-
> > > app/test-pmd/testpmd.c | 133 +++++++++++++++++++++++---
> > > app/test-pmd/testpmd.h | 16 +++-
> > > doc/guides/testpmd_app_ug/run_app.rst | 3 +
> > > 6 files changed, 164 insertions(+), 17 deletions(-)
> > >
> > > diff --git a/app/test-pmd/cmdline.c b/app/test-pmd/cmdline.c index
> > > 4f51b259fe..36193bc566 100644
> > > --- a/app/test-pmd/cmdline.c
> > > +++ b/app/test-pmd/cmdline.c
> > > @@ -3614,6 +3614,7 @@ parse_item_list(const char *str, const char
> >
> > The "parse_item_list()" is used to parse the list looking like "number0, number1, ...., numberN"
> > and invoked for "core","port", "txtimes", etc. Not sure all of these params need to handle "g"
> > suffix. We should allow "g" processing only for "mbuf-size". We have "item_name" argument
> > to check whether we are invoked on "mbuf-size".
> >
> > > *item_name, unsigned int max_items,
> > > unsigned int j;
> > > int value_ok;
> > > char c;
> > > + int gpu_mbuf = 0;
> > >
> > > /*
> > > * First parse all items in the list and store their value.
> > > @@ -3628,6 +3629,14 @@ parse_item_list(const char *str, const char
> > > *item_name, unsigned int max_items,
> > > value_ok = 1;
> > > continue;
> > > }
> > > + if (c == 'g') {
> > We should check whether "g" is the single char suffix (last char).
> > Otherwise, "20g48" and "g20gggg48" would be also valid values.
> >
> > > + /*
> > > + * When this flag is set, mbufs for this segment
> > > + * will be created on GPU memory.
> > > + */
> > > + gpu_mbuf = 1;
> > > + continue;
> > > + }
> > > if (c != ',') {
> > > fprintf(stderr, "character %c is not a decimal digit\n",
> > > c);
> > > return 0;
> > > @@ -3640,6 +3649,8 @@ parse_item_list(const char *str, const char
> > > *item_name, unsigned int max_items,
> > > parsed_items[nb_item] = value;
> > > value_ok = 0;
> > > value = 0;
> > > + mbuf_mem_types[nb_item] = gpu_mbuf ?
> > > MBUF_MEM_GPU : MBUF_MEM_CPU;
> > > + gpu_mbuf = 0;
> > > }
> > > nb_item++;
> > > }
> > > @@ -3648,6 +3659,9 @@ parse_item_list(const char *str, const char
> > > *item_name, unsigned int max_items,
> > > item_name, nb_item + 1, max_items);
> > > return 0;
> > > }
> > > +
> > > + mbuf_mem_types[nb_item] = gpu_mbuf ? MBUF_MEM_GPU :
> > > MBUF_MEM_CPU;
> > > +
> > > parsed_items[nb_item++] = value;
> > > if (! check_unique_values)
> > > return nb_item;
> > > diff --git a/app/test-pmd/meson.build b/app/test-pmd/meson.build index
> > > d5df52c470..5c8ca68c9d 100644
> > > --- a/app/test-pmd/meson.build
> > > +++ b/app/test-pmd/meson.build
> > > @@ -32,7 +32,7 @@ if dpdk_conf.has('RTE_HAS_JANSSON')
> > > ext_deps += jansson_dep
> > > endif
> > >
> > > -deps += ['ethdev', 'gro', 'gso', 'cmdline', 'metrics', 'bus_pci']
> > > +deps += ['ethdev', 'gro', 'gso', 'cmdline', 'metrics', 'bus_pci',
> > > +'gpudev']
> > > if dpdk_conf.has('RTE_CRYPTO_SCHEDULER')
> > > deps += 'crypto_scheduler'
> > > endif
> > > diff --git a/app/test-pmd/parameters.c b/app/test-pmd/parameters.c index
> > > 0974b0a38f..d41f7f220b 100644
> > > --- a/app/test-pmd/parameters.c
> > > +++ b/app/test-pmd/parameters.c
> > > @@ -87,7 +87,10 @@ usage(char* progname)
> > > "in NUMA mode.\n");
> > > printf(" --mbuf-size=N,[N1[,..Nn]: set the data size of mbuf to "
> > > "N bytes. If multiple numbers are specified the extra pools "
> > > - "will be created to receive with packet split features\n");
> > > + "will be created to receive with packet split features\n"
> > > + "Use 'g' suffix for GPU memory.\n"
> > > + "If no or an unrecognized suffix is provided, CPU is
> > > assumed\n");
> > Unrecognized suffix? I would emit an error and abort the launch.
> >
> > > +
> > > printf(" --total-num-mbufs=N: set the number of mbufs to be
> > > allocated "
> > > "in mbuf pools.\n");
> > > printf(" --max-pkt-len=N: set the maximum size of packet to N
> > > bytes.\n"); @@ -595,6 +598,7 @@ launch_args_parse(int argc, char** argv)
> > > struct rte_eth_dev_info dev_info;
> > > uint16_t rec_nb_pkts;
> > > int ret;
> > > + uint32_t idx = 0;
> > >
> > > static struct option lgopts[] = {
> > > { "help", 0, 0, 0 },
> > > @@ -1538,4 +1542,11 @@ launch_args_parse(int argc, char** argv)
> > > "ignored\n");
> > > mempool_flags = 0;
> > > }
> > > +
> > > + for (idx = 0; idx < mbuf_data_size_n; idx++) {
> > > + if (mbuf_mem_types[idx] == MBUF_MEM_GPU &&
> > > strcmp(cur_fwd_eng->fwd_mode_name, "io") != 0) {
> > > + fprintf(stderr, "GPU memory mbufs can be used with
> > > iofwd engine only\n");
> > > + rte_exit(EXIT_FAILURE, "Command line is
> > > incorrect\n");
> > > + }
> > > + }
> > Please, note, the forwarding mode can be changed from interactive prompt with "set fwd <mode>" command.
> > If iofwd mode is crucial for GPU functionality - we should prevent switching to other modes if GPU pools are engaged.
> >
> >
> > > }
> > > diff --git a/app/test-pmd/testpmd.c b/app/test-pmd/testpmd.c index
> > > a66dfb297c..1af235c4d8 100644
> > > --- a/app/test-pmd/testpmd.c
> > > +++ b/app/test-pmd/testpmd.c
> > > @@ -205,6 +205,12 @@ uint32_t mbuf_data_size_n = 1; /* Number of
> > > specified mbuf sizes. */ uint16_t mbuf_data_size[MAX_SEGS_BUFFER_SPLIT]
> > > = {
> > > DEFAULT_MBUF_DATA_SIZE
> > > }; /**< Mbuf data space size. */
> > > +
> > > +/* Mbuf memory types. */
> > > +enum mbuf_mem_type mbuf_mem_types[MAX_SEGS_BUFFER_SPLIT];
> > > +/* Pointers to external memory allocated for mempools. */ uintptr_t
> > > +mempools_ext_ptr[MAX_SEGS_BUFFER_SPLIT];
> > > +
> > > uint32_t param_total_num_mbufs = 0; /**< number of mbufs in all pools - if
> > > * specified on command-line. */ uint16_t
> > > stats_period; /**< Period to show statistics (disabled by default) */ @@ -
> > > 543,6 +549,12 @@ int proc_id;
> > > */
> > > unsigned int num_procs = 1;
> > >
> > > +/*
> > > + * In case of GPU memory external mbufs use, for simplicity,
> > > + * the first GPU device in the list.
> > > + */
> > > +int gpu_id = 0;
> > It is assigned with zero and never changes. Support the first GPU only?
> > This limitation should be mentioned in documentation.
> >
> > > +
> > > static void
> > > eth_rx_metadata_negotiate_mp(uint16_t port_id) { @@ -1103,6 +1115,79
> > > @@ setup_extbuf(uint32_t nb_mbufs, uint16_t mbuf_sz, unsigned int
> > > socket_id,
> > > return ext_num;
> > > }
> > >
> > > +static struct rte_mempool *
> > > +gpu_mbuf_pool_create(uint16_t mbuf_seg_size, unsigned int nb_mbuf,
> > > + unsigned int socket_id, uint16_t port_id,
> > > + int gpu_id, uintptr_t * mp_addr)
> > > +{
> > > + int ret = 0;
> > > + char pool_name[RTE_MEMPOOL_NAMESIZE];
> > > + struct rte_eth_dev_info dev_info;
> > > + struct rte_mempool *rte_mp = NULL;
> > > + struct rte_pktmbuf_extmem gpu_mem;
> > > + struct rte_gpu_info ginfo;
> > > + uint8_t gpu_page_shift = 16;
> > > + uint32_t gpu_page_size = (1UL << gpu_page_shift);
> > > +
> > > + ret = eth_dev_info_get_print_err(port_id, &dev_info);
> > > + if (ret != 0)
> > > + rte_exit(EXIT_FAILURE,
> > > + "Failed to get device info for port %d\n",
> > > + port_id);
> > > +
> > > + mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name),
> > > port_id, MBUF_MEM_GPU);
> > > + if (!is_proc_primary()) {
> > > + rte_mp = rte_mempool_lookup(pool_name);
> > > + if (rte_mp == NULL)
> > > + rte_exit(EXIT_FAILURE,
> > > + "Get mbuf pool for socket %u failed: %s\n",
> > > + socket_id, rte_strerror(rte_errno));
> > > + return rte_mp;
> > > + }
> > > +
> > > + if (rte_gpu_info_get(gpu_id, &ginfo))
> > > + rte_exit(EXIT_FAILURE, "Can't retrieve info about GPU %d -
> > > bye\n",
> > > +gpu_id);
> > > +
> > > + TESTPMD_LOG(INFO,
> > > + "create a new mbuf pool <%s>: n=%u, size=%u, socket=%u
> > > GPU device=%s\n",
> > > + pool_name, nb_mbuf, mbuf_seg_size, socket_id, ginfo.name);
> > > +
> > > + /* Create an external memory mempool using memory allocated on
> > > the
> > > +GPU. */
> > > +
> > > + gpu_mem.elt_size = RTE_MBUF_DEFAULT_BUF_SIZE;
> > > + gpu_mem.buf_len = RTE_ALIGN_CEIL(nb_mbuf * gpu_mem.elt_size,
> > > gpu_page_size);
> > > + gpu_mem.buf_iova = RTE_BAD_IOVA;
> > > +
> > > + gpu_mem.buf_ptr = rte_gpu_mem_alloc(gpu_id, gpu_mem.buf_len);
> > > + if (gpu_mem.buf_ptr == NULL)
> > > + rte_exit(EXIT_FAILURE, "Could not allocate GPU device
> > > memory\n");
> > > +
> > > + ret = rte_extmem_register(gpu_mem.buf_ptr, gpu_mem.buf_len,
> > > NULL, gpu_mem.buf_iova, gpu_page_size);
> > > + if (ret)
> > > + rte_exit(EXIT_FAILURE, "Unable to register addr 0x%p, ret
> > > %d\n",
> > > +gpu_mem.buf_ptr, ret);
> > > +
> > > + uint16_t pid = 0;
> > > +
> > > + RTE_ETH_FOREACH_DEV(pid)
> > > + {
> > > + ret = rte_dev_dma_map(dev_info.device, gpu_mem.buf_ptr,
> > > + gpu_mem.buf_iova,
> > > gpu_mem.buf_len);
> > > + if (ret) {
> > > + rte_exit(EXIT_FAILURE, "Unable to DMA map addr
> > > 0x%p for device %s\n",
> > > + gpu_mem.buf_ptr, dev_info.device-
> > > >name);
> > > + }
> > > + }
> > > +
> > > + rte_mp = rte_pktmbuf_pool_create_extbuf(pool_name, nb_mbuf,
> > > mb_mempool_cache, 0, mbuf_seg_size, socket_id, &gpu_mem, 1);
> > > + if (rte_mp == NULL) {
> > > + rte_exit(EXIT_FAILURE, "Creation of GPU mempool <%s>
> > > failed\n", pool_name);
> > > + }
> > > +
> > > + *mp_addr = (uintptr_t) gpu_mem.buf_ptr;
> > > +
> > > + return rte_mp;
> > > +}
> > > +
> > > /*
> > > * Configuration initialisation done once at init time.
> > > */
> > > @@ -1117,7 +1202,7 @@ mbuf_pool_create(uint16_t mbuf_seg_size,
> > > unsigned nb_mbuf,
> > >
> > > mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size; #endif
> > > - mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name),
> > > size_idx);
> > > + mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name),
> > > size_idx,
> > > +MBUF_MEM_CPU);
> > > if (!is_proc_primary()) {
> > > rte_mp = rte_mempool_lookup(pool_name);
> > > if (rte_mp == NULL)
> > > @@ -1700,19 +1785,42 @@ init_config(void)
> > >
> > > for (i = 0; i < num_sockets; i++)
> > > for (j = 0; j < mbuf_data_size_n; j++)
> > > - mempools[i * MAX_SEGS_BUFFER_SPLIT + j]
> > > =
> > > - mbuf_pool_create(mbuf_data_size[j],
> > > - nb_mbuf_per_pool,
> > > - socket_ids[i], j);
> > > + {
> > > + if (mbuf_mem_types[j] == MBUF_MEM_GPU)
> > > {
> > > + if (rte_gpu_count_avail() == 0)
> > > + rte_exit(EXIT_FAILURE, "No
> > > GPU device available.\n");
> > > +
> > > + mempools[i *
> > > MAX_SEGS_BUFFER_SPLIT + j] =
> > > +
> > > gpu_mbuf_pool_create(mbuf_data_size[j],
> > What about GPU/CPU adherence ? Do we create one GPU pool per CPU socket?
> > Disregarding hardware topology at all?
> > We can mitigate the issue by "--socket-mem/--socket-num" parameter though.
> >
> > > +
> > > nb_mbuf_per_pool,
> > > +
> > > socket_ids[i], j, gpu_id,
> > > +
> > > &(mempools_ext_ptr[j]));
> > > + } else {
> > > + mempools[i *
> > > MAX_SEGS_BUFFER_SPLIT + j] =
> > > +
> > > mbuf_pool_create(mbuf_data_size[j],
> > > +
> > > nb_mbuf_per_pool,
> > > + socket_ids[i],
> > > j);
> > > + }
> > > + }
> > > } else {
> > > uint8_t i;
> > >
> > > for (i = 0; i < mbuf_data_size_n; i++)
> > > - mempools[i] = mbuf_pool_create
> > > - (mbuf_data_size[i],
> > > - nb_mbuf_per_pool,
> > > - socket_num == UMA_NO_CONFIG ?
> > > - 0 : socket_num, i);
> > > + {
> > > + if (mbuf_mem_types[i] == MBUF_MEM_GPU) {
> > > + mempools[i] =
> > > gpu_mbuf_pool_create(mbuf_data_size[i],
> > > +
> > > nb_mbuf_per_pool,
> > > +
> > > socket_num == UMA_NO_CONFIG ? 0 : socket_num,
> > > +
> > > i, gpu_id,
> > > +
> > > &(mempools_ext_ptr[i]));
> > > + } else {
> > > + mempools[i] =
> > > mbuf_pool_create(mbuf_data_size[i],
> > > + nb_mbuf_per_pool,
> > > + socket_num ==
> > > UMA_NO_CONFIG ?
> > > + 0 : socket_num, i);
> > > + }
> > > + }
> > > +
> > > }
> > >
> > > init_port_config();
> > > @@ -3415,8 +3523,11 @@ pmd_test_exit(void)
> > > }
> > > }
> > > for (i = 0 ; i < RTE_DIM(mempools) ; i++) {
> > > - if (mempools[i])
> > > + if (mempools[i]) {
> > > mempool_free_mp(mempools[i]);
> > > + if (mbuf_mem_types[i] == MBUF_MEM_GPU)
> > > + rte_gpu_mem_free(gpu_id, (void
> > > *)mempools_ext_ptr[i]);
> > > + }
> > > }
> > > free(xstats_display);
> > >
> > > diff --git a/app/test-pmd/testpmd.h b/app/test-pmd/testpmd.h index
> > > 669ce1e87d..9919044372 100644
> > > --- a/app/test-pmd/testpmd.h
> > > +++ b/app/test-pmd/testpmd.h
> > > @@ -12,6 +12,7 @@
> > > #include <rte_gro.h>
> > > #include <rte_gso.h>
> > > #include <rte_os_shim.h>
> > > +#include <rte_gpudev.h>
> > > #include <cmdline.h>
> > > #include <sys/queue.h>
> > > #ifdef RTE_HAS_JANSSON
> > > @@ -474,6 +475,11 @@ extern uint8_t dcb_config; extern uint32_t
> > > mbuf_data_size_n; extern uint16_t
> > > mbuf_data_size[MAX_SEGS_BUFFER_SPLIT];
> > > /**< Mbuf data space size. */
> > > +enum mbuf_mem_type {
> > > + MBUF_MEM_CPU,
> > > + MBUF_MEM_GPU
> > > +};
> > > +extern enum mbuf_mem_type
> > > mbuf_mem_types[MAX_SEGS_BUFFER_SPLIT];
> > > extern uint32_t param_total_num_mbufs;
> > >
> > > extern uint16_t stats_period;
> > > @@ -717,14 +723,16 @@ current_fwd_lcore(void)
> > > /* Mbuf Pools */
> > > static inline void
> > > mbuf_poolname_build(unsigned int sock_id, char *mp_name,
> > > - int name_size, uint16_t idx)
> > > + int name_size, uint16_t idx, enum mbuf_mem_type
> > > mem_type)
> > > {
> > > + const char *suffix = mem_type == MBUF_MEM_GPU ? "_gpu" : "";
> > > +
> > > if (!idx)
> > > snprintf(mp_name, name_size,
> > > - MBUF_POOL_NAME_PFX "_%u", sock_id);
> > > + MBUF_POOL_NAME_PFX "_%u%s", sock_id, suffix);
> > > else
> > > snprintf(mp_name, name_size,
> > > - MBUF_POOL_NAME_PFX "_%hu_%hu",
> > > (uint16_t)sock_id, idx);
> > > + MBUF_POOL_NAME_PFX "_%hu_%hu%s",
> > > (uint16_t)sock_id, idx, suffix);
> > > }
> > >
> > > static inline struct rte_mempool *
> > > @@ -732,7 +740,7 @@ mbuf_pool_find(unsigned int sock_id, uint16_t idx) {
> > > char pool_name[RTE_MEMPOOL_NAMESIZE];
> > >
> > > - mbuf_poolname_build(sock_id, pool_name, sizeof(pool_name), idx);
> > > + mbuf_poolname_build(sock_id, pool_name, sizeof(pool_name), idx,
> > > +mbuf_mem_types[idx]);
> > > return rte_mempool_lookup((const char *)pool_name); }
> > >
> > > diff --git a/doc/guides/testpmd_app_ug/run_app.rst
> > > b/doc/guides/testpmd_app_ug/run_app.rst
> > > index 30edef07ea..ede7b79abb 100644
> > > --- a/doc/guides/testpmd_app_ug/run_app.rst
> > > +++ b/doc/guides/testpmd_app_ug/run_app.rst
> > > @@ -119,6 +119,9 @@ The command line options are:
> > > The default value is 2048. If multiple mbuf-size values are specified the
> > > extra memory pools will be created for allocating mbufs to receive packets
> > > with buffer splitting features.
> > > + Providing mbuf size with a 'g' suffix (e.g. ``--mbuf-size=2048g``),
> > > + will cause the mempool to be created on a GPU memory area allocated.
> > > + This option is currently limited to iofwd engine with the first GPU.
> > Mmm, if we have multiple GPUs, we have no way to specify on which one the pool should be allocated.
> > The syntax is not complete☹. Possibly we should specify the GPU port id after the suffix? Like 2048g2?
> > If the port index is omitted, we should assume there is only one GPU and check this.
>
> Do we need to overload --mbuf-size parameter here?
> Why not add 'gpu' option to --mp-alloc parameter?
--mbuf-size is preferred to --mp-alloc because with --mbuf-size you can enable the buffer
split feature, allocating multiple mempools with different mbuf sizes on different types of memory.
An example would be --mbuf-size=2048g,1024, which will create two mempools to split packets:
one in GPU memory and another in CPU memory.
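For instance, assuming a build with gpudev enabled and at least one GPU visible to DPDK, an illustrative invocation (exact EAL options depend on the setup) could look like:

    ./dpdk-testpmd -l 0-2 -n 4 -a <NIC PCI address> -- \
        --mbuf-size=2048g,1024 --forward-mode=io

together with the usual buffer split Rx configuration, so that part of each received packet can be placed in the GPU-memory pool and the rest in the CPU-memory pool.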
^ permalink raw reply [flat|nested] 31+ messages in thread
* Re: [PATCH v2 1/1] app/testpmd: add GPU memory option in iofwd engine
2021-11-16 18:11 ` Ferruh Yigit
@ 2021-11-16 19:09 ` Jerin Jacob
2021-11-16 19:14 ` Elena Agostini
0 siblings, 1 reply; 31+ messages in thread
From: Jerin Jacob @ 2021-11-16 19:09 UTC (permalink / raw)
To: Ferruh Yigit; +Cc: Elena Agostini, dev, Slava Ovsiienko
On Tue, Nov 16, 2021 at 11:42 PM Ferruh Yigit <ferruh.yigit@intel.com> wrote:
>
> On 11/16/2021 6:06 PM, Elena Agostini wrote:
> > > From: Ferruh Yigit <ferruh.yigit@intel.com>
> >
> > > Date: Tuesday, 16 November 2021 at 19:00
> >
> > > To: Elena Agostini <eagostini@nvidia.com>
> >
> > > Cc: dev@dpdk.org <dev@dpdk.org>, Slava Ovsiienko <viacheslavo@nvidia.com>
> >
> > > Subject: Re: [PATCH v2 1/1] app/testpmd: add GPU memory option in iofwd engine
> >
> > > External email: Use caution opening links or attachments>
> >
> > >
> >
> > > On 11/11/2021 9:41 PM, eagostini@nvidia.com wrote:
> >
> > > > --- a/app/test-pmd/meson.build
> >
> > > > +++ b/app/test-pmd/meson.build
> >
> > > > @@ -32,7 +32,7 @@ if dpdk_conf.has('RTE_HAS_JANSSON')
> >
> > > > ext_deps += jansson_dep
> >
> > > > endif
> >
> > > >
> >
> > > > -deps += ['ethdev', 'gro', 'gso', 'cmdline', 'metrics', 'bus_pci']
> >
> > > > +deps += ['ethdev', 'gro', 'gso', 'cmdline', 'metrics', 'bus_pci', 'gpudev']>
> >
> > > I didn't review the set, but in a very high level do we want to add
> >
> > > 'gpudev' as dependency? Isn't this like adding 'rawdev' as dependency.
> >
> > gpudev is a library that can be built without a gpu driver as all the other libraries
> >
> > and it is actually used only in case of GPU memory mempool.
> >
> > Reasons for this patch are:
> >
> > - Have an upstreamed benchmark tool to measure network metrics using GPU memory
> >
> > - Test some DPDK features not really tested anywhere like the external memory mempool feature
> >
>
> I can see the reason, that is obvious, yet again why we are not adding rawdev
> testing to the testpmd? But adding gpudev.
> It is easier to add it to the testpmd, and for some testing perspective it
> makes sense, but still I am not quite sure about this new dependency, I would
> like to get more feedback.
I had the similar concern earlier. IMO, It is better to have a
separate test application for gpudev like
other device classes. For eventdev cases when it needs to work with
ethdev for Rx adapter cases,
We have enabled such code in app/test-eventdev to make testpmd focus on ethdev.
^ permalink raw reply [flat|nested] 31+ messages in thread
* Re: [PATCH v2 1/1] app/testpmd: add GPU memory option in iofwd engine
2021-11-16 19:09 ` Jerin Jacob
@ 2021-11-16 19:14 ` Elena Agostini
2021-11-16 19:21 ` Jerin Jacob
0 siblings, 1 reply; 31+ messages in thread
From: Elena Agostini @ 2021-11-16 19:14 UTC (permalink / raw)
To: Jerin Jacob, Ferruh Yigit; +Cc: dev, Slava Ovsiienko
> From: Jerin Jacob <jerinjacobk@gmail.com>
> Date: Tuesday, 16 November 2021 at 20:09
> To: Ferruh Yigit <ferruh.yigit@intel.com>
> Cc: Elena Agostini <eagostini@nvidia.com>, dev@dpdk.org <dev@dpdk.org>, Slava Ovsiienko <viacheslavo@nvidia.com>
> Subject: Re: [PATCH v2 1/1] app/testpmd: add GPU memory option in iofwd engine
> External email: Use caution opening links or attachments
>
>
> On Tue, Nov 16, 2021 at 11:42 PM Ferruh Yigit <ferruh.yigit@intel.com> wrote:
> >
> > On 11/16/2021 6:06 PM, Elena Agostini wrote:
> > > > From: Ferruh Yigit <ferruh.yigit@intel.com>
> > >
> > > > Date: Tuesday, 16 November 2021 at 19:00
> > >
> > > > To: Elena Agostini <eagostini@nvidia.com>
> > >
> > > > Cc: dev@dpdk.org <dev@dpdk.org>, Slava Ovsiienko <viacheslavo@nvidia.com>
> > >
> > > > Subject: Re: [PATCH v2 1/1] app/testpmd: add GPU memory option in iofwd engine
> > >
> > > > External email: Use caution opening links or attachments>
> > >
> > > >
> > >
> > > > On 11/11/2021 9:41 PM, eagostini@nvidia.com wrote:
> > >
> > > > > --- a/app/test-pmd/meson.build
> > >
> > > > > +++ b/app/test-pmd/meson.build
> > >
> > > > > @@ -32,7 +32,7 @@ if dpdk_conf.has('RTE_HAS_JANSSON')
> > >
> > > > > ext_deps += jansson_dep
> > >
> > > > > endif
> > >
> > > > >
> > >
> > > > > -deps += ['ethdev', 'gro', 'gso', 'cmdline', 'metrics', 'bus_pci']
> > >
> > > > > +deps += ['ethdev', 'gro', 'gso', 'cmdline', 'metrics', 'bus_pci', 'gpudev']>
> > >
> > > > I didn't review the set, but in a very high level do we want to add
> > >
> > > > 'gpudev' as dependency? Isn't this like adding 'rawdev' as dependency.
> > >
> > > gpudev is a library that can be built without a gpu driver as all the other libraries
> > >
> > > and it is actually used only in case of GPU memory mempool.
> > >
> > > Reasons for this patch are:
> > >
> > > - Have an upstreamed benchmark tool to measure network metrics using GPU memory
> > >
> > > - Test some DPDK features not really tested anywhere like the external memory mempool feature
> > >
> >
> > I can see the reason, that is obvious, yet again why we are not adding rawdev
> > testing to the testpmd? But adding gpudev.
> > It is easier to add it to the testpmd, and for some testing perspective it
> > makes sense, but still I am not quite sure about this new dependency, I would
> > like to get more feedback.
>
> I had the similar concern earlier. IMO, It is better to have a
> separate test application for gpudev like
> other device classes. For eventdev cases when it needs to work with
> ethdev for Rx adapter cases,
> We have enabled such code in app/test-eventdev to make testpmd focus on ethdev.
gpudev already has a test app in app/test-gpudev.
gpudev also needs to be tested with a network card, and today another application
dedicated to testing gpudev over the network would be very similar to testpmd io.
At this stage, there is no point in reinventing the wheel
^ permalink raw reply [flat|nested] 31+ messages in thread
* Re: [PATCH v2 1/1] app/testpmd: add GPU memory option in iofwd engine
2021-11-16 19:14 ` Elena Agostini
@ 2021-11-16 19:21 ` Jerin Jacob
2021-11-17 8:55 ` Bruce Richardson
0 siblings, 1 reply; 31+ messages in thread
From: Jerin Jacob @ 2021-11-16 19:21 UTC (permalink / raw)
To: Elena Agostini; +Cc: Ferruh Yigit, dev, Slava Ovsiienko
On Wed, Nov 17, 2021 at 12:44 AM Elena Agostini <eagostini@nvidia.com> wrote:
>
> > From: Jerin Jacob <jerinjacobk@gmail.com>
>
> > Date: Tuesday, 16 November 2021 at 20:09
>
> > To: Ferruh Yigit <ferruh.yigit@intel.com>
>
> > Cc: Elena Agostini <eagostini@nvidia.com>, dev@dpdk.org <dev@dpdk.org>, Slava Ovsiienko <viacheslavo@nvidia.com>
>
> > Subject: Re: [PATCH v2 1/1] app/testpmd: add GPU memory option in iofwd engine
>
> > External email: Use caution opening links or attachments
>
> >
>
> >
>
> > On Tue, Nov 16, 2021 at 11:42 PM Ferruh Yigit <ferruh.yigit@intel.com> wrote:
>
> > >
>
> > > On 11/16/2021 6:06 PM, Elena Agostini wrote:
>
> > > > > From: Ferruh Yigit <ferruh.yigit@intel.com>
>
> > > >
>
> > > > > Date: Tuesday, 16 November 2021 at 19:00
>
> > > >
>
> > > > > To: Elena Agostini <eagostini@nvidia.com>
>
> > > >
>
> > > > > Cc: dev@dpdk.org <dev@dpdk.org>, Slava Ovsiienko <viacheslavo@nvidia.com>
>
> > > >
>
> > > > > Subject: Re: [PATCH v2 1/1] app/testpmd: add GPU memory option in iofwd engine
>
> > > >
>
> > > > > External email: Use caution opening links or attachments>
>
> > > >
>
> > > > >
>
> > > >
>
> > > > > On 11/11/2021 9:41 PM, eagostini@nvidia.com wrote:
>
> > > >
>
> > > > > > --- a/app/test-pmd/meson.build
>
> > > >
>
> > > > > > +++ b/app/test-pmd/meson.build
>
> > > >
>
> > > > > > @@ -32,7 +32,7 @@ if dpdk_conf.has('RTE_HAS_JANSSON')
>
> > > >
>
> > > > > > ext_deps += jansson_dep
>
> > > >
>
> > > > > > endif
>
> > > >
>
> > > > > >
>
> > > >
>
> > > > > > -deps += ['ethdev', 'gro', 'gso', 'cmdline', 'metrics', 'bus_pci']
>
> > > >
>
> > > > > > +deps += ['ethdev', 'gro', 'gso', 'cmdline', 'metrics', 'bus_pci', 'gpudev']>
>
> > > >
>
> > > > > I didn't review the set, but in a very high level do we want to add
>
> > > >
>
> > > > > 'gpudev' as dependency? Isn't this like adding 'rawdev' as dependency.
>
> > > >
>
> > > > gpudev is a library that can be built without a gpu driver as all the other libraries
>
> > > >
>
> > > > and it is actually used only in case of GPU memory mempool.
>
> > > >
>
> > > > Reasons for this patch are:
>
> > > >
>
> > > > - Have an upstreamed benchmark tool to measure network metrics using GPU memory
>
> > > >
>
> > > > - Test some DPDK features not really tested anywhere like the external memory mempool feature
>
> > > >
>
> > >
>
> > > I can see the reason, that is obvious, yet again why we are not adding rawdev
>
> > > testing to the testpmd? But adding gpudev.
>
> > > It is easier to add it to the testpmd, and for some testing perspective it
>
> > > makes sense, but still I am not quite sure about this new dependency, I would
>
> > > like to get more feedback.
>
> >
>
> > I had the similar concern earlier. IMO, It is better to have a
>
> > separate test application for gpudev like
>
> > other device classes. For eventdev cases when it needs to work with
>
> > ethdev for Rx adapter cases,
>
> > We have enabled such code in app/test-eventdev to make testpmd focus on ethdev.
>
>
>
> gpudev already has a test app in app/test-gpudev.
>
>
>
> gpudev also needs to be tested with a network card, and today another application
>
> dedicated to testing gpudev over the network would be very similar to testpmd io.
>
>
>
> At this stage, there is no point in reinventing the wheel
I think it is not about reinventing the wheel; it is about the
maintenance of testpmd. Currently, the features are specific to ethdev.
Adding more cross-device-specific features will clutter testpmd. I had a
similar case when network cases needed to be integrated with eventdev;
I chose to have it in test-eventdev so that testpmd's focus remains on ethdev.
^ permalink raw reply [flat|nested] 31+ messages in thread
* Re: [PATCH v3 1/1] app/testpmd: add GPU memory option for mbuf pools
2021-11-17 3:04 ` [PATCH v3 1/1] " eagostini
@ 2021-11-16 21:34 ` Stephen Hemminger
2021-11-17 11:08 ` Elena Agostini
0 siblings, 1 reply; 31+ messages in thread
From: Stephen Hemminger @ 2021-11-16 21:34 UTC (permalink / raw)
To: eagostini; +Cc: dev
On Wed, 17 Nov 2021 03:04:59 +0000
<eagostini@nvidia.com> wrote:
> From: Elena Agostini <eagostini@nvidia.com>
>
> This patch introduces GPU memory in testpmd through the gpudev library.
> Testpmd can be used for network benchmarks when using GPU memory
> instead of regular CPU memory to send and receive packets.
> This option is currently limited to iofwd engine to ensure
> no workload is applied on packets not accessible from the CPU.
>
> The options chose is --mbuf-size so buffer split feature across
> different mempools can be enabled.
>
> Signed-off-by: Elena Agostini <eagostini@nvidia.com>
Won't this create a hard dependency of test-pmd on gpudev?
I thought gpudev was supposed to be optional
^ permalink raw reply [flat|nested] 31+ messages in thread
* [PATCH v3 0/1] app/testpmd: add GPU memory option for mbuf pools
2021-10-29 20:49 [dpdk-dev] [PATCH] app/testpmd: add GPU memory option in iofwd engine eagostini
2021-11-11 21:41 ` [PATCH v2 0/1] " eagostini
@ 2021-11-17 3:04 ` eagostini
2021-11-17 3:04 ` [PATCH v3 1/1] " eagostini
2021-11-17 21:49 ` [PATCH v4 0/1] " eagostini
2 siblings, 1 reply; 31+ messages in thread
From: eagostini @ 2021-11-17 3:04 UTC (permalink / raw)
To: dev; +Cc: Elena Agostini
From: Elena Agostini <eagostini@nvidia.com>
This patch introduces GPU memory in testpmd through the gpudev library.
Testpmd can be used for network benchmarks when using GPU memory
instead of regular CPU memory to send and receive packets.
This option is currently limited to iofwd engine to ensure
no workload is applied on packets not accessible from the CPU.
The option chosen is --mbuf-size so that the buffer split feature across
different mempools can be enabled.
Main reasons for this patch are:
- test memory features like external memory mempools and buffer split
- provide an upstream tool to test network metrics with GPU memory
Changelog:
- Addressed reviews' comments
Elena Agostini (1):
app/testpmd: add GPU memory option for mbuf pools
app/test-pmd/cmdline.c | 32 +++++++-
app/test-pmd/config.c | 4 +-
app/test-pmd/icmpecho.c | 2 +-
app/test-pmd/meson.build | 2 +-
app/test-pmd/parameters.c | 15 +++-
app/test-pmd/testpmd.c | 167 +++++++++++++++++++++++++++++++++++---
app/test-pmd/testpmd.h | 16 +++-
7 files changed, 217 insertions(+), 21 deletions(-)
--
2.17.1
^ permalink raw reply [flat|nested] 31+ messages in thread
* [PATCH v3 1/1] app/testpmd: add GPU memory option for mbuf pools
2021-11-17 3:04 ` [PATCH v3 0/1] app/testpmd: add GPU memory option for mbuf pools eagostini
@ 2021-11-17 3:04 ` eagostini
2021-11-16 21:34 ` Stephen Hemminger
0 siblings, 1 reply; 31+ messages in thread
From: eagostini @ 2021-11-17 3:04 UTC (permalink / raw)
To: dev; +Cc: Elena Agostini
From: Elena Agostini <eagostini@nvidia.com>
This patch introduces GPU memory in testpmd through the gpudev library.
Testpmd can be used for network benchmarks when using GPU memory
instead of regular CPU memory to send and receive packets.
This option is currently limited to iofwd engine to ensure
no workload is applied on packets not accessible from the CPU.
The option chosen is --mbuf-size so that the buffer split feature across
different mempools can be enabled.
Signed-off-by: Elena Agostini <eagostini@nvidia.com>
---
app/test-pmd/cmdline.c | 32 +++++++-
app/test-pmd/config.c | 4 +-
app/test-pmd/icmpecho.c | 2 +-
app/test-pmd/meson.build | 2 +-
app/test-pmd/parameters.c | 15 +++-
app/test-pmd/testpmd.c | 167 +++++++++++++++++++++++++++++++++++---
app/test-pmd/testpmd.h | 16 +++-
7 files changed, 217 insertions(+), 21 deletions(-)
diff --git a/app/test-pmd/cmdline.c b/app/test-pmd/cmdline.c
index fb5433fd5b..3afb97fae4 100644
--- a/app/test-pmd/cmdline.c
+++ b/app/test-pmd/cmdline.c
@@ -3614,6 +3614,7 @@ parse_item_list(const char *str, const char *item_name, unsigned int max_items,
unsigned int j;
int value_ok;
char c;
+ int gpu_mbuf = 0;
/*
* First parse all items in the list and store their value.
@@ -3623,11 +3624,25 @@ parse_item_list(const char *str, const char *item_name, unsigned int max_items,
value_ok = 0;
for (i = 0; i < strnlen(str, STR_TOKEN_SIZE); i++) {
c = str[i];
+
if ((c >= '0') && (c <= '9')) {
value = (unsigned int) (value * 10 + (c - '0'));
value_ok = 1;
continue;
}
+ if (c == 'g') {
+ /*
+ * When this flag is set, mbufs for this segment
+ * will be created on GPU memory.
+ */
+ if (i < strnlen(str, STR_TOKEN_SIZE) - 1 && str[i+1] != ',') {
+ fprintf(stderr, "input param list is not well formatted\n");
+ return 0;
+ }
+
+ gpu_mbuf = 1;
+ continue;
+ }
if (c != ',') {
fprintf(stderr, "character %c is not a decimal digit\n", c);
return 0;
@@ -3640,6 +3655,8 @@ parse_item_list(const char *str, const char *item_name, unsigned int max_items,
parsed_items[nb_item] = value;
value_ok = 0;
value = 0;
+ mbuf_mem_types[nb_item] = gpu_mbuf ? MBUF_MEM_GPU : MBUF_MEM_CPU;
+ gpu_mbuf = 0;
}
nb_item++;
}
@@ -3648,12 +3665,15 @@ parse_item_list(const char *str, const char *item_name, unsigned int max_items,
item_name, nb_item + 1, max_items);
return 0;
}
+
+ mbuf_mem_types[nb_item] = gpu_mbuf ? MBUF_MEM_GPU : MBUF_MEM_CPU;
+
parsed_items[nb_item++] = value;
if (! check_unique_values)
return nb_item;
/*
- * Then, check that all values in the list are different.
+ * Then, check that all values in the list are differents.
* No optimization here...
*/
for (i = 0; i < nb_item; i++) {
@@ -6865,6 +6885,16 @@ static void cmd_set_fwd_mode_parsed(void *parsed_result,
__rte_unused void *data)
{
struct cmd_set_fwd_mode_result *res = parsed_result;
+ int idx;
+
+ for (idx = 0; idx < MAX_SEGS_BUFFER_SPLIT; idx++) {
+ if (mbuf_mem_types[idx] == MBUF_MEM_GPU &&
+ strcmp(res->mode, "io") != 0) {
+ TESTPMD_LOG(ERR,
+ "GPU memory mbufs can be used with iofwd engine only\n");
+ return;
+ }
+ }
retry_enabled = 0;
set_pkt_forwarding_mode(res->mode);
diff --git a/app/test-pmd/config.c b/app/test-pmd/config.c
index 26318b4f14..26cadf39f7 100644
--- a/app/test-pmd/config.c
+++ b/app/test-pmd/config.c
@@ -2965,7 +2965,7 @@ port_rss_reta_info(portid_t port_id,
}
/*
- * Displays the RSS hash functions of a port, and, optionally, the RSS hash
+ * Displays the RSS hash functions of a port, and, optionaly, the RSS hash
* key of the port.
*/
void
@@ -5250,7 +5250,7 @@ mcast_addr_pool_remove(struct rte_port *port, uint32_t addr_idx)
{
port->mc_addr_nb--;
if (addr_idx == port->mc_addr_nb) {
- /* No need to recompact the set of multicast addresses. */
+ /* No need to recompact the set of multicast addressses. */
if (port->mc_addr_nb == 0) {
/* free the pool of multicast addresses. */
free(port->mc_addr_pool);
diff --git a/app/test-pmd/icmpecho.c b/app/test-pmd/icmpecho.c
index d6620f5f6a..8f1d68a83a 100644
--- a/app/test-pmd/icmpecho.c
+++ b/app/test-pmd/icmpecho.c
@@ -54,7 +54,7 @@ arp_op_name(uint16_t arp_op)
default:
break;
}
- return "Unknown ARP op";
+ return "Unkwown ARP op";
}
static const char *
diff --git a/app/test-pmd/meson.build b/app/test-pmd/meson.build
index d5df52c470..5c8ca68c9d 100644
--- a/app/test-pmd/meson.build
+++ b/app/test-pmd/meson.build
@@ -32,7 +32,7 @@ if dpdk_conf.has('RTE_HAS_JANSSON')
ext_deps += jansson_dep
endif
-deps += ['ethdev', 'gro', 'gso', 'cmdline', 'metrics', 'bus_pci']
+deps += ['ethdev', 'gro', 'gso', 'cmdline', 'metrics', 'bus_pci', 'gpudev']
if dpdk_conf.has('RTE_CRYPTO_SCHEDULER')
deps += 'crypto_scheduler'
endif
diff --git a/app/test-pmd/parameters.c b/app/test-pmd/parameters.c
index 5251722d0f..c222d115c3 100644
--- a/app/test-pmd/parameters.c
+++ b/app/test-pmd/parameters.c
@@ -87,7 +87,10 @@ usage(char* progname)
"in NUMA mode.\n");
printf(" --mbuf-size=N,[N1[,..Nn]: set the data size of mbuf to "
"N bytes. If multiple numbers are specified the extra pools "
- "will be created to receive with packet split features\n");
+ "will be created to receive with packet split features\n"
+ "Use 'g' suffix for GPU memory.\n"
+ "If no or an unrecognized suffix is provided, CPU is assumed\n");
+
printf(" --total-num-mbufs=N: set the number of mbufs to be allocated "
"in mbuf pools.\n");
printf(" --max-pkt-len=N: set the maximum size of packet to N bytes.\n");
@@ -595,6 +598,7 @@ launch_args_parse(int argc, char** argv)
struct rte_eth_dev_info dev_info;
uint16_t rec_nb_pkts;
int ret;
+ uint32_t idx = 0;
static struct option lgopts[] = {
{ "help", 0, 0, 0 },
@@ -1544,4 +1548,13 @@ launch_args_parse(int argc, char** argv)
"ignored\n");
mempool_flags = 0;
}
+
+ for (idx = 0; idx < mbuf_data_size_n; idx++) {
+ if (mbuf_mem_types[idx] == MBUF_MEM_GPU &&
+ strcmp(cur_fwd_eng->fwd_mode_name, "io") != 0) {
+ TESTPMD_LOG(ERR,
+ "GPU memory mbufs can be used with iofwd engine only\n");
+ rte_exit(EXIT_FAILURE, "Command line is incorrect\n");
+ }
+ }
}
diff --git a/app/test-pmd/testpmd.c b/app/test-pmd/testpmd.c
index c18942279a..778c4e7f72 100644
--- a/app/test-pmd/testpmd.c
+++ b/app/test-pmd/testpmd.c
@@ -205,6 +205,13 @@ uint32_t mbuf_data_size_n = 1; /* Number of specified mbuf sizes. */
uint16_t mbuf_data_size[MAX_SEGS_BUFFER_SPLIT] = {
DEFAULT_MBUF_DATA_SIZE
}; /**< Mbuf data space size. */
+
+/* Mbuf memory types. */
+enum mbuf_mem_type mbuf_mem_types[MAX_SEGS_BUFFER_SPLIT];
+/* Pointers to external memory allocated for mempools. */
+uintptr_t mempools_ext_ptr[MAX_SEGS_BUFFER_SPLIT];
+size_t mempools_ext_size[MAX_SEGS_BUFFER_SPLIT];
+
uint32_t param_total_num_mbufs = 0; /**< number of mbufs in all pools - if
* specified on command-line. */
uint16_t stats_period; /**< Period to show statistics (disabled by default) */
@@ -543,6 +550,12 @@ int proc_id;
*/
unsigned int num_procs = 1;
+/*
+ * In case of GPU memory external mbufs use, for simplicity,
+ * the first GPU device in the list.
+ */
+int gpu_id = 0;
+
static void
eth_rx_metadata_negotiate_mp(uint16_t port_id)
{
@@ -1103,6 +1116,81 @@ setup_extbuf(uint32_t nb_mbufs, uint16_t mbuf_sz, unsigned int socket_id,
return ext_num;
}
+static struct rte_mempool *
+gpu_mbuf_pool_create(uint16_t mbuf_seg_size, unsigned int nb_mbuf,
+ unsigned int socket_id, uint16_t port_id,
+ int gpu_id, uintptr_t *mp_addr, size_t *mp_size)
+{
+ int ret = 0;
+ char pool_name[RTE_MEMPOOL_NAMESIZE];
+ struct rte_eth_dev_info dev_info;
+ struct rte_mempool *rte_mp = NULL;
+ struct rte_pktmbuf_extmem gpu_mem;
+ struct rte_gpu_info ginfo;
+ uint8_t gpu_page_shift = 16;
+ uint32_t gpu_page_size = (1UL << gpu_page_shift);
+
+ if (rte_gpu_count_avail() == 0)
+ rte_exit(EXIT_FAILURE, "No GPU device available.\n");
+
+ if (rte_gpu_info_get(gpu_id, &ginfo))
+ rte_exit(EXIT_FAILURE, "Can't retrieve info about GPU %d - bye\n", gpu_id);
+
+ ret = eth_dev_info_get_print_err(port_id, &dev_info);
+ if (ret != 0)
+ rte_exit(EXIT_FAILURE,
+ "Failed to get device info for port %d\n",
+ port_id);
+
+ mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name), port_id, MBUF_MEM_GPU);
+ if (!is_proc_primary()) {
+ rte_mp = rte_mempool_lookup(pool_name);
+ if (rte_mp == NULL)
+ rte_exit(EXIT_FAILURE,
+ "Get mbuf pool for socket %u failed: %s\n",
+ socket_id, rte_strerror(rte_errno));
+ return rte_mp;
+ }
+
+ TESTPMD_LOG(INFO,
+ "create a new mbuf pool <%s>: n=%u, size=%u, socket=%u GPU device=%s\n",
+ pool_name, nb_mbuf, mbuf_seg_size, socket_id, ginfo.name);
+
+ /* Create an external memory mempool using memory allocated on the GPU. */
+
+ gpu_mem.elt_size = RTE_MBUF_DEFAULT_BUF_SIZE;
+ gpu_mem.buf_len = RTE_ALIGN_CEIL(nb_mbuf * gpu_mem.elt_size, gpu_page_size);
+ gpu_mem.buf_iova = RTE_BAD_IOVA;
+
+ gpu_mem.buf_ptr = rte_gpu_mem_alloc(gpu_id, gpu_mem.buf_len);
+ if (gpu_mem.buf_ptr == NULL)
+ rte_exit(EXIT_FAILURE, "Could not allocate GPU device memory\n");
+
+ ret = rte_extmem_register(gpu_mem.buf_ptr, gpu_mem.buf_len, NULL, gpu_mem.buf_iova, gpu_page_size);
+ if (ret)
+ rte_exit(EXIT_FAILURE, "Unable to register addr 0x%p, ret %d\n", gpu_mem.buf_ptr, ret);
+
+ uint16_t pid = 0;
+
+ RTE_ETH_FOREACH_DEV(pid)
+ {
+ ret = rte_dev_dma_map(dev_info.device, gpu_mem.buf_ptr,
+ gpu_mem.buf_iova, gpu_mem.buf_len);
+ if (ret)
+ rte_exit(EXIT_FAILURE, "Unable to DMA map addr 0x%p for device %s\n",
+ gpu_mem.buf_ptr, dev_info.device->name);
+ }
+
+ rte_mp = rte_pktmbuf_pool_create_extbuf(pool_name, nb_mbuf, mb_mempool_cache, 0, mbuf_seg_size, socket_id, &gpu_mem, 1);
+ if (rte_mp == NULL)
+ rte_exit(EXIT_FAILURE, "Creation of GPU mempool <%s> failed\n", pool_name);
+
+ *mp_addr = (uintptr_t) gpu_mem.buf_ptr;
+ *mp_size = (size_t) gpu_mem.buf_len;
+
+ return rte_mp;
+}
+
/*
* Configuration initialisation done once at init time.
*/
@@ -1117,7 +1205,7 @@ mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
#endif
- mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name), size_idx);
+ mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name), size_idx, MBUF_MEM_CPU);
if (!is_proc_primary()) {
rte_mp = rte_mempool_lookup(pool_name);
if (rte_mp == NULL)
@@ -1700,19 +1788,42 @@ init_config(void)
for (i = 0; i < num_sockets; i++)
for (j = 0; j < mbuf_data_size_n; j++)
- mempools[i * MAX_SEGS_BUFFER_SPLIT + j] =
- mbuf_pool_create(mbuf_data_size[j],
- nb_mbuf_per_pool,
- socket_ids[i], j);
+ {
+ if (mbuf_mem_types[j] == MBUF_MEM_GPU) {
+ mempools[i * MAX_SEGS_BUFFER_SPLIT + j] =
+ gpu_mbuf_pool_create(mbuf_data_size[j],
+ nb_mbuf_per_pool,
+ socket_ids[i], j, gpu_id,
+ &(mempools_ext_ptr[i]),
+ &(mempools_ext_size[i]));
+ } else {
+ mempools[i * MAX_SEGS_BUFFER_SPLIT + j] =
+ mbuf_pool_create(mbuf_data_size[j],
+ nb_mbuf_per_pool,
+ socket_ids[i], j);
+ }
+ }
} else {
uint8_t i;
for (i = 0; i < mbuf_data_size_n; i++)
- mempools[i] = mbuf_pool_create
- (mbuf_data_size[i],
- nb_mbuf_per_pool,
- socket_num == UMA_NO_CONFIG ?
- 0 : socket_num, i);
+ {
+ if (mbuf_mem_types[i] == MBUF_MEM_GPU) {
+ mempools[i] =
+ gpu_mbuf_pool_create(mbuf_data_size[i],
+ nb_mbuf_per_pool,
+ socket_num == UMA_NO_CONFIG ? 0 : socket_num,
+ i, gpu_id,
+ &(mempools_ext_ptr[i]),
+ &(mempools_ext_size[i]));
+ } else {
+ mempools[i] = mbuf_pool_create(mbuf_data_size[i],
+ nb_mbuf_per_pool,
+ socket_num == UMA_NO_CONFIG ?
+ 0 : socket_num, i);
+ }
+ }
+
}
init_port_config();
@@ -3414,9 +3525,43 @@ pmd_test_exit(void)
return;
}
}
+
for (i = 0 ; i < RTE_DIM(mempools) ; i++) {
- if (mempools[i])
+ if (mempools[i]) {
mempool_free_mp(mempools[i]);
+ if (mbuf_mem_types[i] == MBUF_MEM_GPU) {
+ if (mempools_ext_ptr[i] != 0) {
+ ret = rte_extmem_unregister(
+ (void *)mempools_ext_ptr[i],
+ mempools_ext_size[i]);
+
+ if (ret)
+ RTE_LOG(ERR, EAL,
+ "rte_extmem_unregister 0x%p -> %d (rte_errno = %d)\n",
+ (uint8_t *)mempools_ext_ptr[i], ret, rte_errno);
+
+ RTE_ETH_FOREACH_DEV(pt_id) {
+ struct rte_eth_dev_info dev_info;
+ ret = eth_dev_info_get_print_err(pt_id, &dev_info);
+ if (ret != 0)
+ rte_exit(EXIT_FAILURE,
+ "Failed to get device info for port %d\n",
+ pt_id);
+
+ ret = rte_dev_dma_unmap(dev_info.device,
+ (void *)mempools_ext_ptr[i], RTE_BAD_IOVA,
+ mempools_ext_size[i]);
+
+ if (ret)
+ RTE_LOG(ERR, EAL,
+ "rte_dev_dma_unmap 0x%p -> %d (rte_errno = %d)\n",
+ (uint8_t *)mempools_ext_ptr[i], ret, rte_errno);
+ }
+
+ rte_gpu_mem_free(gpu_id, (void *)mempools_ext_ptr[i]);
+ }
+ }
+ }
}
free(xstats_display);
diff --git a/app/test-pmd/testpmd.h b/app/test-pmd/testpmd.h
index 669ce1e87d..9919044372 100644
--- a/app/test-pmd/testpmd.h
+++ b/app/test-pmd/testpmd.h
@@ -12,6 +12,7 @@
#include <rte_gro.h>
#include <rte_gso.h>
#include <rte_os_shim.h>
+#include <rte_gpudev.h>
#include <cmdline.h>
#include <sys/queue.h>
#ifdef RTE_HAS_JANSSON
@@ -474,6 +475,11 @@ extern uint8_t dcb_config;
extern uint32_t mbuf_data_size_n;
extern uint16_t mbuf_data_size[MAX_SEGS_BUFFER_SPLIT];
/**< Mbuf data space size. */
+enum mbuf_mem_type {
+ MBUF_MEM_CPU,
+ MBUF_MEM_GPU
+};
+extern enum mbuf_mem_type mbuf_mem_types[MAX_SEGS_BUFFER_SPLIT];
extern uint32_t param_total_num_mbufs;
extern uint16_t stats_period;
@@ -717,14 +723,16 @@ current_fwd_lcore(void)
/* Mbuf Pools */
static inline void
mbuf_poolname_build(unsigned int sock_id, char *mp_name,
- int name_size, uint16_t idx)
+ int name_size, uint16_t idx, enum mbuf_mem_type mem_type)
{
+ const char *suffix = mem_type == MBUF_MEM_GPU ? "_gpu" : "";
+
if (!idx)
snprintf(mp_name, name_size,
- MBUF_POOL_NAME_PFX "_%u", sock_id);
+ MBUF_POOL_NAME_PFX "_%u%s", sock_id, suffix);
else
snprintf(mp_name, name_size,
- MBUF_POOL_NAME_PFX "_%hu_%hu", (uint16_t)sock_id, idx);
+ MBUF_POOL_NAME_PFX "_%hu_%hu%s", (uint16_t)sock_id, idx, suffix);
}
static inline struct rte_mempool *
@@ -732,7 +740,7 @@ mbuf_pool_find(unsigned int sock_id, uint16_t idx)
{
char pool_name[RTE_MEMPOOL_NAMESIZE];
- mbuf_poolname_build(sock_id, pool_name, sizeof(pool_name), idx);
+ mbuf_poolname_build(sock_id, pool_name, sizeof(pool_name), idx, mbuf_mem_types[idx]);
return rte_mempool_lookup((const char *)pool_name);
}
--
2.17.1
^ permalink raw reply [flat|nested] 31+ messages in thread
* Re: [PATCH v2 1/1] app/testpmd: add GPU memory option in iofwd engine
2021-11-16 19:21 ` Jerin Jacob
@ 2021-11-17 8:55 ` Bruce Richardson
0 siblings, 0 replies; 31+ messages in thread
From: Bruce Richardson @ 2021-11-17 8:55 UTC (permalink / raw)
To: Jerin Jacob; +Cc: Elena Agostini, Ferruh Yigit, dev, Slava Ovsiienko
On Wed, Nov 17, 2021 at 12:51:13AM +0530, Jerin Jacob wrote:
> On Wed, Nov 17, 2021 at 12:44 AM Elena Agostini <eagostini@nvidia.com> wrote:
> >
> > > From: Jerin Jacob <jerinjacobk@gmail.com>
> >
> > > Date: Tuesday, 16 November 2021 at 20:09
> >
> > > To: Ferruh Yigit <ferruh.yigit@intel.com>
> >
> > > Cc: Elena Agostini <eagostini@nvidia.com>, dev@dpdk.org <dev@dpdk.org>, Slava Ovsiienko <viacheslavo@nvidia.com>
> >
> > > Subject: Re: [PATCH v2 1/1] app/testpmd: add GPU memory option in iofwd engine
> >
> > > External email: Use caution opening links or attachments
> >
> > >
> >
> > >
> >
> > > On Tue, Nov 16, 2021 at 11:42 PM Ferruh Yigit <ferruh.yigit@intel.com> wrote:
> >
> > > >
> >
> > > > On 11/16/2021 6:06 PM, Elena Agostini wrote:
> >
> > > > > > From: Ferruh Yigit <ferruh.yigit@intel.com>
> >
> > > > >
> >
> > > > > > Date: Tuesday, 16 November 2021 at 19:00
> >
> > > > >
> >
> > > > > > To: Elena Agostini <eagostini@nvidia.com>
> >
> > > > >
> >
> > > > > > Cc: dev@dpdk.org <dev@dpdk.org>, Slava Ovsiienko <viacheslavo@nvidia.com>
> >
> > > > >
> >
> > > > > > Subject: Re: [PATCH v2 1/1] app/testpmd: add GPU memory option in iofwd engine
> >
> > > > >
> >
> > > > > > External email: Use caution opening links or attachments>
> >
> > > > >
> >
> > > > > >
> >
> > > > >
> >
> > > > > > On 11/11/2021 9:41 PM, eagostini@nvidia.com wrote:
> >
> > > > >
> >
> > > > > > > --- a/app/test-pmd/meson.build
> >
> > > > >
> >
> > > > > > > +++ b/app/test-pmd/meson.build
> >
> > > > >
> >
> > > > > > > @@ -32,7 +32,7 @@ if dpdk_conf.has('RTE_HAS_JANSSON')
> >
> > > > >
> >
> > > > > > > ext_deps += jansson_dep
> >
> > > > >
> >
> > > > > > > endif
> >
> > > > >
> >
> > > > > > >
> >
> > > > >
> >
> > > > > > > -deps += ['ethdev', 'gro', 'gso', 'cmdline', 'metrics', 'bus_pci']
> >
> > > > >
> >
> > > > > > > +deps += ['ethdev', 'gro', 'gso', 'cmdline', 'metrics', 'bus_pci', 'gpudev']>
> >
> > > > >
> >
> > > > > > I didn't review the set, but in a very high level do we want to add
> >
> > > > >
> >
> > > > > > 'gpudev' as dependency? Isn't this like adding 'rawdev' as dependency.
> >
> > > > >
> >
> > > > > gpudev is a library that can be built without a gpu driver as all the other libraries
> >
> > > > >
> >
> > > > > and it is actually used only in case of GPU memory mempool.
> >
> > > > >
> >
> > > > > Reasons for this patch are:
> >
> > > > >
> >
> > > > > - Have an upstreamed benchmark tool to measure network metrics using GPU memory
> >
> > > > >
> >
> > > > > - Test some DPDK features not really tested anywhere like the external memory mempool feature
> >
> > > > >
> >
> > > >
> >
> > > > I can see the reason, that is obvious, yet again why we are not adding rawdev
> >
> > > > testing to the testpmd? But adding gpudev.
> >
> > > > It is easier to add it to the testpmd, and for some testing perspective it
> >
> > > > makes sense, but still I am not quite sure about this new dependency, I would
> >
> > > > like to get more feedback.
> >
> > >
> >
> > > I had the similar concern earlier. IMO, It is better to have a
> >
> > > separate test application for gpudev like
> >
> > > other device classes. For eventdev cases when it needs to work with
> >
> > > ethdev for Rx adapter cases,
> >
> > > We have enabled such code in app/test-eventdev to make testpmd focus on ethdev.
> >
> >
> >
> > gpudev already has a test app in app/test-gpudev.
> >
> >
> >
> > gpudev also needs to be tested with a network card, and today another application
> >
> > dedicated to testing gpudev over the network would be very similar to testpmd io.
> >
> >
> >
> > At this stage, there is no point in reinventing the wheel
>
>
> I think it is not about reinventing the wheel; it is about the
> maintenance of testpmd. Currently, the features are specific to ethdev.
> Adding more cross-device-specific features will clutter testpmd. I had a
> similar case when network cases needed to be integrated with eventdev;
> I chose to have it in test-eventdev so that testpmd's focus remains on ethdev.
Since there is an increasing push for libraries to be able to be disabled
in DPDK, at minimum I would look to have gpudev as an optional dependency
here, so that disabling it would not completely disable building testpmd.
Regards,
/Bruce
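For reference, a purely illustrative sketch of what the C side of such an optional dependency could look like is below; it is not code from the patch. RTE_LIB_GPUDEV follows the usual per-library build macro convention (compare RTE_LIB_BITRATESTATS in testpmd's meson.build), and gpu_mbuf_pool_create() is the helper from the posted patch, which would itself be compiled only under the same guard:

#include <stdlib.h>
#include <stdint.h>
/* rte_* symbols below come from the includes already present in testpmd.c */
#ifdef RTE_LIB_GPUDEV
#include <rte_gpudev.h>
#endif

static struct rte_mempool *
gpu_pool_create_or_die(uint16_t mbuf_seg_size, unsigned int nb_mbuf,
		unsigned int socket_id, uint16_t port_id, int gpu_id,
		uintptr_t *mp_addr, size_t *mp_size)
{
#ifdef RTE_LIB_GPUDEV
	return gpu_mbuf_pool_create(mbuf_seg_size, nb_mbuf, socket_id,
			port_id, gpu_id, mp_addr, mp_size);
#else
	/* Keep the build working without gpudev: fail only when a GPU pool
	 * was actually requested on the command line.
	 */
	RTE_SET_USED(mbuf_seg_size);
	RTE_SET_USED(nb_mbuf);
	RTE_SET_USED(socket_id);
	RTE_SET_USED(port_id);
	RTE_SET_USED(gpu_id);
	RTE_SET_USED(mp_addr);
	RTE_SET_USED(mp_size);
	rte_exit(EXIT_FAILURE,
		"GPU mbuf pools requested, but testpmd was built without gpudev\n");
#endif
}

On the meson side, 'gpudev' would be appended to deps only when the library is part of the build, mirroring how the other optional libraries in app/test-pmd/meson.build are handled.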
^ permalink raw reply [flat|nested] 31+ messages in thread
* Re: [PATCH v3 1/1] app/testpmd: add GPU memory option for mbuf pools
2021-11-16 21:34 ` Stephen Hemminger
@ 2021-11-17 11:08 ` Elena Agostini
2021-11-17 11:23 ` Jerin Jacob
0 siblings, 1 reply; 31+ messages in thread
From: Elena Agostini @ 2021-11-17 11:08 UTC (permalink / raw)
To: Stephen Hemminger; +Cc: dev
> External email: Use caution opening links or attachments
>
>
> On Wed, 17 Nov 2021 03:04:59 +0000
> <eagostini@nvidia.com> wrote:
>
> > From: Elena Agostini <eagostini@nvidia.com>
> >
> > This patch introduces GPU memory in testpmd through the gpudev library.
> > Testpmd can be used for network benchmarks when using GPU memory
> > instead of regular CPU memory to send and receive packets.
> > This option is currently limited to iofwd engine to ensure
> > no workload is applied on packets not accessible from the CPU.
> >
> > The options chose is --mbuf-size so buffer split feature across
> > different mempools can be enabled.
> >
> > Signed-off-by: Elena Agostini <eagostini@nvidia.com>
>
> Won't this create a hard dependency of test-pmd on gpudev?
> I thought gpudev was supposed to be optional
Sure, let me submit another patch to make it optional
^ permalink raw reply [flat|nested] 31+ messages in thread
* Re: [PATCH v3 1/1] app/testpmd: add GPU memory option for mbuf pools
2021-11-17 11:08 ` Elena Agostini
@ 2021-11-17 11:23 ` Jerin Jacob
2021-11-17 11:26 ` Elena Agostini
0 siblings, 1 reply; 31+ messages in thread
From: Jerin Jacob @ 2021-11-17 11:23 UTC (permalink / raw)
To: Elena Agostini, Ferruh Yigit, Richardson, Bruce; +Cc: Stephen Hemminger, dev
On Wed, Nov 17, 2021 at 4:38 PM Elena Agostini <eagostini@nvidia.com> wrote:
>
> > External email: Use caution opening links or attachments
>
> >
>
> >
>
> > On Wed, 17 Nov 2021 03:04:59 +0000
>
> > <eagostini@nvidia.com> wrote:
>
> >
>
> > > From: Elena Agostini <eagostini@nvidia.com>
>
> > >
>
> > > This patch introduces GPU memory in testpmd through the gpudev library.
>
> > > Testpmd can be used for network benchmarks when using GPU memory
>
> > > instead of regular CPU memory to send and receive packets.
>
> > > This option is currently limited to iofwd engine to ensure
>
> > > no workload is applied on packets not accessible from the CPU.
>
> > >
>
> > > The options chose is --mbuf-size so buffer split feature across
>
> > > different mempools can be enabled.
>
> > >
>
> > > Signed-off-by: Elena Agostini <eagostini@nvidia.com>
>
> >
>
> > Won't this create a hard dependency of test-pmd on gpudev?
>
> > I thought gpudev was supposed to be optional
>
>
>
> Sure, let me submit another patch to make it optional
Why add yet another compile-time macro everywhere in testpmd and make it
hard to maintain? Adding iofwd-like code to app/test-gpudev is very simple,
and all GPU-specific options can be added in test-gpudev. It also helps
patch review, as the test cases stay focused on each device class.
^ permalink raw reply [flat|nested] 31+ messages in thread
* Re: [PATCH v3 1/1] app/testpmd: add GPU memory option for mbuf pools
2021-11-17 11:23 ` Jerin Jacob
@ 2021-11-17 11:26 ` Elena Agostini
2021-11-17 11:31 ` Jerin Jacob
0 siblings, 1 reply; 31+ messages in thread
From: Elena Agostini @ 2021-11-17 11:26 UTC (permalink / raw)
To: Jerin Jacob, Ferruh Yigit, Richardson, Bruce; +Cc: Stephen Hemminger, dev
> On Wed, Nov 17, 2021 at 4:38 PM Elena Agostini <eagostini@nvidia.com> wrote:
> > [...]
> > Sure, let me submit another patch to make it optional
>
> Why add yet another compile-time macro everywhere in testpmd and make it
> hard to maintain?
> Adding iofwd-style code to test/test-gpudev is very simple, and all
> GPU-specific options can be added in test-gpudev. It also helps patch
> review, as the test cases stay focused on each device class.
Test-gpudev is a standalone unit test to ensure the gpudev functions work correctly.
In testpmd, instead, there is a connection between gpudev and the network.
^ permalink raw reply [flat|nested] 31+ messages in thread
* Re: [PATCH v3 1/1] app/testpmd: add GPU memory option for mbuf pools
2021-11-17 11:26 ` Elena Agostini
@ 2021-11-17 11:31 ` Jerin Jacob
2021-11-17 11:48 ` Ferruh Yigit
0 siblings, 1 reply; 31+ messages in thread
From: Jerin Jacob @ 2021-11-17 11:31 UTC (permalink / raw)
To: Elena Agostini; +Cc: Ferruh Yigit, Richardson, Bruce, Stephen Hemminger, dev
On Wed, Nov 17, 2021 at 4:56 PM Elena Agostini <eagostini@nvidia.com> wrote:
> [...]
>
> Test-gpudev is a standalone unit test to ensure the gpudev functions work correctly.
> In testpmd, instead, there is a connection between gpudev and the network.
I understand that. We had the same case with eventdev, which also needs to
work with the network. Testpmd is already complicated; IMO we should keep
testpmd focused on ethdev test cases only, and test-gpudev can use the
ethdev API to cover the networking requirements for gpudev.
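As a rough sketch of the kind of loop test-gpudev could run (assuming a port
already configured with an RX mempool whose buffers sit in GPU memory; the
function name below is made up for illustration):

/* io-forward style loop: packets are received into GPU-memory mbufs and
 * sent back untouched, so the CPU never has to read the payload.
 */
static void
gpu_io_forward(uint16_t port_id, uint16_t queue_id)
{
	struct rte_mbuf *pkts[32];
	uint16_t nb_rx, nb_tx;

	for (;;) {
		nb_rx = rte_eth_rx_burst(port_id, queue_id, pkts, 32);
		if (nb_rx == 0)
			continue;
		nb_tx = rte_eth_tx_burst(port_id, queue_id, pkts, nb_rx);
		/* Drop whatever the TX queue could not take. */
		while (nb_tx < nb_rx)
			rte_pktmbuf_free(pkts[nb_tx++]);
	}
}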
^ permalink raw reply [flat|nested] 31+ messages in thread
* Re: [PATCH v3 1/1] app/testpmd: add GPU memory option for mbuf pools
2021-11-17 11:31 ` Jerin Jacob
@ 2021-11-17 11:48 ` Ferruh Yigit
2021-11-17 12:36 ` Ananyev, Konstantin
0 siblings, 1 reply; 31+ messages in thread
From: Ferruh Yigit @ 2021-11-17 11:48 UTC (permalink / raw)
To: Jerin Jacob, Elena Agostini; +Cc: Richardson, Bruce, Stephen Hemminger, dev
On 11/17/2021 11:31 AM, Jerin Jacob wrote:
> On Wed, Nov 17, 2021 at 4:56 PM Elena Agostini <eagostini@nvidia.com> wrote:
>> [...]
>
> I understand that. We had the same case with eventdev, which also needs to
> work with the network. Testpmd is already complicated; IMO we should keep
> testpmd focused on ethdev test cases only, and test-gpudev can use the
> ethdev API to cover the networking requirements for gpudev.
>
+1
^ permalink raw reply [flat|nested] 31+ messages in thread
* RE: [PATCH v3 1/1] app/testpmd: add GPU memory option for mbuf pools
2021-11-17 11:48 ` Ferruh Yigit
@ 2021-11-17 12:36 ` Ananyev, Konstantin
2021-11-17 12:39 ` Elena Agostini
0 siblings, 1 reply; 31+ messages in thread
From: Ananyev, Konstantin @ 2021-11-17 12:36 UTC (permalink / raw)
To: Yigit, Ferruh, Jerin Jacob, Elena Agostini
Cc: Richardson, Bruce, Stephen Hemminger, dev
> >> [...]
> >
> > I understand that. We had the same case with eventdev, which also needs to
> > work with the network. Testpmd is already complicated; IMO we should keep
> > testpmd focused on ethdev test cases only, and test-gpudev can use the
> > ethdev API to cover the networking requirements for gpudev.
>
> +1
+1
^ permalink raw reply [flat|nested] 31+ messages in thread
* Re: [PATCH v3 1/1] app/testpmd: add GPU memory option for mbuf pools
2021-11-17 12:36 ` Ananyev, Konstantin
@ 2021-11-17 12:39 ` Elena Agostini
2021-11-17 13:39 ` Jerin Jacob
0 siblings, 1 reply; 31+ messages in thread
From: Elena Agostini @ 2021-11-17 12:39 UTC (permalink / raw)
To: Ananyev, Konstantin, Yigit, Ferruh, Jerin Jacob
Cc: Richardson, Bruce, Stephen Hemminger, dev
> > >> [...]
> > >
> > > I understand that. We had the same case with eventdev, which also needs to
> > > work with the network. Testpmd is already complicated; IMO we should keep
> > > testpmd focused on ethdev test cases only, and test-gpudev can use the
> > > ethdev API to cover the networking requirements for gpudev.
> >
> > +1
>
> +1
Testpmd already manages different types of memory for mempools.
gpudev is just another type of memory; there is nothing more to it than that.
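To make that concrete, the core of the change (condensed from the patch,
error handling dropped; nb_mbuf, gpu_page_size, socket_id and the other
variables are the ones used in the patch) is just the usual external-memory
mempool flow, with the data area coming from gpudev instead of hugepages:

struct rte_pktmbuf_extmem gpu_mem = {
	.elt_size = RTE_MBUF_DEFAULT_BUF_SIZE,
	.buf_len = RTE_ALIGN_CEIL(nb_mbuf * RTE_MBUF_DEFAULT_BUF_SIZE,
				  gpu_page_size),
	.buf_iova = RTE_BAD_IOVA,
};

/* Allocate the data area on the GPU and expose it to DPDK and the NIC. */
gpu_mem.buf_ptr = rte_gpu_mem_alloc(gpu_id, gpu_mem.buf_len);
rte_extmem_register(gpu_mem.buf_ptr, gpu_mem.buf_len, NULL,
		    gpu_mem.buf_iova, gpu_page_size);
rte_dev_dma_map(dev_info.device, gpu_mem.buf_ptr,
		gpu_mem.buf_iova, gpu_mem.buf_len);

/* From here on it is an ordinary ext-buf pktmbuf pool. */
rte_mp = rte_pktmbuf_pool_create_extbuf(pool_name, nb_mbuf,
		mb_mempool_cache, 0, mbuf_seg_size, socket_id, &gpu_mem, 1);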
^ permalink raw reply [flat|nested] 31+ messages in thread
* Re: [PATCH v3 1/1] app/testpmd: add GPU memory option for mbuf pools
2021-11-17 12:39 ` Elena Agostini
@ 2021-11-17 13:39 ` Jerin Jacob
2021-11-17 13:50 ` Elena Agostini
0 siblings, 1 reply; 31+ messages in thread
From: Jerin Jacob @ 2021-11-17 13:39 UTC (permalink / raw)
To: Elena Agostini
Cc: Ananyev, Konstantin, Yigit, Ferruh, Richardson, Bruce,
Stephen Hemminger, dev
On Wed, Nov 17, 2021 at 6:09 PM Elena Agostini <eagostini@nvidia.com> wrote:
> [...]
>
> Testpmd already manages different types of memory for mempools.
> gpudev is just another type of memory; there is nothing more to it than that.
Let's take this example:
1) New code changes:
app/test-pmd/cmdline.c | 32 +++++++-
app/test-pmd/config.c | 4 +-
app/test-pmd/icmpecho.c | 2 +-
app/test-pmd/meson.build | 2 +-
app/test-pmd/parameters.c | 15 +++-
app/test-pmd/testpmd.c | 167 +++++++++++++++++++++++++++++++++++---
app/test-pmd/testpmd.h | 16 +++-
7 files changed, 217 insertions(+), 21 deletions(-)
2) A good amount of code needs to go through conditional compilation because
gpudev is optional, which makes testpmd even uglier.
3) It introduces a new memory type:
+enum mbuf_mem_type {
+ MBUF_MEM_CPU,
+ MBUF_MEM_GPU
+};
The question, largely, is why testpmd needs to be polluted for this; we use
testpmd for testing the ethdev device class.
All we are saying is to enable this use case in test-gpudev so that it stays
focused on GPU specifics, and whoever is not interested in a specific library
does not even need to review the testpmd patches.
^ permalink raw reply [flat|nested] 31+ messages in thread
* Re: [PATCH v3 1/1] app/testpmd: add GPU memory option for mbuf pools
2021-11-17 13:39 ` Jerin Jacob
@ 2021-11-17 13:50 ` Elena Agostini
2021-11-17 14:02 ` Jerin Jacob
0 siblings, 1 reply; 31+ messages in thread
From: Elena Agostini @ 2021-11-17 13:50 UTC (permalink / raw)
To: Jerin Jacob
Cc: Ananyev, Konstantin, Yigit, Ferruh, Richardson, Bruce,
Stephen Hemminger, dev
> On Wed, Nov 17, 2021 at 6:09 PM Elena Agostini <eagostini@nvidia.com> wrote:
> > [...]
>
> The question, largely, is why testpmd needs to be polluted for this; we use
> testpmd for testing the ethdev device class.
> All we are saying is to enable this use case in test-gpudev so that it stays
> focused on GPU specifics, and whoever is not interested in a specific library
> does not even need to review the testpmd patches.
I understand your point. I don't understand why this testpmd patch has been
sitting here since Oct 29, yet I'm receiving reviews only a few days before
rc4, when I have a limited amount of time to get new code accepted.
I can provide a gpudev + ethdev example by the end of today (I'd like to keep
test-gpudev as it is, to test the gpudev API standalone).
Is there any chance this new example will be reviewed and eventually accepted
in DPDK 21.11?
^ permalink raw reply [flat|nested] 31+ messages in thread
* Re: [PATCH v3 1/1] app/testpmd: add GPU memory option for mbuf pools
2021-11-17 13:50 ` Elena Agostini
@ 2021-11-17 14:02 ` Jerin Jacob
2021-11-17 14:07 ` Elena Agostini
0 siblings, 1 reply; 31+ messages in thread
From: Jerin Jacob @ 2021-11-17 14:02 UTC (permalink / raw)
To: Elena Agostini
Cc: Ananyev, Konstantin, Yigit, Ferruh, Richardson, Bruce,
Stephen Hemminger, dev
On Wed, Nov 17, 2021 at 7:20 PM Elena Agostini <eagostini@nvidia.com> wrote:
> [...]
>
> I understand your point. I don't understand why this testpmd patch has been
> sitting here since Oct 29, yet I'm receiving reviews only a few days before
> rc4, when I have a limited amount of time to get new code accepted.
I understand that pain. Welcome to DPDK; we have all gone through this
review issue one way or another.
>
> I can provide a gpudev + ethdev example by the end of today (I'd like to keep
> test-gpudev as it is, to test the gpudev API standalone).
>
> Is there any chance this new example will be reviewed and eventually accepted
> in DPDK 21.11?
Why a new example? I don't have any issues in updating app/test-gpudev/.
^ permalink raw reply [flat|nested] 31+ messages in thread
* Re: [PATCH v4 1/1] app/testpmd: add GPU memory option for mbuf pools
2021-11-17 21:49 ` [PATCH v4 1/1] " eagostini
@ 2021-11-17 14:04 ` Bruce Richardson
0 siblings, 0 replies; 31+ messages in thread
From: Bruce Richardson @ 2021-11-17 14:04 UTC (permalink / raw)
To: eagostini; +Cc: dev
On Wed, Nov 17, 2021 at 09:49:23PM +0000, eagostini@nvidia.com wrote:
> From: Elena Agostini <eagostini@nvidia.com>
>
> This patch introduces GPU memory in testpmd through the gpudev library.
> Testpmd can be used for network benchmarks when using GPU memory
> instead of regular CPU memory to send and receive packets.
> This option is currently limited to iofwd engine to ensure
> no workload is applied on packets not accessible from the CPU.
>
> The options chose is --mbuf-size so buffer split feature across
> different mempools can be enabled.
>
> Signed-off-by: Elena Agostini <eagostini@nvidia.com>
Ignoring for now the discussion about whether this patch should be accepted
or not (and I would share many of the concerns about it going into testpmd) -
a few comments on the code changes themselves below.
/Bruce
> ---
> app/test-pmd/cmdline.c | 36 +++++++-
> app/test-pmd/config.c | 4 +-
> app/test-pmd/icmpecho.c | 2 +-
> app/test-pmd/meson.build | 3 +
> app/test-pmd/parameters.c | 15 +++-
> app/test-pmd/testpmd.c | 182 +++++++++++++++++++++++++++++++++++---
> app/test-pmd/testpmd.h | 18 +++-
> 7 files changed, 240 insertions(+), 20 deletions(-)
>
> diff --git a/app/test-pmd/cmdline.c b/app/test-pmd/cmdline.c
> index 3faa37db6d..400aa07da8 100644
> --- a/app/test-pmd/cmdline.c
> +++ b/app/test-pmd/cmdline.c
> @@ -3619,6 +3619,7 @@ parse_item_list(const char *str, const char *item_name, unsigned int max_items,
> unsigned int j;
> int value_ok;
> char c;
> + int gpu_mbuf = 0;
>
> /*
> * First parse all items in the list and store their value.
> @@ -3628,11 +3629,29 @@ parse_item_list(const char *str, const char *item_name, unsigned int max_items,
> value_ok = 0;
> for (i = 0; i < strnlen(str, STR_TOKEN_SIZE); i++) {
> c = str[i];
> +
Whitespace change unneeded for this patch, drop from diff.
> if ((c >= '0') && (c <= '9')) {
> value = (unsigned int) (value * 10 + (c - '0'));
> value_ok = 1;
> continue;
> }
> + if (c == 'g') {
> + /*
> + * When this flag is set, mbufs for this segment
> + * will be created on GPU memory.
> + */
> + if (i < strnlen(str, STR_TOKEN_SIZE) - 1 && str[i+1] != ',') {
> + fprintf(stderr, "input param list is not well formatted\n");
> + return 0;
> + }
> + #ifdef RTE_LIB_GPUDEV
> + gpu_mbuf = 1;
> + #else
> + fprintf(stderr, "gpudev library not built. Can't create mempools in GPU memory.\n");
> + return 0;
> + #endif
While I suppose it makes things more readable, the alignment here with the
ifdefs is unusual. The standard in DPDK is that #ifdefs always start at the
margin. If you want things inline like this, I suggest you convert the
macro to a global variable at the top of the file and use that in regular C
if conditions.
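Something along these lines, as a sketch of that suggestion (variable name
invented here, and the other checks from the patch are omitted):

/* Top of file: turn the build-time macro into a regular constant. */
#ifdef RTE_LIB_GPUDEV
static const int gpudev_supported = 1;
#else
static const int gpudev_supported = 0;
#endif

/* ... then the check in parse_item_list() becomes plain C: */
		if (c == 'g') {
			if (!gpudev_supported) {
				fprintf(stderr,
					"gpudev library not built. Can't create mempools in GPU memory.\n");
				return 0;
			}
			gpu_mbuf = 1;
			continue;
		}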
> + continue;
> + }
> if (c != ',') {
> fprintf(stderr, "character %c is not a decimal digit\n", c);
> return 0;
> @@ -3645,6 +3664,8 @@ parse_item_list(const char *str, const char *item_name, unsigned int max_items,
> parsed_items[nb_item] = value;
> value_ok = 0;
> value = 0;
> + mbuf_mem_types[nb_item] = gpu_mbuf ? MBUF_MEM_GPU : MBUF_MEM_CPU;
> + gpu_mbuf = 0;
> }
> nb_item++;
> }
> @@ -3653,12 +3674,15 @@ parse_item_list(const char *str, const char *item_name, unsigned int max_items,
> item_name, nb_item + 1, max_items);
> return 0;
> }
> +
> + mbuf_mem_types[nb_item] = gpu_mbuf ? MBUF_MEM_GPU : MBUF_MEM_CPU;
> +
> parsed_items[nb_item++] = value;
> if (! check_unique_values)
> return nb_item;
>
> /*
> - * Then, check that all values in the list are different.
> + * Then, check that all values in the list are differents.
Original comment is correct. Remove this change from diff.
> * No optimization here...
> */
> for (i = 0; i < nb_item; i++) {
> @@ -6874,6 +6898,16 @@ static void cmd_set_fwd_mode_parsed(void *parsed_result,
> __rte_unused void *data)
> {
> struct cmd_set_fwd_mode_result *res = parsed_result;
> + int idx;
> +
> + for (idx = 0; idx < MAX_SEGS_BUFFER_SPLIT; idx++) {
> + if (mbuf_mem_types[idx] == MBUF_MEM_GPU &&
> + strcmp(res->mode, "io") != 0) {
> + TESTPMD_LOG(ERR,
> + "GPU memory mbufs can be used with iofwd engine only\n");
No point in wrapping like this, as you end up at the same column anyway.
> + return;
> + }
> + }
>
> retry_enabled = 0;
> set_pkt_forwarding_mode(res->mode);
> diff --git a/app/test-pmd/config.c b/app/test-pmd/config.c
> index 6fca09527b..9e8f8f1462 100644
> --- a/app/test-pmd/config.c
> +++ b/app/test-pmd/config.c
> @@ -2966,7 +2966,7 @@ port_rss_reta_info(portid_t port_id,
> }
>
> /*
> - * Displays the RSS hash functions of a port, and, optionally, the RSS hash
> + * Displays the RSS hash functions of a port, and, optionaly, the RSS hash
Original comment spelling is correct. Drop from diff.
> * key of the port.
> */
> void
> @@ -5255,7 +5255,7 @@ mcast_addr_pool_remove(struct rte_port *port, uint32_t addr_idx)
> {
> port->mc_addr_nb--;
> if (addr_idx == port->mc_addr_nb) {
> - /* No need to recompact the set of multicast addresses. */
> + /* No need to recompact the set of multicast addressses. */
> if (port->mc_addr_nb == 0) {
> /* free the pool of multicast addresses. */
> free(port->mc_addr_pool);
> diff --git a/app/test-pmd/icmpecho.c b/app/test-pmd/icmpecho.c
> index 99c94cb282..3a85ec3dd1 100644
> --- a/app/test-pmd/icmpecho.c
> +++ b/app/test-pmd/icmpecho.c
> @@ -53,7 +53,7 @@ arp_op_name(uint16_t arp_op)
> default:
> break;
> }
> - return "Unknown ARP op";
> + return "Unkwown ARP op";
Introducing a typo here too. Drop from diff.
> }
>
> static const char *
> diff --git a/app/test-pmd/meson.build b/app/test-pmd/meson.build
> index 43130c8856..568660e18f 100644
> --- a/app/test-pmd/meson.build
> +++ b/app/test-pmd/meson.build
> @@ -43,6 +43,9 @@ if dpdk_conf.has('RTE_LIB_BPF')
> sources += files('bpf_cmd.c')
> deps += 'bpf'
> endif
> +if dpdk_conf.has('RTE_LIB_GPUDEV')
> + deps += 'gpudev'
> +endif
> if dpdk_conf.has('RTE_LIB_GRO')
> deps += 'gro'
> endif
> diff --git a/app/test-pmd/parameters.c b/app/test-pmd/parameters.c
> index f9185065af..dde5ef6590 100644
> --- a/app/test-pmd/parameters.c
> +++ b/app/test-pmd/parameters.c
> @@ -86,7 +86,10 @@ usage(char* progname)
> "in NUMA mode.\n");
> printf(" --mbuf-size=N,[N1[,..Nn]: set the data size of mbuf to "
> "N bytes. If multiple numbers are specified the extra pools "
> - "will be created to receive with packet split features\n");
> + "will be created to receive with packet split features\n"
> + "Use 'g' suffix for GPU memory.\n"
> + "If no or an unrecognized suffix is provided, CPU is assumed\n");
Indentation looks wrong. Also, "g" is a bad suffix to append to a size,
since it would indicate "gigabyte" in most size-based values.
> +
> printf(" --total-num-mbufs=N: set the number of mbufs to be allocated "
> "in mbuf pools.\n");
> printf(" --max-pkt-len=N: set the maximum size of packet to N bytes.\n");
> @@ -594,6 +597,7 @@ launch_args_parse(int argc, char** argv)
> struct rte_eth_dev_info dev_info;
> uint16_t rec_nb_pkts;
> int ret;
> + uint32_t idx = 0;
>
> static struct option lgopts[] = {
> { "help", 0, 0, 0 },
> @@ -1543,4 +1547,13 @@ launch_args_parse(int argc, char** argv)
> "ignored\n");
> mempool_flags = 0;
> }
> +
> + for (idx = 0; idx < mbuf_data_size_n; idx++) {
> + if (mbuf_mem_types[idx] == MBUF_MEM_GPU &&
> + strcmp(cur_fwd_eng->fwd_mode_name, "io") != 0) {
> + TESTPMD_LOG(ERR,
> + "GPU memory mbufs can be used with iofwd engine only\n");
Again, no need to wrap here.
> + rte_exit(EXIT_FAILURE, "Command line is incorrect\n");
> + }
> + }
> }
> diff --git a/app/test-pmd/testpmd.c b/app/test-pmd/testpmd.c
> index ba65342b6d..b6c55e61b1 100644
> --- a/app/test-pmd/testpmd.c
> +++ b/app/test-pmd/testpmd.c
> @@ -206,6 +206,13 @@ uint32_t mbuf_data_size_n = 1; /* Number of specified mbuf sizes. */
> uint16_t mbuf_data_size[MAX_SEGS_BUFFER_SPLIT] = {
> DEFAULT_MBUF_DATA_SIZE
> }; /**< Mbuf data space size. */
> +
> +/* Mbuf memory types. */
> +enum mbuf_mem_type mbuf_mem_types[MAX_SEGS_BUFFER_SPLIT];
> +/* Pointers to external memory allocated for mempools. */
> +uintptr_t mempools_ext_ptr[MAX_SEGS_BUFFER_SPLIT];
> +size_t mempools_ext_size[MAX_SEGS_BUFFER_SPLIT];
> +
> uint32_t param_total_num_mbufs = 0; /**< number of mbufs in all pools - if
> * specified on command-line. */
> uint16_t stats_period; /**< Period to show statistics (disabled by default) */
> @@ -546,6 +553,12 @@ int proc_id;
> */
> unsigned int num_procs = 1;
>
> +/*
> + * In case of GPU memory external mbufs use, for simplicity,
> + * the first GPU device in the list.
> + */
> +int gpu_id = 0;
> +
> static void
> eth_rx_metadata_negotiate_mp(uint16_t port_id)
> {
> @@ -1108,6 +1121,94 @@ setup_extbuf(uint32_t nb_mbufs, uint16_t mbuf_sz, unsigned int socket_id,
> return ext_num;
> }
>
> +static struct rte_mempool *
> +gpu_mbuf_pool_create(__rte_unused uint16_t mbuf_seg_size,
> + __rte_unused unsigned int nb_mbuf,
> + __rte_unused unsigned int socket_id,
> + __rte_unused uint16_t port_id,
> + __rte_unused int gpu_id,
> + __rte_unused uintptr_t *mp_addr,
> + __rte_unused size_t *mp_size)
> +{
> +#ifdef RTE_LIB_GPUDEV
> + int ret = 0;
> + char pool_name[RTE_MEMPOOL_NAMESIZE];
> + struct rte_eth_dev_info dev_info;
> + struct rte_mempool *rte_mp = NULL;
> + struct rte_pktmbuf_extmem gpu_mem;
> + struct rte_gpu_info ginfo;
> + uint8_t gpu_page_shift = 16;
> + uint32_t gpu_page_size = (1UL << gpu_page_shift);
> +
> + if (rte_gpu_count_avail() == 0)
> + rte_exit(EXIT_FAILURE, "No GPU device available.\n");
> +
> + if (rte_gpu_info_get(gpu_id, &ginfo))
> + rte_exit(EXIT_FAILURE,
> + "Can't retrieve info about GPU %d - bye\n", gpu_id);
> +
> + ret = eth_dev_info_get_print_err(port_id, &dev_info);
> + if (ret != 0)
> + rte_exit(EXIT_FAILURE,
> + "Failed to get device info for port %d\n",
> + port_id);
> +
> + mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name), port_id, MBUF_MEM_GPU);
> + if (!is_proc_primary()) {
> + rte_mp = rte_mempool_lookup(pool_name);
> + if (rte_mp == NULL)
> + rte_exit(EXIT_FAILURE,
> + "Get mbuf pool for socket %u failed: %s\n",
> + socket_id, rte_strerror(rte_errno));
> + return rte_mp;
> + }
> +
> + TESTPMD_LOG(INFO,
> + "create a new mbuf pool <%s>: n=%u, size=%u, socket=%u GPU device=%s\n",
> + pool_name, nb_mbuf, mbuf_seg_size, socket_id, ginfo.name);
> +
> + /* Create an external memory mempool using memory allocated on the GPU. */
> +
> + gpu_mem.elt_size = RTE_MBUF_DEFAULT_BUF_SIZE;
> + gpu_mem.buf_len = RTE_ALIGN_CEIL(nb_mbuf * gpu_mem.elt_size, gpu_page_size);
> + gpu_mem.buf_iova = RTE_BAD_IOVA;
> +
> + gpu_mem.buf_ptr = rte_gpu_mem_alloc(gpu_id, gpu_mem.buf_len);
> + if (gpu_mem.buf_ptr == NULL)
> + rte_exit(EXIT_FAILURE, "Could not allocate GPU device memory\n");
> +
> + ret = rte_extmem_register(gpu_mem.buf_ptr, gpu_mem.buf_len,
> + NULL, gpu_mem.buf_iova, gpu_page_size);
> + if (ret)
> + rte_exit(EXIT_FAILURE, "Unable to register addr 0x%p, ret %d\n", gpu_mem.buf_ptr, ret);
> +
> + uint16_t pid = 0;
> +
> + RTE_ETH_FOREACH_DEV(pid)
> + {
> + ret = rte_dev_dma_map(dev_info.device, gpu_mem.buf_ptr,
> + gpu_mem.buf_iova, gpu_mem.buf_len);
> + if (ret)
> + rte_exit(EXIT_FAILURE, "Unable to DMA map addr 0x%p for device %s\n",
> + gpu_mem.buf_ptr, dev_info.device->name);
> + }
> +
> + rte_mp = rte_pktmbuf_pool_create_extbuf(pool_name, nb_mbuf,
> + mb_mempool_cache, 0, mbuf_seg_size, socket_id, &gpu_mem, 1);
> + if (rte_mp == NULL)
> + rte_exit(EXIT_FAILURE,
> + "Creation of GPU mempool <%s> failed\n", pool_name);
> +
> + *mp_addr = (uintptr_t) gpu_mem.buf_ptr;
> + *mp_size = (size_t) gpu_mem.buf_len;
> +
> + return rte_mp;
> +#else
> + rte_exit(EXIT_FAILURE,
> + "gpudev library not built. Can't create mempools in GPU memory.\n");
> +#endif
> +}
> +
> /*
> * Configuration initialisation done once at init time.
> */
> @@ -1122,7 +1223,7 @@ mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
>
> mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
> #endif
> - mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name), size_idx);
> + mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name), size_idx, MBUF_MEM_CPU);
> if (!is_proc_primary()) {
> rte_mp = rte_mempool_lookup(pool_name);
> if (rte_mp == NULL)
> @@ -1709,19 +1810,42 @@ init_config(void)
>
> for (i = 0; i < num_sockets; i++)
> for (j = 0; j < mbuf_data_size_n; j++)
> - mempools[i * MAX_SEGS_BUFFER_SPLIT + j] =
> - mbuf_pool_create(mbuf_data_size[j],
> - nb_mbuf_per_pool,
> - socket_ids[i], j);
> + {
brace should be on the same line as the "for". Given that "if - else"
counts as one statement, the braces may not need to be added at all.
> + if (mbuf_mem_types[j] == MBUF_MEM_GPU) {
> + mempools[i * MAX_SEGS_BUFFER_SPLIT + j] =
> + gpu_mbuf_pool_create(mbuf_data_size[j],
> + nb_mbuf_per_pool,
> + socket_ids[i], j, gpu_id,
> + &(mempools_ext_ptr[i]),
> + &(mempools_ext_size[i]));
> + } else {
> + mempools[i * MAX_SEGS_BUFFER_SPLIT + j] =
> + mbuf_pool_create(mbuf_data_size[j],
> + nb_mbuf_per_pool,
> + socket_ids[i], j);
> + }
> + }
> } else {
> uint8_t i;
>
> for (i = 0; i < mbuf_data_size_n; i++)
> - mempools[i] = mbuf_pool_create
> - (mbuf_data_size[i],
> - nb_mbuf_per_pool,
> - socket_num == UMA_NO_CONFIG ?
> - 0 : socket_num, i);
> + {
> + if (mbuf_mem_types[i] == MBUF_MEM_GPU) {
> + mempools[i] =
> + gpu_mbuf_pool_create(mbuf_data_size[i],
> + nb_mbuf_per_pool,
> + socket_num == UMA_NO_CONFIG ? 0 : socket_num,
> + i, gpu_id,
> + &(mempools_ext_ptr[i]),
> + &(mempools_ext_size[i]));
> + } else {
> + mempools[i] = mbuf_pool_create(mbuf_data_size[i],
> + nb_mbuf_per_pool,
> + socket_num == UMA_NO_CONFIG ?
> + 0 : socket_num, i);
> + }
> + }
> +
> }
>
> init_port_config();
> @@ -3434,9 +3558,45 @@ pmd_test_exit(void)
> return;
> }
> }
> +
> for (i = 0 ; i < RTE_DIM(mempools) ; i++) {
> - if (mempools[i])
> + if (mempools[i]) {
> mempool_free_mp(mempools[i]);
> +#ifdef RTE_LIB_GPUDEV
> + if (mbuf_mem_types[i] == MBUF_MEM_GPU) {
> + if (mempools_ext_ptr[i] != 0) {
> + ret = rte_extmem_unregister(
> + (void *)mempools_ext_ptr[i],
> + mempools_ext_size[i]);
> +
> + if (ret)
> + RTE_LOG(ERR, EAL,
> + "rte_extmem_unregister 0x%p -> %d (rte_errno = %d)\n",
> + (uint8_t *)mempools_ext_ptr[i], ret, rte_errno);
> +
> + RTE_ETH_FOREACH_DEV(pt_id) {
> + struct rte_eth_dev_info dev_info;
> + ret = eth_dev_info_get_print_err(pt_id, &dev_info);
> + if (ret != 0)
> + rte_exit(EXIT_FAILURE,
> + "Failed to get device info for port %d\n",
> + pt_id);
> +
> + ret = rte_dev_dma_unmap(dev_info.device,
> + (void *)mempools_ext_ptr[i], RTE_BAD_IOVA,
> + mempools_ext_size[i]);
> +
> + if (ret)
> + RTE_LOG(ERR, EAL,
> + "rte_dev_dma_unmap 0x%p -> %d (rte_errno = %d)\n",
> + (uint8_t *)mempools_ext_ptr[i], ret, rte_errno);
> + }
> +
> + rte_gpu_mem_free(gpu_id, (void *)mempools_ext_ptr[i]);
> + }
> + }
> +#endif
This block has very large amounts of indentation and nesting and should be
reworked to reduce this. Aim for 4 or 5 levels of indent max.
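One possible shape for that rework, as a sketch only (the helper name is
invented, and it would still have to sit under RTE_LIB_GPUDEV):

static void
gpu_mempool_cleanup(int idx)
{
	void *ptr = (void *)mempools_ext_ptr[idx];
	size_t len = mempools_ext_size[idx];
	uint16_t pt_id;
	int ret;

	if (mbuf_mem_types[idx] != MBUF_MEM_GPU || ptr == NULL)
		return;

	ret = rte_extmem_unregister(ptr, len);
	if (ret)
		TESTPMD_LOG(ERR, "rte_extmem_unregister 0x%p -> %d\n", ptr, ret);

	RTE_ETH_FOREACH_DEV(pt_id) {
		struct rte_eth_dev_info dev_info;

		if (eth_dev_info_get_print_err(pt_id, &dev_info) != 0)
			continue;
		ret = rte_dev_dma_unmap(dev_info.device, ptr, RTE_BAD_IOVA, len);
		if (ret)
			TESTPMD_LOG(ERR, "rte_dev_dma_unmap 0x%p -> %d\n", ptr, ret);
	}

	rte_gpu_mem_free(gpu_id, ptr);
}

That reduces the exit path itself to a single call per mempool.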
> + }
> }
> free(xstats_display);
>
> diff --git a/app/test-pmd/testpmd.h b/app/test-pmd/testpmd.h
> index b1dfd097c7..eca5b041d3 100644
> --- a/app/test-pmd/testpmd.h
> +++ b/app/test-pmd/testpmd.h
> @@ -9,6 +9,9 @@
>
> #include <rte_pci.h>
> #include <rte_bus_pci.h>
> +#ifdef RTE_LIB_GPUDEV
> +#include <rte_gpudev.h>
> +#endif
> #ifdef RTE_LIB_GRO
> #include <rte_gro.h>
> #endif
> @@ -484,6 +487,11 @@ extern uint8_t dcb_config;
> extern uint32_t mbuf_data_size_n;
> extern uint16_t mbuf_data_size[MAX_SEGS_BUFFER_SPLIT];
> /**< Mbuf data space size. */
> +enum mbuf_mem_type {
> + MBUF_MEM_CPU,
> + MBUF_MEM_GPU
> +};
> +extern enum mbuf_mem_type mbuf_mem_types[MAX_SEGS_BUFFER_SPLIT];
> extern uint32_t param_total_num_mbufs;
>
> extern uint16_t stats_period;
> @@ -731,14 +739,16 @@ current_fwd_lcore(void)
> /* Mbuf Pools */
> static inline void
> mbuf_poolname_build(unsigned int sock_id, char *mp_name,
> - int name_size, uint16_t idx)
> + int name_size, uint16_t idx, enum mbuf_mem_type mem_type)
> {
> + const char *suffix = mem_type == MBUF_MEM_GPU ? "_gpu" : "";
> +
> if (!idx)
> snprintf(mp_name, name_size,
> - MBUF_POOL_NAME_PFX "_%u", sock_id);
> + MBUF_POOL_NAME_PFX "_%u%s", sock_id, suffix);
> else
> snprintf(mp_name, name_size,
> - MBUF_POOL_NAME_PFX "_%hu_%hu", (uint16_t)sock_id, idx);
> + MBUF_POOL_NAME_PFX "_%hu_%hu%s", (uint16_t)sock_id, idx, suffix);
> }
>
> static inline struct rte_mempool *
> @@ -746,7 +756,7 @@ mbuf_pool_find(unsigned int sock_id, uint16_t idx)
> {
> char pool_name[RTE_MEMPOOL_NAMESIZE];
>
> - mbuf_poolname_build(sock_id, pool_name, sizeof(pool_name), idx);
> + mbuf_poolname_build(sock_id, pool_name, sizeof(pool_name), idx, mbuf_mem_types[idx]);
> return rte_mempool_lookup((const char *)pool_name);
> }
>
> --
> 2.17.1
>
^ permalink raw reply [flat|nested] 31+ messages in thread
* Re: [PATCH v3 1/1] app/testpmd: add GPU memory option for mbuf pools
2021-11-17 14:02 ` Jerin Jacob
@ 2021-11-17 14:07 ` Elena Agostini
2021-11-17 17:44 ` Elena Agostini
0 siblings, 1 reply; 31+ messages in thread
From: Elena Agostini @ 2021-11-17 14:07 UTC (permalink / raw)
To: Jerin Jacob
Cc: Ananyev, Konstantin, Yigit, Ferruh, Richardson, Bruce,
Stephen Hemminger, dev
> On Wed, Nov 17, 2021 at 7:20 PM Elena Agostini <eagostini@nvidia.com> wrote:
> > [...]
> >
> > Is there any chance this new example will be reviewed and eventually
> > accepted in DPDK 21.11?
>
> Why a new example? I don't have any issues in updating app/test-gpudev/.
Understood. If this can also help to speed up the acceptance process, I'll
provide a new patch for app/test-gpudev that introduces ethdev usage to send
and receive packets.
^ permalink raw reply [flat|nested] 31+ messages in thread
* Re: [PATCH v3 1/1] app/testpmd: add GPU memory option for mbuf pools
2021-11-17 14:07 ` Elena Agostini
@ 2021-11-17 17:44 ` Elena Agostini
0 siblings, 0 replies; 31+ messages in thread
From: Elena Agostini @ 2021-11-17 17:44 UTC (permalink / raw)
To: Jerin Jacob
Cc: Ananyev, Konstantin, Yigit, Ferruh, Richardson, Bruce,
Stephen Hemminger, dev
> > On Wed, Nov 17, 2021 at 7:20 PM Elena Agostini <eagostini@nvidia.com> wrote:
> > > [...]
> > >
> > > Is there any chance this new example will be reviewed and eventually
> > > accepted in DPDK 21.11?
> >
> > Why a new example? I don't have any issues in updating app/test-gpudev/.
>
> Understood. If this can also help to speed up the acceptance process, I'll
> provide a new patch for app/test-gpudev that introduces ethdev usage to send
> and receive packets.
This is the patch to introduce ethdev in app/test-gpudev as alternative to testpmd patch, please give me your feedback.
https://patches.dpdk.org/project/dpdk/patch/20211118015228.30628-2-eagostini@nvidia.com
It would be really helpful for the gpudev library to have a concrete example to send and receive packets using GPU memory
^ permalink raw reply [flat|nested] 31+ messages in thread
* [PATCH v4 0/1] app/testpmd: add GPU memory option for mbuf pools
2021-10-29 20:49 [dpdk-dev] [PATCH] app/testpmd: add GPU memory option in iofwd engine eagostini
2021-11-11 21:41 ` [PATCH v2 0/1] " eagostini
2021-11-17 3:04 ` [PATCH v3 0/1] app/testpmd: add GPU memory option for mbuf pools eagostini
@ 2021-11-17 21:49 ` eagostini
2021-11-17 21:49 ` [PATCH v4 1/1] " eagostini
2 siblings, 1 reply; 31+ messages in thread
From: eagostini @ 2021-11-17 21:49 UTC (permalink / raw)
To: dev; +Cc: Elena Agostini
From: Elena Agostini <eagostini@nvidia.com>
This patch introduces GPU memory in testpmd through the gpudev library.
Testpmd can be used for network benchmarks when using GPU memory
instead of regular CPU memory to send and receive packets.
This option is currently limited to iofwd engine to ensure
no workload is applied on packets not accessible from the CPU.
The option chosen is --mbuf-size so the buffer split feature across
different mempools can be enabled.
Main reasons for this patch are:
- test memory features like external memory mempools and buffer split
- provide an upstream tool to test network metrics with GPU memory
Changelog:
- gpudev dependency is optional
Elena Agostini (1):
app/testpmd: add GPU memory option for mbuf pools
app/test-pmd/cmdline.c | 36 +++++++-
app/test-pmd/config.c | 4 +-
app/test-pmd/icmpecho.c | 2 +-
app/test-pmd/meson.build | 3 +
app/test-pmd/parameters.c | 15 +++-
app/test-pmd/testpmd.c | 182 +++++++++++++++++++++++++++++++++++---
app/test-pmd/testpmd.h | 18 +++-
7 files changed, 240 insertions(+), 20 deletions(-)
--
2.17.1
^ permalink raw reply [flat|nested] 31+ messages in thread
* [PATCH v4 1/1] app/testpmd: add GPU memory option for mbuf pools
2021-11-17 21:49 ` [PATCH v4 0/1] " eagostini
@ 2021-11-17 21:49 ` eagostini
2021-11-17 14:04 ` Bruce Richardson
0 siblings, 1 reply; 31+ messages in thread
From: eagostini @ 2021-11-17 21:49 UTC (permalink / raw)
To: dev; +Cc: Elena Agostini
From: Elena Agostini <eagostini@nvidia.com>
This patch introduces GPU memory in testpmd through the gpudev library.
Testpmd can be used for network benchmarks when using GPU memory
instead of regular CPU memory to send and receive packets.
This option is currently limited to iofwd engine to ensure
no workload is applied on packets not accessible from the CPU.
The option chosen is --mbuf-size so the buffer split feature across
different mempools can be enabled.
Signed-off-by: Elena Agostini <eagostini@nvidia.com>
---
app/test-pmd/cmdline.c | 36 +++++++-
app/test-pmd/config.c | 4 +-
app/test-pmd/icmpecho.c | 2 +-
app/test-pmd/meson.build | 3 +
app/test-pmd/parameters.c | 15 +++-
app/test-pmd/testpmd.c | 182 +++++++++++++++++++++++++++++++++++---
app/test-pmd/testpmd.h | 18 +++-
7 files changed, 240 insertions(+), 20 deletions(-)
diff --git a/app/test-pmd/cmdline.c b/app/test-pmd/cmdline.c
index 3faa37db6d..400aa07da8 100644
--- a/app/test-pmd/cmdline.c
+++ b/app/test-pmd/cmdline.c
@@ -3619,6 +3619,7 @@ parse_item_list(const char *str, const char *item_name, unsigned int max_items,
unsigned int j;
int value_ok;
char c;
+ int gpu_mbuf = 0;
/*
* First parse all items in the list and store their value.
@@ -3628,11 +3629,29 @@ parse_item_list(const char *str, const char *item_name, unsigned int max_items,
value_ok = 0;
for (i = 0; i < strnlen(str, STR_TOKEN_SIZE); i++) {
c = str[i];
+
if ((c >= '0') && (c <= '9')) {
value = (unsigned int) (value * 10 + (c - '0'));
value_ok = 1;
continue;
}
+ if (c == 'g') {
+ /*
+ * When this flag is set, mbufs for this segment
+ * will be created on GPU memory.
+ */
+ if (i < strnlen(str, STR_TOKEN_SIZE) - 1 && str[i+1] != ',') {
+ fprintf(stderr, "input param list is not well formatted\n");
+ return 0;
+ }
+ #ifdef RTE_LIB_GPUDEV
+ gpu_mbuf = 1;
+ #else
+ fprintf(stderr, "gpudev library not built. Can't create mempools in GPU memory.\n");
+ return 0;
+ #endif
+ continue;
+ }
if (c != ',') {
fprintf(stderr, "character %c is not a decimal digit\n", c);
return 0;
@@ -3645,6 +3664,8 @@ parse_item_list(const char *str, const char *item_name, unsigned int max_items,
parsed_items[nb_item] = value;
value_ok = 0;
value = 0;
+ mbuf_mem_types[nb_item] = gpu_mbuf ? MBUF_MEM_GPU : MBUF_MEM_CPU;
+ gpu_mbuf = 0;
}
nb_item++;
}
@@ -3653,12 +3674,15 @@ parse_item_list(const char *str, const char *item_name, unsigned int max_items,
item_name, nb_item + 1, max_items);
return 0;
}
+
+ mbuf_mem_types[nb_item] = gpu_mbuf ? MBUF_MEM_GPU : MBUF_MEM_CPU;
+
parsed_items[nb_item++] = value;
if (! check_unique_values)
return nb_item;
/*
- * Then, check that all values in the list are different.
+ * Then, check that all values in the list are differents.
* No optimization here...
*/
for (i = 0; i < nb_item; i++) {
@@ -6874,6 +6898,16 @@ static void cmd_set_fwd_mode_parsed(void *parsed_result,
__rte_unused void *data)
{
struct cmd_set_fwd_mode_result *res = parsed_result;
+ int idx;
+
+ for (idx = 0; idx < MAX_SEGS_BUFFER_SPLIT; idx++) {
+ if (mbuf_mem_types[idx] == MBUF_MEM_GPU &&
+ strcmp(res->mode, "io") != 0) {
+ TESTPMD_LOG(ERR,
+ "GPU memory mbufs can be used with iofwd engine only\n");
+ return;
+ }
+ }
retry_enabled = 0;
set_pkt_forwarding_mode(res->mode);
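To make the intent of the 'g' suffix handling in parse_item_list() easier to follow, here is a minimal standalone sketch of the parsing rule: digits accumulate a size, 'g' tags the current segment for GPU memory, and ',' closes a segment. It is illustrative only; parse_mbuf_sizes() and seg_mem_type are invented names, and the error handling of the real patch (the check that 'g' is the last character of an item, the RTE_LIB_GPUDEV guard) is reduced to comments.

#include <stdio.h>
#include <string.h>

enum seg_mem_type { SEG_MEM_CPU, SEG_MEM_GPU };

static unsigned int
parse_mbuf_sizes(const char *str, unsigned int *sizes,
		 enum seg_mem_type *types, unsigned int max_items)
{
	unsigned int value = 0, nb = 0;
	int gpu = 0, value_ok = 0;
	size_t i;

	for (i = 0; i < strlen(str); i++) {
		char c = str[i];

		if (c >= '0' && c <= '9') {
			value = value * 10 + (unsigned int)(c - '0');
			value_ok = 1;
		} else if (c == 'g') {
			/* The patch also requires 'g' to be the last character
			 * of the item and rejects it without RTE_LIB_GPUDEV. */
			gpu = 1;
		} else if (c == ',' && value_ok && nb < max_items) {
			sizes[nb] = value;
			types[nb] = gpu ? SEG_MEM_GPU : SEG_MEM_CPU;
			nb++;
			value = 0;
			value_ok = 0;
			gpu = 0;
		}
	}
	if (value_ok && nb < max_items) {
		sizes[nb] = value;
		types[nb] = gpu ? SEG_MEM_GPU : SEG_MEM_CPU;
		nb++;
	}
	return nb;
}

int
main(void)
{
	unsigned int sizes[4];
	enum seg_mem_type types[4];
	unsigned int i, n;

	/* --mbuf-size=2048,1024g: first pool in CPU memory, second in GPU memory */
	n = parse_mbuf_sizes("2048,1024g", sizes, types, 4);
	for (i = 0; i < n; i++)
		printf("segment %u: %u bytes, %s memory\n", i, sizes[i],
		       types[i] == SEG_MEM_GPU ? "GPU" : "CPU");
	return 0;
}

With this behaviour, a command line such as --mbuf-size=2048,1024g should create the first mempool in CPU memory and the second one in GPU memory; when DPDK is built without gpudev, the patch rejects the 'g' suffix instead.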
diff --git a/app/test-pmd/config.c b/app/test-pmd/config.c
index 6fca09527b..9e8f8f1462 100644
--- a/app/test-pmd/config.c
+++ b/app/test-pmd/config.c
@@ -2966,7 +2966,7 @@ port_rss_reta_info(portid_t port_id,
}
/*
- * Displays the RSS hash functions of a port, and, optionally, the RSS hash
+ * Displays the RSS hash functions of a port, and, optionaly, the RSS hash
* key of the port.
*/
void
@@ -5255,7 +5255,7 @@ mcast_addr_pool_remove(struct rte_port *port, uint32_t addr_idx)
{
port->mc_addr_nb--;
if (addr_idx == port->mc_addr_nb) {
- /* No need to recompact the set of multicast addresses. */
+ /* No need to recompact the set of multicast addressses. */
if (port->mc_addr_nb == 0) {
/* free the pool of multicast addresses. */
free(port->mc_addr_pool);
diff --git a/app/test-pmd/icmpecho.c b/app/test-pmd/icmpecho.c
index 99c94cb282..3a85ec3dd1 100644
--- a/app/test-pmd/icmpecho.c
+++ b/app/test-pmd/icmpecho.c
@@ -53,7 +53,7 @@ arp_op_name(uint16_t arp_op)
default:
break;
}
- return "Unknown ARP op";
+ return "Unkwown ARP op";
}
static const char *
diff --git a/app/test-pmd/meson.build b/app/test-pmd/meson.build
index 43130c8856..568660e18f 100644
--- a/app/test-pmd/meson.build
+++ b/app/test-pmd/meson.build
@@ -43,6 +43,9 @@ if dpdk_conf.has('RTE_LIB_BPF')
sources += files('bpf_cmd.c')
deps += 'bpf'
endif
+if dpdk_conf.has('RTE_LIB_GPUDEV')
+ deps += 'gpudev'
+endif
if dpdk_conf.has('RTE_LIB_GRO')
deps += 'gro'
endif
diff --git a/app/test-pmd/parameters.c b/app/test-pmd/parameters.c
index f9185065af..dde5ef6590 100644
--- a/app/test-pmd/parameters.c
+++ b/app/test-pmd/parameters.c
@@ -86,7 +86,10 @@ usage(char* progname)
"in NUMA mode.\n");
printf(" --mbuf-size=N,[N1[,..Nn]: set the data size of mbuf to "
"N bytes. If multiple numbers are specified the extra pools "
- "will be created to receive with packet split features\n");
+ "will be created to receive with packet split features\n"
+ "Use 'g' suffix for GPU memory.\n"
+ "If no or an unrecognized suffix is provided, CPU is assumed\n");
+
printf(" --total-num-mbufs=N: set the number of mbufs to be allocated "
"in mbuf pools.\n");
printf(" --max-pkt-len=N: set the maximum size of packet to N bytes.\n");
@@ -594,6 +597,7 @@ launch_args_parse(int argc, char** argv)
struct rte_eth_dev_info dev_info;
uint16_t rec_nb_pkts;
int ret;
+ uint32_t idx = 0;
static struct option lgopts[] = {
{ "help", 0, 0, 0 },
@@ -1543,4 +1547,13 @@ launch_args_parse(int argc, char** argv)
"ignored\n");
mempool_flags = 0;
}
+
+ for (idx = 0; idx < mbuf_data_size_n; idx++) {
+ if (mbuf_mem_types[idx] == MBUF_MEM_GPU &&
+ strcmp(cur_fwd_eng->fwd_mode_name, "io") != 0) {
+ TESTPMD_LOG(ERR,
+ "GPU memory mbufs can be used with iofwd engine only\n");
+ rte_exit(EXIT_FAILURE, "Command line is incorrect\n");
+ }
+ }
}
diff --git a/app/test-pmd/testpmd.c b/app/test-pmd/testpmd.c
index ba65342b6d..b6c55e61b1 100644
--- a/app/test-pmd/testpmd.c
+++ b/app/test-pmd/testpmd.c
@@ -206,6 +206,13 @@ uint32_t mbuf_data_size_n = 1; /* Number of specified mbuf sizes. */
uint16_t mbuf_data_size[MAX_SEGS_BUFFER_SPLIT] = {
DEFAULT_MBUF_DATA_SIZE
}; /**< Mbuf data space size. */
+
+/* Mbuf memory types. */
+enum mbuf_mem_type mbuf_mem_types[MAX_SEGS_BUFFER_SPLIT];
+/* Pointers to external memory allocated for mempools. */
+uintptr_t mempools_ext_ptr[MAX_SEGS_BUFFER_SPLIT];
+size_t mempools_ext_size[MAX_SEGS_BUFFER_SPLIT];
+
uint32_t param_total_num_mbufs = 0; /**< number of mbufs in all pools - if
* specified on command-line. */
uint16_t stats_period; /**< Period to show statistics (disabled by default) */
@@ -546,6 +553,12 @@ int proc_id;
*/
unsigned int num_procs = 1;
+/*
+ * In case of GPU memory external mbufs use, for simplicity,
+ * the first GPU device in the list.
+ */
+int gpu_id = 0;
+
static void
eth_rx_metadata_negotiate_mp(uint16_t port_id)
{
@@ -1108,6 +1121,94 @@ setup_extbuf(uint32_t nb_mbufs, uint16_t mbuf_sz, unsigned int socket_id,
return ext_num;
}
+static struct rte_mempool *
+gpu_mbuf_pool_create(__rte_unused uint16_t mbuf_seg_size,
+ __rte_unused unsigned int nb_mbuf,
+ __rte_unused unsigned int socket_id,
+ __rte_unused uint16_t port_id,
+ __rte_unused int gpu_id,
+ __rte_unused uintptr_t *mp_addr,
+ __rte_unused size_t *mp_size)
+{
+#ifdef RTE_LIB_GPUDEV
+ int ret = 0;
+ char pool_name[RTE_MEMPOOL_NAMESIZE];
+ struct rte_eth_dev_info dev_info;
+ struct rte_mempool *rte_mp = NULL;
+ struct rte_pktmbuf_extmem gpu_mem;
+ struct rte_gpu_info ginfo;
+ uint8_t gpu_page_shift = 16;
+ uint32_t gpu_page_size = (1UL << gpu_page_shift);
+
+ if (rte_gpu_count_avail() == 0)
+ rte_exit(EXIT_FAILURE, "No GPU device available.\n");
+
+ if (rte_gpu_info_get(gpu_id, &ginfo))
+ rte_exit(EXIT_FAILURE,
+ "Can't retrieve info about GPU %d - bye\n", gpu_id);
+
+ ret = eth_dev_info_get_print_err(port_id, &dev_info);
+ if (ret != 0)
+ rte_exit(EXIT_FAILURE,
+ "Failed to get device info for port %d\n",
+ port_id);
+
+ mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name), port_id, MBUF_MEM_GPU);
+ if (!is_proc_primary()) {
+ rte_mp = rte_mempool_lookup(pool_name);
+ if (rte_mp == NULL)
+ rte_exit(EXIT_FAILURE,
+ "Get mbuf pool for socket %u failed: %s\n",
+ socket_id, rte_strerror(rte_errno));
+ return rte_mp;
+ }
+
+ TESTPMD_LOG(INFO,
+ "create a new mbuf pool <%s>: n=%u, size=%u, socket=%u GPU device=%s\n",
+ pool_name, nb_mbuf, mbuf_seg_size, socket_id, ginfo.name);
+
+ /* Create an external memory mempool using memory allocated on the GPU. */
+
+ gpu_mem.elt_size = RTE_MBUF_DEFAULT_BUF_SIZE;
+ gpu_mem.buf_len = RTE_ALIGN_CEIL(nb_mbuf * gpu_mem.elt_size, gpu_page_size);
+ gpu_mem.buf_iova = RTE_BAD_IOVA;
+
+ gpu_mem.buf_ptr = rte_gpu_mem_alloc(gpu_id, gpu_mem.buf_len);
+ if (gpu_mem.buf_ptr == NULL)
+ rte_exit(EXIT_FAILURE, "Could not allocate GPU device memory\n");
+
+ ret = rte_extmem_register(gpu_mem.buf_ptr, gpu_mem.buf_len,
+ NULL, gpu_mem.buf_iova, gpu_page_size);
+ if (ret)
+ rte_exit(EXIT_FAILURE, "Unable to register addr 0x%p, ret %d\n", gpu_mem.buf_ptr, ret);
+
+ uint16_t pid = 0;
+
+ RTE_ETH_FOREACH_DEV(pid)
+ {
+ ret = rte_dev_dma_map(dev_info.device, gpu_mem.buf_ptr,
+ gpu_mem.buf_iova, gpu_mem.buf_len);
+ if (ret)
+ rte_exit(EXIT_FAILURE, "Unable to DMA map addr 0x%p for device %s\n",
+ gpu_mem.buf_ptr, dev_info.device->name);
+ }
+
+ rte_mp = rte_pktmbuf_pool_create_extbuf(pool_name, nb_mbuf,
+ mb_mempool_cache, 0, mbuf_seg_size, socket_id, &gpu_mem, 1);
+ if (rte_mp == NULL)
+ rte_exit(EXIT_FAILURE,
+ "Creation of GPU mempool <%s> failed\n", pool_name);
+
+ *mp_addr = (uintptr_t) gpu_mem.buf_ptr;
+ *mp_size = (size_t) gpu_mem.buf_len;
+
+ return rte_mp;
+#else
+ rte_exit(EXIT_FAILURE,
+ "gpudev library not built. Can't create mempools in GPU memory.\n");
+#endif
+}
+
/*
* Configuration initialisation done once at init time.
*/
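A condensed sketch of the sequence that gpu_mbuf_pool_create() above relies on may help when reading the hunk: allocate device memory through gpudev, register it with EAL as external memory, DMA-map it for the port's device, then build an external-buffer mbuf pool on top of it. This is illustrative only and based on the calls used in the patch; gpu_pool_sketch() is an invented name, and error unwinding plus the secondary-process lookup are omitted.

#include <rte_common.h>
#include <rte_dev.h>
#include <rte_gpudev.h>
#include <rte_mbuf.h>
#include <rte_memory.h>

static struct rte_mempool *
gpu_pool_sketch(int16_t gpu_id, struct rte_device *dev,
		unsigned int nb_mbuf, uint16_t seg_size,
		unsigned int cache_size, int socket_id)
{
	const size_t page_sz = 1UL << 16;   /* 64 kB GPU page size, as in the patch */
	struct rte_pktmbuf_extmem ext;

	/* Reserve one contiguous chunk of GPU memory for all mbuf payloads. */
	ext.elt_size = seg_size;
	ext.buf_len = RTE_ALIGN_CEIL((size_t)nb_mbuf * ext.elt_size, page_sz);
	ext.buf_iova = RTE_BAD_IOVA;        /* IOVA is resolved at DMA-map time */
	ext.buf_ptr = rte_gpu_mem_alloc(gpu_id, ext.buf_len);
	if (ext.buf_ptr == NULL)
		return NULL;

	/* Tell EAL about the external memory, then map it for the port's device. */
	if (rte_extmem_register(ext.buf_ptr, ext.buf_len, NULL,
			(unsigned int)(ext.buf_len / page_sz), page_sz) != 0)
		return NULL;
	if (rte_dev_dma_map(dev, ext.buf_ptr, ext.buf_iova, ext.buf_len) != 0)
		return NULL;

	/* Mbuf headers stay in CPU memory; only the data buffers live on the GPU. */
	return rte_pktmbuf_pool_create_extbuf("mb_pool_gpu_sketch", nb_mbuf,
			cache_size, 0, seg_size, socket_id, &ext, 1);
}

The later hunk in pmd_test_exit() releases the same resources again when testpmd exits.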
@@ -1122,7 +1223,7 @@ mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
#endif
- mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name), size_idx);
+ mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name), size_idx, MBUF_MEM_CPU);
if (!is_proc_primary()) {
rte_mp = rte_mempool_lookup(pool_name);
if (rte_mp == NULL)
@@ -1709,19 +1810,42 @@ init_config(void)
for (i = 0; i < num_sockets; i++)
for (j = 0; j < mbuf_data_size_n; j++)
- mempools[i * MAX_SEGS_BUFFER_SPLIT + j] =
- mbuf_pool_create(mbuf_data_size[j],
- nb_mbuf_per_pool,
- socket_ids[i], j);
+ {
+ if (mbuf_mem_types[j] == MBUF_MEM_GPU) {
+ mempools[i * MAX_SEGS_BUFFER_SPLIT + j] =
+ gpu_mbuf_pool_create(mbuf_data_size[j],
+ nb_mbuf_per_pool,
+ socket_ids[i], j, gpu_id,
+ &(mempools_ext_ptr[i]),
+ &(mempools_ext_size[i]));
+ } else {
+ mempools[i * MAX_SEGS_BUFFER_SPLIT + j] =
+ mbuf_pool_create(mbuf_data_size[j],
+ nb_mbuf_per_pool,
+ socket_ids[i], j);
+ }
+ }
} else {
uint8_t i;
for (i = 0; i < mbuf_data_size_n; i++)
- mempools[i] = mbuf_pool_create
- (mbuf_data_size[i],
- nb_mbuf_per_pool,
- socket_num == UMA_NO_CONFIG ?
- 0 : socket_num, i);
+ {
+ if (mbuf_mem_types[i] == MBUF_MEM_GPU) {
+ mempools[i] =
+ gpu_mbuf_pool_create(mbuf_data_size[i],
+ nb_mbuf_per_pool,
+ socket_num == UMA_NO_CONFIG ? 0 : socket_num,
+ i, gpu_id,
+ &(mempools_ext_ptr[i]),
+ &(mempools_ext_size[i]));
+ } else {
+ mempools[i] = mbuf_pool_create(mbuf_data_size[i],
+ nb_mbuf_per_pool,
+ socket_num == UMA_NO_CONFIG ?
+ 0 : socket_num, i);
+ }
+ }
+
}
init_port_config();
@@ -3434,9 +3558,45 @@ pmd_test_exit(void)
return;
}
}
+
for (i = 0 ; i < RTE_DIM(mempools) ; i++) {
- if (mempools[i])
+ if (mempools[i]) {
mempool_free_mp(mempools[i]);
+#ifdef RTE_LIB_GPUDEV
+ if (mbuf_mem_types[i] == MBUF_MEM_GPU) {
+ if (mempools_ext_ptr[i] != 0) {
+ ret = rte_extmem_unregister(
+ (void *)mempools_ext_ptr[i],
+ mempools_ext_size[i]);
+
+ if (ret)
+ RTE_LOG(ERR, EAL,
+ "rte_extmem_unregister 0x%p -> %d (rte_errno = %d)\n",
+ (uint8_t *)mempools_ext_ptr[i], ret, rte_errno);
+
+ RTE_ETH_FOREACH_DEV(pt_id) {
+ struct rte_eth_dev_info dev_info;
+ ret = eth_dev_info_get_print_err(pt_id, &dev_info);
+ if (ret != 0)
+ rte_exit(EXIT_FAILURE,
+ "Failed to get device info for port %d\n",
+ pt_id);
+
+ ret = rte_dev_dma_unmap(dev_info.device,
+ (void *)mempools_ext_ptr[i], RTE_BAD_IOVA,
+ mempools_ext_size[i]);
+
+ if (ret)
+ RTE_LOG(ERR, EAL,
+ "rte_dev_dma_unmap 0x%p -> %d (rte_errno = %d)\n",
+ (uint8_t *)mempools_ext_ptr[i], ret, rte_errno);
+ }
+
+ rte_gpu_mem_free(gpu_id, (void *)mempools_ext_ptr[i]);
+ }
+ }
+#endif
+ }
}
free(xstats_display);
diff --git a/app/test-pmd/testpmd.h b/app/test-pmd/testpmd.h
index b1dfd097c7..eca5b041d3 100644
--- a/app/test-pmd/testpmd.h
+++ b/app/test-pmd/testpmd.h
@@ -9,6 +9,9 @@
#include <rte_pci.h>
#include <rte_bus_pci.h>
+#ifdef RTE_LIB_GPUDEV
+#include <rte_gpudev.h>
+#endif
#ifdef RTE_LIB_GRO
#include <rte_gro.h>
#endif
@@ -484,6 +487,11 @@ extern uint8_t dcb_config;
extern uint32_t mbuf_data_size_n;
extern uint16_t mbuf_data_size[MAX_SEGS_BUFFER_SPLIT];
/**< Mbuf data space size. */
+enum mbuf_mem_type {
+ MBUF_MEM_CPU,
+ MBUF_MEM_GPU
+};
+extern enum mbuf_mem_type mbuf_mem_types[MAX_SEGS_BUFFER_SPLIT];
extern uint32_t param_total_num_mbufs;
extern uint16_t stats_period;
@@ -731,14 +739,16 @@ current_fwd_lcore(void)
/* Mbuf Pools */
static inline void
mbuf_poolname_build(unsigned int sock_id, char *mp_name,
- int name_size, uint16_t idx)
+ int name_size, uint16_t idx, enum mbuf_mem_type mem_type)
{
+ const char *suffix = mem_type == MBUF_MEM_GPU ? "_gpu" : "";
+
if (!idx)
snprintf(mp_name, name_size,
- MBUF_POOL_NAME_PFX "_%u", sock_id);
+ MBUF_POOL_NAME_PFX "_%u%s", sock_id, suffix);
else
snprintf(mp_name, name_size,
- MBUF_POOL_NAME_PFX "_%hu_%hu", (uint16_t)sock_id, idx);
+ MBUF_POOL_NAME_PFX "_%hu_%hu%s", (uint16_t)sock_id, idx, suffix);
}
static inline struct rte_mempool *
@@ -746,7 +756,7 @@ mbuf_pool_find(unsigned int sock_id, uint16_t idx)
{
char pool_name[RTE_MEMPOOL_NAMESIZE];
- mbuf_poolname_build(sock_id, pool_name, sizeof(pool_name), idx);
+ mbuf_poolname_build(sock_id, pool_name, sizeof(pool_name), idx, mbuf_mem_types[idx]);
return rte_mempool_lookup((const char *)pool_name);
}
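For reference, assuming the default MBUF_POOL_NAME_PFX of "mb_pool", the pool names produced by the modified mbuf_poolname_build() would look roughly like this (illustrative values):

/* socket 0, segment 0, CPU memory -> "mb_pool_0"       */
/* socket 0, segment 1, CPU memory -> "mb_pool_0_1"     */
/* socket 0, segment 1, GPU memory -> "mb_pool_0_1_gpu" */

so mbuf_pool_find() keeps resolving the right pool per segment once the memory type is encoded in the name.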
--
2.17.1
^ permalink raw reply [flat|nested] 31+ messages in thread
end of thread, other threads:[~2021-11-17 17:44 UTC | newest]
Thread overview: 31+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2021-10-29 20:49 [dpdk-dev] [PATCH] app/testpmd: add GPU memory option in iofwd engine eagostini
2021-11-11 21:41 ` [PATCH v2 0/1] " eagostini
2021-11-11 21:41 ` [PATCH v2 1/1] " eagostini
2021-11-16 16:28 ` Slava Ovsiienko
2021-11-16 17:16 ` Ananyev, Konstantin
2021-11-16 18:15 ` Elena Agostini
2021-11-16 17:55 ` Ferruh Yigit
2021-11-16 18:06 ` Elena Agostini
2021-11-16 18:11 ` Ferruh Yigit
2021-11-16 19:09 ` Jerin Jacob
2021-11-16 19:14 ` Elena Agostini
2021-11-16 19:21 ` Jerin Jacob
2021-11-17 8:55 ` Bruce Richardson
2021-11-17 3:04 ` [PATCH v3 0/1] app/testpmd: add GPU memory option for mbuf pools eagostini
2021-11-17 3:04 ` [PATCH v3 1/1] " eagostini
2021-11-16 21:34 ` Stephen Hemminger
2021-11-17 11:08 ` Elena Agostini
2021-11-17 11:23 ` Jerin Jacob
2021-11-17 11:26 ` Elena Agostini
2021-11-17 11:31 ` Jerin Jacob
2021-11-17 11:48 ` Ferruh Yigit
2021-11-17 12:36 ` Ananyev, Konstantin
2021-11-17 12:39 ` Elena Agostini
2021-11-17 13:39 ` Jerin Jacob
2021-11-17 13:50 ` Elena Agostini
2021-11-17 14:02 ` Jerin Jacob
2021-11-17 14:07 ` Elena Agostini
2021-11-17 17:44 ` Elena Agostini
2021-11-17 21:49 ` [PATCH v4 0/1] " eagostini
2021-11-17 21:49 ` [PATCH v4 1/1] " eagostini
2021-11-17 14:04 ` Bruce Richardson