From: <pbhagavatula@marvell.com>
To: <jerinj@marvell.com>
Cc: <dev@dpdk.org>, Pavan Nikhilesh <pbhagavatula@marvell.com>
Subject: [dpdk-dev] [PATCH] app/eventdev: add option to enable per port pool
Date: Tue, 15 Jun 2021 16:01:48 +0530 [thread overview]
Message-ID: <20210615103149.4194-1-pbhagavatula@marvell.com> (raw)
From: Pavan Nikhilesh <pbhagavatula@marvell.com>
Add option to configure unique mempool for each ethernet device
port. Can be used with `pipeline_atq` and `pipeline_queue` tests.
Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
---
app/test-eventdev/evt_common.h | 1 +
app/test-eventdev/evt_options.c | 9 ++++
app/test-eventdev/evt_options.h | 1 +
app/test-eventdev/test_pipeline_common.c | 52 +++++++++++++++++-------
app/test-eventdev/test_pipeline_common.h | 2 +-
doc/guides/tools/testeventdev.rst | 8 ++++
6 files changed, 58 insertions(+), 15 deletions(-)
diff --git a/app/test-eventdev/evt_common.h b/app/test-eventdev/evt_common.h
index 0e228258e7..28afb114b3 100644
--- a/app/test-eventdev/evt_common.h
+++ b/app/test-eventdev/evt_common.h
@@ -55,6 +55,7 @@ struct evt_options {
uint8_t timdev_cnt;
uint8_t nb_timer_adptrs;
uint8_t timdev_use_burst;
+ uint8_t per_port_pool;
uint8_t sched_type_list[EVT_MAX_STAGES];
uint16_t mbuf_sz;
uint16_t wkr_deq_dep;
diff --git a/app/test-eventdev/evt_options.c b/app/test-eventdev/evt_options.c
index 061b63e12e..bfa3840dbc 100644
--- a/app/test-eventdev/evt_options.c
+++ b/app/test-eventdev/evt_options.c
@@ -297,6 +297,12 @@ evt_parse_eth_queues(struct evt_options *opt, const char *arg)
return ret;
}
+static int
+evt_parse_per_port_pool(struct evt_options *opt, const char *arg __rte_unused)
+{
+ opt->per_port_pool = 1;
+ return 0;
+}
+
static void
usage(char *program)
{
@@ -333,6 +339,7 @@ usage(char *program)
"\t--enable_vector : enable event vectorization.\n"
"\t--vector_size : Max vector size.\n"
"\t--vector_tmo_ns : Max vector timeout in nanoseconds\n"
+ "\t--per_port_pool : Configure unique pool per ethdev port\n"
);
printf("available tests:\n");
evt_test_dump_names();
@@ -408,6 +415,7 @@ static struct option lgopts[] = {
{ EVT_ENA_VECTOR, 0, 0, 0 },
{ EVT_VECTOR_SZ, 1, 0, 0 },
{ EVT_VECTOR_TMO, 1, 0, 0 },
+ { EVT_PER_PORT_POOL, 0, 0, 0 },
{ EVT_HELP, 0, 0, 0 },
{ NULL, 0, 0, 0 }
};
@@ -446,6 +454,7 @@ evt_opts_parse_long(int opt_idx, struct evt_options *opt)
{ EVT_ENA_VECTOR, evt_parse_ena_vector},
{ EVT_VECTOR_SZ, evt_parse_vector_size},
{ EVT_VECTOR_TMO, evt_parse_vector_tmo_ns},
+ { EVT_PER_PORT_POOL, evt_parse_per_port_pool},
};
for (i = 0; i < RTE_DIM(parsermap); i++) {
diff --git a/app/test-eventdev/evt_options.h b/app/test-eventdev/evt_options.h
index 1cea2a3e11..6436200b40 100644
--- a/app/test-eventdev/evt_options.h
+++ b/app/test-eventdev/evt_options.h
@@ -46,6 +46,7 @@
#define EVT_ENA_VECTOR ("enable_vector")
#define EVT_VECTOR_SZ ("vector_size")
#define EVT_VECTOR_TMO ("vector_tmo_ns")
+#define EVT_PER_PORT_POOL ("per_port_pool")
#define EVT_HELP ("help")
void evt_options_default(struct evt_options *opt);
diff --git a/app/test-eventdev/test_pipeline_common.c b/app/test-eventdev/test_pipeline_common.c
index d5ef90500f..6ee530d4cd 100644
--- a/app/test-eventdev/test_pipeline_common.c
+++ b/app/test-eventdev/test_pipeline_common.c
@@ -259,9 +259,10 @@ pipeline_ethdev_setup(struct evt_test *test, struct evt_options *opt)
}
for (j = 0; j < opt->eth_queues; j++) {
- if (rte_eth_rx_queue_setup(i, j, NB_RX_DESC,
- rte_socket_id(), &rx_conf,
- t->pool) < 0) {
+ if (rte_eth_rx_queue_setup(
+ i, j, NB_RX_DESC, rte_socket_id(), &rx_conf,
+ opt->per_port_pool ? t->pool[i] :
+ t->pool[0]) < 0) {
evt_err("Failed to setup eth port [%d] rx_queue: %d.",
i, 0);
return -EINVAL;
@@ -569,18 +570,35 @@ pipeline_mempool_setup(struct evt_test *test, struct evt_options *opt)
if (data_size > opt->mbuf_sz)
opt->mbuf_sz = data_size;
}
+ if (opt->per_port_pool) {
+ char name[RTE_MEMPOOL_NAMESIZE];
+
+ snprintf(name, RTE_MEMPOOL_NAMESIZE, "%s-%d",
+ test->name, i);
+ t->pool[i] = rte_pktmbuf_pool_create(
+ name, /* mempool name */
+ opt->pool_sz, /* number of elements*/
+ 0, /* cache size*/
+ 0, opt->mbuf_sz, opt->socket_id); /* flags */
+
+ if (t->pool[i] == NULL) {
+ evt_err("failed to create mempool %s", name);
+ return -ENOMEM;
+ }
+ }
}
- t->pool = rte_pktmbuf_pool_create(test->name, /* mempool name */
+ if (!opt->per_port_pool) {
+ t->pool[0] = rte_pktmbuf_pool_create(
+ test->name, /* mempool name */
opt->pool_sz, /* number of elements*/
- 512, /* cache size*/
- 0,
- opt->mbuf_sz,
- opt->socket_id); /* flags */
-
- if (t->pool == NULL) {
- evt_err("failed to create mempool");
- return -ENOMEM;
+ 0, /* cache size*/
+ 0, opt->mbuf_sz, opt->socket_id); /* flags */
+
+ if (t->pool[0] == NULL) {
+ evt_err("failed to create mempool");
+ return -ENOMEM;
+ }
}
return 0;
@@ -589,10 +607,16 @@ pipeline_mempool_setup(struct evt_test *test, struct evt_options *opt)
void
pipeline_mempool_destroy(struct evt_test *test, struct evt_options *opt)
{
- RTE_SET_USED(opt);
struct test_pipeline *t = evt_test_priv(test);
+ int i;
- rte_mempool_free(t->pool);
+ RTE_SET_USED(opt);
+ if (opt->per_port_pool) {
+ RTE_ETH_FOREACH_DEV(i)
+ rte_mempool_free(t->pool[i]);
+ } else {
+ rte_mempool_free(t->pool[0]);
+ }
}
int
diff --git a/app/test-eventdev/test_pipeline_common.h b/app/test-eventdev/test_pipeline_common.h
index 800a90616f..d69e2f8a3e 100644
--- a/app/test-eventdev/test_pipeline_common.h
+++ b/app/test-eventdev/test_pipeline_common.h
@@ -47,7 +47,7 @@ struct test_pipeline {
enum evt_test_result result;
uint32_t nb_flows;
uint64_t outstand_pkts;
- struct rte_mempool *pool;
+ struct rte_mempool *pool[RTE_MAX_ETHPORTS];
struct worker_data worker[EVT_MAX_PORTS];
struct evt_options *opt;
uint8_t sched_type_list[EVT_MAX_STAGES] __rte_cache_aligned;
diff --git a/doc/guides/tools/testeventdev.rst b/doc/guides/tools/testeventdev.rst
index f252dc2c19..b81340471e 100644
--- a/doc/guides/tools/testeventdev.rst
+++ b/doc/guides/tools/testeventdev.rst
@@ -178,6 +178,12 @@ The following are the application command-line options:
Vector timeout nanoseconds to be configured for the Rx adapter.
Only applicable for `pipeline_atq` and `pipeline_queue` tests.
+* ``--per_port_pool``
+
+ Configure unique mempool per ethernet device, the size of each pool
+ is equal to `pool_sz`.
+ Only applicable for `pipeline_atq` and `pipeline_queue` tests.
+
Eventdev Tests
--------------
@@ -631,6 +637,7 @@ Supported application command line options are following::
--enable_vector
--vector_size
--vector_tmo_ns
+ --per_port_pool
.. Note::
@@ -734,6 +741,7 @@ Supported application command line options are following::
--enable_vector
--vector_size
--vector_tmo_ns
+ --per_port_pool
.. Note::
--
2.17.1
next reply other threads:[~2021-06-15 10:32 UTC|newest]
Thread overview: 4+ messages / expand[flat|nested] mbox.gz Atom feed top
2021-06-15 10:31 pbhagavatula [this message]
2021-06-30 15:37 ` Jerin Jacob
2021-07-01 6:07 ` [dpdk-dev] [PATCH v2] " pbhagavatula
2021-07-01 8:31 ` Jerin Jacob
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20210615103149.4194-1-pbhagavatula@marvell.com \
--to=pbhagavatula@marvell.com \
--cc=dev@dpdk.org \
--cc=jerinj@marvell.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).