* [dpdk-dev] [PATCH 1/2] test/mempool_perf: Free mempool on exit
@ 2017-04-05 8:51 Santosh Shukla
2017-04-05 8:51 ` [dpdk-dev] [PATCH 2/2] test/mempool_perf: support default mempool autotest Santosh Shukla
` (2 more replies)
0 siblings, 3 replies; 20+ messages in thread
From: Santosh Shukla @ 2017-04-05 8:51 UTC (permalink / raw)
To: olivier.matz, dev; +Cc: shreyansh.jain, hemant.agrawal, Santosh Shukla, stable
The mempool_perf test is not freeing the pool memory.
Cc: stable@dpdk.org
Signed-off-by: Santosh Shukla <santosh.shukla@caviumnetworks.com>
---
test/test/test_mempool_perf.c | 31 +++++++++++++++++++------------
1 file changed, 19 insertions(+), 12 deletions(-)
diff --git a/test/test/test_mempool_perf.c b/test/test/test_mempool_perf.c
index ebf1721..3c45971 100644
--- a/test/test/test_mempool_perf.c
+++ b/test/test/test_mempool_perf.c
@@ -312,6 +312,8 @@ struct mempool_test_stats {
static int
test_mempool_perf(void)
{
+ int ret = -1;
+
rte_atomic32_init(&synchro);
/* create a mempool (without cache) */
@@ -322,7 +324,7 @@ struct mempool_test_stats {
my_obj_init, NULL,
SOCKET_ID_ANY, 0);
if (mp_nocache == NULL)
- return -1;
+ goto err;
/* create a mempool (with cache) */
if (mp_cache == NULL)
@@ -333,33 +335,33 @@ struct mempool_test_stats {
my_obj_init, NULL,
SOCKET_ID_ANY, 0);
if (mp_cache == NULL)
- return -1;
+ goto err;
/* performance test with 1, 2 and max cores */
printf("start performance test (without cache)\n");
mp = mp_nocache;
if (do_one_mempool_test(1) < 0)
- return -1;
+ goto err;
if (do_one_mempool_test(2) < 0)
- return -1;
+ goto err;
if (do_one_mempool_test(rte_lcore_count()) < 0)
- return -1;
+ goto err;
/* performance test with 1, 2 and max cores */
printf("start performance test (with cache)\n");
mp = mp_cache;
if (do_one_mempool_test(1) < 0)
- return -1;
+ goto err;
if (do_one_mempool_test(2) < 0)
- return -1;
+ goto err;
if (do_one_mempool_test(rte_lcore_count()) < 0)
- return -1;
+ goto err;
/* performance test with 1, 2 and max cores */
printf("start performance test (with user-owned cache)\n");
@@ -367,17 +369,22 @@ struct mempool_test_stats {
use_external_cache = 1;
if (do_one_mempool_test(1) < 0)
- return -1;
+ goto err;
if (do_one_mempool_test(2) < 0)
- return -1;
+ goto err;
if (do_one_mempool_test(rte_lcore_count()) < 0)
- return -1;
+ goto err;
rte_mempool_list_dump(stdout);
- return 0;
+ ret = 0;
+
+err:
+ rte_mempool_free(mp_cache);
+ rte_mempool_free(mp_nocache);
+ return ret;
}
REGISTER_TEST_COMMAND(mempool_perf_autotest, test_mempool_perf);
--
1.7.9.5
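(For context: the autotest registered above is run from the interactive
test application. An illustrative invocation, assuming the usual build
layout of this era and arbitrary example EAL arguments:

    ./build/app/test -c 0x3 -n 4
    RTE>>mempool_perf_autotest

Before this patch the pools created by test_mempool_perf() were never
freed; with it they are released through rte_mempool_free() on both the
success and the error path.)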
^ permalink raw reply [flat|nested] 20+ messages in thread
* [dpdk-dev] [PATCH 2/2] test/mempool_perf: support default mempool autotest
2017-04-05 8:51 [dpdk-dev] [PATCH 1/2] test/mempool_perf: Free mempool on exit Santosh Shukla
@ 2017-04-05 8:51 ` Santosh Shukla
2017-04-05 10:02 ` Shreyansh Jain
2017-04-05 9:57 ` [dpdk-dev] [PATCH 1/2] test/mempool_perf: Free mempool on exit Shreyansh Jain
2017-04-06 6:45 ` [dpdk-dev] [PATCH v2 " Santosh Shukla
2 siblings, 1 reply; 20+ messages in thread
From: Santosh Shukla @ 2017-04-05 8:51 UTC (permalink / raw)
To: olivier.matz, dev; +Cc: shreyansh.jain, hemant.agrawal, Santosh Shukla
Mempool_perf autotest currently runs the perf regression for:
* nocache
* cache
Introduce default_pool, mainly targeted at the ext-mempool regression
test. Ext-mempool does not need the 'cache' mode, so test-case support
is only added for the 'nocache' mode.
So to run the ext-mempool perf regression, the user has to set
RTE_MBUF_DEFAULT_MEMPOOL_OPS="<>"
There is a chance of duplication, i.e. if the user sets
RTE_MBUF_DEFAULT_MEMPOOL_OPS="ring_mp_mc" then the regression
will run twice for 'ring_mp_mc'.
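(For reference, RTE_MBUF_DEFAULT_MEMPOOL_OPS is taken from the build
configuration. A sketch of how an external handler would typically be
selected, assuming the make-based config of this era and a placeholder
ops name:

    CONFIG_RTE_MBUF_DEFAULT_MEMPOOL_OPS="<ext-mempool-ops-name>"

The test below then binds default_pool to that handler with
rte_mempool_set_ops_byname() before populating it.)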
Signed-off-by: Santosh Shukla <santosh.shukla@caviumnetworks.com>
---
test/test/test_mempool_perf.c | 41 +++++++++++++++++++++++++++++++++++++++++
1 file changed, 41 insertions(+)
diff --git a/test/test/test_mempool_perf.c b/test/test/test_mempool_perf.c
index 3c45971..9c80860 100644
--- a/test/test/test_mempool_perf.c
+++ b/test/test/test_mempool_perf.c
@@ -111,6 +111,7 @@
static struct rte_mempool *mp;
static struct rte_mempool *mp_cache, *mp_nocache;
+static struct rte_mempool *default_pool;
static int use_external_cache;
static unsigned external_cache_size = RTE_MEMPOOL_CACHE_MAX_SIZE;
@@ -337,6 +338,31 @@ struct mempool_test_stats {
if (mp_cache == NULL)
goto err;
+ /* Create a mempool based on Default handler */
+ default_pool = rte_mempool_create_empty("default_pool",
+ MEMPOOL_SIZE,
+ MEMPOOL_ELT_SIZE,
+ 0, 0,
+ SOCKET_ID_ANY, 0);
+
+ if (default_pool == NULL) {
+ printf("cannot allocate %s mempool\n",
+ RTE_MBUF_DEFAULT_MEMPOOL_OPS);
+ goto err;
+ }
+ if (rte_mempool_set_ops_byname(default_pool,
+ RTE_MBUF_DEFAULT_MEMPOOL_OPS, NULL) < 0) {
+ printf("cannot set %s handler\n",
+ RTE_MBUF_DEFAULT_MEMPOOL_OPS);
+ goto err;
+ }
+ if (rte_mempool_populate_default(default_pool) < 0) {
+ printf("cannot populate %s mempool\n",
+ RTE_MBUF_DEFAULT_MEMPOOL_OPS);
+ goto err;
+ }
+ rte_mempool_obj_iter(default_pool, my_obj_init, NULL);
+
/* performance test with 1, 2 and max cores */
printf("start performance test (without cache)\n");
mp = mp_nocache;
@@ -351,6 +377,20 @@ struct mempool_test_stats {
goto err;
/* performance test with 1, 2 and max cores */
+ printf("start performance test for %s (without cache)\n",
+ RTE_MBUF_DEFAULT_MEMPOOL_OPS);
+ mp = default_pool;
+
+ if (do_one_mempool_test(1) < 0)
+ goto err;
+
+ if (do_one_mempool_test(2) < 0)
+ goto err;
+
+ if (do_one_mempool_test(rte_lcore_count()) < 0)
+ goto err;
+
+ /* performance test with 1, 2 and max cores */
printf("start performance test (with cache)\n");
mp = mp_cache;
@@ -384,6 +424,7 @@ struct mempool_test_stats {
err:
rte_mempool_free(mp_cache);
rte_mempool_free(mp_nocache);
+ rte_mempool_free(default_pool);
return ret;
}
--
1.7.9.5
^ permalink raw reply [flat|nested] 20+ messages in thread
* Re: [dpdk-dev] [PATCH 1/2] test/mempool_perf: Free mempool on exit
2017-04-05 8:51 [dpdk-dev] [PATCH 1/2] test/mempool_perf: Free mempool on exit Santosh Shukla
2017-04-05 8:51 ` [dpdk-dev] [PATCH 2/2] test/mempool_perf: support default mempool autotest Santosh Shukla
@ 2017-04-05 9:57 ` Shreyansh Jain
2017-04-05 12:33 ` santosh
2017-04-06 6:45 ` [dpdk-dev] [PATCH v2 " Santosh Shukla
2 siblings, 1 reply; 20+ messages in thread
From: Shreyansh Jain @ 2017-04-05 9:57 UTC (permalink / raw)
To: Santosh Shukla; +Cc: olivier.matz, dev, hemant.agrawal, stable
Hello Santosh,
On Wednesday 05 April 2017 02:21 PM, Santosh Shukla wrote:
> Mempool_perf test not freeing pool memory.
>
> Cc: stable@dpdk.org
> Signed-off-by: Santosh Shukla <santosh.shukla@caviumnetworks.com>
> ---
> test/test/test_mempool_perf.c | 31 +++++++++++++++++++------------
> 1 file changed, 19 insertions(+), 12 deletions(-)
>
> diff --git a/test/test/test_mempool_perf.c b/test/test/test_mempool_perf.c
> index ebf1721..3c45971 100644
> --- a/test/test/test_mempool_perf.c
> +++ b/test/test/test_mempool_perf.c
> @@ -312,6 +312,8 @@ struct mempool_test_stats {
> static int
> test_mempool_perf(void)
> {
> + int ret = -1;
> +
> rte_atomic32_init(&synchro);
>
> /* create a mempool (without cache) */
> @@ -322,7 +324,7 @@ struct mempool_test_stats {
Can you check why the patch is showing "struct mempool_test_stats" as
the context?
I tried creating this patch and it shows "test_mempool_perf(void)" as
context.
Off topic: does it matter to maintainers if the context is not correct?
> my_obj_init, NULL,
> SOCKET_ID_ANY, 0);
> if (mp_nocache == NULL)
> - return -1;
> + goto err;
>
> /* create a mempool (with cache) */
> if (mp_cache == NULL)
[...]
> +err:
> + rte_mempool_free(mp_cache);
> + rte_mempool_free(mp_nocache);
> + return ret;
> }
>
> REGISTER_TEST_COMMAND(mempool_perf_autotest, test_mempool_perf);
>
Other than that:
Acked-by: Shreyansh Jain <shreyansh.jain@nxp.com>
^ permalink raw reply [flat|nested] 20+ messages in thread
* Re: [dpdk-dev] [PATCH 2/2] test/mempool_perf: support default mempool autotest
2017-04-05 8:51 ` [dpdk-dev] [PATCH 2/2] test/mempool_perf: support default mempool autotest Santosh Shukla
@ 2017-04-05 10:02 ` Shreyansh Jain
2017-04-05 12:40 ` santosh
0 siblings, 1 reply; 20+ messages in thread
From: Shreyansh Jain @ 2017-04-05 10:02 UTC (permalink / raw)
To: Santosh Shukla; +Cc: olivier.matz, dev, hemant.agrawal
Hi Santosh,
On Wednesday 05 April 2017 02:21 PM, Santosh Shukla wrote:
> Mempool_perf autotest currently does perf regression for:
> * nochache
> * cache
>
> Introducing default_pool, mainly targeted for ext-mempool regression
> test. Ext-mempool don't need 'cache' modes so only adding test-case
> support for 'nocache' mode.
>
> So to run ext-mempool perf regression, user has to set
> RTE_MBUF_DEFAULT_MEMPOOL_OPS="<>"
>
> There is chance of duplication ie.. if user sets
>
> RTE_MBUF_DEFAULT_MEMPOOL_OPS="ring_mp_mc" then regression
> will happen twice for 'ring_mp_mc'
>
> Signed-off-by: Santosh Shukla <santosh.shukla@caviumnetworks.com>
> ---
> test/test/test_mempool_perf.c | 41 +++++++++++++++++++++++++++++++++++++++++
> 1 file changed, 41 insertions(+)
>
> diff --git a/test/test/test_mempool_perf.c b/test/test/test_mempool_perf.c
> index 3c45971..9c80860 100644
> --- a/test/test/test_mempool_perf.c
> +++ b/test/test/test_mempool_perf.c
> @@ -111,6 +111,7 @@
>
> static struct rte_mempool *mp;
> static struct rte_mempool *mp_cache, *mp_nocache;
> +static struct rte_mempool *default_pool;
> static int use_external_cache;
> static unsigned external_cache_size = RTE_MEMPOOL_CACHE_MAX_SIZE;
>
> @@ -337,6 +338,31 @@ struct mempool_test_stats {
Similar observation as in [PATCH 1/2]: the context above is not correct;
it should be test_mempool_perf(void).
> if (mp_cache == NULL)
> goto err;
>
> + /* Create a mempool based on Default handler */
> + default_pool = rte_mempool_create_empty("default_pool",
> + MEMPOOL_SIZE,
> + MEMPOOL_ELT_SIZE,
> + 0, 0,
> + SOCKET_ID_ANY, 0);
> +
> + if (default_pool == NULL) {
> + printf("cannot allocate %s mempool\n",
> + RTE_MBUF_DEFAULT_MEMPOOL_OPS);
> + goto err;
> + }
> + if (rte_mempool_set_ops_byname(default_pool,
> + RTE_MBUF_DEFAULT_MEMPOOL_OPS, NULL) < 0) {
> + printf("cannot set %s handler\n",
> + RTE_MBUF_DEFAULT_MEMPOOL_OPS);
> + goto err;
> + }
> + if (rte_mempool_populate_default(default_pool) < 0) {
> + printf("cannot populate %s mempool\n",
> + RTE_MBUF_DEFAULT_MEMPOOL_OPS);
> + goto err;
> + }
> + rte_mempool_obj_iter(default_pool, my_obj_init, NULL);
> +
> /* performance test with 1, 2 and max cores */
> printf("start performance test (without cache)\n");
> mp = mp_nocache;
> @@ -351,6 +377,20 @@ struct mempool_test_stats {
> goto err;
>
> /* performance test with 1, 2 and max cores */
> + printf("start performance test for %s (without cache)\n",
> + RTE_MBUF_DEFAULT_MEMPOOL_OPS);
> + mp = default_pool;
> +
> + if (do_one_mempool_test(1) < 0)
> + goto err;
> +
> + if (do_one_mempool_test(2) < 0)
> + goto err;
> +
> + if (do_one_mempool_test(rte_lcore_count()) < 0)
> + goto err;
> +
> + /* performance test with 1, 2 and max cores */
> printf("start performance test (with cache)\n");
> mp = mp_cache;
>
> @@ -384,6 +424,7 @@ struct mempool_test_stats {
> err:
> rte_mempool_free(mp_cache);
> rte_mempool_free(mp_nocache);
> + rte_mempool_free(default_pool);
> return ret;
> }
>
>
Other than the above trivial comment:
Acked-by: Shreyansh Jain <shreyansh.jain@nxp.com>
^ permalink raw reply [flat|nested] 20+ messages in thread
* Re: [dpdk-dev] [PATCH 1/2] test/mempool_perf: Free mempool on exit
2017-04-05 9:57 ` [dpdk-dev] [PATCH 1/2] test/mempool_perf: Free mempool on exit Shreyansh Jain
@ 2017-04-05 12:33 ` santosh
0 siblings, 0 replies; 20+ messages in thread
From: santosh @ 2017-04-05 12:33 UTC (permalink / raw)
To: Shreyansh Jain; +Cc: olivier.matz, dev, hemant.agrawal, stable
Hi Shreyansh,
On Wednesday 05 April 2017 03:27 PM, Shreyansh Jain wrote:
> Hello Santosh,
> On Wednesday 05 April 2017 02:21 PM, Santosh Shukla wrote:
>> Mempool_perf test not freeing pool memory.
>> Cc: stable@dpdk.org
>> Signed-off-by: Santosh Shukla <santosh.shukla@caviumnetworks.com>
>> ---
>> test/test/test_mempool_perf.c | 31 +++++++++++++++++++------------
>> 1 file changed, 19 insertions(+), 12 deletions(-)
>> diff --git a/test/test/test_mempool_perf.c b/test/test/test_mempool_perf.c
>> index ebf1721..3c45971 100644
>> --- a/test/test/test_mempool_perf.c
>> +++ b/test/test/test_mempool_perf.c
>> @@ -312,6 +312,8 @@ struct mempool_test_stats {
>> static int
>> test_mempool_perf(void)
>> {
>> + int ret = -1;
>> +
>> rte_atomic32_init(&synchro);
>> /* create a mempool (without cache) */
>> @@ -322,7 +324,7 @@ struct mempool_test_stats {
> Can you check why the patch is showing "struct mempool_test_stats" as
> the context?
> I tried creating this patch and it shows "test_mempool_perf(void)" as
> context.
> off the topic, does it matter to maintainers if context is not correct?
> t
That was because of an older git version, i.e. < 2.x.
I upgraded to 2.11.0, applied the same patch, and 'git show' now shows
the correct context.
So in short: the posted patch won't break anything. If the end user has
a git version < 2.x, they will see a disparity in the patch context;
git >= 2.x will not.
In any case I will post a v2. Thanks for pointing out.
diff --git a/test/test/test_mempool_perf.c b/test/test/test_mempool_perf.c
index ebf1721ac..3c45971ab 100644
--- a/test/test/test_mempool_perf.c
+++ b/test/test/test_mempool_perf.c
@@ -312,6 +312,8 @@ do_one_mempool_test(unsigned cores)
static int
test_mempool_perf(void)
{
+ int ret = -1;
+
rte_atomic32_init(&synchro);
/* create a mempool (without cache) */
@@ -322,7 +324,7 @@ test_mempool_perf(void)
my_obj_init, NULL,
SOCKET_ID_ANY, 0);
if (mp_nocache == NULL)
- return -1;
+ goto err;
[...]
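(Aside: for readers stuck on an older git, an illustrative and untested
workaround is to enable git's built-in C/C++ hunk-header rules via a
.gitattributes file at the repository root:

    *.c diff=cpp
    *.h diff=cpp

so the @@ context line names the enclosing function rather than an
earlier struct. Upgrading git, as done above, gives the same result.)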
> Other than that:
> Acked-by: Shreyansh Jain <shreyansh.jain@nxp.com>
Thanks!,
>
^ permalink raw reply [flat|nested] 20+ messages in thread
* Re: [dpdk-dev] [PATCH 2/2] test/mempool_perf: support default mempool autotest
2017-04-05 10:02 ` Shreyansh Jain
@ 2017-04-05 12:40 ` santosh
0 siblings, 0 replies; 20+ messages in thread
From: santosh @ 2017-04-05 12:40 UTC (permalink / raw)
To: Shreyansh Jain; +Cc: olivier.matz, dev, hemant.agrawal
Hi Shreyansh,
On Wednesday 05 April 2017 03:32 PM, Shreyansh Jain wrote:
> Hi Santosh,
> On Wednesday 05 April 2017 02:21 PM, Santosh Shukla wrote:
>> Mempool_perf autotest currently does perf regression for:
>> * nochache
>> * cache
>> Introducing default_pool, mainly targeted for ext-mempool regression
>> test. Ext-mempool don't need 'cache' modes so only adding test-case
>> support for 'nocache' mode.
>> So to run ext-mempool perf regression, user has to set
>> RTE_MBUF_DEFAULT_MEMPOOL_OPS="<>"
>> There is chance of duplication ie.. if user sets
>> RTE_MBUF_DEFAULT_MEMPOOL_OPS="ring_mp_mc" then regression
>> will happen twice for 'ring_mp_mc'
>> Signed-off-by: Santosh Shukla <santosh.shukla@caviumnetworks.com>
>> ---
>> test/test/test_mempool_perf.c | 41 +++++++++++++++++++++++++++++++++++++++++
>> 1 file changed, 41 insertions(+)
>> diff --git a/test/test/test_mempool_perf.c b/test/test/test_mempool_perf.c
>> index 3c45971..9c80860 100644
>> --- a/test/test/test_mempool_perf.c
>> +++ b/test/test/test_mempool_perf.c
>> @@ -111,6 +111,7 @@
>> static struct rte_mempool *mp;
>> static struct rte_mempool *mp_cache, *mp_nocache;
>> +static struct rte_mempool *default_pool;
>> static int use_external_cache;
>> static unsigned external_cache_size = RTE_MEMPOOL_CACHE_MAX_SIZE;
>> @@ -337,6 +338,31 @@ struct mempool_test_stats {
> Similar observations as [PATCH 1/2], the context above is not correct.
> it should be test_mempool_perf(void)
Will post v2.
>> if (mp_cache == NULL)
>> goto err;
>> + /* Create a mempool based on Default handler */
>> + default_pool = rte_mempool_create_empty("default_pool",
>> + MEMPOOL_SIZE,
>> + MEMPOOL_ELT_SIZE,
>> + 0, 0,
>> + SOCKET_ID_ANY, 0);
>> +
>> + if (default_pool == NULL) {
>> + printf("cannot allocate %s mempool\n",
>> + RTE_MBUF_DEFAULT_MEMPOOL_OPS);
>> + goto err;
>> + }
>> + if (rte_mempool_set_ops_byname(default_pool,
>> + RTE_MBUF_DEFAULT_MEMPOOL_OPS, NULL) < 0) {
>> + printf("cannot set %s handler\n",
>> + RTE_MBUF_DEFAULT_MEMPOOL_OPS);
>> + goto err;
>> + }
>> + if (rte_mempool_populate_default(default_pool) < 0) {
>> + printf("cannot populate %s mempool\n",
>> + RTE_MBUF_DEFAULT_MEMPOOL_OPS);
>> + goto err;
>> + }
>> + rte_mempool_obj_iter(default_pool, my_obj_init, NULL);
>> +
>> /* performance test with 1, 2 and max cores */
>> printf("start performance test (without cache)\n");
>> mp = mp_nocache;
>> @@ -351,6 +377,20 @@ struct mempool_test_stats {
>> goto err;
>> /* performance test with 1, 2 and max cores */
>> + printf("start performance test for %s (without cache)\n",
>> + RTE_MBUF_DEFAULT_MEMPOOL_OPS);
>> + mp = default_pool;
>> +
>> + if (do_one_mempool_test(1) < 0)
>> + goto err;
>> +
>> + if (do_one_mempool_test(2) < 0)
>> + goto err;
>> +
>> + if (do_one_mempool_test(rte_lcore_count()) < 0)
>> + goto err;
>> +
>> + /* performance test with 1, 2 and max cores */
>> printf("start performance test (with cache)\n");
>> mp = mp_cache;
>> @@ -384,6 +424,7 @@ struct mempool_test_stats {
>> err:
>> rte_mempool_free(mp_cache);
>> rte_mempool_free(mp_nocache);
>> + rte_mempool_free(default_pool);
>> return ret;
>> }
> Other than the above trivial comment:
> Acked-by: Shreyansh Jain <shreyansh.jain@nxp.com>
Thanks!,
>
^ permalink raw reply [flat|nested] 20+ messages in thread
* [dpdk-dev] [PATCH v2 1/2] test/mempool_perf: Free mempool on exit
2017-04-05 8:51 [dpdk-dev] [PATCH 1/2] test/mempool_perf: Free mempool on exit Santosh Shukla
2017-04-05 8:51 ` [dpdk-dev] [PATCH 2/2] test/mempool_perf: support default mempool autotest Santosh Shukla
2017-04-05 9:57 ` [dpdk-dev] [PATCH 1/2] test/mempool_perf: Free mempool on exit Shreyansh Jain
@ 2017-04-06 6:45 ` Santosh Shukla
2017-04-06 6:45 ` [dpdk-dev] [PATCH v2 2/2] test/mempool_perf: support default mempool autotest Santosh Shukla
` (2 more replies)
2 siblings, 3 replies; 20+ messages in thread
From: Santosh Shukla @ 2017-04-06 6:45 UTC (permalink / raw)
To: olivier.matz, dev; +Cc: hemant.agrawal, shreyansh.jain, Santosh Shukla, stable
The mempool_perf test is not freeing the pool memory.
Cc: stable@dpdk.org
Signed-off-by: Santosh Shukla <santosh.shukla@caviumnetworks.com>
Acked-by: Shreyansh Jain <shreyansh.jain@nxp.com>
---
v1 --> v2:
* Fixed patch context
test/test/test_mempool_perf.c | 31 +++++++++++++++++++------------
1 file changed, 19 insertions(+), 12 deletions(-)
diff --git a/test/test/test_mempool_perf.c b/test/test/test_mempool_perf.c
index ebf1721ac..3c45971ab 100644
--- a/test/test/test_mempool_perf.c
+++ b/test/test/test_mempool_perf.c
@@ -312,6 +312,8 @@ do_one_mempool_test(unsigned cores)
static int
test_mempool_perf(void)
{
+ int ret = -1;
+
rte_atomic32_init(&synchro);
/* create a mempool (without cache) */
@@ -322,7 +324,7 @@ test_mempool_perf(void)
my_obj_init, NULL,
SOCKET_ID_ANY, 0);
if (mp_nocache == NULL)
- return -1;
+ goto err;
/* create a mempool (with cache) */
if (mp_cache == NULL)
@@ -333,33 +335,33 @@ test_mempool_perf(void)
my_obj_init, NULL,
SOCKET_ID_ANY, 0);
if (mp_cache == NULL)
- return -1;
+ goto err;
/* performance test with 1, 2 and max cores */
printf("start performance test (without cache)\n");
mp = mp_nocache;
if (do_one_mempool_test(1) < 0)
- return -1;
+ goto err;
if (do_one_mempool_test(2) < 0)
- return -1;
+ goto err;
if (do_one_mempool_test(rte_lcore_count()) < 0)
- return -1;
+ goto err;
/* performance test with 1, 2 and max cores */
printf("start performance test (with cache)\n");
mp = mp_cache;
if (do_one_mempool_test(1) < 0)
- return -1;
+ goto err;
if (do_one_mempool_test(2) < 0)
- return -1;
+ goto err;
if (do_one_mempool_test(rte_lcore_count()) < 0)
- return -1;
+ goto err;
/* performance test with 1, 2 and max cores */
printf("start performance test (with user-owned cache)\n");
@@ -367,17 +369,22 @@ test_mempool_perf(void)
use_external_cache = 1;
if (do_one_mempool_test(1) < 0)
- return -1;
+ goto err;
if (do_one_mempool_test(2) < 0)
- return -1;
+ goto err;
if (do_one_mempool_test(rte_lcore_count()) < 0)
- return -1;
+ goto err;
rte_mempool_list_dump(stdout);
- return 0;
+ ret = 0;
+
+err:
+ rte_mempool_free(mp_cache);
+ rte_mempool_free(mp_nocache);
+ return ret;
}
REGISTER_TEST_COMMAND(mempool_perf_autotest, test_mempool_perf);
--
2.11.0
^ permalink raw reply [flat|nested] 20+ messages in thread
* [dpdk-dev] [PATCH v2 2/2] test/mempool_perf: support default mempool autotest
2017-04-06 6:45 ` [dpdk-dev] [PATCH v2 " Santosh Shukla
@ 2017-04-06 6:45 ` Santosh Shukla
2017-04-07 15:51 ` [dpdk-dev] [PATCH v2 1/2] test/mempool_perf: Free mempool on exit Olivier Matz
2017-04-18 8:34 ` [dpdk-dev] [PATCH v3 1/3] test/test/mempool_perf: Remove mempool global vars Santosh Shukla
2 siblings, 0 replies; 20+ messages in thread
From: Santosh Shukla @ 2017-04-06 6:45 UTC (permalink / raw)
To: olivier.matz, dev; +Cc: hemant.agrawal, shreyansh.jain, Santosh Shukla
Mempool_perf autotest currently runs the perf regression for:
* nocache
* cache
Introduce default_pool, mainly targeted at the ext-mempool regression
test. Ext-mempool does not need the 'cache' mode, so test-case support
is only added for the 'nocache' mode.
So to run the ext-mempool perf regression, the user has to set
RTE_MBUF_DEFAULT_MEMPOOL_OPS="<>"
There is a chance of duplication, i.e. if the user sets
RTE_MBUF_DEFAULT_MEMPOOL_OPS="ring_mp_mc" then the regression
will run twice for 'ring_mp_mc'.
Signed-off-by: Santosh Shukla <santosh.shukla@caviumnetworks.com>
Acked-by: Shreyansh Jain <shreyansh.jain@nxp.com>
---
v1 --> v2
* Fixed patch context
test/test/test_mempool_perf.c | 44 +++++++++++++++++++++++++++++++++++++++++++
1 file changed, 44 insertions(+)
diff --git a/test/test/test_mempool_perf.c b/test/test/test_mempool_perf.c
index 3c45971ab..aa67414bc 100644
--- a/test/test/test_mempool_perf.c
+++ b/test/test/test_mempool_perf.c
@@ -111,6 +111,7 @@
static struct rte_mempool *mp;
static struct rte_mempool *mp_cache, *mp_nocache;
+static struct rte_mempool *default_pool;
static int use_external_cache;
static unsigned external_cache_size = RTE_MEMPOOL_CACHE_MAX_SIZE;
@@ -337,6 +338,34 @@ test_mempool_perf(void)
if (mp_cache == NULL)
goto err;
+ /* Create a mempool based on Default handler */
+ default_pool = rte_mempool_create_empty("default_pool",
+ MEMPOOL_SIZE,
+ MEMPOOL_ELT_SIZE,
+ 0, 0,
+ SOCKET_ID_ANY, 0);
+
+ if (default_pool == NULL) {
+ printf("cannot allocate %s mempool\n",
+ RTE_MBUF_DEFAULT_MEMPOOL_OPS);
+ goto err;
+ }
+
+ if (rte_mempool_set_ops_byname(default_pool,
+ RTE_MBUF_DEFAULT_MEMPOOL_OPS, NULL)
+ < 0) {
+ printf("cannot set %s handler\n", RTE_MBUF_DEFAULT_MEMPOOL_OPS);
+ goto err;
+ }
+
+ if (rte_mempool_populate_default(default_pool) < 0) {
+ printf("cannot populate %s mempool\n",
+ RTE_MBUF_DEFAULT_MEMPOOL_OPS);
+ goto err;
+ }
+
+ rte_mempool_obj_iter(default_pool, my_obj_init, NULL);
+
/* performance test with 1, 2 and max cores */
printf("start performance test (without cache)\n");
mp = mp_nocache;
@@ -351,6 +380,20 @@ test_mempool_perf(void)
goto err;
/* performance test with 1, 2 and max cores */
+ printf("start performance test for %s (without cache)\n",
+ RTE_MBUF_DEFAULT_MEMPOOL_OPS);
+ mp = default_pool;
+
+ if (do_one_mempool_test(1) < 0)
+ goto err;
+
+ if (do_one_mempool_test(2) < 0)
+ goto err;
+
+ if (do_one_mempool_test(rte_lcore_count()) < 0)
+ goto err;
+
+ /* performance test with 1, 2 and max cores */
printf("start performance test (with cache)\n");
mp = mp_cache;
@@ -384,6 +427,7 @@ test_mempool_perf(void)
err:
rte_mempool_free(mp_cache);
rte_mempool_free(mp_nocache);
+ rte_mempool_free(default_pool);
return ret;
}
--
2.11.0
^ permalink raw reply [flat|nested] 20+ messages in thread
* Re: [dpdk-dev] [PATCH v2 1/2] test/mempool_perf: Free mempool on exit
2017-04-06 6:45 ` [dpdk-dev] [PATCH v2 " Santosh Shukla
2017-04-06 6:45 ` [dpdk-dev] [PATCH v2 2/2] test/mempool_perf: support default mempool autotest Santosh Shukla
@ 2017-04-07 15:51 ` Olivier Matz
[not found] ` <BLUPR0701MB17140B8FD2D59B1A7835769FEA0E0@BLUPR0701MB1714.namprd07.prod.outlook.com>
2017-04-18 8:34 ` [dpdk-dev] [PATCH v3 1/3] test/test/mempool_perf: Remove mempool global vars Santosh Shukla
2 siblings, 1 reply; 20+ messages in thread
From: Olivier Matz @ 2017-04-07 15:51 UTC (permalink / raw)
To: Santosh Shukla; +Cc: dev, hemant.agrawal, shreyansh.jain, stable
Hi Santosh,
On Thu, 6 Apr 2017 12:15:48 +0530, Santosh Shukla <santosh.shukla@caviumnetworks.com> wrote:
> Mempool_perf test not freeing pool memory.
>
> Cc: stable@dpdk.org
> Signed-off-by: Santosh Shukla <santosh.shukla@caviumnetworks.com>
> Acked-by: Shreyansh Jain <shreyansh.jain@nxp.com>
> ---
> v1 --> v2:
> * Fixed patch context
>
> test/test/test_mempool_perf.c | 31 +++++++++++++++++++------------
> 1 file changed, 19 insertions(+), 12 deletions(-)
>
> diff --git a/test/test/test_mempool_perf.c b/test/test/test_mempool_perf.c
> index ebf1721ac..3c45971ab 100644
> --- a/test/test/test_mempool_perf.c
> +++ b/test/test/test_mempool_perf.c
> @@ -312,6 +312,8 @@ do_one_mempool_test(unsigned cores)
> static int
> test_mempool_perf(void)
> {
> + int ret = -1;
> +
> rte_atomic32_init(&synchro);
>
> /* create a mempool (without cache) */
> @@ -322,7 +324,7 @@ test_mempool_perf(void)
> my_obj_init, NULL,
> SOCKET_ID_ANY, 0);
> if (mp_nocache == NULL)
> - return -1;
> + goto err;
>
> /* create a mempool (with cache) */
> if (mp_cache == NULL)
[...]
>
> - return 0;
> + ret = 0;
> +
> +err:
> + rte_mempool_free(mp_cache);
> + rte_mempool_free(mp_nocache);
> + return ret;
Since mp_cache and mp_nocache are global variables, this won't
work properly due to the way the mempools are created:
/* create a mempool (without cache) */
if (mp_nocache == NULL)
mp_nocache = rte_mempool_create("perf_test_nocache", MEMPOOL_SIZE,
MEMPOOL_ELT_SIZE, 0, 0,
NULL, NULL,
my_obj_init, NULL,
SOCKET_ID_ANY, 0);
The if() should be removed, otherwise we'll have a use-after-free the
next time.
If you want to do more clean-up, you can try to remove the global
variables, but that may be harder.
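For clarity, a minimal sketch of the hazard described above (illustrative
only; gmp, run_test() and the pool parameters are hypothetical, not the
actual test code):

    #include <rte_memory.h>
    #include <rte_mempool.h>

    static struct rte_mempool *gmp; /* file-scope pointer survives across runs */

    static int
    run_test(void)
    {
            if (gmp == NULL) /* 2nd run: gmp is stale but non-NULL, */
                    gmp = rte_mempool_create("sketch_pool", /* so no new pool */
                                             1024, 64, 0, 0,
                                             NULL, NULL, NULL, NULL,
                                             SOCKET_ID_ANY, 0);
            if (gmp == NULL)
                    return -1;

            /* ... exercise the pool ... */

            rte_mempool_free(gmp); /* pool freed, but gmp left dangling */
            return 0;              /* the next call uses freed memory   */
    }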
Thanks,
Olivier
^ permalink raw reply [flat|nested] 20+ messages in thread
* Re: [dpdk-dev] [PATCH v2 1/2] test/mempool_perf: Free mempool on exit
[not found] ` <d0ea6fc4-7cbb-8766-616e-097c4e0fbb14@caviumnetworks.com>
@ 2017-04-10 20:09 ` Olivier MATZ
0 siblings, 0 replies; 20+ messages in thread
From: Olivier MATZ @ 2017-04-10 20:09 UTC (permalink / raw)
To: santosh; +Cc: dev, stable, Shreyansh Jain
Hi Santosh,
On Mon, 10 Apr 2017 01:13:43 +0530
santosh <santosh.shukla@caviumnetworks.com> wrote:
> Hi Olivier,
>
> On Monday 10 April 2017 12:47 AM, Shukla, Santosh wrote:
>
> >
> >
> > --------------------------------------------------------------------------------
> > *From:* Olivier Matz <olivier.matz@6wind.com>
> > *Sent:* Friday, April 7, 2017 9:21 PM
> > *To:* Shukla, Santosh
> > *Cc:* dev@dpdk.org; hemant.agrawal@nxp.com; shreyansh.jain@nxp.com; stable@dpdk.org
> > *Subject:* Re: [PATCH v2 1/2] test/mempool_perf: Free mempool on exit
> > Hi Santosh,
> >
> > On Thu, 6 Apr 2017 12:15:48 +0530, Santosh Shukla
> > <santosh.shukla@caviumnetworks.com> wrote:
> > > Mempool_perf test not freeing pool memory.
> > >
> > > Cc: stable@dpdk.org
> > > Signed-off-by: Santosh Shukla <santosh.shukla@caviumnetworks.com>
> > > Acked-by: Shreyansh Jain <shreyansh.jain@nxp.com>
> > > ---
> > > v1 --> v2:
> > > * Fixed patch context
> > >
> > > test/test/test_mempool_perf.c | 31 +++++++++++++++++++------------
> > > 1 file changed, 19 insertions(+), 12 deletions(-)
> > >
> > > diff --git a/test/test/test_mempool_perf.c b/test/test/test_mempool_perf.c
> > > index ebf1721ac..3c45971ab 100644
> > > --- a/test/test/test_mempool_perf.c
> > > +++ b/test/test/test_mempool_perf.c
> > > @@ -312,6 +312,8 @@ do_one_mempool_test(unsigned cores)
> > > static int
> > > test_mempool_perf(void)
> > > {
> > > + int ret = -1;
> > > +
> > > rte_atomic32_init(&synchro);
> > >
> > > /* create a mempool (without cache) */
> > > @@ -322,7 +324,7 @@ test_mempool_perf(void)
> > > my_obj_init, NULL,
> > > SOCKET_ID_ANY, 0);
> > > if (mp_nocache == NULL)
> > > - return -1;
> > > + goto err;
> > >
> > > /* create a mempool (with cache) */
> > > if (mp_cache == NULL)
> >
> > [...]
> >
> > >
> > > - return 0;
> > > + ret = 0;
> > > +
> > > +err:
> > > + rte_mempool_free(mp_cache);
> > > + rte_mempool_free(mp_nocache);
> > > + return ret;
> >
> >
> > Since mp_cache and mp_nocache are global variables, this won't
> > work properly due to the way mempool are created:
> >
> > /* create a mempool (without cache) */
> > if (mp_nocache == NULL)
> > mp_nocache = rte_mempool_create("perf_test_nocache", MEMPOOL_SIZE,
> > MEMPOOL_ELT_SIZE, 0, 0,
> > NULL, NULL,
> > my_obj_init, NULL,
> > SOCKET_ID_ANY, 0);
> >
> > The if() should be removed, else we'll have a use after free the next
> > time.
>
> I understand your point.
> But I think problem is rte_mempool_free() not referencing mp = null
> after freeing resources. Result of that is mp_nocache still has valid
> address, Although internal resources (mz/_ops_handle) were actually freed by
> rte_mempool_free(), right?
>
> So rather removing above if(), why not
> - Application explicit set mp_nocache = NULL after mempool_free().
> ie..
>
> err:
> rte_mempool_free(xxx);
> xxx = NULL;
>
>
> Or
> - Let rte_mempool_free() { - do mp = null; }
>
> And yes remove that if condition anyway. As its a dead-code
> for either of above 2 options.
>
> Does that make sense to you? If so then which one you prefer?
Yes, it makes sense.
My first preference would be removing the global vars (as suggested
below). Else your proposition is ok too.
>
> > If you want to do more clean-up, you can try to remove the global variables,
> > but it's maybe harder.
>
> Removing global var won't be harder imo, May be you know more but
> here is my point of view, after going through code:
>
> - All test_func like
> do_one_mempool_test -> launch_cores --> per_lcore_mempool_test -> using 'mp'
>
> where 'mp' is global.
>
> how about,
> - As you said Yes - remove global var ie.. mp_cache/nocache, default_pool, mp
> - Add 'rte_mempool *mp' as argument in do_one_mempool_test() func and other func too.
>
> Thus get rid of globals from app.
>
> Does that make sense to you?
Yes, looks good. It would be clearer without global vars.
Historically, it was not possible to free a mempool, that's why it was
done like this.
Thanks!
Olivier
^ permalink raw reply [flat|nested] 20+ messages in thread
* [dpdk-dev] [PATCH v3 1/3] test/test/mempool_perf: Remove mempool global vars
2017-04-06 6:45 ` [dpdk-dev] [PATCH v2 " Santosh Shukla
2017-04-06 6:45 ` [dpdk-dev] [PATCH v2 2/2] test/mempool_perf: support default mempool autotest Santosh Shukla
2017-04-07 15:51 ` [dpdk-dev] [PATCH v2 1/2] test/mempool_perf: Free mempool on exit Olivier Matz
@ 2017-04-18 8:34 ` Santosh Shukla
2017-04-18 8:34 ` [dpdk-dev] [PATCH v3 2/3] test/test/mempool_perf: Free mempool on exit Santosh Shukla
` (3 more replies)
2 siblings, 4 replies; 20+ messages in thread
From: Santosh Shukla @ 2017-04-18 8:34 UTC (permalink / raw)
To: olivier.matz, dev
Cc: thomas.monjalon, hemant.agrawal, shreyansh.jain, Santosh Shukla, stable
Cc: stable@dpdk.org
Signed-off-by: Santosh Shukla <santosh.shukla@caviumnetworks.com>
---
v3:
- Cleanup code change as per discussion in
http://dpdk.org/dev/patchwork/patch/23262/
test/test/test_mempool_perf.c | 61 ++++++++++++++++++++++---------------------
1 file changed, 31 insertions(+), 30 deletions(-)
diff --git a/test/test/test_mempool_perf.c b/test/test/test_mempool_perf.c
index ebf1721ac..213fcba5d 100644
--- a/test/test/test_mempool_perf.c
+++ b/test/test/test_mempool_perf.c
@@ -109,8 +109,6 @@
goto label; \
} while (0)
-static struct rte_mempool *mp;
-static struct rte_mempool *mp_cache, *mp_nocache;
static int use_external_cache;
static unsigned external_cache_size = RTE_MEMPOOL_CACHE_MAX_SIZE;
@@ -144,10 +142,11 @@ my_obj_init(struct rte_mempool *mp, __attribute__((unused)) void *arg,
}
static int
-per_lcore_mempool_test(__attribute__((unused)) void *arg)
+per_lcore_mempool_test(void *arg)
{
void *obj_table[MAX_KEEP];
unsigned i, idx;
+ struct rte_mempool *mp = (struct rte_mempool *)arg;
unsigned lcore_id = rte_lcore_id();
int ret = 0;
uint64_t start_cycles, end_cycles;
@@ -221,7 +220,7 @@ per_lcore_mempool_test(__attribute__((unused)) void *arg)
/* launch all the per-lcore test, and display the result */
static int
-launch_cores(unsigned cores)
+launch_cores(struct rte_mempool *mp, unsigned cores)
{
unsigned lcore_id;
uint64_t rate;
@@ -249,13 +248,13 @@ launch_cores(unsigned cores)
break;
cores--;
rte_eal_remote_launch(per_lcore_mempool_test,
- NULL, lcore_id);
+ mp, lcore_id);
}
/* start synchro and launch test on master */
rte_atomic32_set(&synchro, 1);
- ret = per_lcore_mempool_test(NULL);
+ ret = per_lcore_mempool_test(mp);
cores = cores_save;
RTE_LCORE_FOREACH_SLAVE(lcore_id) {
@@ -282,7 +281,7 @@ launch_cores(unsigned cores)
/* for a given number of core, launch all test cases */
static int
-do_one_mempool_test(unsigned cores)
+do_one_mempool_test(struct rte_mempool *mp, unsigned cores)
{
unsigned bulk_tab_get[] = { 1, 4, 32, 0 };
unsigned bulk_tab_put[] = { 1, 4, 32, 0 };
@@ -299,7 +298,7 @@ do_one_mempool_test(unsigned cores)
n_get_bulk = *get_bulk_ptr;
n_put_bulk = *put_bulk_ptr;
n_keep = *keep_ptr;
- ret = launch_cores(cores);
+ ret = launch_cores(mp, cores);
if (ret < 0)
return -1;
@@ -312,26 +311,28 @@ do_one_mempool_test(unsigned cores)
static int
test_mempool_perf(void)
{
+ struct rte_mempool *mp = NULL;
+ struct rte_mempool *mp_cache = NULL;
+ struct rte_mempool *mp_nocache = NULL;
+
rte_atomic32_init(&synchro);
/* create a mempool (without cache) */
- if (mp_nocache == NULL)
- mp_nocache = rte_mempool_create("perf_test_nocache", MEMPOOL_SIZE,
- MEMPOOL_ELT_SIZE, 0, 0,
- NULL, NULL,
- my_obj_init, NULL,
- SOCKET_ID_ANY, 0);
+ mp_nocache = rte_mempool_create("perf_test_nocache", MEMPOOL_SIZE,
+ MEMPOOL_ELT_SIZE, 0, 0,
+ NULL, NULL,
+ my_obj_init, NULL,
+ SOCKET_ID_ANY, 0);
if (mp_nocache == NULL)
return -1;
/* create a mempool (with cache) */
- if (mp_cache == NULL)
- mp_cache = rte_mempool_create("perf_test_cache", MEMPOOL_SIZE,
- MEMPOOL_ELT_SIZE,
- RTE_MEMPOOL_CACHE_MAX_SIZE, 0,
- NULL, NULL,
- my_obj_init, NULL,
- SOCKET_ID_ANY, 0);
+ mp_cache = rte_mempool_create("perf_test_cache", MEMPOOL_SIZE,
+ MEMPOOL_ELT_SIZE,
+ RTE_MEMPOOL_CACHE_MAX_SIZE, 0,
+ NULL, NULL,
+ my_obj_init, NULL,
+ SOCKET_ID_ANY, 0);
if (mp_cache == NULL)
return -1;
@@ -339,26 +340,26 @@ test_mempool_perf(void)
printf("start performance test (without cache)\n");
mp = mp_nocache;
- if (do_one_mempool_test(1) < 0)
+ if (do_one_mempool_test(mp, 1) < 0)
return -1;
- if (do_one_mempool_test(2) < 0)
+ if (do_one_mempool_test(mp, 2) < 0)
return -1;
- if (do_one_mempool_test(rte_lcore_count()) < 0)
+ if (do_one_mempool_test(mp, rte_lcore_count()) < 0)
return -1;
/* performance test with 1, 2 and max cores */
printf("start performance test (with cache)\n");
mp = mp_cache;
- if (do_one_mempool_test(1) < 0)
+ if (do_one_mempool_test(mp, 1) < 0)
return -1;
- if (do_one_mempool_test(2) < 0)
+ if (do_one_mempool_test(mp, 2) < 0)
return -1;
- if (do_one_mempool_test(rte_lcore_count()) < 0)
+ if (do_one_mempool_test(mp, rte_lcore_count()) < 0)
return -1;
/* performance test with 1, 2 and max cores */
@@ -366,13 +367,13 @@ test_mempool_perf(void)
mp = mp_nocache;
use_external_cache = 1;
- if (do_one_mempool_test(1) < 0)
+ if (do_one_mempool_test(mp, 1) < 0)
return -1;
- if (do_one_mempool_test(2) < 0)
+ if (do_one_mempool_test(mp, 2) < 0)
return -1;
- if (do_one_mempool_test(rte_lcore_count()) < 0)
+ if (do_one_mempool_test(mp, rte_lcore_count()) < 0)
return -1;
rte_mempool_list_dump(stdout);
--
2.11.0
^ permalink raw reply [flat|nested] 20+ messages in thread
* [dpdk-dev] [PATCH v3 2/3] test/test/mempool_perf: Free mempool on exit
2017-04-18 8:34 ` [dpdk-dev] [PATCH v3 1/3] test/test/mempool_perf: Remove mempool global vars Santosh Shukla
@ 2017-04-18 8:34 ` Santosh Shukla
2017-04-18 8:34 ` [dpdk-dev] [PATCH v3 3/3] test/test/mempool_perf: support default mempool autotest Santosh Shukla
` (2 subsequent siblings)
3 siblings, 0 replies; 20+ messages in thread
From: Santosh Shukla @ 2017-04-18 8:34 UTC (permalink / raw)
To: olivier.matz, dev
Cc: thomas.monjalon, hemant.agrawal, shreyansh.jain, Santosh Shukla, stable
The mempool_perf test is not freeing the pool memory.
Cc: stable@dpdk.org
Signed-off-by: Santosh Shukla <santosh.shukla@caviumnetworks.com>
Acked-by: Shreyansh Jain <shreyansh.jain@nxp.com>
---
v1--> v2:
- patch context fix
test/test/test_mempool_perf.c | 30 ++++++++++++++++++------------
1 file changed, 18 insertions(+), 12 deletions(-)
diff --git a/test/test/test_mempool_perf.c b/test/test/test_mempool_perf.c
index 213fcba5d..f29718dc4 100644
--- a/test/test/test_mempool_perf.c
+++ b/test/test/test_mempool_perf.c
@@ -314,6 +314,7 @@ test_mempool_perf(void)
struct rte_mempool *mp = NULL;
struct rte_mempool *mp_cache = NULL;
struct rte_mempool *mp_nocache = NULL;
+ int ret = -1;
rte_atomic32_init(&synchro);
@@ -324,7 +325,7 @@ test_mempool_perf(void)
my_obj_init, NULL,
SOCKET_ID_ANY, 0);
if (mp_nocache == NULL)
- return -1;
+ goto err;
/* create a mempool (with cache) */
mp_cache = rte_mempool_create("perf_test_cache", MEMPOOL_SIZE,
@@ -334,33 +335,33 @@ test_mempool_perf(void)
my_obj_init, NULL,
SOCKET_ID_ANY, 0);
if (mp_cache == NULL)
- return -1;
+ goto err;
/* performance test with 1, 2 and max cores */
printf("start performance test (without cache)\n");
mp = mp_nocache;
if (do_one_mempool_test(mp, 1) < 0)
- return -1;
+ goto err;
if (do_one_mempool_test(mp, 2) < 0)
- return -1;
+ goto err;
if (do_one_mempool_test(mp, rte_lcore_count()) < 0)
- return -1;
+ goto err;
/* performance test with 1, 2 and max cores */
printf("start performance test (with cache)\n");
mp = mp_cache;
if (do_one_mempool_test(mp, 1) < 0)
- return -1;
+ goto err;
if (do_one_mempool_test(mp, 2) < 0)
- return -1;
+ goto err;
if (do_one_mempool_test(mp, rte_lcore_count()) < 0)
- return -1;
+ goto err;
/* performance test with 1, 2 and max cores */
printf("start performance test (with user-owned cache)\n");
@@ -368,17 +369,22 @@ test_mempool_perf(void)
use_external_cache = 1;
if (do_one_mempool_test(mp, 1) < 0)
- return -1;
+ goto err;
if (do_one_mempool_test(mp, 2) < 0)
- return -1;
+ goto err;
if (do_one_mempool_test(mp, rte_lcore_count()) < 0)
- return -1;
+ goto err;
rte_mempool_list_dump(stdout);
- return 0;
+ ret = 0;
+
+err:
+ rte_mempool_free(mp_cache);
+ rte_mempool_free(mp_nocache);
+ return ret;
}
REGISTER_TEST_COMMAND(mempool_perf_autotest, test_mempool_perf);
--
2.11.0
^ permalink raw reply [flat|nested] 20+ messages in thread
* [dpdk-dev] [PATCH v3 3/3] test/test/mempool_perf: support default mempool autotest
2017-04-18 8:34 ` [dpdk-dev] [PATCH v3 1/3] test/test/mempool_perf: Remove mempool global vars Santosh Shukla
2017-04-18 8:34 ` [dpdk-dev] [PATCH v3 2/3] test/test/mempool_perf: Free mempool on exit Santosh Shukla
@ 2017-04-18 8:34 ` Santosh Shukla
2017-04-18 13:42 ` [dpdk-dev] [PATCH v3 1/3] test/test/mempool_perf: Remove mempool global vars Olivier MATZ
2017-04-18 14:41 ` [dpdk-dev] [PATCH v4 " Santosh Shukla
3 siblings, 0 replies; 20+ messages in thread
From: Santosh Shukla @ 2017-04-18 8:34 UTC (permalink / raw)
To: olivier.matz, dev
Cc: thomas.monjalon, hemant.agrawal, shreyansh.jain, Santosh Shukla
Mempool_perf autotest currently runs the perf regression for:
* nocache
* cache
Introduce default_pool, mainly targeted at the ext-mempool regression
test. Ext-mempool does not need the 'cache' mode, so test-case support
is only added for the 'nocache' mode.
So to run the ext-mempool perf regression, the user has to set
RTE_MBUF_DEFAULT_MEMPOOL_OPS="<>"
There is a chance of duplication, i.e. if the user sets
RTE_MBUF_DEFAULT_MEMPOOL_OPS="ring_mp_mc" then the regression
will run twice for 'ring_mp_mc'.
Signed-off-by: Santosh Shukla <santosh.shukla@caviumnetworks.com>
Acked-by: Shreyansh Jain <shreyansh.jain@nxp.com>
---
v1--> v2:
- Patch context fix.
test/test/test_mempool_perf.c | 44 +++++++++++++++++++++++++++++++++++++++++++
1 file changed, 44 insertions(+)
diff --git a/test/test/test_mempool_perf.c b/test/test/test_mempool_perf.c
index f29718dc4..f663e63db 100644
--- a/test/test/test_mempool_perf.c
+++ b/test/test/test_mempool_perf.c
@@ -314,6 +314,7 @@ test_mempool_perf(void)
struct rte_mempool *mp = NULL;
struct rte_mempool *mp_cache = NULL;
struct rte_mempool *mp_nocache = NULL;
+ struct rte_mempool *default_pool = NULL;
int ret = -1;
rte_atomic32_init(&synchro);
@@ -337,6 +338,34 @@ test_mempool_perf(void)
if (mp_cache == NULL)
goto err;
+ /* Create a mempool based on Default handler */
+ default_pool = rte_mempool_create_empty("default_pool",
+ MEMPOOL_SIZE,
+ MEMPOOL_ELT_SIZE,
+ 0, 0,
+ SOCKET_ID_ANY, 0);
+
+ if (default_pool == NULL) {
+ printf("cannot allocate %s mempool\n",
+ RTE_MBUF_DEFAULT_MEMPOOL_OPS);
+ goto err;
+ }
+
+ if (rte_mempool_set_ops_byname(default_pool,
+ RTE_MBUF_DEFAULT_MEMPOOL_OPS, NULL)
+ < 0) {
+ printf("cannot set %s handler\n", RTE_MBUF_DEFAULT_MEMPOOL_OPS);
+ goto err;
+ }
+
+ if (rte_mempool_populate_default(default_pool) < 0) {
+ printf("cannot populate %s mempool\n",
+ RTE_MBUF_DEFAULT_MEMPOOL_OPS);
+ goto err;
+ }
+
+ rte_mempool_obj_iter(default_pool, my_obj_init, NULL);
+
/* performance test with 1, 2 and max cores */
printf("start performance test (without cache)\n");
mp = mp_nocache;
@@ -351,6 +380,20 @@ test_mempool_perf(void)
goto err;
/* performance test with 1, 2 and max cores */
+ printf("start performance test for %s (without cache)\n",
+ RTE_MBUF_DEFAULT_MEMPOOL_OPS);
+ mp = default_pool;
+
+ if (do_one_mempool_test(mp, 1) < 0)
+ goto err;
+
+ if (do_one_mempool_test(mp, 2) < 0)
+ goto err;
+
+ if (do_one_mempool_test(mp, rte_lcore_count()) < 0)
+ goto err;
+
+ /* performance test with 1, 2 and max cores */
printf("start performance test (with cache)\n");
mp = mp_cache;
@@ -384,6 +427,7 @@ test_mempool_perf(void)
err:
rte_mempool_free(mp_cache);
rte_mempool_free(mp_nocache);
+ rte_mempool_free(default_pool);
return ret;
}
--
2.11.0
^ permalink raw reply [flat|nested] 20+ messages in thread
* Re: [dpdk-dev] [PATCH v3 1/3] test/test/mempool_perf: Remove mempool global vars
2017-04-18 8:34 ` [dpdk-dev] [PATCH v3 1/3] test/test/mempool_perf: Remove mempool global vars Santosh Shukla
2017-04-18 8:34 ` [dpdk-dev] [PATCH v3 2/3] test/test/mempool_perf: Free mempool on exit Santosh Shukla
2017-04-18 8:34 ` [dpdk-dev] [PATCH v3 3/3] test/test/mempool_perf: support default mempool autotest Santosh Shukla
@ 2017-04-18 13:42 ` Olivier MATZ
2017-04-18 14:39 ` santosh
2017-04-18 14:41 ` [dpdk-dev] [PATCH v4 " Santosh Shukla
3 siblings, 1 reply; 20+ messages in thread
From: Olivier MATZ @ 2017-04-18 13:42 UTC (permalink / raw)
To: Santosh Shukla; +Cc: dev, thomas, hemant.agrawal, shreyansh.jain, stable
Hi Santosh,
On Tue, 18 Apr 2017 14:04:46 +0530, Santosh Shukla <santosh.shukla@caviumnetworks.com> wrote:
> Cc: stable@dpdk.org
> Signed-off-by: Santosh Shukla <santosh.shukla@caviumnetworks.com>
> ---
> v3:
> - Cleanup code change as per discussion in
> http://dpdk.org/dev/patchwork/patch/23262/
>
> test/test/test_mempool_perf.c | 61 ++++++++++++++++++++++---------------------
> 1 file changed, 31 insertions(+), 30 deletions(-)
>
> diff --git a/test/test/test_mempool_perf.c b/test/test/test_mempool_perf.c
> index ebf1721ac..213fcba5d 100644
> --- a/test/test/test_mempool_perf.c
> +++ b/test/test/test_mempool_perf.c
> @@ -109,8 +109,6 @@
> goto label; \
> } while (0)
>
> -static struct rte_mempool *mp;
> -static struct rte_mempool *mp_cache, *mp_nocache;
> static int use_external_cache;
> static unsigned external_cache_size = RTE_MEMPOOL_CACHE_MAX_SIZE;
>
> @@ -144,10 +142,11 @@ my_obj_init(struct rte_mempool *mp, __attribute__((unused)) void *arg,
> }
>
> static int
> -per_lcore_mempool_test(__attribute__((unused)) void *arg)
> +per_lcore_mempool_test(void *arg)
> {
> void *obj_table[MAX_KEEP];
> unsigned i, idx;
> + struct rte_mempool *mp = (struct rte_mempool *)arg;
as a nit: the cast is not required.
> unsigned lcore_id = rte_lcore_id();
> int ret = 0;
> uint64_t start_cycles, end_cycles;
> @@ -221,7 +220,7 @@ per_lcore_mempool_test(__attribute__((unused)) void *arg)
>
> /* launch all the per-lcore test, and display the result */
> static int
> -launch_cores(unsigned cores)
> +launch_cores(struct rte_mempool *mp, unsigned cores)
> {
> unsigned lcore_id;
> uint64_t rate;
> @@ -249,13 +248,13 @@ launch_cores(unsigned cores)
> break;
> cores--;
> rte_eal_remote_launch(per_lcore_mempool_test,
> - NULL, lcore_id);
> + mp, lcore_id);
> }
>
> /* start synchro and launch test on master */
> rte_atomic32_set(&synchro, 1);
>
> - ret = per_lcore_mempool_test(NULL);
> + ret = per_lcore_mempool_test(mp);
>
> cores = cores_save;
> RTE_LCORE_FOREACH_SLAVE(lcore_id) {
> @@ -282,7 +281,7 @@ launch_cores(unsigned cores)
>
> /* for a given number of core, launch all test cases */
> static int
> -do_one_mempool_test(unsigned cores)
> +do_one_mempool_test(struct rte_mempool *mp, unsigned cores)
> {
> unsigned bulk_tab_get[] = { 1, 4, 32, 0 };
> unsigned bulk_tab_put[] = { 1, 4, 32, 0 };
> @@ -299,7 +298,7 @@ do_one_mempool_test(unsigned cores)
> n_get_bulk = *get_bulk_ptr;
> n_put_bulk = *put_bulk_ptr;
> n_keep = *keep_ptr;
> - ret = launch_cores(cores);
> + ret = launch_cores(mp, cores);
>
> if (ret < 0)
> return -1;
> @@ -312,26 +311,28 @@ do_one_mempool_test(unsigned cores)
> static int
> test_mempool_perf(void)
> {
> + struct rte_mempool *mp = NULL;
> + struct rte_mempool *mp_cache = NULL;
> + struct rte_mempool *mp_nocache = NULL;
> +
> rte_atomic32_init(&synchro);
>
> /* create a mempool (without cache) */
> - if (mp_nocache == NULL)
> - mp_nocache = rte_mempool_create("perf_test_nocache", MEMPOOL_SIZE,
> - MEMPOOL_ELT_SIZE, 0, 0,
> - NULL, NULL,
> - my_obj_init, NULL,
> - SOCKET_ID_ANY, 0);
> + mp_nocache = rte_mempool_create("perf_test_nocache", MEMPOOL_SIZE,
> + MEMPOOL_ELT_SIZE, 0, 0,
> + NULL, NULL,
> + my_obj_init, NULL,
> + SOCKET_ID_ANY, 0);
> if (mp_nocache == NULL)
> return -1;
>
> /* create a mempool (with cache) */
> - if (mp_cache == NULL)
> - mp_cache = rte_mempool_create("perf_test_cache", MEMPOOL_SIZE,
> - MEMPOOL_ELT_SIZE,
> - RTE_MEMPOOL_CACHE_MAX_SIZE, 0,
> - NULL, NULL,
> - my_obj_init, NULL,
> - SOCKET_ID_ANY, 0);
> + mp_cache = rte_mempool_create("perf_test_cache", MEMPOOL_SIZE,
> + MEMPOOL_ELT_SIZE,
> + RTE_MEMPOOL_CACHE_MAX_SIZE, 0,
> + NULL, NULL,
> + my_obj_init, NULL,
> + SOCKET_ID_ANY, 0);
> if (mp_cache == NULL)
> return -1;
>
> @@ -339,26 +340,26 @@ test_mempool_perf(void)
> printf("start performance test (without cache)\n");
> mp = mp_nocache;
>
> - if (do_one_mempool_test(1) < 0)
> + if (do_one_mempool_test(mp, 1) < 0)
> return -1;
We could go even shorter: the mp variable can probably be removed and
directly replaced by mp_nocache or mp_cache in the function calls.
Thanks,
Olivier
^ permalink raw reply [flat|nested] 20+ messages in thread
* Re: [dpdk-dev] [PATCH v3 1/3] test/test/mempool_perf: Remove mempool global vars
2017-04-18 13:42 ` [dpdk-dev] [PATCH v3 1/3] test/test/mempool_perf: Remove mempool global vars Olivier MATZ
@ 2017-04-18 14:39 ` santosh
0 siblings, 0 replies; 20+ messages in thread
From: santosh @ 2017-04-18 14:39 UTC (permalink / raw)
To: Olivier MATZ; +Cc: dev, hemant.agrawal, shreyansh.jain, stable
Hi Olivier,
On Tuesday 18 April 2017 07:12 PM, Olivier MATZ wrote:
> Hi Santosh,
>
> On Tue, 18 Apr 2017 14:04:46 +0530, Santosh Shukla <santosh.shukla@caviumnetworks.com> wrote:
>> Cc: stable@dpdk.org
>> Signed-off-by: Santosh Shukla <santosh.shukla@caviumnetworks.com>
>> ---
>> v3:
>> - Cleanup code change as per discussion in
>> http://dpdk.org/dev/patchwork/patch/23262/
>>
>> test/test/test_mempool_perf.c | 61 ++++++++++++++++++++++---------------------
>> 1 file changed, 31 insertions(+), 30 deletions(-)
>>
>> diff --git a/test/test/test_mempool_perf.c b/test/test/test_mempool_perf.c
>> index ebf1721ac..213fcba5d 100644
>> --- a/test/test/test_mempool_perf.c
>> +++ b/test/test/test_mempool_perf.c
>> @@ -109,8 +109,6 @@
>> goto label; \
>> } while (0)
>>
>> -static struct rte_mempool *mp;
>> -static struct rte_mempool *mp_cache, *mp_nocache;
>> static int use_external_cache;
>> static unsigned external_cache_size = RTE_MEMPOOL_CACHE_MAX_SIZE;
>>
>> @@ -144,10 +142,11 @@ my_obj_init(struct rte_mempool *mp, __attribute__((unused)) void *arg,
>> }
>>
>> static int
>> -per_lcore_mempool_test(__attribute__((unused)) void *arg)
>> +per_lcore_mempool_test(void *arg)
>> {
>> void *obj_table[MAX_KEEP];
>> unsigned i, idx;
>> + struct rte_mempool *mp = (struct rte_mempool *)arg;
> as a nit: the cast is not required.
>
Will consider in v4.
>> unsigned lcore_id = rte_lcore_id();
>> int ret = 0;
>> uint64_t start_cycles, end_cycles;
>> @@ -221,7 +220,7 @@ per_lcore_mempool_test(__attribute__((unused)) void *arg)
>>
>> /* launch all the per-lcore test, and display the result */
>> static int
>> -launch_cores(unsigned cores)
>> +launch_cores(struct rte_mempool *mp, unsigned cores)
>> {
>> unsigned lcore_id;
>> uint64_t rate;
>> @@ -249,13 +248,13 @@ launch_cores(unsigned cores)
>> break;
>> cores--;
>> rte_eal_remote_launch(per_lcore_mempool_test,
>> - NULL, lcore_id);
>> + mp, lcore_id);
>> }
>>
>> /* start synchro and launch test on master */
>> rte_atomic32_set(&synchro, 1);
>>
>> - ret = per_lcore_mempool_test(NULL);
>> + ret = per_lcore_mempool_test(mp);
>>
>> cores = cores_save;
>> RTE_LCORE_FOREACH_SLAVE(lcore_id) {
>> @@ -282,7 +281,7 @@ launch_cores(unsigned cores)
>>
>> /* for a given number of core, launch all test cases */
>> static int
>> -do_one_mempool_test(unsigned cores)
>> +do_one_mempool_test(struct rte_mempool *mp, unsigned cores)
>> {
>> unsigned bulk_tab_get[] = { 1, 4, 32, 0 };
>> unsigned bulk_tab_put[] = { 1, 4, 32, 0 };
>> @@ -299,7 +298,7 @@ do_one_mempool_test(unsigned cores)
>> n_get_bulk = *get_bulk_ptr;
>> n_put_bulk = *put_bulk_ptr;
>> n_keep = *keep_ptr;
>> - ret = launch_cores(cores);
>> + ret = launch_cores(mp, cores);
>>
>> if (ret < 0)
>> return -1;
>> @@ -312,26 +311,28 @@ do_one_mempool_test(unsigned cores)
>> static int
>> test_mempool_perf(void)
>> {
>> + struct rte_mempool *mp = NULL;
>> + struct rte_mempool *mp_cache = NULL;
>> + struct rte_mempool *mp_nocache = NULL;
>> +
>> rte_atomic32_init(&synchro);
>>
>> /* create a mempool (without cache) */
>> - if (mp_nocache == NULL)
>> - mp_nocache = rte_mempool_create("perf_test_nocache", MEMPOOL_SIZE,
>> - MEMPOOL_ELT_SIZE, 0, 0,
>> - NULL, NULL,
>> - my_obj_init, NULL,
>> - SOCKET_ID_ANY, 0);
>> + mp_nocache = rte_mempool_create("perf_test_nocache", MEMPOOL_SIZE,
>> + MEMPOOL_ELT_SIZE, 0, 0,
>> + NULL, NULL,
>> + my_obj_init, NULL,
>> + SOCKET_ID_ANY, 0);
>> if (mp_nocache == NULL)
>> return -1;
>>
>> /* create a mempool (with cache) */
>> - if (mp_cache == NULL)
>> - mp_cache = rte_mempool_create("perf_test_cache", MEMPOOL_SIZE,
>> - MEMPOOL_ELT_SIZE,
>> - RTE_MEMPOOL_CACHE_MAX_SIZE, 0,
>> - NULL, NULL,
>> - my_obj_init, NULL,
>> - SOCKET_ID_ANY, 0);
>> + mp_cache = rte_mempool_create("perf_test_cache", MEMPOOL_SIZE,
>> + MEMPOOL_ELT_SIZE,
>> + RTE_MEMPOOL_CACHE_MAX_SIZE, 0,
>> + NULL, NULL,
>> + my_obj_init, NULL,
>> + SOCKET_ID_ANY, 0);
>> if (mp_cache == NULL)
>> return -1;
>>
>> @@ -339,26 +340,26 @@ test_mempool_perf(void)
>> printf("start performance test (without cache)\n");
>> mp = mp_nocache;
>>
>> - if (do_one_mempool_test(1) < 0)
>> + if (do_one_mempool_test(mp, 1) < 0)
>> return -1;
> We could even do shorter: the mp variable can probably be removed, and
> directly replaced by mp_nocache or mp_cache in function calls.
In v4.
Thanks,
Santosh
>
> Thanks,
> Olivier
^ permalink raw reply [flat|nested] 20+ messages in thread
* [dpdk-dev] [PATCH v4 1/3] test/test/mempool_perf: Remove mempool global vars
2017-04-18 8:34 ` [dpdk-dev] [PATCH v3 1/3] test/test/mempool_perf: Remove mempool global vars Santosh Shukla
` (2 preceding siblings ...)
2017-04-18 13:42 ` [dpdk-dev] [PATCH v3 1/3] test/test/mempool_perf: Remove mempool global vars Olivier MATZ
@ 2017-04-18 14:41 ` Santosh Shukla
2017-04-18 14:41 ` [dpdk-dev] [PATCH v4 2/3] test/test/mempool_perf: Free mempool on exit Santosh Shukla
` (2 more replies)
3 siblings, 3 replies; 20+ messages in thread
From: Santosh Shukla @ 2017-04-18 14:41 UTC (permalink / raw)
To: olivier.matz, dev; +Cc: hemant.agrawal, shreyansh.jain, Santosh Shukla, stable
Cc: stable@dpdk.org
Signed-off-by: Santosh Shukla <santosh.shukla@caviumnetworks.com>
---
v3 --> v4:
- removed extra mp local var.
- Includes nit changes refer
http://dpdk.org/dev/patchwork/patch/23680/
test/test/test_mempool_perf.c | 63 +++++++++++++++++++++----------------------
1 file changed, 30 insertions(+), 33 deletions(-)
diff --git a/test/test/test_mempool_perf.c b/test/test/test_mempool_perf.c
index ebf1721ac..1b4045eb3 100644
--- a/test/test/test_mempool_perf.c
+++ b/test/test/test_mempool_perf.c
@@ -109,8 +109,6 @@
goto label; \
} while (0)
-static struct rte_mempool *mp;
-static struct rte_mempool *mp_cache, *mp_nocache;
static int use_external_cache;
static unsigned external_cache_size = RTE_MEMPOOL_CACHE_MAX_SIZE;
@@ -144,10 +142,11 @@ my_obj_init(struct rte_mempool *mp, __attribute__((unused)) void *arg,
}
static int
-per_lcore_mempool_test(__attribute__((unused)) void *arg)
+per_lcore_mempool_test(void *arg)
{
void *obj_table[MAX_KEEP];
unsigned i, idx;
+ struct rte_mempool *mp = arg;
unsigned lcore_id = rte_lcore_id();
int ret = 0;
uint64_t start_cycles, end_cycles;
@@ -221,7 +220,7 @@ per_lcore_mempool_test(__attribute__((unused)) void *arg)
/* launch all the per-lcore test, and display the result */
static int
-launch_cores(unsigned cores)
+launch_cores(struct rte_mempool *mp, unsigned int cores)
{
unsigned lcore_id;
uint64_t rate;
@@ -249,13 +248,13 @@ launch_cores(unsigned cores)
break;
cores--;
rte_eal_remote_launch(per_lcore_mempool_test,
- NULL, lcore_id);
+ mp, lcore_id);
}
/* start synchro and launch test on master */
rte_atomic32_set(&synchro, 1);
- ret = per_lcore_mempool_test(NULL);
+ ret = per_lcore_mempool_test(mp);
cores = cores_save;
RTE_LCORE_FOREACH_SLAVE(lcore_id) {
@@ -282,7 +281,7 @@ launch_cores(unsigned cores)
/* for a given number of core, launch all test cases */
static int
-do_one_mempool_test(unsigned cores)
+do_one_mempool_test(struct rte_mempool *mp, unsigned int cores)
{
unsigned bulk_tab_get[] = { 1, 4, 32, 0 };
unsigned bulk_tab_put[] = { 1, 4, 32, 0 };
@@ -299,7 +298,7 @@ do_one_mempool_test(unsigned cores)
n_get_bulk = *get_bulk_ptr;
n_put_bulk = *put_bulk_ptr;
n_keep = *keep_ptr;
- ret = launch_cores(cores);
+ ret = launch_cores(mp, cores);
if (ret < 0)
return -1;
@@ -312,67 +311,65 @@ do_one_mempool_test(unsigned cores)
static int
test_mempool_perf(void)
{
+ struct rte_mempool *mp_cache = NULL;
+ struct rte_mempool *mp_nocache = NULL;
+
rte_atomic32_init(&synchro);
/* create a mempool (without cache) */
- if (mp_nocache == NULL)
- mp_nocache = rte_mempool_create("perf_test_nocache", MEMPOOL_SIZE,
- MEMPOOL_ELT_SIZE, 0, 0,
- NULL, NULL,
- my_obj_init, NULL,
- SOCKET_ID_ANY, 0);
+ mp_nocache = rte_mempool_create("perf_test_nocache", MEMPOOL_SIZE,
+ MEMPOOL_ELT_SIZE, 0, 0,
+ NULL, NULL,
+ my_obj_init, NULL,
+ SOCKET_ID_ANY, 0);
if (mp_nocache == NULL)
return -1;
/* create a mempool (with cache) */
- if (mp_cache == NULL)
- mp_cache = rte_mempool_create("perf_test_cache", MEMPOOL_SIZE,
- MEMPOOL_ELT_SIZE,
- RTE_MEMPOOL_CACHE_MAX_SIZE, 0,
- NULL, NULL,
- my_obj_init, NULL,
- SOCKET_ID_ANY, 0);
+ mp_cache = rte_mempool_create("perf_test_cache", MEMPOOL_SIZE,
+ MEMPOOL_ELT_SIZE,
+ RTE_MEMPOOL_CACHE_MAX_SIZE, 0,
+ NULL, NULL,
+ my_obj_init, NULL,
+ SOCKET_ID_ANY, 0);
if (mp_cache == NULL)
return -1;
/* performance test with 1, 2 and max cores */
printf("start performance test (without cache)\n");
- mp = mp_nocache;
- if (do_one_mempool_test(1) < 0)
+ if (do_one_mempool_test(mp_nocache, 1) < 0)
return -1;
- if (do_one_mempool_test(2) < 0)
+ if (do_one_mempool_test(mp_nocache, 2) < 0)
return -1;
- if (do_one_mempool_test(rte_lcore_count()) < 0)
+ if (do_one_mempool_test(mp_nocache, rte_lcore_count()) < 0)
return -1;
/* performance test with 1, 2 and max cores */
printf("start performance test (with cache)\n");
- mp = mp_cache;
- if (do_one_mempool_test(1) < 0)
+ if (do_one_mempool_test(mp_cache, 1) < 0)
return -1;
- if (do_one_mempool_test(2) < 0)
+ if (do_one_mempool_test(mp_cache, 2) < 0)
return -1;
- if (do_one_mempool_test(rte_lcore_count()) < 0)
+ if (do_one_mempool_test(mp_cache, rte_lcore_count()) < 0)
return -1;
/* performance test with 1, 2 and max cores */
printf("start performance test (with user-owned cache)\n");
- mp = mp_nocache;
use_external_cache = 1;
- if (do_one_mempool_test(1) < 0)
+ if (do_one_mempool_test(mp_nocache, 1) < 0)
return -1;
- if (do_one_mempool_test(2) < 0)
+ if (do_one_mempool_test(mp_nocache, 2) < 0)
return -1;
- if (do_one_mempool_test(rte_lcore_count()) < 0)
+ if (do_one_mempool_test(mp_nocache, rte_lcore_count()) < 0)
return -1;
rte_mempool_list_dump(stdout);
--
2.11.0
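The hunks above thread the mempool under test through launch_cores() and the per-lcore callback instead of reading it from a file-scope global. Below is a minimal standalone sketch of that pattern using only public EAL calls; the helper names (worker, launch_on_all) are illustrative and not taken from the patch.

#include <rte_launch.h>
#include <rte_lcore.h>
#include <rte_mempool.h>

/* Worker entry point: the pool under test arrives via the launch
 * argument, so no file-scope global is needed. */
static int
worker(void *arg)
{
        struct rte_mempool *mp = arg;

        /* ... exercise mp here ... */
        (void)mp;
        return 0;
}

/* Launch the worker on every slave lcore, handing each one the pool,
 * then run it on the master and wait for the slaves to finish. */
static int
launch_on_all(struct rte_mempool *mp)
{
        unsigned int lcore_id;

        RTE_LCORE_FOREACH_SLAVE(lcore_id)
                rte_eal_remote_launch(worker, mp, lcore_id);

        worker(mp);
        rte_eal_mp_wait_lcore();
        return 0;
}

Passing the pool through the launch argument is what later lets test_mempool_perf() create and free the pools as local variables, as the following patches in the series do.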
* [dpdk-dev] [PATCH v4 2/3] test/test/mempool_perf: Free mempool on exit
2017-04-18 14:41 ` [dpdk-dev] [PATCH v4 " Santosh Shukla
@ 2017-04-18 14:41 ` Santosh Shukla
2017-04-18 14:41 ` [dpdk-dev] [PATCH v4 3/3] test/test/mempool_perf: support default mempool autotest Santosh Shukla
2017-04-18 15:31 ` [dpdk-dev] [PATCH v4 1/3] test/test/mempool_perf: Remove mempool global vars Olivier MATZ
2 siblings, 0 replies; 20+ messages in thread
From: Santosh Shukla @ 2017-04-18 14:41 UTC (permalink / raw)
To: olivier.matz, dev; +Cc: hemant.agrawal, shreyansh.jain, Santosh Shukla, stable
The mempool_perf test does not free its pool memory on exit.
Cc: stable@dpdk.org
Signed-off-by: Santosh Shukla <santosh.shukla@caviumnetworks.com>
Acked-by: Shreyansh Jain <shreyansh.jain@nxp.com>
---
v1 --> v2:
- Fix patch context
v2 --> v3 --> v4:
- Adjusted for the changes introduced by the [1/3] patch
test/test/test_mempool_perf.c | 30 ++++++++++++++++++------------
1 file changed, 18 insertions(+), 12 deletions(-)
diff --git a/test/test/test_mempool_perf.c b/test/test/test_mempool_perf.c
index 1b4045eb3..3749cf8be 100644
--- a/test/test/test_mempool_perf.c
+++ b/test/test/test_mempool_perf.c
@@ -313,6 +313,7 @@ test_mempool_perf(void)
{
struct rte_mempool *mp_cache = NULL;
struct rte_mempool *mp_nocache = NULL;
+ int ret = -1;
rte_atomic32_init(&synchro);
@@ -323,7 +324,7 @@ test_mempool_perf(void)
my_obj_init, NULL,
SOCKET_ID_ANY, 0);
if (mp_nocache == NULL)
- return -1;
+ goto err;
/* create a mempool (with cache) */
mp_cache = rte_mempool_create("perf_test_cache", MEMPOOL_SIZE,
@@ -333,48 +334,53 @@ test_mempool_perf(void)
my_obj_init, NULL,
SOCKET_ID_ANY, 0);
if (mp_cache == NULL)
- return -1;
+ goto err;
/* performance test with 1, 2 and max cores */
printf("start performance test (without cache)\n");
if (do_one_mempool_test(mp_nocache, 1) < 0)
- return -1;
+ goto err;
if (do_one_mempool_test(mp_nocache, 2) < 0)
- return -1;
+ goto err;
if (do_one_mempool_test(mp_nocache, rte_lcore_count()) < 0)
- return -1;
+ goto err;
/* performance test with 1, 2 and max cores */
printf("start performance test (with cache)\n");
if (do_one_mempool_test(mp_cache, 1) < 0)
- return -1;
+ goto err;
if (do_one_mempool_test(mp_cache, 2) < 0)
- return -1;
+ goto err;
if (do_one_mempool_test(mp_cache, rte_lcore_count()) < 0)
- return -1;
+ goto err;
/* performance test with 1, 2 and max cores */
printf("start performance test (with user-owned cache)\n");
use_external_cache = 1;
if (do_one_mempool_test(mp_nocache, 1) < 0)
- return -1;
+ goto err;
if (do_one_mempool_test(mp_nocache, 2) < 0)
- return -1;
+ goto err;
if (do_one_mempool_test(mp_nocache, rte_lcore_count()) < 0)
- return -1;
+ goto err;
rte_mempool_list_dump(stdout);
- return 0;
+ ret = 0;
+
+err:
+ rte_mempool_free(mp_cache);
+ rte_mempool_free(mp_nocache);
+ return ret;
}
REGISTER_TEST_COMMAND(mempool_perf_autotest, test_mempool_perf);
--
2.11.0
* [dpdk-dev] [PATCH v4 3/3] test/test/mempool_perf: support default mempool autotest
2017-04-18 14:41 ` [dpdk-dev] [PATCH v4 " Santosh Shukla
2017-04-18 14:41 ` [dpdk-dev] [PATCH v4 2/3] test/test/mempool_perf: Free mempool on exit Santosh Shukla
@ 2017-04-18 14:41 ` Santosh Shukla
2017-04-18 15:31 ` [dpdk-dev] [PATCH v4 1/3] test/test/mempool_perf: Remove mempool global vars Olivier MATZ
2 siblings, 0 replies; 20+ messages in thread
From: Santosh Shukla @ 2017-04-18 14:41 UTC (permalink / raw)
To: olivier.matz, dev; +Cc: hemant.agrawal, shreyansh.jain, Santosh Shukla
The mempool_perf autotest currently runs perf regression for:
* nocache
* cache
This patch introduces default_pool, mainly targeted at ext-mempool
regression testing. External mempool handlers do not need the
'cache' modes, so test-case support is added only for the 'nocache'
mode.
To run the ext-mempool perf regression, the user has to set
RTE_MBUF_DEFAULT_MEMPOOL_OPS="<>"
There is a chance of duplication, i.e. if the user sets
RTE_MBUF_DEFAULT_MEMPOOL_OPS="ring_mp_mc", the regression
will run twice for 'ring_mp_mc'.
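For illustration, the sequence the patch uses to bind a pool to the build-time default handler can be condensed into the standalone sketch below; the function name is made up here, and RTE_MBUF_DEFAULT_MEMPOOL_OPS is assumed to come from the generated build configuration (rte_config.h).

#include <rte_config.h>   /* assumed to define RTE_MBUF_DEFAULT_MEMPOOL_OPS */
#include <rte_mempool.h>

/* Create an empty pool, select the default ops by name (this is where
 * an external handler is picked up when the config is overridden),
 * then populate it with the default allocator. */
static struct rte_mempool *
create_default_ops_pool(unsigned int n, unsigned int elt_size)
{
        struct rte_mempool *mp;

        mp = rte_mempool_create_empty("sketch_pool", n, elt_size,
                                      0, 0, SOCKET_ID_ANY, 0);
        if (mp == NULL)
                return NULL;

        if (rte_mempool_set_ops_byname(mp, RTE_MBUF_DEFAULT_MEMPOOL_OPS,
                                       NULL) < 0 ||
            rte_mempool_populate_default(mp) < 0) {
                rte_mempool_free(mp);
                return NULL;
        }
        return mp;
}

When the config is left at its "ring_mp_mc" default, such a pool ends up on the same handler as the nocache pool already tested above, which is the duplication noted here.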
Signed-off-by: Santosh Shukla <santosh.shukla@caviumnetworks.com>
Acked-by: Shreyansh Jain <shreyansh.jain@nxp.com>
---
v1 --> v2:
- Fix patch context
v2 --> v3 --> v4:
- Adjusted the code for the changes introduced by the [1/3] patch
test/test/test_mempool_perf.c | 43 +++++++++++++++++++++++++++++++++++++++++++
1 file changed, 43 insertions(+)
diff --git a/test/test/test_mempool_perf.c b/test/test/test_mempool_perf.c
index 3749cf8be..07b28c066 100644
--- a/test/test/test_mempool_perf.c
+++ b/test/test/test_mempool_perf.c
@@ -313,6 +313,7 @@ test_mempool_perf(void)
{
struct rte_mempool *mp_cache = NULL;
struct rte_mempool *mp_nocache = NULL;
+ struct rte_mempool *default_pool = NULL;
int ret = -1;
rte_atomic32_init(&synchro);
@@ -336,6 +337,34 @@ test_mempool_perf(void)
if (mp_cache == NULL)
goto err;
+ /* Create a mempool based on Default handler */
+ default_pool = rte_mempool_create_empty("default_pool",
+ MEMPOOL_SIZE,
+ MEMPOOL_ELT_SIZE,
+ 0, 0,
+ SOCKET_ID_ANY, 0);
+
+ if (default_pool == NULL) {
+ printf("cannot allocate %s mempool\n",
+ RTE_MBUF_DEFAULT_MEMPOOL_OPS);
+ goto err;
+ }
+
+ if (rte_mempool_set_ops_byname(default_pool,
+ RTE_MBUF_DEFAULT_MEMPOOL_OPS, NULL)
+ < 0) {
+ printf("cannot set %s handler\n", RTE_MBUF_DEFAULT_MEMPOOL_OPS);
+ goto err;
+ }
+
+ if (rte_mempool_populate_default(default_pool) < 0) {
+ printf("cannot populate %s mempool\n",
+ RTE_MBUF_DEFAULT_MEMPOOL_OPS);
+ goto err;
+ }
+
+ rte_mempool_obj_iter(default_pool, my_obj_init, NULL);
+
/* performance test with 1, 2 and max cores */
printf("start performance test (without cache)\n");
@@ -349,6 +378,19 @@ test_mempool_perf(void)
goto err;
/* performance test with 1, 2 and max cores */
+ printf("start performance test for %s (without cache)\n",
+ RTE_MBUF_DEFAULT_MEMPOOL_OPS);
+
+ if (do_one_mempool_test(default_pool, 1) < 0)
+ goto err;
+
+ if (do_one_mempool_test(default_pool, 2) < 0)
+ goto err;
+
+ if (do_one_mempool_test(default_pool, rte_lcore_count()) < 0)
+ goto err;
+
+ /* performance test with 1, 2 and max cores */
printf("start performance test (with cache)\n");
if (do_one_mempool_test(mp_cache, 1) < 0)
@@ -380,6 +422,7 @@ test_mempool_perf(void)
err:
rte_mempool_free(mp_cache);
rte_mempool_free(mp_nocache);
+ rte_mempool_free(default_pool);
return ret;
}
--
2.11.0
* Re: [dpdk-dev] [PATCH v4 1/3] test/test/mempool_perf: Remove mempool global vars
2017-04-18 14:41 ` [dpdk-dev] [PATCH v4 " Santosh Shukla
2017-04-18 14:41 ` [dpdk-dev] [PATCH v4 2/3] test/test/mempool_perf: Free mempool on exit Santosh Shukla
2017-04-18 14:41 ` [dpdk-dev] [PATCH v4 3/3] test/test/mempool_perf: support default mempool autotest Santosh Shukla
@ 2017-04-18 15:31 ` Olivier MATZ
2017-04-19 12:48 ` [dpdk-dev] [dpdk-stable] " Thomas Monjalon
2 siblings, 1 reply; 20+ messages in thread
From: Olivier MATZ @ 2017-04-18 15:31 UTC (permalink / raw)
To: Santosh Shukla; +Cc: dev, hemant.agrawal, shreyansh.jain, stable
On Tue, 18 Apr 2017 20:11:28 +0530, Santosh Shukla <santosh.shukla@caviumnetworks.com> wrote:
> Cc: stable@dpdk.org
> Signed-off-by: Santosh Shukla <santosh.shukla@caviumnetworks.com>
Series:
Acked-by: Olivier Matz <olivier.matz@6wind.com>
Note for Thomas when applying: you may want to remove one "test/" in
the title and remove the uppercase letter.
* Re: [dpdk-dev] [dpdk-stable] [PATCH v4 1/3] test/test/mempool_perf: Remove mempool global vars
2017-04-18 15:31 ` [dpdk-dev] [PATCH v4 1/3] test/test/mempool_perf: Remove mempool global vars Olivier MATZ
@ 2017-04-19 12:48 ` Thomas Monjalon
0 siblings, 0 replies; 20+ messages in thread
From: Thomas Monjalon @ 2017-04-19 12:48 UTC (permalink / raw)
To: Santosh Shukla; +Cc: Olivier MATZ, dev, hemant.agrawal, shreyansh.jain
18/04/2017 17:31, Olivier MATZ:
> On Tue, 18 Apr 2017 20:11:28 +0530, Santosh Shukla <santosh.shukla@caviumnetworks.com> wrote:
> > Cc: stable@dpdk.org
> > Signed-off-by: Santosh Shukla <santosh.shukla@caviumnetworks.com>
>
> Series:
> Acked-by: Olivier Matz <olivier.matz@6wind.com>
>
>
> Note for Thomas when applying: you may want to remove one "test/" in
> the title and remove the uppercase letter.
Applied, thanks
Note: stable@dpdk.org removed because there are no Fixes lines,
and these patches do not appear to be fixes.
Thread overview: 20+ messages
2017-04-05 8:51 [dpdk-dev] [PATCH 1/2] test/mempool_perf: Free mempool on exit Santosh Shukla
2017-04-05 8:51 ` [dpdk-dev] [PATCH 2/2] test/mempool_perf: support default mempool autotest Santosh Shukla
2017-04-05 10:02 ` Shreyansh Jain
2017-04-05 12:40 ` santosh
2017-04-05 9:57 ` [dpdk-dev] [PATCH 1/2] test/mempool_perf: Free mempool on exit Shreyansh Jain
2017-04-05 12:33 ` santosh
2017-04-06 6:45 ` [dpdk-dev] [PATCH v2 " Santosh Shukla
2017-04-06 6:45 ` [dpdk-dev] [PATCH v2 2/2] test/mempool_perf: support default mempool autotest Santosh Shukla
2017-04-07 15:51 ` [dpdk-dev] [PATCH v2 1/2] test/mempool_perf: Free mempool on exit Olivier Matz
[not found] ` <BLUPR0701MB17140B8FD2D59B1A7835769FEA0E0@BLUPR0701MB1714.namprd07.prod.outlook.com>
[not found] ` <d0ea6fc4-7cbb-8766-616e-097c4e0fbb14@caviumnetworks.com>
2017-04-10 20:09 ` Olivier MATZ
2017-04-18 8:34 ` [dpdk-dev] [PATCH v3 1/3] test/test/mempool_perf: Remove mempool global vars Santosh Shukla
2017-04-18 8:34 ` [dpdk-dev] [PATCH v3 2/3] test/test/mempool_perf: Free mempool on exit Santosh Shukla
2017-04-18 8:34 ` [dpdk-dev] [PATCH v3 3/3] test/test/mempool_perf: support default mempool autotest Santosh Shukla
2017-04-18 13:42 ` [dpdk-dev] [PATCH v3 1/3] test/test/mempool_perf: Remove mempool global vars Olivier MATZ
2017-04-18 14:39 ` santosh
2017-04-18 14:41 ` [dpdk-dev] [PATCH v4 " Santosh Shukla
2017-04-18 14:41 ` [dpdk-dev] [PATCH v4 2/3] test/test/mempool_perf: Free mempool on exit Santosh Shukla
2017-04-18 14:41 ` [dpdk-dev] [PATCH v4 3/3] test/test/mempool_perf: support default mempool autotest Santosh Shukla
2017-04-18 15:31 ` [dpdk-dev] [PATCH v4 1/3] test/test/mempool_perf: Remove mempool global vars Olivier MATZ
2017-04-19 12:48 ` [dpdk-dev] [dpdk-stable] " Thomas Monjalon