patches for DPDK stable branches
* [dpdk-stable] [PATCH] app/testpmd: fix number of mbufs in pool
@ 2017-05-19  8:13 Olivier Matz
  2017-05-21 10:42 ` Yuanhan Liu
  0 siblings, 1 reply; 2+ messages in thread
From: Olivier Matz @ 2017-05-19  8:13 UTC (permalink / raw)
  To: stable

[ backported from upstream commit 3ab64341daf8bae485a7e27c68f1dd80c7fd5130 ]

The number of mbufs in the pools is not consistent: it depends on the
options passed by the user and on the number of ports, especially in
NUMA mode when the number of mbufs is specified by the user.

When the user specifies the number of mbufs (per pool), it should
override the default value.

- before the patch

./build/app/testpmd -- -i --numa
  <mbuf_pool_socket_0>: n=331456, size=2176, socket=0
  <mbuf_pool_socket_1>: n=331456, size=2176, socket=1

./build/app/testpmd -- --total-num-mbufs=8000 -i --numa
  <mbuf_pool_socket_0>: n=256000, size=2176, socket=0
  <mbuf_pool_socket_1>: n=256000, size=2176, socket=1
  # BAD, should be n=8000 for each socket

./build/app/testpmd -- -i
  <mbuf_pool_socket_0>: n=331456, size=2176, socket=0

./build/app/testpmd -- --total-num-mbufs=8000 -i
  <mbuf_pool_socket_0>: n=8000, size=2176, socket=0

./build/app/testpmd --vdev=eth_null0 --vdev=eth_null1 -- \
     -i --numa
  <mbuf_pool_socket_0>: n=331456, size=2176, socket=0
  <mbuf_pool_socket_1>: n=331456, size=2176, socket=1

./build/app/testpmd --vdev=eth_null0 --vdev=eth_null1 -- \
     --total-num-mbufs=8000 -i --numa
  <mbuf_pool_socket_0>: n=128000, size=2176, socket=0
  <mbuf_pool_socket_1>: n=128000, size=2176, socket=1
  # BAD, should be n=8000 for each socket

./build/app/testpmd --vdev=eth_null0 --vdev=eth_null1 -- -i
  <mbuf_pool_socket_0>: n=331456, size=2176, socket=0

./build/app/testpmd --vdev=eth_null0 --vdev=eth_null1 -- \
     --total-num-mbufs=8000 -i
  <mbuf_pool_socket_0>: n=8000, size=2176, socket=0
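
For reference, the bad per-socket values above come from the code removed
by this patch: the user-supplied total is divided by the number of ports
and then multiplied by RTE_MAX_ETHPORTS (assumed here to be 32, the
default build configuration):

  nb_mbuf_per_pool = param_total_num_mbufs;             /* 8000 */
  if (param_total_num_mbufs && nb_ports != 0)
          nb_mbuf_per_pool = nb_mbuf_per_pool / nb_ports;
  nb_mbuf = nb_mbuf_per_pool * RTE_MAX_ETHPORTS;        /* per socket */

  no ports bound:  8000 * 32       = 256000
  two null vdevs:  (8000 / 2) * 32 = 128000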

- after the patch

./build/app/testpmd -- -i --numa
  <mbuf_pool_socket_0>: n=331456, size=2176, socket=0
  <mbuf_pool_socket_1>: n=331456, size=2176, socket=1

./build/app/testpmd -- --total-num-mbufs=8000 -i --numa
  <mbuf_pool_socket_0>: n=8000, size=2176, socket=0
  <mbuf_pool_socket_1>: n=8000, size=2176, socket=1

./build/app/testpmd -- -i
  <mbuf_pool_socket_0>: n=331456, size=2176, socket=0

./build/app/testpmd -- --total-num-mbufs=8000 -i
  <mbuf_pool_socket_0>: n=8000, size=2176, socket=0

./build/app/testpmd --vdev=eth_null0 --vdev=eth_null1 -- \
     -i --numa
  <mbuf_pool_socket_0>: n=331456, size=2176, socket=0
  <mbuf_pool_socket_1>: n=331456, size=2176, socket=1

./build/app/testpmd --vdev=eth_null0 --vdev=eth_null1 -- \
     --total-num-mbufs=8000 -i --numa
  <mbuf_pool_socket_0>: n=8000, size=2176, socket=0
  <mbuf_pool_socket_1>: n=8000, size=2176, socket=1

./build/app/testpmd --vdev=eth_null0 --vdev=eth_null1 -- -i
  <mbuf_pool_socket_0>: n=331456, size=2176, socket=0

./build/app/testpmd --vdev=eth_null0 --vdev=eth_null1 -- \
     --total-num-mbufs=8000 -i
  <mbuf_pool_socket_0>: n=8000, size=2176, socket=0

Fixes: b6ea6408fbc7 ("ethdev: store numa_node per device")
Cc: stable@dpdk.org

Signed-off-by: Olivier Matz <olivier.matz@6wind.com>
Acked-by: Jingjing Wu <jingjing.wu@intel.com>

Since commit 999b2ee0fe45 ("app/testpmd: enable NUMA support by default")
is not present in the stable branch, the commit log is updated and the
tests rerun:
  --no-numa is removed
  --numa is added where no NUMA option was specified
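
In other words, the upstream test commands map to this branch as follows
(a sketch inferred from the note above, since NUMA is enabled by default
upstream but disabled by default here):

  upstream: testpmd -- -i --no-numa   ->  here: testpmd -- -i
  upstream: testpmd -- -i             ->  here: testpmd -- -i --numa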

(cherry picked from commit 3ab64341daf8bae485a7e27c68f1dd80c7fd5130)
Signed-off-by: Olivier Matz <olivier.matz@6wind.com>

 Conflicts:
       app/test-pmd/testpmd.c
---
 app/test-pmd/testpmd.c | 65 +++++++++++++++++++++-----------------------------
 1 file changed, 27 insertions(+), 38 deletions(-)

diff --git a/app/test-pmd/testpmd.c b/app/test-pmd/testpmd.c
index f0ac7f379..56a8aa965 100644
--- a/app/test-pmd/testpmd.c
+++ b/app/test-pmd/testpmd.c
@@ -518,34 +518,6 @@ init_config(void)
 		fwd_lcores[lc_id]->cpuid_idx = lc_id;
 	}
 
-	/*
-	 * Create pools of mbuf.
-	 * If NUMA support is disabled, create a single pool of mbuf in
-	 * socket 0 memory by default.
-	 * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1.
-	 *
-	 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
-	 * nb_txd can be configured at run time.
-	 */
-	if (param_total_num_mbufs)
-		nb_mbuf_per_pool = param_total_num_mbufs;
-	else {
-		nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX + (nb_lcores * mb_mempool_cache)
-				+ RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
-
-		if (!numa_support)
-			nb_mbuf_per_pool =
-				(nb_mbuf_per_pool * RTE_MAX_ETHPORTS);
-	}
-
-	if (!numa_support) {
-		if (socket_num == UMA_NO_CONFIG)
-			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
-		else
-			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
-						 socket_num);
-	}
-
 	FOREACH_PORT(pid, ports) {
 		port = &ports[pid];
 		rte_eth_dev_info_get(pid, &port->dev_info);
@@ -568,20 +540,37 @@ init_config(void)
 		port->need_reconfig_queues = 1;
 	}
 
+	/*
+	 * Create pools of mbuf.
+	 * If NUMA support is disabled, create a single pool of mbuf in
+	 * socket 0 memory by default.
+	 * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1.
+	 *
+	 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
+	 * nb_txd can be configured at run time.
+	 */
+	if (param_total_num_mbufs)
+		nb_mbuf_per_pool = param_total_num_mbufs;
+	else {
+		nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX +
+			(nb_lcores * mb_mempool_cache) +
+			RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
+		nb_mbuf_per_pool *= RTE_MAX_ETHPORTS;
+	}
+
 	if (numa_support) {
 		uint8_t i;
-		unsigned int nb_mbuf;
-
-		if (param_total_num_mbufs && nb_ports != 0)
-			nb_mbuf_per_pool = nb_mbuf_per_pool/nb_ports;
 
-		for (i = 0; i < max_socket; i++) {
-			nb_mbuf = (nb_mbuf_per_pool * RTE_MAX_ETHPORTS);
-			if (nb_mbuf)
-				mbuf_pool_create(mbuf_data_size,
-						nb_mbuf,i);
-		}
+		for (i = 0; i < max_socket; i++)
+			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, i);
+	} else {
+		if (socket_num == UMA_NO_CONFIG)
+			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
+		else
+			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
+						 socket_num);
 	}
+
 	init_port_config();
 
 	/*
-- 
2.11.0


* Re: [dpdk-stable] [PATCH] app/testpmd: fix number of mbufs in pool
  2017-05-19  8:13 [dpdk-stable] [PATCH] app/testpmd: fix number of mbufs in pool Olivier Matz
@ 2017-05-21 10:42 ` Yuanhan Liu
  0 siblings, 0 replies; 2+ messages in thread
From: Yuanhan Liu @ 2017-05-21 10:42 UTC (permalink / raw)
  To: Olivier Matz; +Cc: stable

On Fri, May 19, 2017 at 10:13:02AM +0200, Olivier Matz wrote:
> [ backported from upstream commit 3ab64341daf8bae485a7e27c68f1dd80c7fd5130 ]

Thanks for the backport, applied to dpdk-stable/16.11.

	--yliu
