DPDK patches and discussions
* [dpdk-dev] [PATCH] app/testpmd: reduce memory consumption
@ 2019-11-21 15:12 David Marchand
  2019-11-21 15:36 ` Ferruh Yigit
                   ` (3 more replies)
  0 siblings, 4 replies; 20+ messages in thread
From: David Marchand @ 2019-11-21 15:12 UTC (permalink / raw)
  To: dev; +Cc: thomas, ferruh.yigit, Wenzhuo Lu, Jingjing Wu, Bernard Iremonger

Following [1], testpmd memory consumption has skyrocketed.
The rte_port structure has gotten quite fat.

struct rte_port {
[...]
  struct rte_eth_rxconf rx_conf[65536];            /* 266280 3145728 */
  /* --- cacheline 53312 boundary (3411968 bytes) was 40 bytes ago --- */
  struct rte_eth_txconf tx_conf[65536];            /* 3412008 3670016 */
  /* --- cacheline 110656 boundary (7081984 bytes) was 40 bytes ago --- */
[...]
  /* size: 8654936, cachelines: 135234, members: 31 */
[...]

testpmd handles RTE_MAX_ETHPORTS ports (32 by default) which means that it
needs ~256MB just for this internal representation.

The reason is that a testpmd rte_port (the name is quite confusing, as
it is a local type) maintains configurations for all queues of a port.
But where you would expect testpmd to use RTE_MAX_QUEUES_PER_PORT as the
maximum queue count, the rte_port uses MAX_QUEUE_ID set to 64k.

Prefer the ethdev maximum value.

After this patch:
struct rte_port {
[...]
  struct rte_eth_rxconf      rx_conf[1025];        /*  8240 49200 */
  /* --- cacheline 897 boundary (57408 bytes) was 32 bytes ago --- */
  struct rte_eth_txconf      tx_conf[1025];        /* 57440 57400 */
  /* --- cacheline 1794 boundary (114816 bytes) was 24 bytes ago --- */
[...]
  /* size: 139488, cachelines: 2180, members: 31 */
[...]
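
For a rough sense of the savings, here is a minimal, self-contained check of
the numbers above (an illustration only, not part of the patch): multiplying
the two pahole sizes by the default RTE_MAX_ETHPORTS of 32 gives roughly the
~256MB quoted above before the change, and about 4MB after it.

#include <stdio.h>

int main(void)
{
	/* struct rte_port sizes taken from the pahole output above */
	const unsigned long long before = 8654936ULL; /* with 65536-entry per-queue arrays */
	const unsigned long long after  = 139488ULL;  /* with 1025-entry per-queue arrays */
	const unsigned long long ports  = 32;         /* RTE_MAX_ETHPORTS default */

	printf("before: %llu MiB\n", before * ports / (1024 * 1024)); /* prints 264 */
	printf("after:  %llu MiB\n", after * ports / (1024 * 1024));  /* prints 4 */
	return 0;
}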

[1]: https://git.dpdk.org/dpdk/commit/?id=436b3a6b6e62

Signed-off-by: David Marchand <david.marchand@redhat.com>
---
 app/test-pmd/testpmd.c |  6 +++---
 app/test-pmd/testpmd.h | 16 +++++++---------
 2 files changed, 10 insertions(+), 12 deletions(-)

diff --git a/app/test-pmd/testpmd.c b/app/test-pmd/testpmd.c
index 73ebf37aae..d28211ba52 100644
--- a/app/test-pmd/testpmd.c
+++ b/app/test-pmd/testpmd.c
@@ -979,7 +979,7 @@ check_socket_id(const unsigned int socket_id)
 queueid_t
 get_allowed_max_nb_rxq(portid_t *pid)
 {
-	queueid_t allowed_max_rxq = MAX_QUEUE_ID;
+	queueid_t allowed_max_rxq = RTE_MAX_QUEUES_PER_PORT;
 	bool max_rxq_valid = false;
 	portid_t pi;
 	struct rte_eth_dev_info dev_info;
@@ -1029,7 +1029,7 @@ check_nb_rxq(queueid_t rxq)
 queueid_t
 get_allowed_max_nb_txq(portid_t *pid)
 {
-	queueid_t allowed_max_txq = MAX_QUEUE_ID;
+	queueid_t allowed_max_txq = RTE_MAX_QUEUES_PER_PORT;
 	bool max_txq_valid = false;
 	portid_t pi;
 	struct rte_eth_dev_info dev_info;
@@ -1079,7 +1079,7 @@ check_nb_txq(queueid_t txq)
 queueid_t
 get_allowed_max_nb_hairpinq(portid_t *pid)
 {
-	queueid_t allowed_max_hairpinq = MAX_QUEUE_ID;
+	queueid_t allowed_max_hairpinq = RTE_MAX_QUEUES_PER_PORT;
 	portid_t pi;
 	struct rte_eth_hairpin_cap cap;
 
diff --git a/app/test-pmd/testpmd.h b/app/test-pmd/testpmd.h
index 90694a3309..217d577018 100644
--- a/app/test-pmd/testpmd.h
+++ b/app/test-pmd/testpmd.h
@@ -58,8 +58,6 @@ typedef uint16_t portid_t;
 typedef uint16_t queueid_t;
 typedef uint16_t streamid_t;
 
-#define MAX_QUEUE_ID ((1 << (sizeof(queueid_t) * 8)) - 1)
-
 #if defined RTE_LIBRTE_PMD_SOFTNIC
 #define SOFTNIC			1
 #else
@@ -179,22 +177,22 @@ struct rte_port {
 	uint8_t                 need_reconfig_queues; /**< need reconfiguring queues or not */
 	uint8_t                 rss_flag;   /**< enable rss or not */
 	uint8_t                 dcb_flag;   /**< enable dcb */
-	uint16_t                nb_rx_desc[MAX_QUEUE_ID+1]; /**< per queue rx desc number */
-	uint16_t                nb_tx_desc[MAX_QUEUE_ID+1]; /**< per queue tx desc number */
-	struct rte_eth_rxconf   rx_conf[MAX_QUEUE_ID+1]; /**< per queue rx configuration */
-	struct rte_eth_txconf   tx_conf[MAX_QUEUE_ID+1]; /**< per queue tx configuration */
+	uint16_t                nb_rx_desc[RTE_MAX_QUEUES_PER_PORT+1]; /**< per queue rx desc number */
+	uint16_t                nb_tx_desc[RTE_MAX_QUEUES_PER_PORT+1]; /**< per queue tx desc number */
+	struct rte_eth_rxconf   rx_conf[RTE_MAX_QUEUES_PER_PORT+1]; /**< per queue rx configuration */
+	struct rte_eth_txconf   tx_conf[RTE_MAX_QUEUES_PER_PORT+1]; /**< per queue tx configuration */
 	struct rte_ether_addr   *mc_addr_pool; /**< pool of multicast addrs */
 	uint32_t                mc_addr_nb; /**< nb. of addr. in mc_addr_pool */
 	uint8_t                 slave_flag; /**< bonding slave port */
 	struct port_flow        *flow_list; /**< Associated flows. */
-	const struct rte_eth_rxtx_callback *rx_dump_cb[MAX_QUEUE_ID+1];
-	const struct rte_eth_rxtx_callback *tx_dump_cb[MAX_QUEUE_ID+1];
+	const struct rte_eth_rxtx_callback *rx_dump_cb[RTE_MAX_QUEUES_PER_PORT+1];
+	const struct rte_eth_rxtx_callback *tx_dump_cb[RTE_MAX_QUEUES_PER_PORT+1];
 #ifdef SOFTNIC
 	struct softnic_port     softport;  /**< softnic params */
 #endif
 	/**< metadata value to insert in Tx packets. */
 	uint32_t		tx_metadata;
-	const struct rte_eth_rxtx_callback *tx_set_md_cb[MAX_QUEUE_ID+1];
+	const struct rte_eth_rxtx_callback *tx_set_md_cb[RTE_MAX_QUEUES_PER_PORT+1];
 };
 
 /**
-- 
2.23.0



* Re: [dpdk-dev] [PATCH] app/testpmd: reduce memory consumption
  2019-11-21 15:12 [dpdk-dev] [PATCH] app/testpmd: reduce memory consumption David Marchand
@ 2019-11-21 15:36 ` Ferruh Yigit
  2019-11-21 16:17   ` David Marchand
  2019-11-21 16:45 ` Stephen Hemminger
                   ` (2 subsequent siblings)
  3 siblings, 1 reply; 20+ messages in thread
From: Ferruh Yigit @ 2019-11-21 15:36 UTC (permalink / raw)
  To: David Marchand, dev; +Cc: thomas, Wenzhuo Lu, Jingjing Wu, Bernard Iremonger

On 11/21/2019 3:12 PM, David Marchand wrote:
> Following [1], testpmd memory consumption has skyrocketed.
> The rte_port structure has gotten quite fat.
> 
> struct rte_port {
> [...]
>   struct rte_eth_rxconf rx_conf[65536];            /* 266280 3145728 */
>   /* --- cacheline 53312 boundary (3411968 bytes) was 40 bytes ago --- */
>   struct rte_eth_txconf tx_conf[65536];            /* 3412008 3670016 */
>   /* --- cacheline 110656 boundary (7081984 bytes) was 40 bytes ago --- */
> [...]
>   /* size: 8654936, cachelines: 135234, members: 31 */
> [...]
> 
> testpmd handles RTE_MAX_ETHPORTS ports (32 by default) which means that it
> needs ~256MB just for this internal representation.
> 
> The reason is that a testpmd rte_port (the name is quite confusing, as
> it is a local type) maintains configurations for all queues of a port.
> But where you would expect testpmd to use RTE_MAX_QUEUES_PER_PORT as the
> maximum queue count, the rte_port uses MAX_QUEUE_ID set to 64k.
> 
> Prefer the ethdev maximum value.
> 
> After this patch:
> struct rte_port {
> [...]
>   struct rte_eth_rxconf      rx_conf[1025];        /*  8240 49200 */
>   /* --- cacheline 897 boundary (57408 bytes) was 32 bytes ago --- */
>   struct rte_eth_txconf      tx_conf[1025];        /* 57440 57400 */
>   /* --- cacheline 1794 boundary (114816 bytes) was 24 bytes ago --- */
> [...]
>   /* size: 139488, cachelines: 2180, members: 31 */
> [...]
> 
> [1]: https://git.dpdk.org/dpdk/commit/?id=436b3a6b6e62
> 
> Signed-off-by: David Marchand <david.marchand@redhat.com>

Thanks for figuring this out,
Acked-by: Ferruh Yigit <ferruh.yigit@intel.com>

<...>

> diff --git a/app/test-pmd/testpmd.h b/app/test-pmd/testpmd.h
> index 90694a3309..217d577018 100644
> --- a/app/test-pmd/testpmd.h
> +++ b/app/test-pmd/testpmd.h
> @@ -58,8 +58,6 @@ typedef uint16_t portid_t;
>  typedef uint16_t queueid_t;
>  typedef uint16_t streamid_t;
>  
> -#define MAX_QUEUE_ID ((1 << (sizeof(queueid_t) * 8)) - 1)

No strong opinion, but would it be simpler to assign 'MAX_QUEUE_ID' to
'RTE_MAX_QUEUES_PER_PORT' instead?
#define MAX_QUEUE_ID RTE_MAX_QUEUES_PER_PORT



* Re: [dpdk-dev] [PATCH] app/testpmd: reduce memory consumption
  2019-11-21 15:36 ` Ferruh Yigit
@ 2019-11-21 16:17   ` David Marchand
  2019-11-21 16:23     ` David Marchand
  0 siblings, 1 reply; 20+ messages in thread
From: David Marchand @ 2019-11-21 16:17 UTC (permalink / raw)
  To: Ferruh Yigit
  Cc: dev, Thomas Monjalon, Wenzhuo Lu, Jingjing Wu, Bernard Iremonger

On Thu, Nov 21, 2019 at 4:36 PM Ferruh Yigit <ferruh.yigit@intel.com> wrote:
> > diff --git a/app/test-pmd/testpmd.h b/app/test-pmd/testpmd.h
> > index 90694a3309..217d577018 100644
> > --- a/app/test-pmd/testpmd.h
> > +++ b/app/test-pmd/testpmd.h
> > @@ -58,8 +58,6 @@ typedef uint16_t portid_t;
> >  typedef uint16_t queueid_t;
> >  typedef uint16_t streamid_t;
> >
> > -#define MAX_QUEUE_ID ((1 << (sizeof(queueid_t) * 8)) - 1)
>
> No strong opinion, but would it be simpler to assign 'MAX_QUEUE_ID' to
> 'RTE_MAX_QUEUES_PER_PORT' instead?
> #define MAX_QUEUE_ID RTE_MAX_QUEUES_PER_PORT

This was my first solution once I spotted this.
But I prefer to replace it globally: when reading this code, using
MAX_QUEUE_ID leaves the impression that testpmd has its own
restriction on the max queue count.


-- 
David Marchand



* Re: [dpdk-dev] [PATCH] app/testpmd: reduce memory consumption
  2019-11-21 16:17   ` David Marchand
@ 2019-11-21 16:23     ` David Marchand
  2019-11-21 21:22       ` Thomas Monjalon
  0 siblings, 1 reply; 20+ messages in thread
From: David Marchand @ 2019-11-21 16:23 UTC (permalink / raw)
  To: Ferruh Yigit
  Cc: dev, Thomas Monjalon, Wenzhuo Lu, Jingjing Wu, Bernard Iremonger

On Thu, Nov 21, 2019 at 5:17 PM David Marchand
<david.marchand@redhat.com> wrote:
>
> On Thu, Nov 21, 2019 at 4:36 PM Ferruh Yigit <ferruh.yigit@intel.com> wrote:
> > > diff --git a/app/test-pmd/testpmd.h b/app/test-pmd/testpmd.h
> > > index 90694a3309..217d577018 100644
> > > --- a/app/test-pmd/testpmd.h
> > > +++ b/app/test-pmd/testpmd.h
> > > @@ -58,8 +58,6 @@ typedef uint16_t portid_t;
> > >  typedef uint16_t queueid_t;
> > >  typedef uint16_t streamid_t;
> > >
> > > -#define MAX_QUEUE_ID ((1 << (sizeof(queueid_t) * 8)) - 1)
> >
> > No strong opinion, but would it be simpler to assign 'MAX_QUEUE_ID' to
> > 'RTE_MAX_QUEUES_PER_PORT' instead?
> > #define MAX_QUEUE_ID RTE_MAX_QUEUES_PER_PORT
>
> This was my first solution once I spotted this.
> But I prefer to replace it globally: when reading this code, using
> MAX_QUEUE_ID leaves the impression that testpmd has its own
> restriction on the max queue count.

Btw, not sure we want to backport this, or maybe up to branches
containing d44f8a485f5d ("app/testpmd: enable per queue configure")
Opinions?


-- 
David Marchand



* Re: [dpdk-dev] [PATCH] app/testpmd: reduce memory consumption
  2019-11-21 15:12 [dpdk-dev] [PATCH] app/testpmd: reduce memory consumption David Marchand
  2019-11-21 15:36 ` Ferruh Yigit
@ 2019-11-21 16:45 ` Stephen Hemminger
  2019-11-21 20:32   ` David Marchand
  2019-11-21 21:25 ` Thomas Monjalon
  2019-11-22 10:43 ` [dpdk-dev] [PATCH v2] " David Marchand
  3 siblings, 1 reply; 20+ messages in thread
From: Stephen Hemminger @ 2019-11-21 16:45 UTC (permalink / raw)
  To: David Marchand
  Cc: dev, thomas, ferruh.yigit, Wenzhuo Lu, Jingjing Wu, Bernard Iremonger

On Thu, 21 Nov 2019 16:12:55 +0100
David Marchand <david.marchand@redhat.com> wrote:

> -	uint16_t                nb_rx_desc[MAX_QUEUE_ID+1]; /**< per queue rx desc number */
> -	uint16_t                nb_tx_desc[MAX_QUEUE_ID+1]; /**< per queue tx desc number */
> -	struct rte_eth_rxconf   rx_conf[MAX_QUEUE_ID+1]; /**< per queue rx configuration */
> -	struct rte_eth_txconf   tx_conf[MAX_QUEUE_ID+1]; /**< per queue tx configuration */
> +	uint16_t                nb_rx_desc[RTE_MAX_QUEUES_PER_PORT+1]; /**< per queue rx desc number */
> +	uint16_t                nb_tx_desc[RTE_MAX_QUEUES_PER_PORT+1]; /**< per queue tx desc number */
> +	struct rte_eth_rxconf   rx_conf[RTE_MAX_QUEUES_PER_PORT+1]; /**< per queue rx configuration */
> +	struct rte_eth_txconf   tx_conf[RTE_MAX_QUEUES_PER_PORT+1]; /**< per queue tx configuration */

Why not  put all the per-queue stuff together in one structure
and put it at the end. Then dynamically size based on number of queues?
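
A purely illustrative sketch of that idea (not what this patch does; the
structure and field names below are invented) could group the per-queue state
behind a flexible array member sized once the real queue count is known:

#include <rte_ethdev.h>

/* Hypothetical grouping of testpmd's per-queue state; all names are invented. */
struct queue_config {
	uint16_t nb_rx_desc;
	uint16_t nb_tx_desc;
	struct rte_eth_rxconf rx_conf;
	struct rte_eth_txconf tx_conf;
	const struct rte_eth_rxtx_callback *rx_dump_cb;
	const struct rte_eth_rxtx_callback *tx_dump_cb;
	const struct rte_eth_rxtx_callback *tx_set_md_cb;
};

struct rte_port {
	/* ... the existing fixed-size members would stay here ... */
	uint16_t nb_queues;            /* entries allocated in queues[] */
	/* Flexible array member, allocated as
	 * malloc(sizeof(struct rte_port) + nb_queues * sizeof(struct queue_config)) */
	struct queue_config queues[];
};

The reply below notes that the current code relies on the existing per-field
array layout, which is why such a rework was deferred to after 19.11.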


* Re: [dpdk-dev] [PATCH] app/testpmd: reduce memory consumption
  2019-11-21 16:45 ` Stephen Hemminger
@ 2019-11-21 20:32   ` David Marchand
  0 siblings, 0 replies; 20+ messages in thread
From: David Marchand @ 2019-11-21 20:32 UTC (permalink / raw)
  To: Stephen Hemminger
  Cc: dev, Thomas Monjalon, Yigit, Ferruh, Wenzhuo Lu, Jingjing Wu,
	Bernard Iremonger

On Thu, Nov 21, 2019 at 5:45 PM Stephen Hemminger
<stephen@networkplumber.org> wrote:
>
> On Thu, 21 Nov 2019 16:12:55 +0100
> David Marchand <david.marchand@redhat.com> wrote:
>
> > -     uint16_t                nb_rx_desc[MAX_QUEUE_ID+1]; /**< per queue rx desc number */
> > -     uint16_t                nb_tx_desc[MAX_QUEUE_ID+1]; /**< per queue tx desc number */
> > -     struct rte_eth_rxconf   rx_conf[MAX_QUEUE_ID+1]; /**< per queue rx configuration */
> > -     struct rte_eth_txconf   tx_conf[MAX_QUEUE_ID+1]; /**< per queue tx configuration */
> > +     uint16_t                nb_rx_desc[RTE_MAX_QUEUES_PER_PORT+1]; /**< per queue rx desc number */
> > +     uint16_t                nb_tx_desc[RTE_MAX_QUEUES_PER_PORT+1]; /**< per queue tx desc number */
> > +     struct rte_eth_rxconf   rx_conf[RTE_MAX_QUEUES_PER_PORT+1]; /**< per queue rx configuration */
> > +     struct rte_eth_txconf   tx_conf[RTE_MAX_QUEUES_PER_PORT+1]; /**< per queue tx configuration */
>
> Why not  put all the per-queue stuff together in one structure
> and put it at the end. Then dynamically size based on number of queues?

This is something that could be done.
At first glance, the code is relying on those arrays being contiguous,
but it should not be a problem.
The reason for the size '+1' is not obvious to me.
Not saying that would be difficult to investigate and fix/rework all this.

My approach seems the quickest and least risky after rc3.
I can look at this post 19.11 (but volunteers are welcome, testpmd
needs some love).


-- 
David Marchand



* Re: [dpdk-dev] [PATCH] app/testpmd: reduce memory consumption
  2019-11-21 16:23     ` David Marchand
@ 2019-11-21 21:22       ` Thomas Monjalon
  0 siblings, 0 replies; 20+ messages in thread
From: Thomas Monjalon @ 2019-11-21 21:22 UTC (permalink / raw)
  To: David Marchand
  Cc: Ferruh Yigit, dev, Wenzhuo Lu, Jingjing Wu, Bernard Iremonger

21/11/2019 17:23, David Marchand:
> On Thu, Nov 21, 2019 at 5:17 PM David Marchand
> <david.marchand@redhat.com> wrote:
> >
> > On Thu, Nov 21, 2019 at 4:36 PM Ferruh Yigit <ferruh.yigit@intel.com> wrote:
> > > > diff --git a/app/test-pmd/testpmd.h b/app/test-pmd/testpmd.h
> > > > index 90694a3309..217d577018 100644
> > > > --- a/app/test-pmd/testpmd.h
> > > > +++ b/app/test-pmd/testpmd.h
> > > > @@ -58,8 +58,6 @@ typedef uint16_t portid_t;
> > > >  typedef uint16_t queueid_t;
> > > >  typedef uint16_t streamid_t;
> > > >
> > > > -#define MAX_QUEUE_ID ((1 << (sizeof(queueid_t) * 8)) - 1)
> > >
> > > No strong opinion, but would it be simpler to assign 'MAX_QUEUE_ID' to
> > > 'RTE_MAX_QUEUES_PER_PORT' instead?
> > > #define MAX_QUEUE_ID RTE_MAX_QUEUES_PER_PORT
> >
> > This was my first solution once I spotted this.
> > But I prefer to replace it globally: when reading this code, using
> > MAX_QUEUE_ID leaves the impression that testpmd has its own
> > restriction on the max queue count.
> 
> Btw, not sure we want to backport this, or maybe up to branches
> containing d44f8a485f5d ("app/testpmd: enable per queue configure")
> Opinions?

I am for not backporting.
It is an optimization (stop wasting some memory).





* Re: [dpdk-dev] [PATCH] app/testpmd: reduce memory consumption
  2019-11-21 15:12 [dpdk-dev] [PATCH] app/testpmd: reduce memory consumption David Marchand
  2019-11-21 15:36 ` Ferruh Yigit
  2019-11-21 16:45 ` Stephen Hemminger
@ 2019-11-21 21:25 ` Thomas Monjalon
  2019-11-21 21:55   ` Thomas Monjalon
  2019-11-22 10:43 ` [dpdk-dev] [PATCH v2] " David Marchand
  3 siblings, 1 reply; 20+ messages in thread
From: Thomas Monjalon @ 2019-11-21 21:25 UTC (permalink / raw)
  To: David Marchand
  Cc: dev, ferruh.yigit, Wenzhuo Lu, Jingjing Wu, Bernard Iremonger

21/11/2019 16:12, David Marchand:
> Following [1], testpmd memory consumption has skyrocketed.
> The rte_port structure has gotten quite fat.
> 
> struct rte_port {
> [...]
>   struct rte_eth_rxconf rx_conf[65536];            /* 266280 3145728 */
>   /* --- cacheline 53312 boundary (3411968 bytes) was 40 bytes ago --- */
>   struct rte_eth_txconf tx_conf[65536];            /* 3412008 3670016 */
>   /* --- cacheline 110656 boundary (7081984 bytes) was 40 bytes ago --- */
> [...]
>   /* size: 8654936, cachelines: 135234, members: 31 */
> [...]
> 
> testpmd handles RTE_MAX_ETHPORTS ports (32 by default) which means that it
> needs ~256MB just for this internal representation.
> 
> The reason is that a testpmd rte_port (the name is quite confusing, as
> it is a local type) maintains configurations for all queues of a port.
> But where you would expect testpmd to use RTE_MAX_QUEUES_PER_PORT as the
> maximum queue count, the rte_port uses MAX_QUEUE_ID set to 64k.
> 
> Prefer the ethdev maximum value.
> 
> After this patch:
> struct rte_port {
> [...]
>   struct rte_eth_rxconf      rx_conf[1025];        /*  8240 49200 */
>   /* --- cacheline 897 boundary (57408 bytes) was 32 bytes ago --- */
>   struct rte_eth_txconf      tx_conf[1025];        /* 57440 57400 */
>   /* --- cacheline 1794 boundary (114816 bytes) was 24 bytes ago --- */
> [...]
>   /* size: 139488, cachelines: 2180, members: 31 */
> [...]
> 
> [1]: https://git.dpdk.org/dpdk/commit/?id=436b3a6b6e62
> 
> Signed-off-by: David Marchand <david.marchand@redhat.com>

Acked-by: Thomas Monjalon <thomas@monjalon.net>

I was really concerned by the memory requirement increase
due to my patch on ethdev structs.
Thank you for finding these giant arrays.




* Re: [dpdk-dev] [PATCH] app/testpmd: reduce memory consumption
  2019-11-21 21:25 ` Thomas Monjalon
@ 2019-11-21 21:55   ` Thomas Monjalon
  0 siblings, 0 replies; 20+ messages in thread
From: Thomas Monjalon @ 2019-11-21 21:55 UTC (permalink / raw)
  To: David Marchand
  Cc: dev, ferruh.yigit, Wenzhuo Lu, Jingjing Wu, Bernard Iremonger

21/11/2019 22:25, Thomas Monjalon:
> 21/11/2019 16:12, David Marchand:
> > Following [1], testpmd memory consumption has skyrocketed.
> > The rte_port structure has gotten quite fat.
> > 
> > struct rte_port {
> > [...]
> >   struct rte_eth_rxconf rx_conf[65536];            /* 266280 3145728 */
> >   /* --- cacheline 53312 boundary (3411968 bytes) was 40 bytes ago --- */
> >   struct rte_eth_txconf tx_conf[65536];            /* 3412008 3670016 */
> >   /* --- cacheline 110656 boundary (7081984 bytes) was 40 bytes ago --- */
> > [...]
> >   /* size: 8654936, cachelines: 135234, members: 31 */
> > [...]
> > 
> > testpmd handles RTE_MAX_ETHPORTS ports (32 by default) which means that it
> > needs ~256MB just for this internal representation.
> > 
> > The reason is that a testpmd rte_port (the name is quite confusing, as
> > it is a local type) maintains configurations for all queues of a port.
> > But where you would expect testpmd to use RTE_MAX_QUEUES_PER_PORT as the
> > maximum queue count, the rte_port uses MAX_QUEUE_ID set to 64k.
> > 
> > Prefer the ethdev maximum value.
> > 
> > After this patch:
> > struct rte_port {
> > [...]
> >   struct rte_eth_rxconf      rx_conf[1025];        /*  8240 49200 */
> >   /* --- cacheline 897 boundary (57408 bytes) was 32 bytes ago --- */
> >   struct rte_eth_txconf      tx_conf[1025];        /* 57440 57400 */
> >   /* --- cacheline 1794 boundary (114816 bytes) was 24 bytes ago --- */
> > [...]
> >   /* size: 139488, cachelines: 2180, members: 31 */
> > [...]
> > 
> > [1]: https://git.dpdk.org/dpdk/commit/?id=436b3a6b6e62
> > 
> > Signed-off-by: David Marchand <david.marchand@redhat.com>
> 
> Acked-by: Thomas Monjalon <thomas@monjalon.net>
> 
> I was really concerned by the memory requirement increase
> due to my patch on ethdev structs.
> Thank you for finding these giant arrays.

After testing this patch, I realized that you can decrease
the memory requirement of test-null.sh from 150 MB (300 MB in patch [1])
to only 20 MB.

The following patch [1] was working around the big memory requirement
by increasing the allocated memory to 300 MB.
[1] https://patches.dpdk.org/patch/63151/




* [dpdk-dev] [PATCH v2] app/testpmd: reduce memory consumption
  2019-11-21 15:12 [dpdk-dev] [PATCH] app/testpmd: reduce memory consumption David Marchand
                   ` (2 preceding siblings ...)
  2019-11-21 21:25 ` Thomas Monjalon
@ 2019-11-22 10:43 ` David Marchand
  2019-11-22 12:24   ` Ferruh Yigit
  2019-11-22 14:14   ` [dpdk-dev] [PATCH v2] app/testpmd: reduce memory consumption Ferruh Yigit
  3 siblings, 2 replies; 20+ messages in thread
From: David Marchand @ 2019-11-22 10:43 UTC (permalink / raw)
  To: dev
  Cc: thomas, ferruh.yigit, stephen, Wenzhuo Lu, Jingjing Wu,
	Bernard Iremonger, Andrew Rybchenko

Following [1], testpmd memory consumption has skyrocketed.
The rte_port structure has gotten quite fat.

struct rte_port {
[...]
  struct rte_eth_rxconf rx_conf[65536];            /* 266280 3145728 */
  /* --- cacheline 53312 boundary (3411968 bytes) was 40 bytes ago --- */
  struct rte_eth_txconf tx_conf[65536];            /* 3412008 3670016 */
  /* --- cacheline 110656 boundary (7081984 bytes) was 40 bytes ago --- */
[...]
  /* size: 8654936, cachelines: 135234, members: 31 */
[...]

testpmd handles RTE_MAX_ETHPORTS ports (32 by default) which means that it
needs ~256MB just for this internal representation.

The reason is that a testpmd rte_port (the name is quite confusing, as
it is a local type) maintains configurations for all queues of a port.
But where you would expect testpmd to use RTE_MAX_QUEUES_PER_PORT as the
maximum queue count, the rte_port uses MAX_QUEUE_ID set to 64k.

Prefer the ethdev maximum value.

After this patch:
struct rte_port {
[...]
  struct rte_eth_rxconf      rx_conf[1025];        /*  8240 49200 */
  /* --- cacheline 897 boundary (57408 bytes) was 32 bytes ago --- */
  struct rte_eth_txconf      tx_conf[1025];        /* 57440 57400 */
  /* --- cacheline 1794 boundary (114816 bytes) was 24 bytes ago --- */
[...]
  /* size: 139488, cachelines: 2180, members: 31 */
[...]

With this, we can ask for less memory in test-null.sh.

[1]: https://git.dpdk.org/dpdk/commit/?id=436b3a6b6e62

Signed-off-by: David Marchand <david.marchand@redhat.com>
Acked-by: Ferruh Yigit <ferruh.yigit@intel.com>
Acked-by: Thomas Monjalon <thomas@monjalon.net>
---
Changelog since v1:
- updated test-null.sh

---
 app/test-pmd/testpmd.c |  6 +++---
 app/test-pmd/testpmd.h | 16 +++++++---------
 devtools/test-null.sh  |  2 +-
 3 files changed, 11 insertions(+), 13 deletions(-)

diff --git a/app/test-pmd/testpmd.c b/app/test-pmd/testpmd.c
index 73ebf37aae..d28211ba52 100644
--- a/app/test-pmd/testpmd.c
+++ b/app/test-pmd/testpmd.c
@@ -979,7 +979,7 @@ check_socket_id(const unsigned int socket_id)
 queueid_t
 get_allowed_max_nb_rxq(portid_t *pid)
 {
-	queueid_t allowed_max_rxq = MAX_QUEUE_ID;
+	queueid_t allowed_max_rxq = RTE_MAX_QUEUES_PER_PORT;
 	bool max_rxq_valid = false;
 	portid_t pi;
 	struct rte_eth_dev_info dev_info;
@@ -1029,7 +1029,7 @@ check_nb_rxq(queueid_t rxq)
 queueid_t
 get_allowed_max_nb_txq(portid_t *pid)
 {
-	queueid_t allowed_max_txq = MAX_QUEUE_ID;
+	queueid_t allowed_max_txq = RTE_MAX_QUEUES_PER_PORT;
 	bool max_txq_valid = false;
 	portid_t pi;
 	struct rte_eth_dev_info dev_info;
@@ -1079,7 +1079,7 @@ check_nb_txq(queueid_t txq)
 queueid_t
 get_allowed_max_nb_hairpinq(portid_t *pid)
 {
-	queueid_t allowed_max_hairpinq = MAX_QUEUE_ID;
+	queueid_t allowed_max_hairpinq = RTE_MAX_QUEUES_PER_PORT;
 	portid_t pi;
 	struct rte_eth_hairpin_cap cap;
 
diff --git a/app/test-pmd/testpmd.h b/app/test-pmd/testpmd.h
index 90694a3309..217d577018 100644
--- a/app/test-pmd/testpmd.h
+++ b/app/test-pmd/testpmd.h
@@ -58,8 +58,6 @@ typedef uint16_t portid_t;
 typedef uint16_t queueid_t;
 typedef uint16_t streamid_t;
 
-#define MAX_QUEUE_ID ((1 << (sizeof(queueid_t) * 8)) - 1)
-
 #if defined RTE_LIBRTE_PMD_SOFTNIC
 #define SOFTNIC			1
 #else
@@ -179,22 +177,22 @@ struct rte_port {
 	uint8_t                 need_reconfig_queues; /**< need reconfiguring queues or not */
 	uint8_t                 rss_flag;   /**< enable rss or not */
 	uint8_t                 dcb_flag;   /**< enable dcb */
-	uint16_t                nb_rx_desc[MAX_QUEUE_ID+1]; /**< per queue rx desc number */
-	uint16_t                nb_tx_desc[MAX_QUEUE_ID+1]; /**< per queue tx desc number */
-	struct rte_eth_rxconf   rx_conf[MAX_QUEUE_ID+1]; /**< per queue rx configuration */
-	struct rte_eth_txconf   tx_conf[MAX_QUEUE_ID+1]; /**< per queue tx configuration */
+	uint16_t                nb_rx_desc[RTE_MAX_QUEUES_PER_PORT+1]; /**< per queue rx desc number */
+	uint16_t                nb_tx_desc[RTE_MAX_QUEUES_PER_PORT+1]; /**< per queue tx desc number */
+	struct rte_eth_rxconf   rx_conf[RTE_MAX_QUEUES_PER_PORT+1]; /**< per queue rx configuration */
+	struct rte_eth_txconf   tx_conf[RTE_MAX_QUEUES_PER_PORT+1]; /**< per queue tx configuration */
 	struct rte_ether_addr   *mc_addr_pool; /**< pool of multicast addrs */
 	uint32_t                mc_addr_nb; /**< nb. of addr. in mc_addr_pool */
 	uint8_t                 slave_flag; /**< bonding slave port */
 	struct port_flow        *flow_list; /**< Associated flows. */
-	const struct rte_eth_rxtx_callback *rx_dump_cb[MAX_QUEUE_ID+1];
-	const struct rte_eth_rxtx_callback *tx_dump_cb[MAX_QUEUE_ID+1];
+	const struct rte_eth_rxtx_callback *rx_dump_cb[RTE_MAX_QUEUES_PER_PORT+1];
+	const struct rte_eth_rxtx_callback *tx_dump_cb[RTE_MAX_QUEUES_PER_PORT+1];
 #ifdef SOFTNIC
 	struct softnic_port     softport;  /**< softnic params */
 #endif
 	/**< metadata value to insert in Tx packets. */
 	uint32_t		tx_metadata;
-	const struct rte_eth_rxtx_callback *tx_set_md_cb[MAX_QUEUE_ID+1];
+	const struct rte_eth_rxtx_callback *tx_set_md_cb[RTE_MAX_QUEUES_PER_PORT+1];
 };
 
 /**
diff --git a/devtools/test-null.sh b/devtools/test-null.sh
index 9f9a459f76..6e5b1ad529 100755
--- a/devtools/test-null.sh
+++ b/devtools/test-null.sh
@@ -25,6 +25,6 @@ else
 fi
 
 (sleep 1 && echo stop) |
-$testpmd -c $coremask --no-huge -m 150 \
+$testpmd -c $coremask --no-huge -m 20 \
 	$libs --vdev net_null1 --vdev net_null2 $eal_options -- \
 	--no-mlockall --total-num-mbufs=2048 $testpmd_options -ia
-- 
2.23.0



* Re: [dpdk-dev] [PATCH v2] app/testpmd: reduce memory consumption
  2019-11-22 10:43 ` [dpdk-dev] [PATCH v2] " David Marchand
@ 2019-11-22 12:24   ` Ferruh Yigit
  2019-11-22 13:12     ` Thomas Monjalon
  2019-11-22 14:14   ` [dpdk-dev] [PATCH v2] app/testpmd: reduce memory consumption Ferruh Yigit
  1 sibling, 1 reply; 20+ messages in thread
From: Ferruh Yigit @ 2019-11-22 12:24 UTC (permalink / raw)
  To: David Marchand, dev
  Cc: thomas, stephen, Wenzhuo Lu, Jingjing Wu, Bernard Iremonger,
	Andrew Rybchenko

On 11/22/2019 10:43 AM, David Marchand wrote:
> diff --git a/devtools/test-null.sh b/devtools/test-null.sh
> index 9f9a459f76..6e5b1ad529 100755
> --- a/devtools/test-null.sh
> +++ b/devtools/test-null.sh
> @@ -25,6 +25,6 @@ else
>  fi
>  
>  (sleep 1 && echo stop) |
> -$testpmd -c $coremask --no-huge -m 150 \
> +$testpmd -c $coremask --no-huge -m 20 \
>  	$libs --vdev net_null1 --vdev net_null2 $eal_options -- \
>  	--no-mlockall --total-num-mbufs=2048 $testpmd_options -ia

What do you think about separating this part and going with the first version of the patchset?

And I am not sure if we should update it at all; what is the benefit?

Also, the script fails after this update because of the additional physical devices and
their memory requirements; it is possible to make it run with an additional testpmd
argument, but it fails by default.


* Re: [dpdk-dev] [PATCH v2] app/testpmd: reduce memory consumption
  2019-11-22 12:24   ` Ferruh Yigit
@ 2019-11-22 13:12     ` Thomas Monjalon
  2019-11-22 13:14       ` Ferruh Yigit
  0 siblings, 1 reply; 20+ messages in thread
From: Thomas Monjalon @ 2019-11-22 13:12 UTC (permalink / raw)
  To: Ferruh Yigit
  Cc: David Marchand, dev, stephen, Wenzhuo Lu, Jingjing Wu,
	Bernard Iremonger, Andrew Rybchenko

22/11/2019 13:24, Ferruh Yigit:
> On 11/22/2019 10:43 AM, David Marchand wrote:
> > diff --git a/devtools/test-null.sh b/devtools/test-null.sh
> > index 9f9a459f76..6e5b1ad529 100755
> > --- a/devtools/test-null.sh
> > +++ b/devtools/test-null.sh
> > @@ -25,6 +25,6 @@ else
> >  fi
> >  
> >  (sleep 1 && echo stop) |
> > -$testpmd -c $coremask --no-huge -m 150 \
> > +$testpmd -c $coremask --no-huge -m 20 \
> >  	$libs --vdev net_null1 --vdev net_null2 $eal_options -- \
> >  	--no-mlockall --total-num-mbufs=2048 $testpmd_options -ia
> 
> What do you think about separating this part and going with the first version of the patchset?
> 
> And I am not sure if we should update it at all; what is the benefit?

The benefit is to test that there are no abnormal memory needs.

> Also, the script fails after this update because of the additional physical devices and
> their memory requirements; it is possible to make it run with an additional testpmd
> argument, but it fails by default.

This test is supposed to be for null devices only.
Why are you trying to add physical devices?
Oh wait, are we missing an option -w 0 to be in whitelist mode?





* Re: [dpdk-dev] [PATCH v2] app/testpmd: reduce memory consumption
  2019-11-22 13:12     ` Thomas Monjalon
@ 2019-11-22 13:14       ` Ferruh Yigit
  2019-11-22 13:48         ` [dpdk-dev] [PATCH] devtools: disable automatic probing in null testing Thomas Monjalon
  0 siblings, 1 reply; 20+ messages in thread
From: Ferruh Yigit @ 2019-11-22 13:14 UTC (permalink / raw)
  To: Thomas Monjalon
  Cc: David Marchand, dev, stephen, Wenzhuo Lu, Jingjing Wu,
	Bernard Iremonger, Andrew Rybchenko

On 11/22/2019 1:12 PM, Thomas Monjalon wrote:
> 22/11/2019 13:24, Ferruh Yigit:
>> On 11/22/2019 10:43 AM, David Marchand wrote:
>>> diff --git a/devtools/test-null.sh b/devtools/test-null.sh
>>> index 9f9a459f76..6e5b1ad529 100755
>>> --- a/devtools/test-null.sh
>>> +++ b/devtools/test-null.sh
>>> @@ -25,6 +25,6 @@ else
>>>  fi
>>>  
>>>  (sleep 1 && echo stop) |
>>> -$testpmd -c $coremask --no-huge -m 150 \
>>> +$testpmd -c $coremask --no-huge -m 20 \
>>>  	$libs --vdev net_null1 --vdev net_null2 $eal_options -- \
>>>  	--no-mlockall --total-num-mbufs=2048 $testpmd_options -ia
>>
>> What do you think about separating this part and going with the first version of the patchset?
>>
>> And I am not sure if we should update it at all; what is the benefit?

Fair enough.

> 
> The benefit is to test that there are no abnormal memory needs.
> 
>> Also, the script fails after this update because of the additional physical devices and
>> their memory requirements; it is possible to make it run with an additional testpmd
>> argument, but it fails by default.
> 
> This test is supposed to be for null devices only.
> Why are you trying to add physical devices?
> Oh wait, are we missing an option -w 0 to be in whitelist mode?
> 
I am not trying to add physical devices; they are already there in my
environment. Yes, adding "-w" can solve it.


* [dpdk-dev] [PATCH] devtools: disable automatic probing in null testing
  2019-11-22 13:14       ` Ferruh Yigit
@ 2019-11-22 13:48         ` Thomas Monjalon
  2019-11-22 13:56           ` Ferruh Yigit
  2019-11-22 14:03           ` Gaëtan Rivet
  0 siblings, 2 replies; 20+ messages in thread
From: Thomas Monjalon @ 2019-11-22 13:48 UTC (permalink / raw)
  To: david.marchand; +Cc: ferruh.yigit, dev

The script test-null.sh is supposed to do a quick and simple
run of testpmd with null PMD only, for sanity check.
As it is not supposed to test probing of any other PMD,
physical device probing is switched to whitelist mode
by using a fake PCI address (0:0.0).
It will also help to keep memory usage stable across platforms.

Signed-off-by: Thomas Monjalon <thomas@monjalon.net>
---
 devtools/test-null.sh | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/devtools/test-null.sh b/devtools/test-null.sh
index 9f9a459f76..d82c6ad193 100755
--- a/devtools/test-null.sh
+++ b/devtools/test-null.sh
@@ -26,5 +26,5 @@ fi
 
 (sleep 1 && echo stop) |
 $testpmd -c $coremask --no-huge -m 150 \
-	$libs --vdev net_null1 --vdev net_null2 $eal_options -- \
+	$libs -w 0:0.0 --vdev net_null1 --vdev net_null2 $eal_options -- \
 	--no-mlockall --total-num-mbufs=2048 $testpmd_options -ia
-- 
2.23.0



* Re: [dpdk-dev] [PATCH] devtools: disable automatic probing in null testing
  2019-11-22 13:48         ` [dpdk-dev] [PATCH] devtools: disable automatic probing in null testing Thomas Monjalon
@ 2019-11-22 13:56           ` Ferruh Yigit
  2019-11-22 15:56             ` David Marchand
  2019-11-22 14:03           ` Gaëtan Rivet
  1 sibling, 1 reply; 20+ messages in thread
From: Ferruh Yigit @ 2019-11-22 13:56 UTC (permalink / raw)
  To: Thomas Monjalon, david.marchand; +Cc: dev

On 11/22/2019 1:48 PM, Thomas Monjalon wrote:
> The script test-null.sh is supposed to do a quick and simple
> run of testpmd with null PMD only, for sanity check.
> As it is not supposed to test probing of any other PMD,
> physical device probing is switched to whitelist mode
> by using a fake PCI address (0:0.0).
> It will also help to keep memory usage stable across platforms.
> 
> Signed-off-by: Thomas Monjalon <thomas@monjalon.net>
> ---
>  devtools/test-null.sh | 2 +-
>  1 file changed, 1 insertion(+), 1 deletion(-)
> 
> diff --git a/devtools/test-null.sh b/devtools/test-null.sh
> index 9f9a459f76..d82c6ad193 100755
> --- a/devtools/test-null.sh
> +++ b/devtools/test-null.sh
> @@ -26,5 +26,5 @@ fi
>  
>  (sleep 1 && echo stop) |
>  $testpmd -c $coremask --no-huge -m 150 \
> -	$libs --vdev net_null1 --vdev net_null2 $eal_options -- \
> +	$libs -w 0:0.0 --vdev net_null1 --vdev net_null2 $eal_options -- \
>  	--no-mlockall --total-num-mbufs=2048 $testpmd_options -ia
> 

Tested-by: Ferruh Yigit <ferruh.yigit@intel.com>


* Re: [dpdk-dev] [PATCH] devtools: disable automatic probing in null testing
  2019-11-22 13:48         ` [dpdk-dev] [PATCH] devtools: disable automatic probing in null testing Thomas Monjalon
  2019-11-22 13:56           ` Ferruh Yigit
@ 2019-11-22 14:03           ` Gaëtan Rivet
  2019-11-22 14:36             ` Thomas Monjalon
  1 sibling, 1 reply; 20+ messages in thread
From: Gaëtan Rivet @ 2019-11-22 14:03 UTC (permalink / raw)
  To: Thomas Monjalon; +Cc: david.marchand, ferruh.yigit, dev

Hi Thomas,

On Fri, Nov 22, 2019 at 02:48:08PM +0100, Thomas Monjalon wrote:
> The script test-null.sh is supposed to do a quick and simple
> run of testpmd with null PMD only, for sanity check.
> As it is not supposed to test probing of any other PMD,
> physical device probing is switched to whitelist mode
> by using a fake PCI address (0:0.0).
> It will also help to keep memory usage stable across platforms.
> 

With https://patchwork.dpdk.org/patch/62014/, --manual-probe could be
used as a more standard way of working around the PCI bus.

This is a common issue, we should have cleaner way of addressing it than
using hacks relying on the PCI bus not panicking up upon finding an
inexistant address. Which is a questionable behavior, users should not
be encouraged to rely on it.

> Signed-off-by: Thomas Monjalon <thomas@monjalon.net>
> ---
>  devtools/test-null.sh | 2 +-
>  1 file changed, 1 insertion(+), 1 deletion(-)
> 
> diff --git a/devtools/test-null.sh b/devtools/test-null.sh
> index 9f9a459f76..d82c6ad193 100755
> --- a/devtools/test-null.sh
> +++ b/devtools/test-null.sh
> @@ -26,5 +26,5 @@ fi
>  
>  (sleep 1 && echo stop) |
>  $testpmd -c $coremask --no-huge -m 150 \
> -	$libs --vdev net_null1 --vdev net_null2 $eal_options -- \
> +	$libs -w 0:0.0 --vdev net_null1 --vdev net_null2 $eal_options -- \
>  	--no-mlockall --total-num-mbufs=2048 $testpmd_options -ia
> -- 
> 2.23.0
> 

-- 
Gaëtan Rivet
6WIND


* Re: [dpdk-dev] [PATCH v2] app/testpmd: reduce memory consumption
  2019-11-22 10:43 ` [dpdk-dev] [PATCH v2] " David Marchand
  2019-11-22 12:24   ` Ferruh Yigit
@ 2019-11-22 14:14   ` Ferruh Yigit
  1 sibling, 0 replies; 20+ messages in thread
From: Ferruh Yigit @ 2019-11-22 14:14 UTC (permalink / raw)
  To: David Marchand, dev
  Cc: thomas, stephen, Wenzhuo Lu, Jingjing Wu, Bernard Iremonger,
	Andrew Rybchenko

On 11/22/2019 10:43 AM, David Marchand wrote:
> Following [1], testpmd memory consumption has skyrocketed.
> The rte_port structure has gotten quite fat.
> 
> struct rte_port {
> [...]
>   struct rte_eth_rxconf rx_conf[65536];            /* 266280 3145728 */
>   /* --- cacheline 53312 boundary (3411968 bytes) was 40 bytes ago --- */
>   struct rte_eth_txconf tx_conf[65536];            /* 3412008 3670016 */
>   /* --- cacheline 110656 boundary (7081984 bytes) was 40 bytes ago --- */
> [...]
>   /* size: 8654936, cachelines: 135234, members: 31 */
> [...]
> 
> testpmd handles RTE_MAX_ETHPORTS ports (32 by default) which means that it
> needs ~256MB just for this internal representation.
> 
> The reason is that a testpmd rte_port (the name is quite confusing, as
> it is a local type) maintains configurations for all queues of a port.
> But where you would expect testpmd to use RTE_MAX_QUEUES_PER_PORT as the
> maximum queue count, the rte_port uses MAX_QUEUE_ID set to 64k.
> 
> Prefer the ethdev maximum value.
> 
> After this patch:
> struct rte_port {
> [...]
>   struct rte_eth_rxconf      rx_conf[1025];        /*  8240 49200 */
>   /* --- cacheline 897 boundary (57408 bytes) was 32 bytes ago --- */
>   struct rte_eth_txconf      tx_conf[1025];        /* 57440 57400 */
>   /* --- cacheline 1794 boundary (114816 bytes) was 24 bytes ago --- */
> [...]
>   /* size: 139488, cachelines: 2180, members: 31 */
> [...]
> 
> With this, we can ask for less memory in test-null.sh.
> 
> [1]: https://git.dpdk.org/dpdk/commit/?id=436b3a6b6e62
> 
> Signed-off-by: David Marchand <david.marchand@redhat.com>
> Acked-by: Ferruh Yigit <ferruh.yigit@intel.com>
> Acked-by: Thomas Monjalon <thomas@monjalon.net>

Applied to dpdk-next-net/master, thanks.


* Re: [dpdk-dev] [PATCH] devtools: disable automatic probing in null testing
  2019-11-22 14:03           ` Gaëtan Rivet
@ 2019-11-22 14:36             ` Thomas Monjalon
  0 siblings, 0 replies; 20+ messages in thread
From: Thomas Monjalon @ 2019-11-22 14:36 UTC (permalink / raw)
  To: Gaëtan Rivet; +Cc: david.marchand, ferruh.yigit, dev

22/11/2019 15:03, Gaëtan Rivet:
> Hi Thomas,
> 
> On Fri, Nov 22, 2019 at 02:48:08PM +0100, Thomas Monjalon wrote:
> > The script test-null.sh is supposed to do a quick and simple
> > run of testpmd with null PMD only, for sanity check.
> > As it is not supposed to test probing of any other PMD,
> > physical device probing is switched to whitelist mode
> > by using a fake PCI address (0:0.0).
> > It will also help to keep memory usage stable across platforms.
> > 
> 
> With https://patchwork.dpdk.org/patch/62014/, --manual-probe could be
> used as a more standard way of working around the PCI bus.

I would really like us to have a better bus API before
implementing new command line options.
Command line options should be optional and considered only as helpers
for internal applications.
Anyway, it is another discussion, and I hope I will have the time and courage
to participate in such a rework soon.

> This is a common issue; we should have a cleaner way of addressing it than
> using hacks that rely on the PCI bus not panicking upon finding a
> nonexistent address. That is questionable behavior, and users should not
> be encouraged to rely on it.

Yes




* Re: [dpdk-dev] [PATCH] devtools: disable automatic probing in null testing
  2019-11-22 13:56           ` Ferruh Yigit
@ 2019-11-22 15:56             ` David Marchand
  2019-11-24 22:52               ` Thomas Monjalon
  0 siblings, 1 reply; 20+ messages in thread
From: David Marchand @ 2019-11-22 15:56 UTC (permalink / raw)
  To: Thomas Monjalon; +Cc: dev, Ferruh Yigit

On Fri, Nov 22, 2019 at 2:56 PM Ferruh Yigit <ferruh.yigit@intel.com> wrote:
>
> On 11/22/2019 1:48 PM, Thomas Monjalon wrote:
> > The script test-null.sh is supposed to do a quick and simple
> > run of testpmd with null PMD only, for sanity check.
> > As it is not supposed to test probing of any other PMD,
> > physical device probing is switched to whitelist mode
> > by using a fake PCI address (0:0.0).
> > It will also help to keep memory usage stable across platforms.
> >
> > Signed-off-by: Thomas Monjalon <thomas@monjalon.net>
> > ---
> >  devtools/test-null.sh | 2 +-
> >  1 file changed, 1 insertion(+), 1 deletion(-)
> >
> > diff --git a/devtools/test-null.sh b/devtools/test-null.sh
> > index 9f9a459f76..d82c6ad193 100755
> > --- a/devtools/test-null.sh
> > +++ b/devtools/test-null.sh
> > @@ -26,5 +26,5 @@ fi
> >
> >  (sleep 1 && echo stop) |
> >  $testpmd -c $coremask --no-huge -m 150 \
> > -     $libs --vdev net_null1 --vdev net_null2 $eal_options -- \
> > +     $libs -w 0:0.0 --vdev net_null1 --vdev net_null2 $eal_options -- \
> >       --no-mlockall --total-num-mbufs=2048 $testpmd_options -ia
> >
>
> Tested-by: Ferruh Yigit <ferruh.yigit@intel.com>

Ugly, but does the job.
Acked-by: David Marchand <david.marchand@redhat.com>


-- 
David Marchand



* Re: [dpdk-dev] [PATCH] devtools: disable automatic probing in null testing
  2019-11-22 15:56             ` David Marchand
@ 2019-11-24 22:52               ` Thomas Monjalon
  0 siblings, 0 replies; 20+ messages in thread
From: Thomas Monjalon @ 2019-11-24 22:52 UTC (permalink / raw)
  To: David Marchand; +Cc: dev, Ferruh Yigit

22/11/2019 16:56, David Marchand:
> On Fri, Nov 22, 2019 at 2:56 PM Ferruh Yigit <ferruh.yigit@intel.com> wrote:
> >
> > On 11/22/2019 1:48 PM, Thomas Monjalon wrote:
> > > The script test-null.sh is supposed to do a quick and simple
> > > run of testpmd with null PMD only, for sanity check.
> > > As it is not supposed to test probing of any other PMD,
> > > physical device probing is switched to whitelist mode
> > > by using a fake PCI address (0:0.0).
> > > It will also help to keep memory usage stable across platforms.
> > >
> > > Signed-off-by: Thomas Monjalon <thomas@monjalon.net>
> > > ---
> > >  devtools/test-null.sh | 2 +-
> > >  1 file changed, 1 insertion(+), 1 deletion(-)
> > >
> > > diff --git a/devtools/test-null.sh b/devtools/test-null.sh
> > > index 9f9a459f76..d82c6ad193 100755
> > > --- a/devtools/test-null.sh
> > > +++ b/devtools/test-null.sh
> > > @@ -26,5 +26,5 @@ fi
> > >
> > >  (sleep 1 && echo stop) |
> > >  $testpmd -c $coremask --no-huge -m 150 \
> > > -     $libs --vdev net_null1 --vdev net_null2 $eal_options -- \
> > > +     $libs -w 0:0.0 --vdev net_null1 --vdev net_null2 $eal_options -- \
> > >       --no-mlockall --total-num-mbufs=2048 $testpmd_options -ia
> > >
> >
> > Tested-by: Ferruh Yigit <ferruh.yigit@intel.com>
> 
> Ugly, but does the job.
> Acked-by: David Marchand <david.marchand@redhat.com>

Applied




