DPDK patches and discussions
 help / color / mirror / Atom feed
* [dpdk-dev]  [PATCH] examples/l2fwd: increase pktmbuf pool size
@ 2017-12-28 20:19 Pavan Nikhilesh
  2017-12-28 20:36 ` Stephen Hemminger
  2018-01-02  9:53 ` [dpdk-dev] [PATCH v2] " Pavan Nikhilesh
  0 siblings, 2 replies; 6+ messages in thread
From: Pavan Nikhilesh @ 2017-12-28 20:19 UTC (permalink / raw)
  To: bruce.richardson, pablo.de.lara.guarch; +Cc: dev, Pavan Nikhilesh

Make the pktmbuf pool size a function of the number of ports and lcores
detected, instead of using the constant 8192.

Signed-off-by: Pavan Nikhilesh <pbhagavatula@caviumnetworks.com>
---

 checkpatch generates the warning `Unnecessary typecast of c90 int constant`,
 but the cast is needed to keep the compiler happy (without it, a
 "comparison between signed and unsigned integer" error is generated).

 examples/l2fwd/main.c | 32 ++++++++++++++++++++++----------
 1 file changed, 22 insertions(+), 10 deletions(-)

diff --git a/examples/l2fwd/main.c b/examples/l2fwd/main.c
index e89e2e1bf..9ec544840 100644
--- a/examples/l2fwd/main.c
+++ b/examples/l2fwd/main.c
@@ -75,8 +75,6 @@ static int mac_updating = 1;

 #define RTE_LOGTYPE_L2FWD RTE_LOGTYPE_USER1

-#define NB_MBUF   8192
-
 #define MAX_PKT_BURST 32
 #define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */
 #define MEMPOOL_CACHE_SIZE 256
@@ -124,6 +122,17 @@ static const struct rte_eth_conf port_conf = {
 	},
 };

+/*
+ * This expression is used to calculate the number of mbufs needed
+ * depending on user input, taking  into account memory for rx and
+ * tx hardware rings, cache per lcore and mbuf pkt burst per port
+ * per lcore. RTE_MAX is used to ensure that NB_MBUF never goes below
+ * a minimum value of 8192
+ */
+#define NB_MBUF RTE_MAX(\
+		nb_ports * (nb_rxd + nb_txd + MAX_PKT_BURST +\
+		nb_lcores * MEMPOOL_CACHE_SIZE), (unsigned int)8192)
+
 struct rte_mempool * l2fwd_pktmbuf_pool = NULL;

 /* Per-port statistics struct */
@@ -556,6 +565,7 @@ main(int argc, char **argv)
 	uint16_t portid, last_port;
 	unsigned lcore_id, rx_lcore_id;
 	unsigned nb_ports_in_mask = 0;
+	unsigned int nb_lcores = 0;

 	/* init EAL */
 	ret = rte_eal_init(argc, argv);
@@ -578,13 +588,6 @@ main(int argc, char **argv)
 	/* convert to number of cycles */
 	timer_period *= rte_get_timer_hz();

-	/* create the mbuf pool */
-	l2fwd_pktmbuf_pool = rte_pktmbuf_pool_create("mbuf_pool", NB_MBUF,
-		MEMPOOL_CACHE_SIZE, 0, RTE_MBUF_DEFAULT_BUF_SIZE,
-		rte_socket_id());
-	if (l2fwd_pktmbuf_pool == NULL)
-		rte_exit(EXIT_FAILURE, "Cannot init mbuf pool\n");
-
 	nb_ports = rte_eth_dev_count();
 	if (nb_ports == 0)
 		rte_exit(EXIT_FAILURE, "No Ethernet ports - bye\n");
@@ -636,9 +639,11 @@ main(int argc, char **argv)
 				rte_exit(EXIT_FAILURE, "Not enough cores\n");
 		}

-		if (qconf != &lcore_queue_conf[rx_lcore_id])
+		if (qconf != &lcore_queue_conf[rx_lcore_id]) {
 			/* Assigned a new logical core in the loop above. */
 			qconf = &lcore_queue_conf[rx_lcore_id];
+			nb_lcores++;
+		}

 		qconf->rx_port_list[qconf->n_rx_port] = portid;
 		qconf->n_rx_port++;
@@ -647,6 +652,13 @@ main(int argc, char **argv)

 	nb_ports_available = nb_ports;

+	/* create the mbuf pool */
+	l2fwd_pktmbuf_pool = rte_pktmbuf_pool_create("mbuf_pool", NB_MBUF,
+		MEMPOOL_CACHE_SIZE, 0, RTE_MBUF_DEFAULT_BUF_SIZE,
+		rte_socket_id());
+	if (l2fwd_pktmbuf_pool == NULL)
+		rte_exit(EXIT_FAILURE, "Cannot init mbuf pool\n");
+
 	/* Initialise each port */
 	for (portid = 0; portid < nb_ports; portid++) {
 		/* skip ports that are not enabled */
--
2.15.1

^ permalink raw reply	[flat|nested] 6+ messages in thread

end of thread, other threads:[~2018-01-15 11:19 UTC | newest]

Thread overview: 6+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2017-12-28 20:19 [dpdk-dev] [PATCH] examples/l2fwd: increase pktmbuf pool size Pavan Nikhilesh
2017-12-28 20:36 ` Stephen Hemminger
2017-12-28 20:41   ` Pavan Nikhilesh
2018-01-02  9:53 ` [dpdk-dev] [PATCH v2] " Pavan Nikhilesh
2018-01-10 12:26   ` De Lara Guarch, Pablo
2018-01-15 11:18     ` Thomas Monjalon

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).