DPDK patches and discussions
From: Olivier Matz <olivier.matz@6wind.com>
To: dev@dpdk.org
Cc: jerin.jacob@caviumnetworks.com, hemant.agrawal@nxp.com,
	david.hunt@intel.com
Subject: [dpdk-dev] [RFC 3/7] testpmd: new parameter to set mbuf pool ops
Date: Mon, 19 Sep 2016 15:42:43 +0200	[thread overview]
Message-ID: <1474292567-21912-4-git-send-email-olivier.matz@6wind.com>
In-Reply-To: <1474292567-21912-1-git-send-email-olivier.matz@6wind.com>

Signed-off-by: Olivier Matz <olivier.matz@6wind.com>
---
 app/test-pmd/parameters.c |  5 +++++
 app/test-pmd/testpmd.c    | 15 ++++++++++++++-
 app/test-pmd/testpmd.h    |  1 +
 3 files changed, 20 insertions(+), 1 deletion(-)

diff --git a/app/test-pmd/parameters.c b/app/test-pmd/parameters.c
index 6a6a07e..cbd287d 100644
--- a/app/test-pmd/parameters.c
+++ b/app/test-pmd/parameters.c
@@ -131,6 +131,7 @@ usage(char* progname)
 	printf("  --total-num-mbufs=N: set the number of mbufs to be allocated "
 	       "in mbuf pools.\n");
 	printf("  --max-pkt-len=N: set the maximum size of packet to N bytes.\n");
+	printf("  --mbuf-pool-ops=<handler>: set an alternative mbuf pool handler\n");
 #ifdef RTE_LIBRTE_CMDLINE
 	printf("  --eth-peers-configfile=name: config file with ethernet addresses "
 	       "of peer ports.\n");
@@ -519,6 +520,7 @@ launch_args_parse(int argc, char** argv)
 		{ "mbuf-size",			1, 0, 0 },
 		{ "total-num-mbufs",		1, 0, 0 },
 		{ "max-pkt-len",		1, 0, 0 },
+		{ "mbuf-pool-ops",		1, 0, 0 },
 		{ "pkt-filter-mode",            1, 0, 0 },
 		{ "pkt-filter-report-hash",     1, 0, 0 },
 		{ "pkt-filter-size",            1, 0, 0 },
@@ -701,6 +703,9 @@ launch_args_parse(int argc, char** argv)
 						 "Invalid max-pkt-len=%d - should be > %d\n",
 						 n, ETHER_MIN_LEN);
 			}
+			if (!strcmp(lgopts[opt_idx].name, "mbuf-pool-ops")) {
+				mbuf_pool_ops = strdup(optarg);
+			}
 			if (!strcmp(lgopts[opt_idx].name, "pkt-filter-mode")) {
 				if (!strcmp(optarg, "signature"))
 					fdir_conf.mode =
diff --git a/app/test-pmd/testpmd.c b/app/test-pmd/testpmd.c
index cc3d2d0..669bf97 100644
--- a/app/test-pmd/testpmd.c
+++ b/app/test-pmd/testpmd.c
@@ -167,6 +167,7 @@ uint32_t burst_tx_retry_num = BURST_TX_RETRIES;
 uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
 uint32_t param_total_num_mbufs = 0;  /**< number of mbufs in all pools - if
                                       * specified on command-line. */
+const char *mbuf_pool_ops = NULL;
 
 /*
  * Configuration of packet segments used by the "txonly" processing engine.
@@ -419,6 +420,7 @@ mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
 	char pool_name[RTE_MEMPOOL_NAMESIZE];
 	struct rte_mempool *rte_mp = NULL;
 	uint32_t mb_size;
+	int ret;
 
 	mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
 	mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));
@@ -444,6 +446,17 @@ mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
 				sizeof(struct rte_pktmbuf_pool_private),
 				socket_id, 0);
 
+			if (rte_mp != NULL) {
+				ret = rte_mempool_set_ops_byname(rte_mp,
+					mbuf_pool_ops, NULL);
+				if (ret != 0) {
+					RTE_LOG(ERR, MBUF,
+						"cannot set mempool handler\n");
+					rte_mempool_free(rte_mp);
+					rte_mp = NULL;
+				}
+			}
+
 			if (rte_mempool_populate_anon(rte_mp) == 0) {
 				rte_mempool_free(rte_mp);
 				rte_mp = NULL;
@@ -454,7 +467,7 @@ mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
 			/* wrapper to rte_mempool_create() */
 			rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
 				mb_mempool_cache, 0, mbuf_seg_size, socket_id,
-				NULL);
+				mbuf_pool_ops);
 		}
 	}
 
diff --git a/app/test-pmd/testpmd.h b/app/test-pmd/testpmd.h
index 2b281cc..c7bab77 100644
--- a/app/test-pmd/testpmd.h
+++ b/app/test-pmd/testpmd.h
@@ -357,6 +357,7 @@ extern enum dcb_queue_mapping_mode dcb_q_mapping;
 
 extern uint16_t mbuf_data_size; /**< Mbuf data space size. */
 extern uint32_t param_total_num_mbufs;
+extern const char *mbuf_pool_ops;  /**< mbuf pool handler to use */
 
 extern struct rte_fdir_conf fdir_conf;
 
-- 
2.8.1
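
The core of the change is in mbuf_pool_create(): when testpmd builds the
pool from anonymous memory via rte_mempool_create_empty(), the handler
named on the command line is attached with rte_mempool_set_ops_byname()
before the pool is populated; in the normal path the name is simply
forwarded to rte_pktmbuf_pool_create(). The fragment below is a minimal
standalone sketch of that create-empty / set-ops / populate sequence, not
part of the patch; the pool name, element count, cache size and the
"ring_mp_mc" handler name are illustrative assumptions.

#include <rte_mbuf.h>
#include <rte_mempool.h>

/* Build a pktmbuf pool by hand and select an alternative mempool
 * handler by name before the pool is populated. */
static struct rte_mempool *
pktmbuf_pool_with_ops(const char *ops_name, unsigned int nb_mbuf,
		      int socket_id)
{
	struct rte_mempool *mp;

	mp = rte_mempool_create_empty("example_mbuf_pool", nb_mbuf,
		sizeof(struct rte_mbuf) + RTE_MBUF_DEFAULT_BUF_SIZE,
		0 /* per-lcore cache */,
		sizeof(struct rte_pktmbuf_pool_private), socket_id, 0);
	if (mp == NULL)
		return NULL;

	/* Replace the default handler (e.g. "ring_mp_mc") by name; the
	 * handler must have been registered with MEMPOOL_REGISTER_OPS(). */
	if (rte_mempool_set_ops_byname(mp, ops_name, NULL) != 0) {
		rte_mempool_free(mp);
		return NULL;
	}

	/* Initialize the pktmbuf private area, allocate the objects with
	 * the selected handler, then init each mbuf. */
	rte_pktmbuf_pool_init(mp, NULL);
	if (rte_mempool_populate_default(mp) < 0) {
		rte_mempool_free(mp);
		return NULL;
	}
	rte_mempool_obj_iter(mp, rte_pktmbuf_init, NULL);

	return mp;
}

With the patch applied, the same effect is obtained from the testpmd
command line, for example (EAL options elided, handler name illustrative):
testpmd ... -- --mbuf-pool-ops=ring_mp_mc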

Thread overview: 15+ messages
2016-09-19 13:42 [dpdk-dev] [RFC 0/7] changing mbuf pool handler Olivier Matz
2016-09-19 13:42 ` [dpdk-dev] [RFC 1/7] mbuf: set the handler at mbuf pool creation Olivier Matz
2016-09-19 13:42 ` [dpdk-dev] [RFC 2/7] mbuf: use helper to create the pool Olivier Matz
2017-01-16 15:30   ` Santosh Shukla
2017-01-31 10:31     ` Olivier Matz
2016-09-19 13:42 ` Olivier Matz [this message]
2016-09-19 13:42 ` [dpdk-dev] [RFC 4/7] l3fwd: rework long options parsing Olivier Matz
2016-09-19 13:42 ` [dpdk-dev] [RFC 5/7] l3fwd: new parameter to set mbuf pool ops Olivier Matz
2016-09-19 13:42 ` [dpdk-dev] [RFC 6/7] l2fwd: rework long options parsing Olivier Matz
2016-09-19 13:42 ` [dpdk-dev] [RFC 7/7] l2fwd: new parameter to set mbuf pool ops Olivier Matz
2016-09-22 11:52 ` [dpdk-dev] [RFC 0/7] changing mbuf pool handler Hemant Agrawal
2016-10-03 15:49   ` Olivier Matz
2016-10-05  9:41     ` Hunt, David
2016-10-05 11:49       ` Hemant Agrawal
2016-10-05 13:15         ` Hunt, David
