DPDK patches and discussions
* [dpdk-dev] [PATCH] app/flow-perf: export some configuration options
@ 2021-10-04 12:55 Wisam Jaddo
  2021-10-25 19:23 ` Thomas Monjalon
  0 siblings, 1 reply; 2+ messages in thread
From: Wisam Jaddo @ 2021-10-04 12:55 UTC
  To: dev, suanmingm, arybchenko; +Cc: asafp

Some options are often needed at runtime, so fixing them at compile
time is not practical. As a result, these options have been exported
as command-line options that can be set at run time.

The options exported are:
--txq=N
--rxq=N
--txd=N
--rxd=N
--mbuf-size=N
--mbuf-cache-size=N
--total-mbuf-count=N
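
For example, a possible invocation combining the new options (the EAL
arguments and the values below are illustrative only):

  dpdk-test-flow-perf -n 4 -a 0000:03:00.0 -- \
      --ingress --ether --ipv4 --queue --rules-count=1000000 \
      --rxq=4 --txq=4 --rxd=128 --txd=128 \
      --mbuf-size=2048 --mbuf-cache-size=512 --total-mbuf-count=64000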

Signed-off-by: Wisam Jaddo <wisamm@nvidia.com>
Reviewed-by: Alexander Kozyrev <akozyrev@nvidia.com>
---
 app/test-flow-perf/actions_gen.c |  14 ++---
 app/test-flow-perf/actions_gen.h |   2 +-
 app/test-flow-perf/config.h      |   4 +-
 app/test-flow-perf/flow_gen.c    |   3 +-
 app/test-flow-perf/flow_gen.h    |   1 +
 app/test-flow-perf/main.c        | 102 +++++++++++++++++++++++++------
 doc/guides/tools/flow-perf.rst   |  33 ++++++++--
 7 files changed, 124 insertions(+), 35 deletions(-)
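
[Not part of the patch: a minimal standalone sketch of the getopt_long
pattern used in main.c below. The strtol range check is added here for
illustration only; the patch itself parses with atoi() and stores the
value in a uint8_t, so inputs above 255 would silently truncate.]

#include <getopt.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static uint8_t rx_queues_count = 1; /* default mirrors RXQ_NUM */

int
main(int argc, char **argv)
{
	static const struct option lgopts[] = {
		{ "rxq", 1, 0, 0 },	/* required_argument, like the patch */
		{ 0, 0, 0, 0 },
	};
	int opt_idx;

	/* getopt_long() returns 0 for long options whose flag is NULL
	 * and val is 0, as in the table above; -1 or '?' ends the loop.
	 */
	while (getopt_long(argc, argv, "", lgopts, &opt_idx) == 0) {
		if (strcmp(lgopts[opt_idx].name, "rxq") == 0) {
			long n = strtol(optarg, NULL, 10);

			/* Illustrative check the patch does not do:
			 * reject values that cannot fit in a uint8_t.
			 */
			if (n < 1 || n > UINT8_MAX) {
				fprintf(stderr, "rxq out of range: %ld\n", n);
				return EXIT_FAILURE;
			}
			rx_queues_count = (uint8_t)n;
		}
	}
	printf("rxq = %d\n", rx_queues_count);
	return 0;
}

Built as a plain C file, running it with --rxq=300 reports an error
instead of silently truncating 300 to 44 (300 modulo 256).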

diff --git a/app/test-flow-perf/actions_gen.c b/app/test-flow-perf/actions_gen.c
index 82cddfc676..7c209f7266 100644
--- a/app/test-flow-perf/actions_gen.c
+++ b/app/test-flow-perf/actions_gen.c
@@ -909,25 +909,25 @@ void
 fill_actions(struct rte_flow_action *actions, uint64_t *flow_actions,
 	uint32_t counter, uint16_t next_table, uint16_t hairpinq,
 	uint64_t encap_data, uint64_t decap_data, uint8_t core_idx,
-	bool unique_data)
+	bool unique_data, uint8_t rx_queues_count)
 {
 	struct additional_para additional_para_data;
 	uint8_t actions_counter = 0;
 	uint16_t hairpin_queues[hairpinq];
-	uint16_t queues[RXQ_NUM];
+	uint16_t queues[rx_queues_count];
 	uint16_t i, j;
 
-	for (i = 0; i < RXQ_NUM; i++)
+	for (i = 0; i < rx_queues_count; i++)
 		queues[i] = i;
 
 	for (i = 0; i < hairpinq; i++)
-		hairpin_queues[i] = i + RXQ_NUM;
+		hairpin_queues[i] = i + rx_queues_count;
 
 	additional_para_data = (struct additional_para){
-		.queue = counter % RXQ_NUM,
+		.queue = counter % rx_queues_count,
 		.next_table = next_table,
 		.queues = queues,
-		.queues_number = RXQ_NUM,
+		.queues_number = rx_queues_count,
 		.counter = counter,
 		.encap_data = encap_data,
 		.decap_data = decap_data,
@@ -938,7 +938,7 @@ fill_actions(struct rte_flow_action *actions, uint64_t *flow_actions,
 	if (hairpinq != 0) {
 		additional_para_data.queues = hairpin_queues;
 		additional_para_data.queues_number = hairpinq;
-		additional_para_data.queue = (counter % hairpinq) + RXQ_NUM;
+		additional_para_data.queue = (counter % hairpinq) + rx_queues_count;
 	}
 
 	static const struct actions_dict {
diff --git a/app/test-flow-perf/actions_gen.h b/app/test-flow-perf/actions_gen.h
index 6f2f833496..8990686269 100644
--- a/app/test-flow-perf/actions_gen.h
+++ b/app/test-flow-perf/actions_gen.h
@@ -20,6 +20,6 @@
 void fill_actions(struct rte_flow_action *actions, uint64_t *flow_actions,
 	uint32_t counter, uint16_t next_table, uint16_t hairpinq,
 	uint64_t encap_data, uint64_t decap_data, uint8_t core_idx,
-	bool unique_data);
+	bool unique_data, uint8_t rx_queues_count);
 
 #endif /* FLOW_PERF_ACTION_GEN */
diff --git a/app/test-flow-perf/config.h b/app/test-flow-perf/config.h
index a14d4e05e1..3d85e0d49a 100644
--- a/app/test-flow-perf/config.h
+++ b/app/test-flow-perf/config.h
@@ -8,8 +8,8 @@
 #define GET_RSS_HF() (ETH_RSS_IP)
 
 /* Configuration */
-#define RXQ_NUM 4
-#define TXQ_NUM 4
+#define RXQ_NUM 1
+#define TXQ_NUM 1
 #define TOTAL_MBUF_NUM 32000
 #define MBUF_SIZE 2048
 #define MBUF_CACHE_SIZE 512
diff --git a/app/test-flow-perf/flow_gen.c b/app/test-flow-perf/flow_gen.c
index 8f87fac5f6..51871dbfdc 100644
--- a/app/test-flow-perf/flow_gen.c
+++ b/app/test-flow-perf/flow_gen.c
@@ -46,6 +46,7 @@ generate_flow(uint16_t port_id,
 	uint64_t encap_data,
 	uint64_t decap_data,
 	uint8_t core_idx,
+	uint8_t rx_queues_count,
 	bool unique_data,
 	struct rte_flow_error *error)
 {
@@ -63,7 +64,7 @@ generate_flow(uint16_t port_id,
 	fill_actions(actions, flow_actions,
 		outer_ip_src, next_table, hairpinq,
 		encap_data, decap_data, core_idx,
-		unique_data);
+		unique_data, rx_queues_count);
 
 	fill_items(items, flow_items, outer_ip_src, core_idx);
 
diff --git a/app/test-flow-perf/flow_gen.h b/app/test-flow-perf/flow_gen.h
index dc887fceae..1118a9fc14 100644
--- a/app/test-flow-perf/flow_gen.h
+++ b/app/test-flow-perf/flow_gen.h
@@ -35,6 +35,7 @@ generate_flow(uint16_t port_id,
 	uint64_t encap_data,
 	uint64_t decap_data,
 	uint8_t core_idx,
+	uint8_t rx_queues_count,
 	bool unique_data,
 	struct rte_flow_error *error);
 
diff --git a/app/test-flow-perf/main.c b/app/test-flow-perf/main.c
index b99e603f81..102e9e6ede 100644
--- a/app/test-flow-perf/main.c
+++ b/app/test-flow-perf/main.c
@@ -65,6 +65,14 @@ static bool dump_socket_mem_flag;
 static bool enable_fwd;
 static bool unique_data;
 
+static uint8_t rx_queues_count;
+static uint8_t tx_queues_count;
+static uint8_t rxd_count;
+static uint8_t txd_count;
+static uint32_t mbuf_size;
+static uint32_t mbuf_cache_size;
+static uint32_t total_mbuf_num;
+
 static struct rte_mempool *mbuf_mp;
 static uint32_t nb_lcores;
 static uint32_t rules_count;
@@ -145,6 +153,14 @@ usage(char *progname)
 		" default is %d\n", DEFAULT_GROUP);
 	printf("  --cores=N: to set the number of needed "
 		"cores to insert rte_flow rules, default is 1\n");
+	printf("  --rxq=N: to set the count of receive queues\n");
+	printf("  --txq=N: to set the count of send queues\n");
+	printf("  --rxd=N: to set the count of rxd\n");
+	printf("  --txd=N: to set the count of txd\n");
+	printf("  --mbuf-size=N: to set the size of mbuf\n");
+	printf("  --mbuf-cache-size=N: to set the size of mbuf cache\n");
+	printf("  --total-mbuf-count=N: to set the count of total mbuf count\n");
+
 
 	printf("To set flow items:\n");
 	printf("  --ether: add ether layer in flow items\n");
@@ -575,6 +591,14 @@ args_parse(int argc, char **argv)
 		{ "unique-data",                0, 0, 0 },
 		{ "portmask",                   1, 0, 0 },
 		{ "cores",                      1, 0, 0 },
+		{ "meter-profile-alg",          1, 0, 0 },
+		{ "rxq",                        1, 0, 0 },
+		{ "txq",                        1, 0, 0 },
+		{ "rxd",                        1, 0, 0 },
+		{ "txd",                        1, 0, 0 },
+		{ "mbuf-size",                  1, 0, 0 },
+		{ "mbuf-cache-size",            1, 0, 0 },
+		{ "total-mbuf-count",           1, 0, 0 },
 		/* Attributes */
 		{ "ingress",                    0, 0, 0 },
 		{ "egress",                     0, 0, 0 },
@@ -625,7 +649,7 @@ args_parse(int argc, char **argv)
 		{ "set-ipv4-dscp",              0, 0, 0 },
 		{ "set-ipv6-dscp",              0, 0, 0 },
 		{ "flag",                       0, 0, 0 },
-		{ "meter",		        0, 0, 0 },
+		{ "meter",                      0, 0, 0 },
 		{ "raw-encap",                  1, 0, 0 },
 		{ "raw-decap",                  1, 0, 0 },
 		{ "vxlan-encap",                0, 0, 0 },
@@ -789,6 +813,34 @@ args_parse(int argc, char **argv)
 					rte_exit(EXIT_FAILURE, "Invalid fwd port mask\n");
 				ports_mask = pm;
 			}
+			if (strcmp(lgopts[opt_idx].name, "rxq") == 0) {
+				n = atoi(optarg);
+				rx_queues_count = (uint8_t) n;
+			}
+			if (strcmp(lgopts[opt_idx].name, "txq") == 0) {
+				n = atoi(optarg);
+				tx_queues_count = (uint8_t) n;
+			}
+			if (strcmp(lgopts[opt_idx].name, "rxd") == 0) {
+				n = atoi(optarg);
+				rxd_count = (uint8_t) n;
+			}
+			if (strcmp(lgopts[opt_idx].name, "txd") == 0) {
+				n = atoi(optarg);
+				txd_count = (uint8_t) n;
+			}
+			if (strcmp(lgopts[opt_idx].name, "mbuf-size") == 0) {
+				n = atoi(optarg);
+				mbuf_size = (uint32_t) n;
+			}
+			if (strcmp(lgopts[opt_idx].name, "mbuf-cache-size") == 0) {
+				n = atoi(optarg);
+				mbuf_cache_size = (uint32_t) n;
+			}
+			if (strcmp(lgopts[opt_idx].name, "total-mbuf-count") == 0) {
+				n = atoi(optarg);
+				total_mbuf_num = (uint32_t) n;
+			}
 			if (strcmp(lgopts[opt_idx].name, "cores") == 0) {
 				n = atoi(optarg);
 				if ((int) rte_lcore_count() <= n) {
@@ -1175,7 +1227,8 @@ insert_flows(int port_id, uint8_t core_id)
 		 */
 		flow = generate_flow(port_id, 0, flow_attrs,
 			global_items, global_actions,
-			flow_group, 0, 0, 0, 0, core_id, unique_data, &error);
+			flow_group, 0, 0, 0, 0, core_id, rx_queues_count,
+			unique_data, &error);
 
 		if (flow == NULL) {
 			print_flow_error(error);
@@ -1191,7 +1244,8 @@ insert_flows(int port_id, uint8_t core_id)
 			JUMP_ACTION_TABLE, counter,
 			hairpin_queues_num,
 			encap_data, decap_data,
-			core_id, unique_data, &error);
+			core_id, rx_queues_count,
+			unique_data, &error);
 
 		if (!counter) {
 			first_flow_latency = (double) (rte_get_timer_cycles() - start_batch);
@@ -1662,7 +1716,7 @@ init_lcore_info(void)
 	 * logical cores except first core, since it's reserved for
 	 * stats prints.
 	 */
-	nb_fwd_streams = nr_port * RXQ_NUM;
+	nb_fwd_streams = nr_port * rx_queues_count;
 	if ((int)(nb_lcores - 1) >= nb_fwd_streams)
 		for (i = 0; i < (int)(nb_lcores - 1); i++) {
 			lcore = rte_get_next_lcore(lcore, 0, 0);
@@ -1692,7 +1746,7 @@ init_lcore_info(void)
 	lcore = rte_get_next_lcore(-1, 0, 0);
 	for (port = 0; port < nr_port; port++) {
 		/* Create FWD stream */
-		for (queue = 0; queue < RXQ_NUM; queue++) {
+		for (queue = 0; queue < rx_queues_count; queue++) {
 			if (!lcore_infos[lcore].streams_nb ||
 				!(stream_id % lcore_infos[lcore].streams_nb)) {
 				lcore = rte_get_next_lcore(lcore, 0, 0);
@@ -1745,17 +1799,17 @@ init_port(void)
 	struct rte_eth_rxconf rxq_conf;
 	struct rte_eth_dev_info dev_info;
 
-	nr_queues = RXQ_NUM;
+	nr_queues = rx_queues_count;
 	if (hairpin_queues_num != 0)
-		nr_queues = RXQ_NUM + hairpin_queues_num;
+		nr_queues = rx_queues_count + hairpin_queues_num;
 
 	nr_ports = rte_eth_dev_count_avail();
 	if (nr_ports == 0)
 		rte_exit(EXIT_FAILURE, "Error: no port detected\n");
 
 	mbuf_mp = rte_pktmbuf_pool_create("mbuf_pool",
-					TOTAL_MBUF_NUM, MBUF_CACHE_SIZE,
-					0, MBUF_SIZE,
+					total_mbuf_num, mbuf_cache_size,
+					0, mbuf_size,
 					rte_socket_id());
 	if (mbuf_mp == NULL)
 		rte_exit(EXIT_FAILURE, "Error: can't init mbuf pool\n");
@@ -1781,8 +1835,8 @@ init_port(void)
 				ret, port_id);
 
 		rxq_conf = dev_info.default_rxconf;
-		for (std_queue = 0; std_queue < RXQ_NUM; std_queue++) {
-			ret = rte_eth_rx_queue_setup(port_id, std_queue, NR_RXD,
+		for (std_queue = 0; std_queue < rx_queues_count; std_queue++) {
+			ret = rte_eth_rx_queue_setup(port_id, std_queue, rxd_count,
 					rte_eth_dev_socket_id(port_id),
 					&rxq_conf,
 					mbuf_mp);
@@ -1793,8 +1847,8 @@ init_port(void)
 		}
 
 		txq_conf = dev_info.default_txconf;
-		for (std_queue = 0; std_queue < TXQ_NUM; std_queue++) {
-			ret = rte_eth_tx_queue_setup(port_id, std_queue, NR_TXD,
+		for (std_queue = 0; std_queue < tx_queues_count; std_queue++) {
+			ret = rte_eth_tx_queue_setup(port_id, std_queue, txd_count,
 					rte_eth_dev_socket_id(port_id),
 					&txq_conf);
 			if (ret < 0)
@@ -1814,32 +1868,32 @@ init_port(void)
 			/*
 			 * Configure peer which represents hairpin Tx.
 			 * Hairpin queue numbers start after standard queues
-			 * (RXQ_NUM and TXQ_NUM).
+			 * (rx_queues_count and tx_queues_count).
 			 */
-			for (hairpin_queue = RXQ_NUM, std_queue = 0;
+			for (hairpin_queue = rx_queues_count, std_queue = 0;
 					hairpin_queue < nr_queues;
 					hairpin_queue++, std_queue++) {
 				hairpin_conf.peers[0].port = port_id;
 				hairpin_conf.peers[0].queue =
-					std_queue + TXQ_NUM;
+					std_queue + tx_queues_count;
 				ret = rte_eth_rx_hairpin_queue_setup(
 						port_id, hairpin_queue,
-						NR_RXD, &hairpin_conf);
+						rxd_count, &hairpin_conf);
 				if (ret != 0)
 					rte_exit(EXIT_FAILURE,
 						":: Hairpin rx queue setup failed: err=%d, port=%u\n",
 						ret, port_id);
 			}
 
-			for (hairpin_queue = TXQ_NUM, std_queue = 0;
+			for (hairpin_queue = tx_queues_count, std_queue = 0;
 					hairpin_queue < nr_queues;
 					hairpin_queue++, std_queue++) {
 				hairpin_conf.peers[0].port = port_id;
 				hairpin_conf.peers[0].queue =
-					std_queue + RXQ_NUM;
+					std_queue + rx_queues_count;
 				ret = rte_eth_tx_hairpin_queue_setup(
 						port_id, hairpin_queue,
-						NR_TXD, &hairpin_conf);
+						txd_count, &hairpin_conf);
 				if (ret != 0)
 					rte_exit(EXIT_FAILURE,
 						":: Hairpin tx queue setup failed: err=%d, port=%u\n",
@@ -1877,6 +1931,14 @@ main(int argc, char **argv)
 	flow_group = DEFAULT_GROUP;
 	unique_data = false;
 
+	rx_queues_count = (uint8_t) RXQ_NUM;
+	tx_queues_count = (uint8_t) TXQ_NUM;
+	rxd_count = (uint8_t) NR_RXD;
+	txd_count = (uint8_t) NR_TXD;
+	mbuf_size = (uint32_t) MBUF_SIZE;
+	mbuf_cache_size = (uint32_t) MBUF_CACHE_SIZE;
+	total_mbuf_num = (uint32_t) TOTAL_MBUF_NUM;
+
 	signal(SIGINT, signal_handler);
 	signal(SIGTERM, signal_handler);
 
diff --git a/doc/guides/tools/flow-perf.rst b/doc/guides/tools/flow-perf.rst
index 280bf7e0e0..0855f88689 100644
--- a/doc/guides/tools/flow-perf.rst
+++ b/doc/guides/tools/flow-perf.rst
@@ -100,10 +100,35 @@ The command line options are:
 	Set the number of needed cores to insert/delete rte_flow rules.
 	Default cores count is 1.
 
-*       ``--unique-data``
-        Flag to set using unique data for all actions that support data,
-        Such as header modify and encap actions. Default is using fixed
-        data for any action that support data for all flows.
+*	``--meter-profile-alg``
+	Set the traffic metering algorithm.
+	Example: meter-profile-alg=srtcmp, default algorithm is srtcm_rfc2697
+
+*	``--unique-data``
+	Flag to set using unique data for all actions that support data,
+	Such as header modify and encap actions. Default is using fixed
+	data for any action that support data for all flows.
+
+*	``--rxq=N``
+	Set the count of receive queues, default is 1.
+
+*	``--txq=N``
+	Set the count of send queues, default is 1.
+
+*	``--rxd=N``
+	Set the count of rxd, default is 256.
+
+*	``--txd=N``
+	Set the count of txd, default is 256.
+
+*	``--mbuf-size=N``
+	Set the size of mbuf, default size is 2048.
+
+*	``--mbuf-cache-size=N``
+	Set the size of mbuf cache, default size is 512.
+
+*	``--total-mbuf-count=N``
+	Set the count of total mbuf number, default count is 32000.
 
 Attributes:
 
-- 
2.17.1


* Re: [dpdk-dev] [PATCH] app/flow-perf: export some configuration options
  2021-10-04 12:55 [dpdk-dev] [PATCH] app/flow-perf: export some configuration options Wisam Jaddo
@ 2021-10-25 19:23 ` Thomas Monjalon
  0 siblings, 0 replies; 2+ messages in thread
From: Thomas Monjalon @ 2021-10-25 19:23 UTC
  To: Wisam Jaddo; +Cc: dev, suanmingm, arybchenko, asafp

04/10/2021 14:55, Wisam Jaddo:
> Some options are often needed at runtime, so fixing them at compile
> time is not practical. As a result, these options have been exported
> as command-line options that can be set at run time.
> 
> The options exported are:
> --txq=N
> --rxq=N
> --txd=N
> --rxd=N
> --mbuf-size=N
> --mbuf-cache-size=N
> --total-mbuf-count=N
> 
> Signed-off-by: Wisam Jaddo <wisamm@nvidia.com>
> Reviewed-by: Alexander Kozyrev <akozyrev@nvidia.com>

Applied, thanks.



