DPDK patches and discussions
* [dpdk-dev] [PATCH v2] examples/ip_pipeline: CPU utilization measurement and display
@ 2016-02-22 14:07 Fan Zhang
  2016-03-07 11:31 ` Thomas Monjalon
  2016-03-09  6:50 ` Wan, Qun
  0 siblings, 2 replies; 5+ messages in thread
From: Fan Zhang @ 2016-02-22 14:07 UTC (permalink / raw)
  To: dev

This patch adds CPU utilization measurement and idle cycle rate
computation to the packet framework. The measurement is done by counting
the cycles spent while a thread pulls zero packets from its RX queues.
These cycles are treated as idle cycles (or headroom). A CLI command is
added to display the idle cycle rate of a specific thread. The CLI
command format is:

t <thread_id> headroom
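
For reference, the measurement boils down to the pattern sketched below
(a simplified sketch with illustrative names; the actual code is in the
diff that follows):

#include <rte_cycles.h>
#include <rte_pipeline.h>

/* Accumulate the cycles spent on polls that return zero packets. */
static inline void
pipeline_run_measured(struct rte_pipeline *p, uint64_t *headroom_cycles)
{
	uint64_t t0 = rte_rdtsc_precise();
	int n_pkts = rte_pipeline_run(p);

	if (n_pkts == 0)
		*headroom_cycles += rte_rdtsc_precise() - t0;
}

/* Periodically turn the accumulated idle cycles into a ratio of the
 * elapsed TSC cycles, then restart the measurement window.
 */
static inline double
headroom_ratio_update(uint64_t *headroom_cycles, uint64_t *window_start)
{
	uint64_t now = rte_rdtsc_precise();
	double ratio = (double)*headroom_cycles / (double)(now - *window_start);

	*headroom_cycles = 0;
	*window_start = now;
	return ratio;
}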

Signed-off-by: Fan Zhang <roy.fan.zhang@intel.com>
Acked-by: Cristian Dumitrescu <cristian.dumitrescu@intel.com>
---
 examples/ip_pipeline/app.h       |   8 +++
 examples/ip_pipeline/init.c      |   8 ++-
 examples/ip_pipeline/thread.c    |  66 ++++++++++++++++++++++-
 examples/ip_pipeline/thread.h    |  14 +++++
 examples/ip_pipeline/thread_fe.c | 113 +++++++++++++++++++++++++++++++++++++++
 examples/ip_pipeline/thread_fe.h |   6 +++
 6 files changed, 211 insertions(+), 4 deletions(-)

diff --git a/examples/ip_pipeline/app.h b/examples/ip_pipeline/app.h
index 6510d6d..2c91256 100644
--- a/examples/ip_pipeline/app.h
+++ b/examples/ip_pipeline/app.h
@@ -263,6 +263,10 @@ struct app_thread_data {
 
 	struct rte_ring *msgq_in;
 	struct rte_ring *msgq_out;
+
+	uint64_t headroom_time;
+	uint64_t headroom_cycles;
+	double headroom_ratio;
 };
 
 struct app_eal_params {
@@ -421,6 +425,10 @@ struct app_eal_params {
 #define APP_MAX_CMDS                             64
 #endif
 
+#ifndef APP_THREAD_HEADROOM_STATS_COLLECT
+#define APP_THREAD_HEADROOM_STATS_COLLECT        1
+#endif
+
 struct app_params {
 	/* Config */
 	char app_name[APP_APPNAME_SIZE];
diff --git a/examples/ip_pipeline/init.c b/examples/ip_pipeline/init.c
index 186ca03..af33e8f 100644
--- a/examples/ip_pipeline/init.c
+++ b/examples/ip_pipeline/init.c
@@ -1343,8 +1343,8 @@ app_init_pipelines(struct app_params *app)
 
 		data->ptype = ptype;
 
-		data->timer_period = (rte_get_tsc_hz() * params->timer_period)
-			/ 1000;
+		data->timer_period = (rte_get_tsc_hz() *
+			params->timer_period) / 100;
 	}
 }
 
@@ -1379,6 +1379,10 @@ app_init_threads(struct app_params *app)
 		t->timer_period = (rte_get_tsc_hz() * APP_THREAD_TIMER_PERIOD) / 1000;
 		t->thread_req_deadline = time + t->timer_period;
 
+		t->headroom_cycles = 0;
+		t->headroom_time = rte_get_tsc_cycles();
+		t->headroom_ratio = 0.0;
+
 		t->msgq_in = app_thread_msgq_in_get(app,
 				params->socket_id,
 				params->core_id,
diff --git a/examples/ip_pipeline/thread.c b/examples/ip_pipeline/thread.c
index 78f1bd8..a0f1f12 100644
--- a/examples/ip_pipeline/thread.c
+++ b/examples/ip_pipeline/thread.c
@@ -39,6 +39,43 @@
 #include "app.h"
 #include "thread.h"
 
+#if APP_THREAD_HEADROOM_STATS_COLLECT
+
+#define PIPELINE_RUN_REGULAR(thread, pipeline)		\
+do {							\
+	uint64_t t0 = rte_rdtsc_precise();		\
+	int n_pkts = rte_pipeline_run(pipeline->p);	\
+							\
+	if (n_pkts == 0) {				\
+		uint64_t t1 = rte_rdtsc_precise();	\
+							\
+		thread->headroom_cycles += t1 - t0;	\
+	}						\
+} while (0)
+
+
+#define PIPELINE_RUN_CUSTOM(thread, data)		\
+do {							\
+	uint64_t t0 = rte_rdtsc_precise();		\
+	int n_pkts = data->f_run(data->be);		\
+							\
+	if (n_pkts == 0) {				\
+		uint64_t t1 = rte_rdtsc_precise();	\
+							\
+		thread->headroom_cycles += t1 - t0;	\
+	}						\
+} while (0)
+
+#else
+
+#define PIPELINE_RUN_REGULAR(thread, pipeline)		\
+	rte_pipeline_run(pipeline->p)
+
+#define PIPELINE_RUN_CUSTOM(thread, data)		\
+	data->f_run(data->be)
+
+#endif
+
 static inline void *
 thread_msg_recv(struct rte_ring *r)
 {
@@ -165,6 +202,17 @@ thread_msg_req_handle(struct app_thread_data *t)
 			thread_msg_send(t->msgq_out, rsp);
 			break;
 		}
+
+		case THREAD_MSG_REQ_HEADROOM_READ: {
+			struct thread_headroom_read_msg_rsp *rsp =
+				(struct thread_headroom_read_msg_rsp *)
+				req;
+
+			rsp->headroom_ratio = t->headroom_ratio;
+			rsp->status = 0;
+			thread_msg_send(t->msgq_out, rsp);
+			break;
+		}
 		default:
 			break;
 		}
@@ -172,6 +220,18 @@ thread_msg_req_handle(struct app_thread_data *t)
 	return 0;
 }
 
+static void
+thread_headroom_update(struct app_thread_data *t, uint64_t time)
+{
+	uint64_t time_diff = time - t->headroom_time;
+
+	t->headroom_ratio =
+		((double) t->headroom_cycles) / ((double) time_diff);
+
+	t->headroom_cycles = 0;
+	t->headroom_time = rte_rdtsc_precise();
+}
+
 int
 app_thread(void *arg)
 {
@@ -188,14 +248,14 @@ app_thread(void *arg)
 			struct app_thread_pipeline_data *data = &t->regular[j];
 			struct pipeline *p = data->be;
 
-			rte_pipeline_run(p->p);
+			PIPELINE_RUN_REGULAR(t, p);
 		}
 
 		/* Run custom pipelines */
 		for (j = 0; j < n_custom; j++) {
 			struct app_thread_pipeline_data *data = &t->custom[j];
 
-			data->f_run(data->be);
+			PIPELINE_RUN_CUSTOM(t, data);
 		}
 
 		/* Timer */
@@ -244,6 +304,7 @@ app_thread(void *arg)
 
 				if (deadline <= time) {
 					thread_msg_req_handle(t);
+					thread_headroom_update(t, time);
 					deadline = time + t->timer_period;
 					t->thread_req_deadline = deadline;
 				}
@@ -252,6 +313,7 @@ app_thread(void *arg)
 					t_deadline = deadline;
 			}
 
+
 			t->deadline = t_deadline;
 		}
 	}
diff --git a/examples/ip_pipeline/thread.h b/examples/ip_pipeline/thread.h
index dc877c0..e52b22e 100644
--- a/examples/ip_pipeline/thread.h
+++ b/examples/ip_pipeline/thread.h
@@ -40,6 +40,7 @@
 enum thread_msg_req_type {
 	THREAD_MSG_REQ_PIPELINE_ENABLE = 0,
 	THREAD_MSG_REQ_PIPELINE_DISABLE,
+	THREAD_MSG_REQ_HEADROOM_READ,
 	THREAD_MSG_REQS
 };
 
@@ -81,4 +82,17 @@ struct thread_pipeline_disable_msg_rsp {
 	int status;
 };
 
+/*
+ * THREAD HEADROOM
+ */
+struct thread_headroom_read_msg_req {
+	enum thread_msg_req_type type;
+};
+
+struct thread_headroom_read_msg_rsp {
+	int status;
+
+	double headroom_ratio;
+};
+
 #endif /* THREAD_H_ */
diff --git a/examples/ip_pipeline/thread_fe.c b/examples/ip_pipeline/thread_fe.c
index 95f0107..4a435f7 100644
--- a/examples/ip_pipeline/thread_fe.c
+++ b/examples/ip_pipeline/thread_fe.c
@@ -170,6 +170,54 @@ app_pipeline_disable(struct app_params *app,
 	return 0;
 }
 
+int
+app_thread_headroom(struct app_params *app,
+		uint32_t socket_id,
+		uint32_t core_id,
+		uint32_t hyper_th_id)
+{
+	struct thread_headroom_read_msg_req *req;
+	struct thread_headroom_read_msg_rsp *rsp;
+	int thread_id;
+	int status;
+
+	if (app == NULL)
+		return -1;
+
+	thread_id = cpu_core_map_get_lcore_id(app->core_map,
+			socket_id,
+			core_id,
+			hyper_th_id);
+
+	if ((thread_id < 0) ||
+		((app->core_mask & (1LLU << thread_id)) == 0))
+		return -1;
+
+	req = app_msg_alloc(app);
+	if (req == NULL)
+		return -1;
+
+	req->type = THREAD_MSG_REQ_HEADROOM_READ;
+
+	rsp = thread_msg_send_recv(app,
+		socket_id, core_id, hyper_th_id, req, MSG_TIMEOUT_DEFAULT);
+
+	if (rsp == NULL)
+		return -1;
+
+	status = rsp->status;
+
+	if (status != 0)
+		return -1;
+
+	printf("%.3f%%\n", rsp->headroom_ratio * 100);
+
+
+	app_msg_free(app, rsp);
+
+	return 0;
+}
+
 /*
  * pipeline enable
  */
@@ -318,9 +366,74 @@ cmdline_parse_inst_t cmd_pipeline_disable = {
 	},
 };
 
+
+/*
+ * thread headroom
+ */
+
+struct cmd_thread_headroom_result {
+	cmdline_fixed_string_t t_string;
+	cmdline_fixed_string_t t_id_string;
+	cmdline_fixed_string_t headroom_string;
+};
+
+static void
+cmd_thread_headroom_parsed(
+	void *parsed_result,
+	__rte_unused struct cmdline *cl,
+	 void *data)
+{
+	struct cmd_thread_headroom_result *params = parsed_result;
+	struct app_params *app = data;
+	int status;
+	uint32_t core_id, socket_id, hyper_th_id;
+
+	if (parse_pipeline_core(&socket_id,
+			&core_id,
+			&hyper_th_id,
+			params->t_id_string) != 0) {
+		printf("Command failed\n");
+		return;
+	}
+
+	status = app_thread_headroom(app,
+			socket_id,
+			core_id,
+			hyper_th_id);
+
+	if (status != 0)
+		printf("Command failed\n");
+}
+
+cmdline_parse_token_string_t cmd_thread_headroom_t_string =
+	TOKEN_STRING_INITIALIZER(struct cmd_thread_headroom_result,
+	t_string, "t");
+
+cmdline_parse_token_string_t cmd_thread_headroom_t_id_string =
+	TOKEN_STRING_INITIALIZER(struct cmd_thread_headroom_result,
+	t_id_string, NULL);
+
+cmdline_parse_token_string_t cmd_thread_headroom_headroom_string =
+	TOKEN_STRING_INITIALIZER(struct cmd_thread_headroom_result,
+		headroom_string, "headroom");
+
+cmdline_parse_inst_t cmd_thread_headroom = {
+	.f = cmd_thread_headroom_parsed,
+	.data = NULL,
+	.help_str = "Display thread headroom",
+	.tokens = {
+		(void *)&cmd_thread_headroom_t_string,
+		(void *)&cmd_thread_headroom_t_id_string,
+		(void *)&cmd_thread_headroom_headroom_string,
+		NULL,
+	},
+};
+
+
 static cmdline_parse_ctx_t thread_cmds[] = {
 	(cmdline_parse_inst_t *) &cmd_pipeline_enable,
 	(cmdline_parse_inst_t *) &cmd_pipeline_disable,
+	(cmdline_parse_inst_t *) &cmd_thread_headroom,
 	NULL,
 };
 
diff --git a/examples/ip_pipeline/thread_fe.h b/examples/ip_pipeline/thread_fe.h
index 52352c1..2fd4ee8 100644
--- a/examples/ip_pipeline/thread_fe.h
+++ b/examples/ip_pipeline/thread_fe.h
@@ -92,4 +92,10 @@ app_pipeline_disable(struct app_params *app,
 		uint32_t hyper_th_id,
 		uint32_t pipeline_id);
 
+int
+app_thread_headroom(struct app_params *app,
+		uint32_t core_id,
+		uint32_t socket_id,
+		uint32_t hyper_th_id);
+
 #endif /* THREAD_FE_H_ */
-- 
2.5.0

* Re: [dpdk-dev] [PATCH v2] examples/ip_pipeline: CPU utilization measurement and display
  2016-02-22 14:07 [dpdk-dev] [PATCH v2] examples/ip_pipeline: CPU utilization measurement and display Fan Zhang
@ 2016-03-07 11:31 ` Thomas Monjalon
  2016-03-07 11:33   ` Thomas Monjalon
  2016-03-09  6:50 ` Wan, Qun
  1 sibling, 1 reply; 5+ messages in thread
From: Thomas Monjalon @ 2016-03-07 11:31 UTC (permalink / raw)
  To: Fan Zhang; +Cc: dev

2016-02-22 14:07, Fan Zhang:
> This patch adds CPU utilization measurement and idle cycle rate
> computation to the packet framework. The measurement is done by counting
> the cycles spent while a thread pulls zero packets from its RX queues.
> These cycles are treated as idle cycles (or headroom). A CLI command is
> added to display the idle cycle rate of a specific thread. The CLI
> command format is:
> 
> t <thread_id> headroom
> 
> Signed-off-by: Fan Zhang <roy.fan.zhang@intel.com>
> Acked-by: Cristian Dumitrescu <cristian.dumitrescu@intel.com>

Applied, thanks

* Re: [dpdk-dev] [PATCH v2] examples/ip_pipeline: CPU utilization measurement and display
  2016-03-07 11:31 ` Thomas Monjalon
@ 2016-03-07 11:33   ` Thomas Monjalon
  2016-03-08  8:05     ` Dumitrescu, Cristian
  0 siblings, 1 reply; 5+ messages in thread
From: Thomas Monjalon @ 2016-03-07 11:33 UTC (permalink / raw)
  To: Fan Zhang; +Cc: dev, Pawel Wodkowski

2016-03-07 12:31, Thomas Monjalon:
> 2016-02-22 14:07, Fan Zhang:
> > This patch adds CPU utilization measurement and idle cycle rate
> > computation to the packet framework. The measurement is done by counting
> > the cycles spent while a thread pulls zero packets from its RX queues.
> > These cycles are treated as idle cycles (or headroom). A CLI command is
> > added to display the idle cycle rate of a specific thread. The CLI
> > command format is:
> > 
> > t <thread_id> headroom
> > 
> > Signed-off-by: Fan Zhang <roy.fan.zhang@intel.com>
> > Acked-by: Cristian Dumitrescu <cristian.dumitrescu@intel.com>
> 
> Applied, thanks

Question: Would you be helped by librte_jobstats?
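
(For context, a loop instrumented with librte_jobstats would look roughly
like the sketch below, following the l2fwd-jobstats example. The calls are
from rte_jobstats.h, but the exact signatures and the placeholder
do_rx_work() are assumptions, to be checked against the header.)

#include <stdint.h>
#include <rte_cycles.h>
#include <rte_jobstats.h>

static struct rte_jobstats_context ctx;
static struct rte_jobstats rx_job;

/* Hypothetical stand-in for the per-iteration RX/pipeline work. */
static int
do_rx_work(void)
{
	return 0;
}

static void
poll_loop_with_jobstats(void)
{
	/* Job period of roughly 1 ms, expressed in TSC cycles (assumption). */
	uint64_t period = rte_get_tsc_hz() / 1000;

	rte_jobstats_context_init(&ctx);
	rte_jobstats_init(&rx_job, "rx", period, period, period, 0);

	for ( ; ; ) {
		rte_jobstats_context_start(&ctx);
		rte_jobstats_start(&ctx, &rx_job);
		rte_jobstats_finish(&rx_job, do_rx_work());
		rte_jobstats_context_finish(&ctx);
		/* Busy vs. idle time is accumulated inside ctx. */
	}
}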

* Re: [dpdk-dev] [PATCH v2] examples/ip_pipeline: CPU utilization measurement and display
  2016-03-07 11:33   ` Thomas Monjalon
@ 2016-03-08  8:05     ` Dumitrescu, Cristian
  0 siblings, 0 replies; 5+ messages in thread
From: Dumitrescu, Cristian @ 2016-03-08  8:05 UTC (permalink / raw)
  To: Thomas Monjalon, Zhang, Roy Fan; +Cc: dev, Wodkowski, PawelX



> -----Original Message-----
> From: dev [mailto:dev-bounces@dpdk.org] On Behalf Of Thomas Monjalon
> Sent: Monday, March 7, 2016 11:34 AM
> To: Zhang, Roy Fan <roy.fan.zhang@intel.com>
> Cc: dev@dpdk.org; Wodkowski, PawelX <pawelx.wodkowski@intel.com>
> Subject: Re: [dpdk-dev] [PATCH v2] examples/ip_pipeline: CPU utilization
> measurement and display
> 
> 2016-03-07 12:31, Thomas Monjalon:
> > 2016-02-22 14:07, Fan Zhang:
> > > This patch adds CPU utilization measurement and idle cycle rate
> > > computation to the packet framework. The measurement is done by
> > > counting the cycles spent while a thread pulls zero packets from its
> > > RX queues. These cycles are treated as idle cycles (or headroom). A
> > > CLI command is added to display the idle cycle rate of a specific
> > > thread. The CLI command format is:
> > >
> > > t <thread_id> headroom
> > >
> > > Signed-off-by: Fan Zhang <roy.fan.zhang@intel.com>
> > > Acked-by: Cristian Dumitrescu <cristian.dumitrescu@intel.com>
> >
> > Applied, thanks
> 
> Question: Would you be helped by librte_jobstats?

All we need for now is retrieving the current TSC value. We might consider using librte_jobstats for the next release.
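
(For reference, reading the TSC through DPDK only needs rte_cycles.h; a
minimal sketch with an illustrative helper name:)

#include <stdint.h>
#include <rte_cycles.h>

/* Convert the TSC delta since start_cycles into seconds. */
static double
tsc_interval_seconds(uint64_t start_cycles)
{
	uint64_t now = rte_get_tsc_cycles();

	return (double)(now - start_cycles) / (double)rte_get_tsc_hz();
}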

* Re: [dpdk-dev] [PATCH v2] examples/ip_pipeline: CPU utilization measurement and display
  2016-02-22 14:07 [dpdk-dev] [PATCH v2] examples/ip_pipeline: CPU utilization measurement and display Fan Zhang
  2016-03-07 11:31 ` Thomas Monjalon
@ 2016-03-09  6:50 ` Wan, Qun
  1 sibling, 0 replies; 5+ messages in thread
From: Wan, Qun @ 2016-03-09  6:50 UTC (permalink / raw)
  To: Zhang, Roy Fan, dev

Tested-by: Qun Wan <qun.wan@intel.com>
pipeline> t s1c1 headroom
57.085%

-----Original Message-----
From: dev [mailto:dev-bounces@dpdk.org] On Behalf Of Fan Zhang
Sent: Monday, February 22, 2016 10:07 PM
To: dev@dpdk.org
Subject: [dpdk-dev] [PATCH v2] examples/ip_pipeline: CPU utilization measurement and display

This patch adds CPU utilization measurement and idle cycle rate computation to the packet framework. The measurement is done by counting the cycles spent while a thread pulls zero packets from its RX queues. These cycles are treated as idle cycles (or headroom). A CLI command is added to display the idle cycle rate of a specific thread. The CLI command format is:

t <thread_id> headroom

Signed-off-by: Fan Zhang <roy.fan.zhang@intel.com>
Acked-by: Cristian Dumitrescu <cristian.dumitrescu@intel.com>
