DPDK patches and discussions
From: Ethan Zhuang <zhuangwj@gmail.com>
To: cristian.dumitrescu@intel.com
Cc: dev@dpdk.org, jasvinder.singh@intel.com, ferruh.yigit@intel.com,
	WeiJie Zhuang <zhuangwj@gmail.com>
Subject: [dpdk-dev] [PATCH v4 2/4] examples/ip_pipeline: kni interface support
Date: Tue, 21 Jun 2016 18:55:53 +0800
Message-ID: <1466506555-4236-2-git-send-email-zhuangwj@gmail.com>
In-Reply-To: <1466506555-4236-1-git-send-email-zhuangwj@gmail.com>

From: WeiJie Zhuang <zhuangwj@gmail.com>

1. Add KNI support to the IP Pipeline sample application.
2. Fix minor bugs: initialize local variables in the app_*_get_reader() and
   app_*_get_writer() inline helpers, use APP_PARAM_ADD_LINK_FOR_TM instead of
   APP_PARAM_ADD_LINK_FOR_TXQ in parse_tm(), and parse/save the previously
   ignored n_retries option of TXQ sections.

Signed-off-by: WeiJie Zhuang <zhuangwj@gmail.com>
---
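A minimal sketch of the new KNI configuration section, for reviewers'
convenience. Object names (KNI0, LINK0, MEMPOOL0) are illustrative only;
the actual example configuration is added by patch 3/4, and every entry
below is optional (defaults are taken from default_kni_params):

    ; KNI0 attaches to LINK0 (KNI<n> always maps to LINK<n>). The queue must
    ; be read by exactly one pipeline (pktq_in = ... KNI0) and written by
    ; exactly one pipeline (pktq_out = ... KNI0).
    [KNI0]
    ; core = s<socket>c<core>[h] binds the KNI kernel thread (force_bind = 1)
    core = s0c1
    mempool = MEMPOOL0
    burst_read = 32
    burst_write = 32
    dropless = no
    n_retries = 0
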
 examples/ip_pipeline/app.h                         | 183 ++++++++++++++++++-
 examples/ip_pipeline/config_check.c                |  26 ++-
 examples/ip_pipeline/config_parse.c                | 203 ++++++++++++++++++++-
 examples/ip_pipeline/init.c                        | 148 ++++++++++++++-
 examples/ip_pipeline/pipeline/pipeline_common_fe.c |  27 +++
 examples/ip_pipeline/pipeline/pipeline_master_be.c |   9 +
 examples/ip_pipeline/pipeline_be.h                 |  33 ++++
 7 files changed, 618 insertions(+), 11 deletions(-)

diff --git a/examples/ip_pipeline/app.h b/examples/ip_pipeline/app.h
index 7611341..6a6fdd9 100644
--- a/examples/ip_pipeline/app.h
+++ b/examples/ip_pipeline/app.h
@@ -44,6 +44,9 @@
 #include <cmdline_parse.h>
 
 #include <rte_ethdev.h>
+#ifdef RTE_LIBRTE_KNI
+#include <rte_kni.h>
+#endif
 
 #include "cpu_core_map.h"
 #include "pipeline.h"
@@ -132,6 +135,22 @@ struct app_pktq_swq_params {
 	uint32_t mempool_indirect_id;
 };
 
+struct app_pktq_kni_params {
+	char *name;
+	uint32_t parsed;
+
+	uint32_t socket_id;
+	uint32_t core_id;
+	uint32_t hyper_th_id;
+	uint32_t force_bind;
+
+	uint32_t mempool_id; /* Position in the app->mempool_params */
+	uint32_t burst_read;
+	uint32_t burst_write;
+	uint32_t dropless;
+	uint64_t n_retries;
+};
+
 #ifndef APP_FILE_NAME_SIZE
 #define APP_FILE_NAME_SIZE                       256
 #endif
@@ -185,6 +204,7 @@ enum app_pktq_in_type {
 	APP_PKTQ_IN_HWQ,
 	APP_PKTQ_IN_SWQ,
 	APP_PKTQ_IN_TM,
+	APP_PKTQ_IN_KNI,
 	APP_PKTQ_IN_SOURCE,
 };
 
@@ -197,6 +217,7 @@ enum app_pktq_out_type {
 	APP_PKTQ_OUT_HWQ,
 	APP_PKTQ_OUT_SWQ,
 	APP_PKTQ_OUT_TM,
+	APP_PKTQ_OUT_KNI,
 	APP_PKTQ_OUT_SINK,
 };
 
@@ -420,6 +441,8 @@ struct app_eal_params {
 
 #define APP_MAX_PKTQ_TM                          APP_MAX_LINKS
 
+#define APP_MAX_PKTQ_KNI                         APP_MAX_LINKS
+
 #ifndef APP_MAX_PKTQ_SOURCE
 #define APP_MAX_PKTQ_SOURCE                      64
 #endif
@@ -471,6 +494,7 @@ struct app_params {
 	struct app_pktq_hwq_out_params hwq_out_params[APP_MAX_HWQ_OUT];
 	struct app_pktq_swq_params swq_params[APP_MAX_PKTQ_SWQ];
 	struct app_pktq_tm_params tm_params[APP_MAX_PKTQ_TM];
+	struct app_pktq_kni_params kni_params[APP_MAX_PKTQ_KNI];
 	struct app_pktq_source_params source_params[APP_MAX_PKTQ_SOURCE];
 	struct app_pktq_sink_params sink_params[APP_MAX_PKTQ_SINK];
 	struct app_msgq_params msgq_params[APP_MAX_MSGQ];
@@ -482,6 +506,7 @@ struct app_params {
 	uint32_t n_pktq_hwq_out;
 	uint32_t n_pktq_swq;
 	uint32_t n_pktq_tm;
+	uint32_t n_pktq_kni;
 	uint32_t n_pktq_source;
 	uint32_t n_pktq_sink;
 	uint32_t n_msgq;
@@ -495,6 +520,9 @@ struct app_params {
 	struct app_link_data link_data[APP_MAX_LINKS];
 	struct rte_ring *swq[APP_MAX_PKTQ_SWQ];
 	struct rte_sched_port *tm[APP_MAX_PKTQ_TM];
+#ifdef RTE_LIBRTE_KNI
+	struct rte_kni *kni[APP_MAX_PKTQ_KNI];
+#endif /* RTE_LIBRTE_KNI */
 	struct rte_ring *msgq[APP_MAX_MSGQ];
 	struct pipeline_type pipeline_type[APP_MAX_PIPELINE_TYPES];
 	struct app_pipeline_data pipeline_data[APP_MAX_PIPELINES];
@@ -667,11 +695,11 @@ app_swq_get_reader(struct app_params *app,
 	struct app_pktq_swq_params *swq,
 	uint32_t *pktq_in_id)
 {
-	struct app_pipeline_params *reader;
+	struct app_pipeline_params *reader = NULL;
 	uint32_t pos = swq - app->swq_params;
 	uint32_t n_pipelines = RTE_MIN(app->n_pipelines,
 		RTE_DIM(app->pipeline_params));
-	uint32_t n_readers = 0, id, i;
+	uint32_t n_readers = 0, id = 0, i;
 
 	for (i = 0; i < n_pipelines; i++) {
 		struct app_pipeline_params *p = &app->pipeline_params[i];
@@ -727,11 +755,11 @@ app_tm_get_reader(struct app_params *app,
 	struct app_pktq_tm_params *tm,
 	uint32_t *pktq_in_id)
 {
-	struct app_pipeline_params *reader;
+	struct app_pipeline_params *reader = NULL;
 	uint32_t pos = tm - app->tm_params;
 	uint32_t n_pipelines = RTE_MIN(app->n_pipelines,
 		RTE_DIM(app->pipeline_params));
-	uint32_t n_readers = 0, id, i;
+	uint32_t n_readers = 0, id = 0, i;
 
 	for (i = 0; i < n_pipelines; i++) {
 		struct app_pipeline_params *p = &app->pipeline_params[i];
@@ -758,6 +786,66 @@ app_tm_get_reader(struct app_params *app,
 }
 
 static inline uint32_t
+app_kni_get_readers(struct app_params *app, struct app_pktq_kni_params *kni)
+{
+	uint32_t pos = kni - app->kni_params;
+	uint32_t n_pipelines = RTE_MIN(app->n_pipelines,
+		RTE_DIM(app->pipeline_params));
+	uint32_t n_readers = 0, i;
+
+	for (i = 0; i < n_pipelines; i++) {
+		struct app_pipeline_params *p = &app->pipeline_params[i];
+		uint32_t n_pktq_in = RTE_MIN(p->n_pktq_in, RTE_DIM(p->pktq_in));
+		uint32_t j;
+
+		for (j = 0; j < n_pktq_in; j++) {
+			struct app_pktq_in_params *pktq = &p->pktq_in[j];
+
+			if ((pktq->type == APP_PKTQ_IN_KNI) &&
+				(pktq->id == pos))
+				n_readers++;
+		}
+	}
+
+	return n_readers;
+}
+
+static inline struct app_pipeline_params *
+app_kni_get_reader(struct app_params *app,
+				  struct app_pktq_kni_params *kni,
+				  uint32_t *pktq_in_id)
+{
+	struct app_pipeline_params *reader = NULL;
+	uint32_t pos = kni - app->kni_params;
+	uint32_t n_pipelines = RTE_MIN(app->n_pipelines,
+		RTE_DIM(app->pipeline_params));
+	uint32_t n_readers = 0, id = 0, i;
+
+	for (i = 0; i < n_pipelines; i++) {
+		struct app_pipeline_params *p = &app->pipeline_params[i];
+		uint32_t n_pktq_in = RTE_MIN(p->n_pktq_in, RTE_DIM(p->pktq_in));
+		uint32_t j;
+
+		for (j = 0; j < n_pktq_in; j++) {
+			struct app_pktq_in_params *pktq = &p->pktq_in[j];
+
+			if ((pktq->type == APP_PKTQ_IN_KNI) &&
+				(pktq->id == pos)) {
+				n_readers++;
+				reader = p;
+				id = j;
+			}
+		}
+	}
+
+	if (n_readers != 1)
+		return NULL;
+
+	*pktq_in_id = id;
+	return reader;
+}
+
+static inline uint32_t
 app_source_get_readers(struct app_params *app,
 struct app_pktq_source_params *source)
 {
@@ -861,11 +949,11 @@ app_swq_get_writer(struct app_params *app,
 	struct app_pktq_swq_params *swq,
 	uint32_t *pktq_out_id)
 {
-	struct app_pipeline_params *writer;
+	struct app_pipeline_params *writer = NULL;
 	uint32_t pos = swq - app->swq_params;
 	uint32_t n_pipelines = RTE_MIN(app->n_pipelines,
 		RTE_DIM(app->pipeline_params));
-	uint32_t n_writers = 0, id, i;
+	uint32_t n_writers = 0, id = 0, i;
 
 	for (i = 0; i < n_pipelines; i++) {
 		struct app_pipeline_params *p = &app->pipeline_params[i];
@@ -923,11 +1011,11 @@ app_tm_get_writer(struct app_params *app,
 	struct app_pktq_tm_params *tm,
 	uint32_t *pktq_out_id)
 {
-	struct app_pipeline_params *writer;
+	struct app_pipeline_params *writer = NULL;
 	uint32_t pos = tm - app->tm_params;
 	uint32_t n_pipelines = RTE_MIN(app->n_pipelines,
 		RTE_DIM(app->pipeline_params));
-	uint32_t n_writers = 0, id, i;
+	uint32_t n_writers = 0, id = 0, i;
 
 	for (i = 0; i < n_pipelines; i++) {
 		struct app_pipeline_params *p = &app->pipeline_params[i];
@@ -939,10 +1027,73 @@ app_tm_get_writer(struct app_params *app,
 			struct app_pktq_out_params *pktq = &p->pktq_out[j];
 
 			if ((pktq->type == APP_PKTQ_OUT_TM) &&
+				(pktq->id == pos)) {
+				n_writers++;
+				writer = p;
+				id = j;
+			}
+		}
+	}
+
+	if (n_writers != 1)
+		return NULL;
+
+	*pktq_out_id = id;
+	return writer;
+}
+
+static inline uint32_t
+app_kni_get_writers(struct app_params *app, struct app_pktq_kni_params *kni)
+{
+	uint32_t pos = kni - app->kni_params;
+	uint32_t n_pipelines = RTE_MIN(app->n_pipelines,
+		RTE_DIM(app->pipeline_params));
+	uint32_t n_writers = 0, i;
+
+	for (i = 0; i < n_pipelines; i++) {
+		struct app_pipeline_params *p = &app->pipeline_params[i];
+		uint32_t n_pktq_out = RTE_MIN(p->n_pktq_out,
+			RTE_DIM(p->pktq_out));
+		uint32_t j;
+
+		for (j = 0; j < n_pktq_out; j++) {
+			struct app_pktq_out_params *pktq = &p->pktq_out[j];
+
+			if ((pktq->type == APP_PKTQ_OUT_KNI) &&
 				(pktq->id == pos))
 				n_writers++;
+		}
+	}
+
+	return n_writers;
+}
+
+static inline struct app_pipeline_params *
+app_kni_get_writer(struct app_params *app,
+				  struct app_pktq_kni_params *kni,
+				  uint32_t *pktq_out_id)
+{
+	struct app_pipeline_params *writer = NULL;
+	uint32_t pos = kni - app->kni_params;
+	uint32_t n_pipelines = RTE_MIN(app->n_pipelines,
+		RTE_DIM(app->pipeline_params));
+	uint32_t n_writers = 0, id = 0, i;
+
+	for (i = 0; i < n_pipelines; i++) {
+		struct app_pipeline_params *p = &app->pipeline_params[i];
+		uint32_t n_pktq_out = RTE_MIN(p->n_pktq_out,
+			RTE_DIM(p->pktq_out));
+		uint32_t j;
+
+		for (j = 0; j < n_pktq_out; j++) {
+			struct app_pktq_out_params *pktq = &p->pktq_out[j];
+
+			if ((pktq->type == APP_PKTQ_OUT_KNI) &&
+				(pktq->id == pos)) {
+				n_writers++;
 				writer = p;
 				id = j;
+			}
 		}
 	}
 
@@ -1051,6 +1202,22 @@ app_get_link_for_tm(struct app_params *app, struct app_pktq_tm_params *p_tm)
 	return &app->link_params[link_param_idx];
 }
 
+static inline struct app_link_params *
+app_get_link_for_kni(struct app_params *app, struct app_pktq_kni_params *p_kni)
+{
+	char link_name[APP_PARAM_NAME_SIZE];
+	uint32_t link_id;
+	ssize_t link_param_idx;
+
+	sscanf(p_kni->name, "KNI%" PRIu32, &link_id);
+	sprintf(link_name, "LINK%" PRIu32, link_id);
+	link_param_idx = APP_PARAM_FIND(app->link_params, link_name);
+	APP_CHECK((link_param_idx >= 0),
+			  "Cannot find %s for %s", link_name, p_kni->name);
+
+	return &app->link_params[link_param_idx];
+}
+
 void app_pipeline_params_get(struct app_params *app,
 	struct app_pipeline_params *p_in,
 	struct pipeline_params *p_out);
diff --git a/examples/ip_pipeline/config_check.c b/examples/ip_pipeline/config_check.c
index 18f57be..af1b628 100644
--- a/examples/ip_pipeline/config_check.c
+++ b/examples/ip_pipeline/config_check.c
@@ -1,7 +1,7 @@
 /*-
  *   BSD LICENSE
  *
- *   Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
  *   All rights reserved.
  *
  *   Redistribution and use in source and binary forms, with or without
@@ -316,6 +316,29 @@ check_tms(struct app_params *app)
 }
 
 static void
+check_knis(struct app_params *app) {
+	uint32_t i;
+
+	for (i = 0; i < app->n_pktq_kni; i++) {
+		struct app_pktq_kni_params *p = &app->kni_params[i];
+		uint32_t n_readers = app_kni_get_readers(app, p);
+		uint32_t n_writers = app_kni_get_writers(app, p);
+
+		APP_CHECK((n_readers != 0),
+			"%s has no reader\n", p->name);
+
+		APP_CHECK((n_readers == 1),
+			"%s has more than one reader\n", p->name);
+
+		APP_CHECK((n_writers != 0),
+			"%s has no writer\n", p->name);
+
+		APP_CHECK((n_writers == 1),
+			"%s has more than one writer\n", p->name);
+	}
+}
+
+static void
 check_sources(struct app_params *app)
 {
 	uint32_t i;
@@ -453,6 +476,7 @@ app_config_check(struct app_params *app)
 	check_txqs(app);
 	check_swqs(app);
 	check_tms(app);
+	check_knis(app);
 	check_sources(app);
 	check_sinks(app);
 	check_msgqs(app);
diff --git a/examples/ip_pipeline/config_parse.c b/examples/ip_pipeline/config_parse.c
index 504018e..0adca98 100644
--- a/examples/ip_pipeline/config_parse.c
+++ b/examples/ip_pipeline/config_parse.c
@@ -189,6 +189,20 @@ struct app_pktq_tm_params default_tm_params = {
 	.burst_write = 32,
 };
 
+struct app_pktq_kni_params default_kni_params = {
+	.parsed = 0,
+	.socket_id = 0,
+	.core_id = 0,
+	.hyper_th_id = 0,
+	.force_bind = 0,
+
+	.mempool_id = 0,
+	.burst_read = 32,
+	.burst_write = 32,
+	.dropless = 0,
+	.n_retries = 0,
+};
+
 struct app_pktq_source_params default_source_params = {
 	.parsed = 0,
 	.mempool_id = 0,
@@ -300,6 +314,18 @@ app_print_usage(char *prgname)
 	link_param_pos;							\
 })
 
+#define APP_PARAM_ADD_LINK_FOR_KNI(app, kni_name)			\
+({									\
+	char link_name[APP_PARAM_NAME_SIZE];				\
+	ssize_t link_param_pos;						\
+	uint32_t link_id;						\
+									\
+	sscanf((kni_name), "KNI%" SCNu32, &link_id);		\
+	sprintf(link_name, "LINK%" PRIu32, link_id);			\
+	link_param_pos = APP_PARAM_ADD((app)->link_params, link_name);	\
+	link_param_pos;							\
+})
+
 #define PARSE_CHECK_DUPLICATE_SECTION(obj)				\
 do {									\
 	APP_CHECK(((obj)->parsed == 0),					\
@@ -826,6 +852,10 @@ parse_pipeline_pktq_in(struct app_params *app,
 			type = APP_PKTQ_IN_TM;
 			id = APP_PARAM_ADD(app->tm_params, name);
 			APP_PARAM_ADD_LINK_FOR_TM(app, name);
+		} else if (validate_name(name, "KNI", 1) == 0) {
+			type = APP_PKTQ_IN_KNI;
+			id = APP_PARAM_ADD(app->kni_params, name);
+			APP_PARAM_ADD_LINK_FOR_KNI(app, name);
 		} else if (validate_name(name, "SOURCE", 1) == 0) {
 			type = APP_PKTQ_IN_SOURCE;
 			id = APP_PARAM_ADD(app->source_params, name);
@@ -871,6 +901,10 @@ parse_pipeline_pktq_out(struct app_params *app,
 			type = APP_PKTQ_OUT_TM;
 			id = APP_PARAM_ADD(app->tm_params, name);
 			APP_PARAM_ADD_LINK_FOR_TM(app, name);
+		} else if (validate_name(name, "KNI", 1) == 0) {
+			type = APP_PKTQ_OUT_KNI;
+			id = APP_PARAM_ADD(app->kni_params, name);
+			APP_PARAM_ADD_LINK_FOR_KNI(app, name);
 		} else if (validate_name(name, "SINK", 1) == 0) {
 			type = APP_PKTQ_OUT_SINK;
 			id = APP_PARAM_ADD(app->sink_params, name);
@@ -1581,6 +1615,15 @@ parse_txq(struct app_params *app,
 			continue;
 		}
 
+		if (strcmp(ent->name, "n_retries") == 0) {
+			int status = parser_read_uint64(&param->n_retries,
+				ent->value);
+
+			PARSE_ERROR((status == 0), section_name,
+				ent->name);
+			continue;
+		}
+
 		/* unrecognized */
 		PARSE_ERROR_INVALID(0, section_name, ent->name);
 	}
@@ -1816,7 +1859,7 @@ parse_tm(struct app_params *app,
 	param = &app->tm_params[param_idx];
 	PARSE_CHECK_DUPLICATE_SECTION(param);
 
-	APP_PARAM_ADD_LINK_FOR_TXQ(app, section_name);
+	APP_PARAM_ADD_LINK_FOR_TM(app, section_name);
 
 	for (i = 0; i < n_entries; i++) {
 		struct rte_cfgfile_entry *ent = &entries[i];
@@ -1853,6 +1896,102 @@ parse_tm(struct app_params *app,
 }
 
 static void
+parse_kni(struct app_params *app,
+		  const char *section_name,
+		  struct rte_cfgfile *cfg)
+{
+	struct app_pktq_kni_params *param;
+	struct rte_cfgfile_entry *entries;
+	int n_entries, i;
+	ssize_t param_idx;
+
+	n_entries = rte_cfgfile_section_num_entries(cfg, section_name);
+	PARSE_ERROR_SECTION_NO_ENTRIES((n_entries > 0), section_name);
+
+	entries = malloc(n_entries * sizeof(struct rte_cfgfile_entry));
+	PARSE_ERROR_MALLOC(entries != NULL);
+
+	rte_cfgfile_section_entries(cfg, section_name, entries, n_entries);
+
+	param_idx = APP_PARAM_ADD(app->kni_params, section_name);
+	param = &app->kni_params[param_idx];
+	PARSE_CHECK_DUPLICATE_SECTION(param);
+
+	APP_PARAM_ADD_LINK_FOR_KNI(app, section_name);
+
+	for (i = 0; i < n_entries; i++) {
+		struct rte_cfgfile_entry *ent = &entries[i];
+
+		if (strcmp(ent->name, "core") == 0) {
+			int status = parse_pipeline_core(
+					&param->socket_id,
+					&param->core_id,
+					&param->hyper_th_id,
+					ent->value);
+
+			PARSE_ERROR((status == 0), section_name,
+						ent->name);
+			param->force_bind = 1;
+			continue;
+		}
+
+		if (strcmp(ent->name, "mempool") == 0) {
+			int status = validate_name(ent->value,
+				"MEMPOOL", 1);
+			ssize_t idx;
+
+			PARSE_ERROR((status == 0), section_name,
+						ent->name);
+
+			idx = APP_PARAM_ADD(app->mempool_params, ent->value);
+			param->mempool_id = idx;
+			continue;
+		}
+
+		if (strcmp(ent->name, "burst_read") == 0) {
+			int status = parser_read_uint32(&param->burst_read,
+						ent->value);
+
+			PARSE_ERROR((status == 0), section_name,
+						ent->name);
+			continue;
+		}
+
+		if (strcmp(ent->name, "burst_write") == 0) {
+			int status = parser_read_uint32(&param->burst_write,
+						ent->value);
+
+			PARSE_ERROR((status == 0), section_name,
+						ent->name);
+			continue;
+		}
+
+		if (strcmp(ent->name, "dropless") == 0) {
+			int status = parser_read_arg_bool(ent->value);
+
+			PARSE_ERROR((status != -EINVAL), section_name,
+						ent->name);
+			param->dropless = status;
+			continue;
+		}
+
+		if (strcmp(ent->name, "n_retries") == 0) {
+			int status = parser_read_uint64(&param->n_retries,
+						ent->value);
+
+			PARSE_ERROR((status == 0), section_name,
+						ent->name);
+			continue;
+		}
+
+		/* unrecognized */
+		PARSE_ERROR_INVALID(0, section_name, ent->name);
+	}
+
+	free(entries);
+}
+
+static void
 parse_source(struct app_params *app,
 	const char *section_name,
 	struct rte_cfgfile *cfg)
@@ -2147,6 +2286,7 @@ static const struct config_section cfg_file_scheme[] = {
 	{"TXQ", 2, parse_txq},
 	{"SWQ", 1, parse_swq},
 	{"TM", 1, parse_tm},
+	{"KNI", 1, parse_kni},
 	{"SOURCE", 1, parse_source},
 	{"SINK", 1, parse_sink},
 	{"MSGQ-REQ-PIPELINE", 1, parse_msgq_req_pipeline},
@@ -2285,6 +2425,7 @@ app_config_parse(struct app_params *app, const char *file_name)
 	APP_PARAM_COUNT(app->hwq_out_params, app->n_pktq_hwq_out);
 	APP_PARAM_COUNT(app->swq_params, app->n_pktq_swq);
 	APP_PARAM_COUNT(app->tm_params, app->n_pktq_tm);
+	APP_PARAM_COUNT(app->kni_params, app->n_pktq_kni);
 	APP_PARAM_COUNT(app->source_params, app->n_pktq_source);
 	APP_PARAM_COUNT(app->sink_params, app->n_pktq_sink);
 	APP_PARAM_COUNT(app->msgq_params, app->n_msgq);
@@ -2582,6 +2723,7 @@ save_txq_params(struct app_params *app, FILE *f)
 		fprintf(f, "%s = %s\n",
 			"dropless",
 			p->dropless ? "yes" : "no");
+		fprintf(f, "%s = %" PRIu64 "\n", "n_retries", p->n_retries);
 
 		fputc('\n', f);
 	}
@@ -2647,6 +2789,53 @@ save_tm_params(struct app_params *app, FILE *f)
 }
 
 static void
+save_kni_params(struct app_params *app, FILE *f)
+{
+	struct app_pktq_kni_params *p;
+	size_t i, count;
+
+	count = RTE_DIM(app->kni_params);
+	for (i = 0; i < count; i++) {
+		p = &app->kni_params[i];
+		if (!APP_PARAM_VALID(p))
+			continue;
+
+		/* section name */
+		fprintf(f, "[%s]\n", p->name);
+
+		/* core */
+		if (p->force_bind) {
+			fprintf(f, "; force_bind = 1\n");
+			fprintf(f, "core = s%" PRIu32 "c%" PRIu32 "%s\n",
+					p->socket_id,
+					p->core_id,
+					(p->hyper_th_id) ? "h" : "");
+		} else
+			fprintf(f, "; force_bind = 0\n");
+
+		/* mempool */
+		fprintf(f, "%s = %s\n", "mempool",
+				app->mempool_params[p->mempool_id].name);
+
+		/* burst_read */
+		fprintf(f, "%s = %" PRIu32 "\n", "burst_read", p->burst_read);
+
+		/* burst_write */
+		fprintf(f, "%s = %" PRIu32 "\n", "burst_write", p->burst_write);
+
+		/* dropless */
+		fprintf(f, "%s = %s\n",
+				"dropless",
+				p->dropless ? "yes" : "no");
+
+		/* n_retries */
+		fprintf(f, "%s = %" PRIu64 "\n", "n_retries", p->n_retries);
+
+		fputc('\n', f);
+	}
+}
+
+static void
 save_source_params(struct app_params *app, FILE *f)
 {
 	struct app_pktq_source_params *p;
@@ -2753,6 +2942,9 @@ save_pipeline_params(struct app_params *app, FILE *f)
 				case APP_PKTQ_IN_TM:
 					name = app->tm_params[pp->id].name;
 					break;
+				case APP_PKTQ_IN_KNI:
+					name = app->kni_params[pp->id].name;
+					break;
 				case APP_PKTQ_IN_SOURCE:
 					name = app->source_params[pp->id].name;
 					break;
@@ -2787,6 +2979,9 @@ save_pipeline_params(struct app_params *app, FILE *f)
 				case APP_PKTQ_OUT_TM:
 					name = app->tm_params[pp->id].name;
 					break;
+				case APP_PKTQ_OUT_KNI:
+					name = app->kni_params[pp->id].name;
+					break;
 				case APP_PKTQ_OUT_SINK:
 					name = app->sink_params[pp->id].name;
 					break;
@@ -2872,6 +3067,7 @@ app_config_save(struct app_params *app, const char *file_name)
 	save_txq_params(app, file);
 	save_swq_params(app, file);
 	save_tm_params(app, file);
+	save_kni_params(app, file);
 	save_source_params(app, file);
 	save_sink_params(app, file);
 	save_msgq_params(app, file);
@@ -2921,6 +3117,11 @@ app_config_init(struct app_params *app)
 			&default_tm_params,
 			sizeof(default_tm_params));
 
+	for (i = 0; i < RTE_DIM(app->kni_params); i++)
+		memcpy(&app->kni_params[i],
+			   &default_kni_params,
+			   sizeof(default_kni_params));
+
 	for (i = 0; i < RTE_DIM(app->source_params); i++)
 		memcpy(&app->source_params[i],
 			&default_source_params,
diff --git a/examples/ip_pipeline/init.c b/examples/ip_pipeline/init.c
index 7120bab..cd167f6 100644
--- a/examples/ip_pipeline/init.c
+++ b/examples/ip_pipeline/init.c
@@ -1176,6 +1176,111 @@ app_init_tm(struct app_params *app)
 	}
 }
 
+#ifdef RTE_LIBRTE_KNI
+static int
+kni_config_network_interface(uint8_t port_id, uint8_t if_up) {
+	int ret = 0;
+
+	if (port_id >= rte_eth_dev_count())
+		return -EINVAL;
+
+	ret = (if_up) ?
+		rte_eth_dev_set_link_up(port_id) :
+		rte_eth_dev_set_link_down(port_id);
+
+	return ret;
+}
+
+static int
+kni_change_mtu(uint8_t port_id, unsigned new_mtu) {
+	int ret;
+
+	if (port_id >= rte_eth_dev_count())
+		return -EINVAL;
+
+	if (new_mtu > ETHER_MAX_LEN)
+		return -EINVAL;
+
+	/* Set new MTU */
+	ret = rte_eth_dev_set_mtu(port_id, new_mtu);
+	if (ret < 0)
+		return ret;
+
+	return 0;
+}
+#endif /* RTE_LIBRTE_KNI */
+
+#ifndef RTE_LIBRTE_KNI
+static void
+app_init_kni(struct app_params *app) {
+	if (app->n_pktq_kni == 0)
+		return;
+
+	rte_panic("Can not init KNI without librte_kni support.\n");
+}
+#else
+static void
+app_init_kni(struct app_params *app) {
+	uint32_t i;
+
+	if (app->n_pktq_kni == 0)
+		return;
+
+	rte_kni_init(app->n_pktq_kni);
+
+	for (i = 0; i < app->n_pktq_kni; i++) {
+		struct app_pktq_kni_params *p_kni = &app->kni_params[i];
+		struct app_link_params *p_link;
+		struct rte_eth_dev_info dev_info;
+		struct app_mempool_params *mempool_params;
+		struct rte_mempool *mempool;
+		struct rte_kni_conf conf;
+		struct rte_kni_ops ops;
+
+		/* LINK */
+		p_link = app_get_link_for_kni(app, p_kni);
+		memset(&dev_info, 0, sizeof(dev_info));
+		rte_eth_dev_info_get(p_link->pmd_id, &dev_info);
+
+		/* MEMPOOL */
+		mempool_params = &app->mempool_params[p_kni->mempool_id];
+		mempool = app->mempool[p_kni->mempool_id];
+
+		/* KNI */
+		memset(&conf, 0, sizeof(conf));
+		snprintf(conf.name, RTE_KNI_NAMESIZE, "%s", p_kni->name);
+		conf.force_bind = p_kni->force_bind;
+		if (conf.force_bind) {
+			int lcore_id;
+
+			lcore_id = cpu_core_map_get_lcore_id(app->core_map,
+				p_kni->socket_id,
+				p_kni->core_id,
+				p_kni->hyper_th_id);
+
+			if (lcore_id < 0)
+				rte_panic("%s invalid CPU core\n", p_kni->name);
+
+			conf.core_id = (uint32_t) lcore_id;
+		}
+		conf.group_id = p_link->pmd_id;
+		conf.mbuf_size = mempool_params->buffer_size;
+		conf.addr = dev_info.pci_dev->addr;
+		conf.id = dev_info.pci_dev->id;
+
+		memset(&ops, 0, sizeof(ops));
+		ops.port_id = (uint8_t) p_link->pmd_id;
+		ops.change_mtu = kni_change_mtu;
+		ops.config_network_if = kni_config_network_interface;
+
+		APP_LOG(app, HIGH, "Initializing %s ...", p_kni->name);
+		app->kni[i] = rte_kni_alloc(mempool, &conf, &ops);
+		if (!app->kni[i])
+			rte_panic("%s init error\n", p_kni->name);
+	}
+}
+#endif /* RTE_LIBRTE_KNI */
+
 static void
 app_init_msgq(struct app_params *app)
 {
@@ -1281,10 +1386,21 @@ void app_pipeline_params_get(struct app_params *app,
 			break;
 		}
 		case APP_PKTQ_IN_TM:
+		{
 			out->type = PIPELINE_PORT_IN_SCHED_READER;
 			out->params.sched.sched = app->tm[in->id];
 			out->burst_size = app->tm_params[in->id].burst_read;
 			break;
+		}
+#ifdef RTE_LIBRTE_KNI
+		case APP_PKTQ_IN_KNI:
+		{
+			out->type = PIPELINE_PORT_IN_KNI_READER;
+			out->params.kni.kni = app->kni[in->id];
+			out->burst_size = app->kni_params[in->id].burst_read;
+			break;
+		}
+#endif /* RTE_LIBRTE_KNI */
 		case APP_PKTQ_IN_SOURCE:
 		{
 			uint32_t mempool_id =
@@ -1409,7 +1525,8 @@ void app_pipeline_params_get(struct app_params *app,
 			}
 			break;
 		}
-		case APP_PKTQ_OUT_TM: {
+		case APP_PKTQ_OUT_TM:
+		{
 			struct rte_port_sched_writer_params *params =
 				&out->params.sched;
 
@@ -1419,6 +1536,34 @@ void app_pipeline_params_get(struct app_params *app,
 				app->tm_params[in->id].burst_write;
 			break;
 		}
+#ifdef RTE_LIBRTE_KNI
+		case APP_PKTQ_OUT_KNI:
+		{
+			struct app_pktq_kni_params *p_kni =
+				&app->kni_params[in->id];
+
+			if (p_kni->dropless == 0) {
+				struct rte_port_kni_writer_params *params =
+					&out->params.kni;
+
+				out->type = PIPELINE_PORT_OUT_KNI_WRITER;
+				params->kni = app->kni[in->id];
+				params->tx_burst_sz =
+					app->kni_params[in->id].burst_write;
+			} else {
+				struct rte_port_kni_writer_nodrop_params
+					*params = &out->params.kni_nodrop;
+
+				out->type = PIPELINE_PORT_OUT_KNI_WRITER_NODROP;
+				params->kni = app->kni[in->id];
+				params->tx_burst_sz =
+					app->kni_params[in->id].burst_write;
+				params->n_retries =
+					app->kni_params[in->id].n_retries;
+			}
+			break;
+		}
+#endif /* RTE_LIBRTE_KNI */
 		case APP_PKTQ_OUT_SINK:
 		{
 			out->type = PIPELINE_PORT_OUT_SINK;
@@ -1607,6 +1752,7 @@ int app_init(struct app_params *app)
 	app_init_link(app);
 	app_init_swq(app);
 	app_init_tm(app);
+	app_init_kni(app);
 	app_init_msgq(app);
 
 	app_pipeline_common_cmd_push(app);
diff --git a/examples/ip_pipeline/pipeline/pipeline_common_fe.c b/examples/ip_pipeline/pipeline/pipeline_common_fe.c
index 70c57e4..cd1d082 100644
--- a/examples/ip_pipeline/pipeline/pipeline_common_fe.c
+++ b/examples/ip_pipeline/pipeline/pipeline_common_fe.c
@@ -130,6 +130,33 @@ app_pipeline_track_pktq_out_to_link(struct app_params *app,
 			break;
 		}
 
+		case APP_PKTQ_OUT_KNI:
+		{
+			struct pipeline_params pp;
+			struct pipeline_type *ptype;
+			struct app_pktq_kni_params *kni;
+			uint32_t pktq_in_id;
+			int status;
+
+			kni = &app->kni_params[pktq_out->id];
+			p = app_kni_get_reader(app, kni, &pktq_in_id);
+			if (p == NULL)
+				return NULL;
+
+			ptype = app_pipeline_type_find(app, p->type);
+			if ((ptype == NULL) || (ptype->fe_ops->f_track == NULL))
+				return NULL;
+
+			app_pipeline_params_get(app, p, &pp);
+			status = ptype->fe_ops->f_track(&pp,
+				pktq_in_id,
+				&pktq_out_id);
+			if (status)
+				return NULL;
+
+			break;
+		}
+
 		case APP_PKTQ_OUT_SINK:
 		default:
 			return NULL;
diff --git a/examples/ip_pipeline/pipeline/pipeline_master_be.c b/examples/ip_pipeline/pipeline/pipeline_master_be.c
index 79869a4..9a7c8c1 100644
--- a/examples/ip_pipeline/pipeline/pipeline_master_be.c
+++ b/examples/ip_pipeline/pipeline/pipeline_master_be.c
@@ -106,6 +106,9 @@ pipeline_run(void *pipeline)
 	struct pipeline_master *p = (struct pipeline_master *) pipeline;
 	struct app_params *app = p->app;
 	int status;
+#ifdef RTE_LIBRTE_KNI
+	uint32_t i;
+#endif /* RTE_LIBRTE_KNI */
 
 	/* Application post-init phase */
 	if (p->post_init_done == 0) {
@@ -144,6 +147,12 @@ pipeline_run(void *pipeline)
 		rte_exit(0, "Bye!\n");
 	}
 
+#ifdef RTE_LIBRTE_KNI
+	/* Handle KNI requests from Linux kernel */
+	for (i = 0; i < app->n_pktq_kni; i++)
+		rte_kni_handle_request(app->kni[i]);
+#endif /* RTE_LIBRTE_KNI */
+
 	return 0;
 }
 
diff --git a/examples/ip_pipeline/pipeline_be.h b/examples/ip_pipeline/pipeline_be.h
index 5501ab7..b562472 100644
--- a/examples/ip_pipeline/pipeline_be.h
+++ b/examples/ip_pipeline/pipeline_be.h
@@ -40,6 +40,9 @@
 #include <rte_port_ras.h>
 #include <rte_port_sched.h>
 #include <rte_port_source_sink.h>
+#ifdef RTE_LIBRTE_KNI
+#include <rte_port_kni.h>
+#endif
 #include <rte_pipeline.h>
 
 enum pipeline_port_in_type {
@@ -49,6 +52,7 @@ enum pipeline_port_in_type {
 	PIPELINE_PORT_IN_RING_READER_IPV4_FRAG,
 	PIPELINE_PORT_IN_RING_READER_IPV6_FRAG,
 	PIPELINE_PORT_IN_SCHED_READER,
+	PIPELINE_PORT_IN_KNI_READER,
 	PIPELINE_PORT_IN_SOURCE,
 };
 
@@ -61,6 +65,9 @@ struct pipeline_port_in_params {
 		struct rte_port_ring_reader_ipv4_frag_params ring_ipv4_frag;
 		struct rte_port_ring_reader_ipv6_frag_params ring_ipv6_frag;
 		struct rte_port_sched_reader_params sched;
+#ifdef RTE_LIBRTE_KNI
+		struct rte_port_kni_reader_params kni;
+#endif
 		struct rte_port_source_params source;
 	} params;
 	uint32_t burst_size;
@@ -82,6 +89,10 @@ pipeline_port_in_params_convert(struct pipeline_port_in_params  *p)
 		return (void *) &p->params.ring_ipv6_frag;
 	case PIPELINE_PORT_IN_SCHED_READER:
 		return (void *) &p->params.sched;
+#ifdef RTE_LIBRTE_KNI
+	case PIPELINE_PORT_IN_KNI_READER:
+		return (void *) &p->params.kni;
+#endif
 	case PIPELINE_PORT_IN_SOURCE:
 		return (void *) &p->params.source;
 	default:
@@ -105,6 +116,10 @@ pipeline_port_in_params_get_ops(struct pipeline_port_in_params  *p)
 		return &rte_port_ring_reader_ipv6_frag_ops;
 	case PIPELINE_PORT_IN_SCHED_READER:
 		return &rte_port_sched_reader_ops;
+#ifdef RTE_LIBRTE_KNI
+	case PIPELINE_PORT_IN_KNI_READER:
+		return &rte_port_kni_reader_ops;
+#endif
 	case PIPELINE_PORT_IN_SOURCE:
 		return &rte_port_source_ops;
 	default:
@@ -122,6 +137,8 @@ enum pipeline_port_out_type {
 	PIPELINE_PORT_OUT_RING_WRITER_IPV4_RAS,
 	PIPELINE_PORT_OUT_RING_WRITER_IPV6_RAS,
 	PIPELINE_PORT_OUT_SCHED_WRITER,
+	PIPELINE_PORT_OUT_KNI_WRITER,
+	PIPELINE_PORT_OUT_KNI_WRITER_NODROP,
 	PIPELINE_PORT_OUT_SINK,
 };
 
@@ -137,6 +154,10 @@ struct pipeline_port_out_params {
 		struct rte_port_ring_writer_ipv4_ras_params ring_ipv4_ras;
 		struct rte_port_ring_writer_ipv6_ras_params ring_ipv6_ras;
 		struct rte_port_sched_writer_params sched;
+#ifdef RTE_LIBRTE_KNI
+		struct rte_port_kni_writer_params kni;
+		struct rte_port_kni_writer_nodrop_params kni_nodrop;
+#endif
 		struct rte_port_sink_params sink;
 	} params;
 };
@@ -163,6 +184,12 @@ pipeline_port_out_params_convert(struct pipeline_port_out_params  *p)
 		return (void *) &p->params.ring_ipv6_ras;
 	case PIPELINE_PORT_OUT_SCHED_WRITER:
 		return (void *) &p->params.sched;
+#ifdef RTE_LIBRTE_KNI
+	case PIPELINE_PORT_OUT_KNI_WRITER:
+		return (void *) &p->params.kni;
+	case PIPELINE_PORT_OUT_KNI_WRITER_NODROP:
+		return (void *) &p->params.kni_nodrop;
+#endif
 	case PIPELINE_PORT_OUT_SINK:
 		return (void *) &p->params.sink;
 	default:
@@ -192,6 +219,12 @@ pipeline_port_out_params_get_ops(struct pipeline_port_out_params  *p)
 		return &rte_port_ring_writer_ipv6_ras_ops;
 	case PIPELINE_PORT_OUT_SCHED_WRITER:
 		return &rte_port_sched_writer_ops;
+#ifdef RTE_LIBRTE_KNI
+	case PIPELINE_PORT_OUT_KNI_WRITER:
+		return &rte_port_kni_writer_ops;
+	case PIPELINE_PORT_OUT_KNI_WRITER_NODROP:
+		return &rte_port_kni_writer_nodrop_ops;
+#endif
 	case PIPELINE_PORT_OUT_SINK:
 		return &rte_port_sink_ops;
 	default:
-- 
2.7.4

Thread overview: 22+ messages
2016-05-27  5:07 [dpdk-dev] [PATCH] port: add KNI interface support 1. add KNI port type to the packet framework 2. add KNI support to the IP Pipeline sample Application WeiJie Zhuang
2016-05-28 11:25 ` [dpdk-dev] [PATCH] port: add kni interface support WeiJie Zhuang
2016-05-30 14:40   ` Dumitrescu, Cristian
2016-06-01  4:18     ` Ethan
2016-06-09 23:42   ` Dumitrescu, Cristian
2016-06-13 10:25     ` Dumitrescu, Cristian
2016-06-13 10:47     ` Ethan
2016-06-13 13:18       ` Dumitrescu, Cristian
2016-06-16 11:34         ` Ethan
2016-06-16 11:27   ` [dpdk-dev] [PATCH v3 1/3] " WeiJie Zhuang
2016-06-16 11:27     ` [dpdk-dev] [PATCH v3 2/3] port: add kni nodrop writer WeiJie Zhuang
2016-06-18 21:47       ` Dumitrescu, Cristian
2016-06-16 11:27     ` [dpdk-dev] [PATCH v3 3/3] port: document update WeiJie Zhuang
2016-06-18 16:44     ` [dpdk-dev] [PATCH v3 1/3] port: add kni interface support Dumitrescu, Cristian
2016-06-21 11:10       ` Ethan
2016-06-21 11:31         ` Dumitrescu, Cristian
2016-06-21 10:55   ` [dpdk-dev] [PATCH v4 1/4] port: " Ethan Zhuang
2016-06-21 10:55     ` Ethan Zhuang [this message]
2016-06-21 10:55     ` [dpdk-dev] [PATCH v4 3/4] examples/ip_pipeline: kni example configuration Ethan Zhuang
2016-06-21 10:55     ` [dpdk-dev] [PATCH v4 4/4] doc: kni port support in the packet framework Ethan Zhuang
2016-06-21 12:03     ` [dpdk-dev] [PATCH v4 1/4] port: kni interface support Dumitrescu, Cristian
2016-06-21 16:09       ` Thomas Monjalon
