From: Jasvinder Singh <jasvinder.singh@intel.com>
To: dev@dpdk.org
Cc: cristian.dumitrescu@intel.com
Subject: [dpdk-dev] [PATCH v3] ip_pipeline: add rss support
Date: Mon, 30 May 2016 18:14:49 +0100
Message-ID: <1464628489-81609-1-git-send-email-jasvinder.singh@intel.com>
In-Reply-To: <1462984313-239763-1-git-send-email-jasvinder.singh@intel.com>

This patch enables RSS (Receive Side Scaling) per network interface
through the configuration file. The user can specify the following
parameters in the LINK section to enable the RSS feature: rss_qs,
rss_proto_ipv4, rss_proto_ipv6 and rss_proto_l2.

The "rss_qs" is mandatory parameter which indicates the queues to be
used for rss, while rest of the parameters are optional. When optional
parameters are not provided in the configuration file, default setting
(ETH_RSS_IPV4 | ETH_RSS_IPV6) is assumed for "rss_hf" field of the
rss_conf structure.
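
With these defaults, the port's RSS hash configuration effectively
resolves to the following (a sketch assembled from the
link_params_default initializer in the diff below):

	/* mirrors link_params_default in config_parse.c; needs rte_ethdev.h */
	struct rte_eth_rss_conf rss_conf = {
		.rss_key = NULL,	/* NULL selects the PMD's default hash key */
		.rss_key_len = 40,
		.rss_hf = ETH_RSS_IPV4 | ETH_RSS_IPV6,
	};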

For example, the following configuration can be applied to use RSS
on port 0 of the network interface:

[PIPELINE0]
type = MASTER
core = 0

[LINK0]
rss_qs = 0 1

[PIPELINE1]
type = PASS-THROUGH
core = 1
pktq_in = RXQ0.0 RXQ0.1 RXQ1.0
pktq_out = TXQ0.0 TXQ1.0 TXQ0.1
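
The optional entries take space-separated protocol tokens, as defined
by the parse_link_rss_proto_* functions below. For instance, a
hypothetical variant that hashes only TCP and UDP over IPv4 plus plain
IPv6 headers:

[LINK0]
rss_qs = 0 1
rss_proto_ipv4 = TCP UDP
rss_proto_ipv6 = IP

Internally, the NIC redirection table (RETA) is programmed round-robin
over the listed queues: RETA entry i maps to rss_qs[i % n_rss_qs], so
with rss_qs = 0 1 the received traffic is spread across queues 0 and 1.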

Signed-off-by: Jasvinder Singh <jasvinder.singh@intel.com>
Acked-by: Cristian Dumitrescu <cristian.dumitrescu@intel.com>
---
v3
- rebase on top of the ip_pipeline configuration file cleanup patch
  (http://dpdk.org/dev/patchwork/patch/13106/)
v2
- add check on the number of rss queue entries

 examples/ip_pipeline/app.h          |  27 ++--
 examples/ip_pipeline/config_check.c |  32 +++-
 examples/ip_pipeline/config_parse.c | 299 +++++++++++++++++++++++++++++++++++-
 examples/ip_pipeline/init.c         |  70 ++++++++-
 4 files changed, 408 insertions(+), 20 deletions(-)

diff --git a/examples/ip_pipeline/app.h b/examples/ip_pipeline/app.h
index 05d608b..976fbd0 100644
--- a/examples/ip_pipeline/app.h
+++ b/examples/ip_pipeline/app.h
@@ -51,6 +51,14 @@
 #define APP_PARAM_NAME_SIZE                      PIPELINE_NAME_SIZE
 #define APP_LINK_PCI_BDF_SIZE                    16
 
+#ifndef APP_LINK_MAX_HWQ_IN
+#define APP_LINK_MAX_HWQ_IN                      64
+#endif
+
+#ifndef APP_LINK_MAX_HWQ_OUT
+#define APP_LINK_MAX_HWQ_OUT                     64
+#endif
+
 struct app_mempool_params {
 	char *name;
 	uint32_t parsed;
@@ -70,6 +78,12 @@ struct app_link_params {
 	uint32_t tcp_local_q; /* 0 = Disabled (pkts go to default queue 0) */
 	uint32_t udp_local_q; /* 0 = Disabled (pkts go to default queue 0) */
 	uint32_t sctp_local_q; /* 0 = Disabled (pkts go to default queue 0) */
+	uint32_t rss_qs[APP_LINK_MAX_HWQ_IN];
+	uint32_t n_rss_qs;
+	uint64_t rss_proto_ipv4;
+	uint64_t rss_proto_ipv6;
+	uint64_t rss_proto_l2;
+	uint32_t promisc;
 	uint32_t state; /* DOWN = 0, UP = 1 */
 	uint32_t ip; /* 0 = Invalid */
 	uint32_t depth; /* Valid only when IP is valid */
@@ -77,7 +91,6 @@ struct app_link_params {
 	char pci_bdf[APP_LINK_PCI_BDF_SIZE];
 
 	struct rte_eth_conf conf;
-	uint8_t promisc;
 };
 
 struct app_pktq_hwq_in_params {
@@ -383,17 +396,9 @@ struct app_eal_params {
 #define APP_MAX_MEMPOOLS                         8
 #endif
 
-#ifndef APP_LINK_MAX_HWQ_IN
-#define APP_LINK_MAX_HWQ_IN                      64
-#endif
-
-#ifndef APP_LINK_MAX_HWQ_OUT
-#define APP_LINK_MAX_HWQ_OUT                     64
-#endif
-
-#define APP_MAX_HWQ_IN                     (APP_MAX_LINKS * APP_LINK_MAX_HWQ_IN)
+#define APP_MAX_HWQ_IN                  (APP_MAX_LINKS * APP_LINK_MAX_HWQ_IN)
 
-#define APP_MAX_HWQ_OUT                   (APP_MAX_LINKS * APP_LINK_MAX_HWQ_OUT)
+#define APP_MAX_HWQ_OUT                 (APP_MAX_LINKS * APP_LINK_MAX_HWQ_OUT)
 
 #ifndef APP_MAX_PKTQ_SWQ
 #define APP_MAX_PKTQ_SWQ                         256
diff --git a/examples/ip_pipeline/config_check.c b/examples/ip_pipeline/config_check.c
index fd9ff49..18f57be 100644
--- a/examples/ip_pipeline/config_check.c
+++ b/examples/ip_pipeline/config_check.c
@@ -56,6 +56,26 @@ check_mempools(struct app_params *app)
 	}
 }
 
+static inline uint32_t
+link_rxq_used(struct app_link_params *link, uint32_t q_id)
+{
+	uint32_t i;
+
+	if ((link->arp_q == q_id) ||
+		(link->tcp_syn_q == q_id) ||
+		(link->ip_local_q == q_id) ||
+		(link->tcp_local_q == q_id) ||
+		(link->udp_local_q == q_id) ||
+		(link->sctp_local_q == q_id))
+		return 1;
+
+	for (i = 0; i < link->n_rss_qs; i++)
+		if (link->rss_qs[i] == q_id)
+			return 1;
+
+	return 0;
+}
+
 static void
 check_links(struct app_params *app)
 {
@@ -90,14 +110,12 @@ check_links(struct app_params *app)
 			rxq_max = link->udp_local_q;
 		if (link->sctp_local_q > rxq_max)
 			rxq_max = link->sctp_local_q;
+		for (i = 0; i < link->n_rss_qs; i++)
+			if (link->rss_qs[i] > rxq_max)
+				rxq_max = link->rss_qs[i];
 
 		for (i = 1; i <= rxq_max; i++)
-			APP_CHECK(((link->arp_q == i) ||
-				(link->tcp_syn_q == i) ||
-				(link->ip_local_q == i) ||
-				(link->tcp_local_q == i) ||
-				(link->udp_local_q == i) ||
-				(link->sctp_local_q == i)),
+			APP_CHECK((link_rxq_used(link, i)),
 				"%s RXQs are not contiguous (A)\n", link->name);
 
 		n_rxq = app_link_get_n_rxq(app, link);
@@ -118,7 +136,7 @@ check_links(struct app_params *app)
 				"%s RXQs are not contiguous (C)\n", link->name);
 		}
 
-		/* Check that link RXQs are contiguous */
+		/* Check that link TXQs are contiguous */
 		n_txq = app_link_get_n_txq(app, link);
 
 		APP_CHECK((n_txq),  "%s does not have any TXQ\n", link->name);
diff --git a/examples/ip_pipeline/config_parse.c b/examples/ip_pipeline/config_parse.c
index 53130a0..68fa92b 100644
--- a/examples/ip_pipeline/config_parse.c
+++ b/examples/ip_pipeline/config_parse.c
@@ -81,6 +81,11 @@ static const struct app_link_params link_params_default = {
 	.tcp_local_q = 0,
 	.udp_local_q = 0,
 	.sctp_local_q = 0,
+	.rss_qs = {0},
+	.n_rss_qs = 0,
+	.rss_proto_ipv4 = ETH_RSS_IPV4,
+	.rss_proto_ipv6 = ETH_RSS_IPV6,
+	.rss_proto_l2 = 0,
 	.state = 0,
 	.ip = 0,
 	.depth = 0,
@@ -104,6 +109,13 @@ static const struct app_link_params link_params_default = {
 			.max_rx_pkt_len = 9000, /* Jumbo frame max packet len */
 			.split_hdr_size = 0, /* Header split buffer size */
 		},
+		.rx_adv_conf = {
+			.rss_conf = {
+				.rss_key = NULL,
+				.rss_key_len = 40,
+				.rss_hf = 0,
+			},
+		},
 		.txmode = {
 			.mq_mode = ETH_MQ_TX_NONE,
 		},
@@ -1125,6 +1137,149 @@ parse_mempool(struct app_params *app,
 	free(entries);
 }
 
+static int
+parse_link_rss_qs(struct app_link_params *p,
+	char *value)
+{
+	p->n_rss_qs = 0;
+
+	while (1) {
+		char *token = strtok_r(value, PARSE_DELIMITER, &value);
+
+		if (token == NULL)
+			break;
+
+		if (p->n_rss_qs == RTE_DIM(p->rss_qs))
+			return -ENOMEM;
+
+		if (parser_read_uint32(&p->rss_qs[p->n_rss_qs++], token))
+			return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int
+parse_link_rss_proto_ipv4(struct app_link_params *p,
+	char *value)
+{
+	uint64_t mask = 0;
+
+	while (1) {
+		char *token = strtok_r(value, PARSE_DELIMITER, &value);
+
+		if (token == NULL)
+			break;
+
+		if (strcmp(token, "IP") == 0) {
+			mask |= ETH_RSS_IPV4;
+			continue;
+		}
+		if (strcmp(token, "FRAG") == 0) {
+			mask |= ETH_RSS_FRAG_IPV4;
+			continue;
+		}
+		if (strcmp(token, "TCP") == 0) {
+			mask |= ETH_RSS_NONFRAG_IPV4_TCP;
+			continue;
+		}
+		if (strcmp(token, "UDP") == 0) {
+			mask |= ETH_RSS_NONFRAG_IPV4_UDP;
+			continue;
+		}
+		if (strcmp(token, "SCTP") == 0) {
+			mask |= ETH_RSS_NONFRAG_IPV4_SCTP;
+			continue;
+		}
+		if (strcmp(token, "OTHER") == 0) {
+			mask |= ETH_RSS_NONFRAG_IPV4_OTHER;
+			continue;
+		}
+		return -EINVAL;
+	}
+
+	p->rss_proto_ipv4 = mask;
+	return 0;
+}
+
+static int
+parse_link_rss_proto_ipv6(struct app_link_params *p,
+	char *value)
+{
+	uint64_t mask = 0;
+
+	while (1) {
+		char *token = strtok_r(value, PARSE_DELIMITER, &value);
+
+		if (token == NULL)
+			break;
+
+		if (strcmp(token, "IP") == 0) {
+			mask |= ETH_RSS_IPV6;
+			continue;
+		}
+		if (strcmp(token, "FRAG") == 0) {
+			mask |= ETH_RSS_FRAG_IPV6;
+			continue;
+		}
+		if (strcmp(token, "TCP") == 0) {
+			mask |= ETH_RSS_NONFRAG_IPV6_TCP;
+			continue;
+		}
+		if (strcmp(token, "UDP") == 0) {
+			mask |= ETH_RSS_NONFRAG_IPV6_UDP;
+			continue;
+		}
+		if (strcmp(token, "SCTP") == 0) {
+			mask |= ETH_RSS_NONFRAG_IPV6_SCTP;
+			continue;
+		}
+		if (strcmp(token, "OTHER") == 0) {
+			mask |= ETH_RSS_NONFRAG_IPV6_OTHER;
+			continue;
+		}
+		if (strcmp(token, "IP_EX") == 0) {
+			mask |= ETH_RSS_IPV6_EX;
+			continue;
+		}
+		if (strcmp(token, "TCP_EX") == 0) {
+			mask |= ETH_RSS_IPV6_TCP_EX;
+			continue;
+		}
+		if (strcmp(token, "UDP_EX") == 0) {
+			mask |= ETH_RSS_IPV6_UDP_EX;
+			continue;
+		}
+		return -EINVAL;
+	}
+
+	p->rss_proto_ipv6 = mask;
+	return 0;
+}
+
+static int
+parse_link_rss_proto_l2(struct app_link_params *p,
+	char *value)
+{
+	uint64_t mask = 0;
+
+	while (1) {
+		char *token = strtok_r(value, PARSE_DELIMITER, &value);
+
+		if (token == NULL)
+			break;
+
+		if (strcmp(token, "L2") == 0) {
+			mask |= ETH_RSS_L2_PAYLOAD;
+			continue;
+		}
+		return -EINVAL;
+	}
+
+	p->rss_proto_l2 = mask;
+	return 0;
+}
+
 static void
 parse_link(struct app_params *app,
 	const char *section_name,
@@ -1133,6 +1288,10 @@ parse_link(struct app_params *app,
 	struct app_link_params *param;
 	struct rte_cfgfile_entry *entries;
 	int n_entries, i;
+	int rss_qs_present = 0;
+	int rss_proto_ipv4_present = 0;
+	int rss_proto_ipv6_present = 0;
+	int rss_proto_l2_present = 0;
 	int pci_bdf_present = 0;
 	ssize_t param_idx;
 
@@ -1186,7 +1345,6 @@ parse_link(struct app_params *app,
 			continue;
 		}
 
-
 		if (strcmp(ent->name, "tcp_local_q") == 0) {
 			int status = parser_read_uint32(
 				&param->tcp_local_q, ent->value);
@@ -1214,6 +1372,44 @@ parse_link(struct app_params *app,
 			continue;
 		}
 
+		if (strcmp(ent->name, "rss_qs") == 0) {
+			int status = parse_link_rss_qs(param, ent->value);
+
+			PARSE_ERROR((status == 0), section_name,
+				ent->name);
+			rss_qs_present = 1;
+			continue;
+		}
+
+		if (strcmp(ent->name, "rss_proto_ipv4") == 0) {
+			int status =
+				parse_link_rss_proto_ipv4(param, ent->value);
+
+			PARSE_ERROR((status != -EINVAL), section_name,
+				ent->name);
+			rss_proto_ipv4_present = 1;
+			continue;
+		}
+
+		if (strcmp(ent->name, "rss_proto_ipv6") == 0) {
+			int status =
+				parse_link_rss_proto_ipv6(param, ent->value);
+
+			PARSE_ERROR((status != -EINVAL), section_name,
+				ent->name);
+			rss_proto_ipv6_present = 1;
+			continue;
+		}
+
+		if (strcmp(ent->name, "rss_proto_l2") == 0) {
+			int status = parse_link_rss_proto_l2(param, ent->value);
+
+			PARSE_ERROR((status != -EINVAL), section_name,
+				ent->name);
+			rss_proto_l2_present = 1;
+			continue;
+		}
+
 		if (strcmp(ent->name, "pci_bdf") == 0) {
 			PARSE_ERROR_DUPLICATE((pci_bdf_present == 0),
 				section_name, ent->name);
@@ -1239,6 +1435,29 @@ parse_link(struct app_params *app,
 			"this entry is mandatory (port_mask is not "
 			"provided)");
 
+	if (rss_proto_ipv4_present)
+		PARSE_ERROR_MESSAGE((rss_qs_present),
+			section_name, "rss_proto_ipv4",
+			"entry not allowed (rss_qs entry is not provided)");
+	if (rss_proto_ipv6_present)
+		PARSE_ERROR_MESSAGE((rss_qs_present),
+			section_name, "rss_proto_ipv6",
+			"entry not allowed (rss_qs entry is not provided)");
+	if (rss_proto_l2_present)
+		PARSE_ERROR_MESSAGE((rss_qs_present),
+			section_name, "rss_proto_l2",
+			"entry not allowed (rss_qs entry is not provided)");
+	if (rss_proto_ipv4_present ||
+		rss_proto_ipv6_present ||
+		rss_proto_l2_present) {
+		if (rss_proto_ipv4_present == 0)
+			param->rss_proto_ipv4 = 0;
+		if (rss_proto_ipv6_present == 0)
+			param->rss_proto_ipv6 = 0;
+		if (rss_proto_l2_present == 0)
+			param->rss_proto_l2 = 0;
+	}
+
 	free(entries);
 }
 
@@ -2237,6 +2456,84 @@ save_links_params(struct app_params *app, FILE *f)
 		fprintf(f, "%s = %" PRIu32 "\n", "sctp_local_q",
 			p->sctp_local_q);
 
+		if (p->n_rss_qs) {
+			uint32_t j;
+
+			/* rss_qs */
+			fprintf(f, "rss_qs = ");
+			for (j = 0; j < p->n_rss_qs; j++)
+				fprintf(f, "%" PRIu32 " ", p->rss_qs[j]);
+			fputc('\n', f);
+
+			/* rss_proto_ipv4 */
+			if (p->rss_proto_ipv4) {
+				fprintf(f, "rss_proto_ipv4 = ");
+				if (p->rss_proto_ipv4 & ETH_RSS_IPV4)
+					fprintf(f, "IP ");
+				if (p->rss_proto_ipv4 & ETH_RSS_FRAG_IPV4)
+					fprintf(f, "FRAG ");
+				if (p->rss_proto_ipv4 &
+					ETH_RSS_NONFRAG_IPV4_TCP)
+					fprintf(f, "TCP ");
+				if (p->rss_proto_ipv4 &
+					ETH_RSS_NONFRAG_IPV4_UDP)
+					fprintf(f, "UDP ");
+				if (p->rss_proto_ipv4 &
+					ETH_RSS_NONFRAG_IPV4_SCTP)
+					fprintf(f, "SCTP ");
+				if (p->rss_proto_ipv4 &
+					ETH_RSS_NONFRAG_IPV4_OTHER)
+					fprintf(f, "OTHER ");
+				fprintf(f, "\n");
+			} else
+				fprintf(f, "; rss_proto_ipv4 = <NONE>\n");
+
+			/* rss_proto_ipv6 */
+			if (p->rss_proto_ipv6) {
+				fprintf(f, "rss_proto_ipv6 = ");
+				if (p->rss_proto_ipv6 & ETH_RSS_IPV6)
+					fprintf(f, "IP ");
+				if (p->rss_proto_ipv6 & ETH_RSS_FRAG_IPV6)
+					fprintf(f, "FRAG ");
+				if (p->rss_proto_ipv6 &
+					ETH_RSS_NONFRAG_IPV6_TCP)
+					fprintf(f, "TCP ");
+				if (p->rss_proto_ipv6 &
+					ETH_RSS_NONFRAG_IPV6_UDP)
+					fprintf(f, "UDP ");
+				if (p->rss_proto_ipv6 &
+					ETH_RSS_NONFRAG_IPV6_SCTP)
+					fprintf(f, "SCTP ");
+				if (p->rss_proto_ipv6 &
+					ETH_RSS_NONFRAG_IPV6_OTHER)
+					fprintf(f, "OTHER ");
+				if (p->rss_proto_ipv6 & ETH_RSS_IPV6_EX)
+					fprintf(f, "IP_EX ");
+				if (p->rss_proto_ipv6 &
+					ETH_RSS_IPV6_TCP_EX)
+					fprintf(f, "TCP_EX ");
+				if (p->rss_proto_ipv6 &
+					ETH_RSS_IPV6_UDP_EX)
+					fprintf(f, "UDP_EX ");
+				fprintf(f, "\n");
+			} else
+				fprintf(f, "; rss_proto_ipv6 = <NONE>\n");
+
+			/* rss_proto_l2 */
+			if (p->rss_proto_l2) {
+				fprintf(f, "rss_proto_l2 = ");
+				if (p->rss_proto_l2 & ETH_RSS_L2_PAYLOAD)
+					fprintf(f, "L2 ");
+				fprintf(f, "\n");
+			} else
+				fprintf(f, "; rss_proto_l2 = <NONE>\n");
+		} else {
+			fprintf(f, "; rss_qs = <NONE>\n");
+			fprintf(f, "; rss_proto_ipv4 = <NONE>\n");
+			fprintf(f, "; rss_proto_ipv6 = <NONE>\n");
+			fprintf(f, "; rss_proto_l2 = <NONE>\n");
+		}
+
 		if (strlen(p->pci_bdf))
 			fprintf(f, "%s = %s\n", "pci_bdf", p->pci_bdf);
 
diff --git a/examples/ip_pipeline/init.c b/examples/ip_pipeline/init.c
index b7df490..b2eafe3 100644
--- a/examples/ip_pipeline/init.c
+++ b/examples/ip_pipeline/init.c
@@ -55,6 +55,8 @@
 
 #define APP_NAME_SIZE	32
 
+#define APP_RETA_SIZE_MAX     (ETH_RSS_RETA_SIZE_512 / RTE_RETA_GROUP_SIZE)
+
 static void
 app_init_core_map(struct app_params *app)
 {
@@ -902,6 +904,67 @@ app_get_cpu_socket_id(uint32_t pmd_id)
 	return (status != SOCKET_ID_ANY) ? status : 0;
 }
 
+static inline int
+app_link_rss_enabled(struct app_link_params *cp)
+{
+	return (cp->n_rss_qs) ? 1 : 0;
+}
+
+static void
+app_link_rss_setup(struct app_link_params *cp)
+{
+	struct rte_eth_dev_info dev_info;
+	struct rte_eth_rss_reta_entry64 reta_conf[APP_RETA_SIZE_MAX];
+	uint32_t i;
+	int status;
+
+	/* Get RETA size */
+	memset(&dev_info, 0, sizeof(dev_info));
+	rte_eth_dev_info_get(cp->pmd_id, &dev_info);
+
+	if (dev_info.reta_size == 0)
+		rte_panic("%s (%u): RSS setup error (null RETA size)\n",
+			cp->name, cp->pmd_id);
+
+	if (dev_info.reta_size > ETH_RSS_RETA_SIZE_512)
+		rte_panic("%s (%u): RSS setup error (RETA size too big)\n",
+			cp->name, cp->pmd_id);
+
+	/* Setup RETA contents */
+	memset(reta_conf, 0, sizeof(reta_conf));
+
+	for (i = 0; i < dev_info.reta_size; i++)
+		reta_conf[i / RTE_RETA_GROUP_SIZE].mask = UINT64_MAX;
+
+	for (i = 0; i < dev_info.reta_size; i++) {
+		uint32_t reta_id = i / RTE_RETA_GROUP_SIZE;
+		uint32_t reta_pos = i % RTE_RETA_GROUP_SIZE;
+		uint32_t rss_qs_pos = i % cp->n_rss_qs;
+
+		reta_conf[reta_id].reta[reta_pos] =
+			(uint16_t) cp->rss_qs[rss_qs_pos];
+	}
+
+	/* RETA update */
+	status = rte_eth_dev_rss_reta_update(cp->pmd_id,
+		reta_conf,
+		dev_info.reta_size);
+	if (status != 0)
+		rte_panic("%s (%u): RSS setup error (RETA update failed)\n",
+			cp->name, cp->pmd_id);
+}
+
+static void
+app_init_link_set_config(struct app_link_params *p)
+{
+	if (p->n_rss_qs) {
+		p->conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
+		p->conf.rx_adv_conf.rss_conf.rss_hf = p->rss_proto_ipv4 |
+			p->rss_proto_ipv6 |
+			p->rss_proto_l2;
+	}
+}
+
 static void
 app_init_link(struct app_params *app)
 {
@@ -917,6 +980,7 @@ app_init_link(struct app_params *app)
 		sscanf(p_link->name, "LINK%" PRIu32, &link_id);
 		n_hwq_in = app_link_get_n_rxq(app, p_link);
 		n_hwq_out = app_link_get_n_txq(app, p_link);
+		app_init_link_set_config(p_link);
 
 		APP_LOG(app, HIGH, "Initializing %s (%" PRIu32") "
 			"(%" PRIu32 " RXQ, %" PRIu32 " TXQ) ...",
@@ -1001,9 +1065,13 @@ app_init_link(struct app_params *app)
 			rte_panic("Cannot start %s (error %" PRId32 ")\n",
 				p_link->name, status);
 
-		/* LINK UP */
+		/* LINK FILTERS */
 		app_link_set_arp_filter(app, p_link);
 		app_link_set_tcp_syn_filter(app, p_link);
+		if (app_link_rss_enabled(p_link))
+			app_link_rss_setup(p_link);
+
+		/* LINK UP */
 		app_link_up_internal(app, p_link);
 	}
 
-- 
2.5.5
