DPDK patches and discussions
 help / color / mirror / Atom feed
From: xuan.ding@intel.com
To: xiaoyun.li@intel.com, aman.deep.singh@intel.com,
	yuying.zhang@intel.com, qi.z.zhang@intel.com,
	qiming.yang@intel.com
Cc: dev@dpdk.org, ping.yu@intel.com, Xuan Ding <xuan.ding@intel.com>,
	Yuan Wang <yuanx.wang@intel.com>
Subject: [RFC 1/2] app/testpmd: add header split configuration
Date: Thu,  3 Mar 2022 06:38:40 +0000	[thread overview]
Message-ID: <20220303063841.48763-2-xuan.ding@intel.com> (raw)
In-Reply-To: <20220303063841.48763-1-xuan.ding@intel.com>

From: Xuan Ding <xuan.ding@intel.com>

This patch adds header split configuration in testpmd. The header split
feature is off by default. To enable header split, you need:
1. Configure the Rx queue with the rx_offload header split flag on.
2. Set the protocol type of header split.

Command to set the header split protocol type:
testpmd> port config <port_id> header_split inner_l2|out_l2|ip|tcp_udp|sctp

Signed-off-by: Xuan Ding <xuan.ding@intel.com>
Signed-off-by: Yuan Wang <yuanx.wang@intel.com>
---
 app/test-pmd/cmdline.c | 85 ++++++++++++++++++++++++++++++++++++++++++
 app/test-pmd/testpmd.c |  6 ++-
 app/test-pmd/testpmd.h |  2 +
 3 files changed, 92 insertions(+), 1 deletion(-)

diff --git a/app/test-pmd/cmdline.c b/app/test-pmd/cmdline.c
index b4ba8da2b0..71b9b8bc4a 100644
--- a/app/test-pmd/cmdline.c
+++ b/app/test-pmd/cmdline.c
@@ -16352,6 +16352,90 @@ cmdline_parse_inst_t cmd_config_per_port_rx_offload = {
 	}
 };
 
+/* config a per port header split protocol */
+struct cmd_config_per_port_headersplit_protocol_result {
+	cmdline_fixed_string_t port;
+	cmdline_fixed_string_t config;
+	uint16_t port_id;
+	cmdline_fixed_string_t headersplit;
+	cmdline_fixed_string_t protocol;
+};
+
+cmdline_parse_token_string_t cmd_config_per_port_headersplit_protocol_result_port =
+	TOKEN_STRING_INITIALIZER
+		(struct cmd_config_per_port_headersplit_protocol_result,
+		 port, "port");
+cmdline_parse_token_string_t cmd_config_per_port_headersplit_protocol_result_config =
+	TOKEN_STRING_INITIALIZER
+		(struct cmd_config_per_port_headersplit_protocol_result,
+		 config, "config");
+cmdline_parse_token_num_t cmd_config_per_port_headersplit_protocol_result_port_id =
+	TOKEN_NUM_INITIALIZER
+		(struct cmd_config_per_port_headersplit_protocol_result,
+		 port_id, RTE_UINT16);
+cmdline_parse_token_string_t cmd_config_per_port_headersplit_protocol_result_headersplit =
+	TOKEN_STRING_INITIALIZER
+		(struct cmd_config_per_port_headersplit_protocol_result,
+		 headersplit, "header_split");
+cmdline_parse_token_string_t cmd_config_per_port_headersplit_protocol_result_protocol =
+	TOKEN_STRING_INITIALIZER
+		(struct cmd_config_per_port_headersplit_protocol_result,
+		 protocol, "inner_l2#out_l2#ip#tcp_udp#sctp");
+
+static void
+cmd_config_per_port_headersplit_protocol_parsed(void *parsed_result,
+				__rte_unused struct cmdline *cl,
+				__rte_unused void *data)
+{
+	struct cmd_config_per_port_headersplit_protocol_result *res = parsed_result;
+	portid_t port_id = res->port_id;
+	struct rte_port *port = &ports[port_id];
+	uint16_t protocol;
+
+	if (port_id_is_invalid(port_id, ENABLED_WARN))
+		return;
+
+	if (port->port_status != RTE_PORT_STOPPED) {
+		fprintf(stderr,
+			"Error: Can't config offload when Port %d is not stopped\n",
+			port_id);
+		return;
+	}
+
+	if (!strcmp(res->protocol, "inner_l2"))
+		protocol = RTE_ETH_RX_HEADER_SPLIT_INNER_L2;
+	else if (!strcmp(res->protocol, "out_l2"))
+		protocol = RTE_ETH_RX_HEADER_SPLIT_OUTER_L2;
+	else if (!strcmp(res->protocol, "ip"))
+		protocol = RTE_ETH_RX_HEADER_SPLIT_IP;
+	else if (!strcmp(res->protocol, "tcp_udp"))
+		protocol = RTE_ETH_RX_HEADER_SPLIT_TCP_UDP;
+	else if (!strcmp(res->protocol, "sctp"))
+		protocol = RTE_ETH_RX_HEADER_SPLIT_SCTP;
+	else {
+		fprintf(stderr, "Unknown protocol name: %s\n", res->protocol);
+		return;
+	}
+
+	rx_pkt_header_split_proto = protocol;
+
+	cmd_reconfig_device_queue(port_id, 1, 1);
+}
+
+cmdline_parse_inst_t cmd_config_per_port_headersplit_protocol = {
+	.f = cmd_config_per_port_headersplit_protocol_parsed,
+	.data = NULL,
+	.help_str = "port config <port_id> header_split inner_l2|out_l2|ip|tcp_udp|sctp",
+	.tokens = {
+		(void *)&cmd_config_per_port_headersplit_protocol_result_port,
+		(void *)&cmd_config_per_port_headersplit_protocol_result_config,
+		(void *)&cmd_config_per_port_headersplit_protocol_result_port_id,
+		(void *)&cmd_config_per_port_headersplit_protocol_result_headersplit,
+		(void *)&cmd_config_per_port_headersplit_protocol_result_protocol,
+		NULL,
+	}
+};
+
 /* Enable/Disable a per queue offloading */
 struct cmd_config_per_queue_rx_offload_result {
 	cmdline_fixed_string_t port;
@@ -18070,6 +18154,7 @@ cmdline_parse_ctx_t main_ctx[] = {
 	(cmdline_parse_inst_t *)&cmd_rx_offload_get_capa,
 	(cmdline_parse_inst_t *)&cmd_rx_offload_get_configuration,
 	(cmdline_parse_inst_t *)&cmd_config_per_port_rx_offload,
+	(cmdline_parse_inst_t *)&cmd_config_per_port_headersplit_protocol,
 	(cmdline_parse_inst_t *)&cmd_config_per_queue_rx_offload,
 	(cmdline_parse_inst_t *)&cmd_tx_offload_get_capa,
 	(cmdline_parse_inst_t *)&cmd_tx_offload_get_configuration,
diff --git a/app/test-pmd/testpmd.c b/app/test-pmd/testpmd.c
index 6d2e52c790..4aba8e4ac4 100644
--- a/app/test-pmd/testpmd.c
+++ b/app/test-pmd/testpmd.c
@@ -253,6 +253,8 @@ uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
 enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
 /**< Split policy for packets to TX. */
 
+uint8_t rx_pkt_header_split_proto;
+
 uint8_t txonly_multi_flow;
 /**< Whether multiple flows are generated in TXONLY mode. */
 
@@ -2568,7 +2570,8 @@ rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
 	int ret;
 
 	if (rx_pkt_nb_segs <= 1 ||
-	    (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) == 0) {
+	    (((rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) == 0) &&
+	     ((rx_conf->offloads & RTE_ETH_RX_OFFLOAD_HEADER_SPLIT) == 0))) {
 		rx_conf->rx_seg = NULL;
 		rx_conf->rx_nseg = 0;
 		ret = rte_eth_rx_queue_setup(port_id, rx_queue_id,
@@ -2592,6 +2595,7 @@ rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
 		rx_seg->offset = i < rx_pkt_nb_offs ?
 				   rx_pkt_seg_offsets[i] : 0;
 		rx_seg->mp = mpx ? mpx : mp;
+		rx_seg->proto = rx_pkt_header_split_proto;
 	}
 	rx_conf->rx_nseg = rx_pkt_nb_segs;
 	rx_conf->rx_seg = rx_useg;
diff --git a/app/test-pmd/testpmd.h b/app/test-pmd/testpmd.h
index 9967825044..a9681372a4 100644
--- a/app/test-pmd/testpmd.h
+++ b/app/test-pmd/testpmd.h
@@ -531,6 +531,8 @@ enum tx_pkt_split {
 
 extern enum tx_pkt_split tx_pkt_split;
 
+extern uint8_t rx_pkt_header_split_proto;
+
 extern uint8_t txonly_multi_flow;
 
 extern uint32_t rxq_share;
-- 
2.17.1


  reply	other threads:[~2022-03-03  6:40 UTC|newest]

Thread overview: 3+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2022-03-03  6:38 [RFC 0/2] net/ice: support header split in Rx data path xuan.ding
2022-03-03  6:38 ` xuan.ding [this message]
2022-03-03  6:38 ` [RFC 2/2] " xuan.ding

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20220303063841.48763-2-xuan.ding@intel.com \
    --to=xuan.ding@intel.com \
    --cc=aman.deep.singh@intel.com \
    --cc=dev@dpdk.org \
    --cc=ping.yu@intel.com \
    --cc=qi.z.zhang@intel.com \
    --cc=qiming.yang@intel.com \
    --cc=xiaoyun.li@intel.com \
    --cc=yuanx.wang@intel.com \
    --cc=yuying.zhang@intel.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).