DPDK patches and discussions
From: Ye Xiaolong <xiaolong.ye@intel.com>
To: Leyi Rong <leyi.rong@intel.com>
Cc: haiyue.wang@intel.com, wenzhuo.lu@intel.com,
	qi.z.zhang@intel.com, dev@dpdk.org
Subject: Re: [dpdk-dev] [PATCH v3 3/5] net/ice: add protocol extraction support for per Rx queue
Date: Thu, 19 Sep 2019 07:30:17 +0800
Message-ID: <20190918233017.GA77813@intel.com>
In-Reply-To: <20190917085317.57598-4-leyi.rong@intel.com>

On 09/17, Leyi Rong wrote:
>From: Haiyue Wang <haiyue.wang@intel.com>
>
>The ice hardware can extract protocol fields into the flex descriptor
>by per-queue programming. Currently, the ice PMD puts the protocol
>fields into rte_mbuf::udata64 in a type-specific format, so the
>application can access the protocol fields quickly.
>

[snip]

>+static int
>+parse_queue_set(const char *input, int xtr_type, struct ice_devargs *devargs)
>+{
>+	const char *str = input;
>+	char *end = NULL;
>+	uint32_t min, max;
>+	uint32_t idx;
>+
>+	while (isblank(*str))
>+		str++;
>+
>+	if ((!isdigit(*str) && *str != '(') || (*str == '\0'))

Minor nit: the (*str == '\0') check seems redundant here, since '\0'
is neither a digit nor '(', so the first condition already rejects it, no?

>+		return -1;
>+
>+	/* process single number or single range of number */
>+	if (*str != '(') {
>+		errno = 0;
>+		idx = strtoul(str, &end, 10);
>+		if (errno || end == NULL || idx >= ICE_MAX_QUEUE_NUM)
>+			return -1;
>+
>+		while (isblank(*end))
>+			end++;
>+
>+		min = idx;
>+		max = idx;
>+
>+		/* process single <number>-<number> */
>+		if (*end == '-') {
>+			end++;
>+			while (isblank(*end))
>+				end++;
>+			if (!isdigit(*end))
>+				return -1;
>+
>+			errno = 0;
>+			idx = strtoul(end, &end, 10);
>+			if (errno || end == NULL || idx >= ICE_MAX_QUEUE_NUM)
>+				return -1;
>+
>+			max = idx;
>+			while (isblank(*end))
>+				end++;
>+		}
>+
>+		if (*end != ':')
>+			return -1;
>+
>+		for (idx = RTE_MIN(min, max);
>+		     idx <= RTE_MAX(min, max); idx++)
>+			devargs->proto_xtr[idx] = xtr_type;
>+
>+		return 0;
>+	}
>+
>+	/* process set within bracket */
>+	str++;
>+	while (isblank(*str))
>+		str++;
>+	if (*str == '\0')
>+		return -1;
>+
>+	min = ICE_MAX_QUEUE_NUM;
>+	do {
>+		/* go ahead to the first digit */
>+		while (isblank(*str))
>+			str++;
>+		if (!isdigit(*str))
>+			return -1;
>+
>+		/* get the digit value */
>+		errno = 0;
>+		idx = strtoul(str, &end, 10);
>+		if (errno || end == NULL || idx >= ICE_MAX_QUEUE_NUM)
>+			return -1;
>+
>+		/* go ahead to separator '-',',' and ')' */
>+		while (isblank(*end))
>+			end++;
>+		if (*end == '-') {
>+			if (min == ICE_MAX_QUEUE_NUM)
>+				min = idx;
>+			else /* avoid continuous '-' */
>+				return -1;
>+		} else if (*end == ',' || *end == ')') {
>+			max = idx;
>+			if (min == ICE_MAX_QUEUE_NUM)
>+				min = idx;
>+
>+			for (idx = RTE_MIN(min, max);
>+			     idx <= RTE_MAX(min, max); idx++)
>+				devargs->proto_xtr[idx] = xtr_type;
>+
>+			min = ICE_MAX_QUEUE_NUM;
>+		} else {
>+			return -1;
>+		}
>+
>+		str = end + 1;
>+	} while (*end != ')' && *end != '\0');
>+
>+	return 0;
>+}
>+
>+static int
>+parse_queue_proto_xtr(const char *queues, struct ice_devargs *devargs)
>+{
>+	const char *queue_start;
>+	uint32_t idx;
>+	int xtr_type;
>+	char xtr_name[32];
>+
>+	while (isblank(*queues))
>+		queues++;
>+
>+	if (*queues != '[') {
>+		xtr_type = lookup_proto_xtr_type(queues);
>+		if (xtr_type < 0)
>+			return -1;
>+
>+		devargs->proto_xtr_dflt = xtr_type;

If we memset(devargs->proto_xtr, xtr_type, ICE_MAX_QUEUE_NUM) here, it
seems we don't need proto_xtr_dflt at all.
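
Something like this (untested sketch, reusing the names already in the
patch):

	if (*queues != '[') {
		xtr_type = lookup_proto_xtr_type(queues);
		if (xtr_type < 0)
			return -1;

		memset(devargs->proto_xtr, xtr_type, ICE_MAX_QUEUE_NUM);

		return 0;
	}

Then ice_pf_sw_init() could copy ad->devargs.proto_xtr[i] directly,
without the PROTO_XTR_NONE fallback below.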

>+		return 0;
>+	}
>+
>+	queues++;
>+	do {
>+		while (isblank(*queues))
>+			queues++;
>+		if (*queues == '\0')
>+			return -1;
>+
>+		queue_start = queues;
>+
>+		/* go across a complete bracket */
>+		if (*queue_start == '(') {
>+			queues += strcspn(queues, ")");
>+			if (*queues != ')')
>+				return -1;
>+		}
>+
>+		/* scan the separator ':' */
>+		queues += strcspn(queues, ":");
>+		if (*queues++ != ':')
>+			return -1;
>+		while (isblank(*queues))
>+			queues++;
>+
>+		for (idx = 0; ; idx++) {
>+			if (isblank(queues[idx]) ||
>+			    queues[idx] == ',' ||
>+			    queues[idx] == ']' ||
>+			    queues[idx] == '\0')
>+				break;
>+
>+			if (idx > sizeof(xtr_name) - 2)
>+				return -1;
>+
>+			xtr_name[idx] = queues[idx];
>+		}
>+		xtr_name[idx] = '\0';
>+		xtr_type = lookup_proto_xtr_type(xtr_name);
>+		if (xtr_type < 0)
>+			return -1;
>+
>+		queues += idx;
>+
>+		while (isblank(*queues) || *queues == ',' || *queues == ']')
>+			queues++;
>+
>+		if (parse_queue_set(queue_start, xtr_type, devargs) < 0)
>+			return -1;
>+	} while (*queues != '\0');
>+
>+	return 0;
>+}
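
Just to double check my reading of the grammar in parse_queue_set()
and parse_queue_proto_xtr(): a bare type name applies to all queues,
otherwise the value is a bracketed list of <queue set>:<type> entries,
where a queue set is a single id, a range, or a parenthesized list of
ids/ranges. So e.g. (queue ids made up for illustration):

	proto_xtr=vlan
	proto_xtr=[2:ipv4]
	proto_xtr=[0-3:vlan,(4,6-7):tcp]

Is that right? Might be worth spelling the full syntax out in the ice
doc.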
>+
>+static int
>+handle_proto_xtr_arg(__rte_unused const char *key, const char *value,
>+		     void *extra_args)
>+{
>+	struct ice_devargs *devargs = extra_args;
>+
>+	if (value == NULL || extra_args == NULL)
>+		return -EINVAL;
>+
>+	if (parse_queue_proto_xtr(value, devargs) < 0) {
>+		PMD_DRV_LOG(ERR,
>+			    "The protocol extraction parameter is wrong: '%s'",
>+			    value);
>+		return -1;
>+	}
>+
>+	return 0;
>+}
>+
>+static void
>+ice_parse_proto_xtr_devarg(struct rte_kvargs *kvlist,
>+			   struct ice_devargs *devargs)
>+{
>+	int i;
>+
>+	devargs->proto_xtr_dflt = PROTO_XTR_NONE;
>+
>+	for (i = 0; i < ICE_MAX_QUEUE_NUM; i++)
>+		devargs->proto_xtr[i] = PROTO_XTR_NONE;

memset(devargs->proto_xtr, PROTO_XTR_NONE, ICE_MAX_QUEUE_NUM) ?

>+
>+	rte_kvargs_process(kvlist, ICE_PROTO_XTR_ARG,
>+			   handle_proto_xtr_arg, devargs);

Do we need to check the return value of rte_kvargs_process here and
change ice_parse_proto_xtr_devarg to return int?
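
i.e. something along these lines (just a sketch, not tested):

	static int
	ice_parse_proto_xtr_devarg(struct rte_kvargs *kvlist,
				   struct ice_devargs *devargs)
	{
		memset(devargs->proto_xtr, PROTO_XTR_NONE,
		       ICE_MAX_QUEUE_NUM);

		/* propagate a parse failure to the caller */
		return rte_kvargs_process(kvlist, ICE_PROTO_XTR_ARG,
					  handle_proto_xtr_arg, devargs);
	}

and let ice_parse_devargs() bail out on a negative return.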

>+}
>+
>+static bool
>+ice_proto_xtr_support(struct ice_hw *hw)
>+{
>+#define FLX_REG(val, fld, idx) \
>+	(((val) & GLFLXP_RXDID_FLX_WRD_##idx##_##fld##_M) >> \
>+	 GLFLXP_RXDID_FLX_WRD_##idx##_##fld##_S)
>+	static struct {
>+		uint32_t rxdid;
>+		uint16_t protid_0;
>+		uint16_t protid_1;
>+	} xtr_sets[] = {
>+		{ ICE_RXDID_COMMS_AUX_VLAN, ICE_PROT_EVLAN_O, ICE_PROT_VLAN_O },
>+		{ ICE_RXDID_COMMS_AUX_IPV4, ICE_PROT_IPV4_OF_OR_S,
>+		  ICE_PROT_IPV4_OF_OR_S },
>+		{ ICE_RXDID_COMMS_AUX_IPV6, ICE_PROT_IPV6_OF_OR_S,
>+		  ICE_PROT_IPV6_OF_OR_S },
>+		{ ICE_RXDID_COMMS_AUX_IPV6_FLOW, ICE_PROT_IPV6_OF_OR_S,
>+		  ICE_PROT_IPV6_OF_OR_S },
>+		{ ICE_RXDID_COMMS_AUX_TCP, ICE_PROT_TCP_IL, ICE_PROT_ID_INVAL },
>+	};
>+	uint32_t i;
>+
>+	for (i = 0; i < RTE_DIM(xtr_sets); i++) {
>+		uint32_t rxdid = xtr_sets[i].rxdid;
>+		uint32_t v;
>+
>+		if (xtr_sets[i].protid_0 != ICE_PROT_ID_INVAL) {
>+			v = ICE_READ_REG(hw, GLFLXP_RXDID_FLX_WRD_4(rxdid));
>+
>+			if (FLX_REG(v, PROT_MDID, 4) != xtr_sets[i].protid_0 ||
>+			    FLX_REG(v, RXDID_OPCODE, 4) != ICE_RX_OPC_EXTRACT)
>+				return false;
>+		}
>+
>+		if (xtr_sets[i].protid_1 != ICE_PROT_ID_INVAL) {
>+			v = ICE_READ_REG(hw, GLFLXP_RXDID_FLX_WRD_5(rxdid));
>+
>+			if (FLX_REG(v, PROT_MDID, 5) != xtr_sets[i].protid_1 ||
>+			    FLX_REG(v, RXDID_OPCODE, 5) != ICE_RX_OPC_EXTRACT)
>+				return false;
>+		}
>+	}
>+
>+	return true;
>+}
>+
> static int
> ice_res_pool_init(struct ice_res_pool_info *pool, uint32_t base,
> 		  uint32_t num)
>@@ -1079,6 +1368,8 @@ ice_interrupt_handler(void *param)
> static int
> ice_pf_sw_init(struct rte_eth_dev *dev)
> {
>+	struct ice_adapter *ad =
>+			ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
> 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
> 	struct ice_hw *hw = ICE_PF_TO_HW(pf);
> 
>@@ -1088,6 +1379,21 @@ ice_pf_sw_init(struct rte_eth_dev *dev)
> 
> 	pf->lan_nb_qps = pf->lan_nb_qp_max;
> 
>+	if (ice_proto_xtr_support(hw))
>+		pf->proto_xtr = rte_zmalloc(NULL, pf->lan_nb_qps, 0);
>+
>+	if (pf->proto_xtr != NULL) {
>+		uint16_t i;
>+
>+		for (i = 0; i < pf->lan_nb_qps; i++)
>+			pf->proto_xtr[i] =
>+				ad->devargs.proto_xtr[i] != PROTO_XTR_NONE ?
>+				ad->devargs.proto_xtr[i] :
>+				ad->devargs.proto_xtr_dflt;
>+	} else {
>+		PMD_DRV_LOG(NOTICE, "Protocol extraction is disabled");
>+	}
>+
> 	return 0;
> }
> 
>@@ -1378,6 +1684,8 @@ static int ice_parse_devargs(struct rte_eth_dev *dev)
> 		return -EINVAL;
> 	}
> 
>+	ice_parse_proto_xtr_devarg(kvlist, &ad->devargs);
>+
> 	ret = rte_kvargs_process(kvlist, ICE_SAFE_MODE_SUPPORT_ARG,
> 				 &parse_bool, &ad->devargs.safe_mode_support);
> 
>@@ -1547,6 +1855,7 @@ ice_dev_init(struct rte_eth_dev *dev)
> 	ice_sched_cleanup_all(hw);
> 	rte_free(hw->port_info);
> 	ice_shutdown_all_ctrlq(hw);
>+	rte_free(pf->proto_xtr);
> 
> 	return ret;
> }
>@@ -1672,6 +1981,8 @@ ice_dev_close(struct rte_eth_dev *dev)
> 	rte_free(hw->port_info);
> 	hw->port_info = NULL;
> 	ice_shutdown_all_ctrlq(hw);
>+	rte_free(pf->proto_xtr);
>+	pf->proto_xtr = NULL;
> }
> 
> static int
>@@ -3795,6 +4106,7 @@ RTE_PMD_REGISTER_PCI(net_ice, rte_ice_pmd);
> RTE_PMD_REGISTER_PCI_TABLE(net_ice, pci_id_ice_map);
> RTE_PMD_REGISTER_KMOD_DEP(net_ice, "* igb_uio | uio_pci_generic | vfio-pci");
> RTE_PMD_REGISTER_PARAM_STRING(net_ice,
>+			      ICE_PROTO_XTR_ARG "=[queue:]<vlan|ipv4|ipv6|ipv6_flow|tcp>"
> 			      ICE_SAFE_MODE_SUPPORT_ARG "=<0|1>");
> 
> RTE_INIT(ice_init_log)
>diff --git a/drivers/net/ice/ice_ethdev.h b/drivers/net/ice/ice_ethdev.h
>index f569da833..e58192104 100644
>--- a/drivers/net/ice/ice_ethdev.h
>+++ b/drivers/net/ice/ice_ethdev.h
>@@ -263,6 +263,7 @@ struct ice_pf {
> 	uint16_t lan_nb_qp_max;
> 	uint16_t lan_nb_qps; /* The number of queue pairs of LAN */
> 	uint16_t base_queue; /* The base queue pairs index  in the device */
>+	uint8_t *proto_xtr; /* Protocol extraction type for all queues */
> 	struct ice_hw_port_stats stats_offset;
> 	struct ice_hw_port_stats stats;
> 	/* internal packet statistics, it should be excluded from the total */
>@@ -273,11 +274,15 @@ struct ice_pf {
> 	struct ice_flow_list flow_list;
> };
> 
>+#define ICE_MAX_QUEUE_NUM  2048
>+
> /**
>  * Cache devargs parse result.
>  */
> struct ice_devargs {
> 	int safe_mode_support;
>+	uint8_t proto_xtr_dflt;
>+	uint8_t proto_xtr[ICE_MAX_QUEUE_NUM];
> };
> 
> /**
>diff --git a/drivers/net/ice/ice_rxtx.c b/drivers/net/ice/ice_rxtx.c
>index d2e36853f..e28310b96 100644
>--- a/drivers/net/ice/ice_rxtx.c
>+++ b/drivers/net/ice/ice_rxtx.c
>@@ -13,6 +13,36 @@
> 		PKT_TX_TCP_SEG |		 \
> 		PKT_TX_OUTER_IP_CKSUM)
> 
>+static inline uint8_t
>+ice_rxdid_to_proto_xtr_type(uint8_t rxdid)
>+{
>+	static uint8_t xtr_map[] = {
>+		[ICE_RXDID_COMMS_AUX_VLAN]      = PROTO_XTR_VLAN,
>+		[ICE_RXDID_COMMS_AUX_IPV4]      = PROTO_XTR_IPV4,
>+		[ICE_RXDID_COMMS_AUX_IPV6]      = PROTO_XTR_IPV6,
>+		[ICE_RXDID_COMMS_AUX_IPV6_FLOW] = PROTO_XTR_IPV6_FLOW,
>+		[ICE_RXDID_COMMS_AUX_TCP]       = PROTO_XTR_TCP,
>+	};
>+
>+	return rxdid < RTE_DIM(xtr_map) ? xtr_map[rxdid] : PROTO_XTR_NONE;
>+}
>+
>+static inline uint8_t
>+ice_proto_xtr_type_to_rxdid(uint8_t xtr_type)
>+{
>+	static uint8_t rxdid_map[] = {
>+		[PROTO_XTR_VLAN]      = ICE_RXDID_COMMS_AUX_VLAN,
>+		[PROTO_XTR_IPV4]      = ICE_RXDID_COMMS_AUX_IPV4,
>+		[PROTO_XTR_IPV6]      = ICE_RXDID_COMMS_AUX_IPV6,
>+		[PROTO_XTR_IPV6_FLOW] = ICE_RXDID_COMMS_AUX_IPV6_FLOW,
>+		[PROTO_XTR_TCP]       = ICE_RXDID_COMMS_AUX_TCP,
>+	};
>+	uint8_t rxdid;
>+
>+	rxdid = xtr_type < RTE_DIM(rxdid_map) ? rxdid_map[xtr_type] : 0;
>+
>+	return rxdid != 0 ? rxdid : ICE_RXDID_COMMS_GENERIC;
>+}
> 
> static enum ice_status
> ice_program_hw_rx_queue(struct ice_rx_queue *rxq)
>@@ -84,6 +114,11 @@ ice_program_hw_rx_queue(struct ice_rx_queue *rxq)
> 	rx_ctx.showiv = 0;
> 	rx_ctx.crcstrip = (rxq->crc_len == 0) ? 1 : 0;
> 
>+	rxdid = ice_proto_xtr_type_to_rxdid(rxq->proto_xtr);
>+
>+	PMD_DRV_LOG(DEBUG, "Port (%u) - Rx queue (%u) is set with RXDID: %u",
>+		    rxq->port_id, rxq->queue_id, rxdid);
>+
> 	/* Enable Flexible Descriptors in the queue context which
> 	 * allows this driver to select a specific receive descriptor format
> 	 */
>@@ -641,6 +676,8 @@ ice_rx_queue_setup(struct rte_eth_dev *dev,
> 	rxq->drop_en = rx_conf->rx_drop_en;
> 	rxq->vsi = vsi;
> 	rxq->rx_deferred_start = rx_conf->rx_deferred_start;
>+	rxq->proto_xtr = pf->proto_xtr != NULL ?
>+			 pf->proto_xtr[queue_idx] : PROTO_XTR_NONE;
> 
> 	/* Allocate the maximun number of RX ring hardware descriptor. */
> 	len = ICE_MAX_RING_DESC;
>@@ -1062,6 +1099,10 @@ ice_rxd_to_vlan_tci(struct rte_mbuf *mb, volatile union ice_rx_flex_desc *rxdp)
> 		   mb->vlan_tci, mb->vlan_tci_outer);
> }
> 
>+#define ICE_RX_PROTO_XTR_VALID \
>+	((1 << ICE_RX_FLEX_DESC_STATUS1_XTRMD4_VALID_S) | \
>+	 (1 << ICE_RX_FLEX_DESC_STATUS1_XTRMD5_VALID_S))
>+
> static inline void
> ice_rxd_to_pkt_fields(struct rte_mbuf *mb,
> 		      volatile union ice_rx_flex_desc *rxdp)
>@@ -1075,6 +1116,26 @@ ice_rxd_to_pkt_fields(struct rte_mbuf *mb,
> 		mb->ol_flags |= PKT_RX_RSS_HASH;
> 		mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
> 	}
>+
>+#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
>+	init_proto_xtr_flds(mb);
>+
>+	stat_err = rte_le_to_cpu_16(desc->status_error1);
>+	if (stat_err & ICE_RX_PROTO_XTR_VALID) {
>+		struct proto_xtr_flds *xtr = get_proto_xtr_flds(mb);
>+
>+		if (stat_err & (1 << ICE_RX_FLEX_DESC_STATUS1_XTRMD4_VALID_S))
>+			xtr->u.raw.data0 =
>+				rte_le_to_cpu_16(desc->flex_ts.flex.aux0);
>+
>+		if (stat_err & (1 << ICE_RX_FLEX_DESC_STATUS1_XTRMD5_VALID_S))
>+			xtr->u.raw.data1 =
>+				rte_le_to_cpu_16(desc->flex_ts.flex.aux1);
>+
>+		xtr->type = ice_rxdid_to_proto_xtr_type(desc->rxdid);
>+		xtr->magic = PROTO_XTR_MAGIC_ID;
>+	}
>+#endif
> }
> 
> #ifdef RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC
>diff --git a/drivers/net/ice/ice_rxtx.h b/drivers/net/ice/ice_rxtx.h
>index 64e891875..de16637f3 100644
>--- a/drivers/net/ice/ice_rxtx.h
>+++ b/drivers/net/ice/ice_rxtx.h
>@@ -5,6 +5,7 @@
> #ifndef _ICE_RXTX_H_
> #define _ICE_RXTX_H_
> 
>+#include "rte_pmd_ice.h"
> #include "ice_ethdev.h"
> 
> #define ICE_ALIGN_RING_DESC  32
>@@ -78,6 +79,7 @@ struct ice_rx_queue {
> 	uint16_t max_pkt_len; /* Maximum packet length */
> 	bool q_set; /* indicate if rx queue has been configured */
> 	bool rx_deferred_start; /* don't start this queue in dev start */
>+	uint8_t proto_xtr; /* Protocol extraction from flexible descriptor */
> 	ice_rx_release_mbufs_t rx_rel_mbufs;
> };
> 
>diff --git a/drivers/net/ice/ice_rxtx_vec_common.h b/drivers/net/ice/ice_rxtx_vec_common.h
>index c5f0d564f..080ca4175 100644
>--- a/drivers/net/ice/ice_rxtx_vec_common.h
>+++ b/drivers/net/ice/ice_rxtx_vec_common.h
>@@ -234,6 +234,9 @@ ice_rx_vec_queue_default(struct ice_rx_queue *rxq)
> 	if (rxq->nb_rx_desc % rxq->rx_free_thresh)
> 		return -1;
> 
>+	if (rxq->proto_xtr != PROTO_XTR_NONE)
>+		return -1;
>+
> 	return 0;
> }
> 
>diff --git a/drivers/net/ice/meson.build b/drivers/net/ice/meson.build
>index 36b4b3c85..6828170a9 100644
>--- a/drivers/net/ice/meson.build
>+++ b/drivers/net/ice/meson.build
>@@ -34,3 +34,5 @@ if arch_subdir == 'x86'
> 		objs += ice_avx2_lib.extract_objects('ice_rxtx_vec_avx2.c')
> 	endif
> endif
>+
>+install_headers('rte_pmd_ice.h')
>diff --git a/drivers/net/ice/rte_pmd_ice.h b/drivers/net/ice/rte_pmd_ice.h
>new file mode 100644
>index 000000000..719487e1e
>--- /dev/null
>+++ b/drivers/net/ice/rte_pmd_ice.h
>@@ -0,0 +1,152 @@
>+/* SPDX-License-Identifier: BSD-3-Clause
>+ * Copyright(c) 2019 Intel Corporation
>+ */
>+
>+#ifndef _RTE_PMD_ICE_H_
>+#define _RTE_PMD_ICE_H_
>+
>+#include <stdio.h>
>+#include <rte_mbuf.h>
>+#include <rte_ethdev.h>
>+
>+#ifdef __cplusplus
>+extern "C" {
>+#endif
>+
>+enum proto_xtr_type {
>+	PROTO_XTR_NONE,
>+	PROTO_XTR_VLAN,
>+	PROTO_XTR_IPV4,
>+	PROTO_XTR_IPV6,
>+	PROTO_XTR_IPV6_FLOW,
>+	PROTO_XTR_TCP,
>+};
>+
>+struct proto_xtr_flds {
>+	union {
>+		struct {
>+			uint16_t data0;
>+			uint16_t data1;
>+		} raw;
>+		struct {
>+			uint16_t stag_vid:12,
>+				 stag_dei:1,
>+				 stag_pcp:3;
>+			uint16_t ctag_vid:12,
>+				 ctag_dei:1,
>+				 ctag_pcp:3;
>+		} vlan;
>+		struct {
>+			uint16_t protocol:8,
>+				 ttl:8;
>+			uint16_t tos:8,
>+				 ihl:4,
>+				 version:4;
>+		} ipv4;
>+		struct {
>+			uint16_t hoplimit:8,
>+				 nexthdr:8;
>+			uint16_t flowhi4:4,
>+				 tc:8,
>+				 version:4;
>+		} ipv6;
>+		struct {
>+			uint16_t flowlo16;
>+			uint16_t flowhi4:4,
>+				 tc:8,
>+				 version:4;
>+		} ipv6_flow;
>+		struct {
>+			uint16_t fin:1,
>+				 syn:1,
>+				 rst:1,
>+				 psh:1,
>+				 ack:1,
>+				 urg:1,
>+				 ece:1,
>+				 cwr:1,
>+				 res1:4,
>+				 doff:4;
>+			uint16_t rsvd;
>+		} tcp;
>+	} u;
>+
>+	uint16_t rsvd;
>+
>+	uint8_t type;
>+
>+#define PROTO_XTR_MAGIC_ID	0xCE
>+	uint8_t magic;
>+};
>+
>+static inline void
>+init_proto_xtr_flds(struct rte_mbuf *mb)
>+{
>+	mb->udata64 = 0;
>+}
>+
>+static inline struct proto_xtr_flds *
>+get_proto_xtr_flds(struct rte_mbuf *mb)
>+{
>+	RTE_BUILD_BUG_ON(sizeof(struct proto_xtr_flds) > sizeof(mb->udata64));
>+
>+	return (struct proto_xtr_flds *)&mb->udata64;
>+}
>+
>+static inline void
>+dump_proto_xtr_flds(struct rte_mbuf *mb)
>+{
>+	struct proto_xtr_flds *xtr = get_proto_xtr_flds(mb);
>+
>+	if (xtr->magic != PROTO_XTR_MAGIC_ID || xtr->type == PROTO_XTR_NONE)
>+		return;
>+
>+	printf(" - Protocol Extraction:[0x%04x:0x%04x],",
>+	       xtr->u.raw.data0, xtr->u.raw.data1);
>+
>+	if (xtr->type == PROTO_XTR_VLAN)
>+		printf("vlan,stag=%u:%u:%u,ctag=%u:%u:%u ",
>+		       xtr->u.vlan.stag_pcp,
>+		       xtr->u.vlan.stag_dei,
>+		       xtr->u.vlan.stag_vid,
>+		       xtr->u.vlan.ctag_pcp,
>+		       xtr->u.vlan.ctag_dei,
>+		       xtr->u.vlan.ctag_vid);
>+	else if (xtr->type == PROTO_XTR_IPV4)
>+		printf("ipv4,ver=%u,hdrlen=%u,tos=%u,ttl=%u,proto=%u ",
>+		       xtr->u.ipv4.version,
>+		       xtr->u.ipv4.ihl,
>+		       xtr->u.ipv4.tos,
>+		       xtr->u.ipv4.ttl,
>+		       xtr->u.ipv4.protocol);
>+	else if (xtr->type == PROTO_XTR_IPV6)
>+		printf("ipv6,ver=%u,tc=%u,flow_hi4=0x%x,nexthdr=%u,hoplimit=%u ",
>+		       xtr->u.ipv6.version,
>+		       xtr->u.ipv6.tc,
>+		       xtr->u.ipv6.flowhi4,
>+		       xtr->u.ipv6.nexthdr,
>+		       xtr->u.ipv6.hoplimit);
>+	else if (xtr->type == PROTO_XTR_IPV6_FLOW)
>+		printf("ipv6_flow,ver=%u,tc=%u,flow=0x%x%04x ",
>+		       xtr->u.ipv6_flow.version,
>+		       xtr->u.ipv6_flow.tc,
>+		       xtr->u.ipv6_flow.flowhi4,
>+		       xtr->u.ipv6_flow.flowlo16);
>+	else if (xtr->type == PROTO_XTR_TCP)
>+		printf("tcp,doff=%u,flags=%s%s%s%s%s%s%s%s ",
>+		       xtr->u.tcp.doff,
>+		       xtr->u.tcp.cwr ? "C" : "",
>+		       xtr->u.tcp.ece ? "E" : "",
>+		       xtr->u.tcp.urg ? "U" : "",
>+		       xtr->u.tcp.ack ? "A" : "",
>+		       xtr->u.tcp.psh ? "P" : "",
>+		       xtr->u.tcp.rst ? "R" : "",
>+		       xtr->u.tcp.syn ? "S" : "",
>+		       xtr->u.tcp.fin ? "F" : "");
>+}
>+
>+#ifdef __cplusplus
>+}
>+#endif
>+
>+#endif /* _RTE_PMD_ICE_H_ */
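
BTW, nice that with this header installed the application side stays
simple. Untested sketch of what an app's rx path could look like
(assuming mb is an mbuf just received on a queue configured with
proto_xtr=ipv4):

	#include <rte_pmd_ice.h>

	/* after rte_eth_rx_burst() */
	struct proto_xtr_flds *xtr = get_proto_xtr_flds(mb);

	if (xtr->magic == PROTO_XTR_MAGIC_ID &&
	    xtr->type == PROTO_XTR_IPV4)
		printf("ttl=%u proto=%u\n",
		       xtr->u.ipv4.ttl, xtr->u.ipv4.protocol);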
>-- 
>2.17.1
>


Thread overview: 54+ messages
2019-08-29  2:34 [dpdk-dev] [PATCH 0/6] enable Rx flexible descriptor Leyi Rong
2019-08-29  2:34 ` [dpdk-dev] [PATCH 1/6] net/ice: add Rx flex descriptor definition Leyi Rong
2019-08-29  8:04   ` [dpdk-dev] [PATCH v2 0/6] enable Rx flexible descriptor Leyi Rong
2019-08-29  8:04     ` [dpdk-dev] [PATCH v2 1/6] net/ice: add Rx flex descriptor definition Leyi Rong
2019-08-29  8:04     ` [dpdk-dev] [PATCH v2 2/6] net/ice: handle the Rx flex descriptor Leyi Rong
2019-08-29  8:04     ` [dpdk-dev] [PATCH v2 3/6] net/ice: add protocol extraction support for per Rx queue Leyi Rong
2019-08-29  8:04     ` [dpdk-dev] [PATCH v2 4/6] net/ice: support more ptype Leyi Rong
2019-08-29  8:04     ` [dpdk-dev] [PATCH v2 5/6] net/ice: switch to flexible descriptor in SSE path Leyi Rong
2019-08-29  8:04     ` [dpdk-dev] [PATCH v2 6/6] net/ice: switch to Rx flexible descriptor in AVX path Leyi Rong
2019-08-29 23:31       ` Zhang, Qi Z
2019-08-30  1:05         ` Wang, Haiyue
2019-08-30  1:06           ` Zhang, Qi Z
2019-08-30  6:17         ` Rong, Leyi
2019-08-29  2:34 ` [dpdk-dev] [PATCH 2/6] net/ice: handle the Rx flex descriptor Leyi Rong
2019-08-29  2:34 ` [dpdk-dev] [PATCH 3/6] net/ice: add protocol extraction support for per Rx queue Leyi Rong
2019-08-29  2:34 ` [dpdk-dev] [PATCH 4/6] net/ice: support more ptype Leyi Rong
2019-08-29  2:34 ` [dpdk-dev] [PATCH 5/6] net/ice: switch to flexible descriptor in SSE path Leyi Rong
2019-08-29  2:34 ` [dpdk-dev] [PATCH 6/6] net/ice: switch to Rx flexible descriptor in AVX path Leyi Rong
2019-09-17  8:53 ` [dpdk-dev] [PATCH v3 0/5] enable Rx flexible descriptor Leyi Rong
2019-09-17  8:53   ` [dpdk-dev] [PATCH v3 1/5] net/ice: add Rx flex descriptor definition Leyi Rong
2019-09-18 21:56     ` Ye Xiaolong
2019-09-17  8:53   ` [dpdk-dev] [PATCH v3 2/5] net/ice: handle the Rx flex descriptor Leyi Rong
2019-09-17  8:53   ` [dpdk-dev] [PATCH v3 3/5] net/ice: add protocol extraction support for per Rx queue Leyi Rong
2019-09-18 23:30     ` Ye Xiaolong [this message]
2019-09-19  1:36       ` Wang, Haiyue
2019-09-19  1:44       ` Wang, Haiyue
2019-09-17  8:53   ` [dpdk-dev] [PATCH v3 4/5] net/ice: switch to flexible descriptor in SSE path Leyi Rong
2019-09-17  8:53   ` [dpdk-dev] [PATCH v3 5/5] net/ice: switch to Rx flexible descriptor in AVX path Leyi Rong
2019-09-19  6:25 ` [dpdk-dev] [PATCH v4 0/6] enable Rx flexible descriptor Leyi Rong
2019-09-19  6:25   ` [dpdk-dev] [PATCH v4 1/6] net/ice: add Rx flex descriptor definition Leyi Rong
2019-09-19  6:25   ` [dpdk-dev] [PATCH v4 2/6] net/ice: handle the Rx flex descriptor Leyi Rong
2019-09-23 11:05     ` Ye Xiaolong
2019-09-19  6:25   ` [dpdk-dev] [PATCH v4 3/6] net/ice: add protocol extraction support for per Rx queue Leyi Rong
2019-09-23  3:25     ` Yang, Qiming
2019-09-23  3:34       ` Wang, Haiyue
2019-09-23  8:29       ` Ye Xiaolong
2019-09-23 11:03         ` Wang, Haiyue
2019-09-23 14:24     ` Ye Xiaolong
2019-09-23 15:00       ` Wang, Haiyue
2019-09-23 15:55         ` Ye Xiaolong
2019-09-19  6:25   ` [dpdk-dev] [PATCH v4 4/6] net/ice: switch to flexible descriptor in SSE path Leyi Rong
2019-09-19  6:25   ` [dpdk-dev] [PATCH v4 5/6] net/ice: switch to Rx flexible descriptor in AVX path Leyi Rong
2019-09-19  6:25   ` [dpdk-dev] [PATCH v4 6/6] net/ice: remove Rx legacy descriptor definition Leyi Rong
2019-09-23 14:31     ` Ye Xiaolong
2019-09-19  6:38   ` [dpdk-dev] [PATCH v4 0/6] enable Rx flexible descriptor Zhang, Qi Z
2019-09-24  2:38 ` [dpdk-dev] [PATCH v5 " Leyi Rong
2019-09-24  2:38   ` [dpdk-dev] [PATCH v5 1/6] net/ice: add Rx flex descriptor definition Leyi Rong
2019-09-24  2:38   ` [dpdk-dev] [PATCH v5 2/6] net/ice: handle the Rx flex descriptor Leyi Rong
2019-09-24  2:38   ` [dpdk-dev] [PATCH v5 3/6] net/ice: add protocol extraction support for per Rx queue Leyi Rong
2019-09-24  9:02     ` Ye Xiaolong
2019-09-24  2:38   ` [dpdk-dev] [PATCH v5 4/6] net/ice: switch to flexible descriptor in SSE path Leyi Rong
2019-09-24  2:38   ` [dpdk-dev] [PATCH v5 5/6] net/ice: switch to Rx flexible descriptor in AVX path Leyi Rong
2019-09-24  2:38   ` [dpdk-dev] [PATCH v5 6/6] net/ice: remove Rx legacy descriptor definition Leyi Rong
2019-09-24  9:05   ` [dpdk-dev] [PATCH v5 0/6] enable Rx flexible descriptor Ye Xiaolong
