From: beilei.xing@intel.com
To: jingjing.wu@intel.com
Cc: dev@dpdk.org, qi.z.zhang@intel.com, Beilei Xing <beilei.xing@intel.com>
Subject: [PATCH v7 08/19] common/idpf: support get packet type
Date: Mon,  6 Feb 2023 05:46:07 +0000
Message-ID: <20230206054618.40975-9-beilei.xing@intel.com>
In-Reply-To: <20230206054618.40975-1-beilei.xing@intel.com>

From: Beilei Xing <beilei.xing@intel.com>

Move the ptype_tbl field to the idpf_adapter structure so that the packet
type translation table is owned by the common adapter.
Move get_pkt_type() to the common module and fill the table during
idpf_adapter_init().

Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
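Note (not part of the commit message): a minimal sketch of how the
relocated table is meant to be consumed on the Rx path. The helper name
below is hypothetical; the rxq->adapter back-pointer and the ptype_tbl
field are the ones this patch uses in idpf_rxtx.c.

	/* Translate the 10-bit hardware ptype id taken from an Rx
	 * descriptor into an mbuf packet_type, using the table that
	 * idpf_get_pkt_type() fills during idpf_adapter_init().
	 */
	static inline void
	idpf_rx_set_pkt_type(struct idpf_rx_queue *rxq, struct rte_mbuf *mb,
			     uint16_t ptype_id_10)
	{
		const uint32_t *ptype_tbl = rxq->adapter->ptype_tbl;

		mb->packet_type = ptype_tbl[ptype_id_10];
	}
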
 drivers/common/idpf/idpf_common_device.c | 216 +++++++++++++++++++++++
 drivers/common/idpf/idpf_common_device.h |   7 +
 drivers/common/idpf/meson.build          |   2 +
 drivers/net/idpf/idpf_ethdev.c           |   6 -
 drivers/net/idpf/idpf_ethdev.h           |   4 -
 drivers/net/idpf/idpf_rxtx.c             |   4 +-
 drivers/net/idpf/idpf_rxtx.h             |   4 -
 drivers/net/idpf/idpf_rxtx_vec_avx512.c  |   3 +-
 drivers/net/idpf/idpf_vchnl.c            | 213 ----------------------
 9 files changed, 228 insertions(+), 231 deletions(-)

diff --git a/drivers/common/idpf/idpf_common_device.c b/drivers/common/idpf/idpf_common_device.c
index 336977891c..f62d4d1976 100644
--- a/drivers/common/idpf/idpf_common_device.c
+++ b/drivers/common/idpf/idpf_common_device.c
@@ -96,6 +96,216 @@ idpf_init_mbx(struct idpf_hw *hw)
 	return ret;
 }
 
+static int
+idpf_get_pkt_type(struct idpf_adapter *adapter)
+{
+	struct virtchnl2_get_ptype_info *ptype_info;
+	uint16_t ptype_offset, i, j;
+	uint16_t ptype_recvd = 0;
+	int ret;
+
+	ret = idpf_vc_query_ptype_info(adapter);
+	if (ret != 0) {
+		DRV_LOG(ERR, "Fail to query packet type information");
+		return ret;
+	}
+
+	ptype_info = rte_zmalloc("ptype_info", IDPF_DFLT_MBX_BUF_SIZE, 0);
+	if (ptype_info == NULL)
+		return -ENOMEM;
+
+	while (ptype_recvd < IDPF_MAX_PKT_TYPE) {
+		ret = idpf_vc_read_one_msg(adapter, VIRTCHNL2_OP_GET_PTYPE_INFO,
+					   IDPF_DFLT_MBX_BUF_SIZE, (uint8_t *)ptype_info);
+		if (ret != 0) {
+			DRV_LOG(ERR, "Fail to get packet type information");
+			goto free_ptype_info;
+		}
+
+		ptype_recvd += ptype_info->num_ptypes;
+		ptype_offset = sizeof(struct virtchnl2_get_ptype_info) -
+						sizeof(struct virtchnl2_ptype);
+
+		for (i = 0; i < rte_cpu_to_le_16(ptype_info->num_ptypes); i++) {
+			bool is_inner = false, is_ip = false;
+			struct virtchnl2_ptype *ptype;
+			uint32_t proto_hdr = 0;
+
+			ptype = (struct virtchnl2_ptype *)
+					((uint8_t *)ptype_info + ptype_offset);
+			ptype_offset += IDPF_GET_PTYPE_SIZE(ptype);
+			if (ptype_offset > IDPF_DFLT_MBX_BUF_SIZE) {
+				ret = -EINVAL;
+				goto free_ptype_info;
+			}
+
+			if (rte_cpu_to_le_16(ptype->ptype_id_10) == 0xFFFF)
+				goto free_ptype_info;
+
+			for (j = 0; j < ptype->proto_id_count; j++) {
+				switch (rte_cpu_to_le_16(ptype->proto_id[j])) {
+				case VIRTCHNL2_PROTO_HDR_GRE:
+				case VIRTCHNL2_PROTO_HDR_VXLAN:
+					proto_hdr &= ~RTE_PTYPE_L4_MASK;
+					proto_hdr |= RTE_PTYPE_TUNNEL_GRENAT;
+					is_inner = true;
+					break;
+				case VIRTCHNL2_PROTO_HDR_MAC:
+					if (is_inner) {
+						proto_hdr &= ~RTE_PTYPE_INNER_L2_MASK;
+						proto_hdr |= RTE_PTYPE_INNER_L2_ETHER;
+					} else {
+						proto_hdr &= ~RTE_PTYPE_L2_MASK;
+						proto_hdr |= RTE_PTYPE_L2_ETHER;
+					}
+					break;
+				case VIRTCHNL2_PROTO_HDR_VLAN:
+					if (is_inner) {
+						proto_hdr &= ~RTE_PTYPE_INNER_L2_MASK;
+						proto_hdr |= RTE_PTYPE_INNER_L2_ETHER_VLAN;
+					}
+					break;
+				case VIRTCHNL2_PROTO_HDR_PTP:
+					proto_hdr &= ~RTE_PTYPE_L2_MASK;
+					proto_hdr |= RTE_PTYPE_L2_ETHER_TIMESYNC;
+					break;
+				case VIRTCHNL2_PROTO_HDR_LLDP:
+					proto_hdr &= ~RTE_PTYPE_L2_MASK;
+					proto_hdr |= RTE_PTYPE_L2_ETHER_LLDP;
+					break;
+				case VIRTCHNL2_PROTO_HDR_ARP:
+					proto_hdr &= ~RTE_PTYPE_L2_MASK;
+					proto_hdr |= RTE_PTYPE_L2_ETHER_ARP;
+					break;
+				case VIRTCHNL2_PROTO_HDR_PPPOE:
+					proto_hdr &= ~RTE_PTYPE_L2_MASK;
+					proto_hdr |= RTE_PTYPE_L2_ETHER_PPPOE;
+					break;
+				case VIRTCHNL2_PROTO_HDR_IPV4:
+					if (!is_ip) {
+						proto_hdr |= RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
+						is_ip = true;
+					} else {
+						proto_hdr |= RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+							     RTE_PTYPE_TUNNEL_IP;
+						is_inner = true;
+					}
+					break;
+				case VIRTCHNL2_PROTO_HDR_IPV6:
+					if (!is_ip) {
+						proto_hdr |= RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
+						is_ip = true;
+					} else {
+						proto_hdr |= RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+							     RTE_PTYPE_TUNNEL_IP;
+						is_inner = true;
+					}
+					break;
+				case VIRTCHNL2_PROTO_HDR_IPV4_FRAG:
+				case VIRTCHNL2_PROTO_HDR_IPV6_FRAG:
+					if (is_inner)
+						proto_hdr |= RTE_PTYPE_INNER_L4_FRAG;
+					else
+						proto_hdr |= RTE_PTYPE_L4_FRAG;
+					break;
+				case VIRTCHNL2_PROTO_HDR_UDP:
+					if (is_inner)
+						proto_hdr |= RTE_PTYPE_INNER_L4_UDP;
+					else
+						proto_hdr |= RTE_PTYPE_L4_UDP;
+					break;
+				case VIRTCHNL2_PROTO_HDR_TCP:
+					if (is_inner)
+						proto_hdr |= RTE_PTYPE_INNER_L4_TCP;
+					else
+						proto_hdr |= RTE_PTYPE_L4_TCP;
+					break;
+				case VIRTCHNL2_PROTO_HDR_SCTP:
+					if (is_inner)
+						proto_hdr |= RTE_PTYPE_INNER_L4_SCTP;
+					else
+						proto_hdr |= RTE_PTYPE_L4_SCTP;
+					break;
+				case VIRTCHNL2_PROTO_HDR_ICMP:
+					if (is_inner)
+						proto_hdr |= RTE_PTYPE_INNER_L4_ICMP;
+					else
+						proto_hdr |= RTE_PTYPE_L4_ICMP;
+					break;
+				case VIRTCHNL2_PROTO_HDR_ICMPV6:
+					if (is_inner)
+						proto_hdr |= RTE_PTYPE_INNER_L4_ICMP;
+					else
+						proto_hdr |= RTE_PTYPE_L4_ICMP;
+					break;
+				case VIRTCHNL2_PROTO_HDR_L2TPV2:
+				case VIRTCHNL2_PROTO_HDR_L2TPV2_CONTROL:
+				case VIRTCHNL2_PROTO_HDR_L2TPV3:
+					is_inner = true;
+					proto_hdr |= RTE_PTYPE_TUNNEL_L2TP;
+					break;
+				case VIRTCHNL2_PROTO_HDR_NVGRE:
+					is_inner = true;
+					proto_hdr |= RTE_PTYPE_TUNNEL_NVGRE;
+					break;
+				case VIRTCHNL2_PROTO_HDR_GTPC_TEID:
+					is_inner = true;
+					proto_hdr |= RTE_PTYPE_TUNNEL_GTPC;
+					break;
+				case VIRTCHNL2_PROTO_HDR_GTPU:
+				case VIRTCHNL2_PROTO_HDR_GTPU_UL:
+				case VIRTCHNL2_PROTO_HDR_GTPU_DL:
+					is_inner = true;
+					proto_hdr |= RTE_PTYPE_TUNNEL_GTPU;
+					break;
+				case VIRTCHNL2_PROTO_HDR_PAY:
+				case VIRTCHNL2_PROTO_HDR_IPV6_EH:
+				case VIRTCHNL2_PROTO_HDR_PRE_MAC:
+				case VIRTCHNL2_PROTO_HDR_POST_MAC:
+				case VIRTCHNL2_PROTO_HDR_ETHERTYPE:
+				case VIRTCHNL2_PROTO_HDR_SVLAN:
+				case VIRTCHNL2_PROTO_HDR_CVLAN:
+				case VIRTCHNL2_PROTO_HDR_MPLS:
+				case VIRTCHNL2_PROTO_HDR_MMPLS:
+				case VIRTCHNL2_PROTO_HDR_CTRL:
+				case VIRTCHNL2_PROTO_HDR_ECP:
+				case VIRTCHNL2_PROTO_HDR_EAPOL:
+				case VIRTCHNL2_PROTO_HDR_PPPOD:
+				case VIRTCHNL2_PROTO_HDR_IGMP:
+				case VIRTCHNL2_PROTO_HDR_AH:
+				case VIRTCHNL2_PROTO_HDR_ESP:
+				case VIRTCHNL2_PROTO_HDR_IKE:
+				case VIRTCHNL2_PROTO_HDR_NATT_KEEP:
+				case VIRTCHNL2_PROTO_HDR_GTP:
+				case VIRTCHNL2_PROTO_HDR_GTP_EH:
+				case VIRTCHNL2_PROTO_HDR_GTPCV2:
+				case VIRTCHNL2_PROTO_HDR_ECPRI:
+				case VIRTCHNL2_PROTO_HDR_VRRP:
+				case VIRTCHNL2_PROTO_HDR_OSPF:
+				case VIRTCHNL2_PROTO_HDR_TUN:
+				case VIRTCHNL2_PROTO_HDR_VXLAN_GPE:
+				case VIRTCHNL2_PROTO_HDR_GENEVE:
+				case VIRTCHNL2_PROTO_HDR_NSH:
+				case VIRTCHNL2_PROTO_HDR_QUIC:
+				case VIRTCHNL2_PROTO_HDR_PFCP:
+				case VIRTCHNL2_PROTO_HDR_PFCP_NODE:
+				case VIRTCHNL2_PROTO_HDR_PFCP_SESSION:
+				case VIRTCHNL2_PROTO_HDR_RTP:
+				case VIRTCHNL2_PROTO_HDR_NO_PROTO:
+				default:
+					continue;
+				}
+				adapter->ptype_tbl[ptype->ptype_id_10] = proto_hdr;
+			}
+		}
+	}
+
+free_ptype_info:
+	rte_free(ptype_info);
+	clear_cmd(adapter);
+	return ret;
+}
+
 int
 idpf_adapter_init(struct idpf_adapter *adapter)
 {
@@ -135,6 +345,12 @@ idpf_adapter_init(struct idpf_adapter *adapter)
 		goto err_check_api;
 	}
 
+	ret = idpf_get_pkt_type(adapter);
+	if (ret != 0) {
+		DRV_LOG(ERR, "Failed to set ptype table");
+		goto err_check_api;
+	}
+
 	return 0;
 
 err_check_api:
diff --git a/drivers/common/idpf/idpf_common_device.h b/drivers/common/idpf/idpf_common_device.h
index a13f8818b9..0585ba3a88 100644
--- a/drivers/common/idpf/idpf_common_device.h
+++ b/drivers/common/idpf/idpf_common_device.h
@@ -5,6 +5,7 @@
 #ifndef _IDPF_COMMON_DEVICE_H_
 #define _IDPF_COMMON_DEVICE_H_
 
+#include <rte_mbuf_ptype.h>
 #include <base/idpf_prototype.h>
 #include <base/virtchnl2.h>
 #include <idpf_common_logs.h>
@@ -19,6 +20,10 @@
 
 #define IDPF_DFLT_INTERVAL	16
 
+#define IDPF_GET_PTYPE_SIZE(p)						\
+	(sizeof(struct virtchnl2_ptype) +				\
+	 (((p)->proto_id_count ? ((p)->proto_id_count - 1) : 0) * sizeof((p)->proto_id[0])))
+
 struct idpf_adapter {
 	struct idpf_hw hw;
 	struct virtchnl2_version_info virtchnl_version;
@@ -26,6 +31,8 @@ struct idpf_adapter {
 	volatile uint32_t pend_cmd; /* pending command not finished */
 	uint32_t cmd_retval; /* return value of the cmd response from cp */
 	uint8_t *mbx_resp; /* buffer to store the mailbox response from cp */
+
+	uint32_t ptype_tbl[IDPF_MAX_PKT_TYPE] __rte_cache_min_aligned;
 };
 
 struct idpf_chunks_info {
diff --git a/drivers/common/idpf/meson.build b/drivers/common/idpf/meson.build
index c8a514e02a..ea1063a7a2 100644
--- a/drivers/common/idpf/meson.build
+++ b/drivers/common/idpf/meson.build
@@ -1,6 +1,8 @@
 # SPDX-License-Identifier: BSD-3-Clause
 # Copyright(c) 2022 Intel Corporation
 
+deps += ['mbuf']
+
 sources = files(
         'idpf_common_device.c',
         'idpf_common_virtchnl.c',
diff --git a/drivers/net/idpf/idpf_ethdev.c b/drivers/net/idpf/idpf_ethdev.c
index d0799087a5..84046f955a 100644
--- a/drivers/net/idpf/idpf_ethdev.c
+++ b/drivers/net/idpf/idpf_ethdev.c
@@ -602,12 +602,6 @@ idpf_adapter_ext_init(struct rte_pci_device *pci_dev, struct idpf_adapter_ext *a
 		goto err_adapter_init;
 	}
 
-	ret = idpf_get_pkt_type(adapter);
-	if (ret != 0) {
-		PMD_INIT_LOG(ERR, "Failed to set ptype table");
-		goto err_get_ptype;
-	}
-
 	adapter->max_vport_nb = adapter->base.caps.max_vports;
 
 	adapter->vports = rte_zmalloc("vports",
diff --git a/drivers/net/idpf/idpf_ethdev.h b/drivers/net/idpf/idpf_ethdev.h
index 55be98a8ed..d30807ca41 100644
--- a/drivers/net/idpf/idpf_ethdev.h
+++ b/drivers/net/idpf/idpf_ethdev.h
@@ -89,8 +89,6 @@ struct idpf_adapter_ext {
 
 	uint16_t used_vecs_num;
 
-	uint32_t ptype_tbl[IDPF_MAX_PKT_TYPE] __rte_cache_min_aligned;
-
 	bool rx_vec_allowed;
 	bool tx_vec_allowed;
 	bool rx_use_avx512;
@@ -107,6 +105,4 @@ TAILQ_HEAD(idpf_adapter_list, idpf_adapter_ext);
 #define IDPF_ADAPTER_TO_EXT(p)					\
 	container_of((p), struct idpf_adapter_ext, base)
 
-int idpf_get_pkt_type(struct idpf_adapter_ext *adapter);
-
 #endif /* _IDPF_ETHDEV_H_ */
diff --git a/drivers/net/idpf/idpf_rxtx.c b/drivers/net/idpf/idpf_rxtx.c
index ad3e31208d..0b10e4248b 100644
--- a/drivers/net/idpf/idpf_rxtx.c
+++ b/drivers/net/idpf/idpf_rxtx.c
@@ -1407,7 +1407,7 @@ idpf_splitq_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 	rx_id_bufq1 = rxq->bufq1->rx_next_avail;
 	rx_id_bufq2 = rxq->bufq2->rx_next_avail;
 	rx_desc_ring = rxq->rx_ring;
-	ptype_tbl = ad->ptype_tbl;
+	ptype_tbl = rxq->adapter->ptype_tbl;
 
 	if ((rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) != 0)
 		rxq->hw_register_set = 1;
@@ -1812,7 +1812,7 @@ idpf_singleq_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 
 	rx_id = rxq->rx_tail;
 	rx_ring = rxq->rx_ring;
-	ptype_tbl = ad->ptype_tbl;
+	ptype_tbl = rxq->adapter->ptype_tbl;
 
 	if ((rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) != 0)
 		rxq->hw_register_set = 1;
diff --git a/drivers/net/idpf/idpf_rxtx.h b/drivers/net/idpf/idpf_rxtx.h
index 9417651b3f..cac6040943 100644
--- a/drivers/net/idpf/idpf_rxtx.h
+++ b/drivers/net/idpf/idpf_rxtx.h
@@ -82,10 +82,6 @@
 #define IDPF_TX_OFFLOAD_NOTSUP_MASK \
 		(RTE_MBUF_F_TX_OFFLOAD_MASK ^ IDPF_TX_OFFLOAD_MASK)
 
-#define IDPF_GET_PTYPE_SIZE(p) \
-	(sizeof(struct virtchnl2_ptype) + \
-	(((p)->proto_id_count ? ((p)->proto_id_count - 1) : 0) * sizeof((p)->proto_id[0])))
-
 extern uint64_t idpf_timestamp_dynflag;
 
 struct idpf_rx_queue {
diff --git a/drivers/net/idpf/idpf_rxtx_vec_avx512.c b/drivers/net/idpf/idpf_rxtx_vec_avx512.c
index efa7cd2187..fb2b6bb53c 100644
--- a/drivers/net/idpf/idpf_rxtx_vec_avx512.c
+++ b/drivers/net/idpf/idpf_rxtx_vec_avx512.c
@@ -245,8 +245,7 @@ _idpf_singleq_recv_raw_pkts_avx512(struct idpf_rx_queue *rxq,
 				   struct rte_mbuf **rx_pkts,
 				   uint16_t nb_pkts)
 {
-	struct idpf_adapter_ext *adapter = IDPF_ADAPTER_TO_EXT(rxq->adapter);
-	const uint32_t *type_table = adapter->ptype_tbl;
+	const uint32_t *type_table = rxq->adapter->ptype_tbl;
 
 	const __m256i mbuf_init = _mm256_set_epi64x(0, 0, 0,
 						    rxq->mbuf_initializer);
diff --git a/drivers/net/idpf/idpf_vchnl.c b/drivers/net/idpf/idpf_vchnl.c
index 6f4eb52beb..45d05ed108 100644
--- a/drivers/net/idpf/idpf_vchnl.c
+++ b/drivers/net/idpf/idpf_vchnl.c
@@ -23,219 +23,6 @@
 #include "idpf_ethdev.h"
 #include "idpf_rxtx.h"
 
-int __rte_cold
-idpf_get_pkt_type(struct idpf_adapter_ext *adapter)
-{
-	struct virtchnl2_get_ptype_info *ptype_info;
-	struct idpf_adapter *base;
-	uint16_t ptype_offset, i, j;
-	uint16_t ptype_recvd = 0;
-	int ret;
-
-	base = &adapter->base;
-
-	ret = idpf_vc_query_ptype_info(base);
-	if (ret != 0) {
-		PMD_DRV_LOG(ERR, "Fail to query packet type information");
-		return ret;
-	}
-
-	ptype_info = rte_zmalloc("ptype_info", IDPF_DFLT_MBX_BUF_SIZE, 0);
-		if (ptype_info == NULL)
-			return -ENOMEM;
-
-	while (ptype_recvd < IDPF_MAX_PKT_TYPE) {
-		ret = idpf_vc_read_one_msg(base, VIRTCHNL2_OP_GET_PTYPE_INFO,
-					   IDPF_DFLT_MBX_BUF_SIZE, (uint8_t *)ptype_info);
-		if (ret != 0) {
-			PMD_DRV_LOG(ERR, "Fail to get packet type information");
-			goto free_ptype_info;
-		}
-
-		ptype_recvd += ptype_info->num_ptypes;
-		ptype_offset = sizeof(struct virtchnl2_get_ptype_info) -
-						sizeof(struct virtchnl2_ptype);
-
-		for (i = 0; i < rte_cpu_to_le_16(ptype_info->num_ptypes); i++) {
-			bool is_inner = false, is_ip = false;
-			struct virtchnl2_ptype *ptype;
-			uint32_t proto_hdr = 0;
-
-			ptype = (struct virtchnl2_ptype *)
-					((uint8_t *)ptype_info + ptype_offset);
-			ptype_offset += IDPF_GET_PTYPE_SIZE(ptype);
-			if (ptype_offset > IDPF_DFLT_MBX_BUF_SIZE) {
-				ret = -EINVAL;
-				goto free_ptype_info;
-			}
-
-			if (rte_cpu_to_le_16(ptype->ptype_id_10) == 0xFFFF)
-				goto free_ptype_info;
-
-			for (j = 0; j < ptype->proto_id_count; j++) {
-				switch (rte_cpu_to_le_16(ptype->proto_id[j])) {
-				case VIRTCHNL2_PROTO_HDR_GRE:
-				case VIRTCHNL2_PROTO_HDR_VXLAN:
-					proto_hdr &= ~RTE_PTYPE_L4_MASK;
-					proto_hdr |= RTE_PTYPE_TUNNEL_GRENAT;
-					is_inner = true;
-					break;
-				case VIRTCHNL2_PROTO_HDR_MAC:
-					if (is_inner) {
-						proto_hdr &= ~RTE_PTYPE_INNER_L2_MASK;
-						proto_hdr |= RTE_PTYPE_INNER_L2_ETHER;
-					} else {
-						proto_hdr &= ~RTE_PTYPE_L2_MASK;
-						proto_hdr |= RTE_PTYPE_L2_ETHER;
-					}
-					break;
-				case VIRTCHNL2_PROTO_HDR_VLAN:
-					if (is_inner) {
-						proto_hdr &= ~RTE_PTYPE_INNER_L2_MASK;
-						proto_hdr |= RTE_PTYPE_INNER_L2_ETHER_VLAN;
-					}
-					break;
-				case VIRTCHNL2_PROTO_HDR_PTP:
-					proto_hdr &= ~RTE_PTYPE_L2_MASK;
-					proto_hdr |= RTE_PTYPE_L2_ETHER_TIMESYNC;
-					break;
-				case VIRTCHNL2_PROTO_HDR_LLDP:
-					proto_hdr &= ~RTE_PTYPE_L2_MASK;
-					proto_hdr |= RTE_PTYPE_L2_ETHER_LLDP;
-					break;
-				case VIRTCHNL2_PROTO_HDR_ARP:
-					proto_hdr &= ~RTE_PTYPE_L2_MASK;
-					proto_hdr |= RTE_PTYPE_L2_ETHER_ARP;
-					break;
-				case VIRTCHNL2_PROTO_HDR_PPPOE:
-					proto_hdr &= ~RTE_PTYPE_L2_MASK;
-					proto_hdr |= RTE_PTYPE_L2_ETHER_PPPOE;
-					break;
-				case VIRTCHNL2_PROTO_HDR_IPV4:
-					if (!is_ip) {
-						proto_hdr |= RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
-						is_ip = true;
-					} else {
-						proto_hdr |= RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
-							     RTE_PTYPE_TUNNEL_IP;
-						is_inner = true;
-					}
-						break;
-				case VIRTCHNL2_PROTO_HDR_IPV6:
-					if (!is_ip) {
-						proto_hdr |= RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
-						is_ip = true;
-					} else {
-						proto_hdr |= RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
-							     RTE_PTYPE_TUNNEL_IP;
-						is_inner = true;
-					}
-					break;
-				case VIRTCHNL2_PROTO_HDR_IPV4_FRAG:
-				case VIRTCHNL2_PROTO_HDR_IPV6_FRAG:
-					if (is_inner)
-						proto_hdr |= RTE_PTYPE_INNER_L4_FRAG;
-					else
-						proto_hdr |= RTE_PTYPE_L4_FRAG;
-					break;
-				case VIRTCHNL2_PROTO_HDR_UDP:
-					if (is_inner)
-						proto_hdr |= RTE_PTYPE_INNER_L4_UDP;
-					else
-						proto_hdr |= RTE_PTYPE_L4_UDP;
-					break;
-				case VIRTCHNL2_PROTO_HDR_TCP:
-					if (is_inner)
-						proto_hdr |= RTE_PTYPE_INNER_L4_TCP;
-					else
-						proto_hdr |= RTE_PTYPE_L4_TCP;
-					break;
-				case VIRTCHNL2_PROTO_HDR_SCTP:
-					if (is_inner)
-						proto_hdr |= RTE_PTYPE_INNER_L4_SCTP;
-					else
-						proto_hdr |= RTE_PTYPE_L4_SCTP;
-					break;
-				case VIRTCHNL2_PROTO_HDR_ICMP:
-					if (is_inner)
-						proto_hdr |= RTE_PTYPE_INNER_L4_ICMP;
-					else
-						proto_hdr |= RTE_PTYPE_L4_ICMP;
-					break;
-				case VIRTCHNL2_PROTO_HDR_ICMPV6:
-					if (is_inner)
-						proto_hdr |= RTE_PTYPE_INNER_L4_ICMP;
-					else
-						proto_hdr |= RTE_PTYPE_L4_ICMP;
-					break;
-				case VIRTCHNL2_PROTO_HDR_L2TPV2:
-				case VIRTCHNL2_PROTO_HDR_L2TPV2_CONTROL:
-				case VIRTCHNL2_PROTO_HDR_L2TPV3:
-					is_inner = true;
-					proto_hdr |= RTE_PTYPE_TUNNEL_L2TP;
-					break;
-				case VIRTCHNL2_PROTO_HDR_NVGRE:
-					is_inner = true;
-					proto_hdr |= RTE_PTYPE_TUNNEL_NVGRE;
-					break;
-				case VIRTCHNL2_PROTO_HDR_GTPC_TEID:
-					is_inner = true;
-					proto_hdr |= RTE_PTYPE_TUNNEL_GTPC;
-					break;
-				case VIRTCHNL2_PROTO_HDR_GTPU:
-				case VIRTCHNL2_PROTO_HDR_GTPU_UL:
-				case VIRTCHNL2_PROTO_HDR_GTPU_DL:
-					is_inner = true;
-					proto_hdr |= RTE_PTYPE_TUNNEL_GTPU;
-					break;
-				case VIRTCHNL2_PROTO_HDR_PAY:
-				case VIRTCHNL2_PROTO_HDR_IPV6_EH:
-				case VIRTCHNL2_PROTO_HDR_PRE_MAC:
-				case VIRTCHNL2_PROTO_HDR_POST_MAC:
-				case VIRTCHNL2_PROTO_HDR_ETHERTYPE:
-				case VIRTCHNL2_PROTO_HDR_SVLAN:
-				case VIRTCHNL2_PROTO_HDR_CVLAN:
-				case VIRTCHNL2_PROTO_HDR_MPLS:
-				case VIRTCHNL2_PROTO_HDR_MMPLS:
-				case VIRTCHNL2_PROTO_HDR_CTRL:
-				case VIRTCHNL2_PROTO_HDR_ECP:
-				case VIRTCHNL2_PROTO_HDR_EAPOL:
-				case VIRTCHNL2_PROTO_HDR_PPPOD:
-				case VIRTCHNL2_PROTO_HDR_IGMP:
-				case VIRTCHNL2_PROTO_HDR_AH:
-				case VIRTCHNL2_PROTO_HDR_ESP:
-				case VIRTCHNL2_PROTO_HDR_IKE:
-				case VIRTCHNL2_PROTO_HDR_NATT_KEEP:
-				case VIRTCHNL2_PROTO_HDR_GTP:
-				case VIRTCHNL2_PROTO_HDR_GTP_EH:
-				case VIRTCHNL2_PROTO_HDR_GTPCV2:
-				case VIRTCHNL2_PROTO_HDR_ECPRI:
-				case VIRTCHNL2_PROTO_HDR_VRRP:
-				case VIRTCHNL2_PROTO_HDR_OSPF:
-				case VIRTCHNL2_PROTO_HDR_TUN:
-				case VIRTCHNL2_PROTO_HDR_VXLAN_GPE:
-				case VIRTCHNL2_PROTO_HDR_GENEVE:
-				case VIRTCHNL2_PROTO_HDR_NSH:
-				case VIRTCHNL2_PROTO_HDR_QUIC:
-				case VIRTCHNL2_PROTO_HDR_PFCP:
-				case VIRTCHNL2_PROTO_HDR_PFCP_NODE:
-				case VIRTCHNL2_PROTO_HDR_PFCP_SESSION:
-				case VIRTCHNL2_PROTO_HDR_RTP:
-				case VIRTCHNL2_PROTO_HDR_NO_PROTO:
-				default:
-					continue;
-				}
-				adapter->ptype_tbl[ptype->ptype_id_10] = proto_hdr;
-			}
-		}
-	}
-
-free_ptype_info:
-	rte_free(ptype_info);
-	clear_cmd(base);
-	return ret;
-}
-
 #define IDPF_RX_BUF_STRIDE		64
 int
 idpf_vc_config_rxq(struct idpf_vport *vport, struct idpf_rx_queue *rxq)
-- 
2.26.2



Thread overview: 79+ messages
     [not found] <https://patches.dpdk.org/project/dpdk/cover/20230117072626.93796-1-beilei.xing@intel.com/>
2023-01-17  8:06 ` [PATCH v4 00/15] net/idpf: introduce idpf common modle beilei.xing
2023-01-17  8:06   ` [PATCH v4 01/15] common/idpf: add adapter structure beilei.xing
2023-01-17  8:06   ` [PATCH v4 02/15] common/idpf: add vport structure beilei.xing
2023-01-17  8:06   ` [PATCH v4 03/15] common/idpf: add virtual channel functions beilei.xing
2023-01-18  4:00     ` Zhang, Qi Z
2023-01-18  4:10       ` Zhang, Qi Z
2023-01-17  8:06   ` [PATCH v4 04/15] common/idpf: introduce adapter init and deinit beilei.xing
2023-01-17  8:06   ` [PATCH v4 05/15] common/idpf: add vport init/deinit beilei.xing
2023-01-17  8:06   ` [PATCH v4 06/15] common/idpf: add config RSS beilei.xing
2023-01-17  8:06   ` [PATCH v4 07/15] common/idpf: add irq map/unmap beilei.xing
2023-01-31  8:11     ` Wu, Jingjing
2023-01-17  8:06   ` [PATCH v4 08/15] common/idpf: support get packet type beilei.xing
2023-01-17  8:06   ` [PATCH v4 09/15] common/idpf: add vport info initialization beilei.xing
2023-01-31  8:24     ` Wu, Jingjing
2023-01-17  8:06   ` [PATCH v4 10/15] common/idpf: add vector flags in vport beilei.xing
2023-01-17  8:06   ` [PATCH v4 11/15] common/idpf: add rxq and txq struct beilei.xing
2023-01-17  8:06   ` [PATCH v4 12/15] common/idpf: add help functions for queue setup and release beilei.xing
2023-01-17  8:06   ` [PATCH v4 13/15] common/idpf: add Rx and Tx data path beilei.xing
2023-01-17  8:06   ` [PATCH v4 14/15] common/idpf: add vec queue setup beilei.xing
2023-01-17  8:06   ` [PATCH v4 15/15] common/idpf: add avx512 for single queue model beilei.xing
2023-02-02  9:53   ` [PATCH v5 00/15] net/idpf: introduce idpf common modle beilei.xing
2023-02-02  9:53     ` [PATCH v5 01/15] common/idpf: add adapter structure beilei.xing
2023-02-02  9:53     ` [PATCH v5 02/15] common/idpf: add vport structure beilei.xing
2023-02-02  9:53     ` [PATCH v5 03/15] common/idpf: add virtual channel functions beilei.xing
2023-02-02  9:53     ` [PATCH v5 04/15] common/idpf: introduce adapter init and deinit beilei.xing
2023-02-02  9:53     ` [PATCH v5 05/15] common/idpf: add vport init/deinit beilei.xing
2023-02-02  9:53     ` [PATCH v5 06/15] common/idpf: add config RSS beilei.xing
2023-02-02  9:53     ` [PATCH v5 07/15] common/idpf: add irq map/unmap beilei.xing
2023-02-02  9:53     ` [PATCH v5 08/15] common/idpf: support get packet type beilei.xing
2023-02-02  9:53     ` [PATCH v5 09/15] common/idpf: add vport info initialization beilei.xing
2023-02-02  9:53     ` [PATCH v5 10/15] common/idpf: add vector flags in vport beilei.xing
2023-02-02  9:53     ` [PATCH v5 11/15] common/idpf: add rxq and txq struct beilei.xing
2023-02-02  9:53     ` [PATCH v5 12/15] common/idpf: add help functions for queue setup and release beilei.xing
2023-02-02  9:53     ` [PATCH v5 13/15] common/idpf: add Rx and Tx data path beilei.xing
2023-02-02  9:53     ` [PATCH v5 14/15] common/idpf: add vec queue setup beilei.xing
2023-02-02  9:53     ` [PATCH v5 15/15] common/idpf: add avx512 for single queue model beilei.xing
2023-02-03  9:43     ` [PATCH v6 00/19] net/idpf: introduce idpf common modle beilei.xing
2023-02-03  9:43       ` [PATCH v6 01/19] common/idpf: add adapter structure beilei.xing
2023-02-03  9:43       ` [PATCH v6 02/19] common/idpf: add vport structure beilei.xing
2023-02-03  9:43       ` [PATCH v6 03/19] common/idpf: add virtual channel functions beilei.xing
2023-02-03  9:43       ` [PATCH v6 04/19] common/idpf: introduce adapter init and deinit beilei.xing
2023-02-03  9:43       ` [PATCH v6 05/19] common/idpf: add vport init/deinit beilei.xing
2023-02-03  9:43       ` [PATCH v6 06/19] common/idpf: add config RSS beilei.xing
2023-02-03  9:43       ` [PATCH v6 07/19] common/idpf: add irq map/unmap beilei.xing
2023-02-03  9:43       ` [PATCH v6 08/19] common/idpf: support get packet type beilei.xing
2023-02-03  9:43       ` [PATCH v6 09/19] common/idpf: add vport info initialization beilei.xing
2023-02-03  9:43       ` [PATCH v6 10/19] common/idpf: add vector flags in vport beilei.xing
2023-02-03  9:43       ` [PATCH v6 11/19] common/idpf: add rxq and txq struct beilei.xing
2023-02-03  9:43       ` [PATCH v6 12/19] common/idpf: add help functions for queue setup and release beilei.xing
2023-02-03  9:43       ` [PATCH v6 13/19] common/idpf: add Rx and Tx data path beilei.xing
2023-02-03  9:43       ` [PATCH v6 14/19] common/idpf: add vec queue setup beilei.xing
2023-02-03  9:43       ` [PATCH v6 15/19] common/idpf: add avx512 for single queue model beilei.xing
2023-02-03  9:43       ` [PATCH v6 16/19] common/idpf: refine API name for vport functions beilei.xing
2023-02-03  9:43       ` [PATCH v6 17/19] common/idpf: refine API name for queue config module beilei.xing
2023-02-03  9:43       ` [PATCH v6 18/19] common/idpf: refine API name for data path module beilei.xing
2023-02-03  9:43       ` [PATCH v6 19/19] common/idpf: refine API name for virtual channel functions beilei.xing
2023-02-06  2:58       ` [PATCH v6 00/19] net/idpf: introduce idpf common modle Zhang, Qi Z
2023-02-06  6:16         ` Xing, Beilei
2023-02-06  5:45       ` [PATCH v7 " beilei.xing
2023-02-06  5:46         ` [PATCH v7 01/19] common/idpf: add adapter structure beilei.xing
2023-02-06  5:46         ` [PATCH v7 02/19] common/idpf: add vport structure beilei.xing
2023-02-06  5:46         ` [PATCH v7 03/19] common/idpf: add virtual channel functions beilei.xing
2023-02-06  5:46         ` [PATCH v7 04/19] common/idpf: introduce adapter init and deinit beilei.xing
2023-02-06  5:46         ` [PATCH v7 05/19] common/idpf: add vport init/deinit beilei.xing
2023-02-06  5:46         ` [PATCH v7 06/19] common/idpf: add config RSS beilei.xing
2023-02-06  5:46         ` [PATCH v7 07/19] common/idpf: add irq map/unmap beilei.xing
2023-02-06  5:46         ` beilei.xing [this message]
2023-02-06  5:46         ` [PATCH v7 09/19] common/idpf: add vport info initialization beilei.xing
2023-02-06  5:46         ` [PATCH v7 10/19] common/idpf: add vector flags in vport beilei.xing
2023-02-06  5:46         ` [PATCH v7 11/19] common/idpf: add rxq and txq struct beilei.xing
2023-02-06  5:46         ` [PATCH v7 12/19] common/idpf: add help functions for queue setup and release beilei.xing
2023-02-06  5:46         ` [PATCH v7 13/19] common/idpf: add Rx and Tx data path beilei.xing
2023-02-06  5:46         ` [PATCH v7 14/19] common/idpf: add vec queue setup beilei.xing
2023-02-06  5:46         ` [PATCH v7 15/19] common/idpf: add avx512 for single queue model beilei.xing
2023-02-06  5:46         ` [PATCH v7 16/19] common/idpf: refine API name for vport functions beilei.xing
2023-02-06  5:46         ` [PATCH v7 17/19] common/idpf: refine API name for queue config module beilei.xing
2023-02-06  5:46         ` [PATCH v7 18/19] common/idpf: refine API name for data path module beilei.xing
2023-02-06  5:46         ` [PATCH v7 19/19] common/idpf: refine API name for virtual channel functions beilei.xing
2023-02-06 13:15         ` [PATCH v7 00/19] net/idpf: introduce idpf common modle Zhang, Qi Z
