From: Junfeng Guo <junfeng.guo@intel.com>
To: andrew.rybchenko@oktetlabs.ru, qi.z.zhang@intel.com, jingjing.wu@intel.com,
 beilei.xing@intel.com
Cc: dev@dpdk.org, Junfeng Guo <junfeng.guo@intel.com>,
 Wenjun Wu <wenjun1.wu@intel.com>
Subject: [PATCH v10 05/14] net/idpf: add support for packet type get
Date: Mon, 24 Oct 2022 21:01:25 +0800
Message-Id: <20221024130134.1046536-6-junfeng.guo@intel.com>
X-Mailer: git-send-email 2.34.1
In-Reply-To: <20221024130134.1046536-1-junfeng.guo@intel.com>
References: <20221021051821.2164939-2-junfeng.guo@intel.com>
 <20221024130134.1046536-1-junfeng.guo@intel.com>
MIME-Version: 1.0
Content-Transfer-Encoding: 8bit

Add dev ops dev_supported_ptypes_get to report which packet types the
driver can parse. The adapter's ptype table is populated at init time
by querying the device through the VIRTCHNL2_OP_GET_PTYPE_INFO mailbox
message.

Signed-off-by: Wenjun Wu <wenjun1.wu@intel.com>
Signed-off-by: Junfeng Guo <junfeng.guo@intel.com>
---
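Note: an application retrieves the list exposed by the new dev op
through the generic ethdev API. A minimal usage sketch follows,
assuming a valid port_id; the helper name and buffer sizes are
illustrative only and are not part of this patch:

  #include <stdio.h>

  #include <rte_common.h>
  #include <rte_ethdev.h>
  #include <rte_mbuf_ptype.h>

  /* Illustrative helper (not part of this patch): print each packet
   * type a port reports it can parse. rte_eth_dev_get_supported_ptypes()
   * dispatches to the dev_supported_ptypes_get dev op added here.
   */
  static void
  print_supported_ptypes(uint16_t port_id)
  {
  	uint32_t ptypes[32];
  	char name[64];
  	int i, num;

  	num = rte_eth_dev_get_supported_ptypes(port_id, RTE_PTYPE_ALL_MASK,
  					       ptypes, RTE_DIM(ptypes));
  	if (num > (int)RTE_DIM(ptypes))
  		num = RTE_DIM(ptypes);	/* return value may exceed the buffer */
  	for (i = 0; i < num; i++) {
  		rte_get_ptype_name(ptypes[i], name, sizeof(name));
  		printf("port %u ptype: %s\n", port_id, name);
  	}
  }
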
 doc/guides/nics/features/idpf.ini |   1 +
 drivers/net/idpf/idpf_ethdev.c    |   7 +
 drivers/net/idpf/idpf_ethdev.h    |   6 +
 drivers/net/idpf/idpf_rxtx.c      |  19 +++
 drivers/net/idpf/idpf_rxtx.h      |   7 +
 drivers/net/idpf/idpf_vchnl.c     | 240 ++++++++++++++++++++++++++++++
 6 files changed, 280 insertions(+)

diff --git a/doc/guides/nics/features/idpf.ini b/doc/guides/nics/features/idpf.ini
index 30e1c0831e..a03068df85 100644
--- a/doc/guides/nics/features/idpf.ini
+++ b/doc/guides/nics/features/idpf.ini
@@ -8,4 +8,5 @@
 ;
 [Features]
 Queue start/stop     = Y
+Packet type parsing  = Y
 Linux                = Y
diff --git a/drivers/net/idpf/idpf_ethdev.c b/drivers/net/idpf/idpf_ethdev.c
index 4c058660b4..80826569c8 100644
--- a/drivers/net/idpf/idpf_ethdev.c
+++ b/drivers/net/idpf/idpf_ethdev.c
@@ -57,6 +57,7 @@ idpf_dev_link_update(struct rte_eth_dev *dev,
 }
 
 static const struct eth_dev_ops idpf_eth_dev_ops = {
+	.dev_supported_ptypes_get	= idpf_dev_supported_ptypes_get,
 	.dev_configure			= idpf_dev_configure,
 	.dev_start			= idpf_dev_start,
 	.dev_stop			= idpf_dev_stop,
@@ -641,6 +642,12 @@ idpf_adapter_init(struct rte_pci_device *pci_dev, struct idpf_adapter *adapter)
 		goto err_api;
 	}
 
+	ret = idpf_get_pkt_type(adapter);
+	if (ret != 0) {
+		PMD_INIT_LOG(ERR, "Failed to set ptype table");
+		goto err_api;
+	}
+
 	adapter->caps = rte_zmalloc("idpf_caps",
 				sizeof(struct virtchnl2_get_capabilities), 0);
 	if (adapter->caps == NULL) {
diff --git a/drivers/net/idpf/idpf_ethdev.h b/drivers/net/idpf/idpf_ethdev.h
index 094f123be1..58f070c976 100644
--- a/drivers/net/idpf/idpf_ethdev.h
+++ b/drivers/net/idpf/idpf_ethdev.h
@@ -47,6 +47,8 @@
 
 #define IDPF_NUM_MACADDR_MAX	64
 
+#define IDPF_MAX_PKT_TYPE	1024
+
 #define IDPF_VLAN_TAG_SIZE	4
 #define IDPF_ETH_OVERHEAD \
 	(RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + IDPF_VLAN_TAG_SIZE * 2)
@@ -141,6 +143,8 @@ struct idpf_adapter {
 	uint32_t max_rxq_per_msg;
 	uint32_t max_txq_per_msg;
 
+	uint32_t ptype_tbl[IDPF_MAX_PKT_TYPE] __rte_cache_min_aligned;
+
 	bool stopped;
 };
 
@@ -202,6 +206,7 @@ int idpf_dev_link_update(struct rte_eth_dev *dev,
 			 __rte_unused int wait_to_complete);
 void idpf_handle_virtchnl_msg(struct rte_eth_dev *dev);
 int idpf_vc_check_api_version(struct idpf_adapter *adapter);
+int idpf_get_pkt_type(struct idpf_adapter *adapter);
 int idpf_vc_get_caps(struct idpf_adapter *adapter);
 int idpf_vc_create_vport(struct idpf_adapter *adapter);
 int idpf_vc_destroy_vport(struct idpf_vport *vport);
@@ -213,6 +218,7 @@ int idpf_switch_queue(struct idpf_vport *vport, uint16_t qid,
 		      bool rx, bool on);
 int idpf_vc_ena_dis_queues(struct idpf_vport *vport, bool enable);
 int idpf_vc_ena_dis_vport(struct idpf_vport *vport, bool enable);
+int idpf_vc_query_ptype_info(struct idpf_adapter *adapter);
 int idpf_read_one_msg(struct idpf_adapter *adapter, uint32_t ops,
 		      uint16_t buf_len, uint8_t *buf);
 
diff --git a/drivers/net/idpf/idpf_rxtx.c b/drivers/net/idpf/idpf_rxtx.c
index 298eaf0a1a..c3c4acb69f 100644
--- a/drivers/net/idpf/idpf_rxtx.c
+++ b/drivers/net/idpf/idpf_rxtx.c
@@ -8,6 +8,25 @@
 #include "idpf_ethdev.h"
 #include "idpf_rxtx.h"
 
+const uint32_t *
+idpf_dev_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused)
+{
+	static const uint32_t ptypes[] = {
+		RTE_PTYPE_L2_ETHER,
+		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
+		RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
+		RTE_PTYPE_L4_FRAG,
+		RTE_PTYPE_L4_NONFRAG,
+		RTE_PTYPE_L4_UDP,
+		RTE_PTYPE_L4_TCP,
+		RTE_PTYPE_L4_SCTP,
+		RTE_PTYPE_L4_ICMP,
+		RTE_PTYPE_UNKNOWN
+	};
+
+	return ptypes;
+}
+
 static inline int
 check_rx_thresh(uint16_t nb_desc, uint16_t thresh)
 {
diff --git a/drivers/net/idpf/idpf_rxtx.h b/drivers/net/idpf/idpf_rxtx.h
index 9f767079b2..11f1fde5d4 100644
--- a/drivers/net/idpf/idpf_rxtx.h
+++ b/drivers/net/idpf/idpf_rxtx.h
@@ -21,6 +21,10 @@
 #define IDPF_DEFAULT_TX_RS_THRESH	32
 #define IDPF_DEFAULT_TX_FREE_THRESH	32
 
+#define IDPF_GET_PTYPE_SIZE(p) \
+	(sizeof(struct virtchnl2_ptype) + \
+	(((p)->proto_id_count ? ((p)->proto_id_count - 1) : 0) * sizeof((p)->proto_id[0])))
+
 struct idpf_rx_queue {
 	struct idpf_adapter *adapter;	/* the adapter this queue belongs to */
 	struct rte_mempool *mp;		/* mbuf pool to populate Rx ring */
@@ -137,4 +141,7 @@ int idpf_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id);
 void idpf_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
 
 void idpf_stop_queues(struct rte_eth_dev *dev);
+
+const uint32_t *idpf_dev_supported_ptypes_get(struct rte_eth_dev *dev);
+
 #endif /* _IDPF_RXTX_H_ */
diff --git a/drivers/net/idpf/idpf_vchnl.c b/drivers/net/idpf/idpf_vchnl.c
index 9138799989..86a55d2ff1 100644
--- a/drivers/net/idpf/idpf_vchnl.c
+++ b/drivers/net/idpf/idpf_vchnl.c
@@ -238,6 +238,11 @@ idpf_execute_vc_cmd(struct idpf_adapter *adapter, struct idpf_cmd_info *args)
 		err = idpf_read_one_msg(adapter, args->ops, args->out_size, args->out_buffer);
 		clear_cmd(adapter);
 		break;
+	case VIRTCHNL2_OP_GET_PTYPE_INFO:
+		/* For multiple response messages,
+		 * do not handle the response here.
+		 */
+		break;
 	default:
 		/* For other virtchnl ops in running time,
 		 * wait for the cmd done flag.
@@ -286,6 +291,215 @@ idpf_vc_check_api_version(struct idpf_adapter *adapter)
 	return err;
 }
 
+int __rte_cold
+idpf_get_pkt_type(struct idpf_adapter *adapter)
+{
+	struct virtchnl2_get_ptype_info *ptype_info;
+	uint16_t ptype_recvd = 0, ptype_offset, i, j;
+	int ret;
+
+	ret = idpf_vc_query_ptype_info(adapter);
+	if (ret != 0) {
+		PMD_DRV_LOG(ERR, "Failed to query packet type information");
+		return ret;
+	}
+
+	ptype_info = rte_zmalloc("ptype_info", IDPF_DFLT_MBX_BUF_SIZE, 0);
+	if (ptype_info == NULL)
+		return -ENOMEM;
+
+	while (ptype_recvd < IDPF_MAX_PKT_TYPE) {
+		ret = idpf_read_one_msg(adapter, VIRTCHNL2_OP_GET_PTYPE_INFO,
+					IDPF_DFLT_MBX_BUF_SIZE, (u8 *)ptype_info);
+		if (ret != 0) {
+			PMD_DRV_LOG(ERR, "Failed to get packet type information");
+			goto free_ptype_info;
+		}
+
+		ptype_recvd += rte_le_to_cpu_16(ptype_info->num_ptypes);
+		ptype_offset = sizeof(struct virtchnl2_get_ptype_info) -
+						sizeof(struct virtchnl2_ptype);
+
+		for (i = 0; i < rte_le_to_cpu_16(ptype_info->num_ptypes); i++) {
+			bool is_inner = false, is_ip = false;
+			struct virtchnl2_ptype *ptype;
+			uint32_t proto_hdr = 0;
+
+			ptype = (struct virtchnl2_ptype *)
+					((u8 *)ptype_info + ptype_offset);
+			ptype_offset += IDPF_GET_PTYPE_SIZE(ptype);
+			if (ptype_offset > IDPF_DFLT_MBX_BUF_SIZE) {
+				ret = -EINVAL;
+				goto free_ptype_info;
+			}
+
+			if (rte_le_to_cpu_16(ptype->ptype_id_10) == 0xFFFF)
+				goto free_ptype_info;
+
+			for (j = 0; j < ptype->proto_id_count; j++) {
+				switch (rte_le_to_cpu_16(ptype->proto_id[j])) {
+				case VIRTCHNL2_PROTO_HDR_GRE:
+				case VIRTCHNL2_PROTO_HDR_VXLAN:
+					proto_hdr &= ~RTE_PTYPE_L4_MASK;
+					proto_hdr |= RTE_PTYPE_TUNNEL_GRENAT;
+					is_inner = true;
+					break;
+				case VIRTCHNL2_PROTO_HDR_MAC:
+					if (is_inner) {
+						proto_hdr &= ~RTE_PTYPE_INNER_L2_MASK;
+						proto_hdr |= RTE_PTYPE_INNER_L2_ETHER;
+					} else {
+						proto_hdr &= ~RTE_PTYPE_L2_MASK;
+						proto_hdr |= RTE_PTYPE_L2_ETHER;
+					}
+					break;
+				case VIRTCHNL2_PROTO_HDR_VLAN:
+					if (is_inner) {
+						proto_hdr &= ~RTE_PTYPE_INNER_L2_MASK;
+						proto_hdr |= RTE_PTYPE_INNER_L2_ETHER_VLAN;
+					}
+					break;
+				case VIRTCHNL2_PROTO_HDR_PTP:
+					proto_hdr &= ~RTE_PTYPE_L2_MASK;
+					proto_hdr |= RTE_PTYPE_L2_ETHER_TIMESYNC;
+					break;
+				case VIRTCHNL2_PROTO_HDR_LLDP:
+					proto_hdr &= ~RTE_PTYPE_L2_MASK;
+					proto_hdr |= RTE_PTYPE_L2_ETHER_LLDP;
+					break;
+				case VIRTCHNL2_PROTO_HDR_ARP:
+					proto_hdr &= ~RTE_PTYPE_L2_MASK;
+					proto_hdr |= RTE_PTYPE_L2_ETHER_ARP;
+					break;
+				case VIRTCHNL2_PROTO_HDR_PPPOE:
+					proto_hdr &= ~RTE_PTYPE_L2_MASK;
+					proto_hdr |= RTE_PTYPE_L2_ETHER_PPPOE;
+					break;
+				case VIRTCHNL2_PROTO_HDR_IPV4:
+					if (!is_ip) {
+						proto_hdr |= RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
+						is_ip = true;
+					} else {
+						proto_hdr |= RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+							     RTE_PTYPE_TUNNEL_IP;
+						is_inner = true;
+					}
+					break;
+				case VIRTCHNL2_PROTO_HDR_IPV6:
+					if (!is_ip) {
+						proto_hdr |= RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
+						is_ip = true;
+					} else {
+						proto_hdr |= RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+							     RTE_PTYPE_TUNNEL_IP;
+						is_inner = true;
+					}
+					break;
+				case VIRTCHNL2_PROTO_HDR_IPV4_FRAG:
+				case VIRTCHNL2_PROTO_HDR_IPV6_FRAG:
+					if (is_inner)
+						proto_hdr |= RTE_PTYPE_INNER_L4_FRAG;
+					else
+						proto_hdr |= RTE_PTYPE_L4_FRAG;
+					break;
+				case VIRTCHNL2_PROTO_HDR_UDP:
+					if (is_inner)
+						proto_hdr |= RTE_PTYPE_INNER_L4_UDP;
+					else
+						proto_hdr |= RTE_PTYPE_L4_UDP;
+					break;
+				case VIRTCHNL2_PROTO_HDR_TCP:
+					if (is_inner)
+						proto_hdr |= RTE_PTYPE_INNER_L4_TCP;
+					else
+						proto_hdr |= RTE_PTYPE_L4_TCP;
+					break;
+				case VIRTCHNL2_PROTO_HDR_SCTP:
+					if (is_inner)
+						proto_hdr |= RTE_PTYPE_INNER_L4_SCTP;
+					else
+						proto_hdr |= RTE_PTYPE_L4_SCTP;
+					break;
+				case VIRTCHNL2_PROTO_HDR_ICMP:
+					if (is_inner)
+						proto_hdr |= RTE_PTYPE_INNER_L4_ICMP;
+					else
+						proto_hdr |= RTE_PTYPE_L4_ICMP;
+					break;
+				case VIRTCHNL2_PROTO_HDR_ICMPV6:
+					if (is_inner)
+						proto_hdr |= RTE_PTYPE_INNER_L4_ICMP;
+					else
+						proto_hdr |= RTE_PTYPE_L4_ICMP;
+					break;
+				case VIRTCHNL2_PROTO_HDR_L2TPV2:
+				case VIRTCHNL2_PROTO_HDR_L2TPV2_CONTROL:
+				case VIRTCHNL2_PROTO_HDR_L2TPV3:
+					is_inner = true;
+					proto_hdr |= RTE_PTYPE_TUNNEL_L2TP;
+					break;
+				case VIRTCHNL2_PROTO_HDR_NVGRE:
+					is_inner = true;
+					proto_hdr |= RTE_PTYPE_TUNNEL_NVGRE;
+					break;
+				case VIRTCHNL2_PROTO_HDR_GTPC_TEID:
+					is_inner = true;
+					proto_hdr |= RTE_PTYPE_TUNNEL_GTPC;
+					break;
+				case VIRTCHNL2_PROTO_HDR_GTPU:
+				case VIRTCHNL2_PROTO_HDR_GTPU_UL:
+				case VIRTCHNL2_PROTO_HDR_GTPU_DL:
+					is_inner = true;
+					proto_hdr |= RTE_PTYPE_TUNNEL_GTPU;
+					break;
+				case VIRTCHNL2_PROTO_HDR_PAY:
+				case VIRTCHNL2_PROTO_HDR_IPV6_EH:
+				case VIRTCHNL2_PROTO_HDR_PRE_MAC:
+				case VIRTCHNL2_PROTO_HDR_POST_MAC:
+				case VIRTCHNL2_PROTO_HDR_ETHERTYPE:
+				case VIRTCHNL2_PROTO_HDR_SVLAN:
+				case VIRTCHNL2_PROTO_HDR_CVLAN:
+				case VIRTCHNL2_PROTO_HDR_MPLS:
+				case VIRTCHNL2_PROTO_HDR_MMPLS:
+				case VIRTCHNL2_PROTO_HDR_CTRL:
+				case VIRTCHNL2_PROTO_HDR_ECP:
+				case VIRTCHNL2_PROTO_HDR_EAPOL:
+				case VIRTCHNL2_PROTO_HDR_PPPOD:
+				case VIRTCHNL2_PROTO_HDR_IGMP:
+				case VIRTCHNL2_PROTO_HDR_AH:
+				case VIRTCHNL2_PROTO_HDR_ESP:
+				case VIRTCHNL2_PROTO_HDR_IKE:
+				case VIRTCHNL2_PROTO_HDR_NATT_KEEP:
+				case VIRTCHNL2_PROTO_HDR_GTP:
+				case VIRTCHNL2_PROTO_HDR_GTP_EH:
+				case VIRTCHNL2_PROTO_HDR_GTPCV2:
+				case VIRTCHNL2_PROTO_HDR_ECPRI:
+				case VIRTCHNL2_PROTO_HDR_VRRP:
+				case VIRTCHNL2_PROTO_HDR_OSPF:
+				case VIRTCHNL2_PROTO_HDR_TUN:
+				case VIRTCHNL2_PROTO_HDR_VXLAN_GPE:
+				case VIRTCHNL2_PROTO_HDR_GENEVE:
+				case VIRTCHNL2_PROTO_HDR_NSH:
+				case VIRTCHNL2_PROTO_HDR_QUIC:
+				case VIRTCHNL2_PROTO_HDR_PFCP:
+				case VIRTCHNL2_PROTO_HDR_PFCP_NODE:
+				case VIRTCHNL2_PROTO_HDR_PFCP_SESSION:
+				case VIRTCHNL2_PROTO_HDR_RTP:
+				case VIRTCHNL2_PROTO_HDR_NO_PROTO:
+				default:
+					continue;
+				}
+				adapter->ptype_tbl[rte_le_to_cpu_16(ptype->ptype_id_10)] = proto_hdr;
+			}
+		}
+	}
+
+free_ptype_info:
+	rte_free(ptype_info);
+	clear_cmd(adapter);
+	return ret;
+}
+
 int
 idpf_vc_get_caps(struct idpf_adapter *adapter)
 {
@@ -984,3 +1198,29 @@ idpf_vc_ena_dis_vport(struct idpf_vport *vport, bool enable)
 
 	return err;
 }
+
+int
+idpf_vc_query_ptype_info(struct idpf_adapter *adapter)
+{
+	struct virtchnl2_get_ptype_info *ptype_info;
+	struct idpf_cmd_info args;
+	int len, err;
+
+	len = sizeof(struct virtchnl2_get_ptype_info);
+	ptype_info = rte_zmalloc("ptype_info", len, 0);
+	if (ptype_info == NULL)
+		return -ENOMEM;
+
+	ptype_info->start_ptype_id = 0;
+	ptype_info->num_ptypes = rte_cpu_to_le_16(IDPF_MAX_PKT_TYPE);
+	args.ops = VIRTCHNL2_OP_GET_PTYPE_INFO;
+	args.in_args = (u8 *)ptype_info;
+	args.in_args_size = len;
+
+	err = idpf_execute_vc_cmd(adapter, &args);
+	if (err != 0)
+		PMD_DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_GET_PTYPE_INFO");
+
+	rte_free(ptype_info);
+	return err;
+}
-- 
2.34.1