From: Junfeng Guo
To: andrew.rybchenko@oktetlabs.ru, qi.z.zhang@intel.com, jingjing.wu@intel.com, beilei.xing@intel.com
Cc: dev@dpdk.org, Junfeng Guo, Wenjun Wu
Subject: [PATCH v9 07/14] net/idpf: add support for packet type get
Date: Fri, 21 Oct 2022 13:18:14 +0800
Message-Id: <20221021051821.2164939-8-junfeng.guo@intel.com>
X-Mailer: git-send-email 2.34.1
In-Reply-To: <20221021051821.2164939-1-junfeng.guo@intel.com>
References: <20221020062951.645121-2-junfeng.guo@intel.com>
 <20221021051821.2164939-1-junfeng.guo@intel.com>

Add dev op dev_supported_ptypes_get to report the packet types the idpf
PMD can parse. At adapter init time, query the device's packet type
information over virtchnl2 (VIRTCHNL2_OP_GET_PTYPE_INFO) and build the
ptype table that maps hardware ptype IDs to RTE_PTYPE_* values.
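
Not part of the change itself, just a minimal usage sketch: an application
reaches this dev op through the generic ethdev API. The helper name
show_port_ptypes, the port handling and the buffer sizes below are
illustrative assumptions only.

    #include <stdio.h>
    #include <rte_common.h>
    #include <rte_ethdev.h>
    #include <rte_mbuf_ptype.h>

    /* Print the packet types a configured port reports as supported. */
    static void
    show_port_ptypes(uint16_t port_id)
    {
        uint32_t ptypes[32];
        char name[64];
        int nb, i;

        /* Dispatches internally to the driver's dev_supported_ptypes_get(). */
        nb = rte_eth_dev_get_supported_ptypes(port_id, RTE_PTYPE_ALL_MASK,
                                              ptypes, RTE_DIM(ptypes));
        for (i = 0; i < nb && i < (int)RTE_DIM(ptypes); i++) {
            rte_get_ptype_name(ptypes[i], name, sizeof(name));
            printf("port %u: %s\n", port_id, name);
        }
    }
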
Signed-off-by: Wenjun Wu
Signed-off-by: Junfeng Guo
---
 doc/guides/nics/features/idpf.ini |   1 +
 drivers/net/idpf/idpf_ethdev.c    |   7 +
 drivers/net/idpf/idpf_ethdev.h    |   2 +
 drivers/net/idpf/idpf_rxtx.c      |  19 +++
 drivers/net/idpf/idpf_rxtx.h      |   6 +
 drivers/net/idpf/idpf_vchnl.c     | 235 ++++++++++++++++++++++++++++++
 6 files changed, 270 insertions(+)

diff --git a/doc/guides/nics/features/idpf.ini b/doc/guides/nics/features/idpf.ini
index 597beec5d9..79ffafc0d4 100644
--- a/doc/guides/nics/features/idpf.ini
+++ b/doc/guides/nics/features/idpf.ini
@@ -10,6 +10,7 @@
 Queue start/stop     = Y
 Runtime Rx queue setup = Y
 Runtime Tx queue setup = Y
+Packet type parsing  = Y
 Multiprocess aware   = Y
 FreeBSD              = Y
 Linux                = Y
diff --git a/drivers/net/idpf/idpf_ethdev.c b/drivers/net/idpf/idpf_ethdev.c
index d1b6797d4a..20f0f39640 100644
--- a/drivers/net/idpf/idpf_ethdev.c
+++ b/drivers/net/idpf/idpf_ethdev.c
@@ -55,6 +55,7 @@ idpf_dev_link_update(struct rte_eth_dev *dev,
 }
 
 static const struct eth_dev_ops idpf_eth_dev_ops = {
+	.dev_supported_ptypes_get	= idpf_dev_supported_ptypes_get,
 	.dev_configure			= idpf_dev_configure,
 	.dev_start			= idpf_dev_start,
 	.dev_stop			= idpf_dev_stop,
@@ -652,6 +653,12 @@ idpf_adapter_init(struct rte_pci_device *pci_dev, struct idpf_adapter *adapter)
 		goto err_api;
 	}
 
+	ret = idpf_get_pkt_type(adapter);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Failed to set ptype table");
+		goto err_api;
+	}
+
 	adapter->caps = rte_zmalloc("idpf_caps",
 			sizeof(struct virtchnl2_get_capabilities), 0);
 	if (!adapter->caps) {
diff --git a/drivers/net/idpf/idpf_ethdev.h b/drivers/net/idpf/idpf_ethdev.h
index 9608204e4c..d414efc917 100644
--- a/drivers/net/idpf/idpf_ethdev.h
+++ b/drivers/net/idpf/idpf_ethdev.h
@@ -212,6 +212,7 @@ int idpf_dev_link_update(struct rte_eth_dev *dev,
 			 __rte_unused int wait_to_complete);
 void idpf_handle_virtchnl_msg(struct rte_eth_dev *dev);
 int idpf_vc_check_api_version(struct idpf_adapter *adapter);
+int idpf_get_pkt_type(struct idpf_adapter *adapter);
 int idpf_vc_get_caps(struct idpf_adapter *adapter);
 int idpf_vc_create_vport(struct rte_eth_dev *dev);
 int idpf_vc_destroy_vport(struct idpf_vport *vport);
@@ -223,6 +224,7 @@ int idpf_switch_queue(struct idpf_vport *vport, uint16_t qid,
 		      bool rx, bool on);
 int idpf_vc_ena_dis_queues(struct idpf_vport *vport, bool enable);
 int idpf_vc_ena_dis_vport(struct idpf_vport *vport, bool enable);
+int idpf_vc_query_ptype_info(struct idpf_adapter *adapter);
 int idpf_read_one_msg(struct idpf_adapter *adapter, uint32_t ops,
 		      uint16_t buf_len, uint8_t *buf);
 
diff --git a/drivers/net/idpf/idpf_rxtx.c b/drivers/net/idpf/idpf_rxtx.c
index 95193713c4..e92ea09c3b 100644
--- a/drivers/net/idpf/idpf_rxtx.c
+++ b/drivers/net/idpf/idpf_rxtx.c
@@ -8,6 +8,25 @@
 #include "idpf_ethdev.h"
 #include "idpf_rxtx.h"
 
+const uint32_t *
+idpf_dev_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused)
+{
+	static const uint32_t ptypes[] = {
+		RTE_PTYPE_L2_ETHER,
+		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
+		RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
+		RTE_PTYPE_L4_FRAG,
+		RTE_PTYPE_L4_NONFRAG,
+		RTE_PTYPE_L4_UDP,
+		RTE_PTYPE_L4_TCP,
+		RTE_PTYPE_L4_SCTP,
+		RTE_PTYPE_L4_ICMP,
+		RTE_PTYPE_UNKNOWN
+	};
+
+	return ptypes;
+}
+
 static inline int
 check_rx_thresh(uint16_t nb_desc, uint16_t thresh)
 {
diff --git a/drivers/net/idpf/idpf_rxtx.h b/drivers/net/idpf/idpf_rxtx.h
index f0427b96c5..cd1dda4688 100644
--- a/drivers/net/idpf/idpf_rxtx.h
+++ b/drivers/net/idpf/idpf_rxtx.h
@@ -47,6 +47,10 @@
 #define IDPF_TX_OFFLOAD_NOTSUP_MASK \
 	(RTE_MBUF_F_TX_OFFLOAD_MASK ^ IDPF_TX_OFFLOAD_MASK)
 
+#define IDPF_GET_PTYPE_SIZE(p) \
+	(sizeof(struct virtchnl2_ptype) + \
+	 (((p)->proto_id_count ? ((p)->proto_id_count - 1) : 0) * sizeof((p)->proto_id[0])))
+
 struct idpf_rx_queue {
 	struct idpf_adapter *adapter;   /* the adapter this queue belongs to */
 	struct rte_mempool *mp;         /* mbuf pool to populate Rx ring */
@@ -189,4 +193,6 @@ void idpf_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
 
 void idpf_stop_queues(struct rte_eth_dev *dev);
 
+const uint32_t *idpf_dev_supported_ptypes_get(struct rte_eth_dev *dev);
+
 #endif /* _IDPF_RXTX_H_ */
diff --git a/drivers/net/idpf/idpf_vchnl.c b/drivers/net/idpf/idpf_vchnl.c
index 6b6881872b..7389128712 100644
--- a/drivers/net/idpf/idpf_vchnl.c
+++ b/drivers/net/idpf/idpf_vchnl.c
@@ -303,6 +303,215 @@ idpf_vc_check_api_version(struct idpf_adapter *adapter)
 	return err;
 }
 
+int __rte_cold
+idpf_get_pkt_type(struct idpf_adapter *adapter)
+{
+	struct virtchnl2_get_ptype_info *ptype_info;
+	uint16_t ptype_recvd = 0, ptype_offset, i, j;
+	int ret;
+
+	ret = idpf_vc_query_ptype_info(adapter);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Fail to query packet type information");
+		return ret;
+	}
+
+	ptype_info = rte_zmalloc("ptype_info", IDPF_DFLT_MBX_BUF_SIZE, 0);
+	if (!ptype_info)
+		return -ENOMEM;
+
+	while (ptype_recvd < IDPF_MAX_PKT_TYPE) {
+		ret = idpf_read_one_msg(adapter, VIRTCHNL2_OP_GET_PTYPE_INFO,
+					IDPF_DFLT_MBX_BUF_SIZE, (u8 *)ptype_info);
+		if (ret) {
+			PMD_DRV_LOG(ERR, "Fail to get packet type information");
+			goto free_ptype_info;
+		}
+
+		ptype_recvd += ptype_info->num_ptypes;
+		ptype_offset = sizeof(struct virtchnl2_get_ptype_info) -
+						sizeof(struct virtchnl2_ptype);
+
+		for (i = 0; i < rte_cpu_to_le_16(ptype_info->num_ptypes); i++) {
+			bool is_inner = false, is_ip = false;
+			struct virtchnl2_ptype *ptype;
+			uint32_t proto_hdr = 0;
+
+			ptype = (struct virtchnl2_ptype *)
+					((u8 *)ptype_info + ptype_offset);
+			ptype_offset += IDPF_GET_PTYPE_SIZE(ptype);
+			if (ptype_offset > IDPF_DFLT_MBX_BUF_SIZE) {
+				ret = -EINVAL;
+				goto free_ptype_info;
+			}
+
+			if (rte_cpu_to_le_16(ptype->ptype_id_10) == 0xFFFF)
+				goto free_ptype_info;
+
+			for (j = 0; j < ptype->proto_id_count; j++) {
+				switch (rte_cpu_to_le_16(ptype->proto_id[j])) {
+				case VIRTCHNL2_PROTO_HDR_GRE:
+				case VIRTCHNL2_PROTO_HDR_VXLAN:
+					proto_hdr &= ~RTE_PTYPE_L4_MASK;
+					proto_hdr |= RTE_PTYPE_TUNNEL_GRENAT;
+					is_inner = true;
+					break;
+				case VIRTCHNL2_PROTO_HDR_MAC:
+					if (is_inner) {
+						proto_hdr &= ~RTE_PTYPE_INNER_L2_MASK;
+						proto_hdr |= RTE_PTYPE_INNER_L2_ETHER;
+					} else {
+						proto_hdr &= ~RTE_PTYPE_L2_MASK;
+						proto_hdr |= RTE_PTYPE_L2_ETHER;
+					}
+					break;
+				case VIRTCHNL2_PROTO_HDR_VLAN:
+					if (is_inner) {
+						proto_hdr &= ~RTE_PTYPE_INNER_L2_MASK;
+						proto_hdr |= RTE_PTYPE_INNER_L2_ETHER_VLAN;
+					}
+					break;
+				case VIRTCHNL2_PROTO_HDR_PTP:
+					proto_hdr &= ~RTE_PTYPE_L2_MASK;
+					proto_hdr |= RTE_PTYPE_L2_ETHER_TIMESYNC;
+					break;
+				case VIRTCHNL2_PROTO_HDR_LLDP:
+					proto_hdr &= ~RTE_PTYPE_L2_MASK;
+					proto_hdr |= RTE_PTYPE_L2_ETHER_LLDP;
+					break;
+				case VIRTCHNL2_PROTO_HDR_ARP:
+					proto_hdr &= ~RTE_PTYPE_L2_MASK;
+					proto_hdr |= RTE_PTYPE_L2_ETHER_ARP;
+					break;
+				case VIRTCHNL2_PROTO_HDR_PPPOE:
+					proto_hdr &= ~RTE_PTYPE_L2_MASK;
+					proto_hdr |= RTE_PTYPE_L2_ETHER_PPPOE;
+					break;
+				case VIRTCHNL2_PROTO_HDR_IPV4:
+					if (!is_ip) {
+						proto_hdr |= RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
+						is_ip = true;
+					} else {
+						proto_hdr |= RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+							     RTE_PTYPE_TUNNEL_IP;
+						is_inner = true;
+					}
+					break;
+				case VIRTCHNL2_PROTO_HDR_IPV6:
+					if (!is_ip) {
+						proto_hdr |= RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
+						is_ip = true;
+					} else {
+						proto_hdr |= RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+							     RTE_PTYPE_TUNNEL_IP;
+						is_inner = true;
+					}
+					break;
+				case VIRTCHNL2_PROTO_HDR_IPV4_FRAG:
+				case VIRTCHNL2_PROTO_HDR_IPV6_FRAG:
+					if (is_inner)
+						proto_hdr |= RTE_PTYPE_INNER_L4_FRAG;
+					else
+						proto_hdr |= RTE_PTYPE_L4_FRAG;
+					break;
+				case VIRTCHNL2_PROTO_HDR_UDP:
+					if (is_inner)
+						proto_hdr |= RTE_PTYPE_INNER_L4_UDP;
+					else
+						proto_hdr |= RTE_PTYPE_L4_UDP;
+					break;
+				case VIRTCHNL2_PROTO_HDR_TCP:
+					if (is_inner)
+						proto_hdr |= RTE_PTYPE_INNER_L4_TCP;
+					else
+						proto_hdr |= RTE_PTYPE_L4_TCP;
+					break;
+				case VIRTCHNL2_PROTO_HDR_SCTP:
+					if (is_inner)
+						proto_hdr |= RTE_PTYPE_INNER_L4_SCTP;
+					else
+						proto_hdr |= RTE_PTYPE_L4_SCTP;
+					break;
+				case VIRTCHNL2_PROTO_HDR_ICMP:
+					if (is_inner)
+						proto_hdr |= RTE_PTYPE_INNER_L4_ICMP;
+					else
+						proto_hdr |= RTE_PTYPE_L4_ICMP;
+					break;
+				case VIRTCHNL2_PROTO_HDR_ICMPV6:
+					if (is_inner)
+						proto_hdr |= RTE_PTYPE_INNER_L4_ICMP;
+					else
+						proto_hdr |= RTE_PTYPE_L4_ICMP;
+					break;
+				case VIRTCHNL2_PROTO_HDR_L2TPV2:
+				case VIRTCHNL2_PROTO_HDR_L2TPV2_CONTROL:
+				case VIRTCHNL2_PROTO_HDR_L2TPV3:
+					is_inner = true;
+					proto_hdr |= RTE_PTYPE_TUNNEL_L2TP;
+					break;
+				case VIRTCHNL2_PROTO_HDR_NVGRE:
+					is_inner = true;
+					proto_hdr |= RTE_PTYPE_TUNNEL_NVGRE;
+					break;
+				case VIRTCHNL2_PROTO_HDR_GTPC_TEID:
+					is_inner = true;
+					proto_hdr |= RTE_PTYPE_TUNNEL_GTPC;
+					break;
+				case VIRTCHNL2_PROTO_HDR_GTPU:
+				case VIRTCHNL2_PROTO_HDR_GTPU_UL:
+				case VIRTCHNL2_PROTO_HDR_GTPU_DL:
+					is_inner = true;
+					proto_hdr |= RTE_PTYPE_TUNNEL_GTPU;
+					break;
+				case VIRTCHNL2_PROTO_HDR_PAY:
+				case VIRTCHNL2_PROTO_HDR_IPV6_EH:
+				case VIRTCHNL2_PROTO_HDR_PRE_MAC:
+				case VIRTCHNL2_PROTO_HDR_POST_MAC:
+				case VIRTCHNL2_PROTO_HDR_ETHERTYPE:
+				case VIRTCHNL2_PROTO_HDR_SVLAN:
+				case VIRTCHNL2_PROTO_HDR_CVLAN:
+				case VIRTCHNL2_PROTO_HDR_MPLS:
+				case VIRTCHNL2_PROTO_HDR_MMPLS:
+				case VIRTCHNL2_PROTO_HDR_CTRL:
+				case VIRTCHNL2_PROTO_HDR_ECP:
+				case VIRTCHNL2_PROTO_HDR_EAPOL:
+				case VIRTCHNL2_PROTO_HDR_PPPOD:
+				case VIRTCHNL2_PROTO_HDR_IGMP:
+				case VIRTCHNL2_PROTO_HDR_AH:
+				case VIRTCHNL2_PROTO_HDR_ESP:
+				case VIRTCHNL2_PROTO_HDR_IKE:
+				case VIRTCHNL2_PROTO_HDR_NATT_KEEP:
+				case VIRTCHNL2_PROTO_HDR_GTP:
+				case VIRTCHNL2_PROTO_HDR_GTP_EH:
+				case VIRTCHNL2_PROTO_HDR_GTPCV2:
+				case VIRTCHNL2_PROTO_HDR_ECPRI:
+				case VIRTCHNL2_PROTO_HDR_VRRP:
+				case VIRTCHNL2_PROTO_HDR_OSPF:
+				case VIRTCHNL2_PROTO_HDR_TUN:
+				case VIRTCHNL2_PROTO_HDR_VXLAN_GPE:
+				case VIRTCHNL2_PROTO_HDR_GENEVE:
+				case VIRTCHNL2_PROTO_HDR_NSH:
+				case VIRTCHNL2_PROTO_HDR_QUIC:
+				case VIRTCHNL2_PROTO_HDR_PFCP:
+				case VIRTCHNL2_PROTO_HDR_PFCP_NODE:
+				case VIRTCHNL2_PROTO_HDR_PFCP_SESSION:
+				case VIRTCHNL2_PROTO_HDR_RTP:
+				case VIRTCHNL2_PROTO_HDR_NO_PROTO:
+				default:
+					continue;
+				}
+				adapter->ptype_tbl[ptype->ptype_id_10] = proto_hdr;
+			}
+		}
+	}
+
+free_ptype_info:
+	rte_free(ptype_info);
+	_clear_cmd(adapter);
+	return ret;
+}
+
 int
 idpf_vc_get_caps(struct idpf_adapter *adapter)
 {
@@ -1005,3 +1214,29 @@ idpf_vc_ena_dis_vport(struct idpf_vport *vport, bool enable)
 
 	return err;
 }
+
+int
+idpf_vc_query_ptype_info(struct idpf_adapter *adapter)
+{
+	struct virtchnl2_get_ptype_info *ptype_info;
+	struct idpf_cmd_info args;
+	int len, err;
+
+	len = sizeof(struct virtchnl2_get_ptype_info);
+	ptype_info = rte_zmalloc("ptype_info", len, 0);
+	if (!ptype_info)
+		return -ENOMEM;
+
+	ptype_info->start_ptype_id = 0;
+	ptype_info->num_ptypes = IDPF_MAX_PKT_TYPE;
+	args.ops = VIRTCHNL2_OP_GET_PTYPE_INFO;
+	args.in_args = (u8 *)ptype_info;
+	args.in_args_size = len;
+
+	err = idpf_execute_vc_cmd(adapter, &args);
+	if (err)
+		PMD_DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_GET_PTYPE_INFO");
+
+	rte_free(ptype_info);
+	return err;
+}
-- 
2.34.1