From mboxrd@z Thu Jan 1 00:00:00 1970 Return-Path: Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id B6ADAA04FD; Wed, 26 Oct 2022 12:13:36 +0200 (CEST) Received: from [217.70.189.124] (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id 1011242BE1; Wed, 26 Oct 2022 12:12:48 +0200 (CEST) Received: from mga14.intel.com (mga14.intel.com [192.55.52.115]) by mails.dpdk.org (Postfix) with ESMTP id AB62942C39 for ; Wed, 26 Oct 2022 12:12:34 +0200 (CEST) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=intel.com; i=@intel.com; q=dns/txt; s=Intel; t=1666779154; x=1698315154; h=from:to:cc:subject:date:message-id:in-reply-to: references:mime-version:content-transfer-encoding; bh=tz60FIxKX59l1TBTbwr9GVn5XNIJcg93yfiB2iPKI+I=; b=MqgLgJAKospUnBIzAeFxZfQoToVd33b9xkDBQwnysjCYPuTIT0XyAhG7 Ge3sESsmJDVdh3s3KlHq76prxFFWjdFjkstpgH33a8Pe+Ocm6yos7PUvM ZWzzDic1ja6Rcg54dhBjtdJJKwCgWgd4PmMMwh0aAQ+EmsUKuDF+C26ah Ma0IoULlEJuvOO2EnsJJCriKxLbLz+5n0ZdWwWDJHq27xBR9UubAKpxtY MFKEW6tlYjuCTI0jWAN8IVilFEzsMmmn73v2KCt9BM18oCNMzpymlkIjV kHMuUjdydZz1wEWaxWdWpbut+jLHqZPL4YkZ6gHs6EZwRSkauPsBRjsy6 w==; X-IronPort-AV: E=McAfee;i="6500,9779,10511"; a="307904673" X-IronPort-AV: E=Sophos;i="5.95,214,1661842800"; d="scan'208";a="307904673" Received: from fmsmga008.fm.intel.com ([10.253.24.58]) by fmsmga103.fm.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 26 Oct 2022 03:12:34 -0700 X-ExtLoop1: 1 X-IronPort-AV: E=McAfee;i="6500,9779,10511"; a="695306379" X-IronPort-AV: E=Sophos;i="5.95,214,1661842800"; d="scan'208";a="695306379" Received: from dpdk-jf-ntb-one.sh.intel.com ([10.67.111.104]) by fmsmga008.fm.intel.com with ESMTP; 26 Oct 2022 03:12:32 -0700 From: Junfeng Guo To: andrew.rybchenko@oktetlabs.ru, qi.z.zhang@intel.com, jingjing.wu@intel.com, beilei.xing@intel.com Cc: dev@dpdk.org, Junfeng Guo , Wenjun Wu Subject: [PATCH v12 12/18] net/idpf: support packet type get Date: Wed, 26 Oct 2022 18:10:21 +0800 Message-Id: 
<20221026101027.240583-13-junfeng.guo@intel.com> X-Mailer: git-send-email 2.34.1 In-Reply-To: <20221026101027.240583-1-junfeng.guo@intel.com> References: <20221024131227.1062446-2-junfeng.guo@intel.com> <20221026101027.240583-1-junfeng.guo@intel.com> MIME-Version: 1.0 Content-Transfer-Encoding: 8bit X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Get packet type during receiving packets. Signed-off-by: Wenjun Wu Signed-off-by: Junfeng Guo --- drivers/net/idpf/idpf_ethdev.c | 6 + drivers/net/idpf/idpf_ethdev.h | 6 + drivers/net/idpf/idpf_rxtx.c | 11 ++ drivers/net/idpf/idpf_rxtx.h | 5 + drivers/net/idpf/idpf_vchnl.c | 240 +++++++++++++++++++++++++++++++++ 5 files changed, 268 insertions(+) diff --git a/drivers/net/idpf/idpf_ethdev.c b/drivers/net/idpf/idpf_ethdev.c index 6fb56e584d..630bdabcd4 100644 --- a/drivers/net/idpf/idpf_ethdev.c +++ b/drivers/net/idpf/idpf_ethdev.c @@ -709,6 +709,12 @@ idpf_adapter_init(struct rte_pci_device *pci_dev, struct idpf_adapter *adapter) goto err_api; } + ret = idpf_get_pkt_type(adapter); + if (ret != 0) { + PMD_INIT_LOG(ERR, "Failed to set ptype table"); + goto err_api; + } + adapter->caps = rte_zmalloc("idpf_caps", sizeof(struct virtchnl2_get_capabilities), 0); if (adapter->caps == NULL) { diff --git a/drivers/net/idpf/idpf_ethdev.h b/drivers/net/idpf/idpf_ethdev.h index af0a8e2970..db9af58f72 100644 --- a/drivers/net/idpf/idpf_ethdev.h +++ b/drivers/net/idpf/idpf_ethdev.h @@ -39,6 +39,8 @@ #define IDPF_NUM_MACADDR_MAX 64 +#define IDPF_MAX_PKT_TYPE 1024 + #define IDPF_VLAN_TAG_SIZE 4 #define IDPF_ETH_OVERHEAD \ (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + IDPF_VLAN_TAG_SIZE * 2) @@ -125,6 +127,8 @@ struct idpf_adapter { /* Max config queue number per VC message */ uint32_t max_rxq_per_msg; uint32_t max_txq_per_msg; + + uint32_t ptype_tbl[IDPF_MAX_PKT_TYPE] 
__rte_cache_min_aligned; }; TAILQ_HEAD(idpf_adapter_list, idpf_adapter); @@ -182,6 +186,7 @@ atomic_set_cmd(struct idpf_adapter *adapter, enum virtchnl_ops ops) struct idpf_adapter *idpf_find_adapter(struct rte_pci_device *pci_dev); void idpf_handle_virtchnl_msg(struct rte_eth_dev *dev); int idpf_vc_check_api_version(struct idpf_adapter *adapter); +int idpf_get_pkt_type(struct idpf_adapter *adapter); int idpf_vc_get_caps(struct idpf_adapter *adapter); int idpf_vc_create_vport(struct idpf_adapter *adapter); int idpf_vc_destroy_vport(struct idpf_vport *vport); @@ -193,6 +198,7 @@ int idpf_switch_queue(struct idpf_vport *vport, uint16_t qid, bool rx, bool on); int idpf_vc_ena_dis_queues(struct idpf_vport *vport, bool enable); int idpf_vc_ena_dis_vport(struct idpf_vport *vport, bool enable); +int idpf_vc_query_ptype_info(struct idpf_adapter *adapter); int idpf_read_one_msg(struct idpf_adapter *adapter, uint32_t ops, uint16_t buf_len, uint8_t *buf); diff --git a/drivers/net/idpf/idpf_rxtx.c b/drivers/net/idpf/idpf_rxtx.c index 86942b2b5f..f9f751a6ad 100644 --- a/drivers/net/idpf/idpf_rxtx.c +++ b/drivers/net/idpf/idpf_rxtx.c @@ -1282,6 +1282,7 @@ idpf_splitq_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, volatile struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc; uint16_t pktlen_gen_bufq_id; struct idpf_rx_queue *rxq; + const uint32_t *ptype_tbl; struct rte_mbuf *rxm; uint16_t rx_id_bufq1; uint16_t rx_id_bufq2; @@ -1301,6 +1302,7 @@ idpf_splitq_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, rx_id_bufq1 = rxq->bufq1->rx_next_avail; rx_id_bufq2 = rxq->bufq2->rx_next_avail; rx_desc_ring = rxq->rx_ring; + ptype_tbl = rxq->adapter->ptype_tbl; while (nb_rx < nb_pkts) { rx_desc = &rx_desc_ring[rx_id]; @@ -1348,6 +1350,10 @@ idpf_splitq_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, rxm->next = NULL; rxm->nb_segs = 1; rxm->port = rxq->port_id; + rxm->packet_type = + ptype_tbl[(rte_le_to_cpu_16(rx_desc->ptype_err_fflags0) & + VIRTCHNL2_RX_FLEX_DESC_ADV_PTYPE_M) 
>> + VIRTCHNL2_RX_FLEX_DESC_ADV_PTYPE_S]; rx_pkts[nb_rx++] = rxm; } @@ -1534,6 +1540,7 @@ idpf_singleq_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, volatile union virtchnl2_rx_desc *rxdp; union virtchnl2_rx_desc rxd; struct idpf_rx_queue *rxq; + const uint32_t *ptype_tbl; uint16_t rx_id, nb_hold; struct rte_eth_dev *dev; uint16_t rx_packet_len; @@ -1552,6 +1559,7 @@ idpf_singleq_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, rx_id = rxq->rx_tail; rx_ring = rxq->rx_ring; + ptype_tbl = rxq->adapter->ptype_tbl; while (nb_rx < nb_pkts) { rxdp = &rx_ring[rx_id]; @@ -1604,6 +1612,9 @@ idpf_singleq_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, rxm->pkt_len = rx_packet_len; rxm->data_len = rx_packet_len; rxm->port = rxq->port_id; + rxm->packet_type = + ptype_tbl[(uint8_t)(rte_cpu_to_le_16(rxd.flex_nic_wb.ptype_flex_flags0) & + VIRTCHNL2_RX_FLEX_DESC_PTYPE_M)]; rx_pkts[nb_rx++] = rxm; } diff --git a/drivers/net/idpf/idpf_rxtx.h b/drivers/net/idpf/idpf_rxtx.h index 30dc94b3dd..3853ed55c9 100644 --- a/drivers/net/idpf/idpf_rxtx.h +++ b/drivers/net/idpf/idpf_rxtx.h @@ -23,6 +23,10 @@ #define IDPF_TX_MAX_MTU_SEG 10 +#define IDPF_GET_PTYPE_SIZE(p) \ + (sizeof(struct virtchnl2_ptype) + \ + (((p)->proto_id_count ? 
((p)->proto_id_count - 1) : 0) * sizeof((p)->proto_id[0]))) + struct idpf_rx_queue { struct idpf_adapter *adapter; /* the adapter this queue belongs to */ struct rte_mempool *mp; /* mbuf pool to populate Rx ring */ @@ -150,4 +154,5 @@ void idpf_stop_queues(struct rte_eth_dev *dev); void idpf_set_rx_function(struct rte_eth_dev *dev); void idpf_set_tx_function(struct rte_eth_dev *dev); + #endif /* _IDPF_RXTX_H_ */ diff --git a/drivers/net/idpf/idpf_vchnl.c b/drivers/net/idpf/idpf_vchnl.c index 1ba59929a0..ac399d331a 100644 --- a/drivers/net/idpf/idpf_vchnl.c +++ b/drivers/net/idpf/idpf_vchnl.c @@ -234,6 +234,11 @@ idpf_execute_vc_cmd(struct idpf_adapter *adapter, struct idpf_cmd_info *args) err = idpf_read_one_msg(adapter, args->ops, args->out_size, args->out_buffer); clear_cmd(adapter); break; + case VIRTCHNL2_OP_GET_PTYPE_INFO: + /* for multiple response message, + * do not handle the response here. + */ + break; + default: /* For other virtchnl ops in running time, * wait for the cmd done flag.
@@ -298,6 +303,215 @@ idpf_vc_check_api_version(struct idpf_adapter *adapter) return 0; } +int __rte_cold +idpf_get_pkt_type(struct idpf_adapter *adapter) +{ + struct virtchnl2_get_ptype_info *ptype_info; + uint16_t ptype_recvd = 0, ptype_offset, i, j; + int ret; + + ret = idpf_vc_query_ptype_info(adapter); + if (ret != 0) { + PMD_DRV_LOG(ERR, "Fail to query packet type information"); + return ret; + } + + ptype_info = rte_zmalloc("ptype_info", IDPF_DFLT_MBX_BUF_SIZE, 0); + if (ptype_info == NULL) + return -ENOMEM; + + while (ptype_recvd < IDPF_MAX_PKT_TYPE) { + ret = idpf_read_one_msg(adapter, VIRTCHNL2_OP_GET_PTYPE_INFO, + IDPF_DFLT_MBX_BUF_SIZE, (u8 *)ptype_info); + if (ret != 0) { + PMD_DRV_LOG(ERR, "Fail to get packet type information"); + goto free_ptype_info; + } + + ptype_recvd += ptype_info->num_ptypes; + ptype_offset = sizeof(struct virtchnl2_get_ptype_info) - + sizeof(struct virtchnl2_ptype); + + for (i = 0; i < rte_cpu_to_le_16(ptype_info->num_ptypes); i++) { + bool is_inner = false, is_ip = false; + struct virtchnl2_ptype *ptype; + uint32_t proto_hdr = 0; + + ptype = (struct virtchnl2_ptype *) + ((u8 *)ptype_info + ptype_offset); + ptype_offset += IDPF_GET_PTYPE_SIZE(ptype); + if (ptype_offset > IDPF_DFLT_MBX_BUF_SIZE) { + ret = -EINVAL; + goto free_ptype_info; + } + + if (rte_cpu_to_le_16(ptype->ptype_id_10) == 0xFFFF) + goto free_ptype_info; + + for (j = 0; j < ptype->proto_id_count; j++) { + switch (rte_cpu_to_le_16(ptype->proto_id[j])) { + case VIRTCHNL2_PROTO_HDR_GRE: + case VIRTCHNL2_PROTO_HDR_VXLAN: + proto_hdr &= ~RTE_PTYPE_L4_MASK; + proto_hdr |= RTE_PTYPE_TUNNEL_GRENAT; + is_inner = true; + break; + case VIRTCHNL2_PROTO_HDR_MAC: + if (is_inner) { + proto_hdr &= ~RTE_PTYPE_INNER_L2_MASK; + proto_hdr |= RTE_PTYPE_INNER_L2_ETHER; + } else { + proto_hdr &= ~RTE_PTYPE_L2_MASK; + proto_hdr |= RTE_PTYPE_L2_ETHER; + } + break; + case VIRTCHNL2_PROTO_HDR_VLAN: + if (is_inner) { + proto_hdr &= ~RTE_PTYPE_INNER_L2_MASK; + proto_hdr |= 
RTE_PTYPE_INNER_L2_ETHER_VLAN; + } + break; + case VIRTCHNL2_PROTO_HDR_PTP: + proto_hdr &= ~RTE_PTYPE_L2_MASK; + proto_hdr |= RTE_PTYPE_L2_ETHER_TIMESYNC; + break; + case VIRTCHNL2_PROTO_HDR_LLDP: + proto_hdr &= ~RTE_PTYPE_L2_MASK; + proto_hdr |= RTE_PTYPE_L2_ETHER_LLDP; + break; + case VIRTCHNL2_PROTO_HDR_ARP: + proto_hdr &= ~RTE_PTYPE_L2_MASK; + proto_hdr |= RTE_PTYPE_L2_ETHER_ARP; + break; + case VIRTCHNL2_PROTO_HDR_PPPOE: + proto_hdr &= ~RTE_PTYPE_L2_MASK; + proto_hdr |= RTE_PTYPE_L2_ETHER_PPPOE; + break; + case VIRTCHNL2_PROTO_HDR_IPV4: + if (!is_ip) { + proto_hdr |= RTE_PTYPE_L3_IPV4_EXT_UNKNOWN; + is_ip = true; + } else { + proto_hdr |= RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_IP; + is_inner = true; + } + break; + case VIRTCHNL2_PROTO_HDR_IPV6: + if (!is_ip) { + proto_hdr |= RTE_PTYPE_L3_IPV6_EXT_UNKNOWN; + is_ip = true; + } else { + proto_hdr |= RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_IP; + is_inner = true; + } + break; + case VIRTCHNL2_PROTO_HDR_IPV4_FRAG: + case VIRTCHNL2_PROTO_HDR_IPV6_FRAG: + if (is_inner) + proto_hdr |= RTE_PTYPE_INNER_L4_FRAG; + else + proto_hdr |= RTE_PTYPE_L4_FRAG; + break; + case VIRTCHNL2_PROTO_HDR_UDP: + if (is_inner) + proto_hdr |= RTE_PTYPE_INNER_L4_UDP; + else + proto_hdr |= RTE_PTYPE_L4_UDP; + break; + case VIRTCHNL2_PROTO_HDR_TCP: + if (is_inner) + proto_hdr |= RTE_PTYPE_INNER_L4_TCP; + else + proto_hdr |= RTE_PTYPE_L4_TCP; + break; + case VIRTCHNL2_PROTO_HDR_SCTP: + if (is_inner) + proto_hdr |= RTE_PTYPE_INNER_L4_SCTP; + else + proto_hdr |= RTE_PTYPE_L4_SCTP; + break; + case VIRTCHNL2_PROTO_HDR_ICMP: + if (is_inner) + proto_hdr |= RTE_PTYPE_INNER_L4_ICMP; + else + proto_hdr |= RTE_PTYPE_L4_ICMP; + break; + case VIRTCHNL2_PROTO_HDR_ICMPV6: + if (is_inner) + proto_hdr |= RTE_PTYPE_INNER_L4_ICMP; + else + proto_hdr |= RTE_PTYPE_L4_ICMP; + break; + case VIRTCHNL2_PROTO_HDR_L2TPV2: + case VIRTCHNL2_PROTO_HDR_L2TPV2_CONTROL: + case VIRTCHNL2_PROTO_HDR_L2TPV3: + is_inner = true; + proto_hdr |= 
RTE_PTYPE_TUNNEL_L2TP; + break; + case VIRTCHNL2_PROTO_HDR_NVGRE: + is_inner = true; + proto_hdr |= RTE_PTYPE_TUNNEL_NVGRE; + break; + case VIRTCHNL2_PROTO_HDR_GTPC_TEID: + is_inner = true; + proto_hdr |= RTE_PTYPE_TUNNEL_GTPC; + break; + case VIRTCHNL2_PROTO_HDR_GTPU: + case VIRTCHNL2_PROTO_HDR_GTPU_UL: + case VIRTCHNL2_PROTO_HDR_GTPU_DL: + is_inner = true; + proto_hdr |= RTE_PTYPE_TUNNEL_GTPU; + break; + case VIRTCHNL2_PROTO_HDR_PAY: + case VIRTCHNL2_PROTO_HDR_IPV6_EH: + case VIRTCHNL2_PROTO_HDR_PRE_MAC: + case VIRTCHNL2_PROTO_HDR_POST_MAC: + case VIRTCHNL2_PROTO_HDR_ETHERTYPE: + case VIRTCHNL2_PROTO_HDR_SVLAN: + case VIRTCHNL2_PROTO_HDR_CVLAN: + case VIRTCHNL2_PROTO_HDR_MPLS: + case VIRTCHNL2_PROTO_HDR_MMPLS: + case VIRTCHNL2_PROTO_HDR_CTRL: + case VIRTCHNL2_PROTO_HDR_ECP: + case VIRTCHNL2_PROTO_HDR_EAPOL: + case VIRTCHNL2_PROTO_HDR_PPPOD: + case VIRTCHNL2_PROTO_HDR_IGMP: + case VIRTCHNL2_PROTO_HDR_AH: + case VIRTCHNL2_PROTO_HDR_ESP: + case VIRTCHNL2_PROTO_HDR_IKE: + case VIRTCHNL2_PROTO_HDR_NATT_KEEP: + case VIRTCHNL2_PROTO_HDR_GTP: + case VIRTCHNL2_PROTO_HDR_GTP_EH: + case VIRTCHNL2_PROTO_HDR_GTPCV2: + case VIRTCHNL2_PROTO_HDR_ECPRI: + case VIRTCHNL2_PROTO_HDR_VRRP: + case VIRTCHNL2_PROTO_HDR_OSPF: + case VIRTCHNL2_PROTO_HDR_TUN: + case VIRTCHNL2_PROTO_HDR_VXLAN_GPE: + case VIRTCHNL2_PROTO_HDR_GENEVE: + case VIRTCHNL2_PROTO_HDR_NSH: + case VIRTCHNL2_PROTO_HDR_QUIC: + case VIRTCHNL2_PROTO_HDR_PFCP: + case VIRTCHNL2_PROTO_HDR_PFCP_NODE: + case VIRTCHNL2_PROTO_HDR_PFCP_SESSION: + case VIRTCHNL2_PROTO_HDR_RTP: + case VIRTCHNL2_PROTO_HDR_NO_PROTO: + default: + continue; + } + adapter->ptype_tbl[ptype->ptype_id_10] = proto_hdr; + } + } + } + +free_ptype_info: + rte_free(ptype_info); + clear_cmd(adapter); + return ret; +} + int idpf_vc_get_caps(struct idpf_adapter *adapter) { @@ -980,3 +1194,29 @@ idpf_vc_ena_dis_vport(struct idpf_vport *vport, bool enable) return err; } + +int +idpf_vc_query_ptype_info(struct idpf_adapter *adapter) +{ + struct 
virtchnl2_get_ptype_info *ptype_info; + struct idpf_cmd_info args; + int len, err; + + len = sizeof(struct virtchnl2_get_ptype_info); + ptype_info = rte_zmalloc("ptype_info", len, 0); + if (ptype_info == NULL) + return -ENOMEM; + + ptype_info->start_ptype_id = 0; + ptype_info->num_ptypes = IDPF_MAX_PKT_TYPE; + args.ops = VIRTCHNL2_OP_GET_PTYPE_INFO; + args.in_args = (u8 *)ptype_info; + args.in_args_size = len; + + err = idpf_execute_vc_cmd(adapter, &args); + if (err != 0) + PMD_DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_GET_PTYPE_INFO"); + + rte_free(ptype_info); + return err; +} -- 2.34.1