From mboxrd@z Thu Jan 1 00:00:00 1970
Return-Path: 
Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124])
	by inbox.dpdk.org (Postfix) with ESMTP id 1649EA034F;
	Wed, 24 Feb 2021 02:29:47 +0100 (CET)
Received: from [217.70.189.124] (localhost [127.0.0.1])
	by mails.dpdk.org (Postfix) with ESMTP id 39438160771;
	Wed, 24 Feb 2021 02:28:38 +0100 (CET)
Received: from szxga04-in.huawei.com (szxga04-in.huawei.com [45.249.212.190])
	by mails.dpdk.org (Postfix) with ESMTP id 78A74160716
	for ; Wed, 24 Feb 2021 02:28:23 +0100 (CET)
Received: from DGGEMS414-HUB.china.huawei.com (unknown [172.30.72.58])
	by szxga04-in.huawei.com (SkyGuard) with ESMTP id 4DldYc1kLWz16C0G;
	Wed, 24 Feb 2021 09:26:44 +0800 (CST)
Received: from localhost.localdomain (10.69.192.56)
	by DGGEMS414-HUB.china.huawei.com (10.3.19.214)
	with Microsoft SMTP Server id 14.3.498.0;
	Wed, 24 Feb 2021 09:28:16 +0800
From: Lijun Ou 
To: 
CC: , 
Date: Wed, 24 Feb 2021 09:28:54 +0800
Message-ID: <1614130139-42926-9-git-send-email-oulijun@huawei.com>
X-Mailer: git-send-email 2.7.4
In-Reply-To: <1614130139-42926-1-git-send-email-oulijun@huawei.com>
References: <1614130139-42926-1-git-send-email-oulijun@huawei.com>
MIME-Version: 1.0
Content-Type: text/plain
X-Originating-IP: [10.69.192.56]
X-CFilter-Loop: Reflected
Subject: [dpdk-dev] [PATCH 08/13] net/hns3: support RXD advanced layout
X-BeenThere: dev@dpdk.org
X-Mailman-Version: 2.1.29
Precedence: list
List-Id: DPDK patches and discussions
List-Unsubscribe: ,
List-Archive: 
List-Post: 
List-Help: 
List-Subscribe: ,
Errors-To: dev-bounces@dpdk.org
Sender: "dev" 

From: Chengwen Feng

Currently, the driver gets the packet type by parsing the
L3_ID/L4_ID/OL3_ID/OL4_ID fields from the Rx descriptor and then looking
up multiple tables, which is time-consuming.

Kunpeng930 now supports an advanced RXD layout, in which:
1. OL3_ID/OL4_ID are combined into an 8-bit PTYPE field, so the driver
   gets the packet type with a single table lookup.
   Note: L3_ID/L4_ID become reserved fields.
2. The 1588 timestamp is located in the Rx descriptor instead of being
   queried from the firmware.
3. L3E/L4E/OL3E/OL4E are zero whenever L3L4P is zero, so the driver can
   optimize the good-checksum calculation (when L3E/L4E is zero, mark
   PKT_RX_IP_CKSUM_GOOD/PKT_RX_L4_CKSUM_GOOD).

For compatibility, the firmware reports the RXD advanced layout
capability, and the driver identifies it and enables the feature by
default.

This patch only provides the basic function: identify and enable the
RXD advanced layout, and look up the ptype table when supported.
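To illustrate the intended fast paths, here is a minimal standalone
sketch (not part of the diff below). The PTYPE lookup reuses the
HNS3_RXD_PTYPE_S/HNS3_RXD_PTYPE_M definitions this patch adds to
hns3_rxtx.h; the function names and the L3L4P/L3E/L4E bit positions are
illustrative placeholders, not the driver's actual descriptor layout.

#include <stdint.h>
#include <rte_mbuf.h>	/* PKT_RX_IP_CKSUM_GOOD / PKT_RX_L4_CKSUM_GOOD */

/* Matches the definitions added to hns3_rxtx.h by this patch. */
#define HNS3_RXD_PTYPE_S	4
#define HNS3_RXD_PTYPE_M	(0xff << HNS3_RXD_PTYPE_S)

/* Placeholder bit positions, for illustration only. */
#define RXD_L3L4P_BIT	(1u << 11)
#define RXD_L3E_BIT	(1u << 12)
#define RXD_L4E_BIT	(1u << 13)

/*
 * Item 1: the 8-bit PTYPE field in the descriptor's ol_info word
 * indexes a single 256-entry table, replacing the chained
 * L3_ID/L4_ID/OL3_ID/OL4_ID lookups.
 */
static inline uint32_t
rxd_adv_layout_ptype(const uint32_t ptype_tbl[256], uint32_t ol_info)
{
	return ptype_tbl[(ol_info & HNS3_RXD_PTYPE_M) >> HNS3_RXD_PTYPE_S];
}

/*
 * Item 3: when L3L4P is set, the hardware has checked the checksums,
 * and a zero L3E/L4E error bit directly means "good", so no further
 * error parsing is needed on the hot path.
 */
static inline uint64_t
rxd_adv_layout_csum_flags(uint32_t l234_info)
{
	uint64_t flags = 0;

	if (!(l234_info & RXD_L3L4P_BIT))
		return 0;	/* hardware did not check this packet */
	if (!(l234_info & RXD_L3E_BIT))
		flags |= PKT_RX_IP_CKSUM_GOOD;
	if (!(l234_info & RXD_L4E_BIT))
		flags |= PKT_RX_L4_CKSUM_GOOD;
	return flags;
}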
Signed-off-by: Chengwen Feng 
Signed-off-by: Lijun Ou 
---
 drivers/net/hns3/hns3_cmd.c       |   8 +-
 drivers/net/hns3/hns3_cmd.h       |   5 +
 drivers/net/hns3/hns3_ethdev.c    |   2 +
 drivers/net/hns3/hns3_ethdev.h    |  16 +++
 drivers/net/hns3/hns3_ethdev_vf.c |   2 +
 drivers/net/hns3/hns3_regs.h      |   1 +
 drivers/net/hns3/hns3_rxtx.c      | 200 ++++++++++++++++++++++++++++++++++++++
 drivers/net/hns3/hns3_rxtx.h      |  11 +++
 8 files changed, 243 insertions(+), 2 deletions(-)

diff --git a/drivers/net/hns3/hns3_cmd.c b/drivers/net/hns3/hns3_cmd.c
index 32cd56b..8b9f075 100644
--- a/drivers/net/hns3/hns3_cmd.c
+++ b/drivers/net/hns3/hns3_cmd.c
@@ -409,8 +409,9 @@ hns3_cmd_send(struct hns3_hw *hw, struct hns3_cmd_desc *desc, int num)
 	return retval;
 }
 
-static void hns3_parse_capability(struct hns3_hw *hw,
-				  struct hns3_query_version_cmd *cmd)
+static void
+hns3_parse_capability(struct hns3_hw *hw,
+		      struct hns3_query_version_cmd *cmd)
 {
 	uint32_t caps = rte_le_to_cpu_32(cmd->caps[0]);
 
@@ -429,6 +430,9 @@ static void hns3_parse_capability(struct hns3_hw *hw,
 		hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_INDEP_TXRX_B, 1);
 	if (hns3_get_bit(caps, HNS3_CAPS_STASH_B))
 		hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_STASH_B, 1);
+	if (hns3_get_bit(caps, HNS3_CAPS_RXD_ADV_LAYOUT_B))
+		hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_RXD_ADV_LAYOUT_B,
+			     1);
 }
 
 static uint32_t
diff --git a/drivers/net/hns3/hns3_cmd.h b/drivers/net/hns3/hns3_cmd.h
index 7f567cb..6ceb655 100644
--- a/drivers/net/hns3/hns3_cmd.h
+++ b/drivers/net/hns3/hns3_cmd.h
@@ -312,6 +312,11 @@ enum HNS3_CAPS_BITS {
 	HNS3_CAPS_TQP_TXRX_INDEP_B,
 	HNS3_CAPS_HW_PAD_B,
 	HNS3_CAPS_STASH_B,
+	HNS3_CAPS_UDP_TUNNEL_CSUM_B,
+	HNS3_CAPS_RAS_IMP_B,
+	HNS3_CAPS_FEC_B,
+	HNS3_CAPS_PAUSE_B,
+	HNS3_CAPS_RXD_ADV_LAYOUT_B,
 };
 
 enum HNS3_API_CAP_BITS {
diff --git a/drivers/net/hns3/hns3_ethdev.c b/drivers/net/hns3/hns3_ethdev.c
index a97dee4..b3fd331 100644
--- a/drivers/net/hns3/hns3_ethdev.c
+++ b/drivers/net/hns3/hns3_ethdev.c
@@ -4970,6 +4970,8 @@ hns3_do_start(struct hns3_adapter *hns, bool reset_queue)
 		return ret;
 	}
 
+	hns3_enable_rxd_adv_layout(hw);
+
 	ret = hns3_init_queues(hns, reset_queue);
 	if (ret) {
 		PMD_INIT_LOG(ERR, "failed to init queues, ret = %d.", ret);
diff --git a/drivers/net/hns3/hns3_ethdev.h b/drivers/net/hns3/hns3_ethdev.h
index c495802..932600d 100644
--- a/drivers/net/hns3/hns3_ethdev.h
+++ b/drivers/net/hns3/hns3_ethdev.h
@@ -667,8 +667,13 @@ struct hns3_mp_param {
 #define HNS3_OL2TBL_NUM	4
 #define HNS3_OL3TBL_NUM	16
 #define HNS3_OL4TBL_NUM	16
+#define HNS3_PTYPE_NUM	256
 
 struct hns3_ptype_table {
+	/*
+	 * The following fields are used to calculate the packet type from
+	 * the L3_ID/L4_ID/OL3_ID/OL4_ID in the Rx descriptor.
+	 */
 	uint32_t l2l3table[HNS3_L2TBL_NUM][HNS3_L3TBL_NUM];
 	uint32_t l4table[HNS3_L4TBL_NUM];
 	uint32_t inner_l2table[HNS3_L2TBL_NUM];
@@ -677,6 +682,13 @@ struct hns3_ptype_table {
 	uint32_t ol2table[HNS3_OL2TBL_NUM];
 	uint32_t ol3table[HNS3_OL3TBL_NUM];
 	uint32_t ol4table[HNS3_OL4TBL_NUM];
+
+	/*
+	 * The following field is used to calculate the packet type from the
+	 * PTYPE in the Rx descriptor; it is used only when the firmware
+	 * reports HNS3_CAPS_RXD_ADV_LAYOUT_B and the driver has enabled it.
+	 */
+	uint32_t ptype[HNS3_PTYPE_NUM] __rte_cache_min_aligned;
 };
 
 #define HNS3_FIXED_MAX_TQP_NUM_MODE	0
@@ -771,6 +783,7 @@ struct hns3_adapter {
 #define HNS3_DEV_SUPPORT_TX_PUSH_B	0x5
 #define HNS3_DEV_SUPPORT_INDEP_TXRX_B	0x6
 #define HNS3_DEV_SUPPORT_STASH_B	0x7
+#define HNS3_DEV_SUPPORT_RXD_ADV_LAYOUT_B	0x9
 
 #define hns3_dev_dcb_supported(hw) \
 	hns3_get_bit((hw)->capability, HNS3_DEV_SUPPORT_DCB_B)
@@ -801,6 +814,9 @@ struct hns3_adapter {
 #define hns3_dev_stash_supported(hw) \
 	hns3_get_bit((hw)->capability, HNS3_DEV_SUPPORT_STASH_B)
 
+#define hns3_dev_rxd_adv_layout_supported(hw) \
+	hns3_get_bit((hw)->capability, HNS3_DEV_SUPPORT_RXD_ADV_LAYOUT_B)
+
 #define HNS3_DEV_PRIVATE_TO_HW(adapter) \
 	(&((struct hns3_adapter *)adapter)->hw)
 #define HNS3_DEV_PRIVATE_TO_PF(adapter) \
diff --git a/drivers/net/hns3/hns3_ethdev_vf.c b/drivers/net/hns3/hns3_ethdev_vf.c
index 1b8c029..e7f6974 100644
--- a/drivers/net/hns3/hns3_ethdev_vf.c
+++ b/drivers/net/hns3/hns3_ethdev_vf.c
@@ -2125,6 +2125,8 @@ hns3vf_do_start(struct hns3_adapter *hns, bool reset_queue)
 	if (ret)
 		return ret;
 
+	hns3_enable_rxd_adv_layout(hw);
+
 	ret = hns3_init_queues(hns, reset_queue);
 	if (ret)
 		hns3_err(hw, "failed to init queues, ret = %d.", ret);
diff --git a/drivers/net/hns3/hns3_regs.h b/drivers/net/hns3/hns3_regs.h
index 39fc5d1..0540554 100644
--- a/drivers/net/hns3/hns3_regs.h
+++ b/drivers/net/hns3/hns3_regs.h
@@ -36,6 +36,7 @@
 #define HNS3_GLOBAL_RESET_REG	0x20A00
 #define HNS3_FUN_RST_ING	0x20C00
 #define HNS3_GRO_EN_REG		0x28000
+#define HNS3_RXD_ADV_LAYOUT_EN_REG	0x28008
 
 /* Vector0 register bits for reset */
 #define HNS3_VECTOR0_FUNCRESET_INT_B	0
diff --git a/drivers/net/hns3/hns3_rxtx.c b/drivers/net/hns3/hns3_rxtx.c
index a8bd2cc..e65a419 100644
--- a/drivers/net/hns3/hns3_rxtx.c
+++ b/drivers/net/hns3/hns3_rxtx.c
@@ -1802,6 +1802,7 @@ hns3_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc,
 					       HNS3_PORT_BASE_VLAN_ENABLE;
 	else
 		rxq->pvid_sw_discard_en = false;
+	rxq->ptype_en = hns3_dev_rxd_adv_layout_supported(hw) ? true : false;
 	rxq->configured = true;
 	rxq->io_base = (void *)((char *)hw->io_base + HNS3_TQP_REG_OFFSET +
 				idx * HNS3_TQP_REG_SIZE);
@@ -1987,6 +1988,193 @@ hns3_init_tunnel_ptype_tbl(struct hns3_ptype_table *tbl)
 	tbl->ol4table[2] = RTE_PTYPE_TUNNEL_NVGRE;
 }
 
+static void
+hns3_init_adv_layout_ptype(struct hns3_ptype_table *tbl)
+{
+	uint32_t *ptype = tbl->ptype;
+
+	/* Non-tunnel L2 */
+	ptype[1] = RTE_PTYPE_L2_ETHER_ARP;
+	ptype[3] = RTE_PTYPE_L2_ETHER_LLDP;
+	ptype[8] = RTE_PTYPE_L2_ETHER_TIMESYNC;
+
+	/* Non-tunnel IPv4 */
+	ptype[17] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+		    RTE_PTYPE_L4_FRAG;
+	ptype[18] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+		    RTE_PTYPE_L4_NONFRAG;
+	ptype[19] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+		    RTE_PTYPE_L4_UDP;
+	ptype[20] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+		    RTE_PTYPE_L4_TCP;
+	/* The next ptype is GRE over IPv4 */
+	ptype[21] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
+	ptype[22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+		    RTE_PTYPE_L4_SCTP;
+	ptype[23] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+		    RTE_PTYPE_L4_IGMP;
+	ptype[24] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+		    RTE_PTYPE_L4_ICMP;
+	/* The next ptype is PTP over IPv4 + UDP */
+	ptype[25] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+		    RTE_PTYPE_L4_UDP;
+
+	/* IPv4 --> GRE/Teredo/VXLAN */
+	ptype[29] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+		    RTE_PTYPE_TUNNEL_GRENAT;
+	/* IPv4 --> GRE/Teredo/VXLAN --> MAC */
+	ptype[30] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+		    RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER;
+
+	/* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */
+	ptype[31] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+		    RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+		    RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+		    RTE_PTYPE_INNER_L4_FRAG;
+	ptype[32] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+		    RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+		    RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+		    RTE_PTYPE_INNER_L4_NONFRAG;
+	ptype[33] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+		    RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+		    RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+		    RTE_PTYPE_INNER_L4_UDP;
+	ptype[34] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+		    RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+		    RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+		    RTE_PTYPE_INNER_L4_TCP;
+	ptype[35] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+		    RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+		    RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+		    RTE_PTYPE_INNER_L4_SCTP;
+	/* The next ptype's inner L4 is IGMP */
+	ptype[36] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+		    RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+		    RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN;
+	ptype[37] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+		    RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+		    RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+		    RTE_PTYPE_INNER_L4_ICMP;
+
+	/* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */
+	ptype[39] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+		    RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+		    RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+		    RTE_PTYPE_INNER_L4_FRAG;
+	ptype[40] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+		    RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+		    RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+		    RTE_PTYPE_INNER_L4_NONFRAG;
+	ptype[41] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+		    RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+		    RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+		    RTE_PTYPE_INNER_L4_UDP;
+	ptype[42] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+		    RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+		    RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+		    RTE_PTYPE_INNER_L4_TCP;
+	ptype[43] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+		    RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+		    RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+		    RTE_PTYPE_INNER_L4_SCTP;
+	/* The next ptype's inner L4 is IGMP */
+	ptype[44] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+		    RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+		    RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN;
+	ptype[45] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+		    RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+		    RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+		    RTE_PTYPE_INNER_L4_ICMP;
+
+	/* Non-tunnel IPv6 */
+	ptype[111] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+		     RTE_PTYPE_L4_FRAG;
+	ptype[112] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+		     RTE_PTYPE_L4_NONFRAG;
+	ptype[113] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+		     RTE_PTYPE_L4_UDP;
+	ptype[114] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+		     RTE_PTYPE_L4_TCP;
+	/* The next ptype is GRE over IPv6 */
+	ptype[115] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
+	ptype[116] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+		     RTE_PTYPE_L4_SCTP;
+	ptype[117] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+		     RTE_PTYPE_L4_IGMP;
+	ptype[118] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+		     RTE_PTYPE_L4_ICMP;
+	/* Special for PTP over IPv6 + UDP */
+	ptype[119] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+		     RTE_PTYPE_L4_UDP;
+
+	/* IPv6 --> GRE/Teredo/VXLAN */
+	ptype[123] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+		     RTE_PTYPE_TUNNEL_GRENAT;
+	/* IPv6 --> GRE/Teredo/VXLAN --> MAC */
+	ptype[124] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+		     RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER;
+
+	/* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */
+	ptype[125] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+		     RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+		     RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+		     RTE_PTYPE_INNER_L4_FRAG;
+	ptype[126] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+		     RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+		     RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+		     RTE_PTYPE_INNER_L4_NONFRAG;
+	ptype[127] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+		     RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+		     RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+		     RTE_PTYPE_INNER_L4_UDP;
+	ptype[128] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+		     RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+		     RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+		     RTE_PTYPE_INNER_L4_TCP;
+	ptype[129] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+		     RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+		     RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+		     RTE_PTYPE_INNER_L4_SCTP;
+	/* The next ptype's inner L4 is IGMP */
+	ptype[130] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+		     RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+		     RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN;
+	ptype[131] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+		     RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+		     RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+		     RTE_PTYPE_INNER_L4_ICMP;
+
+	/* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */
+	ptype[133] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+		     RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+		     RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+		     RTE_PTYPE_INNER_L4_FRAG;
+	ptype[134] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+		     RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+		     RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+		     RTE_PTYPE_INNER_L4_NONFRAG;
+	ptype[135] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+		     RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+		     RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+		     RTE_PTYPE_INNER_L4_UDP;
+	ptype[136] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+		     RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+		     RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+		     RTE_PTYPE_INNER_L4_TCP;
+	ptype[137] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+		     RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+		     RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+		     RTE_PTYPE_INNER_L4_SCTP;
+	/* The next ptype's inner L4 is IGMP */
+	ptype[138] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+		     RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+		     RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN;
+	ptype[139] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+		     RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+		     RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+		     RTE_PTYPE_INNER_L4_ICMP;
+}
+
 void
 hns3_init_rx_ptype_tble(struct rte_eth_dev *dev)
 {
@@ -1997,6 +2185,7 @@ hns3_init_rx_ptype_tble(struct rte_eth_dev *dev)
 
 	hns3_init_non_tunnel_ptype_tbl(tbl);
 	hns3_init_tunnel_ptype_tbl(tbl);
+	hns3_init_adv_layout_ptype(tbl);
 }
 
 static inline void
@@ -4020,3 +4209,14 @@ hns3_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 	else
 		return fbd_num - driver_hold_bd_num;
 }
+
+void
+hns3_enable_rxd_adv_layout(struct hns3_hw *hw)
+{
+	/*
+	 * If the hardware supports the RXD advanced layout, the driver
+	 * enables it by default.
+	 */
+	if (hns3_dev_rxd_adv_layout_supported(hw))
+		hns3_write_dev(hw, HNS3_RXD_ADV_LAYOUT_EN_REG, 1);
+}
diff --git a/drivers/net/hns3/hns3_rxtx.h b/drivers/net/hns3/hns3_rxtx.h
index 7118bd4..9adeb24 100644
--- a/drivers/net/hns3/hns3_rxtx.h
+++ b/drivers/net/hns3/hns3_rxtx.h
@@ -88,6 +88,8 @@
 #define HNS3_RXD_OL3ID_M	(0xf << HNS3_RXD_OL3ID_S)
 #define HNS3_RXD_OL4ID_S	8
 #define HNS3_RXD_OL4ID_M	(0xf << HNS3_RXD_OL4ID_S)
+#define HNS3_RXD_PTYPE_S	4
+#define HNS3_RXD_PTYPE_M	(0xff << HNS3_RXD_PTYPE_S)
 #define HNS3_RXD_FBHI_S		12
 #define HNS3_RXD_FBHI_M		(0x3 << HNS3_RXD_FBHI_S)
 #define HNS3_RXD_FBLI_S		14
@@ -328,6 +330,7 @@ struct hns3_rx_queue {
 	 * point, the pvid_sw_discard_en will be false.
 	 */
 	bool pvid_sw_discard_en;
+	bool ptype_en;   /* indicate if the ptype field is enabled */
 	bool enabled;    /* indicate if Rx queue has been enabled */
 
 	struct hns3_rx_basic_stats basic_stats;
@@ -609,6 +612,13 @@ hns3_rx_calc_ptype(struct hns3_rx_queue *rxq, const uint32_t l234_info,
 	const struct hns3_ptype_table * const ptype_tbl = rxq->ptype_tbl;
 	uint32_t l2id, l3id, l4id;
 	uint32_t ol3id, ol4id, ol2id;
+	uint32_t ptype;
+
+	if (rxq->ptype_en) {
+		ptype = hns3_get_field(ol_info, HNS3_RXD_PTYPE_M,
+				       HNS3_RXD_PTYPE_S);
+		return ptype_tbl->ptype[ptype];
+	}
 
 	ol4id = hns3_get_field(ol_info, HNS3_RXD_OL4ID_M, HNS3_RXD_OL4ID_S);
 	ol3id = hns3_get_field(ol_info, HNS3_RXD_OL3ID_M, HNS3_RXD_OL3ID_S);
@@ -707,5 +717,6 @@ int hns3_start_all_rxqs(struct rte_eth_dev *dev);
 void hns3_stop_all_txqs(struct rte_eth_dev *dev);
 void hns3_restore_tqp_enable_state(struct hns3_hw *hw);
 int hns3_tx_done_cleanup(void *txq, uint32_t free_cnt);
+void hns3_enable_rxd_adv_layout(struct hns3_hw *hw);
 
 #endif /* _HNS3_RXTX_H_ */
-- 
2.7.4