From: Xiaoyun wang
To: dev@dpdk.org
Date: Wed, 25 Sep 2019 22:30:41 +0800
Message-ID: <924687da68bf91dc2dffbcfb39258917f7f0966c.1569421287.git.cloud.wangxiaoyun@huawei.com>
Subject: [dpdk-dev] [PATCH v2 13/17] net/hinic: support inner L3 checksum offload

This patch adds support for inner L3 checksum offload and reworks the
Rx checksum offload handling.

Signed-off-by: Xiaoyun wang
---
 drivers/net/hinic/hinic_pmd_ethdev.h |   1 +
 drivers/net/hinic/hinic_pmd_rx.c     |  10 +-
 drivers/net/hinic/hinic_pmd_tx.c     | 190 ++++++++++++++++++++++-------------
 3 files changed, 127 insertions(+), 74 deletions(-)

diff --git a/drivers/net/hinic/hinic_pmd_ethdev.h b/drivers/net/hinic/hinic_pmd_ethdev.h
index dd96667..3e3f3b3 100644
--- a/drivers/net/hinic/hinic_pmd_ethdev.h
+++ b/drivers/net/hinic/hinic_pmd_ethdev.h
@@ -178,6 +178,7 @@ struct hinic_nic_dev {
 	 * vf: the same with associate pf
 	 */
 	u32 default_cos;
+	u32 rx_csum_en;
 
 	struct hinic_filter_info filter;
 	struct hinic_ntuple_filter_list filter_ntuple_list;
diff --git a/drivers/net/hinic/hinic_pmd_rx.c b/drivers/net/hinic/hinic_pmd_rx.c
index 08e02ae..37b4f5c 100644
--- a/drivers/net/hinic/hinic_pmd_rx.c
+++ b/drivers/net/hinic/hinic_pmd_rx.c
@@ -658,7 +658,6 @@ int hinic_rx_configure(struct rte_eth_dev *dev)
 	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
 	struct rte_eth_rss_conf rss_conf =
 		dev->data->dev_conf.rx_adv_conf.rss_conf;
-	u32 csum_en = 0;
 	int err;
 
 	if (nic_dev->flags & ETH_MQ_RX_RSS_FLAG) {
@@ -678,9 +677,10 @@ int hinic_rx_configure(struct rte_eth_dev *dev)
 
 	/* Enable both L3/L4 rx checksum offload */
 	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_CHECKSUM)
-		csum_en = HINIC_RX_CSUM_OFFLOAD_EN;
+		nic_dev->rx_csum_en = HINIC_RX_CSUM_OFFLOAD_EN;
 
-	err = hinic_set_rx_csum_offload(nic_dev->hwdev, csum_en);
+	err = hinic_set_rx_csum_offload(nic_dev->hwdev,
+					HINIC_RX_CSUM_OFFLOAD_EN);
 	if (err)
 		goto rx_csum_ofl_err;
 
@@ -781,6 +781,10 @@ static inline uint64_t hinic_rx_csum(uint32_t status, struct hinic_rxq *rxq)
 {
 	uint32_t checksum_err;
 	uint64_t flags;
+	struct hinic_nic_dev *nic_dev = rxq->nic_dev;
+
+	if (unlikely(!(nic_dev->rx_csum_en & HINIC_RX_CSUM_OFFLOAD_EN)))
+		return PKT_RX_IP_CKSUM_UNKNOWN;
 
 	/* most case checksum is ok */
 	checksum_err = HINIC_GET_RX_CSUM_ERR(status);
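With the hunk above, hinic_rx_csum() reports PKT_RX_IP_CKSUM_UNKNOWN whenever
the port was configured without DEV_RX_OFFLOAD_CHECKSUM. A minimal
consumer-side sketch of what that implies for an application (illustrative
only; check_rx_csum is a hypothetical name, not part of this patch):

#include <rte_mbuf.h>

/* Handle all three Rx IP checksum states; with Rx checksum offload
 * disabled, this PMD now reports UNKNOWN rather than a hardware result.
 */
static int check_rx_csum(const struct rte_mbuf *m)
{
	switch (m->ol_flags & PKT_RX_IP_CKSUM_MASK) {
	case PKT_RX_IP_CKSUM_GOOD:
		return 0;	/* verified by hardware */
	case PKT_RX_IP_CKSUM_BAD:
		return -1;	/* drop or count as error */
	default:
		return 1;	/* UNKNOWN/NONE: verify in software */
	}
}
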
diff --git a/drivers/net/hinic/hinic_pmd_tx.c b/drivers/net/hinic/hinic_pmd_tx.c
index 0ef7add..26f481f 100644
--- a/drivers/net/hinic/hinic_pmd_tx.c
+++ b/drivers/net/hinic/hinic_pmd_tx.c
@@ -20,6 +20,9 @@
 #include "hinic_pmd_tx.h"
 
 /* packet header and tx offload info */
+#define ETHER_LEN_NO_VLAN		14
+#define ETHER_LEN_WITH_VLAN		18
+#define HEADER_LEN_OFFSET		2
 #define VXLANLEN			8
 #define MAX_PLD_OFFSET			221
 #define MAX_SINGLE_SGE_SIZE		65536
@@ -34,6 +37,9 @@
 #define HINIC_TSO_PKT_MAX_SGE		127	/* tso max sge 127 */
 #define HINIC_TSO_SEG_NUM_INVALID(num)	((num) > HINIC_TSO_PKT_MAX_SGE)
 
+#define HINIC_TX_OUTER_CHECKSUM_FLAG_SET	1
+#define HINIC_TX_OUTER_CHECKSUM_FLAG_NO_SET	0
+
 /* sizeof(struct hinic_sq_bufdesc) == 16, shift 4 */
 #define HINIC_BUF_DESC_SIZE(nr_descs)	(SIZE_8BYTES(((u32)nr_descs) << 4))
 
@@ -476,16 +482,16 @@ static inline bool hinic_is_tso_sge_valid(struct rte_mbuf *mbuf,
 hinic_set_l4_csum_info(struct hinic_sq_task *task,
 		u32 *queue_info, struct hinic_tx_offload_info *poff_info)
 {
-	u32 tcp_udp_cs, sctp;
+	u32 tcp_udp_cs, sctp = 0;
 	u16 l2hdr_len;
 
-	sctp = 0;
 	if (unlikely(poff_info->inner_l4_type == SCTP_OFFLOAD_ENABLE))
 		sctp = 1;
 
 	tcp_udp_cs = poff_info->inner_l4_tcp_udp;
 
-	if (poff_info->tunnel_type == TUNNEL_UDP_NO_CSUM) {
+	if (poff_info->tunnel_type == TUNNEL_UDP_CSUM ||
+	    poff_info->tunnel_type == TUNNEL_UDP_NO_CSUM) {
 		l2hdr_len = poff_info->outer_l2_len;
 
 		task->pkt_info2 |=
@@ -665,50 +671,6 @@ static inline void hinic_xmit_mbuf_cleanup(struct hinic_txq *txq)
 	return (struct hinic_sq_wqe *)WQ_WQE_ADDR(wq, cur_pi);
 }
 
-static inline int
-hinic_validate_tx_offload(const struct rte_mbuf *m)
-{
-	uint64_t ol_flags = m->ol_flags;
-	uint64_t inner_l3_offset = m->l2_len;
-
-	/* just support vxlan offload */
-	if ((ol_flags & PKT_TX_TUNNEL_MASK) &&
-	    !(ol_flags & PKT_TX_TUNNEL_VXLAN))
-		return -ENOTSUP;
-
-	if (ol_flags & PKT_TX_OUTER_IP_CKSUM)
-		inner_l3_offset += m->outer_l2_len + m->outer_l3_len;
-
-	/* Headers are fragmented */
-	if (rte_pktmbuf_data_len(m) < inner_l3_offset + m->l3_len + m->l4_len)
-		return -ENOTSUP;
-
-	/* IP checksum can be counted only for IPv4 packet */
-	if ((ol_flags & PKT_TX_IP_CKSUM) && (ol_flags & PKT_TX_IPV6))
-		return -EINVAL;
-
-	/* IP type not set when required */
-	if (ol_flags & (PKT_TX_L4_MASK | PKT_TX_TCP_SEG)) {
-		if (!(ol_flags & (PKT_TX_IPV4 | PKT_TX_IPV6)))
-			return -EINVAL;
-	}
-
-	/* Check requirements for TSO packet */
-	if (ol_flags & PKT_TX_TCP_SEG) {
-		if (m->tso_segsz == 0 ||
-			((ol_flags & PKT_TX_IPV4) &&
-			!(ol_flags & PKT_TX_IP_CKSUM)))
-			return -EINVAL;
-	}
-
-	/* PKT_TX_OUTER_IP_CKSUM set for non outer IPv4 packet.
-	 */
-	if ((ol_flags & PKT_TX_OUTER_IP_CKSUM) &&
-	    !(ol_flags & PKT_TX_OUTER_IPV4))
-		return -EINVAL;
-
-	return 0;
-}
-
 static inline uint16_t
 hinic_ipv4_phdr_cksum(const struct rte_ipv4_hdr *ipv4_hdr, uint64_t ol_flags)
 {
@@ -760,6 +722,65 @@ static inline void hinic_xmit_mbuf_cleanup(struct hinic_txq *txq)
 	return __rte_raw_cksum_reduce(sum);
 }
 
+static inline void
+hinic_get_pld_offset(struct rte_mbuf *m, struct hinic_tx_offload_info *off_info,
+		     int outer_cs_flag)
+{
+	uint64_t ol_flags = m->ol_flags;
+
+	if (outer_cs_flag == 1) {
+		if ((ol_flags & PKT_TX_UDP_CKSUM) == PKT_TX_UDP_CKSUM) {
+			off_info->payload_offset = m->outer_l2_len +
+				m->outer_l3_len + m->l2_len + m->l3_len;
+		} else if ((ol_flags & PKT_TX_TCP_CKSUM) ||
+				(ol_flags & PKT_TX_TCP_SEG)) {
+			off_info->payload_offset = m->outer_l2_len +
+					m->outer_l3_len + m->l2_len +
+					m->l3_len + m->l4_len;
+		}
+	} else {
+		if ((ol_flags & PKT_TX_UDP_CKSUM) == PKT_TX_UDP_CKSUM) {
+			off_info->payload_offset = m->l2_len + m->l3_len;
+		} else if ((ol_flags & PKT_TX_TCP_CKSUM) ||
+				(ol_flags & PKT_TX_TCP_SEG)) {
+			off_info->payload_offset = m->l2_len + m->l3_len +
+						   m->l4_len;
+		}
+	}
+}
+
+static inline void
+hinic_analyze_tx_info(struct rte_mbuf *mbuf,
+		      struct hinic_tx_offload_info *off_info)
+{
+	struct rte_ether_hdr *eth_hdr;
+	struct rte_vlan_hdr *vlan_hdr;
+	struct rte_ipv4_hdr *ip4h;
+	u16 pkt_type;
+	u8 *hdr;
+
+	hdr = (u8 *)rte_pktmbuf_mtod(mbuf, u8 *);
+	eth_hdr = (struct rte_ether_hdr *)hdr;
+	pkt_type = rte_be_to_cpu_16(eth_hdr->ether_type);
+
+	if (pkt_type == RTE_ETHER_TYPE_VLAN) {
+		off_info->outer_l2_len = ETHER_LEN_WITH_VLAN;
+		vlan_hdr = (struct rte_vlan_hdr *)(eth_hdr + 1);
+		pkt_type = rte_be_to_cpu_16(vlan_hdr->eth_proto);
+	} else {
+		off_info->outer_l2_len = ETHER_LEN_NO_VLAN;
+	}
+
+	if (pkt_type == RTE_ETHER_TYPE_IPV4) {
+		ip4h = (struct rte_ipv4_hdr *)(hdr + off_info->outer_l2_len);
+		off_info->outer_l3_len = (ip4h->version_ihl & 0xf) <<
+					 HEADER_LEN_OFFSET;
+	} else if (pkt_type == RTE_ETHER_TYPE_IPV6) {
+		/* not support ipv6 extension header */
+		off_info->outer_l3_len = sizeof(struct rte_ipv6_hdr);
+	}
+}
+
 static inline int
 hinic_tx_offload_pkt_prepare(struct rte_mbuf *m,
 			     struct hinic_tx_offload_info *off_info)
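In hinic_analyze_tx_info() above, the outer IPv4 header length comes from the
IHL field: the low nibble of version_ihl counts 32-bit words, and the shift by
HEADER_LEN_OFFSET (2) converts words to bytes. A standalone sketch of that
arithmetic (illustrative only, not part of the patch):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint8_t version_ihl = 0x45;	/* IPv4 (version 4), IHL = 5 words */
	uint16_t outer_l3_len = (version_ihl & 0xf) << 2;	/* words -> bytes */

	assert(outer_l3_len == 20);	/* minimal IPv4 header, no options */
	return 0;
}
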
@@ -771,42 +792,66 @@ static inline void hinic_xmit_mbuf_cleanup(struct hinic_txq *txq)
 	struct rte_ether_hdr *eth_hdr;
 	struct rte_vlan_hdr *vlan_hdr;
 	u16 eth_type = 0;
-	uint64_t inner_l3_offset = m->l2_len;
+	uint64_t inner_l3_offset;
 	uint64_t ol_flags = m->ol_flags;
 
-	/* Does packet set any of available offloads */
+	/* Check if the packets set available offload flags */
 	if (!(ol_flags & HINIC_TX_CKSUM_OFFLOAD_MASK))
 		return 0;
 
-	if (unlikely(hinic_validate_tx_offload(m)))
+	/* Support only vxlan offload */
+	if ((ol_flags & PKT_TX_TUNNEL_MASK) &&
+	    !(ol_flags & PKT_TX_TUNNEL_VXLAN))
+		return -ENOTSUP;
+
+#ifdef RTE_LIBRTE_ETHDEV_DEBUG
+	if (rte_validate_tx_offload(m) != 0)
 		return -EINVAL;
+#endif
 
-	if ((ol_flags & PKT_TX_OUTER_IP_CKSUM) ||
-	    (ol_flags & PKT_TX_OUTER_IPV6) ||
-	    (ol_flags & PKT_TX_TUNNEL_VXLAN)) {
-		inner_l3_offset += m->outer_l2_len + m->outer_l3_len;
-		off_info->outer_l2_len = m->outer_l2_len;
-		off_info->outer_l3_len = m->outer_l3_len;
-		/* just support vxlan tunneling pkt */
-		off_info->inner_l2_len = m->l2_len - VXLANLEN -
-			sizeof(struct rte_udp_hdr);
-		off_info->inner_l3_len = m->l3_len;
-		off_info->inner_l4_len = m->l4_len;
-		off_info->tunnel_length = m->l2_len;
-		off_info->payload_offset = m->outer_l2_len +
-				m->outer_l3_len + m->l2_len + m->l3_len;
-		off_info->tunnel_type = TUNNEL_UDP_NO_CSUM;
+	if (ol_flags & PKT_TX_TUNNEL_VXLAN) {
+		if ((ol_flags & PKT_TX_OUTER_IP_CKSUM) ||
+		    (ol_flags & PKT_TX_OUTER_IPV6)) {
+			inner_l3_offset = m->l2_len + m->outer_l2_len +
+					  m->outer_l3_len;
+			off_info->outer_l2_len = m->outer_l2_len;
+			off_info->outer_l3_len = m->outer_l3_len;
+			/* just support vxlan tunneling pkt */
+			off_info->inner_l2_len = m->l2_len - VXLANLEN -
+						 sizeof(*udp_hdr);
+			off_info->inner_l3_len = m->l3_len;
+			off_info->inner_l4_len = m->l4_len;
+			off_info->tunnel_length = m->l2_len;
+			off_info->tunnel_type = TUNNEL_UDP_NO_CSUM;
+
+			hinic_get_pld_offset(m, off_info,
+					     HINIC_TX_OUTER_CHECKSUM_FLAG_SET);
+		} else {
+			inner_l3_offset = m->l2_len;
+			hinic_analyze_tx_info(m, off_info);
+			/* just support vxlan tunneling pkt */
+			off_info->inner_l2_len = m->l2_len - VXLANLEN -
+				sizeof(*udp_hdr) - off_info->outer_l2_len -
+				off_info->outer_l3_len;
+			off_info->inner_l3_len = m->l3_len;
+			off_info->inner_l4_len = m->l4_len;
+			off_info->tunnel_length = m->l2_len -
+				off_info->outer_l2_len - off_info->outer_l3_len;
+			off_info->tunnel_type = TUNNEL_UDP_NO_CSUM;
+
+			hinic_get_pld_offset(m, off_info,
+					     HINIC_TX_OUTER_CHECKSUM_FLAG_NO_SET);
+		}
 	} else {
+		inner_l3_offset = m->l2_len;
 		off_info->inner_l2_len = m->l2_len;
 		off_info->inner_l3_len = m->l3_len;
 		off_info->inner_l4_len = m->l4_len;
 		off_info->tunnel_type = NOT_TUNNEL;
-		off_info->payload_offset = m->l2_len + m->l3_len;
-	}
 
-	if (((ol_flags & PKT_TX_L4_MASK) != PKT_TX_SCTP_CKSUM) &&
-	    ((ol_flags & PKT_TX_L4_MASK) != PKT_TX_UDP_CKSUM))
-		off_info->payload_offset += m->l4_len;
+		hinic_get_pld_offset(m, off_info,
+				     HINIC_TX_OUTER_CHECKSUM_FLAG_NO_SET);
+	}
 
 	/* invalid udp or tcp header */
 	if (unlikely(off_info->payload_offset > MAX_PLD_OFFSET))
@@ -855,6 +900,10 @@ static inline void hinic_xmit_mbuf_cleanup(struct hinic_txq *txq)
 			udp_hdr->dgram_cksum =
 				hinic_ipv6_phdr_cksum(ipv6_hdr, ol_flags);
 		}
+	} else if (ol_flags & PKT_TX_OUTER_IPV4) {
+		off_info->tunnel_type = TUNNEL_UDP_NO_CSUM;
+		off_info->inner_l4_tcp_udp = 1;
+		off_info->outer_l3_type = IPV4_PKT_NO_CHKSUM_OFFLOAD;
 	}
 
 	if (ol_flags & PKT_TX_IPV4)
@@ -892,7 +941,6 @@ static inline void hinic_xmit_mbuf_cleanup(struct hinic_txq *txq)
 
 		off_info->inner_l4_type = UDP_OFFLOAD_ENABLE;
 		off_info->inner_l4_tcp_udp = 1;
-		off_info->inner_l4_len = sizeof(struct rte_udp_hdr);
 	} else if (((ol_flags & PKT_TX_L4_MASK) == PKT_TX_TCP_CKSUM) ||
 			(ol_flags & PKT_TX_TCP_SEG)) {
 		if (ol_flags & PKT_TX_IPV4) {
-- 
1.8.3.1
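For reference, a minimal Tx-side sketch of the mbuf metadata an application
would set so this path performs inner (and outer) checksum offload for a
VXLAN-encapsulated TCP/IPv4 packet. Illustrative only, not part of the patch;
set_vxlan_tx_offload is a hypothetical name and the lengths assume untagged
Ethernet with minimal headers:

#include <rte_mbuf.h>

static void set_vxlan_tx_offload(struct rte_mbuf *m)
{
	m->outer_l2_len = 14;		/* outer Ethernet */
	m->outer_l3_len = 20;		/* outer IPv4, no options */
	m->l2_len = 8 + 8 + 14;		/* outer UDP + VXLAN + inner Ethernet */
	m->l3_len = 20;			/* inner IPv4 */
	m->l4_len = 20;			/* inner TCP */

	m->ol_flags |= PKT_TX_TUNNEL_VXLAN |
		       PKT_TX_OUTER_IPV4 | PKT_TX_OUTER_IP_CKSUM |
		       PKT_TX_IPV4 | PKT_TX_IP_CKSUM |	/* inner L3 checksum */
		       PKT_TX_TCP_CKSUM;		/* inner L4 checksum */
}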