From: Lijun Ou <oulijun@huawei.com>
To: <ferruh.yigit@intel.com>
Cc: <dev@dpdk.org>, <linuxarm@huawei.com>
Subject: [dpdk-dev] [PATCH 3/8] net/hns3: fix Tx checksum with fixed header length
Date: Mon, 2 Nov 2020 22:38:14 +0800
Message-ID: <1604327899-60126-4-git-send-email-oulijun@huawei.com>
In-Reply-To: <1604327899-60126-1-git-send-email-oulijun@huawei.com>

From: Chengchang Tang <tangchengchang@huawei.com>

Currently, the header lengths of all layers are hard-coded to fixed
values. This leads to a checksum error when the actual header length
differs.

This patch fixes the above problem by using the header lengths carried
in the mbuf, instead of the fixed header lengths, to perform the Tx
checksum offload.

Fixes: bba636698316 ("net/hns3: support Rx/Tx and related operations")
Cc: stable@dpdk.org

Signed-off-by: Chengchang Tang <tangchengchang@huawei.com>
Signed-off-by: Lijun Ou <oulijun@huawei.com>
---
 drivers/net/hns3/hns3_ethdev.h |   2 +
 drivers/net/hns3/hns3_rxtx.c   | 318 +++++++++++++++++++----------------------
 drivers/net/hns3/hns3_rxtx.h   |   8 +-
 3 files changed, 154 insertions(+), 174 deletions(-)
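
Note: a sketch of the tunnel handling below (same assumptions as above,
with m a struct rte_mbuf *). For a VXLAN packet the mbuf's l2_len
covers the outer UDP header, the VXLAN header and the inner Ethernet
header, while the hns3 descriptor expects the tunnel header in the
outer L4 length field, so the two are split apart:

    /* outer L4 length carries UDP + VXLAN headers, in 4-byte units */
    tmp_outer |= hns3_gen_field_val(HNS3_TXD_L4LEN_M, HNS3_TXD_L4LEN_S,
                                    (uint8_t)RTE_ETHER_VXLAN_HLEN >>
                                    HNS3_L4_LEN_UNIT);
    /* what remains of l2_len is the actual inner L2 header length */
    inner_l2_len = m->l2_len - RTE_ETHER_VXLAN_HLEN;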

diff --git a/drivers/net/hns3/hns3_ethdev.h b/drivers/net/hns3/hns3_ethdev.h
index b23c90e..a2b61ff 100644
--- a/drivers/net/hns3/hns3_ethdev.h
+++ b/drivers/net/hns3/hns3_ethdev.h
@@ -827,6 +827,8 @@ struct hns3_adapter {
 #define hns3_get_bit(origin, shift) \
 	hns3_get_field((origin), (0x1UL << (shift)), (shift))
 
+#define hns3_gen_field_val(mask, shift, val) (((val) << (shift)) & (mask))
+
 /*
  * upper_32_bits - return bits 32-63 of a number
  * A basic shift-right of a 64- or 32-bit quantity. Use this to suppress
diff --git a/drivers/net/hns3/hns3_rxtx.c b/drivers/net/hns3/hns3_rxtx.c
index 9cd728b..abc2cab 100644
--- a/drivers/net/hns3/hns3_rxtx.c
+++ b/drivers/net/hns3/hns3_rxtx.c
@@ -2641,44 +2641,6 @@ hns3_tx_free_useless_buffer(struct hns3_tx_queue *txq)
 	txq->tx_bd_ready   = tx_bd_ready;
 }
 
-static int
-hns3_tso_proc_tunnel(struct hns3_desc *desc, uint64_t ol_flags,
-		     struct rte_mbuf *rxm, uint8_t *l2_len)
-{
-	uint64_t tun_flags;
-	uint8_t ol4_len;
-	uint32_t otmp;
-
-	tun_flags = ol_flags & PKT_TX_TUNNEL_MASK;
-	if (tun_flags == 0)
-		return 0;
-
-	otmp = rte_le_to_cpu_32(desc->tx.ol_type_vlan_len_msec);
-	switch (tun_flags) {
-	case PKT_TX_TUNNEL_GENEVE:
-	case PKT_TX_TUNNEL_VXLAN:
-		*l2_len = rxm->l2_len - RTE_ETHER_VXLAN_HLEN;
-		break;
-	case PKT_TX_TUNNEL_GRE:
-		/*
-		 * OL4 header size, defined in 4 Bytes, it contains outer
-		 * L4(GRE) length and tunneling length.
-		 */
-		ol4_len = hns3_get_field(otmp, HNS3_TXD_L4LEN_M,
-					 HNS3_TXD_L4LEN_S);
-		*l2_len = rxm->l2_len - (ol4_len << HNS3_L4_LEN_UNIT);
-		break;
-	default:
-		/* For non UDP / GRE tunneling, drop the tunnel packet */
-		return -EINVAL;
-	}
-	hns3_set_field(otmp, HNS3_TXD_L2LEN_M, HNS3_TXD_L2LEN_S,
-		       rxm->outer_l2_len >> HNS3_L2_LEN_UNIT);
-	desc->tx.ol_type_vlan_len_msec = rte_cpu_to_le_32(otmp);
-
-	return 0;
-}
-
 int
 hns3_config_gro(struct hns3_hw *hw, bool en)
 {
@@ -2723,31 +2685,15 @@ hns3_pkt_is_tso(struct rte_mbuf *m)
 }
 
 static void
-hns3_set_tso(struct hns3_desc *desc, uint64_t ol_flags,
-		uint32_t paylen, struct rte_mbuf *rxm)
+hns3_set_tso(struct hns3_desc *desc, uint32_t paylen, struct rte_mbuf *rxm)
 {
-	uint8_t l2_len = rxm->l2_len;
-	uint32_t tmp;
-
 	if (!hns3_pkt_is_tso(rxm))
 		return;
 
-	if (hns3_tso_proc_tunnel(desc, ol_flags, rxm, &l2_len))
-		return;
-
 	if (paylen <= rxm->tso_segsz)
 		return;
 
-	tmp = rte_le_to_cpu_32(desc->tx.type_cs_vlan_tso_len);
-	hns3_set_bit(tmp, HNS3_TXD_TSO_B, 1);
-	hns3_set_bit(tmp, HNS3_TXD_L3CS_B, 1);
-	hns3_set_field(tmp, HNS3_TXD_L4T_M, HNS3_TXD_L4T_S, HNS3_L4T_TCP);
-	hns3_set_bit(tmp, HNS3_TXD_L4CS_B, 1);
-	hns3_set_field(tmp, HNS3_TXD_L4LEN_M, HNS3_TXD_L4LEN_S,
-		       sizeof(struct rte_tcp_hdr) >> HNS3_L4_LEN_UNIT);
-	hns3_set_field(tmp, HNS3_TXD_L2LEN_M, HNS3_TXD_L2LEN_S,
-		       l2_len >> HNS3_L2_LEN_UNIT);
-	desc->tx.type_cs_vlan_tso_len = rte_cpu_to_le_32(tmp);
+	desc->tx.type_cs_vlan_tso_len |= rte_cpu_to_le_32(BIT(HNS3_TXD_TSO_B));
 	desc->tx.mss = rte_cpu_to_le_16(rxm->tso_segsz);
 }
 
@@ -2772,7 +2718,7 @@ hns3_fill_first_desc(struct hns3_tx_queue *txq, struct hns3_desc *desc,
 			   rxm->outer_l2_len + rxm->outer_l3_len : 0;
 	paylen = rxm->pkt_len - hdr_len;
 	desc->tx.paylen = rte_cpu_to_le_32(paylen);
-	hns3_set_tso(desc, ol_flags, paylen, rxm);
+	hns3_set_tso(desc, paylen, rxm);
 
 	/*
 	 * Currently, hardware doesn't support more than two layers VLAN offload
@@ -2917,180 +2863,212 @@ hns3_reassemble_tx_pkts(struct rte_mbuf *tx_pkt, struct rte_mbuf **new_pkt,
 }
 
 static void
-hns3_parse_outer_params(uint64_t ol_flags, uint32_t *ol_type_vlan_len_msec)
+hns3_parse_outer_params(struct rte_mbuf *m, uint32_t *ol_type_vlan_len_msec)
 {
 	uint32_t tmp = *ol_type_vlan_len_msec;
+	uint64_t ol_flags = m->ol_flags;
 
 	/* (outer) IP header type */
 	if (ol_flags & PKT_TX_OUTER_IPV4) {
-		/* OL3 header size, defined in 4 bytes */
-		hns3_set_field(tmp, HNS3_TXD_L3LEN_M, HNS3_TXD_L3LEN_S,
-			       sizeof(struct rte_ipv4_hdr) >> HNS3_L3_LEN_UNIT);
 		if (ol_flags & PKT_TX_OUTER_IP_CKSUM)
-			hns3_set_field(tmp, HNS3_TXD_OL3T_M,
-				       HNS3_TXD_OL3T_S, HNS3_OL3T_IPV4_CSUM);
+			tmp |= hns3_gen_field_val(HNS3_TXD_OL3T_M,
+					HNS3_TXD_OL3T_S, HNS3_OL3T_IPV4_CSUM);
 		else
-			hns3_set_field(tmp, HNS3_TXD_OL3T_M, HNS3_TXD_OL3T_S,
-				       HNS3_OL3T_IPV4_NO_CSUM);
+			tmp |= hns3_gen_field_val(HNS3_TXD_OL3T_M,
+				HNS3_TXD_OL3T_S, HNS3_OL3T_IPV4_NO_CSUM);
 	} else if (ol_flags & PKT_TX_OUTER_IPV6) {
-		hns3_set_field(tmp, HNS3_TXD_OL3T_M, HNS3_TXD_OL3T_S,
-			       HNS3_OL3T_IPV6);
-		/* OL3 header size, defined in 4 bytes */
-		hns3_set_field(tmp, HNS3_TXD_L3LEN_M, HNS3_TXD_L3LEN_S,
-			       sizeof(struct rte_ipv6_hdr) >> HNS3_L3_LEN_UNIT);
+		tmp |= hns3_gen_field_val(HNS3_TXD_OL3T_M, HNS3_TXD_OL3T_S,
+					HNS3_OL3T_IPV6);
 	}
-
+	/* OL3 header size, defined in 4 bytes */
+	tmp |= hns3_gen_field_val(HNS3_TXD_L3LEN_M, HNS3_TXD_L3LEN_S,
+				m->outer_l3_len >> HNS3_L3_LEN_UNIT);
 	*ol_type_vlan_len_msec = tmp;
 }
 
 static int
-hns3_parse_inner_params(uint64_t ol_flags, uint32_t *ol_type_vlan_len_msec,
-			struct rte_net_hdr_lens *hdr_lens)
+hns3_parse_inner_params(struct rte_mbuf *m, uint32_t *ol_type_vlan_len_msec,
+			uint32_t *type_cs_vlan_tso_len)
 {
-	uint32_t tmp = *ol_type_vlan_len_msec;
-	uint8_t l4_len;
-
-	/* OL2 header size, defined in 2 bytes */
-	hns3_set_field(tmp, HNS3_TXD_L2LEN_M, HNS3_TXD_L2LEN_S,
-		       sizeof(struct rte_ether_hdr) >> HNS3_L2_LEN_UNIT);
+#define HNS3_NVGRE_HLEN 8
+	uint32_t tmp_outer = *ol_type_vlan_len_msec;
+	uint32_t tmp_inner = *type_cs_vlan_tso_len;
+	uint64_t ol_flags = m->ol_flags;
+	uint16_t inner_l2_len;
 
-	/* L4TUNT: L4 Tunneling Type */
 	switch (ol_flags & PKT_TX_TUNNEL_MASK) {
 	case PKT_TX_TUNNEL_GENEVE:
 	case PKT_TX_TUNNEL_VXLAN:
-		/* MAC in UDP tunnelling packet, include VxLAN */
-		hns3_set_field(tmp, HNS3_TXD_TUNTYPE_M, HNS3_TXD_TUNTYPE_S,
-			       HNS3_TUN_MAC_IN_UDP);
+		/* MAC in UDP tunnelling packet, including VxLAN and GENEVE */
+		tmp_outer |= hns3_gen_field_val(HNS3_TXD_TUNTYPE_M,
+				HNS3_TXD_TUNTYPE_S, HNS3_TUN_MAC_IN_UDP);
 		/*
-		 * OL4 header size, defined in 4 Bytes, it contains outer
-		 * L4(UDP) length and tunneling length.
+		 * The inner l2 length of the mbuf is the sum of the outer l4
+		 * length, the tunneling header length and the inner l2 length
+		 * for a tunnel packet. But in the hns3 tx descriptor, the
+		 * tunneling header length is contained in the outer L4 length
+		 * field. Therefore, the driver needs to calculate the outer
+		 * L4 length and the inner L2 length.
 		 */
-		hns3_set_field(tmp, HNS3_TXD_L4LEN_M, HNS3_TXD_L4LEN_S,
-			       (uint8_t)RTE_ETHER_VXLAN_HLEN >>
-			       HNS3_L4_LEN_UNIT);
+		tmp_outer |= hns3_gen_field_val(HNS3_TXD_L4LEN_M,
+						HNS3_TXD_L4LEN_S,
+						(uint8_t)RTE_ETHER_VXLAN_HLEN >>
+						HNS3_L4_LEN_UNIT);
+
+		inner_l2_len = m->l2_len - RTE_ETHER_VXLAN_HLEN;
 		break;
 	case PKT_TX_TUNNEL_GRE:
-		hns3_set_field(tmp, HNS3_TXD_TUNTYPE_M, HNS3_TXD_TUNTYPE_S,
-			       HNS3_TUN_NVGRE);
+		tmp_outer |= hns3_gen_field_val(HNS3_TXD_TUNTYPE_M,
+					HNS3_TXD_TUNTYPE_S, HNS3_TUN_NVGRE);
 		/*
-		 * OL4 header size, defined in 4 Bytes, it contains outer
-		 * L4(GRE) length and tunneling length.
+		 * For an NVGRE tunnel packet, the outer L4 is empty. So only
+		 * fill the NVGRE header length into the outer L4 field.
 		 */
-		l4_len = hdr_lens->l4_len + hdr_lens->tunnel_len;
-		hns3_set_field(tmp, HNS3_TXD_L4LEN_M, HNS3_TXD_L4LEN_S,
-			       l4_len >> HNS3_L4_LEN_UNIT);
+		tmp_outer |= hns3_gen_field_val(HNS3_TXD_L4LEN_M,
+				HNS3_TXD_L4LEN_S,
+				(uint8_t)HNS3_NVGRE_HLEN >> HNS3_L4_LEN_UNIT);
+
+		inner_l2_len = m->l2_len - HNS3_NVGRE_HLEN;
 		break;
 	default:
 		/* For non UDP / GRE tunneling, drop the tunnel packet */
 		return -EINVAL;
 	}
 
-	*ol_type_vlan_len_msec = tmp;
+	tmp_inner |= hns3_gen_field_val(HNS3_TXD_L2LEN_M, HNS3_TXD_L2LEN_S,
+					inner_l2_len >> HNS3_L2_LEN_UNIT);
+	/* OL2 header size, defined in 2 bytes */
+	tmp_outer |= hns3_gen_field_val(HNS3_TXD_L2LEN_M, HNS3_TXD_L2LEN_S,
+					m->outer_l2_len >> HNS3_L2_LEN_UNIT);
+
+	*type_cs_vlan_tso_len = tmp_inner;
+	*ol_type_vlan_len_msec = tmp_outer;
 
 	return 0;
 }
 
 static int
-hns3_parse_tunneling_params(struct hns3_tx_queue *txq, uint16_t tx_desc_id,
-			    uint64_t ol_flags,
-			    struct rte_net_hdr_lens *hdr_lens)
+hns3_parse_tunneling_params(struct hns3_tx_queue *txq, struct rte_mbuf *m,
+			    uint16_t tx_desc_id)
 {
 	struct hns3_desc *tx_ring = txq->tx_ring;
 	struct hns3_desc *desc = &tx_ring[tx_desc_id];
-	uint32_t value = 0;
+	uint32_t tmp_outer = 0;
+	uint32_t tmp_inner = 0;
 	int ret;
 
-	hns3_parse_outer_params(ol_flags, &value);
-	ret = hns3_parse_inner_params(ol_flags, &value, hdr_lens);
-	if (ret)
-		return -EINVAL;
+	/*
+	 * The tunnel header is counted in the inner L2 header field of the
+	 * mbuf, but in the hns3 descriptor it belongs to the outer L4 field,
+	 * so a conversion between the two is needed. To avoid calculating
+	 * it twice, both the outer and the inner L2 header lengths are
+	 * filled while parsing tunnel packets.
+	 */
+	if (!(m->ol_flags & PKT_TX_TUNNEL_MASK)) {
+		/*
+		 * For non-tunnel packets the tunnel type id is 0, so there is
+		 * no need to assign a value to it. Only the inner (normal) L2
+		 * header length is assigned.
+		 */
+		tmp_inner |= hns3_gen_field_val(HNS3_TXD_L2LEN_M,
+			       HNS3_TXD_L2LEN_S, m->l2_len >> HNS3_L2_LEN_UNIT);
+	} else {
+		/*
+		 * If the outer checksum is not offloaded, the outer length
+		 * fields may be filled with 0 and the outer header length
+		 * counted in the inner l2_len, which would lead to a cksum
+		 * error. So the driver has to calculate the header lengths.
+		 */
+		if (unlikely(!(m->ol_flags & PKT_TX_OUTER_IP_CKSUM) &&
+					m->outer_l2_len == 0)) {
+			struct rte_net_hdr_lens hdr_len;
+			(void)rte_net_get_ptype(m, &hdr_len,
+					RTE_PTYPE_L2_MASK | RTE_PTYPE_L3_MASK);
+			m->outer_l3_len = hdr_len.l3_len;
+			m->outer_l2_len = hdr_len.l2_len;
+			m->l2_len = m->l2_len - hdr_len.l2_len - hdr_len.l3_len;
+		}
+		hns3_parse_outer_params(m, &tmp_outer);
+		ret = hns3_parse_inner_params(m, &tmp_outer, &tmp_inner);
+		if (ret)
+			return -EINVAL;
+	}
 
-	desc->tx.ol_type_vlan_len_msec |= rte_cpu_to_le_32(value);
+	desc->tx.ol_type_vlan_len_msec = rte_cpu_to_le_32(tmp_outer);
+	desc->tx.type_cs_vlan_tso_len = rte_cpu_to_le_32(tmp_inner);
 
 	return 0;
 }
 
 static void
-hns3_parse_l3_cksum_params(uint64_t ol_flags, uint32_t *type_cs_vlan_tso_len)
+hns3_parse_l3_cksum_params(struct rte_mbuf *m, uint32_t *type_cs_vlan_tso_len)
 {
+	uint64_t ol_flags = m->ol_flags;
+	uint32_t l3_type;
 	uint32_t tmp;
 
+	tmp = *type_cs_vlan_tso_len;
+	if (ol_flags & PKT_TX_IPV4)
+		l3_type = HNS3_L3T_IPV4;
+	else if (ol_flags & PKT_TX_IPV6)
+		l3_type = HNS3_L3T_IPV6;
+	else
+		l3_type = HNS3_L3T_NONE;
+
+	/* inner(/normal) L3 header size, defined in 4 bytes */
+	tmp |= hns3_gen_field_val(HNS3_TXD_L3LEN_M, HNS3_TXD_L3LEN_S,
+					m->l3_len >> HNS3_L3_LEN_UNIT);
+
+	tmp |= hns3_gen_field_val(HNS3_TXD_L3T_M, HNS3_TXD_L3T_S, l3_type);
+
 	/* Enable L3 checksum offloads */
-	if (ol_flags & PKT_TX_IPV4) {
-		tmp = *type_cs_vlan_tso_len;
-		hns3_set_field(tmp, HNS3_TXD_L3T_M, HNS3_TXD_L3T_S,
-			       HNS3_L3T_IPV4);
-		/* inner(/normal) L3 header size, defined in 4 bytes */
-		hns3_set_field(tmp, HNS3_TXD_L3LEN_M, HNS3_TXD_L3LEN_S,
-			       sizeof(struct rte_ipv4_hdr) >> HNS3_L3_LEN_UNIT);
-		if (ol_flags & PKT_TX_IP_CKSUM)
-			hns3_set_bit(tmp, HNS3_TXD_L3CS_B, 1);
-		*type_cs_vlan_tso_len = tmp;
-	} else if (ol_flags & PKT_TX_IPV6) {
-		tmp = *type_cs_vlan_tso_len;
-		/* L3T, IPv6 don't do checksum */
-		hns3_set_field(tmp, HNS3_TXD_L3T_M, HNS3_TXD_L3T_S,
-			       HNS3_L3T_IPV6);
-		/* inner(/normal) L3 header size, defined in 4 bytes */
-		hns3_set_field(tmp, HNS3_TXD_L3LEN_M, HNS3_TXD_L3LEN_S,
-			       sizeof(struct rte_ipv6_hdr) >> HNS3_L3_LEN_UNIT);
-		*type_cs_vlan_tso_len = tmp;
-	}
+	if (ol_flags & PKT_TX_IP_CKSUM)
+		tmp |= BIT(HNS3_TXD_L3CS_B);
+	*type_cs_vlan_tso_len = tmp;
 }
 
 static void
-hns3_parse_l4_cksum_params(uint64_t ol_flags, uint32_t *type_cs_vlan_tso_len)
+hns3_parse_l4_cksum_params(struct rte_mbuf *m, uint32_t *type_cs_vlan_tso_len)
 {
+	uint64_t ol_flags = m->ol_flags;
 	uint32_t tmp;
-
 	/* Enable L4 checksum offloads */
-	switch (ol_flags & PKT_TX_L4_MASK) {
+	switch (ol_flags & (PKT_TX_L4_MASK | PKT_TX_TCP_SEG)) {
 	case PKT_TX_TCP_CKSUM:
+	case PKT_TX_TCP_SEG:
 		tmp = *type_cs_vlan_tso_len;
-		hns3_set_field(tmp, HNS3_TXD_L4T_M, HNS3_TXD_L4T_S,
-			       HNS3_L4T_TCP);
-		hns3_set_bit(tmp, HNS3_TXD_L4CS_B, 1);
-		hns3_set_field(tmp, HNS3_TXD_L4LEN_M, HNS3_TXD_L4LEN_S,
-			       sizeof(struct rte_tcp_hdr) >> HNS3_L4_LEN_UNIT);
-		*type_cs_vlan_tso_len = tmp;
+		tmp |= hns3_gen_field_val(HNS3_TXD_L4T_M, HNS3_TXD_L4T_S,
+					HNS3_L4T_TCP);
 		break;
 	case PKT_TX_UDP_CKSUM:
 		tmp = *type_cs_vlan_tso_len;
-		hns3_set_field(tmp, HNS3_TXD_L4T_M, HNS3_TXD_L4T_S,
-			       HNS3_L4T_UDP);
-		hns3_set_bit(tmp, HNS3_TXD_L4CS_B, 1);
-		hns3_set_field(tmp, HNS3_TXD_L4LEN_M, HNS3_TXD_L4LEN_S,
-			       sizeof(struct rte_udp_hdr) >> HNS3_L4_LEN_UNIT);
-		*type_cs_vlan_tso_len = tmp;
+		tmp |= hns3_gen_field_val(HNS3_TXD_L4T_M, HNS3_TXD_L4T_S,
+					HNS3_L4T_UDP);
 		break;
 	case PKT_TX_SCTP_CKSUM:
 		tmp = *type_cs_vlan_tso_len;
-		hns3_set_field(tmp, HNS3_TXD_L4T_M, HNS3_TXD_L4T_S,
-			       HNS3_L4T_SCTP);
-		hns3_set_bit(tmp, HNS3_TXD_L4CS_B, 1);
-		hns3_set_field(tmp, HNS3_TXD_L4LEN_M, HNS3_TXD_L4LEN_S,
-			       sizeof(struct rte_sctp_hdr) >> HNS3_L4_LEN_UNIT);
-		*type_cs_vlan_tso_len = tmp;
+		tmp |= hns3_gen_field_val(HNS3_TXD_L4T_M, HNS3_TXD_L4T_S,
+					HNS3_L4T_SCTP);
 		break;
 	default:
-		break;
+		return;
 	}
+	tmp |= BIT(HNS3_TXD_L4CS_B);
+	tmp |= hns3_gen_field_val(HNS3_TXD_L4LEN_M, HNS3_TXD_L4LEN_S,
+					m->l4_len >> HNS3_L4_LEN_UNIT);
+	*type_cs_vlan_tso_len = tmp;
 }
 
 static void
-hns3_txd_enable_checksum(struct hns3_tx_queue *txq, uint16_t tx_desc_id,
-			 uint64_t ol_flags)
+hns3_txd_enable_checksum(struct hns3_tx_queue *txq, struct rte_mbuf *m,
+			 uint16_t tx_desc_id)
 {
 	struct hns3_desc *tx_ring = txq->tx_ring;
 	struct hns3_desc *desc = &tx_ring[tx_desc_id];
 	uint32_t value = 0;
 
-	/* inner(/normal) L2 header size, defined in 2 bytes */
-	hns3_set_field(value, HNS3_TXD_L2LEN_M, HNS3_TXD_L2LEN_S,
-		       sizeof(struct rte_ether_hdr) >> HNS3_L2_LEN_UNIT);
-
-	hns3_parse_l3_cksum_params(ol_flags, &value);
-	hns3_parse_l4_cksum_params(ol_flags, &value);
+	hns3_parse_l3_cksum_params(m, &value);
+	hns3_parse_l4_cksum_params(m, &value);
 
 	desc->tx.type_cs_vlan_tso_len |= rte_cpu_to_le_32(value);
 }
@@ -3332,20 +3310,25 @@ hns3_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
 
 static int
 hns3_parse_cksum(struct hns3_tx_queue *txq, uint16_t tx_desc_id,
-		 const struct rte_mbuf *m, struct rte_net_hdr_lens *hdr_lens)
+		 struct rte_mbuf *m)
 {
-	/* Fill in tunneling parameters if necessary */
-	if (m->ol_flags & PKT_TX_TUNNEL_MASK) {
-		(void)rte_net_get_ptype(m, hdr_lens, RTE_PTYPE_ALL_MASK);
-		if (hns3_parse_tunneling_params(txq, tx_desc_id, m->ol_flags,
-						hdr_lens)) {
+	struct hns3_desc *tx_ring = txq->tx_ring;
+	struct hns3_desc *desc = &tx_ring[tx_desc_id];
+
+	/* Enable checksum offloading */
+	if (m->ol_flags & HNS3_TX_CKSUM_OFFLOAD_MASK) {
+		/* Fill in tunneling parameters if necessary */
+		if (hns3_parse_tunneling_params(txq, m, tx_desc_id)) {
 			txq->unsupported_tunnel_pkt_cnt++;
-			return -EINVAL;
+			return -EINVAL;
 		}
+
+		hns3_txd_enable_checksum(txq, m, tx_desc_id);
+	} else {
+		/* clear the control bits */
+		desc->tx.type_cs_vlan_tso_len  = 0;
+		desc->tx.ol_type_vlan_len_msec = 0;
 	}
-	/* Enable checksum offloading */
-	if (m->ol_flags & HNS3_TX_CKSUM_OFFLOAD_MASK)
-		hns3_txd_enable_checksum(txq, tx_desc_id, m->ol_flags);
 
 	return 0;
 }
@@ -3536,7 +3519,6 @@ hns3_xmit_pkts_simple(void *tx_queue,
 uint16_t
 hns3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 {
-	struct rte_net_hdr_lens hdr_lens = {0};
 	struct hns3_tx_queue *txq = tx_queue;
 	struct hns3_entry *tx_bak_pkt;
 	struct hns3_desc *tx_ring;
@@ -3600,7 +3582,7 @@ hns3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 		if (hns3_check_non_tso_pkt(nb_buf, &m_seg, tx_pkt, txq))
 			goto end_of_tx;
 
-		if (hns3_parse_cksum(txq, tx_next_use, m_seg, &hdr_lens))
+		if (hns3_parse_cksum(txq, tx_next_use, m_seg))
 			goto end_of_tx;
 
 		i = 0;
diff --git a/drivers/net/hns3/hns3_rxtx.h b/drivers/net/hns3/hns3_rxtx.h
index 68497a0..51504ca 100644
--- a/drivers/net/hns3/hns3_rxtx.h
+++ b/drivers/net/hns3/hns3_rxtx.h
@@ -470,14 +470,10 @@ struct hns3_queue_info {
 };
 
 #define HNS3_TX_CKSUM_OFFLOAD_MASK ( \
-	PKT_TX_OUTER_IPV6 | \
-	PKT_TX_OUTER_IPV4 | \
 	PKT_TX_OUTER_IP_CKSUM | \
-	PKT_TX_IPV6 | \
-	PKT_TX_IPV4 | \
 	PKT_TX_IP_CKSUM | \
-	PKT_TX_L4_MASK | \
-	PKT_TX_TUNNEL_MASK)
+	PKT_TX_TCP_SEG | \
+	PKT_TX_L4_MASK)
 
 enum hns3_cksum_status {
 	HNS3_CKSUM_NONE = 0,
-- 
2.7.4

