DPDK patches and discussions
 help / color / mirror / Atom feed
* [dpdk-dev] [PATCH v2 0/4] some bugs fixes
@ 2020-07-25  8:15 Xiaoyun wang
  2020-07-25  8:15 ` [dpdk-dev] [PATCH v2 1/4] net/hinic: modify csum offload process Xiaoyun wang
                   ` (4 more replies)
  0 siblings, 5 replies; 6+ messages in thread
From: Xiaoyun wang @ 2020-07-25  8:15 UTC (permalink / raw)
  To: dev
  Cc: ferruh.yigit, bluca, luoxianjun, zhouguoyang, yin.yinshi,
	david.yangxiaoliang, zhaohui8, zhengjingzhou, guojian365,
	Xiaoyun wang

These patches modify the csum offload process, optimize Rx
performance for x86, set the vhd type to 2 (no offset)
for the ovs offload scenario, and make timeouts unaffected
by system time jumps.

--
v1->v2:
  - fix typo spelling

v1:
  - modify csum offload process
  - optimize Rx performance for x86
  - modify vhd type for SDI
  - make timeout not affected by system time jump

Xiaoyun wang (4):
  net/hinic: modify csum offload process
  net/hinic: optimize Rx performance for x86
  net/hinic/base: modify vhd type for SDI
  net/hinic/base: make timeout not affected by system time jump

 drivers/net/hinic/base/hinic_compat.h    |  11 +-
 drivers/net/hinic/base/hinic_pmd_nicio.c |   2 +-
 drivers/net/hinic/base/hinic_pmd_nicio.h |   5 +
 drivers/net/hinic/hinic_pmd_rx.h         |   4 +
 drivers/net/hinic/hinic_pmd_tx.c         | 371 +++++++++++++++++--------------
 5 files changed, 218 insertions(+), 175 deletions(-)

-- 
1.8.3.1


^ permalink raw reply	[flat|nested] 6+ messages in thread

* [dpdk-dev] [PATCH v2 1/4] net/hinic: modify csum offload process
  2020-07-25  8:15 [dpdk-dev] [PATCH v2 0/4] some bugs fixes Xiaoyun wang
@ 2020-07-25  8:15 ` Xiaoyun wang
  2020-07-25  8:15 ` [dpdk-dev] [PATCH v2 2/4] net/hinic: optimize Rx performance for x86 Xiaoyun wang
                   ` (3 subsequent siblings)
  4 siblings, 0 replies; 6+ messages in thread
From: Xiaoyun wang @ 2020-07-25  8:15 UTC (permalink / raw)
  To: dev
  Cc: ferruh.yigit, bluca, luoxianjun, zhouguoyang, yin.yinshi,
	david.yangxiaoliang, zhaohui8, zhengjingzhou, guojian365,
	Xiaoyun wang

Encapsulate different types of packet checksum preprocessing
into functions.

Signed-off-by: Xiaoyun wang <cloud.wangxiaoyun@huawei.com>
---
 drivers/net/hinic/hinic_pmd_tx.c | 371 +++++++++++++++++++++------------------
 1 file changed, 202 insertions(+), 169 deletions(-)

diff --git a/drivers/net/hinic/hinic_pmd_tx.c b/drivers/net/hinic/hinic_pmd_tx.c
index 4d99967..d9f251a 100644
--- a/drivers/net/hinic/hinic_pmd_tx.c
+++ b/drivers/net/hinic/hinic_pmd_tx.c
@@ -38,9 +38,6 @@
 #define HINIC_TSO_PKT_MAX_SGE			127	/* tso max sge 127 */
 #define HINIC_TSO_SEG_NUM_INVALID(num)		((num) > HINIC_TSO_PKT_MAX_SGE)
 
-#define HINIC_TX_OUTER_CHECKSUM_FLAG_SET       1
-#define HINIC_TX_OUTER_CHECKSUM_FLAG_NO_SET    0
-
 /* sizeof(struct hinic_sq_bufdesc) == 16, shift 4 */
 #define HINIC_BUF_DESC_SIZE(nr_descs)	(SIZE_8BYTES(((u32)nr_descs) << 4))
 
@@ -671,7 +668,7 @@ static inline void hinic_xmit_mbuf_cleanup(struct hinic_txq *txq)
 
 static inline struct hinic_sq_wqe *
 hinic_get_sq_wqe(struct hinic_txq *txq, int wqebb_cnt,
-		struct hinic_wqe_info *wqe_info)
+		 struct hinic_wqe_info *wqe_info)
 {
 	u32 cur_pi, end_pi;
 	u16 remain_wqebbs;
@@ -758,36 +755,33 @@ static inline void hinic_xmit_mbuf_cleanup(struct hinic_txq *txq)
 	return __rte_raw_cksum_reduce(sum);
 }
 
-static inline void
-hinic_get_pld_offset(struct rte_mbuf *m, struct hinic_tx_offload_info *off_info,
-		     int outer_cs_flag)
+static inline void hinic_get_outer_cs_pld_offset(struct rte_mbuf *m,
+					struct hinic_tx_offload_info *off_info)
 {
 	uint64_t ol_flags = m->ol_flags;
 
-	if (outer_cs_flag == 1) {
-		if ((ol_flags & PKT_TX_UDP_CKSUM) == PKT_TX_UDP_CKSUM) {
-			off_info->payload_offset = m->outer_l2_len +
-				m->outer_l3_len + m->l2_len + m->l3_len;
-		} else if ((ol_flags & PKT_TX_TCP_CKSUM) ||
-				(ol_flags & PKT_TX_TCP_SEG)) {
-			off_info->payload_offset = m->outer_l2_len +
-					m->outer_l3_len + m->l2_len +
-					m->l3_len + m->l4_len;
-		}
-	} else {
-		if ((ol_flags & PKT_TX_UDP_CKSUM) == PKT_TX_UDP_CKSUM) {
-			off_info->payload_offset = m->l2_len + m->l3_len;
-		} else if ((ol_flags & PKT_TX_TCP_CKSUM) ||
-			(ol_flags & PKT_TX_TCP_SEG)) {
-			off_info->payload_offset = m->l2_len + m->l3_len +
-						   m->l4_len;
-		}
-	}
+	if ((ol_flags & PKT_TX_L4_MASK) == PKT_TX_UDP_CKSUM)
+		off_info->payload_offset = m->outer_l2_len + m->outer_l3_len +
+					   m->l2_len + m->l3_len;
+	else if ((ol_flags & PKT_TX_TCP_CKSUM) || (ol_flags & PKT_TX_TCP_SEG))
+		off_info->payload_offset = m->outer_l2_len + m->outer_l3_len +
+					   m->l2_len + m->l3_len + m->l4_len;
 }
 
-static inline void
-hinic_analyze_tx_info(struct rte_mbuf *mbuf,
-		      struct hinic_tx_offload_info *off_info)
+static inline void hinic_get_pld_offset(struct rte_mbuf *m,
+					struct hinic_tx_offload_info *off_info)
+{
+	uint64_t ol_flags = m->ol_flags;
+
+	if ((ol_flags & PKT_TX_L4_MASK) == PKT_TX_UDP_CKSUM)
+		off_info->payload_offset = m->l2_len + m->l3_len;
+	else if ((ol_flags & PKT_TX_TCP_CKSUM) || (ol_flags & PKT_TX_TCP_SEG))
+		off_info->payload_offset = m->l2_len + m->l3_len +
+					   m->l4_len;
+}
+
+static inline void hinic_analyze_tx_info(struct rte_mbuf *mbuf,
+					 struct hinic_tx_offload_info *off_info)
 {
 	struct rte_ether_hdr *eth_hdr;
 	struct rte_vlan_hdr *vlan_hdr;
@@ -817,17 +811,164 @@ static inline void hinic_xmit_mbuf_cleanup(struct hinic_txq *txq)
 	}
 }
 
-static inline int
-hinic_tx_offload_pkt_prepare(struct rte_mbuf *m,
-				struct hinic_tx_offload_info *off_info)
+static inline void hinic_analyze_outer_ip_vxlan(struct rte_mbuf *mbuf,
+					struct hinic_tx_offload_info *off_info)
+{
+	struct rte_ether_hdr *eth_hdr;
+	struct rte_vlan_hdr *vlan_hdr;
+	struct rte_ipv4_hdr *ipv4_hdr;
+	struct rte_udp_hdr *udp_hdr;
+	u16 eth_type = 0;
+
+	eth_hdr = rte_pktmbuf_mtod(mbuf, struct rte_ether_hdr *);
+	eth_type = rte_be_to_cpu_16(eth_hdr->ether_type);
+
+	if (eth_type == RTE_ETHER_TYPE_VLAN) {
+		vlan_hdr = (struct rte_vlan_hdr *)(eth_hdr + 1);
+		eth_type = rte_be_to_cpu_16(vlan_hdr->eth_proto);
+	}
+
+	if (eth_type == RTE_ETHER_TYPE_IPV4) {
+		ipv4_hdr = rte_pktmbuf_mtod_offset(mbuf, struct rte_ipv4_hdr *,
+						   mbuf->outer_l2_len);
+		off_info->outer_l3_type = IPV4_PKT_WITH_CHKSUM_OFFLOAD;
+		ipv4_hdr->hdr_checksum = 0;
+
+		udp_hdr = (struct rte_udp_hdr *)((char *)ipv4_hdr +
+						 mbuf->outer_l3_len);
+		udp_hdr->dgram_cksum = 0;
+	} else if (eth_type == RTE_ETHER_TYPE_IPV6) {
+		off_info->outer_l3_type = IPV6_PKT;
+
+		udp_hdr = rte_pktmbuf_mtod_offset(mbuf, struct rte_udp_hdr *,
+						  (mbuf->outer_l2_len +
+						   mbuf->outer_l3_len));
+		udp_hdr->dgram_cksum = 0;
+	}
+}
+
+static inline uint8_t hinic_analyze_l3_type(struct rte_mbuf *mbuf)
+{
+	uint8_t l3_type;
+	uint64_t ol_flags = mbuf->ol_flags;
+
+	if (ol_flags & PKT_TX_IPV4)
+		l3_type = (ol_flags & PKT_TX_IP_CKSUM) ?
+			  IPV4_PKT_WITH_CHKSUM_OFFLOAD :
+			  IPV4_PKT_NO_CHKSUM_OFFLOAD;
+	else if (ol_flags & PKT_TX_IPV6)
+		l3_type = IPV6_PKT;
+	else
+		l3_type = UNKNOWN_L3TYPE;
+
+	return l3_type;
+}
+
+static inline void hinic_calculate_tcp_checksum(struct rte_mbuf *mbuf,
+					struct hinic_tx_offload_info *off_info,
+					uint64_t inner_l3_offset)
 {
 	struct rte_ipv4_hdr *ipv4_hdr;
 	struct rte_ipv6_hdr *ipv6_hdr;
 	struct rte_tcp_hdr *tcp_hdr;
+	uint64_t ol_flags = mbuf->ol_flags;
+
+	if (ol_flags & PKT_TX_IPV4) {
+		ipv4_hdr = rte_pktmbuf_mtod_offset(mbuf, struct rte_ipv4_hdr *,
+						   inner_l3_offset);
+
+		if (ol_flags & PKT_TX_IP_CKSUM)
+			ipv4_hdr->hdr_checksum = 0;
+
+		tcp_hdr = (struct rte_tcp_hdr *)((char *)ipv4_hdr +
+						 mbuf->l3_len);
+		tcp_hdr->cksum = hinic_ipv4_phdr_cksum(ipv4_hdr, ol_flags);
+	} else {
+		ipv6_hdr = rte_pktmbuf_mtod_offset(mbuf, struct rte_ipv6_hdr *,
+						   inner_l3_offset);
+		tcp_hdr = rte_pktmbuf_mtod_offset(mbuf, struct rte_tcp_hdr *,
+						  (inner_l3_offset +
+						   mbuf->l3_len));
+		tcp_hdr->cksum = hinic_ipv6_phdr_cksum(ipv6_hdr, ol_flags);
+	}
+
+	off_info->inner_l4_type = TCP_OFFLOAD_ENABLE;
+	off_info->inner_l4_tcp_udp = 1;
+}
+
+static inline void hinic_calculate_udp_checksum(struct rte_mbuf *mbuf,
+					struct hinic_tx_offload_info *off_info,
+					uint64_t inner_l3_offset)
+{
+	struct rte_ipv4_hdr *ipv4_hdr;
+	struct rte_ipv6_hdr *ipv6_hdr;
 	struct rte_udp_hdr *udp_hdr;
-	struct rte_ether_hdr *eth_hdr;
-	struct rte_vlan_hdr *vlan_hdr;
-	u16 eth_type = 0;
+	uint64_t ol_flags = mbuf->ol_flags;
+
+	if (ol_flags & PKT_TX_IPV4) {
+		ipv4_hdr = rte_pktmbuf_mtod_offset(mbuf, struct rte_ipv4_hdr *,
+						   inner_l3_offset);
+
+		if (ol_flags & PKT_TX_IP_CKSUM)
+			ipv4_hdr->hdr_checksum = 0;
+
+		udp_hdr = (struct rte_udp_hdr *)((char *)ipv4_hdr +
+						 mbuf->l3_len);
+		udp_hdr->dgram_cksum = hinic_ipv4_phdr_cksum(ipv4_hdr,
+							     ol_flags);
+	} else {
+		ipv6_hdr = rte_pktmbuf_mtod_offset(mbuf, struct rte_ipv6_hdr *,
+						   inner_l3_offset);
+
+		udp_hdr = rte_pktmbuf_mtod_offset(mbuf, struct rte_udp_hdr *,
+						  (inner_l3_offset +
+						   mbuf->l3_len));
+		udp_hdr->dgram_cksum = hinic_ipv6_phdr_cksum(ipv6_hdr,
+							     ol_flags);
+	}
+
+	off_info->inner_l4_type = UDP_OFFLOAD_ENABLE;
+	off_info->inner_l4_tcp_udp = 1;
+}
+
+static inline void
+hinic_calculate_sctp_checksum(struct hinic_tx_offload_info *off_info)
+{
+	off_info->inner_l4_type = SCTP_OFFLOAD_ENABLE;
+	off_info->inner_l4_tcp_udp = 0;
+	off_info->inner_l4_len = sizeof(struct rte_sctp_hdr);
+}
+
+static inline void hinic_calculate_checksum(struct rte_mbuf *mbuf,
+					struct hinic_tx_offload_info *off_info,
+					uint64_t inner_l3_offset)
+{
+	uint64_t ol_flags = mbuf->ol_flags;
+
+	switch (ol_flags & PKT_TX_L4_MASK) {
+	case PKT_TX_UDP_CKSUM:
+		hinic_calculate_udp_checksum(mbuf, off_info, inner_l3_offset);
+		break;
+
+	case PKT_TX_TCP_CKSUM:
+		hinic_calculate_tcp_checksum(mbuf, off_info, inner_l3_offset);
+		break;
+
+	case PKT_TX_SCTP_CKSUM:
+		hinic_calculate_sctp_checksum(off_info);
+		break;
+
+	default:
+		if (ol_flags & PKT_TX_TCP_SEG)
+			hinic_calculate_tcp_checksum(mbuf, off_info,
+						     inner_l3_offset);
+		break;
+	}
+}
+
+static inline int hinic_tx_offload_pkt_prepare(struct rte_mbuf *m,
+					struct hinic_tx_offload_info *off_info)
+{
 	uint64_t inner_l3_offset;
 	uint64_t ol_flags = m->ol_flags;
 
@@ -836,8 +977,8 @@ static inline void hinic_xmit_mbuf_cleanup(struct hinic_txq *txq)
 		return 0;
 
 	/* Support only vxlan offload */
-	if ((ol_flags & PKT_TX_TUNNEL_MASK) &&
-	    !(ol_flags & PKT_TX_TUNNEL_VXLAN))
+	if (unlikely((ol_flags & PKT_TX_TUNNEL_MASK) &&
+	    !(ol_flags & PKT_TX_TUNNEL_VXLAN)))
 		return -ENOTSUP;
 
 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
@@ -846,169 +987,61 @@ static inline void hinic_xmit_mbuf_cleanup(struct hinic_txq *txq)
 #endif
 
 	if (ol_flags & PKT_TX_TUNNEL_VXLAN) {
+		off_info->tunnel_type = TUNNEL_UDP_NO_CSUM;
+
+		/* inner_l4_tcp_udp csum should be set to calculate outer
+		 * udp checksum when vxlan packets without inner l3 and l4
+		 */
+		off_info->inner_l4_tcp_udp = 1;
+
 		if ((ol_flags & PKT_TX_OUTER_IP_CKSUM) ||
 		    (ol_flags & PKT_TX_OUTER_IPV6) ||
 		    (ol_flags & PKT_TX_TCP_SEG)) {
 			inner_l3_offset = m->l2_len + m->outer_l2_len +
-				m->outer_l3_len;
+					  m->outer_l3_len;
 			off_info->outer_l2_len = m->outer_l2_len;
 			off_info->outer_l3_len = m->outer_l3_len;
 			/* just support vxlan tunneling pkt */
 			off_info->inner_l2_len = m->l2_len - VXLANLEN -
-				sizeof(*udp_hdr);
-			off_info->inner_l3_len = m->l3_len;
-			off_info->inner_l4_len = m->l4_len;
+						 sizeof(struct rte_udp_hdr);
 			off_info->tunnel_length = m->l2_len;
-			off_info->tunnel_type = TUNNEL_UDP_NO_CSUM;
 
-			hinic_get_pld_offset(m, off_info,
-					     HINIC_TX_OUTER_CHECKSUM_FLAG_SET);
+			hinic_analyze_outer_ip_vxlan(m, off_info);
+
+			hinic_get_outer_cs_pld_offset(m, off_info);
 		} else {
 			inner_l3_offset = m->l2_len;
 			hinic_analyze_tx_info(m, off_info);
 			/* just support vxlan tunneling pkt */
 			off_info->inner_l2_len = m->l2_len - VXLANLEN -
-				sizeof(*udp_hdr) - off_info->outer_l2_len -
-				off_info->outer_l3_len;
-			off_info->inner_l3_len = m->l3_len;
-			off_info->inner_l4_len = m->l4_len;
+						 sizeof(struct rte_udp_hdr) -
+						 off_info->outer_l2_len -
+						 off_info->outer_l3_len;
 			off_info->tunnel_length = m->l2_len -
-				off_info->outer_l2_len - off_info->outer_l3_len;
-			off_info->tunnel_type = TUNNEL_UDP_NO_CSUM;
+						  off_info->outer_l2_len -
+						  off_info->outer_l3_len;
+			off_info->outer_l3_type = IPV4_PKT_NO_CHKSUM_OFFLOAD;
 
-			hinic_get_pld_offset(m, off_info,
-				HINIC_TX_OUTER_CHECKSUM_FLAG_NO_SET);
+			hinic_get_pld_offset(m, off_info);
 		}
 	} else {
 		inner_l3_offset = m->l2_len;
 		off_info->inner_l2_len = m->l2_len;
-		off_info->inner_l3_len = m->l3_len;
-		off_info->inner_l4_len = m->l4_len;
 		off_info->tunnel_type = NOT_TUNNEL;
 
-		hinic_get_pld_offset(m, off_info,
-				     HINIC_TX_OUTER_CHECKSUM_FLAG_NO_SET);
+		hinic_get_pld_offset(m, off_info);
 	}
 
 	/* invalid udp or tcp header */
 	if (unlikely(off_info->payload_offset > MAX_PLD_OFFSET))
 		return -EINVAL;
 
-	/* Process outter udp pseudo-header checksum */
-	if ((ol_flags & PKT_TX_TUNNEL_VXLAN) && ((ol_flags & PKT_TX_TCP_SEG) ||
-			(ol_flags & PKT_TX_OUTER_IP_CKSUM) ||
-			(ol_flags & PKT_TX_OUTER_IPV6))) {
-
-		/* inner_l4_tcp_udp csum should be setted to calculate outter
-		 * udp checksum when vxlan packets without inner l3 and l4
-		 */
-		off_info->inner_l4_tcp_udp = 1;
-
-		eth_hdr = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
-		eth_type = rte_be_to_cpu_16(eth_hdr->ether_type);
-
-		if (eth_type == RTE_ETHER_TYPE_VLAN) {
-			vlan_hdr = (struct rte_vlan_hdr *)(eth_hdr + 1);
-			eth_type = rte_be_to_cpu_16(vlan_hdr->eth_proto);
-		}
-
-		if (eth_type == RTE_ETHER_TYPE_IPV4) {
-			ipv4_hdr =
-			rte_pktmbuf_mtod_offset(m, struct rte_ipv4_hdr *,
-						m->outer_l2_len);
-			off_info->outer_l3_type = IPV4_PKT_WITH_CHKSUM_OFFLOAD;
-			ipv4_hdr->hdr_checksum = 0;
-
-			udp_hdr = (struct rte_udp_hdr *)((char *)ipv4_hdr +
-							m->outer_l3_len);
-			udp_hdr->dgram_cksum = 0;
-		} else if (eth_type == RTE_ETHER_TYPE_IPV6) {
-			off_info->outer_l3_type = IPV6_PKT;
-			ipv6_hdr =
-			rte_pktmbuf_mtod_offset(m, struct rte_ipv6_hdr *,
-						m->outer_l2_len);
-
-			udp_hdr =
-			rte_pktmbuf_mtod_offset(m, struct rte_udp_hdr *,
-						(m->outer_l2_len +
-						m->outer_l3_len));
-			udp_hdr->dgram_cksum = 0;
-		}
-	} else if (ol_flags & PKT_TX_OUTER_IPV4) {
-		off_info->tunnel_type = TUNNEL_UDP_NO_CSUM;
-		off_info->inner_l4_tcp_udp = 1;
-		off_info->outer_l3_type = IPV4_PKT_NO_CHKSUM_OFFLOAD;
-	}
-
-	if (ol_flags & PKT_TX_IPV4)
-		off_info->inner_l3_type = (ol_flags & PKT_TX_IP_CKSUM) ?
-					IPV4_PKT_WITH_CHKSUM_OFFLOAD :
-					IPV4_PKT_NO_CHKSUM_OFFLOAD;
-	else if (ol_flags & PKT_TX_IPV6)
-		off_info->inner_l3_type = IPV6_PKT;
+	off_info->inner_l3_len = m->l3_len;
+	off_info->inner_l4_len = m->l4_len;
+	off_info->inner_l3_type = hinic_analyze_l3_type(m);
 
 	/* Process the pseudo-header checksum */
-	if ((ol_flags & PKT_TX_L4_MASK) == PKT_TX_UDP_CKSUM) {
-		if (ol_flags & PKT_TX_IPV4) {
-			ipv4_hdr =
-			rte_pktmbuf_mtod_offset(m, struct rte_ipv4_hdr *,
-						inner_l3_offset);
-
-			if (ol_flags & PKT_TX_IP_CKSUM)
-				ipv4_hdr->hdr_checksum = 0;
-
-			udp_hdr = (struct rte_udp_hdr *)((char *)ipv4_hdr +
-								m->l3_len);
-			udp_hdr->dgram_cksum =
-				hinic_ipv4_phdr_cksum(ipv4_hdr, ol_flags);
-		} else {
-			ipv6_hdr =
-			rte_pktmbuf_mtod_offset(m, struct rte_ipv6_hdr *,
-						inner_l3_offset);
-
-			udp_hdr =
-			rte_pktmbuf_mtod_offset(m, struct rte_udp_hdr *,
-						(inner_l3_offset + m->l3_len));
-			udp_hdr->dgram_cksum =
-				hinic_ipv6_phdr_cksum(ipv6_hdr, ol_flags);
-		}
-
-		off_info->inner_l4_type = UDP_OFFLOAD_ENABLE;
-		off_info->inner_l4_tcp_udp = 1;
-	} else if (((ol_flags & PKT_TX_L4_MASK) == PKT_TX_TCP_CKSUM) ||
-			(ol_flags & PKT_TX_TCP_SEG)) {
-		if (ol_flags & PKT_TX_IPV4) {
-			ipv4_hdr =
-			rte_pktmbuf_mtod_offset(m, struct rte_ipv4_hdr *,
-						inner_l3_offset);
-
-			if (ol_flags & PKT_TX_IP_CKSUM)
-				ipv4_hdr->hdr_checksum = 0;
-
-			/* non-TSO tcp */
-			tcp_hdr = (struct rte_tcp_hdr *)((char *)ipv4_hdr +
-								m->l3_len);
-			tcp_hdr->cksum =
-				hinic_ipv4_phdr_cksum(ipv4_hdr, ol_flags);
-		} else {
-			ipv6_hdr =
-			rte_pktmbuf_mtod_offset(m, struct rte_ipv6_hdr *,
-						inner_l3_offset);
-			/* non-TSO tcp */
-			tcp_hdr =
-			rte_pktmbuf_mtod_offset(m, struct rte_tcp_hdr *,
-						(inner_l3_offset + m->l3_len));
-			tcp_hdr->cksum =
-				hinic_ipv6_phdr_cksum(ipv6_hdr, ol_flags);
-		}
-
-		off_info->inner_l4_type = TCP_OFFLOAD_ENABLE;
-		off_info->inner_l4_tcp_udp = 1;
-	} else if ((ol_flags & PKT_TX_L4_MASK) == PKT_TX_SCTP_CKSUM) {
-		off_info->inner_l4_type = SCTP_OFFLOAD_ENABLE;
-		off_info->inner_l4_tcp_udp = 0;
-		off_info->inner_l4_len = sizeof(struct rte_sctp_hdr);
-	}
+	hinic_calculate_checksum(m, off_info, inner_l3_offset);
 
 	return 0;
 }
-- 
1.8.3.1


^ permalink raw reply	[flat|nested] 6+ messages in thread

* [dpdk-dev] [PATCH v2 2/4] net/hinic: optimize Rx performance for x86
  2020-07-25  8:15 [dpdk-dev] [PATCH v2 0/4] some bugs fixes Xiaoyun wang
  2020-07-25  8:15 ` [dpdk-dev] [PATCH v2 1/4] net/hinic: modify csum offload process Xiaoyun wang
@ 2020-07-25  8:15 ` Xiaoyun wang
  2020-07-25  8:15 ` [dpdk-dev] [PATCH v2 3/4] net/hinic/base: modify vhd type for SDI Xiaoyun wang
                   ` (2 subsequent siblings)
  4 siblings, 0 replies; 6+ messages in thread
From: Xiaoyun wang @ 2020-07-25  8:15 UTC (permalink / raw)
  To: dev
  Cc: ferruh.yigit, bluca, luoxianjun, zhouguoyang, yin.yinshi,
	david.yangxiaoliang, zhaohui8, zhengjingzhou, guojian365,
	Xiaoyun wang, stable

For the x86 platform, the rq cqe is left without cache alignment,
which can improve performance in some gateway scenarios.

Fixes: 361a9ccf81d6 ("net/hinic: optimize Rx performance")

Cc: stable@dpdk.org
Signed-off-by: Xiaoyun wang <cloud.wangxiaoyun@huawei.com>
---
 drivers/net/hinic/hinic_pmd_rx.h | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/drivers/net/hinic/hinic_pmd_rx.h b/drivers/net/hinic/hinic_pmd_rx.h
index 49fa565..8a45f2d 100644
--- a/drivers/net/hinic/hinic_pmd_rx.h
+++ b/drivers/net/hinic/hinic_pmd_rx.h
@@ -35,7 +35,11 @@ struct hinic_rq_cqe {
 	u32 rss_hash;
 
 	u32 rsvd[4];
+#if defined(RTE_ARCH_ARM64)
 } __rte_cache_aligned;
+#else
+};
+#endif
 
 struct hinic_rq_cqe_sect {
 	struct hinic_sge	sge;
-- 
1.8.3.1


^ permalink raw reply	[flat|nested] 6+ messages in thread

* [dpdk-dev] [PATCH v2 3/4] net/hinic/base: modify vhd type for SDI
  2020-07-25  8:15 [dpdk-dev] [PATCH v2 0/4] some bugs fixes Xiaoyun wang
  2020-07-25  8:15 ` [dpdk-dev] [PATCH v2 1/4] net/hinic: modify csum offload process Xiaoyun wang
  2020-07-25  8:15 ` [dpdk-dev] [PATCH v2 2/4] net/hinic: optimize Rx performance for x86 Xiaoyun wang
@ 2020-07-25  8:15 ` Xiaoyun wang
  2020-07-25  8:15 ` [dpdk-dev] [PATCH v2 4/4] net/hinic/base: make timeout not affected by system time jump Xiaoyun wang
  2020-07-28 10:41 ` [dpdk-dev] [PATCH v2 0/4] some bugs fixes Ferruh Yigit
  4 siblings, 0 replies; 6+ messages in thread
From: Xiaoyun wang @ 2020-07-25  8:15 UTC (permalink / raw)
  To: dev
  Cc: ferruh.yigit, bluca, luoxianjun, zhouguoyang, yin.yinshi,
	david.yangxiaoliang, zhaohui8, zhengjingzhou, guojian365,
	Xiaoyun wang

For the ovs offload scenario, when fw processes the virtio header,
no offset is needed; and for standard card scenarios, fw does not
care about the vhd_type parameter. So, to be compatible with both
scenarios, use a 0-byte offset instead.

Signed-off-by: Xiaoyun wang <cloud.wangxiaoyun@huawei.com>
---
 drivers/net/hinic/base/hinic_pmd_nicio.c | 2 +-
 drivers/net/hinic/base/hinic_pmd_nicio.h | 5 +++++
 2 files changed, 6 insertions(+), 1 deletion(-)

diff --git a/drivers/net/hinic/base/hinic_pmd_nicio.c b/drivers/net/hinic/base/hinic_pmd_nicio.c
index 2914e99..576fe59 100644
--- a/drivers/net/hinic/base/hinic_pmd_nicio.c
+++ b/drivers/net/hinic/base/hinic_pmd_nicio.c
@@ -578,7 +578,7 @@ int hinic_init_qp_ctxts(struct hinic_hwdev *hwdev)
 	rx_buf_sz = nic_io->rq_buf_size;
 
 	/* update rx buf size to function table */
-	err = hinic_set_rx_vhd_mode(hwdev, 0, rx_buf_sz);
+	err = hinic_set_rx_vhd_mode(hwdev, HINIC_VHD_TYPE_0B, rx_buf_sz);
 	if (err) {
 		PMD_DRV_LOG(ERR, "Set rx vhd mode failed, rc: %d", err);
 		return err;
diff --git a/drivers/net/hinic/base/hinic_pmd_nicio.h b/drivers/net/hinic/base/hinic_pmd_nicio.h
index 9a487d0..600c073 100644
--- a/drivers/net/hinic/base/hinic_pmd_nicio.h
+++ b/drivers/net/hinic/base/hinic_pmd_nicio.h
@@ -8,6 +8,11 @@
 #define RX_BUF_LEN_16K			16384
 #define RX_BUF_LEN_1_5K			1536
 
+/* vhd type */
+#define HINIC_VHD_TYPE_0B		2
+#define HINIC_VHD_TYPE_10B		1
+#define HINIC_VHD_TYPE_12B		0
+
 #define HINIC_Q_CTXT_MAX		42
 
 /* performance: ci addr RTE_CACHE_SIZE(64B) alignment */
-- 
1.8.3.1


^ permalink raw reply	[flat|nested] 6+ messages in thread

* [dpdk-dev] [PATCH v2 4/4] net/hinic/base: make timeout not affected by system time jump
  2020-07-25  8:15 [dpdk-dev] [PATCH v2 0/4] some bugs fixes Xiaoyun wang
                   ` (2 preceding siblings ...)
  2020-07-25  8:15 ` [dpdk-dev] [PATCH v2 3/4] net/hinic/base: modify vhd type for SDI Xiaoyun wang
@ 2020-07-25  8:15 ` Xiaoyun wang
  2020-07-28 10:41 ` [dpdk-dev] [PATCH v2 0/4] some bugs fixes Ferruh Yigit
  4 siblings, 0 replies; 6+ messages in thread
From: Xiaoyun wang @ 2020-07-25  8:15 UTC (permalink / raw)
  To: dev
  Cc: ferruh.yigit, bluca, luoxianjun, zhouguoyang, yin.yinshi,
	david.yangxiaoliang, zhaohui8, zhengjingzhou, guojian365,
	Xiaoyun wang, stable

Replace gettimeofday() with clock_gettime(CLOCK_MONOTONIC, &tv);
the reason is the same as in the patch "make alarm not affected by
system time jump".

Fixes: 81d53291a466 ("net/hinic/base: add various headers")

Cc: stable@dpdk.org
Signed-off-by: Xiaoyun wang <cloud.wangxiaoyun@huawei.com>
---
 drivers/net/hinic/base/hinic_compat.h | 11 ++++++-----
 1 file changed, 6 insertions(+), 5 deletions(-)

diff --git a/drivers/net/hinic/base/hinic_compat.h b/drivers/net/hinic/base/hinic_compat.h
index 2d21b7b..7036b03 100644
--- a/drivers/net/hinic/base/hinic_compat.h
+++ b/drivers/net/hinic/base/hinic_compat.h
@@ -166,16 +166,17 @@ static inline u32 readl(const volatile void *addr)
 #define spin_lock(spinlock_prt)		rte_spinlock_lock(spinlock_prt)
 #define spin_unlock(spinlock_prt)	rte_spinlock_unlock(spinlock_prt)
 
-static inline unsigned long get_timeofday_ms(void)
+static inline unsigned long clock_gettime_ms(void)
 {
-	struct timeval tv;
+	struct timespec tv;
 
-	(void)gettimeofday(&tv, NULL);
+	(void)clock_gettime(CLOCK_MONOTONIC, &tv);
 
-	return (unsigned long)tv.tv_sec * 1000 + tv.tv_usec / 1000;
+	return (unsigned long)tv.tv_sec * 1000 +
+	       (unsigned long)tv.tv_nsec / 1000000;
 }
 
-#define jiffies	get_timeofday_ms()
+#define jiffies	clock_gettime_ms()
 #define msecs_to_jiffies(ms)	(ms)
 #define time_before(now, end)	((now) < (end))
 
-- 
1.8.3.1


^ permalink raw reply	[flat|nested] 6+ messages in thread

* Re: [dpdk-dev] [PATCH v2 0/4] some bugs fixes
  2020-07-25  8:15 [dpdk-dev] [PATCH v2 0/4] some bugs fixes Xiaoyun wang
                   ` (3 preceding siblings ...)
  2020-07-25  8:15 ` [dpdk-dev] [PATCH v2 4/4] net/hinic/base: make timeout not affected by system time jump Xiaoyun wang
@ 2020-07-28 10:41 ` Ferruh Yigit
  4 siblings, 0 replies; 6+ messages in thread
From: Ferruh Yigit @ 2020-07-28 10:41 UTC (permalink / raw)
  To: Xiaoyun wang, dev
  Cc: bluca, luoxianjun, zhouguoyang, yin.yinshi, david.yangxiaoliang,
	zhaohui8, zhengjingzhou, guojian365

On 7/25/2020 9:15 AM, Xiaoyun wang wrote:
> These patches modify csum offload process, optimize Rx 
> performance for x86, set vhd type with 2 for no offset
> in ovs offload scenario, make timeout not affected by 
> system time jump.
> 
> --
> v1->v2:
>   - fix typo spelling
> 
> v1:
>   - modify csum offload process
>   - optimize Rx performance for x86
>   - modify vhd type for SDI
>   - make timeout not affected by system time jump
> 
> Xiaoyun wang (4):
>   net/hinic: modify csum offload process
>   net/hinic: optimize Rx performance for x86
>   net/hinic/base: modify vhd type for SDI
>   net/hinic/base: make timeout not affected by system time jump

Series applied to dpdk-next-net/master, thanks.

^ permalink raw reply	[flat|nested] 6+ messages in thread

end of thread, other threads:[~2020-07-28 10:41 UTC | newest]

Thread overview: 6+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2020-07-25  8:15 [dpdk-dev] [PATCH v2 0/4] some bugs fixes Xiaoyun wang
2020-07-25  8:15 ` [dpdk-dev] [PATCH v2 1/4] net/hinic: modify csum offload process Xiaoyun wang
2020-07-25  8:15 ` [dpdk-dev] [PATCH v2 2/4] net/hinic: optimize Rx performance for x86 Xiaoyun wang
2020-07-25  8:15 ` [dpdk-dev] [PATCH v2 3/4] net/hinic/base: modify vhd type for SDI Xiaoyun wang
2020-07-25  8:15 ` [dpdk-dev] [PATCH v2 4/4] net/hinic/base: make timeout not affected by system time jump Xiaoyun wang
2020-07-28 10:41 ` [dpdk-dev] [PATCH v2 0/4] some bugs fixes Ferruh Yigit

DPDK patches and discussions

This inbox may be cloned and mirrored by anyone:

	git clone --mirror https://inbox.dpdk.org/dev/0 dev/git/0.git

	# If you have public-inbox 1.1+ installed, you may
	# initialize and index your mirror using the following commands:
	public-inbox-init -V2 dev dev/ https://inbox.dpdk.org/dev \
		dev@dpdk.org
	public-inbox-index dev

Example config snippet for mirrors.
Newsgroup available over NNTP:
	nntp://inbox.dpdk.org/inbox.dpdk.dev


AGPL code for this site: git clone https://public-inbox.org/public-inbox.git