DPDK patches and discussions
From: John Daley <johndale@cisco.com>
To: ferruh.yigit@intel.com
Cc: dev@dpdk.org, Hyong Youb Kim <hyonkim@cisco.com>
Subject: [dpdk-dev] [PATCH v4 1/2] net/enic: move common Rx functions to a new header file
Date: Wed,  3 Oct 2018 13:09:27 -0700	[thread overview]
Message-ID: <20181003200928.27086-1-johndale@cisco.com> (raw)
In-Reply-To: <20180928192544.6833-1-johndale@cisco.com>

From: Hyong Youb Kim <hyonkim@cisco.com>

Move a number of Rx functions to a new header file so that the AVX2-based
Rx handler can use them.

Signed-off-by: Hyong Youb Kim <hyonkim@cisco.com>
Reviewed-by: John Daley <johndale@cisco.com>
---
v2: remove bool type from structure (found by checkpatch)
v3: re-add Reviewed-by
v4: Address Ferruh's comments regarding doc, comment, and log message.
    Fix the Makefile and meson.build to compile the AVX2 handler when
    'machine' does not support AVX2 but the compiler does
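
A minimal sketch of how a vectorized Rx path can consume the shared header
once these helpers move there. The function below is hypothetical (the real
handler arrives in patch 2/2); only the include order, mirroring
enic_rxtx.c, and the shared-helper calls are the point:

  /*
   * Hypothetical consumer of enic_rxtx_common.h (illustration only).
   * Handles one completion descriptor the way the scalar tail of a
   * vectorized handler might, using only the shared helpers.
   */
  #include <rte_mbuf.h>
  #include "enic_compat.h"
  #include "rq_enet_desc.h"
  #include "enic.h"
  #include "enic_rxtx_common.h"

  static inline uint16_t
  enic_recv_one(struct cq_desc *cqd, struct rte_mbuf *mb)
  {
  	if (enic_cq_rx_check_err(cqd))
  		return 0;	/* bad packet; caller drops it */
  	mb->pkt_len = enic_cq_rx_desc_n_bytes(cqd);
  	mb->data_len = mb->pkt_len;
  	/* tnl=0: no overlay offload assumed in this sketch */
  	mb->packet_type = enic_cq_rx_flags_to_pkt_type(cqd, 0);
  	enic_cq_rx_to_pkt_flags(cqd, mb);	/* sets ol_flags, vlan, rss */
  	return 1;
  }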

 drivers/net/enic/enic_rxtx.c        | 263 +---------------------------------
 drivers/net/enic/enic_rxtx_common.h | 271 ++++++++++++++++++++++++++++++++++++
 2 files changed, 272 insertions(+), 262 deletions(-)
 create mode 100644 drivers/net/enic/enic_rxtx_common.h

diff --git a/drivers/net/enic/enic_rxtx.c b/drivers/net/enic/enic_rxtx.c
index 276a2e559..5189ee635 100644
--- a/drivers/net/enic/enic_rxtx.c
+++ b/drivers/net/enic/enic_rxtx.c
@@ -11,6 +11,7 @@
 #include "enic_compat.h"
 #include "rq_enet_desc.h"
 #include "enic.h"
+#include "enic_rxtx_common.h"
 #include <rte_ether.h>
 #include <rte_ip.h>
 #include <rte_tcp.h>
@@ -30,268 +31,6 @@
 #define rte_packet_prefetch(p) do {} while (0)
 #endif
 
-static inline uint16_t
-enic_cq_rx_desc_ciflags(struct cq_enet_rq_desc *crd)
-{
-	return le16_to_cpu(crd->completed_index_flags) & ~CQ_DESC_COMP_NDX_MASK;
-}
-
-static inline uint16_t
-enic_cq_rx_desc_bwflags(struct cq_enet_rq_desc *crd)
-{
-	return le16_to_cpu(crd->bytes_written_flags) &
-			   ~CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK;
-}
-
-static inline uint8_t
-enic_cq_rx_desc_packet_error(uint16_t bwflags)
-{
-	return (bwflags & CQ_ENET_RQ_DESC_FLAGS_TRUNCATED) ==
-		CQ_ENET_RQ_DESC_FLAGS_TRUNCATED;
-}
-
-static inline uint8_t
-enic_cq_rx_desc_eop(uint16_t ciflags)
-{
-	return (ciflags & CQ_ENET_RQ_DESC_FLAGS_EOP)
-		== CQ_ENET_RQ_DESC_FLAGS_EOP;
-}
-
-static inline uint8_t
-enic_cq_rx_desc_csum_not_calc(struct cq_enet_rq_desc *cqrd)
-{
-	return (le16_to_cpu(cqrd->q_number_rss_type_flags) &
-		CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC) ==
-		CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC;
-}
-
-static inline uint8_t
-enic_cq_rx_desc_ipv4_csum_ok(struct cq_enet_rq_desc *cqrd)
-{
-	return (cqrd->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK) ==
-		CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK;
-}
-
-static inline uint8_t
-enic_cq_rx_desc_tcp_udp_csum_ok(struct cq_enet_rq_desc *cqrd)
-{
-	return (cqrd->flags & CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK) ==
-		CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK;
-}
-
-static inline uint8_t
-enic_cq_rx_desc_rss_type(struct cq_enet_rq_desc *cqrd)
-{
-	return (uint8_t)((le16_to_cpu(cqrd->q_number_rss_type_flags) >>
-		CQ_DESC_Q_NUM_BITS) & CQ_ENET_RQ_DESC_RSS_TYPE_MASK);
-}
-
-static inline uint32_t
-enic_cq_rx_desc_rss_hash(struct cq_enet_rq_desc *cqrd)
-{
-	return le32_to_cpu(cqrd->rss_hash);
-}
-
-static inline uint16_t
-enic_cq_rx_desc_vlan(struct cq_enet_rq_desc *cqrd)
-{
-	return le16_to_cpu(cqrd->vlan);
-}
-
-static inline uint16_t
-enic_cq_rx_desc_n_bytes(struct cq_desc *cqd)
-{
-	struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
-	return le16_to_cpu(cqrd->bytes_written_flags) &
-		CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK;
-}
-
-
-static inline uint8_t
-enic_cq_rx_check_err(struct cq_desc *cqd)
-{
-	struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
-	uint16_t bwflags;
-
-	bwflags = enic_cq_rx_desc_bwflags(cqrd);
-	if (unlikely(enic_cq_rx_desc_packet_error(bwflags)))
-		return 1;
-	return 0;
-}
-
-/* Lookup table to translate RX CQ flags to mbuf flags. */
-static inline uint32_t
-enic_cq_rx_flags_to_pkt_type(struct cq_desc *cqd, uint8_t tnl)
-{
-	struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
-	uint8_t cqrd_flags = cqrd->flags;
-	/*
-	 * Odd-numbered entries are for tunnel packets. All packet type info
-	 * applies to the inner packet, and there is no info on the outer
-	 * packet. The outer flags in these entries exist only to avoid
-	 * changing enic_cq_rx_to_pkt_flags(). They are cleared from mbuf
-	 * afterwards.
-	 *
-	 * Also, as there is no tunnel type info (VXLAN, NVGRE, or GENEVE), set
-	 * RTE_PTYPE_TUNNEL_GRENAT..
-	 */
-	static const uint32_t cq_type_table[128] __rte_cache_aligned = {
-		[0x00] = RTE_PTYPE_UNKNOWN,
-		[0x01] = RTE_PTYPE_UNKNOWN |
-			 RTE_PTYPE_TUNNEL_GRENAT |
-			 RTE_PTYPE_INNER_L2_ETHER,
-		[0x20] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_NONFRAG,
-		[0x21] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_NONFRAG |
-			 RTE_PTYPE_TUNNEL_GRENAT |
-			 RTE_PTYPE_INNER_L2_ETHER |
-			 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
-			 RTE_PTYPE_INNER_L4_NONFRAG,
-		[0x22] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP,
-		[0x23] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
-			 RTE_PTYPE_TUNNEL_GRENAT |
-			 RTE_PTYPE_INNER_L2_ETHER |
-			 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
-			 RTE_PTYPE_INNER_L4_UDP,
-		[0x24] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_TCP,
-		[0x25] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_TCP |
-			 RTE_PTYPE_TUNNEL_GRENAT |
-			 RTE_PTYPE_INNER_L2_ETHER |
-			 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
-			 RTE_PTYPE_INNER_L4_TCP,
-		[0x60] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG,
-		[0x61] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG |
-			 RTE_PTYPE_TUNNEL_GRENAT |
-			 RTE_PTYPE_INNER_L2_ETHER |
-			 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
-			 RTE_PTYPE_INNER_L4_FRAG,
-		[0x62] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG,
-		[0x63] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG |
-			 RTE_PTYPE_TUNNEL_GRENAT |
-			 RTE_PTYPE_INNER_L2_ETHER |
-			 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
-			 RTE_PTYPE_INNER_L4_FRAG,
-		[0x64] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG,
-		[0x65] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG |
-			 RTE_PTYPE_TUNNEL_GRENAT |
-			 RTE_PTYPE_INNER_L2_ETHER |
-			 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
-			 RTE_PTYPE_INNER_L4_FRAG,
-		[0x10] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_NONFRAG,
-		[0x11] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_NONFRAG |
-			 RTE_PTYPE_TUNNEL_GRENAT |
-			 RTE_PTYPE_INNER_L2_ETHER |
-			 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
-			 RTE_PTYPE_INNER_L4_NONFRAG,
-		[0x12] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_UDP,
-		[0x13] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
-			 RTE_PTYPE_TUNNEL_GRENAT |
-			 RTE_PTYPE_INNER_L2_ETHER |
-			 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
-			 RTE_PTYPE_INNER_L4_UDP,
-		[0x14] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_TCP,
-		[0x15] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_TCP |
-			 RTE_PTYPE_TUNNEL_GRENAT |
-			 RTE_PTYPE_INNER_L2_ETHER |
-			 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
-			 RTE_PTYPE_INNER_L4_TCP,
-		[0x50] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG,
-		[0x51] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG |
-			 RTE_PTYPE_TUNNEL_GRENAT |
-			 RTE_PTYPE_INNER_L2_ETHER |
-			 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
-			 RTE_PTYPE_INNER_L4_FRAG,
-		[0x52] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG,
-		[0x53] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG |
-			 RTE_PTYPE_TUNNEL_GRENAT |
-			 RTE_PTYPE_INNER_L2_ETHER |
-			 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
-			 RTE_PTYPE_INNER_L4_FRAG,
-		[0x54] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG,
-		[0x55] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG |
-			 RTE_PTYPE_TUNNEL_GRENAT |
-			 RTE_PTYPE_INNER_L2_ETHER |
-			 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
-			 RTE_PTYPE_INNER_L4_FRAG,
-		/* All others reserved */
-	};
-	cqrd_flags &= CQ_ENET_RQ_DESC_FLAGS_IPV4_FRAGMENT
-		| CQ_ENET_RQ_DESC_FLAGS_IPV4 | CQ_ENET_RQ_DESC_FLAGS_IPV6
-		| CQ_ENET_RQ_DESC_FLAGS_TCP | CQ_ENET_RQ_DESC_FLAGS_UDP;
-	return cq_type_table[cqrd_flags + tnl];
-}
-
-static inline void
-enic_cq_rx_to_pkt_flags(struct cq_desc *cqd, struct rte_mbuf *mbuf)
-{
-	struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
-	uint16_t bwflags, pkt_flags = 0, vlan_tci;
-	bwflags = enic_cq_rx_desc_bwflags(cqrd);
-	vlan_tci = enic_cq_rx_desc_vlan(cqrd);
-
-	/* VLAN STRIPPED flag. The L2 packet type updated here also */
-	if (bwflags & CQ_ENET_RQ_DESC_FLAGS_VLAN_STRIPPED) {
-		pkt_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
-		mbuf->packet_type |= RTE_PTYPE_L2_ETHER;
-	} else {
-		if (vlan_tci != 0) {
-			pkt_flags |= PKT_RX_VLAN;
-			mbuf->packet_type |= RTE_PTYPE_L2_ETHER_VLAN;
-		} else {
-			mbuf->packet_type |= RTE_PTYPE_L2_ETHER;
-		}
-	}
-	mbuf->vlan_tci = vlan_tci;
-
-	if ((cqd->type_color & CQ_DESC_TYPE_MASK) == CQ_DESC_TYPE_CLASSIFIER) {
-		struct cq_enet_rq_clsf_desc *clsf_cqd;
-		uint16_t filter_id;
-		clsf_cqd = (struct cq_enet_rq_clsf_desc *)cqd;
-		filter_id = clsf_cqd->filter_id;
-		if (filter_id) {
-			pkt_flags |= PKT_RX_FDIR;
-			if (filter_id != ENIC_MAGIC_FILTER_ID) {
-				mbuf->hash.fdir.hi = clsf_cqd->filter_id;
-				pkt_flags |= PKT_RX_FDIR_ID;
-			}
-		}
-	} else if (enic_cq_rx_desc_rss_type(cqrd)) {
-		/* RSS flag */
-		pkt_flags |= PKT_RX_RSS_HASH;
-		mbuf->hash.rss = enic_cq_rx_desc_rss_hash(cqrd);
-	}
-
-	/* checksum flags */
-	if (mbuf->packet_type & (RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L3_IPV6)) {
-		if (!enic_cq_rx_desc_csum_not_calc(cqrd)) {
-			uint32_t l4_flags;
-			l4_flags = mbuf->packet_type & RTE_PTYPE_L4_MASK;
-
-			/*
-			 * When overlay offload is enabled, the NIC may
-			 * set ipv4_csum_ok=1 if the inner packet is IPv6..
-			 * So, explicitly check for IPv4 before checking
-			 * ipv4_csum_ok.
-			 */
-			if (mbuf->packet_type & RTE_PTYPE_L3_IPV4) {
-				if (enic_cq_rx_desc_ipv4_csum_ok(cqrd))
-					pkt_flags |= PKT_RX_IP_CKSUM_GOOD;
-				else
-					pkt_flags |= PKT_RX_IP_CKSUM_BAD;
-			}
-
-			if (l4_flags == RTE_PTYPE_L4_UDP ||
-			    l4_flags == RTE_PTYPE_L4_TCP) {
-				if (enic_cq_rx_desc_tcp_udp_csum_ok(cqrd))
-					pkt_flags |= PKT_RX_L4_CKSUM_GOOD;
-				else
-					pkt_flags |= PKT_RX_L4_CKSUM_BAD;
-			}
-		}
-	}
-
-	mbuf->ol_flags = pkt_flags;
-}
-
 /* dummy receive function to replace actual function in
  * order to do safe reconfiguration operations.
  */
diff --git a/drivers/net/enic/enic_rxtx_common.h b/drivers/net/enic/enic_rxtx_common.h
new file mode 100644
index 000000000..bfbb4909e
--- /dev/null
+++ b/drivers/net/enic/enic_rxtx_common.h
@@ -0,0 +1,271 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2008-2018 Cisco Systems, Inc.  All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
+ */
+
+#ifndef _ENIC_RXTX_COMMON_H_
+#define _ENIC_RXTX_COMMON_H_
+
+static inline uint16_t
+enic_cq_rx_desc_ciflags(struct cq_enet_rq_desc *crd)
+{
+	return le16_to_cpu(crd->completed_index_flags) & ~CQ_DESC_COMP_NDX_MASK;
+}
+
+static inline uint16_t
+enic_cq_rx_desc_bwflags(struct cq_enet_rq_desc *crd)
+{
+	return le16_to_cpu(crd->bytes_written_flags) &
+			   ~CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK;
+}
+
+static inline uint8_t
+enic_cq_rx_desc_packet_error(uint16_t bwflags)
+{
+	return (bwflags & CQ_ENET_RQ_DESC_FLAGS_TRUNCATED) ==
+		CQ_ENET_RQ_DESC_FLAGS_TRUNCATED;
+}
+
+static inline uint8_t
+enic_cq_rx_desc_eop(uint16_t ciflags)
+{
+	return (ciflags & CQ_ENET_RQ_DESC_FLAGS_EOP)
+		== CQ_ENET_RQ_DESC_FLAGS_EOP;
+}
+
+static inline uint8_t
+enic_cq_rx_desc_csum_not_calc(struct cq_enet_rq_desc *cqrd)
+{
+	return (le16_to_cpu(cqrd->q_number_rss_type_flags) &
+		CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC) ==
+		CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC;
+}
+
+static inline uint8_t
+enic_cq_rx_desc_ipv4_csum_ok(struct cq_enet_rq_desc *cqrd)
+{
+	return (cqrd->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK) ==
+		CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK;
+}
+
+static inline uint8_t
+enic_cq_rx_desc_tcp_udp_csum_ok(struct cq_enet_rq_desc *cqrd)
+{
+	return (cqrd->flags & CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK) ==
+		CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK;
+}
+
+static inline uint8_t
+enic_cq_rx_desc_rss_type(struct cq_enet_rq_desc *cqrd)
+{
+	return (uint8_t)((le16_to_cpu(cqrd->q_number_rss_type_flags) >>
+		CQ_DESC_Q_NUM_BITS) & CQ_ENET_RQ_DESC_RSS_TYPE_MASK);
+}
+
+static inline uint32_t
+enic_cq_rx_desc_rss_hash(struct cq_enet_rq_desc *cqrd)
+{
+	return le32_to_cpu(cqrd->rss_hash);
+}
+
+static inline uint16_t
+enic_cq_rx_desc_vlan(struct cq_enet_rq_desc *cqrd)
+{
+	return le16_to_cpu(cqrd->vlan);
+}
+
+static inline uint16_t
+enic_cq_rx_desc_n_bytes(struct cq_desc *cqd)
+{
+	struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
+	return le16_to_cpu(cqrd->bytes_written_flags) &
+		CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK;
+}
+
+
+static inline uint8_t
+enic_cq_rx_check_err(struct cq_desc *cqd)
+{
+	struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
+	uint16_t bwflags;
+
+	bwflags = enic_cq_rx_desc_bwflags(cqrd);
+	if (unlikely(enic_cq_rx_desc_packet_error(bwflags)))
+		return 1;
+	return 0;
+}
+
+/* Lookup table to translate RX CQ flags to mbuf flags. */
+static uint32_t
+enic_cq_rx_flags_to_pkt_type(struct cq_desc *cqd, uint8_t tnl)
+{
+	struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
+	uint8_t cqrd_flags = cqrd->flags;
+	/*
+	 * Odd-numbered entries are for tunnel packets. All packet type info
+	 * applies to the inner packet, and there is no info on the outer
+	 * packet. The outer flags in these entries exist only to avoid
+	 * changing enic_cq_rx_to_pkt_flags(). They are cleared from mbuf
+	 * afterwards.
+	 *
+	 * Also, as there is no tunnel type info (VXLAN, NVGRE, or GENEVE), set
+	 * RTE_PTYPE_TUNNEL_GRENAT.
+	 */
+	static const uint32_t cq_type_table[128] __rte_cache_aligned = {
+		[0x00] = RTE_PTYPE_UNKNOWN,
+		[0x01] = RTE_PTYPE_UNKNOWN |
+			 RTE_PTYPE_TUNNEL_GRENAT |
+			 RTE_PTYPE_INNER_L2_ETHER,
+		[0x20] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_NONFRAG,
+		[0x21] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_NONFRAG |
+			 RTE_PTYPE_TUNNEL_GRENAT |
+			 RTE_PTYPE_INNER_L2_ETHER |
+			 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+			 RTE_PTYPE_INNER_L4_NONFRAG,
+		[0x22] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP,
+		[0x23] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
+			 RTE_PTYPE_TUNNEL_GRENAT |
+			 RTE_PTYPE_INNER_L2_ETHER |
+			 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+			 RTE_PTYPE_INNER_L4_UDP,
+		[0x24] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_TCP,
+		[0x25] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_TCP |
+			 RTE_PTYPE_TUNNEL_GRENAT |
+			 RTE_PTYPE_INNER_L2_ETHER |
+			 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+			 RTE_PTYPE_INNER_L4_TCP,
+		[0x60] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG,
+		[0x61] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG |
+			 RTE_PTYPE_TUNNEL_GRENAT |
+			 RTE_PTYPE_INNER_L2_ETHER |
+			 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+			 RTE_PTYPE_INNER_L4_FRAG,
+		[0x62] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG,
+		[0x63] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG |
+			 RTE_PTYPE_TUNNEL_GRENAT |
+			 RTE_PTYPE_INNER_L2_ETHER |
+			 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+			 RTE_PTYPE_INNER_L4_FRAG,
+		[0x64] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG,
+		[0x65] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG |
+			 RTE_PTYPE_TUNNEL_GRENAT |
+			 RTE_PTYPE_INNER_L2_ETHER |
+			 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+			 RTE_PTYPE_INNER_L4_FRAG,
+		[0x10] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_NONFRAG,
+		[0x11] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_NONFRAG |
+			 RTE_PTYPE_TUNNEL_GRENAT |
+			 RTE_PTYPE_INNER_L2_ETHER |
+			 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+			 RTE_PTYPE_INNER_L4_NONFRAG,
+		[0x12] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_UDP,
+		[0x13] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
+			 RTE_PTYPE_TUNNEL_GRENAT |
+			 RTE_PTYPE_INNER_L2_ETHER |
+			 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+			 RTE_PTYPE_INNER_L4_UDP,
+		[0x14] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_TCP,
+		[0x15] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_TCP |
+			 RTE_PTYPE_TUNNEL_GRENAT |
+			 RTE_PTYPE_INNER_L2_ETHER |
+			 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+			 RTE_PTYPE_INNER_L4_TCP,
+		[0x50] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG,
+		[0x51] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG |
+			 RTE_PTYPE_TUNNEL_GRENAT |
+			 RTE_PTYPE_INNER_L2_ETHER |
+			 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+			 RTE_PTYPE_INNER_L4_FRAG,
+		[0x52] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG,
+		[0x53] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG |
+			 RTE_PTYPE_TUNNEL_GRENAT |
+			 RTE_PTYPE_INNER_L2_ETHER |
+			 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+			 RTE_PTYPE_INNER_L4_FRAG,
+		[0x54] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG,
+		[0x55] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG |
+			 RTE_PTYPE_TUNNEL_GRENAT |
+			 RTE_PTYPE_INNER_L2_ETHER |
+			 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+			 RTE_PTYPE_INNER_L4_FRAG,
+		/* All others reserved */
+	};
+	cqrd_flags &= CQ_ENET_RQ_DESC_FLAGS_IPV4_FRAGMENT
+		| CQ_ENET_RQ_DESC_FLAGS_IPV4 | CQ_ENET_RQ_DESC_FLAGS_IPV6
+		| CQ_ENET_RQ_DESC_FLAGS_TCP | CQ_ENET_RQ_DESC_FLAGS_UDP;
+	return cq_type_table[cqrd_flags + tnl];
+}
+
+static void
+enic_cq_rx_to_pkt_flags(struct cq_desc *cqd, struct rte_mbuf *mbuf)
+{
+	struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
+	uint16_t bwflags, pkt_flags = 0, vlan_tci;
+	bwflags = enic_cq_rx_desc_bwflags(cqrd);
+	vlan_tci = enic_cq_rx_desc_vlan(cqrd);
+
+	/* VLAN STRIPPED flag. The L2 packet type is updated here as well */
+	if (bwflags & CQ_ENET_RQ_DESC_FLAGS_VLAN_STRIPPED) {
+		pkt_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
+		mbuf->packet_type |= RTE_PTYPE_L2_ETHER;
+	} else {
+		if (vlan_tci != 0) {
+			pkt_flags |= PKT_RX_VLAN;
+			mbuf->packet_type |= RTE_PTYPE_L2_ETHER_VLAN;
+		} else {
+			mbuf->packet_type |= RTE_PTYPE_L2_ETHER;
+		}
+	}
+	mbuf->vlan_tci = vlan_tci;
+
+	if ((cqd->type_color & CQ_DESC_TYPE_MASK) == CQ_DESC_TYPE_CLASSIFIER) {
+		struct cq_enet_rq_clsf_desc *clsf_cqd;
+		uint16_t filter_id;
+		clsf_cqd = (struct cq_enet_rq_clsf_desc *)cqd;
+		filter_id = clsf_cqd->filter_id;
+		if (filter_id) {
+			pkt_flags |= PKT_RX_FDIR;
+			if (filter_id != ENIC_MAGIC_FILTER_ID) {
+				mbuf->hash.fdir.hi = clsf_cqd->filter_id;
+				pkt_flags |= PKT_RX_FDIR_ID;
+			}
+		}
+	} else if (enic_cq_rx_desc_rss_type(cqrd)) {
+		/* RSS flag */
+		pkt_flags |= PKT_RX_RSS_HASH;
+		mbuf->hash.rss = enic_cq_rx_desc_rss_hash(cqrd);
+	}
+
+	/* checksum flags */
+	if (mbuf->packet_type & (RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L3_IPV6)) {
+		if (!enic_cq_rx_desc_csum_not_calc(cqrd)) {
+			uint32_t l4_flags;
+			l4_flags = mbuf->packet_type & RTE_PTYPE_L4_MASK;
+
+			/*
+			 * When overlay offload is enabled, the NIC may
+	 * set ipv4_csum_ok=1 if the inner packet is IPv6.
+			 * So, explicitly check for IPv4 before checking
+			 * ipv4_csum_ok.
+			 */
+			if (mbuf->packet_type & RTE_PTYPE_L3_IPV4) {
+				if (enic_cq_rx_desc_ipv4_csum_ok(cqrd))
+					pkt_flags |= PKT_RX_IP_CKSUM_GOOD;
+				else
+					pkt_flags |= PKT_RX_IP_CKSUM_BAD;
+			}
+
+			if (l4_flags == RTE_PTYPE_L4_UDP ||
+			    l4_flags == RTE_PTYPE_L4_TCP) {
+				if (enic_cq_rx_desc_tcp_udp_csum_ok(cqrd))
+					pkt_flags |= PKT_RX_L4_CKSUM_GOOD;
+				else
+					pkt_flags |= PKT_RX_L4_CKSUM_BAD;
+			}
+		}
+	}
+
+	mbuf->ol_flags = pkt_flags;
+}
+
+#endif /* _ENIC_RXTX_COMMON_H_ */
-- 
2.16.2
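
An aside on the cq_type_table indexing above: cqrd_flags is masked down to
the fragment/IPv4/IPv6/TCP/UDP bits of the descriptor flags, and tnl (0 or
1) then selects the even non-tunnel entry or the odd tunnel entry. Judging
from the table layout (an inference, not stated in the patch), the IPv4 bit
is 0x20 and the TCP bit 0x04, so a tunneled IPv4/TCP completion resolves as
sketched here:

  /* Worked example (hypothetical 'cqd' variable): the NIC sets the
   * IPv4 (0x20) and TCP (0x04) flag bits, so cqrd_flags == 0x24;
   * tnl == 1 bumps the index to the odd entry cq_type_table[0x25],
   * i.e. RTE_PTYPE_TUNNEL_GRENAT plus the inner IPv4/TCP ptypes.
   */
  uint32_t ptype = enic_cq_rx_flags_to_pkt_type(cqd, 1);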


Thread overview: 11+ messages
2018-09-28  2:16 [dpdk-dev] [PATCH " John Daley
2018-09-28  2:16 ` [dpdk-dev] [PATCH 2/2] net/enic: add AVX2 based vectorized Rx handler John Daley
2018-09-28 19:20 ` [dpdk-dev] [PATCH v2 1/2] net/enic: move common Rx functions to a new header file John Daley
2018-09-28 19:20   ` [dpdk-dev] [PATCH v2 2/2] net/enic: add AVX2 based vectorized Rx handler John Daley
2018-09-28 19:25   ` [dpdk-dev] [PATCH v2 1/2] net/enic: move common Rx functions to a new header file John Daley
2018-09-28 19:25     ` [dpdk-dev] [PATCH v2 2/2] net/enic: add AVX2 based vectorized Rx handler John Daley
2018-10-02 16:08       ` Ferruh Yigit
2018-10-03 13:00         ` Hyong Youb Kim
2018-10-03 20:09     ` John Daley [this message]
2018-10-03 20:09       ` [dpdk-dev] [PATCH v4 " John Daley
2018-10-04 16:15       ` [dpdk-dev] [PATCH v4 1/2] net/enic: move common Rx functions to a new header file Ferruh Yigit
