DPDK patches and discussions
* [dpdk-dev] [PATCH 1/3] net/qede: fix supported packet types
@ 2017-10-19  1:13 Rasesh Mody
  2017-10-19  1:13 ` [dpdk-dev] [PATCH 2/3] net/qede: add support for VXLAN UDP port config over VF Rasesh Mody
                   ` (2 more replies)
  0 siblings, 3 replies; 4+ messages in thread
From: Rasesh Mody @ 2017-10-19  1:13 UTC (permalink / raw)
  To: dev; +Cc: Harish Patil, Dept-EngDPDKDev, Rasesh Mody

From: Harish Patil <harish.patil@cavium.com>

Update/fix the supported ptypes to return both inner and outer header
types, the tunnel type, and fragmented and VLAN packet types.
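
For context, a minimal sketch of how an application consumes this; it is
not part of the patch, port_id / the received mbuf m / the array size are
illustrative, and DPDK 17.11-era APIs are assumed:

#include <stdio.h>
#include <rte_ethdev.h>
#include <rte_mbuf.h>
#include <rte_mbuf_ptype.h>

static void dump_rx_ptype(uint16_t port_id, struct rte_mbuf *m)
{
	uint32_t ptypes[32];
	int i, n;

	/* List the ptypes the PMD advertises, backed here by
	 * qede_dev_supported_ptypes_get(). */
	n = rte_eth_dev_get_supported_ptypes(port_id, RTE_PTYPE_ALL_MASK,
					     ptypes, 32);
	for (i = 0; i < n; i++)
		printf("supported ptype 0x%08x\n", ptypes[i]);

	/* Outer and inner layers are now both encoded in packet_type. */
	printf("l3=%s l4=%s tunn=%s inner_l3=%s inner_l4=%s\n",
	       rte_get_ptype_l3_name(m->packet_type),
	       rte_get_ptype_l4_name(m->packet_type),
	       rte_get_ptype_tunnel_name(m->packet_type),
	       rte_get_ptype_inner_l3_name(m->packet_type),
	       rte_get_ptype_inner_l4_name(m->packet_type));
}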

Fixes: 3d4bb4411683 ("net/qede: add fastpath support for VXLAN tunneling")
Fixes: 2ea6f76aff40 ("qede: add core driver")

Signed-off-by: Harish Patil <harish.patil@cavium.com>
Signed-off-by: Rasesh Mody <rasesh.mody@cavium.com>
---
 drivers/net/qede/qede_ethdev.c |   14 +++
 drivers/net/qede/qede_rxtx.c   |  223 ++++++++++++++++++++++++++++++++++------
 drivers/net/qede/qede_rxtx.h   |   23 ++++-
 3 files changed, 227 insertions(+), 33 deletions(-)

diff --git a/drivers/net/qede/qede_ethdev.c b/drivers/net/qede/qede_ethdev.c
index a238781..dc67bfd 100644
--- a/drivers/net/qede/qede_ethdev.c
+++ b/drivers/net/qede/qede_ethdev.c
@@ -1808,8 +1808,22 @@ static int qede_flow_ctrl_get(struct rte_eth_dev *eth_dev,
 qede_dev_supported_ptypes_get(struct rte_eth_dev *eth_dev)
 {
 	static const uint32_t ptypes[] = {
+		RTE_PTYPE_L2_ETHER,
+		RTE_PTYPE_L2_ETHER_VLAN,
 		RTE_PTYPE_L3_IPV4,
 		RTE_PTYPE_L3_IPV6,
+		RTE_PTYPE_L4_TCP,
+		RTE_PTYPE_L4_UDP,
+		RTE_PTYPE_TUNNEL_VXLAN,
+		RTE_PTYPE_L4_FRAG,
+		/* Inner */
+		RTE_PTYPE_INNER_L2_ETHER,
+		RTE_PTYPE_INNER_L2_ETHER_VLAN,
+		RTE_PTYPE_INNER_L3_IPV4,
+		RTE_PTYPE_INNER_L3_IPV6,
+		RTE_PTYPE_INNER_L4_TCP,
+		RTE_PTYPE_INNER_L4_UDP,
+		RTE_PTYPE_INNER_L4_FRAG,
 		RTE_PTYPE_UNKNOWN
 	};
 
diff --git a/drivers/net/qede/qede_rxtx.c b/drivers/net/qede/qede_rxtx.c
index 45b4aeb..aba51ab 100644
--- a/drivers/net/qede/qede_rxtx.c
+++ b/drivers/net/qede/qede_rxtx.c
@@ -844,6 +844,109 @@ static inline uint8_t qede_check_notunn_csum_l4(uint16_t flag)
 	return 0;
 }
 
+/* Returns outer L3 and L4 packet_type for tunneled packets */
+static inline uint32_t qede_rx_cqe_to_pkt_type_outer(struct rte_mbuf *m)
+{
+	uint32_t packet_type = RTE_PTYPE_UNKNOWN;
+	struct ether_hdr *eth_hdr;
+	struct ipv4_hdr *ipv4_hdr;
+	struct ipv6_hdr *ipv6_hdr;
+
+	eth_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);
+	if (eth_hdr->ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv4)) {
+		packet_type |= RTE_PTYPE_L3_IPV4;
+		ipv4_hdr = rte_pktmbuf_mtod_offset(m, struct ipv4_hdr *,
+						   sizeof(struct ether_hdr));
+		if (ipv4_hdr->next_proto_id == IPPROTO_TCP)
+			packet_type |= RTE_PTYPE_L4_TCP;
+		else if (ipv4_hdr->next_proto_id == IPPROTO_UDP)
+			packet_type |= RTE_PTYPE_L4_UDP;
+	} else if (eth_hdr->ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv6)) {
+		packet_type |= RTE_PTYPE_L3_IPV6;
+		ipv6_hdr = rte_pktmbuf_mtod_offset(m, struct ipv6_hdr *,
+						   sizeof(struct ether_hdr));
+		if (ipv6_hdr->proto == IPPROTO_TCP)
+			packet_type |= RTE_PTYPE_L4_TCP;
+		else if (ipv6_hdr->proto == IPPROTO_UDP)
+			packet_type |= RTE_PTYPE_L4_UDP;
+	}
+
+	return packet_type;
+}
+
+static inline uint32_t qede_rx_cqe_to_pkt_type_inner(uint16_t flags)
+{
+	uint16_t val;
+
+	/* Lookup table */
+	static const uint32_t
+	ptype_lkup_tbl[QEDE_PKT_TYPE_MAX] __rte_cache_aligned = {
+		[QEDE_PKT_TYPE_IPV4] = RTE_PTYPE_INNER_L3_IPV4		|
+				       RTE_PTYPE_INNER_L2_ETHER,
+		[QEDE_PKT_TYPE_IPV6] = RTE_PTYPE_INNER_L3_IPV6		|
+				       RTE_PTYPE_INNER_L2_ETHER,
+		[QEDE_PKT_TYPE_IPV4_TCP] = RTE_PTYPE_INNER_L3_IPV4	|
+					   RTE_PTYPE_INNER_L4_TCP	|
+					   RTE_PTYPE_INNER_L2_ETHER,
+		[QEDE_PKT_TYPE_IPV6_TCP] = RTE_PTYPE_INNER_L3_IPV6	|
+					   RTE_PTYPE_INNER_L4_TCP	|
+					   RTE_PTYPE_INNER_L2_ETHER,
+		[QEDE_PKT_TYPE_IPV4_UDP] = RTE_PTYPE_INNER_L3_IPV4	|
+					   RTE_PTYPE_INNER_L4_UDP	|
+					   RTE_PTYPE_INNER_L2_ETHER,
+		[QEDE_PKT_TYPE_IPV6_UDP] = RTE_PTYPE_INNER_L3_IPV6	|
+					   RTE_PTYPE_INNER_L4_UDP	|
+					   RTE_PTYPE_INNER_L2_ETHER,
+		/* Frags with no VLAN */
+		[QEDE_PKT_TYPE_IPV4_FRAG] = RTE_PTYPE_INNER_L3_IPV4	|
+					    RTE_PTYPE_INNER_L4_FRAG	|
+					    RTE_PTYPE_INNER_L2_ETHER,
+		[QEDE_PKT_TYPE_IPV6_FRAG] = RTE_PTYPE_INNER_L3_IPV6	|
+					    RTE_PTYPE_INNER_L4_FRAG	|
+					    RTE_PTYPE_INNER_L2_ETHER,
+		/* VLANs */
+		[QEDE_PKT_TYPE_IPV4_VLAN] = RTE_PTYPE_INNER_L3_IPV4	|
+					    RTE_PTYPE_INNER_L2_ETHER_VLAN,
+		[QEDE_PKT_TYPE_IPV6_VLAN] = RTE_PTYPE_INNER_L3_IPV6	|
+					    RTE_PTYPE_INNER_L2_ETHER_VLAN,
+		[QEDE_PKT_TYPE_IPV4_TCP_VLAN] = RTE_PTYPE_INNER_L3_IPV4	|
+						RTE_PTYPE_INNER_L4_TCP	|
+						RTE_PTYPE_INNER_L2_ETHER_VLAN,
+		[QEDE_PKT_TYPE_IPV6_TCP_VLAN] = RTE_PTYPE_INNER_L3_IPV6	|
+						RTE_PTYPE_INNER_L4_TCP	|
+						RTE_PTYPE_INNER_L2_ETHER_VLAN,
+		[QEDE_PKT_TYPE_IPV4_UDP_VLAN] = RTE_PTYPE_INNER_L3_IPV4	|
+						RTE_PTYPE_INNER_L4_UDP	|
+						RTE_PTYPE_INNER_L2_ETHER_VLAN,
+		[QEDE_PKT_TYPE_IPV6_UDP_VLAN] = RTE_PTYPE_INNER_L3_IPV6	|
+						RTE_PTYPE_INNER_L4_UDP	|
+						RTE_PTYPE_INNER_L2_ETHER_VLAN,
+		/* Frags with VLAN */
+		[QEDE_PKT_TYPE_IPV4_VLAN_FRAG] = RTE_PTYPE_INNER_L3_IPV4 |
+						 RTE_PTYPE_INNER_L4_FRAG |
+						 RTE_PTYPE_INNER_L2_ETHER_VLAN,
+		[QEDE_PKT_TYPE_IPV6_VLAN_FRAG] = RTE_PTYPE_INNER_L3_IPV6 |
+						 RTE_PTYPE_INNER_L4_FRAG |
+						 RTE_PTYPE_INNER_L2_ETHER_VLAN,
+	};
+
+	/* Bits (0..3) provides L3/L4 protocol type */
+	/* Bits (4,5) provides frag and VLAN info */
+	val = ((PARSING_AND_ERR_FLAGS_L3TYPE_MASK <<
+	       PARSING_AND_ERR_FLAGS_L3TYPE_SHIFT) |
+	       (PARSING_AND_ERR_FLAGS_L4PROTOCOL_MASK <<
+		PARSING_AND_ERR_FLAGS_L4PROTOCOL_SHIFT) |
+	       (PARSING_AND_ERR_FLAGS_IPV4FRAG_MASK <<
+		PARSING_AND_ERR_FLAGS_IPV4FRAG_SHIFT) |
+		(PARSING_AND_ERR_FLAGS_TAG8021QEXIST_MASK <<
+		 PARSING_AND_ERR_FLAGS_TAG8021QEXIST_SHIFT)) & flags;
+
+	if (val < QEDE_PKT_TYPE_MAX)
+		return ptype_lkup_tbl[val];
+
+	return RTE_PTYPE_UNKNOWN;
+}
+
 static inline uint32_t qede_rx_cqe_to_pkt_type(uint16_t flags)
 {
 	uint16_t val;
@@ -851,24 +954,68 @@ static inline uint32_t qede_rx_cqe_to_pkt_type(uint16_t flags)
 	/* Lookup table */
 	static const uint32_t
 	ptype_lkup_tbl[QEDE_PKT_TYPE_MAX] __rte_cache_aligned = {
-		[QEDE_PKT_TYPE_IPV4] = RTE_PTYPE_L3_IPV4,
-		[QEDE_PKT_TYPE_IPV6] = RTE_PTYPE_L3_IPV6,
-		[QEDE_PKT_TYPE_IPV4_TCP] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP,
-		[QEDE_PKT_TYPE_IPV6_TCP] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP,
-		[QEDE_PKT_TYPE_IPV4_UDP] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP,
-		[QEDE_PKT_TYPE_IPV6_UDP] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP,
+		[QEDE_PKT_TYPE_IPV4] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L2_ETHER,
+		[QEDE_PKT_TYPE_IPV6] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L2_ETHER,
+		[QEDE_PKT_TYPE_IPV4_TCP] = RTE_PTYPE_L3_IPV4	|
+					   RTE_PTYPE_L4_TCP	|
+					   RTE_PTYPE_L2_ETHER,
+		[QEDE_PKT_TYPE_IPV6_TCP] = RTE_PTYPE_L3_IPV6	|
+					   RTE_PTYPE_L4_TCP	|
+					   RTE_PTYPE_L2_ETHER,
+		[QEDE_PKT_TYPE_IPV4_UDP] = RTE_PTYPE_L3_IPV4	|
+					   RTE_PTYPE_L4_UDP	|
+					   RTE_PTYPE_L2_ETHER,
+		[QEDE_PKT_TYPE_IPV6_UDP] = RTE_PTYPE_L3_IPV6	|
+					   RTE_PTYPE_L4_UDP	|
+					   RTE_PTYPE_L2_ETHER,
+		/* Frags with no VLAN */
+		[QEDE_PKT_TYPE_IPV4_FRAG] = RTE_PTYPE_L3_IPV4	|
+					    RTE_PTYPE_L4_FRAG	|
+					    RTE_PTYPE_L2_ETHER,
+		[QEDE_PKT_TYPE_IPV6_FRAG] = RTE_PTYPE_L3_IPV6	|
+					    RTE_PTYPE_L4_FRAG	|
+					    RTE_PTYPE_L2_ETHER,
+		/* VLANs */
+		[QEDE_PKT_TYPE_IPV4_VLAN] = RTE_PTYPE_L3_IPV4		|
+					    RTE_PTYPE_L2_ETHER_VLAN,
+		[QEDE_PKT_TYPE_IPV6_VLAN] = RTE_PTYPE_L3_IPV6		|
+					    RTE_PTYPE_L2_ETHER_VLAN,
+		[QEDE_PKT_TYPE_IPV4_TCP_VLAN] = RTE_PTYPE_L3_IPV4	|
+						RTE_PTYPE_L4_TCP	|
+						RTE_PTYPE_L2_ETHER_VLAN,
+		[QEDE_PKT_TYPE_IPV6_TCP_VLAN] = RTE_PTYPE_L3_IPV6	|
+						RTE_PTYPE_L4_TCP	|
+						RTE_PTYPE_L2_ETHER_VLAN,
+		[QEDE_PKT_TYPE_IPV4_UDP_VLAN] = RTE_PTYPE_L3_IPV4	|
+						RTE_PTYPE_L4_UDP	|
+						RTE_PTYPE_L2_ETHER_VLAN,
+		[QEDE_PKT_TYPE_IPV6_UDP_VLAN] = RTE_PTYPE_L3_IPV6	|
+						RTE_PTYPE_L4_UDP	|
+						RTE_PTYPE_L2_ETHER_VLAN,
+		/* Frags with VLAN */
+		[QEDE_PKT_TYPE_IPV4_VLAN_FRAG] = RTE_PTYPE_L3_IPV4	|
+						 RTE_PTYPE_L4_FRAG	|
+						 RTE_PTYPE_L2_ETHER_VLAN,
+		[QEDE_PKT_TYPE_IPV6_VLAN_FRAG] = RTE_PTYPE_L3_IPV6	|
+						 RTE_PTYPE_L4_FRAG	|
+						 RTE_PTYPE_L2_ETHER_VLAN,
 	};
 
 	/* Bits (0..3) provides L3/L4 protocol type */
+	/* Bits (4,5) provides frag and VLAN info */
 	val = ((PARSING_AND_ERR_FLAGS_L3TYPE_MASK <<
 	       PARSING_AND_ERR_FLAGS_L3TYPE_SHIFT) |
 	       (PARSING_AND_ERR_FLAGS_L4PROTOCOL_MASK <<
-		PARSING_AND_ERR_FLAGS_L4PROTOCOL_SHIFT)) & flags;
+		PARSING_AND_ERR_FLAGS_L4PROTOCOL_SHIFT) |
+	       (PARSING_AND_ERR_FLAGS_IPV4FRAG_MASK <<
+		PARSING_AND_ERR_FLAGS_IPV4FRAG_SHIFT) |
+		(PARSING_AND_ERR_FLAGS_TAG8021QEXIST_MASK <<
+		 PARSING_AND_ERR_FLAGS_TAG8021QEXIST_SHIFT)) & flags;
 
 	if (val < QEDE_PKT_TYPE_MAX)
-		return ptype_lkup_tbl[val] | RTE_PTYPE_L2_ETHER;
-	else
-		return RTE_PTYPE_UNKNOWN;
+		return ptype_lkup_tbl[val];
+
+	return RTE_PTYPE_UNKNOWN;
 }
 
 static inline uint8_t
@@ -1100,6 +1247,27 @@ static inline uint32_t qede_rx_cqe_to_tunn_pkt_type(uint16_t flags)
 	return 0;
 }
 
+#ifdef RTE_LIBRTE_QEDE_DEBUG_RX
+static inline void
+print_rx_bd_info(struct rte_mbuf *m, struct qede_rx_queue *rxq,
+		 uint8_t bitfield)
+{
+	PMD_RX_LOG(INFO, rxq,
+		"len 0x%x bf 0x%x hash_val 0x%x"
+		" ol_flags 0x%04lx l2=%s l3=%s l4=%s tunn=%s"
+		" inner_l2=%s inner_l3=%s inner_l4=%s\n",
+		m->data_len, bitfield, m->hash.rss,
+		(unsigned long)m->ol_flags,
+		rte_get_ptype_l2_name(m->packet_type),
+		rte_get_ptype_l3_name(m->packet_type),
+		rte_get_ptype_l4_name(m->packet_type),
+		rte_get_ptype_tunnel_name(m->packet_type),
+		rte_get_ptype_inner_l2_name(m->packet_type),
+		rte_get_ptype_inner_l3_name(m->packet_type),
+		rte_get_ptype_inner_l4_name(m->packet_type));
+}
+#endif
+
 uint16_t
 qede_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 {
@@ -1120,7 +1288,6 @@ static inline uint32_t qede_rx_cqe_to_tunn_pkt_type(uint16_t flags)
 	uint16_t parse_flag;
 #ifdef RTE_LIBRTE_QEDE_DEBUG_RX
 	uint8_t bitfield_val;
-	enum rss_hash_type htype;
 #endif
 	uint8_t tunn_parse_flag;
 	uint8_t j;
@@ -1214,8 +1381,6 @@ static inline uint32_t qede_rx_cqe_to_tunn_pkt_type(uint16_t flags)
 			rss_hash = rte_le_to_cpu_32(fp_cqe->rss_hash);
 #ifdef RTE_LIBRTE_QEDE_DEBUG_RX
 			bitfield_val = fp_cqe->bitfields;
-			htype = (uint8_t)GET_FIELD(bitfield_val,
-					ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE);
 #endif
 		} else {
 			parse_flag =
@@ -1226,8 +1391,6 @@ static inline uint32_t qede_rx_cqe_to_tunn_pkt_type(uint16_t flags)
 			vlan_tci = rte_le_to_cpu_16(cqe_start_tpa->vlan_tag);
 #ifdef RTE_LIBRTE_QEDE_DEBUG_RX
 			bitfield_val = cqe_start_tpa->bitfields;
-			htype = (uint8_t)GET_FIELD(bitfield_val,
-				ETH_FAST_PATH_RX_TPA_START_CQE_RSS_HASH_TYPE);
 #endif
 			rss_hash = rte_le_to_cpu_32(cqe_start_tpa->rss_hash);
 		}
@@ -1247,8 +1410,17 @@ static inline uint32_t qede_rx_cqe_to_tunn_pkt_type(uint16_t flags)
 				else
 					flags = fp_cqe->tunnel_pars_flags.flags;
 				tunn_parse_flag = flags;
+				/* Tunnel_type */
 				packet_type =
 				qede_rx_cqe_to_tunn_pkt_type(tunn_parse_flag);
+
+				/* Inner header */
+				packet_type |=
+				      qede_rx_cqe_to_pkt_type_inner(parse_flag);
+
+				/* Outer L3/L4 types are not available in the CQE */
+				packet_type |=
+				      qede_rx_cqe_to_pkt_type_outer(rx_mb);
 			}
 		} else {
 			PMD_RX_LOG(INFO, rxq, "Rx non-tunneled packet\n");
@@ -1275,21 +1447,16 @@ static inline uint32_t qede_rx_cqe_to_tunn_pkt_type(uint16_t flags)
 			}
 		}
 
-		if (CQE_HAS_VLAN(parse_flag)) {
+		if (CQE_HAS_VLAN(parse_flag) ||
+		    CQE_HAS_OUTER_VLAN(parse_flag)) {
+			/* Note: FW doesn't indicate Q-in-Q packet */
 			ol_flags |= PKT_RX_VLAN_PKT;
 			if (qdev->vlan_strip_flg) {
 				ol_flags |= PKT_RX_VLAN_STRIPPED;
 				rx_mb->vlan_tci = vlan_tci;
 			}
 		}
-		if (CQE_HAS_OUTER_VLAN(parse_flag)) {
-			ol_flags |= PKT_RX_QINQ_PKT;
-			if (qdev->vlan_strip_flg) {
-				rx_mb->vlan_tci = vlan_tci;
-				ol_flags |= PKT_RX_QINQ_STRIPPED;
-			}
-			rx_mb->vlan_tci_outer = 0;
-		}
+
 		/* RSS Hash */
 		if (qdev->rss_enable) {
 			ol_flags |= PKT_RX_RSS_HASH;
@@ -1341,11 +1508,9 @@ static inline uint32_t qede_rx_cqe_to_tunn_pkt_type(uint16_t flags)
 		rx_mb->ol_flags = ol_flags;
 		rx_mb->data_len = len;
 		rx_mb->packet_type = packet_type;
-		PMD_RX_LOG(INFO, rxq,
-			   "pkt_type 0x%04x len %u hash_type %d hash_val 0x%x"
-			   " ol_flags 0x%04lx\n",
-			   packet_type, len, htype, rx_mb->hash.rss,
-			   (unsigned long)ol_flags);
+#ifdef RTE_LIBRTE_QEDE_DEBUG_RX
+		print_rx_bd_info(rx_mb, rxq, bitfield_val);
+#endif
 		if (!tpa_start_flg) {
 			rx_mb->nb_segs = fp_cqe->bd_num;
 			rx_mb->pkt_len = pkt_len;
diff --git a/drivers/net/qede/qede_rxtx.h b/drivers/net/qede/qede_rxtx.h
index b551fd6..acf9e47 100644
--- a/drivers/net/qede/qede_rxtx.h
+++ b/drivers/net/qede/qede_rxtx.h
@@ -84,7 +84,8 @@
 
 /* Macros for non-tunnel packet types lkup table */
 #define QEDE_PKT_TYPE_UNKNOWN				0x0
-#define QEDE_PKT_TYPE_MAX				0xf
+#define QEDE_PKT_TYPE_MAX				0x3f
+
 #define QEDE_PKT_TYPE_IPV4				0x1
 #define QEDE_PKT_TYPE_IPV6				0x2
 #define QEDE_PKT_TYPE_IPV4_TCP				0x5
@@ -92,6 +93,20 @@
 #define QEDE_PKT_TYPE_IPV4_UDP				0x9
 #define QEDE_PKT_TYPE_IPV6_UDP				0xa
 
+/* For frag pkts, the corresponding IP bits are set */
+#define QEDE_PKT_TYPE_IPV4_FRAG				0x11
+#define QEDE_PKT_TYPE_IPV6_FRAG				0x12
+
+#define QEDE_PKT_TYPE_IPV4_VLAN				0x21
+#define QEDE_PKT_TYPE_IPV6_VLAN				0x22
+#define QEDE_PKT_TYPE_IPV4_TCP_VLAN			0x25
+#define QEDE_PKT_TYPE_IPV6_TCP_VLAN			0x26
+#define QEDE_PKT_TYPE_IPV4_UDP_VLAN			0x29
+#define QEDE_PKT_TYPE_IPV6_UDP_VLAN			0x2a
+
+#define QEDE_PKT_TYPE_IPV4_VLAN_FRAG			0x31
+#define QEDE_PKT_TYPE_IPV6_VLAN_FRAG			0x32
+
 /* Macros for tunneled packets with next protocol lkup table */
 #define QEDE_PKT_TYPE_TUNN_GENEVE			0x1
 #define QEDE_PKT_TYPE_TUNN_GRE				0x2
@@ -99,12 +114,12 @@
 
 /* Bit 2 is don't care bit */
 #define QEDE_PKT_TYPE_TUNN_L2_TENID_NOEXIST_GENEVE	0x9
-#define QEDE_PKT_TYPE_TUNN_L2_TENID_NOEXIST_GRE	0xa
+#define QEDE_PKT_TYPE_TUNN_L2_TENID_NOEXIST_GRE		0xa
 #define QEDE_PKT_TYPE_TUNN_L2_TENID_NOEXIST_VXLAN	0xb
 
 #define QEDE_PKT_TYPE_TUNN_L2_TENID_EXIST_GENEVE	0xd
 #define QEDE_PKT_TYPE_TUNN_L2_TENID_EXIST_GRE		0xe
-#define QEDE_PKT_TYPE_TUNN_L2_TENID_EXIST_VXLAN	0xf
+#define QEDE_PKT_TYPE_TUNN_L2_TENID_EXIST_VXLAN		0xf
 
 
 #define QEDE_PKT_TYPE_TUNN_IPV4_TENID_NOEXIST_GENEVE    0x11
@@ -112,7 +127,7 @@
 #define QEDE_PKT_TYPE_TUNN_IPV4_TENID_NOEXIST_VXLAN     0x13
 
 #define QEDE_PKT_TYPE_TUNN_IPV4_TENID_EXIST_GENEVE	0x15
-#define QEDE_PKT_TYPE_TUNN_IPV4_TENID_EXIST_GRE	0x16
+#define QEDE_PKT_TYPE_TUNN_IPV4_TENID_EXIST_GRE		0x16
 #define QEDE_PKT_TYPE_TUNN_IPV4_TENID_EXIST_VXLAN	0x17
 
 
-- 
1.7.10.3


* [dpdk-dev] [PATCH 2/3] net/qede: add support for VXLAN UDP port config over VF
  2017-10-19  1:13 [dpdk-dev] [PATCH 1/3] net/qede: fix supported packet types Rasesh Mody
@ 2017-10-19  1:13 ` Rasesh Mody
  2017-10-19  1:13 ` [dpdk-dev] [PATCH 3/3] net/qede: fix to re-enable LRO during device start Rasesh Mody
  2017-10-23 18:35 ` [dpdk-dev] [PATCH 1/3] net/qede: fix supported packet types Ferruh Yigit
  2 siblings, 0 replies; 4+ messages in thread
From: Rasesh Mody @ 2017-10-19  1:13 UTC (permalink / raw)
  To: dev; +Cc: Harish Patil, Dept-EngDPDKDev

From: Harish Patil <harish.patil@cavium.com>

- Allow VXLAN enable/disable over a VF using the udp_tunnel_port_add/del
  APIs (see the sketch after this list). Only the default MAC/VLAN
  classification is supported.
- Enable VXLAN before the UDP port configuration.
- Change the VXLAN default UDP port to 4789 (the IANA-assigned port)
  instead of 8472.
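
A minimal usage sketch of the generic API these ops back (assumptions:
DPDK 17.11-era ethdev API; port_id and the helper name are illustrative):

#include <rte_ethdev.h>

static int add_vxlan_port(uint16_t port_id, uint16_t udp_port)
{
	struct rte_eth_udp_tunnel tunnel_udp = {
		.udp_port = udp_port,	/* e.g. the new default, 4789 */
		.prot_type = RTE_TUNNEL_TYPE_VXLAN,
	};

	/* Dispatches to qede_udp_dst_port_add(); per this patch the PMD
	 * enables the VXLAN tunnel first if it is not enabled yet. */
	return rte_eth_dev_udp_tunnel_port_add(port_id, &tunnel_udp);
}

Deletion goes through rte_eth_dev_udp_tunnel_port_delete(), which maps to
qede_udp_dst_port_del() and disables VXLAN once no filters remain.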

Signed-off-by: Harish Patil <harish.patil@cavium.com>
---
 drivers/net/qede/qede_ethdev.c |  195 ++++++++++++++++++++++++----------------
 drivers/net/qede/qede_ethdev.h |   11 ++-
 2 files changed, 127 insertions(+), 79 deletions(-)

diff --git a/drivers/net/qede/qede_ethdev.c b/drivers/net/qede/qede_ethdev.c
index dc67bfd..5727c6a 100644
--- a/drivers/net/qede/qede_ethdev.c
+++ b/drivers/net/qede/qede_ethdev.c
@@ -602,15 +602,45 @@ static void qede_set_ucast_cmn_params(struct ecore_filter_ucast *ucast)
 	return ecore_filter_accept_cmd(edev, 0, flags, false, false,
 			ECORE_SPQ_MODE_CB, NULL);
 }
-static void qede_set_cmn_tunn_param(struct ecore_tunnel_info *p_tunn,
-				    uint8_t clss, bool mode, bool mask)
+
+static int
+qede_vxlan_enable(struct rte_eth_dev *eth_dev, uint8_t clss,
+		  bool enable, bool mask)
 {
-	memset(p_tunn, 0, sizeof(struct ecore_tunnel_info));
-	p_tunn->vxlan.b_update_mode = mode;
-	p_tunn->vxlan.b_mode_enabled = mask;
-	p_tunn->b_update_rx_cls = true;
-	p_tunn->b_update_tx_cls = true;
-	p_tunn->vxlan.tun_cls = clss;
+	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+	enum _ecore_status_t rc = ECORE_INVAL;
+	struct ecore_ptt *p_ptt;
+	struct ecore_tunnel_info tunn;
+	struct ecore_hwfn *p_hwfn;
+	int i;
+
+	memset(&tunn, 0, sizeof(struct ecore_tunnel_info));
+	tunn.vxlan.b_update_mode = enable;
+	tunn.vxlan.b_mode_enabled = mask;
+	tunn.b_update_rx_cls = true;
+	tunn.b_update_tx_cls = true;
+	tunn.vxlan.tun_cls = clss;
+
+	for_each_hwfn(edev, i) {
+		p_hwfn = &edev->hwfns[i];
+		p_ptt = IS_PF(edev) ? ecore_ptt_acquire(p_hwfn) : NULL;
+		rc = ecore_sp_pf_update_tunn_cfg(p_hwfn, p_ptt,
+				&tunn, ECORE_SPQ_MODE_CB, NULL);
+		if (rc != ECORE_SUCCESS) {
+			DP_ERR(edev, "Failed to update tunn_clss %u\n",
+					tunn.vxlan.tun_cls);
+			break;
+		}
+	}
+
+	if (rc == ECORE_SUCCESS) {
+		qdev->vxlan.enable = enable;
+		qdev->vxlan.udp_port = (enable) ? QEDE_VXLAN_DEF_PORT : 0;
+		DP_INFO(edev, "vxlan is %s\n", enable ? "enabled" : "disabled");
+	}
+
+	return rc;
 }
 
 static int
@@ -2172,19 +2202,51 @@ static int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
 	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
 	struct ecore_tunnel_info tunn; /* @DPDK */
 	struct ecore_hwfn *p_hwfn;
+	struct ecore_ptt *p_ptt;
+	uint16_t udp_port;
 	int rc, i;
 
 	PMD_INIT_FUNC_TRACE(edev);
 
 	memset(&tunn, 0, sizeof(tunn));
 	if (tunnel_udp->prot_type == RTE_TUNNEL_TYPE_VXLAN) {
+		/* Enable VxLAN tunnel if needed before UDP port update using
+		 * default MAC/VLAN classification.
+		 */
+		if (add) {
+			if (qdev->vxlan.udp_port == tunnel_udp->udp_port) {
+				DP_INFO(edev,
+					"UDP port %u was already configured\n",
+					tunnel_udp->udp_port);
+				return ECORE_SUCCESS;
+			}
+			/* Enable VXLAN if it was not enabled while adding
+			 * VXLAN filter.
+			 */
+			if (!qdev->vxlan.enable) {
+				rc = qede_vxlan_enable(eth_dev,
+					ECORE_TUNN_CLSS_MAC_VLAN, true, true);
+				if (rc != ECORE_SUCCESS) {
+					DP_ERR(edev, "Failed to enable VXLAN "
+						"prior to updating UDP port\n");
+					return rc;
+				}
+			}
+			udp_port = tunnel_udp->udp_port;
+		} else {
+			if (qdev->vxlan.udp_port != tunnel_udp->udp_port) {
+				DP_ERR(edev, "UDP port %u doesn't exist\n",
+					tunnel_udp->udp_port);
+				return ECORE_INVAL;
+			}
+			udp_port = 0;
+		}
+
 		tunn.vxlan_port.b_update_port = true;
-		tunn.vxlan_port.port = (add) ? tunnel_udp->udp_port :
-						  QEDE_VXLAN_DEF_PORT;
+		tunn.vxlan_port.port = udp_port;
 		for_each_hwfn(edev, i) {
 			p_hwfn = &edev->hwfns[i];
-			struct ecore_ptt *p_ptt = IS_PF(edev) ?
-			       ecore_ptt_acquire(p_hwfn) : NULL;
+			p_ptt = IS_PF(edev) ? ecore_ptt_acquire(p_hwfn) : NULL;
 			rc = ecore_sp_pf_update_tunn_cfg(p_hwfn, p_ptt, &tunn,
 						ECORE_SPQ_MODE_CB, NULL);
 			if (rc != ECORE_SUCCESS) {
@@ -2195,6 +2257,15 @@ static int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
 				return rc;
 			}
 		}
+
+		qdev->vxlan.udp_port = udp_port;
+		/* If the request is to delete the UDP port and the number
+		 * of VXLAN filters has reached 0, then VXLAN offload can
+		 * be disabled.
+		 */
+		if (!add && qdev->vxlan.enable && qdev->vxlan.num_filters == 0)
+			return qede_vxlan_enable(eth_dev,
+					ECORE_TUNN_CLSS_MAC_VLAN, false, true);
 	}
 
 	return 0;
@@ -2284,35 +2355,38 @@ static int qede_vxlan_tunn_config(struct rte_eth_dev *eth_dev,
 {
 	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
 	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
-	struct ecore_tunnel_info tunn;
-	struct ecore_hwfn *p_hwfn;
 	enum ecore_filter_ucast_type type;
-	enum ecore_tunn_clss clss;
+	enum ecore_tunn_clss clss = MAX_ECORE_TUNN_CLSS;
 	struct ecore_filter_ucast ucast;
 	char str[80];
-	uint16_t filter_type;
-	int rc, i;
+	uint16_t filter_type = 0;
+	int rc;
 
 	PMD_INIT_FUNC_TRACE(edev);
 
-	filter_type = conf->filter_type | qdev->vxlan_filter_type;
-	/* First determine if the given filter classification is supported */
-	qede_get_ecore_tunn_params(filter_type, &type, &clss, str);
-	if (clss == MAX_ECORE_TUNN_CLSS) {
-		DP_ERR(edev, "Wrong filter type\n");
-		return -EINVAL;
-	}
-	/* Init tunnel ucast params */
-	rc = qede_set_ucast_tunn_cmn_param(&ucast, conf, type);
-	if (rc != ECORE_SUCCESS) {
-		DP_ERR(edev, "Unsupported VxLAN filter type 0x%x\n",
-				conf->filter_type);
-		return rc;
-	}
-	DP_INFO(edev, "Rule: \"%s\", op %d, type 0x%x\n",
-		str, filter_op, ucast.type);
 	switch (filter_op) {
 	case RTE_ETH_FILTER_ADD:
+		if (IS_VF(edev))
+			return qede_vxlan_enable(eth_dev,
+					ECORE_TUNN_CLSS_MAC_VLAN, true, true);
+
+		filter_type = conf->filter_type;
+		/* Determine if the given filter classification is supported */
+		qede_get_ecore_tunn_params(filter_type, &type, &clss, str);
+		if (clss == MAX_ECORE_TUNN_CLSS) {
+			DP_ERR(edev, "Unsupported filter type\n");
+			return -EINVAL;
+		}
+		/* Init tunnel ucast params */
+		rc = qede_set_ucast_tunn_cmn_param(&ucast, conf, type);
+		if (rc != ECORE_SUCCESS) {
+			DP_ERR(edev, "Unsupported VxLAN filter type 0x%x\n",
+			conf->filter_type);
+			return rc;
+		}
+		DP_INFO(edev, "Rule: \"%s\", op %d, type 0x%x\n",
+			str, filter_op, ucast.type);
+
 		ucast.opcode = ECORE_FILTER_ADD;
 
 		/* Skip MAC/VLAN if filter is based on VNI */
@@ -2332,26 +2406,17 @@ static int qede_vxlan_tunn_config(struct rte_eth_dev *eth_dev,
 		if (rc != ECORE_SUCCESS)
 			return rc;
 
-		qdev->vxlan_filter_type = filter_type;
+		qdev->vxlan.num_filters++;
+		qdev->vxlan.filter_type = filter_type;
+		if (!qdev->vxlan.enable)
+			return qede_vxlan_enable(eth_dev, clss, true, true);
 
-		DP_INFO(edev, "Enabling VXLAN tunneling\n");
-		qede_set_cmn_tunn_param(&tunn, clss, true, true);
-		for_each_hwfn(edev, i) {
-			p_hwfn = &edev->hwfns[i];
-			struct ecore_ptt *p_ptt = IS_PF(edev) ?
-			       ecore_ptt_acquire(p_hwfn) : NULL;
-			rc = ecore_sp_pf_update_tunn_cfg(p_hwfn, p_ptt,
-				&tunn, ECORE_SPQ_MODE_CB, NULL);
-			if (rc != ECORE_SUCCESS) {
-				DP_ERR(edev, "Failed to update tunn_clss %u\n",
-				       tunn.vxlan.tun_cls);
-				if (IS_PF(edev))
-					ecore_ptt_release(p_hwfn, p_ptt);
-			}
-		}
-		qdev->num_tunn_filters++; /* Filter added successfully */
 	break;
 	case RTE_ETH_FILTER_DELETE:
+		if (IS_VF(edev))
+			return qede_vxlan_enable(eth_dev,
+				ECORE_TUNN_CLSS_MAC_VLAN, false, true);
+
 		ucast.opcode = ECORE_FILTER_REMOVE;
 
 		if (!(filter_type & ETH_TUNNEL_FILTER_TENID)) {
@@ -2365,38 +2430,14 @@ static int qede_vxlan_tunn_config(struct rte_eth_dev *eth_dev,
 		if (rc != ECORE_SUCCESS)
 			return rc;
 
-		qdev->vxlan_filter_type = filter_type;
-		qdev->num_tunn_filters--;
-
 		/* Disable VXLAN if VXLAN filters become 0 */
-		if (qdev->num_tunn_filters == 0) {
-			DP_INFO(edev, "Disabling VXLAN tunneling\n");
-
-			/* Use 0 as tunnel mode */
-			qede_set_cmn_tunn_param(&tunn, clss, false, true);
-			for_each_hwfn(edev, i) {
-				p_hwfn = &edev->hwfns[i];
-				struct ecore_ptt *p_ptt = IS_PF(edev) ?
-				       ecore_ptt_acquire(p_hwfn) : NULL;
-				rc = ecore_sp_pf_update_tunn_cfg(p_hwfn, p_ptt,
-					&tunn, ECORE_SPQ_MODE_CB, NULL);
-				if (rc != ECORE_SUCCESS) {
-					DP_ERR(edev,
-						"Failed to update tunn_clss %u\n",
-						tunn.vxlan.tun_cls);
-					if (IS_PF(edev))
-						ecore_ptt_release(p_hwfn,
-								  p_ptt);
-					break;
-				}
-			}
-		}
+		if (qdev->vxlan.num_filters == 0)
+			return qede_vxlan_enable(eth_dev, clss, false, true);
 	break;
 	default:
 		DP_ERR(edev, "Unsupported operation %d\n", filter_op);
 		return -EINVAL;
 	}
-	DP_INFO(edev, "Current VXLAN filters %d\n", qdev->num_tunn_filters);
 
 	return 0;
 }
@@ -2524,6 +2565,8 @@ int qede_dev_filter_ctrl(struct rte_eth_dev *eth_dev,
 	.reta_update  = qede_rss_reta_update,
 	.reta_query  = qede_rss_reta_query,
 	.mtu_set = qede_set_mtu,
+	.udp_tunnel_port_add = qede_udp_dst_port_add,
+	.udp_tunnel_port_del = qede_udp_dst_port_del,
 };
 
 static void qede_update_pf_params(struct ecore_dev *edev)
diff --git a/drivers/net/qede/qede_ethdev.h b/drivers/net/qede/qede_ethdev.h
index 4543533..3212020 100644
--- a/drivers/net/qede/qede_ethdev.h
+++ b/drivers/net/qede/qede_ethdev.h
@@ -122,7 +122,6 @@
 #define PCI_DEVICE_ID_QLOGIC_AH_IOV            CHIP_NUM_AH_IOV
 
 
-#define QEDE_VXLAN_DEF_PORT		8472
 
 extern char fw_file[];
 
@@ -171,6 +170,13 @@ struct qede_fdir_info {
 	SLIST_HEAD(fdir_list_head, qede_fdir_entry)fdir_list_head;
 };
 
+struct qede_vxlan_tunn {
+	bool enable;
+	uint16_t num_filters;
+	uint16_t filter_type;
+#define QEDE_VXLAN_DEF_PORT			(4789)
+	uint16_t udp_port;
+};
 
 /*
  *  Structure to store private data for each port.
@@ -200,8 +206,7 @@ struct qede_dev {
 	SLIST_HEAD(uc_list_head, qede_ucast_entry) uc_list_head;
 	uint16_t num_uc_addr;
 	bool handle_hw_err;
-	uint16_t num_tunn_filters;
-	uint16_t vxlan_filter_type;
+	struct qede_vxlan_tunn vxlan;
 	struct qede_fdir_info fdir_info;
 	bool vlan_strip_flg;
 	char drv_ver[QEDE_PMD_DRV_VER_STR_SIZE];
-- 
1.7.10.3


* [dpdk-dev] [PATCH 3/3] net/qede: fix to re-enable LRO during device start
  2017-10-19  1:13 [dpdk-dev] [PATCH 1/3] net/qede: fix supported packet types Rasesh Mody
  2017-10-19  1:13 ` [dpdk-dev] [PATCH 2/3] net/qede: add support for VXLAN UDP port config over VF Rasesh Mody
@ 2017-10-19  1:13 ` Rasesh Mody
  2017-10-23 18:35 ` [dpdk-dev] [PATCH 1/3] net/qede: fix supported packet types Ferruh Yigit
  2 siblings, 0 replies; 4+ messages in thread
From: Rasesh Mody @ 2017-10-19  1:13 UTC (permalink / raw)
  To: dev; +Cc: Harish Patil, Dept-EngDPDKDev, stable

From: Harish Patil <harish.patil@cavium.com>

Move the LRO configuration from dev_configure to dev_start so that LRO
can be re-enabled after a port restart.
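
A sketch of the sequence this fixes (assumptions: DPDK 17.11-era config
fields; port_id is illustrative and queue/mempool setup is elided):

#include <rte_ethdev.h>

static int start_with_lro(uint16_t port_id)
{
	struct rte_eth_conf conf = { .rxmode = { .enable_lro = 1 } };
	int rc;

	rc = rte_eth_dev_configure(port_id, 1, 1, &conf);
	if (rc)
		return rc;
	/* ... rx/tx queue setup elided ... */
	rc = rte_eth_dev_start(port_id);	/* TPA now enabled here */
	if (rc)
		return rc;

	/* Before this fix the restart below left LRO off: qede_dev_stop()
	 * disables TPA, and only dev_configure turned it back on. */
	rte_eth_dev_stop(port_id);
	return rte_eth_dev_start(port_id);
}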

Fixes: 9a6d30ae6d46 ("net/qede: refactoring vport handling code")
Cc: stable@dpdk.org

Signed-off-by: Harish Patil <harish.patil@cavium.com>
---
 drivers/net/qede/qede_ethdev.c |   29 ++++++++++++-----------------
 1 file changed, 12 insertions(+), 17 deletions(-)

diff --git a/drivers/net/qede/qede_ethdev.c b/drivers/net/qede/qede_ethdev.c
index 5727c6a..4b61904 100644
--- a/drivers/net/qede/qede_ethdev.c
+++ b/drivers/net/qede/qede_ethdev.c
@@ -520,7 +520,7 @@ int qede_enable_tpa(struct rte_eth_dev *eth_dev, bool flg)
 			return -1;
 		}
 	}
-
+	qdev->enable_lro = flg;
 	DP_INFO(edev, "LRO is %s\n", flg ? "enabled" : "disabled");
 
 	return 0;
@@ -1108,6 +1108,7 @@ static void qede_fastpath_start(struct ecore_dev *edev)
 
 static int qede_dev_start(struct rte_eth_dev *eth_dev)
 {
+	struct rte_eth_rxmode *rxmode = &eth_dev->data->dev_conf.rxmode;
 	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
 	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
 
@@ -1118,10 +1119,15 @@ static int qede_dev_start(struct rte_eth_dev *eth_dev)
 		if (qede_update_mtu(eth_dev, qdev->new_mtu))
 			goto err;
 		qdev->mtu = qdev->new_mtu;
-		/* If MTU has changed then update TPA too */
-		if (qdev->enable_lro)
-			if (qede_enable_tpa(eth_dev, true))
-				goto err;
+	}
+
+	/* Configure TPA parameters */
+	if (rxmode->enable_lro) {
+		if (qede_enable_tpa(eth_dev, true))
+			return -EINVAL;
+		/* Enable scatter mode for LRO */
+		if (!rxmode->enable_scatter)
+			eth_dev->data->scattered_rx = 1;
 	}
 
 	/* Start queues */
@@ -1133,7 +1139,7 @@ static int qede_dev_start(struct rte_eth_dev *eth_dev)
 	 * Also, we would like to retain similar behavior in PF case, so we
 	 * don't do PF/VF specific check here.
 	 */
-	if (eth_dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS)
+	if (rxmode->mq_mode == ETH_MQ_RX_RSS)
 		if (qede_config_rss(eth_dev))
 			goto err;
 
@@ -1169,7 +1175,6 @@ static void qede_dev_stop(struct rte_eth_dev *eth_dev)
 	if (qdev->enable_lro)
 		qede_enable_tpa(eth_dev, false);
 
-	/* TODO: Do we need disable LRO or RSS */
 	/* Stop queues */
 	qede_stop_queues(eth_dev);
 
@@ -1256,16 +1261,6 @@ static int qede_dev_configure(struct rte_eth_dev *eth_dev)
 	qdev->mtu = rxmode->max_rx_pkt_len;
 	qdev->new_mtu = qdev->mtu;
 
-	/* Configure TPA parameters */
-	if (rxmode->enable_lro) {
-		if (qede_enable_tpa(eth_dev, true))
-			return -EINVAL;
-		/* Enable scatter mode for LRO */
-		if (!rxmode->enable_scatter)
-			eth_dev->data->scattered_rx = 1;
-	}
-	qdev->enable_lro = rxmode->enable_lro;
-
 	/* Enable VLAN offloads by default */
 	qede_vlan_offload_set(eth_dev, ETH_VLAN_STRIP_MASK  |
 			ETH_VLAN_FILTER_MASK |
-- 
1.7.10.3


* Re: [dpdk-dev] [PATCH 1/3] net/qede: fix supported packet types
  2017-10-19  1:13 [dpdk-dev] [PATCH 1/3] net/qede: fix supported packet types Rasesh Mody
  2017-10-19  1:13 ` [dpdk-dev] [PATCH 2/3] net/qede: add support for VXLAN UDP port config over VF Rasesh Mody
  2017-10-19  1:13 ` [dpdk-dev] [PATCH 3/3] net/qede: fix to re-enable LRO during device start Rasesh Mody
@ 2017-10-23 18:35 ` Ferruh Yigit
  2 siblings, 0 replies; 4+ messages in thread
From: Ferruh Yigit @ 2017-10-23 18:35 UTC (permalink / raw)
  To: Rasesh Mody, dev; +Cc: Harish Patil, Dept-EngDPDKDev

On 10/18/2017 6:13 PM, Rasesh Mody wrote:
> From: Harish Patil <harish.patil@cavium.com>
> 
> Update/fix supported ptypes to return both inner and outer headers,
> tunnel_type, fragmented and VLAN packet types.
> 
> Fixes: 3d4bb4411683 ("net/qede: add fastpath support for VXLAN tunneling")
> Fixes: 2ea6f76aff40 ("qede: add core driver")
> 
> Signed-off-by: Harish Patil <harish.patil@cavium.com>
> Signed-off-by: Rasesh Mody <rasesh.mody@cavium.com>

Series applied to dpdk-next-net/master, thanks.


(There was a build error [1], fixed while applying, please check it.)

[1]
.../dpdk/drivers/net/qede/qede_ethdev.c: In function ‘qede_vxlan_tunn_config’:
.../dpdk/drivers/net/qede/qede_ethdev.c:684:13: error: ‘ucast.vlan’ may be used
uninitialized in this function [-Werror=maybe-uninitialized]
        ucast->vlan == tmp->vlan   &&
        ~~~~~^~~~~~
.../dpdk/drivers/net/qede/qede_ethdev.c:685:13: error: ‘ucast.vni’ may be used
uninitialized in this function [-Werror=maybe-uninitialized]
        ucast->vni == tmp->vni)
        ~~~~~^~~~~

Fixed in patch 2/3 by providing an initial value of "0" to ucast.
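
A sketch of the shape of that fix, zero-initializing the filter struct so
its fields start defined; the exact applied hunk is not quoted here, so
the form below is an assumption:

	struct ecore_filter_ucast ucast = {0};	/* vlan/vni start at 0 */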
