DPDK patches and discussions
* [dpdk-dev] [PATCH] net/ixgbe: add flow parser ntuple support
@ 2017-12-20  2:10 Wei Zhao
  2017-12-20  2:33 ` [dpdk-dev] [PATCH v2] " Wei Zhao
  0 siblings, 1 reply; 17+ messages in thread
From: Wei Zhao @ 2017-12-20  2:10 UTC (permalink / raw)
  To: dev; +Cc: Wei Zhao

The ixgbe ntuple filter in rte_flow needs to support diverting data
with fewer than 5 tuple parameters, so add this new support in the
parser code.
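
For illustration, a minimal sketch (not part of the patch) of the kind
of rule this change is meant to accept: an ingress rule that matches
only the IPv4 addresses and leaves the TCP item empty, so the parser
takes the new early-exit path. The function name, port, queue and
addresses are hypothetical.

#include <rte_flow.h>
#include <rte_byteorder.h>

static struct rte_flow *
create_partial_ntuple_rule(uint16_t port_id, uint16_t rx_queue,
			   struct rte_flow_error *error)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item_ipv4 ip_spec = {
		.hdr.src_addr = rte_cpu_to_be_32(0x01020304), /* 1.2.3.4 */
		.hdr.dst_addr = rte_cpu_to_be_32(0x05060708), /* 5.6.7.8 */
	};
	struct rte_flow_item_ipv4 ip_mask = {
		.hdr.src_addr = rte_cpu_to_be_32(0xffffffff),
		.hdr.dst_addr = rte_cpu_to_be_32(0xffffffff),
	};
	struct rte_flow_item pattern[] = {
		/* ETH item with NULL spec/mask is skipped */
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
		  .spec = &ip_spec, .mask = &ip_mask },
		/* TCP item with NULL spec/mask: no port match */
		{ .type = RTE_FLOW_ITEM_TYPE_TCP },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_queue queue = { .index = rx_queue };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	return rte_flow_create(port_id, &attr, pattern, actions, error);
}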

Signed-off-by: Wei Zhao <wei.zhao1@intel.com>
---
 drivers/net/ixgbe/ixgbe_flow.c | 86 ++++++++++++++++++++++--------------------
 1 file changed, 46 insertions(+), 40 deletions(-)

diff --git a/drivers/net/ixgbe/ixgbe_flow.c b/drivers/net/ixgbe/ixgbe_flow.c
index 8f964cf..2ac58cf 100644
--- a/drivers/net/ixgbe/ixgbe_flow.c
+++ b/drivers/net/ixgbe/ixgbe_flow.c
@@ -310,48 +310,49 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
 		}
 	}
 
-	/* get the IPv4 info */
-	if (!item->spec || !item->mask) {
-		rte_flow_error_set(error, EINVAL,
-			RTE_FLOW_ERROR_TYPE_ITEM,
-			item, "Invalid ntuple mask");
-		return -rte_errno;
-	}
-	/*Not supported last point for range*/
-	if (item->last) {
-		rte_flow_error_set(error, EINVAL,
-			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
-			item, "Not supported last point for range");
-		return -rte_errno;
-
-	}
+	if (item->mask) {
+		/* get the IPv4 info */
+		if (!item->spec || !item->mask) {
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM,
+				item, "Invalid ntuple mask");
+			return -rte_errno;
+		}
+		/*Not supported last point for range*/
+		if (item->last) {
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+				item, "Not supported last point for range");
+			return -rte_errno;
+		}
 
-	ipv4_mask = (const struct rte_flow_item_ipv4 *)item->mask;
-	/**
-	 * Only support src & dst addresses, protocol,
-	 * others should be masked.
-	 */
-	if (ipv4_mask->hdr.version_ihl ||
-	    ipv4_mask->hdr.type_of_service ||
-	    ipv4_mask->hdr.total_length ||
-	    ipv4_mask->hdr.packet_id ||
-	    ipv4_mask->hdr.fragment_offset ||
-	    ipv4_mask->hdr.time_to_live ||
-	    ipv4_mask->hdr.hdr_checksum) {
-			rte_flow_error_set(error,
-			EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
-			item, "Not supported by ntuple filter");
-		return -rte_errno;
-	}
+		ipv4_mask = (const struct rte_flow_item_ipv4 *)item->mask;
+		/**
+		 * Only support src & dst addresses, protocol,
+		 * others should be masked.
+		 */
+		if (ipv4_mask->hdr.version_ihl ||
+			ipv4_mask->hdr.type_of_service ||
+			ipv4_mask->hdr.total_length ||
+			ipv4_mask->hdr.packet_id ||
+			ipv4_mask->hdr.fragment_offset ||
+			ipv4_mask->hdr.time_to_live ||
+			ipv4_mask->hdr.hdr_checksum) {
+				rte_flow_error_set(error,
+					EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+					item, "Not supported by ntuple filter");
+				return -rte_errno;
+		}
 
-	filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
-	filter->src_ip_mask = ipv4_mask->hdr.src_addr;
-	filter->proto_mask  = ipv4_mask->hdr.next_proto_id;
+		filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
+		filter->src_ip_mask = ipv4_mask->hdr.src_addr;
+		filter->proto_mask  = ipv4_mask->hdr.next_proto_id;
 
-	ipv4_spec = (const struct rte_flow_item_ipv4 *)item->spec;
-	filter->dst_ip = ipv4_spec->hdr.dst_addr;
-	filter->src_ip = ipv4_spec->hdr.src_addr;
-	filter->proto  = ipv4_spec->hdr.next_proto_id;
+		ipv4_spec = (const struct rte_flow_item_ipv4 *)item->spec;
+		filter->dst_ip = ipv4_spec->hdr.dst_addr;
+		filter->src_ip = ipv4_spec->hdr.src_addr;
+		filter->proto  = ipv4_spec->hdr.next_proto_id;
+	}
 
 	/* check if the next not void item is TCP or UDP */
 	item = next_no_void_pattern(pattern, item);
@@ -366,7 +367,12 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
 		return -rte_errno;
 	}
 
-	/* get the TCP/UDP info */
+	if ((item->type != RTE_FLOW_ITEM_TYPE_END) &&
+		(!item->spec && !item->mask)) {
+		goto action;
+	}
+
+	/* get the TCP/UDP/SCTP info */
 	if ((item->type != RTE_FLOW_ITEM_TYPE_END) &&
 		(!item->spec || !item->mask)) {
 		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
-- 
2.7.5


* [dpdk-dev] [PATCH v2] net/ixgbe: add flow parser ntuple support
  2017-12-20  2:10 [dpdk-dev] [PATCH] net/ixgbe: add flow parser ntuple support Wei Zhao
@ 2017-12-20  2:33 ` Wei Zhao
  2017-12-26  9:28   ` [dpdk-dev] [PATCH v3] " Wei Zhao
  0 siblings, 1 reply; 17+ messages in thread
From: Wei Zhao @ 2017-12-20  2:33 UTC (permalink / raw)
  To: dev; +Cc: wenzhuo.lu, Wei Zhao

The ixgbe ntuple filter in rte_flow needs to support diverting data
with fewer than 5 tuple parameters, so add this new support in the
parser code.

Signed-off-by: Wei Zhao <wei.zhao1@intel.com>

---

v2:
-fix coding style issue.

---
 drivers/net/ixgbe/ixgbe_flow.c | 84 ++++++++++++++++++++++--------------------
 1 file changed, 45 insertions(+), 39 deletions(-)

diff --git a/drivers/net/ixgbe/ixgbe_flow.c b/drivers/net/ixgbe/ixgbe_flow.c
index 8f964cf..f69f0c4 100644
--- a/drivers/net/ixgbe/ixgbe_flow.c
+++ b/drivers/net/ixgbe/ixgbe_flow.c
@@ -310,48 +310,49 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
 		}
 	}
 
-	/* get the IPv4 info */
-	if (!item->spec || !item->mask) {
-		rte_flow_error_set(error, EINVAL,
-			RTE_FLOW_ERROR_TYPE_ITEM,
-			item, "Invalid ntuple mask");
-		return -rte_errno;
-	}
-	/*Not supported last point for range*/
-	if (item->last) {
-		rte_flow_error_set(error, EINVAL,
-			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
-			item, "Not supported last point for range");
-		return -rte_errno;
-
-	}
+	if (item->mask) {
+		/* get the IPv4 info */
+		if (!item->spec || !item->mask) {
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM,
+				item, "Invalid ntuple mask");
+			return -rte_errno;
+		}
+		/*Not supported last point for range*/
+		if (item->last) {
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+				item, "Not supported last point for range");
+			return -rte_errno;
+		}
 
-	ipv4_mask = (const struct rte_flow_item_ipv4 *)item->mask;
-	/**
-	 * Only support src & dst addresses, protocol,
-	 * others should be masked.
-	 */
-	if (ipv4_mask->hdr.version_ihl ||
-	    ipv4_mask->hdr.type_of_service ||
-	    ipv4_mask->hdr.total_length ||
-	    ipv4_mask->hdr.packet_id ||
-	    ipv4_mask->hdr.fragment_offset ||
-	    ipv4_mask->hdr.time_to_live ||
-	    ipv4_mask->hdr.hdr_checksum) {
+		ipv4_mask = (const struct rte_flow_item_ipv4 *)item->mask;
+		/**
+		 * Only support src & dst addresses, protocol,
+		 * others should be masked.
+		 */
+		if (ipv4_mask->hdr.version_ihl ||
+		    ipv4_mask->hdr.type_of_service ||
+		    ipv4_mask->hdr.total_length ||
+		    ipv4_mask->hdr.packet_id ||
+		    ipv4_mask->hdr.fragment_offset ||
+		    ipv4_mask->hdr.time_to_live ||
+		    ipv4_mask->hdr.hdr_checksum) {
 			rte_flow_error_set(error,
-			EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
-			item, "Not supported by ntuple filter");
-		return -rte_errno;
-	}
+				EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+				item, "Not supported by ntuple filter");
+			return -rte_errno;
+		}
 
-	filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
-	filter->src_ip_mask = ipv4_mask->hdr.src_addr;
-	filter->proto_mask  = ipv4_mask->hdr.next_proto_id;
+		filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
+		filter->src_ip_mask = ipv4_mask->hdr.src_addr;
+		filter->proto_mask  = ipv4_mask->hdr.next_proto_id;
 
-	ipv4_spec = (const struct rte_flow_item_ipv4 *)item->spec;
-	filter->dst_ip = ipv4_spec->hdr.dst_addr;
-	filter->src_ip = ipv4_spec->hdr.src_addr;
-	filter->proto  = ipv4_spec->hdr.next_proto_id;
+		ipv4_spec = (const struct rte_flow_item_ipv4 *)item->spec;
+		filter->dst_ip = ipv4_spec->hdr.dst_addr;
+		filter->src_ip = ipv4_spec->hdr.src_addr;
+		filter->proto  = ipv4_spec->hdr.next_proto_id;
+	}
 
 	/* check if the next not void item is TCP or UDP */
 	item = next_no_void_pattern(pattern, item);
@@ -366,8 +367,13 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
 		return -rte_errno;
 	}
 
-	/* get the TCP/UDP info */
 	if ((item->type != RTE_FLOW_ITEM_TYPE_END) &&
+		(!item->spec && !item->mask)) {
+		goto action;
+	}
+
+	/* get the TCP/UDP/SCTP info */
+	if (item->type != RTE_FLOW_ITEM_TYPE_END &&
 		(!item->spec || !item->mask)) {
 		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
 		rte_flow_error_set(error, EINVAL,
-- 
2.7.5


* [dpdk-dev] [PATCH v3] net/ixgbe: add flow parser ntuple support
  2017-12-20  2:33 ` [dpdk-dev] [PATCH v2] " Wei Zhao
@ 2017-12-26  9:28   ` Wei Zhao
  2017-12-26  9:39     ` [dpdk-dev] [PATCH v4] " Wei Zhao
  2017-12-26  9:49     ` Wei Zhao
  0 siblings, 2 replies; 17+ messages in thread
From: Wei Zhao @ 2017-12-26  9:28 UTC (permalink / raw)
  To: dev; +Cc: wenzhuo.lu, Wei Zhao

The ixgbe ntuple filter in rte_flow needs to support diverting data
with fewer than 5 tuple parameters, so add this new support in the
parser code.

Signed-off-by: Wei Zhao <wei.zhao1@intel.com>

---

v2:
-fix coding style issue.

v3:
-add parser vlan pattern code.
---
 drivers/net/ixgbe/ixgbe_flow.c | 136 +++++++++++++++++++++++++++++------------
 1 file changed, 96 insertions(+), 40 deletions(-)

diff --git a/drivers/net/ixgbe/ixgbe_flow.c b/drivers/net/ixgbe/ixgbe_flow.c
index 887d933..a066917 100644
--- a/drivers/net/ixgbe/ixgbe_flow.c
+++ b/drivers/net/ixgbe/ixgbe_flow.c
@@ -207,6 +207,12 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
 	const struct rte_flow_item_udp *udp_mask;
 	const struct rte_flow_item_sctp *sctp_spec;
 	const struct rte_flow_item_sctp *sctp_mask;
+	const struct rte_flow_item_eth *eth_spec;
+	const struct rte_flow_item_eth *eth_mask;
+	const struct rte_flow_item_vlan *vlan_spec;
+	const struct rte_flow_item_vlan *vlan_mask;
+	struct rte_flow_item_eth eth_null;
+	struct rte_flow_item_vlan vlan_null;
 
 	if (!pattern) {
 		rte_flow_error_set(error,
@@ -228,6 +234,9 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
 		return -rte_errno;
 	}
 
+	memset(&eth_null, 0, sizeof(struct rte_flow_item_eth));
+	memset(&vlan_null, 0, sizeof(struct rte_flow_item_vlan));
+
 #ifdef RTE_LIBRTE_SECURITY
 	/**
 	 *  Special case for flow action type RTE_FLOW_ACTION_TYPE_SECURITY
@@ -277,6 +286,8 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
 	}
 	/* Skip Ethernet */
 	if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
+		eth_spec = (const struct rte_flow_item_eth *)item->spec;
+		eth_mask = (const struct rte_flow_item_eth *)item->mask;
 		/*Not supported last point for range*/
 		if (item->last) {
 			rte_flow_error_set(error,
@@ -287,15 +298,20 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
 
 		}
 		/* if the first item is MAC, the content should be NULL */
-		if (item->spec || item->mask) {
+		if ((item->spec || item->mask) &&
+			(memcmp(eth_spec, &eth_null,
+				sizeof(struct rte_flow_item_eth)) ||
+			 memcmp(eth_mask, &eth_null,
+				sizeof(struct rte_flow_item_eth)))) {
 			rte_flow_error_set(error, EINVAL,
 				RTE_FLOW_ERROR_TYPE_ITEM,
 				item, "Not supported by ntuple filter");
 			return -rte_errno;
 		}
-		/* check if the next not void item is IPv4 */
+		/* check if the next not void item is IPv4 or Vlan */
 		item = next_no_void_pattern(pattern, item);
-		if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
+		if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
+			item->type != RTE_FLOW_ITEM_TYPE_VLAN) {
 			rte_flow_error_set(error,
 			  EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
 			  item, "Not supported by ntuple filter");
@@ -303,48 +319,83 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
 		}
 	}
 
-	/* get the IPv4 info */
-	if (!item->spec || !item->mask) {
-		rte_flow_error_set(error, EINVAL,
-			RTE_FLOW_ERROR_TYPE_ITEM,
-			item, "Invalid ntuple mask");
-		return -rte_errno;
-	}
-	/*Not supported last point for range*/
-	if (item->last) {
-		rte_flow_error_set(error, EINVAL,
-			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
-			item, "Not supported last point for range");
-		return -rte_errno;
+	if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
+		vlan_spec = (const struct rte_flow_item_vlan *)item->spec;
+		vlan_mask = (const struct rte_flow_item_vlan *)item->mask;
+		/*Not supported last point for range*/
+		if (item->last) {
+			rte_flow_error_set(error,
+			  EINVAL,
+			  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+			  item, "Not supported last point for range");
+			return -rte_errno;
 
-	}
+		}
+		/* the content should be NULL */
+		if ((item->spec || item->mask) &&
+			(memcmp(vlan_spec, &vlan_null,
+				sizeof(struct rte_flow_item_vlan)) ||
+			 memcmp(vlan_mask, &vlan_null,
+				sizeof(struct rte_flow_item_vlan)))) {
 
-	ipv4_mask = (const struct rte_flow_item_ipv4 *)item->mask;
-	/**
-	 * Only support src & dst addresses, protocol,
-	 * others should be masked.
-	 */
-	if (ipv4_mask->hdr.version_ihl ||
-	    ipv4_mask->hdr.type_of_service ||
-	    ipv4_mask->hdr.total_length ||
-	    ipv4_mask->hdr.packet_id ||
-	    ipv4_mask->hdr.fragment_offset ||
-	    ipv4_mask->hdr.time_to_live ||
-	    ipv4_mask->hdr.hdr_checksum) {
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM,
+				item, "Not supported by ntuple filter");
+			return -rte_errno;
+		}
+		/* check if the next not void item is IPv4 */
+		item = next_no_void_pattern(pattern, item);
+		if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
 			rte_flow_error_set(error,
-			EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
-			item, "Not supported by ntuple filter");
-		return -rte_errno;
+			  EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+			  item, "Not supported by ntuple filter");
+			  return -rte_errno;
+		}
 	}
 
-	filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
-	filter->src_ip_mask = ipv4_mask->hdr.src_addr;
-	filter->proto_mask  = ipv4_mask->hdr.next_proto_id;
+	if (item->mask) {
+		/* get the IPv4 info */
+		if (!item->spec || !item->mask) {
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM,
+				item, "Invalid ntuple mask");
+			return -rte_errno;
+		}
+		/*Not supported last point for range*/
+		if (item->last) {
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+				item, "Not supported last point for range");
+			return -rte_errno;
+		}
 
-	ipv4_spec = (const struct rte_flow_item_ipv4 *)item->spec;
-	filter->dst_ip = ipv4_spec->hdr.dst_addr;
-	filter->src_ip = ipv4_spec->hdr.src_addr;
-	filter->proto  = ipv4_spec->hdr.next_proto_id;
+		ipv4_mask = (const struct rte_flow_item_ipv4 *)item->mask;
+		/**
+		 * Only support src & dst addresses, protocol,
+		 * others should be masked.
+		 */
+		if (ipv4_mask->hdr.version_ihl ||
+		    ipv4_mask->hdr.type_of_service ||
+		    ipv4_mask->hdr.total_length ||
+		    ipv4_mask->hdr.packet_id ||
+		    ipv4_mask->hdr.fragment_offset ||
+		    ipv4_mask->hdr.time_to_live ||
+		    ipv4_mask->hdr.hdr_checksum) {
+			rte_flow_error_set(error,
+				EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+				item, "Not supported by ntuple filter");
+			return -rte_errno;
+		}
+
+		filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
+		filter->src_ip_mask = ipv4_mask->hdr.src_addr;
+		filter->proto_mask  = ipv4_mask->hdr.next_proto_id;
+
+		ipv4_spec = (const struct rte_flow_item_ipv4 *)item->spec;
+		filter->dst_ip = ipv4_spec->hdr.dst_addr;
+		filter->src_ip = ipv4_spec->hdr.src_addr;
+		filter->proto  = ipv4_spec->hdr.next_proto_id;
+	}
 
 	/* check if the next not void item is TCP or UDP */
 	item = next_no_void_pattern(pattern, item);
@@ -359,8 +410,13 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
 		return -rte_errno;
 	}
 
-	/* get the TCP/UDP info */
 	if ((item->type != RTE_FLOW_ITEM_TYPE_END) &&
+		(!item->spec && !item->mask)) {
+		goto action;
+	}
+
+	/* get the TCP/UDP/SCTP info */
+	if (item->type != RTE_FLOW_ITEM_TYPE_END &&
 		(!item->spec || !item->mask)) {
 		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
 		rte_flow_error_set(error, EINVAL,
-- 
2.9.3


* [dpdk-dev] [PATCH v4] net/ixgbe: add flow parser ntuple support
  2017-12-26  9:28   ` [dpdk-dev] [PATCH v3] " Wei Zhao
@ 2017-12-26  9:39     ` Wei Zhao
  2017-12-26  9:49     ` Wei Zhao
  1 sibling, 0 replies; 17+ messages in thread
From: Wei Zhao @ 2017-12-26  9:39 UTC (permalink / raw)
  To: dev; +Cc: wenzhuo.lu, Wei Zhao

The ixgbe ntuple filter in rte_flow needs to support diverting data
with fewer than 5 tuple parameters, so add this new support in the
parser code.

Signed-off-by: Wei Zhao <wei.zhao1@intel.com>

---

v2:
-fix coding style issue.

v3:
-add parser vlan pattern code.

v4:
-fix patch check issue.
---
 drivers/net/ixgbe/ixgbe_flow.c | 136 +++++++++++++++++++++++++++++------------
 1 file changed, 96 insertions(+), 40 deletions(-)

diff --git a/drivers/net/ixgbe/ixgbe_flow.c b/drivers/net/ixgbe/ixgbe_flow.c
index 887d933..a066917 100644
--- a/drivers/net/ixgbe/ixgbe_flow.c
+++ b/drivers/net/ixgbe/ixgbe_flow.c
@@ -207,6 +207,12 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
 	const struct rte_flow_item_udp *udp_mask;
 	const struct rte_flow_item_sctp *sctp_spec;
 	const struct rte_flow_item_sctp *sctp_mask;
+	const struct rte_flow_item_eth *eth_spec;
+	const struct rte_flow_item_eth *eth_mask;
+	const struct rte_flow_item_vlan *vlan_spec;
+	const struct rte_flow_item_vlan *vlan_mask;
+	struct rte_flow_item_eth eth_null;
+	struct rte_flow_item_vlan vlan_null;
 
 	if (!pattern) {
 		rte_flow_error_set(error,
@@ -228,6 +234,9 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
 		return -rte_errno;
 	}
 
+	memset(&eth_null, 0, sizeof(struct rte_flow_item_eth));
+	memset(&vlan_null, 0, sizeof(struct rte_flow_item_vlan));
+
 #ifdef RTE_LIBRTE_SECURITY
 	/**
 	 *  Special case for flow action type RTE_FLOW_ACTION_TYPE_SECURITY
@@ -277,6 +286,8 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
 	}
 	/* Skip Ethernet */
 	if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
+		eth_spec = (const struct rte_flow_item_eth *)item->spec;
+		eth_mask = (const struct rte_flow_item_eth *)item->mask;
 		/*Not supported last point for range*/
 		if (item->last) {
 			rte_flow_error_set(error,
@@ -287,15 +298,20 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
 
 		}
 		/* if the first item is MAC, the content should be NULL */
-		if (item->spec || item->mask) {
+		if ((item->spec || item->mask) &&
+			(memcmp(eth_spec, &eth_null,
+				sizeof(struct rte_flow_item_eth)) ||
+			 memcmp(eth_mask, &eth_null,
+				sizeof(struct rte_flow_item_eth)))) {
 			rte_flow_error_set(error, EINVAL,
 				RTE_FLOW_ERROR_TYPE_ITEM,
 				item, "Not supported by ntuple filter");
 			return -rte_errno;
 		}
-		/* check if the next not void item is IPv4 */
+		/* check if the next not void item is IPv4 or Vlan */
 		item = next_no_void_pattern(pattern, item);
-		if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
+		if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
+			item->type != RTE_FLOW_ITEM_TYPE_VLAN) {
 			rte_flow_error_set(error,
 			  EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
 			  item, "Not supported by ntuple filter");
@@ -303,48 +319,83 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
 		}
 	}
 
-	/* get the IPv4 info */
-	if (!item->spec || !item->mask) {
-		rte_flow_error_set(error, EINVAL,
-			RTE_FLOW_ERROR_TYPE_ITEM,
-			item, "Invalid ntuple mask");
-		return -rte_errno;
-	}
-	/*Not supported last point for range*/
-	if (item->last) {
-		rte_flow_error_set(error, EINVAL,
-			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
-			item, "Not supported last point for range");
-		return -rte_errno;
+	if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
+		vlan_spec = (const struct rte_flow_item_vlan *)item->spec;
+		vlan_mask = (const struct rte_flow_item_vlan *)item->mask;
+		/*Not supported last point for range*/
+		if (item->last) {
+			rte_flow_error_set(error,
+			  EINVAL,
+			  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+			  item, "Not supported last point for range");
+			return -rte_errno;
 
-	}
+		}
+		/* the content should be NULL */
+		if ((item->spec || item->mask) &&
+			(memcmp(vlan_spec, &vlan_null,
+				sizeof(struct rte_flow_item_vlan)) ||
+			 memcmp(vlan_mask, &vlan_null,
+				sizeof(struct rte_flow_item_vlan)))) {
 
-	ipv4_mask = (const struct rte_flow_item_ipv4 *)item->mask;
-	/**
-	 * Only support src & dst addresses, protocol,
-	 * others should be masked.
-	 */
-	if (ipv4_mask->hdr.version_ihl ||
-	    ipv4_mask->hdr.type_of_service ||
-	    ipv4_mask->hdr.total_length ||
-	    ipv4_mask->hdr.packet_id ||
-	    ipv4_mask->hdr.fragment_offset ||
-	    ipv4_mask->hdr.time_to_live ||
-	    ipv4_mask->hdr.hdr_checksum) {
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM,
+				item, "Not supported by ntuple filter");
+			return -rte_errno;
+		}
+		/* check if the next not void item is IPv4 */
+		item = next_no_void_pattern(pattern, item);
+		if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
 			rte_flow_error_set(error,
-			EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
-			item, "Not supported by ntuple filter");
-		return -rte_errno;
+			  EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+			  item, "Not supported by ntuple filter");
+			  return -rte_errno;
+		}
 	}
 
-	filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
-	filter->src_ip_mask = ipv4_mask->hdr.src_addr;
-	filter->proto_mask  = ipv4_mask->hdr.next_proto_id;
+	if (item->mask) {
+		/* get the IPv4 info */
+		if (!item->spec || !item->mask) {
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM,
+				item, "Invalid ntuple mask");
+			return -rte_errno;
+		}
+		/*Not supported last point for range*/
+		if (item->last) {
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+				item, "Not supported last point for range");
+			return -rte_errno;
+		}
 
-	ipv4_spec = (const struct rte_flow_item_ipv4 *)item->spec;
-	filter->dst_ip = ipv4_spec->hdr.dst_addr;
-	filter->src_ip = ipv4_spec->hdr.src_addr;
-	filter->proto  = ipv4_spec->hdr.next_proto_id;
+		ipv4_mask = (const struct rte_flow_item_ipv4 *)item->mask;
+		/**
+		 * Only support src & dst addresses, protocol,
+		 * others should be masked.
+		 */
+		if (ipv4_mask->hdr.version_ihl ||
+		    ipv4_mask->hdr.type_of_service ||
+		    ipv4_mask->hdr.total_length ||
+		    ipv4_mask->hdr.packet_id ||
+		    ipv4_mask->hdr.fragment_offset ||
+		    ipv4_mask->hdr.time_to_live ||
+		    ipv4_mask->hdr.hdr_checksum) {
+			rte_flow_error_set(error,
+				EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+				item, "Not supported by ntuple filter");
+			return -rte_errno;
+		}
+
+		filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
+		filter->src_ip_mask = ipv4_mask->hdr.src_addr;
+		filter->proto_mask  = ipv4_mask->hdr.next_proto_id;
+
+		ipv4_spec = (const struct rte_flow_item_ipv4 *)item->spec;
+		filter->dst_ip = ipv4_spec->hdr.dst_addr;
+		filter->src_ip = ipv4_spec->hdr.src_addr;
+		filter->proto  = ipv4_spec->hdr.next_proto_id;
+	}
 
 	/* check if the next not void item is TCP or UDP */
 	item = next_no_void_pattern(pattern, item);
@@ -359,8 +410,13 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
 		return -rte_errno;
 	}
 
-	/* get the TCP/UDP info */
 	if ((item->type != RTE_FLOW_ITEM_TYPE_END) &&
+		(!item->spec && !item->mask)) {
+		goto action;
+	}
+
+	/* get the TCP/UDP/SCTP info */
+	if (item->type != RTE_FLOW_ITEM_TYPE_END &&
 		(!item->spec || !item->mask)) {
 		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
 		rte_flow_error_set(error, EINVAL,
-- 
2.9.3


* [dpdk-dev] [PATCH v4] net/ixgbe: add flow parser ntuple support
  2017-12-26  9:28   ` [dpdk-dev] [PATCH v3] " Wei Zhao
  2017-12-26  9:39     ` [dpdk-dev] [PATCH v4] " Wei Zhao
@ 2017-12-26  9:49     ` Wei Zhao
  2017-12-27  8:31       ` [dpdk-dev] [PATCH v2] examples/flow_filtering: add Tx queues setup process Wei Zhao
                         ` (3 more replies)
  1 sibling, 4 replies; 17+ messages in thread
From: Wei Zhao @ 2017-12-26  9:49 UTC (permalink / raw)
  To: dev; +Cc: wenzhuo.lu, Wei Zhao

The ixgbe ntuple filter in rte_flow needs to support diverting data
with fewer than 5 tuple parameters, so add this new support in the
parser code.

Signed-off-by: Wei Zhao <wei.zhao1@intel.com>

---

v2:
-fix coding style issue.

v3:
-add parser vlan pattern code.

v4:
-fix patch check issue.
---
 drivers/net/ixgbe/ixgbe_flow.c | 135 +++++++++++++++++++++++++++++------------
 1 file changed, 95 insertions(+), 40 deletions(-)

diff --git a/drivers/net/ixgbe/ixgbe_flow.c b/drivers/net/ixgbe/ixgbe_flow.c
index 887d933..a10c8b1 100644
--- a/drivers/net/ixgbe/ixgbe_flow.c
+++ b/drivers/net/ixgbe/ixgbe_flow.c
@@ -207,6 +207,12 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
 	const struct rte_flow_item_udp *udp_mask;
 	const struct rte_flow_item_sctp *sctp_spec;
 	const struct rte_flow_item_sctp *sctp_mask;
+	const struct rte_flow_item_eth *eth_spec;
+	const struct rte_flow_item_eth *eth_mask;
+	const struct rte_flow_item_vlan *vlan_spec;
+	const struct rte_flow_item_vlan *vlan_mask;
+	struct rte_flow_item_eth eth_null;
+	struct rte_flow_item_vlan vlan_null;
 
 	if (!pattern) {
 		rte_flow_error_set(error,
@@ -228,6 +234,9 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
 		return -rte_errno;
 	}
 
+	memset(&eth_null, 0, sizeof(struct rte_flow_item_eth));
+	memset(&vlan_null, 0, sizeof(struct rte_flow_item_vlan));
+
 #ifdef RTE_LIBRTE_SECURITY
 	/**
 	 *  Special case for flow action type RTE_FLOW_ACTION_TYPE_SECURITY
@@ -277,6 +286,8 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
 	}
 	/* Skip Ethernet */
 	if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
+		eth_spec = (const struct rte_flow_item_eth *)item->spec;
+		eth_mask = (const struct rte_flow_item_eth *)item->mask;
 		/*Not supported last point for range*/
 		if (item->last) {
 			rte_flow_error_set(error,
@@ -287,15 +298,20 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
 
 		}
 		/* if the first item is MAC, the content should be NULL */
-		if (item->spec || item->mask) {
+		if ((item->spec || item->mask) &&
+			(memcmp(eth_spec, &eth_null,
+				sizeof(struct rte_flow_item_eth)) ||
+			 memcmp(eth_mask, &eth_null,
+				sizeof(struct rte_flow_item_eth)))) {
 			rte_flow_error_set(error, EINVAL,
 				RTE_FLOW_ERROR_TYPE_ITEM,
 				item, "Not supported by ntuple filter");
 			return -rte_errno;
 		}
-		/* check if the next not void item is IPv4 */
+		/* check if the next not void item is IPv4 or Vlan */
 		item = next_no_void_pattern(pattern, item);
-		if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
+		if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
+			item->type != RTE_FLOW_ITEM_TYPE_VLAN) {
 			rte_flow_error_set(error,
 			  EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
 			  item, "Not supported by ntuple filter");
@@ -303,48 +319,82 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
 		}
 	}
 
-	/* get the IPv4 info */
-	if (!item->spec || !item->mask) {
-		rte_flow_error_set(error, EINVAL,
-			RTE_FLOW_ERROR_TYPE_ITEM,
-			item, "Invalid ntuple mask");
-		return -rte_errno;
-	}
-	/*Not supported last point for range*/
-	if (item->last) {
-		rte_flow_error_set(error, EINVAL,
-			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
-			item, "Not supported last point for range");
-		return -rte_errno;
+	if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
+		vlan_spec = (const struct rte_flow_item_vlan *)item->spec;
+		vlan_mask = (const struct rte_flow_item_vlan *)item->mask;
+		/*Not supported last point for range*/
+		if (item->last) {
+			rte_flow_error_set(error,
+			  EINVAL,
+			  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+			  item, "Not supported last point for range");
+			return -rte_errno;
+		}
+		/* the content should be NULL */
+		if ((item->spec || item->mask) &&
+			(memcmp(vlan_spec, &vlan_null,
+				sizeof(struct rte_flow_item_vlan)) ||
+			 memcmp(vlan_mask, &vlan_null,
+				sizeof(struct rte_flow_item_vlan)))) {
 
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM,
+				item, "Not supported by ntuple filter");
+			return -rte_errno;
+		}
+		/* check if the next not void item is IPv4 */
+		item = next_no_void_pattern(pattern, item);
+		if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
+			rte_flow_error_set(error,
+			  EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+			  item, "Not supported by ntuple filter");
+			return -rte_errno;
+		}
 	}
 
-	ipv4_mask = (const struct rte_flow_item_ipv4 *)item->mask;
-	/**
-	 * Only support src & dst addresses, protocol,
-	 * others should be masked.
-	 */
-	if (ipv4_mask->hdr.version_ihl ||
-	    ipv4_mask->hdr.type_of_service ||
-	    ipv4_mask->hdr.total_length ||
-	    ipv4_mask->hdr.packet_id ||
-	    ipv4_mask->hdr.fragment_offset ||
-	    ipv4_mask->hdr.time_to_live ||
-	    ipv4_mask->hdr.hdr_checksum) {
+	if (item->mask) {
+		/* get the IPv4 info */
+		if (!item->spec || !item->mask) {
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM,
+				item, "Invalid ntuple mask");
+			return -rte_errno;
+		}
+		/*Not supported last point for range*/
+		if (item->last) {
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+				item, "Not supported last point for range");
+			return -rte_errno;
+		}
+
+		ipv4_mask = (const struct rte_flow_item_ipv4 *)item->mask;
+		/**
+		 * Only support src & dst addresses, protocol,
+		 * others should be masked.
+		 */
+		if (ipv4_mask->hdr.version_ihl ||
+		    ipv4_mask->hdr.type_of_service ||
+		    ipv4_mask->hdr.total_length ||
+		    ipv4_mask->hdr.packet_id ||
+		    ipv4_mask->hdr.fragment_offset ||
+		    ipv4_mask->hdr.time_to_live ||
+		    ipv4_mask->hdr.hdr_checksum) {
 			rte_flow_error_set(error,
-			EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
-			item, "Not supported by ntuple filter");
-		return -rte_errno;
-	}
+				EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+				item, "Not supported by ntuple filter");
+			return -rte_errno;
+		}
 
-	filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
-	filter->src_ip_mask = ipv4_mask->hdr.src_addr;
-	filter->proto_mask  = ipv4_mask->hdr.next_proto_id;
+		filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
+		filter->src_ip_mask = ipv4_mask->hdr.src_addr;
+		filter->proto_mask  = ipv4_mask->hdr.next_proto_id;
 
-	ipv4_spec = (const struct rte_flow_item_ipv4 *)item->spec;
-	filter->dst_ip = ipv4_spec->hdr.dst_addr;
-	filter->src_ip = ipv4_spec->hdr.src_addr;
-	filter->proto  = ipv4_spec->hdr.next_proto_id;
+		ipv4_spec = (const struct rte_flow_item_ipv4 *)item->spec;
+		filter->dst_ip = ipv4_spec->hdr.dst_addr;
+		filter->src_ip = ipv4_spec->hdr.src_addr;
+		filter->proto  = ipv4_spec->hdr.next_proto_id;
+	}
 
 	/* check if the next not void item is TCP or UDP */
 	item = next_no_void_pattern(pattern, item);
@@ -359,8 +409,13 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
 		return -rte_errno;
 	}
 
-	/* get the TCP/UDP info */
 	if ((item->type != RTE_FLOW_ITEM_TYPE_END) &&
+		(!item->spec && !item->mask)) {
+		goto action;
+	}
+
+	/* get the TCP/UDP/SCTP info */
+	if (item->type != RTE_FLOW_ITEM_TYPE_END &&
 		(!item->spec || !item->mask)) {
 		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
 		rte_flow_error_set(error, EINVAL,
-- 
2.9.3


* [dpdk-dev] [PATCH v2] examples/flow_filtering: add Tx queues setup process
  2017-12-26  9:49     ` Wei Zhao
@ 2017-12-27  8:31       ` Wei Zhao
  2017-12-27  8:32       ` Wei Zhao
                         ` (2 subsequent siblings)
  3 siblings, 0 replies; 17+ messages in thread
From: Wei Zhao @ 2017-12-27  8:31 UTC (permalink / raw)
  To: dev; +Cc: orika, zhao wei

From: zhao wei <wei.zhao1@intel.com>

This example does not set up any Tx queues, but some NICs will
block during start-up if there are only Rx queues and no Tx queue.
So add the Tx queue setup process to the main code.

Signed-off-by: Wei Zhao <wei.zhao1@intel.com>

---

v2:
-add support the new tx offloads.
---
 examples/flow_filtering/main.c | 26 ++++++++++++++++++++++++++
 1 file changed, 26 insertions(+)

diff --git a/examples/flow_filtering/main.c b/examples/flow_filtering/main.c
index 7d739b4..4a07b63 100644
--- a/examples/flow_filtering/main.c
+++ b/examples/flow_filtering/main.c
@@ -149,7 +149,18 @@ init_port(void)
 			/**< CRC stripped by hardware */
 			.hw_strip_crc   = 1,
 		},
+		.txmode = {
+			.offloads =
+				DEV_TX_OFFLOAD_VLAN_INSERT |
+				DEV_TX_OFFLOAD_IPV4_CKSUM  |
+				DEV_TX_OFFLOAD_UDP_CKSUM   |
+				DEV_TX_OFFLOAD_TCP_CKSUM   |
+				DEV_TX_OFFLOAD_SCTP_CKSUM  |
+				DEV_TX_OFFLOAD_TCP_TSO,
+		},
 	};
+	struct rte_eth_txconf txq_conf;
+	struct rte_eth_dev_info dev_info;
 
 	printf(":: initializing port: %d\n", port_id);
 	ret = rte_eth_dev_configure(port_id,
@@ -173,6 +184,21 @@ init_port(void)
 		}
 	}
 
+	rte_eth_dev_info_get(port_id, &dev_info);
+	txq_conf = dev_info.default_txconf;
+	txq_conf.offloads = port_conf.txmode.offloads;
+
+	for (i = 0; i < nr_queues; i++) {
+		ret = rte_eth_tx_queue_setup(port_id, i, 512,
+				rte_eth_dev_socket_id(port_id),
+				&txq_conf);
+		if (ret < 0) {
+			rte_exit(EXIT_FAILURE,
+				":: Tx queue setup failed: err=%d, port=%u\n",
+				ret, port_id);
+		}
+	}
+
 	rte_eth_promiscuous_enable(port_id);
 	ret = rte_eth_dev_start(port_id);
 	if (ret < 0) {
-- 
2.9.3


* [dpdk-dev] [PATCH v2] examples/flow_filtering: add Tx queues setup process
  2017-12-26  9:49     ` Wei Zhao
  2017-12-27  8:31       ` [dpdk-dev] [PATCH v2] examples/flow_filtering: add Tx queues setup process Wei Zhao
@ 2017-12-27  8:32       ` Wei Zhao
  2017-12-27 11:53         ` Ori Kam
  2018-01-12 12:25         ` Ferruh Yigit
  2018-01-04  8:27       ` [dpdk-dev] [PATCH v4] net/ixgbe: add flow parser ntuple support Lu, Wenzhuo
  2018-01-05  6:13       ` [dpdk-dev] [PATCH v5] " Wei Zhao
  3 siblings, 2 replies; 17+ messages in thread
From: Wei Zhao @ 2017-12-27  8:32 UTC (permalink / raw)
  To: dev; +Cc: orika, Wei Zhao

This example does not set up any Tx queues, but some NICs will
block during start-up if there are only Rx queues and no Tx queue.
So add the Tx queue setup process to the main code.

Signed-off-by: Wei Zhao <wei.zhao1@intel.com>

---

v2:
-add support the new tx offloads.
---
 examples/flow_filtering/main.c | 26 ++++++++++++++++++++++++++
 1 file changed, 26 insertions(+)

diff --git a/examples/flow_filtering/main.c b/examples/flow_filtering/main.c
index 7d739b4..4a07b63 100644
--- a/examples/flow_filtering/main.c
+++ b/examples/flow_filtering/main.c
@@ -149,7 +149,18 @@ init_port(void)
 			/**< CRC stripped by hardware */
 			.hw_strip_crc   = 1,
 		},
+		.txmode = {
+			.offloads =
+				DEV_TX_OFFLOAD_VLAN_INSERT |
+				DEV_TX_OFFLOAD_IPV4_CKSUM  |
+				DEV_TX_OFFLOAD_UDP_CKSUM   |
+				DEV_TX_OFFLOAD_TCP_CKSUM   |
+				DEV_TX_OFFLOAD_SCTP_CKSUM  |
+				DEV_TX_OFFLOAD_TCP_TSO,
+		},
 	};
+	struct rte_eth_txconf txq_conf;
+	struct rte_eth_dev_info dev_info;
 
 	printf(":: initializing port: %d\n", port_id);
 	ret = rte_eth_dev_configure(port_id,
@@ -173,6 +184,21 @@ init_port(void)
 		}
 	}
 
+	rte_eth_dev_info_get(port_id, &dev_info);
+	txq_conf = dev_info.default_txconf;
+	txq_conf.offloads = port_conf.txmode.offloads;
+
+	for (i = 0; i < nr_queues; i++) {
+		ret = rte_eth_tx_queue_setup(port_id, i, 512,
+				rte_eth_dev_socket_id(port_id),
+				&txq_conf);
+		if (ret < 0) {
+			rte_exit(EXIT_FAILURE,
+				":: Tx queue setup failed: err=%d, port=%u\n",
+				ret, port_id);
+		}
+	}
+
 	rte_eth_promiscuous_enable(port_id);
 	ret = rte_eth_dev_start(port_id);
 	if (ret < 0) {
-- 
2.9.3


* Re: [dpdk-dev] [PATCH v2] examples/flow_filtering: add Tx queues setup process
  2017-12-27  8:32       ` Wei Zhao
@ 2017-12-27 11:53         ` Ori Kam
  2017-12-28  1:54           ` Zhao1, Wei
  2018-01-08  6:31           ` Zhang, Helin
  2018-01-12 12:25         ` Ferruh Yigit
  1 sibling, 2 replies; 17+ messages in thread
From: Ori Kam @ 2017-12-27 11:53 UTC (permalink / raw)
  To: Wei Zhao, dev



> -----Original Message-----
> From: Wei Zhao [mailto:wei.zhao1@intel.com]
> Sent: Wednesday, December 27, 2017 10:32 AM
> To: dev@dpdk.org
> Cc: Ori Kam <orika@mellanox.com>; Wei Zhao <wei.zhao1@intel.com>
> Subject: [PATCH v2] examples/flow_filtering: add Tx queues setup process
> 
> This example does not set up any Tx queues, but some NICs will block
> during start-up if there are only Rx queues and no Tx queue. So add
> the Tx queue setup process to the main code.
> 
> Signed-off-by: Wei Zhao <wei.zhao1@intel.com>
> 
> ---
> 
> v2:
> -add support the new tx offloads.
> ---
>  examples/flow_filtering/main.c | 26 ++++++++++++++++++++++++++
>  1 file changed, 26 insertions(+)
> 
> diff --git a/examples/flow_filtering/main.c b/examples/flow_filtering/main.c
> index 7d739b4..4a07b63 100644
> --- a/examples/flow_filtering/main.c
> +++ b/examples/flow_filtering/main.c
> @@ -149,7 +149,18 @@ init_port(void)
>  			/**< CRC stripped by hardware */
>  			.hw_strip_crc   = 1,
>  		},
> +		.txmode = {
> +			.offloads =
> +				DEV_TX_OFFLOAD_VLAN_INSERT |
> +				DEV_TX_OFFLOAD_IPV4_CKSUM  |
> +				DEV_TX_OFFLOAD_UDP_CKSUM   |
> +				DEV_TX_OFFLOAD_TCP_CKSUM   |
> +				DEV_TX_OFFLOAD_SCTP_CKSUM  |
> +				DEV_TX_OFFLOAD_TCP_TSO,
> +		},
>  	};
> +	struct rte_eth_txconf txq_conf;
> +	struct rte_eth_dev_info dev_info;
> 
>  	printf(":: initializing port: %d\n", port_id);
>  	ret = rte_eth_dev_configure(port_id,
> @@ -173,6 +184,21 @@ init_port(void)
>  		}
>  	}
> 
> +	rte_eth_dev_info_get(port_id, &dev_info);
> +	txq_conf = dev_info.default_txconf;
> +	txq_conf.offloads = port_conf.txmode.offloads;
> +
> +	for (i = 0; i < nr_queues; i++) {
> +		ret = rte_eth_tx_queue_setup(port_id, i, 512,
> +				rte_eth_dev_socket_id(port_id),
> +				&txq_conf);
> +		if (ret < 0) {
> +			rte_exit(EXIT_FAILURE,
> +				":: Tx queue setup failed: err=%d,
> port=%u\n",
> +				ret, port_id);
> +		}
> +	}
> +
>  	rte_eth_promiscuous_enable(port_id);
>  	ret = rte_eth_dev_start(port_id);
>  	if (ret < 0) {
> --
> 2.9.3

Acked-by: Ori Kam <orika@mellanox.com>


* Re: [dpdk-dev] [PATCH v2] examples/flow_filtering: add Tx queues setup process
  2017-12-27 11:53         ` Ori Kam
@ 2017-12-28  1:54           ` Zhao1, Wei
  2018-01-08  6:31           ` Zhang, Helin
  1 sibling, 0 replies; 17+ messages in thread
From: Zhao1, Wei @ 2017-12-28  1:54 UTC (permalink / raw)
  To: Ori Kam, dev

Thank you.

> -----Original Message-----
> From: Ori Kam [mailto:orika@mellanox.com]
> Sent: Wednesday, December 27, 2017 7:53 PM
> To: Zhao1, Wei <wei.zhao1@intel.com>; dev@dpdk.org
> Subject: RE: [PATCH v2] examples/flow_filtering: add Tx queues setup
> process
> 
> 
> 
> > -----Original Message-----
> > From: Wei Zhao [mailto:wei.zhao1@intel.com]
> > Sent: Wednesday, December 27, 2017 10:32 AM
> > To: dev@dpdk.org
> > Cc: Ori Kam <orika@mellanox.com>; Wei Zhao <wei.zhao1@intel.com>
> > Subject: [PATCH v2] examples/flow_filtering: add Tx queues setup
> > process
> >
> > This example does not set up any Tx queues, but some NICs will
> > block during start-up if there are only Rx queues and no Tx queue.
> > So add the Tx queue setup process to the main code.
> >
> > Signed-off-by: Wei Zhao <wei.zhao1@intel.com>
> >
> > ---
> >
> > v2:
> > -add support the new tx offloads.
> > ---
> >  examples/flow_filtering/main.c | 26 ++++++++++++++++++++++++++
> >  1 file changed, 26 insertions(+)
> >
> > diff --git a/examples/flow_filtering/main.c
> > b/examples/flow_filtering/main.c index 7d739b4..4a07b63 100644
> > --- a/examples/flow_filtering/main.c
> > +++ b/examples/flow_filtering/main.c
> > @@ -149,7 +149,18 @@ init_port(void)
> >  			/**< CRC stripped by hardware */
> >  			.hw_strip_crc   = 1,
> >  		},
> > +		.txmode = {
> > +			.offloads =
> > +				DEV_TX_OFFLOAD_VLAN_INSERT |
> > +				DEV_TX_OFFLOAD_IPV4_CKSUM  |
> > +				DEV_TX_OFFLOAD_UDP_CKSUM   |
> > +				DEV_TX_OFFLOAD_TCP_CKSUM   |
> > +				DEV_TX_OFFLOAD_SCTP_CKSUM  |
> > +				DEV_TX_OFFLOAD_TCP_TSO,
> > +		},
> >  	};
> > +	struct rte_eth_txconf txq_conf;
> > +	struct rte_eth_dev_info dev_info;
> >
> >  	printf(":: initializing port: %d\n", port_id);
> >  	ret = rte_eth_dev_configure(port_id, @@ -173,6 +184,21 @@
> > init_port(void)
> >  		}
> >  	}
> >
> > +	rte_eth_dev_info_get(port_id, &dev_info);
> > +	txq_conf = dev_info.default_txconf;
> > +	txq_conf.offloads = port_conf.txmode.offloads;
> > +
> > +	for (i = 0; i < nr_queues; i++) {
> > +		ret = rte_eth_tx_queue_setup(port_id, i, 512,
> > +				rte_eth_dev_socket_id(port_id),
> > +				&txq_conf);
> > +		if (ret < 0) {
> > +			rte_exit(EXIT_FAILURE,
> > +				":: Tx queue setup failed: err=%d,
> > port=%u\n",
> > +				ret, port_id);
> > +		}
> > +	}
> > +
> >  	rte_eth_promiscuous_enable(port_id);
> >  	ret = rte_eth_dev_start(port_id);
> >  	if (ret < 0) {
> > --
> > 2.9.3
> 
> Acked-by: Ori Kam <orika@mellanox.com>


* Re: [dpdk-dev] [PATCH v4] net/ixgbe: add flow parser ntuple support
  2017-12-26  9:49     ` Wei Zhao
  2017-12-27  8:31       ` [dpdk-dev] [PATCH v2] examples/flow_filtering: add Tx queues setup process Wei Zhao
  2017-12-27  8:32       ` Wei Zhao
@ 2018-01-04  8:27       ` Lu, Wenzhuo
  2018-01-04  8:41         ` Zhao1, Wei
  2018-01-05  6:13       ` [dpdk-dev] [PATCH v5] " Wei Zhao
  3 siblings, 1 reply; 17+ messages in thread
From: Lu, Wenzhuo @ 2018-01-04  8:27 UTC (permalink / raw)
  To: Zhao1, Wei, dev

Hi Wei,


> -----Original Message-----
> From: Zhao1, Wei
> Sent: Tuesday, December 26, 2017 5:49 PM
> To: dev@dpdk.org
> Cc: Lu, Wenzhuo <wenzhuo.lu@intel.com>; Zhao1, Wei
> <wei.zhao1@intel.com>
> Subject: [PATCH v4] net/ixgbe: add flow parser ntuple support
> 
> The ixgbe ntuple filter in rte_flow needs to support diverting data with
> fewer than 5 tuple parameters, so add this new support in the parser code.
> 
> Signed-off-by: Wei Zhao <wei.zhao1@intel.com>
> 
> ---
> 
> v2:
> -fix coding style issue.
> 
> v3:
> -add parser vlan pattern code.
I think this patch is good. I just suggest putting this VLAN info into the commit log, as the words here will be removed when the patch is merged.

> 
> v4:
> -fix patch check issue.


* Re: [dpdk-dev] [PATCH v4] net/ixgbe: add flow parser ntuple support
  2018-01-04  8:27       ` [dpdk-dev] [PATCH v4] net/ixgbe: add flow parser ntuple support Lu, Wenzhuo
@ 2018-01-04  8:41         ` Zhao1, Wei
  0 siblings, 0 replies; 17+ messages in thread
From: Zhao1, Wei @ 2018-01-04  8:41 UTC (permalink / raw)
  To: Lu, Wenzhuo, dev

Hi, Wenzhuo

> -----Original Message-----
> From: Lu, Wenzhuo
> Sent: Thursday, January 4, 2018 4:27 PM
> To: Zhao1, Wei <wei.zhao1@intel.com>; dev@dpdk.org
> Subject: RE: [PATCH v4] net/ixgbe: add flow parser ntuple support
> 
> Hi Wei,
> 
> 
> > -----Original Message-----
> > From: Zhao1, Wei
> > Sent: Tuesday, December 26, 2017 5:49 PM
> > To: dev@dpdk.org
> > Cc: Lu, Wenzhuo <wenzhuo.lu@intel.com>; Zhao1, Wei
> > <wei.zhao1@intel.com>
> > Subject: [PATCH v4] net/ixgbe: add flow parser ntuple support
> >
> > The ixgbe ntuple filter in rte_flow needs to support diverting data
> > with fewer than 5 tuple parameters, so add this new support in the
> > parser code.
> >
> > Signed-off-by: Wei Zhao <wei.zhao1@intel.com>
> >
> > ---
> >
> > v2:
> > -fix coding style issue.
> >
> > v3:
> > -add parser vlan pattern code.
> I think this patch is good. I just suggest putting this VLAN info into
> the commit log, as the words here will be removed when the patch is
> merged.
> 

OK, I will update it as you suggest.

> >
> > v4:
> > -fix patch check issue.


* [dpdk-dev] [PATCH v5] net/ixgbe: add flow parser ntuple support
  2017-12-26  9:49     ` Wei Zhao
                         ` (2 preceding siblings ...)
  2018-01-04  8:27       ` [dpdk-dev] [PATCH v4] net/ixgbe: add flow parser ntuple support Lu, Wenzhuo
@ 2018-01-05  6:13       ` Wei Zhao
  2018-01-05  7:57         ` Lu, Wenzhuo
  3 siblings, 1 reply; 17+ messages in thread
From: Wei Zhao @ 2018-01-05  6:13 UTC (permalink / raw)
  To: dev; +Cc: wenzhuo.lu, Wei Zhao

The ixgbe ntuple filter in rte_flow needs to support diverting data
with fewer than 5 tuple parameters, so add this new support in the
parser code. This patch also adds VLAN pattern parsing code to the
ntuple filter in order to handle cases that include VLAN in the flow
API.
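
For illustration, a minimal sketch (an assumption based on the rte_flow
API, not code from the patch) of the VLAN case this version adds: an
empty VLAN item between ETH and IPv4, which the parser now skips
instead of rejecting. These declarations are assumed to sit inside the
function that builds the rule; the address is hypothetical.

	/* Hypothetical pattern eth / vlan / ipv4: the ETH and VLAN
	 * items carry no spec/mask, so only the IPv4 fields match. */
	struct rte_flow_item_ipv4 ip_spec = {
		.hdr.dst_addr = rte_cpu_to_be_32(0x0a000001), /* 10.0.0.1 */
	};
	struct rte_flow_item_ipv4 ip_mask = {
		.hdr.dst_addr = rte_cpu_to_be_32(0xffffffff),
	};
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },   /* spec/mask NULL */
		{ .type = RTE_FLOW_ITEM_TYPE_VLAN },  /* spec/mask NULL */
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
		  .spec = &ip_spec, .mask = &ip_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};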

Signed-off-by: Wei Zhao <wei.zhao1@intel.com>

---

v2:
-fix coding style issue.

v3:
-add parser vlan pattern code.

v4:
-fix patch check issue.

v5:
-add more commit log message.
---
 drivers/net/ixgbe/ixgbe_flow.c | 135 +++++++++++++++++++++++++++++------------
 1 file changed, 95 insertions(+), 40 deletions(-)

diff --git a/drivers/net/ixgbe/ixgbe_flow.c b/drivers/net/ixgbe/ixgbe_flow.c
index 8f964cf..6af5ce1 100644
--- a/drivers/net/ixgbe/ixgbe_flow.c
+++ b/drivers/net/ixgbe/ixgbe_flow.c
@@ -214,6 +214,12 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
 	const struct rte_flow_item_udp *udp_mask;
 	const struct rte_flow_item_sctp *sctp_spec;
 	const struct rte_flow_item_sctp *sctp_mask;
+	const struct rte_flow_item_eth *eth_spec;
+	const struct rte_flow_item_eth *eth_mask;
+	const struct rte_flow_item_vlan *vlan_spec;
+	const struct rte_flow_item_vlan *vlan_mask;
+	struct rte_flow_item_eth eth_null;
+	struct rte_flow_item_vlan vlan_null;
 
 	if (!pattern) {
 		rte_flow_error_set(error,
@@ -235,6 +241,9 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
 		return -rte_errno;
 	}
 
+	memset(&eth_null, 0, sizeof(struct rte_flow_item_eth));
+	memset(&vlan_null, 0, sizeof(struct rte_flow_item_vlan));
+
 #ifdef RTE_LIBRTE_SECURITY
 	/**
 	 *  Special case for flow action type RTE_FLOW_ACTION_TYPE_SECURITY
@@ -284,6 +293,8 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
 	}
 	/* Skip Ethernet */
 	if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
+		eth_spec = (const struct rte_flow_item_eth *)item->spec;
+		eth_mask = (const struct rte_flow_item_eth *)item->mask;
 		/*Not supported last point for range*/
 		if (item->last) {
 			rte_flow_error_set(error,
@@ -294,15 +305,20 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
 
 		}
 		/* if the first item is MAC, the content should be NULL */
-		if (item->spec || item->mask) {
+		if ((item->spec || item->mask) &&
+			(memcmp(eth_spec, &eth_null,
+				sizeof(struct rte_flow_item_eth)) ||
+			 memcmp(eth_mask, &eth_null,
+				sizeof(struct rte_flow_item_eth)))) {
 			rte_flow_error_set(error, EINVAL,
 				RTE_FLOW_ERROR_TYPE_ITEM,
 				item, "Not supported by ntuple filter");
 			return -rte_errno;
 		}
-		/* check if the next not void item is IPv4 */
+		/* check if the next not void item is IPv4 or Vlan */
 		item = next_no_void_pattern(pattern, item);
-		if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
+		if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
+			item->type != RTE_FLOW_ITEM_TYPE_VLAN) {
 			rte_flow_error_set(error,
 			  EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
 			  item, "Not supported by ntuple filter");
@@ -310,48 +326,82 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
 		}
 	}
 
-	/* get the IPv4 info */
-	if (!item->spec || !item->mask) {
-		rte_flow_error_set(error, EINVAL,
-			RTE_FLOW_ERROR_TYPE_ITEM,
-			item, "Invalid ntuple mask");
-		return -rte_errno;
-	}
-	/*Not supported last point for range*/
-	if (item->last) {
-		rte_flow_error_set(error, EINVAL,
-			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
-			item, "Not supported last point for range");
-		return -rte_errno;
+	if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
+		vlan_spec = (const struct rte_flow_item_vlan *)item->spec;
+		vlan_mask = (const struct rte_flow_item_vlan *)item->mask;
+		/*Not supported last point for range*/
+		if (item->last) {
+			rte_flow_error_set(error,
+			  EINVAL,
+			  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+			  item, "Not supported last point for range");
+			return -rte_errno;
+		}
+		/* the content should be NULL */
+		if ((item->spec || item->mask) &&
+			(memcmp(vlan_spec, &vlan_null,
+				sizeof(struct rte_flow_item_vlan)) ||
+			 memcmp(vlan_mask, &vlan_null,
+				sizeof(struct rte_flow_item_vlan)))) {
 
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM,
+				item, "Not supported by ntuple filter");
+			return -rte_errno;
+		}
+		/* check if the next not void item is IPv4 */
+		item = next_no_void_pattern(pattern, item);
+		if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
+			rte_flow_error_set(error,
+			  EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+			  item, "Not supported by ntuple filter");
+			return -rte_errno;
+		}
 	}
 
-	ipv4_mask = (const struct rte_flow_item_ipv4 *)item->mask;
-	/**
-	 * Only support src & dst addresses, protocol,
-	 * others should be masked.
-	 */
-	if (ipv4_mask->hdr.version_ihl ||
-	    ipv4_mask->hdr.type_of_service ||
-	    ipv4_mask->hdr.total_length ||
-	    ipv4_mask->hdr.packet_id ||
-	    ipv4_mask->hdr.fragment_offset ||
-	    ipv4_mask->hdr.time_to_live ||
-	    ipv4_mask->hdr.hdr_checksum) {
+	if (item->mask) {
+		/* get the IPv4 info */
+		if (!item->spec || !item->mask) {
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM,
+				item, "Invalid ntuple mask");
+			return -rte_errno;
+		}
+		/*Not supported last point for range*/
+		if (item->last) {
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+				item, "Not supported last point for range");
+			return -rte_errno;
+		}
+
+		ipv4_mask = (const struct rte_flow_item_ipv4 *)item->mask;
+		/**
+		 * Only support src & dst addresses, protocol,
+		 * others should be masked.
+		 */
+		if (ipv4_mask->hdr.version_ihl ||
+		    ipv4_mask->hdr.type_of_service ||
+		    ipv4_mask->hdr.total_length ||
+		    ipv4_mask->hdr.packet_id ||
+		    ipv4_mask->hdr.fragment_offset ||
+		    ipv4_mask->hdr.time_to_live ||
+		    ipv4_mask->hdr.hdr_checksum) {
 			rte_flow_error_set(error,
-			EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
-			item, "Not supported by ntuple filter");
-		return -rte_errno;
-	}
+				EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+				item, "Not supported by ntuple filter");
+			return -rte_errno;
+		}
 
-	filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
-	filter->src_ip_mask = ipv4_mask->hdr.src_addr;
-	filter->proto_mask  = ipv4_mask->hdr.next_proto_id;
+		filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
+		filter->src_ip_mask = ipv4_mask->hdr.src_addr;
+		filter->proto_mask  = ipv4_mask->hdr.next_proto_id;
 
-	ipv4_spec = (const struct rte_flow_item_ipv4 *)item->spec;
-	filter->dst_ip = ipv4_spec->hdr.dst_addr;
-	filter->src_ip = ipv4_spec->hdr.src_addr;
-	filter->proto  = ipv4_spec->hdr.next_proto_id;
+		ipv4_spec = (const struct rte_flow_item_ipv4 *)item->spec;
+		filter->dst_ip = ipv4_spec->hdr.dst_addr;
+		filter->src_ip = ipv4_spec->hdr.src_addr;
+		filter->proto  = ipv4_spec->hdr.next_proto_id;
+	}
 
 	/* check if the next not void item is TCP or UDP */
 	item = next_no_void_pattern(pattern, item);
@@ -366,8 +416,13 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
 		return -rte_errno;
 	}
 
-	/* get the TCP/UDP info */
 	if ((item->type != RTE_FLOW_ITEM_TYPE_END) &&
+		(!item->spec && !item->mask)) {
+		goto action;
+	}
+
+	/* get the TCP/UDP/SCTP info */
+	if (item->type != RTE_FLOW_ITEM_TYPE_END &&
 		(!item->spec || !item->mask)) {
 		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
 		rte_flow_error_set(error, EINVAL,
-- 
2.9.3


* Re: [dpdk-dev] [PATCH v5] net/ixgbe: add flow parser ntuple support
  2018-01-05  6:13       ` [dpdk-dev] [PATCH v5] " Wei Zhao
@ 2018-01-05  7:57         ` Lu, Wenzhuo
  2018-01-07  8:53           ` Zhang, Helin
  0 siblings, 1 reply; 17+ messages in thread
From: Lu, Wenzhuo @ 2018-01-05  7:57 UTC (permalink / raw)
  To: Zhao1, Wei, dev

Hi,

> -----Original Message-----
> From: Zhao1, Wei
> Sent: Friday, January 5, 2018 2:14 PM
> To: dev@dpdk.org
> Cc: Lu, Wenzhuo <wenzhuo.lu@intel.com>; Zhao1, Wei
> <wei.zhao1@intel.com>
> Subject: [PATCH v5] net/ixgbe: add flow parser ntuple support
> 
> The ixgbe ntuple filter in rte_flow needs to support diverting data with
> fewer than 5 tuple parameters, so add this new support in the parser
> code. This patch also adds VLAN pattern parsing code to the ntuple
> filter in order to handle cases that include VLAN in the flow API.
> 
> Signed-off-by: Wei Zhao <wei.zhao1@intel.com>
Acked-by: Wenzhuo Lu <wenzhuo.lu@intel.com>


* Re: [dpdk-dev] [PATCH v5] net/ixgbe: add flow parser ntuple support
  2018-01-05  7:57         ` Lu, Wenzhuo
@ 2018-01-07  8:53           ` Zhang, Helin
  0 siblings, 0 replies; 17+ messages in thread
From: Zhang, Helin @ 2018-01-07  8:53 UTC (permalink / raw)
  To: Lu, Wenzhuo, Zhao1, Wei; +Cc: dev



> -----Original Message-----
> From: dev [mailto:dev-bounces@dpdk.org] On Behalf Of Lu, Wenzhuo
> Sent: Friday, January 5, 2018 3:58 PM
> To: Zhao1, Wei; dev@dpdk.org
> Subject: Re: [dpdk-dev] [PATCH v5] net/ixgbe: add flow parser ntuple support
> 
> Hi,
> 
> > -----Original Message-----
> > From: Zhao1, Wei
> > Sent: Friday, January 5, 2018 2:14 PM
> > To: dev@dpdk.org
> > Cc: Lu, Wenzhuo <wenzhuo.lu@intel.com>; Zhao1, Wei
> > <wei.zhao1@intel.com>
> > Subject: [PATCH v5] net/ixgbe: add flow parser ntuple support
> >
> > The ixgbe ntuple filter in rte_flow needs to support diverting data
> > with fewer than 5 tuple parameters, so add this new support in the
> > parser code. This patch also adds VLAN pattern parsing code to the
> > ntuple filter in order to handle cases that include VLAN in the
> > flow API.
> >
> > Signed-off-by: Wei Zhao <wei.zhao1@intel.com>
> Acked-by: Wenzhuo Lu <wenzhuo.lu@intel.com>
Applied into dpdk-next-net-intel. Thanks!

/Helin


* Re: [dpdk-dev] [PATCH v2] examples/flow_filtering: add Tx queues setup process
  2017-12-27 11:53         ` Ori Kam
  2017-12-28  1:54           ` Zhao1, Wei
@ 2018-01-08  6:31           ` Zhang, Helin
  1 sibling, 0 replies; 17+ messages in thread
From: Zhang, Helin @ 2018-01-08  6:31 UTC (permalink / raw)
  To: Ori Kam, Zhao1, Wei, dev



> -----Original Message-----
> From: dev [mailto:dev-bounces@dpdk.org] On Behalf Of Ori Kam
> Sent: Wednesday, December 27, 2017 7:53 PM
> To: Zhao1, Wei; dev@dpdk.org
> Subject: Re: [dpdk-dev] [PATCH v2] examples/flow_filtering: add Tx queues
> setup process
> 
> 
> 
> > -----Original Message-----
> > From: Wei Zhao [mailto:wei.zhao1@intel.com]
> > Sent: Wednesday, December 27, 2017 10:32 AM
> > To: dev@dpdk.org
> > Cc: Ori Kam <orika@mellanox.com>; Wei Zhao <wei.zhao1@intel.com>
> > Subject: [PATCH v2] examples/flow_filtering: add Tx queues setup
> > process
> >
> > This example does not set up any Tx queues, but some NICs will
> > block during start-up if there are only Rx queues and no Tx queue.
> > So add the Tx queue setup process to the main code.
> >
> > Signed-off-by: Wei Zhao <wei.zhao1@intel.com>
> >
> > ---
> >
> > v2:
> > -add support the new tx offloads.
> > ---
> >  examples/flow_filtering/main.c | 26 ++++++++++++++++++++++++++
> >  1 file changed, 26 insertions(+)
> >
> > diff --git a/examples/flow_filtering/main.c
> > b/examples/flow_filtering/main.c index 7d739b4..4a07b63 100644
> > --- a/examples/flow_filtering/main.c
> > +++ b/examples/flow_filtering/main.c
> > @@ -149,7 +149,18 @@ init_port(void)
> >  			/**< CRC stripped by hardware */
> >  			.hw_strip_crc   = 1,
> >  		},
> > +		.txmode = {
> > +			.offloads =
> > +				DEV_TX_OFFLOAD_VLAN_INSERT |
> > +				DEV_TX_OFFLOAD_IPV4_CKSUM  |
> > +				DEV_TX_OFFLOAD_UDP_CKSUM   |
> > +				DEV_TX_OFFLOAD_TCP_CKSUM   |
> > +				DEV_TX_OFFLOAD_SCTP_CKSUM  |
> > +				DEV_TX_OFFLOAD_TCP_TSO,
> > +		},
> >  	};
> > +	struct rte_eth_txconf txq_conf;
> > +	struct rte_eth_dev_info dev_info;
> >
> >  	printf(":: initializing port: %d\n", port_id);
> >  	ret = rte_eth_dev_configure(port_id, @@ -173,6 +184,21 @@
> > init_port(void)
> >  		}
> >  	}
> >
> > +	rte_eth_dev_info_get(port_id, &dev_info);
> > +	txq_conf = dev_info.default_txconf;
> > +	txq_conf.offloads = port_conf.txmode.offloads;
> > +
> > +	for (i = 0; i < nr_queues; i++) {
> > +		ret = rte_eth_tx_queue_setup(port_id, i, 512,
> > +				rte_eth_dev_socket_id(port_id),
> > +				&txq_conf);
> > +		if (ret < 0) {
> > +			rte_exit(EXIT_FAILURE,
> > +				":: Tx queue setup failed: err=%d,
> > port=%u\n",
> > +				ret, port_id);
> > +		}
> > +	}
> > +
> >  	rte_eth_promiscuous_enable(port_id);
> >  	ret = rte_eth_dev_start(port_id);
> >  	if (ret < 0) {
> > --
> > 2.9.3
> 
> Acked-by: Ori Kam <orika@mellanox.com>
Applied to dpdk-next-net-intel, thanks!

/Helin


* Re: [dpdk-dev] [PATCH v2] examples/flow_filtering: add Tx queues setup process
  2017-12-27  8:32       ` Wei Zhao
  2017-12-27 11:53         ` Ori Kam
@ 2018-01-12 12:25         ` Ferruh Yigit
  2018-01-16 13:52           ` Shahaf Shuler
  1 sibling, 1 reply; 17+ messages in thread
From: Ferruh Yigit @ 2018-01-12 12:25 UTC (permalink / raw)
  To: Wei Zhao, dev, Shahaf Shuler; +Cc: orika

On 12/27/2017 8:32 AM, Wei Zhao wrote:
> This example does not set up any Tx queues, but some NICs will
> block during start-up if there are only Rx queues and no Tx queue.
> So add the Tx queue setup process to the main code.
> 
> Signed-off-by: Wei Zhao <wei.zhao1@intel.com>

<...>

> @@ -149,7 +149,18 @@ init_port(void)
>  			/**< CRC stripped by hardware */
>  			.hw_strip_crc   = 1,
>  		},
> +		.txmode = {
> +			.offloads =
> +				DEV_TX_OFFLOAD_VLAN_INSERT |
> +				DEV_TX_OFFLOAD_IPV4_CKSUM  |
> +				DEV_TX_OFFLOAD_UDP_CKSUM   |
> +				DEV_TX_OFFLOAD_TCP_CKSUM   |
> +				DEV_TX_OFFLOAD_SCTP_CKSUM  |
> +				DEV_TX_OFFLOAD_TCP_TSO,
> +		},
>  	};
> +	struct rte_eth_txconf txq_conf;
> +	struct rte_eth_dev_info dev_info;
>  
>  	printf(":: initializing port: %d\n", port_id);
>  	ret = rte_eth_dev_configure(port_id,
> @@ -173,6 +184,21 @@ init_port(void)
>  		}
>  	}
>  
> +	rte_eth_dev_info_get(port_id, &dev_info);
> +	txq_conf = dev_info.default_txconf;
> +	txq_conf.offloads = port_conf.txmode.offloads;

Hi Wei, Shahaf,

When the new offload method is used for txmode, shouldn't
ETH_TXQ_FLAGS_IGNORE be set? Otherwise txq_flags is assumed to have
valid values, no?
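
For reference, a minimal sketch of the suggested fix (an assumption
about the intended change, not code from either patch):

	rte_eth_dev_info_get(port_id, &dev_info);
	txq_conf = dev_info.default_txconf;
	/* Tell the PMD to honour txq_conf.offloads instead of the
	 * legacy txq_flags bits. */
	txq_conf.txq_flags = ETH_TXQ_FLAGS_IGNORE;
	txq_conf.offloads = port_conf.txmode.offloads;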


* Re: [dpdk-dev] [PATCH v2] examples/flow_filtering: add Tx queues setup process
  2018-01-12 12:25         ` Ferruh Yigit
@ 2018-01-16 13:52           ` Shahaf Shuler
  0 siblings, 0 replies; 17+ messages in thread
From: Shahaf Shuler @ 2018-01-16 13:52 UTC (permalink / raw)
  To: Ferruh Yigit, Wei Zhao, dev; +Cc: Ori Kam

Friday, January 12, 2018 2:25 PM, Ferruh Yigit:
> > +	rte_eth_dev_info_get(port_id, &dev_info);
> > +	txq_conf = dev_info.default_txconf;
> > +	txq_conf.offloads = port_conf.txmode.offloads;
> 
> Hi Wei, Shahaf,
> 
> When the new offload method is used for txmode, shouldn't
> ETH_TXQ_FLAGS_IGNORE be set? Otherwise txq_flags is assumed to
> have valid values, no?

Yes, this is correct. See the other example code that has already been
converted.

