DPDK patches and discussions
From: Wei Zhao <wei.zhao1@intel.com>
To: dev@dpdk.org
Cc: wenzhuo.lu@intel.com, Wei Zhao <wei.zhao1@intel.com>
Subject: [dpdk-dev] [PATCH 11/18] net/ixgbe: parse n-tuple filter
Date: Fri,  2 Dec 2016 18:43:07 +0800
Message-ID: <1480675394-59179-12-git-send-email-wei.zhao1@intel.com>
In-Reply-To: <1480675394-59179-1-git-send-email-wei.zhao1@intel.com>

From: Wei Zhao <wei.zhao1@intel.com>

Add a rule validation function: check whether a rule is an n-tuple rule
and, if it is, get the n-tuple filter info.
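
For reference, a rule this parser accepts can be built and checked
through the generic flow API roughly as below (illustration only, not
part of this patch; port_id is assumed to be a configured port, and
IPv4() plus the byte-order helpers come from rte_ip.h/rte_byteorder.h):

	struct rte_flow_attr attr = { .ingress = 1, .priority = 1 };
	struct rte_flow_item_ipv4 ip_spec = {
		.hdr = { .dst_addr = rte_cpu_to_be_32(IPv4(192, 168, 0, 1)) },
	};
	struct rte_flow_item_ipv4 ip_mask = {
		.hdr = { .dst_addr = rte_cpu_to_be_32(UINT32_MAX) },
	};
	struct rte_flow_item_udp udp_spec = {
		.hdr = { .dst_port = rte_cpu_to_be_16(5000) },
	};
	struct rte_flow_item_udp udp_mask = {
		.hdr = { .dst_port = rte_cpu_to_be_16(UINT16_MAX) },
	};
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH }, /* spec/mask must be NULL */
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
		  .spec = &ip_spec, .mask = &ip_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_UDP,
		  .spec = &udp_spec, .mask = &udp_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_queue queue = { .index = 1 };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_error err;
	int ret = rte_flow_validate(port_id, &attr, pattern, actions, &err);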

Signed-off-by: Wei Zhao <wei.zhao1@intel.com>
Signed-off-by: Wenzhuo Lu <wenzhuo.lu@intel.com>
---
 drivers/net/ixgbe/ixgbe_ethdev.c | 349 +++++++++++++++++++++++++++++++++++++++
 1 file changed, 349 insertions(+)

diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index f84ca17..d3768c6 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.c
+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -61,6 +61,8 @@
 #include <rte_random.h>
 #include <rte_dev.h>
 #include <rte_hash_crc.h>
+#include <rte_flow.h>
+#include <rte_flow_driver.h>
 
 #include "ixgbe_logs.h"
 #include "base/ixgbe_api.h"
@@ -393,6 +395,26 @@ static int ixgbe_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
 static int ixgbe_filter_restore(struct rte_eth_dev *dev);
 static void ixgbe_l2_tunnel_conf(struct rte_eth_dev *dev);
 int ixgbe_flush_all_filter(struct rte_eth_dev *dev);
+static enum rte_flow_error_type
+cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
+					const struct rte_flow_item pattern[],
+					const struct rte_flow_action actions[],
+					struct rte_eth_ntuple_filter *filter);
+static enum rte_flow_error_type
+ixgbe_parse_ntuple_filter(const struct rte_flow_attr *attr,
+					const struct rte_flow_item pattern[],
+					const struct rte_flow_action actions[],
+					struct rte_eth_ntuple_filter *filter);
+enum rte_flow_error_type
+ixgbe_flow_rule_validate(__rte_unused struct rte_eth_dev *dev,
+					const struct rte_flow_attr *attr,
+					const struct rte_flow_item pattern[],
+					const struct rte_flow_action actions[]);
+int ixgbe_flow_validate(struct rte_eth_dev *dev,
+		const struct rte_flow_attr *attr,
+		const struct rte_flow_item pattern[],
+		const struct rte_flow_action actions[],
+		struct rte_flow_error *error);
 
 /*
  * Define VF Stats MACRO for Non "cleared on read" register
@@ -778,6 +800,14 @@ static const struct rte_ixgbe_xstats_name_off rte_ixgbevf_stats_strings[] = {
 #define IXGBEVF_NB_XSTATS (sizeof(rte_ixgbevf_stats_strings) /	\
 		sizeof(rte_ixgbevf_stats_strings[0]))
 
+static const struct rte_flow_ops ixgbe_flow_ops = {
+	ixgbe_flow_validate, /* validate */
+	NULL, /* create */
+	NULL, /* destroy */
+	NULL, /* flush */
+	NULL, /* query */
+};
+
 /**
  * Atomically reads the link status information from global
  * structure rte_eth_dev.
@@ -6311,6 +6341,11 @@ ixgbe_dev_filter_ctrl(struct rte_eth_dev *dev,
 	case RTE_ETH_FILTER_L2_TUNNEL:
 		ret = ixgbe_dev_l2_tunnel_filter_handle(dev, filter_op, arg);
 		break;
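+	/*
+	 * The generic rte_flow layer retrieves the driver's rte_flow_ops
+	 * through this filter type with the RTE_ETH_FILTER_GET operation.
+	 */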
+	case RTE_ETH_FILTER_GENERIC:
+		if (filter_op != RTE_ETH_FILTER_GET)
+			return -EINVAL;
+		*(const void **)arg = &ixgbe_flow_ops;
+		break;
 	default:
 		PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
 							filter_type);
@@ -7995,6 +8030,320 @@ ixgbe_flush_all_filter(struct rte_eth_dev *dev)
 	return 0;
 }
 
+/**
+ * Swap the outer bytes of a 24-bit big-endian value held in the low
+ * three bytes of x, yielding the value in little-endian CPU order.
+ */
+static inline uint32_t
+rte_be_to_cpu_24(uint32_t x)
+{
+	return  ((x & 0x000000ffUL) << 16) |
+		(x & 0x0000ff00UL) |
+		((x & 0x00ff0000UL) >> 16);
+}
+
+#define IXGBE_MIN_N_TUPLE_PRIO 1
+#define IXGBE_MAX_N_TUPLE_PRIO 7
+#define PATTERN_SKIP_VOID(filter, filter_struct, ret)\
+	do {\
+		if (!pattern) {\
+			memset(filter, 0, sizeof(filter_struct));\
+			return ret;\
+		} \
+		item = pattern + i;\
+		while (item->type == RTE_FLOW_ITEM_TYPE_VOID) {\
+			i++;\
+			item = pattern + i;\
+		} \
+	} while (0)
+
+#define ACTION_SKIP_VOID(filter, filter_struct, ret)\
+	do {\
+		if (!actions) {\
+			memset(filter, 0, sizeof(filter_struct));\
+			return ret;\
+		} \
+		act = actions + i;\
+		while (act->type == RTE_FLOW_ACTION_TYPE_VOID) {\
+			i++;\
+			act = actions + i;\
+		} \
+	} while (0)
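+
+/**
+ * Both macros above rely on locals of the calling function being in
+ * scope: the index i, plus item (PATTERN_SKIP_VOID) or act
+ * (ACTION_SKIP_VOID), in addition to the pattern/actions parameters.
+ */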
+
+/**
+ * Please be aware there's an assumption for all the parsers:
+ * rte_flow_item uses big endian, while rte_flow_attr and
+ * rte_flow_action use CPU order.
+ * Because the pattern is used to describe the packets,
+ * the packets normally use network order.
+ */
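+
+/**
+ * For instance, an IPv4 address in an item spec/mask must be given in
+ * network order, e.g. rte_cpu_to_be_32(IPv4(192, 168, 0, 1)), whereas
+ * attr->priority or a QUEUE action index is a plain CPU-order integer.
+ */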
+
+/**
+ * Parse the rule to see if it is an n-tuple rule,
+ * and get the n-tuple filter info if it is.
+ */
+static enum rte_flow_error_type
+cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
+			 const struct rte_flow_item pattern[],
+			 const struct rte_flow_action actions[],
+			 struct rte_eth_ntuple_filter *filter)
+{
+	const struct rte_flow_item *item;
+	const struct rte_flow_action *act;
+	const struct rte_flow_item_ipv4 *ipv4_spec;
+	const struct rte_flow_item_ipv4 *ipv4_mask;
+	const struct rte_flow_item_tcp *tcp_spec;
+	const struct rte_flow_item_tcp *tcp_mask;
+	const struct rte_flow_item_udp *udp_spec;
+	const struct rte_flow_item_udp *udp_mask;
+	uint32_t i;
+
+	/************************************************
+	 * parse pattern
+	 ************************************************/
+	i = 0;
+
+	/* the first not void item can be MAC or IPv4 */
+	PATTERN_SKIP_VOID(filter, struct rte_eth_ntuple_filter,
+			  RTE_FLOW_ERROR_TYPE_ITEM_NUM);
+	if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
+	    item->type != RTE_FLOW_ITEM_TYPE_IPV4)
+		return RTE_FLOW_ERROR_TYPE_ITEM;
+
+	/* Skip Ethernet */
+	if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
+		/* if the first item is MAC, the content should be NULL */
+		if (item->spec || item->mask)
+			return RTE_FLOW_ERROR_TYPE_ITEM;
+
+		/* check if the next not void item is IPv4 */
+		i++;
+		PATTERN_SKIP_VOID(filter, struct rte_eth_ntuple_filter,
+				  RTE_FLOW_ERROR_TYPE_ITEM);
+		if (item->type != RTE_FLOW_ITEM_TYPE_IPV4)
+			return RTE_FLOW_ERROR_TYPE_ITEM;
+	}
+
+	/* get the IPv4 info */
+	if (!item->spec || !item->mask)
+		return RTE_FLOW_ERROR_TYPE_ITEM;
+
+	ipv4_mask = (const struct rte_flow_item_ipv4 *)item->mask;
+	/**
+	 * Only support src & dst addresses, protocol,
+	 * others should be masked.
+	 */
+	if (ipv4_mask->hdr.version_ihl ||
+	    ipv4_mask->hdr.type_of_service ||
+	    ipv4_mask->hdr.total_length ||
+	    ipv4_mask->hdr.packet_id ||
+	    ipv4_mask->hdr.fragment_offset ||
+	    ipv4_mask->hdr.time_to_live ||
+	    ipv4_mask->hdr.hdr_checksum)
+		return RTE_FLOW_ERROR_TYPE_ITEM;
+
+	filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
+	filter->src_ip_mask = ipv4_mask->hdr.src_addr;
+	filter->proto_mask  = ipv4_mask->hdr.next_proto_id;
+
+	ipv4_spec = (const struct rte_flow_item_ipv4 *)item->spec;
+	filter->dst_ip = ipv4_spec->hdr.dst_addr;
+	filter->src_ip = ipv4_spec->hdr.src_addr;
+	filter->proto  = ipv4_spec->hdr.next_proto_id;
+
+	/* check if the next not void item is TCP or UDP */
+	i++;
+	PATTERN_SKIP_VOID(filter, struct rte_eth_ntuple_filter,
+			  RTE_FLOW_ERROR_TYPE_ITEM);
+	if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
+	    item->type != RTE_FLOW_ITEM_TYPE_UDP) {
+		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+		return RTE_FLOW_ERROR_TYPE_ITEM;
+	}
+
+	/* get the TCP/UDP info */
+	if (!item->spec || !item->mask) {
+		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+		return RTE_FLOW_ERROR_TYPE_ITEM;
+	}
+
+	if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
+		tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
+
+		/**
+		 * Only support src & dst ports, tcp flags,
+		 * others should be masked.
+		 */
+		if (tcp_mask->hdr.sent_seq ||
+		    tcp_mask->hdr.recv_ack ||
+		    tcp_mask->hdr.data_off ||
+		    tcp_mask->hdr.rx_win ||
+		    tcp_mask->hdr.cksum ||
+		    tcp_mask->hdr.tcp_urp) {
+			memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+			return RTE_FLOW_ERROR_TYPE_ITEM;
+		}
+
+		filter->dst_port_mask  = tcp_mask->hdr.dst_port;
+		filter->src_port_mask  = tcp_mask->hdr.src_port;
+		if (tcp_mask->hdr.tcp_flags == 0xFF) {
+			filter->flags |= RTE_NTUPLE_FLAGS_TCP_FLAG;
+		} else if (!tcp_mask->hdr.tcp_flags) {
+			filter->flags &= ~RTE_NTUPLE_FLAGS_TCP_FLAG;
+		} else {
+			memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+			return RTE_FLOW_ERROR_TYPE_ITEM;
+		}
+
+		tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
+		filter->dst_port  = tcp_spec->hdr.dst_port;
+		filter->src_port  = tcp_spec->hdr.src_port;
+		filter->tcp_flags = tcp_spec->hdr.tcp_flags;
+	} else {
+		udp_mask = (const struct rte_flow_item_udp *)item->mask;
+
+		/**
+		 * Only support src & dst ports,
+		 * others should be masked.
+		 */
+		if (udp_mask->hdr.dgram_len ||
+		    udp_mask->hdr.dgram_cksum) {
+			memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+			return RTE_FLOW_ERROR_TYPE_ITEM;
+		}
+
+		filter->dst_port_mask = udp_mask->hdr.dst_port;
+		filter->src_port_mask = udp_mask->hdr.src_port;
+
+		udp_spec = (const struct rte_flow_item_udp *)item->spec;
+		filter->dst_port = udp_spec->hdr.dst_port;
+		filter->src_port = udp_spec->hdr.src_port;
+	}
+
+	/* check if the next not void item is END */
+	i++;
+	PATTERN_SKIP_VOID(filter, struct rte_eth_ntuple_filter,
+			  RTE_FLOW_ERROR_TYPE_ITEM);
+	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
+		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+		return RTE_FLOW_ERROR_TYPE_ITEM;
+	}
+
+	/************************************************
+	 * parse action
+	 ************************************************/
+	i = 0;
+
+	/**
+	 * n-tuple only supports forwarding,
+	 * check if the first not void action is QUEUE.
+	 */
+	ACTION_SKIP_VOID(filter, struct rte_eth_ntuple_filter,
+			 RTE_FLOW_ERROR_TYPE_ACTION_NUM);
+	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
+		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+		return RTE_FLOW_ERROR_TYPE_ACTION;
+	}
+	filter->queue =
+		((const struct rte_flow_action_queue *)act->conf)->index;
+
+	/* check if the next not void item is END */
+	i++;
+	ACTION_SKIP_VOID(filter, struct rte_eth_ntuple_filter,
+			 RTE_FLOW_ERROR_TYPE_ACTION);
+	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
+		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+		return RTE_FLOW_ERROR_TYPE_ACTION;
+	}
+
+	/************************************************
+	 * parse attr
+	 ************************************************/
+	/* must be input direction */
+	if (!attr->ingress) {
+		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+		return RTE_FLOW_ERROR_TYPE_ATTR_INGRESS;
+	}
+
+	/* not supported */
+	if (attr->egress) {
+		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+		return RTE_FLOW_ERROR_TYPE_ATTR_EGRESS;
+	}
+
+	if (attr->priority > 0xFFFF) {
+		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+		return RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY;
+	}
+	filter->priority = (uint16_t)attr->priority;
+
+	return RTE_FLOW_ERROR_TYPE_NONE;
+}
+
+/* an ixgbe-specific wrapper, as the supported flags are device specific */
+static enum rte_flow_error_type
+ixgbe_parse_ntuple_filter(const struct rte_flow_attr *attr,
+			  const struct rte_flow_item pattern[],
+			  const struct rte_flow_action actions[],
+			  struct rte_eth_ntuple_filter *filter)
+{
+	int ret;
+
+	ret = cons_parse_ntuple_filter(attr, pattern, actions, filter);
+
+	if (ret)
+		return ret;
+
+	/* ixgbe doesn't support matching on TCP flags. */
+	if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) {
+		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+		return RTE_FLOW_ERROR_TYPE_ITEM;
+	}
+
+	/* ixgbe only supports priorities 1-7. */
+	if (filter->priority < IXGBE_MIN_N_TUPLE_PRIO ||
+	    filter->priority > IXGBE_MAX_N_TUPLE_PRIO) {
+		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+		return RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY;
+	}
+
+	/* fixed value for ixgbe */
+	filter->flags = RTE_5TUPLE_FLAGS;
+	return RTE_FLOW_ERROR_TYPE_NONE;
+}
+
+/**
+ * Check if the flow rule is supported by ixgbe.
+ * It only checks the format; it doesn't guarantee that the rule can be
+ * programmed into the HW, as there may not be enough room for it.
+ */
+enum rte_flow_error_type
+ixgbe_flow_rule_validate(__rte_unused struct rte_eth_dev *dev,
+			const struct rte_flow_attr *attr,
+			const struct rte_flow_item pattern[],
+			const struct rte_flow_action actions[])
+{
+	int ret;
+	struct rte_eth_ntuple_filter ntuple_filter;
+
+	memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
+	ret = ixgbe_parse_ntuple_filter(attr, pattern, actions, &ntuple_filter);
+	if (!ret)
+		return RTE_FLOW_ERROR_TYPE_NONE;
+
+	return ret;
+}
+
+/* Check whether a flow rule can be created on ixgbe. */
+int
+ixgbe_flow_validate(struct rte_eth_dev *dev,
+		const struct rte_flow_attr *attr,
+		const struct rte_flow_item pattern[],
+		const struct rte_flow_action actions[],
+		struct rte_flow_error *error)
+{
+	error->type = ixgbe_flow_rule_validate(dev, attr, pattern, actions);
+	if (error->type == RTE_FLOW_ERROR_TYPE_NONE)
+		return 0;
+
+	return -EINVAL;
+}
+
 RTE_PMD_REGISTER_PCI(net_ixgbe, rte_ixgbe_pmd.pci_drv);
 RTE_PMD_REGISTER_PCI_TABLE(net_ixgbe, pci_id_ixgbe_map);
 RTE_PMD_REGISTER_PCI(net_ixgbe_vf, rte_ixgbevf_pmd.pci_drv);
-- 
2.5.5

Thread overview: 36+ messages
2016-12-02 10:42 [dpdk-dev] [PATCH 00/18] net/ixgbe: Consistent filter API Wei Zhao
2016-12-02 10:42 ` [dpdk-dev] [PATCH 01/18] net/ixgbe: store SYN filter Wei Zhao
2016-12-20 16:55   ` Ferruh Yigit
2016-12-22  9:48     ` Zhao1, Wei
2017-01-06 16:26       ` Ferruh Yigit
2017-01-10  6:48         ` Zhao1, Wei
2016-12-26  1:47     ` Zhao1, Wei
2016-12-02 10:42 ` [dpdk-dev] [PATCH 02/18] net/ixgbe: store flow director filter Wei Zhao
2016-12-20 16:58   ` Ferruh Yigit
2016-12-26  2:50     ` Zhao1, Wei
2016-12-02 10:42 ` [dpdk-dev] [PATCH 03/18] net/ixgbe: store L2 tunnel filter Wei Zhao
2016-12-02 10:43 ` [dpdk-dev] [PATCH 04/18] net/ixgbe: restore n-tuple filter Wei Zhao
2016-12-20 16:58   ` Ferruh Yigit
2016-12-26  3:32     ` Zhao1, Wei
2016-12-02 10:43 ` [dpdk-dev] [PATCH 05/18] net/ixgbe: restore ether type filter Wei Zhao
2016-12-02 10:43 ` [dpdk-dev] [PATCH 06/18] net/ixgbe: restore SYN filter Wei Zhao
2016-12-02 10:43 ` [dpdk-dev] [PATCH 07/18] net/ixgbe: restore flow director filter Wei Zhao
2016-12-02 10:43 ` [dpdk-dev] [PATCH 08/18] net/ixgbe: restore L2 tunnel filter Wei Zhao
2016-12-02 10:43 ` [dpdk-dev] [PATCH 09/18] net/ixgbe: store and restore L2 tunnel configuration Wei Zhao
2016-12-02 10:43 ` [dpdk-dev] [PATCH 10/18] net/ixgbe: flush all the filters Wei Zhao
2016-12-02 10:43 ` Wei Zhao [this message]
2016-12-20 17:23   ` [dpdk-dev] [PATCH 11/18] net/ixgbe: parse n-tuple filter Ferruh Yigit
2016-12-02 10:43 ` [dpdk-dev] [PATCH 12/18] net/ixgbe: parse ethertype filter Wei Zhao
2016-12-02 10:43 ` [dpdk-dev] [PATCH 13/18] net/ixgbe: parse SYN filter Wei Zhao
2016-12-02 10:43 ` [dpdk-dev] [PATCH 14/18] net/ixgbe: parse L2 tunnel filter Wei Zhao
2016-12-02 10:43 ` [dpdk-dev] [PATCH 15/18] net/ixgbe: parse flow director filter Wei Zhao
2016-12-20 17:00   ` Ferruh Yigit
2016-12-22  9:19     ` Zhao1, Wei
2016-12-22 10:44       ` Ferruh Yigit
2016-12-23  8:13         ` Adrien Mazarguil
2016-12-27  3:31           ` Zhao1, Wei
2016-12-02 10:43 ` [dpdk-dev] [PATCH 16/18] net/ixgbe: create consistent filter Wei Zhao
2016-12-20 17:25   ` Ferruh Yigit
2016-12-23  6:26     ` Zhao1, Wei
2016-12-02 10:43 ` [dpdk-dev] [PATCH 17/18] net/ixgbe: destroy " Wei Zhao
2016-12-02 10:43 ` [dpdk-dev] [PATCH 18/18] net/ixgbe: flush " Wei Zhao
