From mboxrd@z Thu Jan 1 00:00:00 1970
Received: from mga14.intel.com (mga14.intel.com [192.55.52.115])
 by dpdk.org (Postfix) with ESMTP id C1729532C
 for ; Thu, 22 Jun 2017 04:14:13 +0200 (CEST)
Received: from orsmga001.jf.intel.com ([10.7.209.18])
 by fmsmga103.fm.intel.com with ESMTP/TLS/DHE-RSA-AES256-GCM-SHA384;
 21 Jun 2017 19:14:13 -0700
X-ExtLoop1: 1
X-IronPort-AV: E=Sophos;i="5.39,371,1493708400"; d="scan'208";a="1143640780"
Received: from zhangqi.sh.intel.com ([10.239.129.189])
 by orsmga001.jf.intel.com with ESMTP; 21 Jun 2017 19:14:12 -0700
From: Qi Zhang
To: wenzhuo.lu@intel.com, helin.zhang@intel.com
Cc: dev@dpdk.org, Qi Zhang
Date: Wed, 21 Jun 2017 15:07:10 -0400
Message-Id: <1498072031-4039-3-git-send-email-qi.z.zhang@intel.com>
X-Mailer: git-send-email 2.7.4
In-Reply-To: <1498072031-4039-1-git-send-email-qi.z.zhang@intel.com>
References: <1495582134-13665-1-git-send-email-qi.z.zhang@intel.com>
 <1498072031-4039-1-git-send-email-qi.z.zhang@intel.com>
Subject: [dpdk-dev] [PATCH v3 2/3] net/ixgbe: enable signature match for
 consistent API
X-BeenThere: dev@dpdk.org
X-Mailman-Version: 2.1.15
Precedence: list
List-Id: DPDK patches and discussions
X-List-Received-Date: Thu, 22 Jun 2017 02:14:14 -0000

Enable signature match for the rte_flow API. An RTE_FLOW_ITEM_TYPE_FUZZY item
in the pattern specifies a signature match.

Signed-off-by: Qi Zhang
---
v2:
- Check the fuzzy match's last and mask values.

 drivers/net/ixgbe/ixgbe_flow.c | 91 ++++++++++++++++++++++++++++++++++++------
 1 file changed, 79 insertions(+), 12 deletions(-)

diff --git a/drivers/net/ixgbe/ixgbe_flow.c b/drivers/net/ixgbe/ixgbe_flow.c
index 067252a..c6653d7 100644
--- a/drivers/net/ixgbe/ixgbe_flow.c
+++ b/drivers/net/ixgbe/ixgbe_flow.c
@@ -1268,6 +1268,67 @@ ixgbe_parse_fdir_act_attr(const struct rte_flow_attr *attr,
 	return 0;
 }
 
+/* search next no void pattern and skip roughly */
+static inline
+const struct rte_flow_item *next_no_roughly_pattern(
+	const struct rte_flow_item pattern[],
+	const struct rte_flow_item *cur)
+{
+	const struct rte_flow_item *next =
+		next_no_void_pattern(pattern, cur);
+	while (1) {
+		if (next->type != RTE_FLOW_ITEM_TYPE_FUZZY)
+			return next;
+		next = next_no_void_pattern(pattern, next);
+	}
+}
+
+static inline uint8_t signature_match(const struct rte_flow_item pattern[])
+{
+	const struct rte_flow_item_fuzzy *spec, *last, *mask;
+	const struct rte_flow_item *item;
+	uint32_t sh, lh, mh;
+	int i = 0;
+
+	while (1) {
+		item = pattern + i;
+		if (item->type == RTE_FLOW_ITEM_TYPE_END)
+			break;
+
+		if (item->type == RTE_FLOW_ITEM_TYPE_FUZZY) {
+			spec =
+				(const struct rte_flow_item_fuzzy *)item->spec;
+			last =
+				(const struct rte_flow_item_fuzzy *)item->last;
+			mask =
+				(const struct rte_flow_item_fuzzy *)item->mask;
+
+			if (!spec || !mask)
+				return 0;
+
+			sh = spec->thresh;
+
+			if (!last)
+				lh = sh;
+			else
+				lh = last->thresh;
+
+			mh = mask->thresh;
+			sh = sh & mh;
+			lh = lh & mh;
+
+			if (!sh || sh > lh)
+				return 0;
+
+			return 1;
+		}
+
+		i++;
+	}
+
+	return 0;
+}
+
 /**
  * Parse the rule to see if it is a IP or MAC VLAN flow director rule.
  * And get the flow director filter info BTW.
@@ -1277,6 +1338,7 @@ ixgbe_parse_fdir_act_attr(const struct rte_flow_attr *attr,
  * The next not void item could be UDP or TCP or SCTP (optional)
  * The next not void item could be RAW (for flexbyte, optional)
  * The next not void item must be END.
+ * A Roughly Match pattern can appear at any place before END (optional)
  * MAC VLAN PATTERN:
  * The first not void item must be ETH.
  * The second not void item must be MAC VLAN.
@@ -1371,7 +1433,7 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
 	 * The first not void item should be
 	 * MAC or IPv4 or TCP or UDP or SCTP.
 	 */
-	item = next_no_void_pattern(pattern, NULL);
+	item = next_no_roughly_pattern(pattern, NULL);
 	if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
 	    item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
 	    item->type != RTE_FLOW_ITEM_TYPE_TCP &&
@@ -1384,7 +1446,10 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
 		return -rte_errno;
 	}
 
-	rule->mode = RTE_FDIR_MODE_PERFECT;
+	if (signature_match(pattern))
+		rule->mode = RTE_FDIR_MODE_SIGNATURE;
+	else
+		rule->mode = RTE_FDIR_MODE_PERFECT;
 
 	/*Not supported last point for range*/
 	if (item->last) {
@@ -1421,14 +1486,13 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
 
 	if (item->mask) {
 
-		/* If ethernet has meaning, it means MAC VLAN mode. */
-		rule->mode = RTE_FDIR_MODE_PERFECT_MAC_VLAN;
 		rule->b_mask = TRUE;
 		eth_mask = (const struct rte_flow_item_eth *)item->mask;
 
 		/* Ether type should be masked. */
-		if (eth_mask->type) {
+		if (eth_mask->type ||
+		    rule->mode == RTE_FDIR_MODE_SIGNATURE) {
 			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
 			rte_flow_error_set(error, EINVAL,
 				RTE_FLOW_ERROR_TYPE_ITEM,
@@ -1436,6 +1500,9 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
 			return -rte_errno;
 		}
 
+		/* If ethernet has meaning, it means MAC VLAN mode. */
+		rule->mode = RTE_FDIR_MODE_PERFECT_MAC_VLAN;
+
 		/**
 		 * src MAC address must be masked,
 		 * and don't support dst MAC address mask.
@@ -1464,7 +1531,7 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
 	 * Check if the next not void item is vlan or ipv4.
 	 * IPv6 is not supported.
 	 */
-	item = next_no_void_pattern(pattern, item);
+	item = next_no_roughly_pattern(pattern, item);
 	if (rule->mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
 		if (item->type != RTE_FLOW_ITEM_TYPE_VLAN) {
 			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
@@ -1511,7 +1578,7 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
 		/* More than one tags are not supported. */
 
 		/* Next not void item must be END */
-		item = next_no_void_pattern(pattern, item);
+		item = next_no_roughly_pattern(pattern, item);
 		if (item->type != RTE_FLOW_ITEM_TYPE_END) {
 			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
 			rte_flow_error_set(error, EINVAL,
@@ -1581,7 +1648,7 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
 	 * Check if the next not void item is
 	 * TCP or UDP or SCTP or END.
 	 */
-	item = next_no_void_pattern(pattern, item);
+	item = next_no_roughly_pattern(pattern, item);
 	if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
 	    item->type != RTE_FLOW_ITEM_TYPE_UDP &&
 	    item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
@@ -1648,7 +1715,7 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
 				tcp_spec->hdr.dst_port;
 	}
 
-	item = next_no_void_pattern(pattern, item);
+	item = next_no_roughly_pattern(pattern, item);
 	if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
 	    item->type != RTE_FLOW_ITEM_TYPE_END) {
 		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
@@ -1708,7 +1775,7 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
 				udp_spec->hdr.dst_port;
 	}
 
-	item = next_no_void_pattern(pattern, item);
+	item = next_no_roughly_pattern(pattern, item);
 	if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
 	    item->type != RTE_FLOW_ITEM_TYPE_END) {
 		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
@@ -1770,7 +1837,7 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
 				sctp_spec->hdr.dst_port;
 	}
 
-	item = next_no_void_pattern(pattern, item);
+	item = next_no_roughly_pattern(pattern, item);
 	if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
 	    item->type != RTE_FLOW_ITEM_TYPE_END) {
 		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
@@ -1854,7 +1921,7 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
 
 	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
 		/* check if the next not void item is END */
-		item = next_no_void_pattern(pattern, item);
+		item = next_no_roughly_pattern(pattern, item);
 		if (item->type != RTE_FLOW_ITEM_TYPE_END) {
 			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
 			rte_flow_error_set(error, EINVAL,
-- 
2.7.4
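
For context, and not part of the patch itself: the sketch below shows how an
application might request a signature-mode flow director rule through rte_flow
once this series is applied (the FUZZY item and struct rte_flow_item_fuzzy are
added elsewhere in the series). The helper name, addresses, port, queue index
and threshold are made-up example values, not anything defined by the patch.

#include <stdint.h>
#include <rte_byteorder.h>
#include <rte_flow.h>

/* Hypothetical example: steer UDP packets to 192.168.0.1:5000 into queue 1,
 * using a FUZZY item so the ixgbe PMD programs a signature filter instead of
 * a perfect-match filter. */
static struct rte_flow *
fdir_signature_rule_example(uint16_t port_id, struct rte_flow_error *error)
{
	struct rte_flow_attr attr = { .ingress = 1 };

	/* A non-zero masked threshold is what signature_match() keys on. */
	struct rte_flow_item_fuzzy fuzzy_spec = { .thresh = 1 };
	struct rte_flow_item_fuzzy fuzzy_mask = { .thresh = UINT32_MAX };

	struct rte_flow_item_ipv4 ipv4_spec = {
		.hdr = { .dst_addr = rte_cpu_to_be_32(0xC0A80001) }, /* 192.168.0.1 */
	};
	struct rte_flow_item_ipv4 ipv4_mask = {
		.hdr = { .dst_addr = rte_cpu_to_be_32(0xFFFFFFFF) },
	};
	struct rte_flow_item_udp udp_spec = {
		.hdr = { .dst_port = rte_cpu_to_be_16(5000) },
	};
	struct rte_flow_item_udp udp_mask = {
		.hdr = { .dst_port = rte_cpu_to_be_16(0xFFFF) },
	};

	/* The FUZZY item may sit anywhere before END; next_no_roughly_pattern()
	 * skips it while walking the protocol items. */
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_FUZZY,
		  .spec = &fuzzy_spec, .mask = &fuzzy_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
		  .spec = &ipv4_spec, .mask = &ipv4_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_UDP,
		  .spec = &udp_spec, .mask = &udp_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};

	struct rte_flow_action_queue queue = { .index = 1 };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	/* Returns NULL and fills *error if the PMD rejects the rule. */
	return rte_flow_create(port_id, &attr, pattern, actions, error);
}

Signature mode trades exactness for capacity: the ixgbe flow director can hold
more hash-based (signature) filters than perfect filters, at the risk of
occasional false-positive matches, and the FUZZY item lets an application opt
into that trade-off explicitly.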