From: Dekel Peled <dekelp@nvidia.com>
To: orika@nvidia.com, thomas@monjalon.net, ferruh.yigit@intel.com,
 arybchenko@solarflare.com, konstantin.ananyev@intel.com,
 olivier.matz@6wind.com, wenzhuo.lu@intel.com, beilei.xing@intel.com,
 bernard.iremonger@intel.com, matan@nvidia.com, shahafs@nvidia.com,
 viacheslavo@nvidia.com
Cc: dev@dpdk.org
Date: Fri, 2 Oct 2020 00:15:06 +0300
Subject: [dpdk-dev] [PATCH v2 09/11] net/mlx5: support match on IPv6 fragment ext. item

Following RFC [1], the rte_flow item ipv6_frag_ext was added to ethdev.
This patch adds to MLX5 PMD the option to match on this item type.

[1] http://mails.dpdk.org/archives/dev/2020-March/160255.html

Signed-off-by: Dekel Peled <dekelp@nvidia.com>
---
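Note (illustration only, not part of the committed change): once the ethdev
ipv6_frag_ext item from this series is available, an application can match
fragmented IPv6 traffic by the mere presence of the fragment extension
header, which is what this patch supports. A minimal sketch, with port id,
queue index and the helper name being placeholders:

#include <stdint.h>
#include <rte_flow.h>

/* Steer IPv6 packets that carry a fragment extension header to Rx queue 1. */
static struct rte_flow *
create_ipv6_frag_rule(uint16_t port_id, struct rte_flow_error *error)
{
        struct rte_flow_attr attr = { .ingress = 1 };
        struct rte_flow_item pattern[] = {
                { .type = RTE_FLOW_ITEM_TYPE_ETH },
                { .type = RTE_FLOW_ITEM_TYPE_IPV6 },
                /* Empty spec/mask: match only on presence of the header. */
                { .type = RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT },
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        struct rte_flow_action_queue queue = { .index = 1 };
        struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };

        return rte_flow_create(port_id, &attr, pattern, actions, error);
}

Matching on specific frag_data values is range-restricted by the validation
function added below.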
 drivers/net/mlx5/mlx5_flow.h    |   4 +
 drivers/net/mlx5/mlx5_flow_dv.c | 209 ++++++++++++++++++++++++++++++++++++++++
 2 files changed, 213 insertions(+)

diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index 1e30c93..376519f 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -122,6 +122,10 @@ enum mlx5_feature_name {
 /* Pattern eCPRI Layer bit. */
 #define MLX5_FLOW_LAYER_ECPRI (UINT64_C(1) << 29)
 
+/* IPv6 Fragment Extension Header bit. */
+#define MLX5_FLOW_LAYER_OUTER_L3_IPV6_FRAG_EXT (1u << 30)
+#define MLX5_FLOW_LAYER_INNER_L3_IPV6_FRAG_EXT (1u << 31)
+
 /* Outer Masks. */
 #define MLX5_FLOW_LAYER_OUTER_L3 \
         (MLX5_FLOW_LAYER_OUTER_L3_IPV4 | MLX5_FLOW_LAYER_OUTER_L3_IPV6)
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index 4403abc..eb1db12 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -1901,6 +1901,120 @@ struct field_modify_info modify_tcp[] = {
 }
 
 /**
+ * Validate IPV6 fragment extension item.
+ *
+ * @param[in] item
+ *   Item specification.
+ * @param[in] item_flags
+ *   Bit-fields that hold the items detected until now.
+ * @param[out] error
+ *   Pointer to error structure.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+flow_dv_validate_item_ipv6_frag_ext(const struct rte_flow_item *item,
+                                    uint64_t item_flags,
+                                    struct rte_flow_error *error)
+{
+        const struct rte_flow_item_ipv6_frag_ext *spec = item->spec;
+        const struct rte_flow_item_ipv6_frag_ext *last = item->last;
+        const struct rte_flow_item_ipv6_frag_ext *mask = item->mask;
+        rte_be16_t frag_data_spec = 0;
+        rte_be16_t frag_data_last = 0;
+        const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
+        const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
+                                      MLX5_FLOW_LAYER_OUTER_L4;
+        int ret = 0;
+        struct rte_flow_item_ipv6_frag_ext nic_mask = {
+                .hdr = {
+                        .next_header = 0xff,
+                        .frag_data = RTE_BE16(0xffff),
+                },
+        };
+
+        if (item_flags & l4m)
+                return rte_flow_error_set(error, EINVAL,
+                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
+                                          "ipv6 fragment extension item cannot "
+                                          "follow L4 item.");
+        if ((tunnel && !(item_flags & MLX5_FLOW_LAYER_INNER_L3_IPV6)) ||
+            (!tunnel && !(item_flags & MLX5_FLOW_LAYER_OUTER_L3_IPV6)))
+                return rte_flow_error_set(error, EINVAL,
+                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
+                                          "ipv6 fragment extension item must "
+                                          "follow ipv6 item");
+        if (spec && mask)
+                frag_data_spec = spec->hdr.frag_data & mask->hdr.frag_data;
+        if (!frag_data_spec)
+                return 0;
+        /*
+         * spec and mask are valid, enforce using full mask to make sure the
+         * complete value is used correctly.
+         */
+        if ((mask->hdr.frag_data & RTE_BE16(RTE_IPV6_FRAG_USED_MASK)) !=
+            RTE_BE16(RTE_IPV6_FRAG_USED_MASK))
+                return rte_flow_error_set(error, EINVAL,
+                                          RTE_FLOW_ERROR_TYPE_ITEM_MASK,
+                                          item, "must use full mask for"
+                                          " frag_data");
+        /*
+         * Match on frag_data 0x0001 means M is 1 and frag-offset is 0.
+         * This is 1st fragment of fragmented packet.
+         */
+        if (frag_data_spec == RTE_BE16(RTE_IPV6_EHDR_MF_MASK))
+                return rte_flow_error_set(error, ENOTSUP,
+                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
+                                          "match on first fragment not "
+                                          "supported");
+        if (frag_data_spec && !last)
+                return rte_flow_error_set(error, EINVAL,
+                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
+                                          "specified value not supported");
+        ret = mlx5_flow_item_acceptable
+                                (item, (const uint8_t *)mask,
+                                 (const uint8_t *)&nic_mask,
+                                 sizeof(struct rte_flow_item_ipv6_frag_ext),
+                                 MLX5_ITEM_RANGE_ACCEPTED, error);
+        if (ret)
+                return ret;
+        /* spec and last are valid, validate the specified range. */
+        frag_data_last = last->hdr.frag_data & mask->hdr.frag_data;
+        /*
+         * Match on frag_data spec 0x0009 and last 0xfff9
+         * means M is 1 and frag-offset is > 0.
+         * This packet is fragment 2nd and onward, excluding last.
+         * This is not yet supported in MLX5, return appropriate
+         * error message.
+         */
+        if (frag_data_spec == RTE_BE16(RTE_IPV6_EHDR_FO_ALIGN |
+                                       RTE_IPV6_EHDR_MF_MASK) &&
+            frag_data_last == RTE_BE16(RTE_IPV6_FRAG_USED_MASK))
+                return rte_flow_error_set(error, ENOTSUP,
+                                          RTE_FLOW_ERROR_TYPE_ITEM_LAST,
+                                          last, "match on following "
+                                          "fragments not supported");
+        /*
+         * Match on frag_data spec 0x0008 and last 0xfff8
+         * means M is 0 and frag-offset is > 0.
+         * This packet is last fragment of fragmented packet.
+         * This is not yet supported in MLX5, return appropriate
+         * error message.
+         */
+        if (frag_data_spec == RTE_BE16(RTE_IPV6_EHDR_FO_ALIGN) &&
+            frag_data_last == RTE_BE16(RTE_IPV6_EHDR_FO_MASK))
+                return rte_flow_error_set(error, ENOTSUP,
+                                          RTE_FLOW_ERROR_TYPE_ITEM_LAST,
+                                          last, "match on last "
+                                          "fragment not supported");
+        /* Other range values are invalid and rejected. */
+        return rte_flow_error_set(error, EINVAL,
+                                  RTE_FLOW_ERROR_TYPE_ITEM_LAST, last,
+                                  "specified range not supported");
+}
+
+/**
  * Validate the pop VLAN action.
  *
  * @param[in] dev
@@ -5349,6 +5463,29 @@ struct field_modify_info modify_tcp[] = {
                                 next_protocol = 0xff;
                         }
                         break;
+                case RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT:
+                        ret = flow_dv_validate_item_ipv6_frag_ext(items,
+                                                                  item_flags,
+                                                                  error);
+                        if (ret < 0)
+                                return ret;
+                        last_item = tunnel ?
+                                MLX5_FLOW_LAYER_INNER_L3_IPV6_FRAG_EXT :
+                                MLX5_FLOW_LAYER_OUTER_L3_IPV6_FRAG_EXT;
+                        if (items->mask != NULL &&
+                            ((const struct rte_flow_item_ipv6_frag_ext *)
+                             items->mask)->hdr.next_header) {
+                                next_protocol =
+                                        ((const struct rte_flow_item_ipv6_frag_ext *)
+                                         items->spec)->hdr.next_header;
+                                next_protocol &=
+                                        ((const struct rte_flow_item_ipv6_frag_ext *)
+                                         items->mask)->hdr.next_header;
+                        } else {
+                                /* Reset for inner layer. */
+                                next_protocol = 0xff;
+                        }
+                        break;
                 case RTE_FLOW_ITEM_TYPE_TCP:
                         ret = mlx5_flow_validate_item_tcp
                                                 (items, item_flags,
@@ -6527,6 +6664,57 @@ struct field_modify_info modify_tcp[] = {
 }
 
 /**
+ * Add IPV6 fragment extension item to matcher and to the value.
+ *
+ * @param[in, out] matcher
+ *   Flow matcher.
+ * @param[in, out] key
+ *   Flow matcher value.
+ * @param[in] item
+ *   Flow pattern to translate.
+ * @param[in] inner
+ *   Item is inner pattern.
+ */
+static void
+flow_dv_translate_item_ipv6_frag_ext(void *matcher, void *key,
+                                     const struct rte_flow_item *item,
+                                     int inner)
+{
+        const struct rte_flow_item_ipv6_frag_ext *ipv6_frag_ext_m = item->mask;
+        const struct rte_flow_item_ipv6_frag_ext *ipv6_frag_ext_v = item->spec;
+        const struct rte_flow_item_ipv6_frag_ext nic_mask = {
+                .hdr = {
+                        .next_header = 0xff,
+                        .frag_data = RTE_BE16(0xffff),
+                },
+        };
+        void *headers_m;
+        void *headers_v;
+
+        if (inner) {
+                headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
+                                         inner_headers);
+                headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
+        } else {
+                headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
+                                         outer_headers);
+                headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
+        }
+        /* IPv6 fragment extension item exists, so packet is IP fragment. */
+        MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag, 1);
+        MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, 1);
+        if (!ipv6_frag_ext_v)
+                return;
+        if (!ipv6_frag_ext_m)
+                ipv6_frag_ext_m = &nic_mask;
+        MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
+                 ipv6_frag_ext_m->hdr.next_header);
+        MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
+                 ipv6_frag_ext_v->hdr.next_header &
+                 ipv6_frag_ext_m->hdr.next_header);
+}
+
+/**
  * Add TCP item to matcher and to the value.
  *
  * @param[in, out] matcher
@@ -8868,6 +9056,27 @@ struct field_modify_info modify_tcp[] = {
                                 next_protocol = 0xff;
                         }
                         break;
+                case RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT:
+                        flow_dv_translate_item_ipv6_frag_ext(match_mask,
+                                                             match_value,
+                                                             items, tunnel);
+                        last_item = tunnel ?
+                                MLX5_FLOW_LAYER_INNER_L3_IPV6_FRAG_EXT :
+                                MLX5_FLOW_LAYER_OUTER_L3_IPV6_FRAG_EXT;
+                        if (items->mask != NULL &&
+                            ((const struct rte_flow_item_ipv6_frag_ext *)
+                             items->mask)->hdr.next_header) {
+                                next_protocol =
+                                        ((const struct rte_flow_item_ipv6_frag_ext *)
+                                         items->spec)->hdr.next_header;
+                                next_protocol &=
+                                        ((const struct rte_flow_item_ipv6_frag_ext *)
+                                         items->mask)->hdr.next_header;
+                        } else {
+                                /* Reset for inner layer. */
+                                next_protocol = 0xff;
+                        }
+                        break;
                 case RTE_FLOW_ITEM_TYPE_TCP:
                         flow_dv_translate_item_tcp(match_mask, match_value,
                                                    items, tunnel);
-- 
1.8.3.1