From: Shiri Kuzin <shirik@nvidia.com>
To: dev@dpdk.org
Cc: viacheslavo@nvidia.com, adrien.mazarguil@6wind.com, orika@nvidia.com,
    ferruh.yigit@intel.com, thomas@monjalon.net, rasland@nvidia.com
Date: Mon, 28 Dec 2020 21:44:30 +0200
Message-Id: <20201228194432.30512-5-shirik@nvidia.com>
In-Reply-To: <20201228194432.30512-1-shirik@nvidia.com>
References: <20201228194432.30512-1-shirik@nvidia.com>
Subject: [dpdk-dev] [PATCH 4/6] net/mlx5: add GTP PSC flow validation

Add a validation routine for the GTP PSC extension header item.

The GTP PSC item is valid only when it directly follows the GTP item.
The routine also rejects patterns where the GTP E flag is requested to
match zero, and flows created in group 0.

Signed-off-by: Shiri Kuzin <shirik@nvidia.com>
---
 drivers/net/mlx5/mlx5_flow.h    |  5 +++
 drivers/net/mlx5/mlx5_flow_dv.c | 70 +++++++++++++++++++++++++++++++++
 2 files changed, 75 insertions(+)

diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index d85dd19929..fd6b24c32d 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -138,6 +138,9 @@ enum mlx5_feature_name {
 #define MLX5_FLOW_LAYER_OUTER_L3_IPV6_FRAG_EXT (1u << 30)
 #define MLX5_FLOW_LAYER_INNER_L3_IPV6_FRAG_EXT (1u << 31)
 
+/* Pattern tunnel Layer bits (continued). */
+#define MLX5_FLOW_LAYER_GTP_PSC (UINT64_C(1) << 33)
+
 /* Outer Masks. */
 #define MLX5_FLOW_LAYER_OUTER_L3 \
 	(MLX5_FLOW_LAYER_OUTER_L3_IPV4 | MLX5_FLOW_LAYER_OUTER_L3_IPV6)
@@ -348,6 +351,8 @@ enum mlx5_feature_name {
 #define MLX5_ENCAPSULATION_DECISION_SIZE (sizeof(struct rte_ether_hdr) + \
 					  sizeof(struct rte_ipv4_hdr))
 
+/* GTP extension header flag. */
+#define MLX5_GTP_EXT_HEADER_FLAG 4
 
 /* IPv4 fragment_offset field contains relevant data in bits 2 to 15. */
 #define MLX5_IPV4_FRAG_OFFSET_MASK \
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index 4f638e24ad..3f6d44a265 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -1759,6 +1759,66 @@ flow_dv_validate_item_gtp(struct rte_eth_dev *dev,
 					 MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
 }
 
+/**
+ * Validate GTP PSC item.
+ *
+ * @param[in] item
+ *   Item specification.
+ * @param[in] last_item
+ *   Previous validated item in the pattern items.
+ * @param[in] gtp_item
+ *   Previous GTP item specification.
+ * @param[in] attr
+ *   Pointer to flow attributes.
+ * @param[out] error
+ *   Pointer to error structure.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+flow_dv_validate_item_gtp_psc(const struct rte_flow_item *item,
+			      uint64_t last_item,
+			      const struct rte_flow_item *gtp_item,
+			      const struct rte_flow_attr *attr,
+			      struct rte_flow_error *error)
+{
+	const struct rte_flow_item_gtp *gtp_spec;
+	const struct rte_flow_item_gtp *gtp_mask;
+	const struct rte_flow_item_gtp_psc *mask;
+	const struct rte_flow_item_gtp_psc nic_mask = {
+		.pdu_type = 0xFF,
+		.qfi = 0xFF,
+	};
+
+	if (!gtp_item || !(last_item & MLX5_FLOW_LAYER_GTP))
+		return rte_flow_error_set
+			(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
+			 "GTP PSC item must be preceded with GTP item");
+	gtp_spec = gtp_item->spec;
+	gtp_mask = gtp_item->mask ? gtp_item->mask : &rte_flow_item_gtp_mask;
+	/* Reject if the GTP E flag is explicitly requested to match zero. */
+	if (gtp_spec &&
+		(gtp_mask->v_pt_rsv_flags &
+		~gtp_spec->v_pt_rsv_flags & MLX5_GTP_EXT_HEADER_FLAG))
+		return rte_flow_error_set
+			(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
+			 "GTP E flag must be 1 to match GTP PSC");
+	/* Check the flow is not created in group zero. */
+	if (!attr->transfer && !attr->group)
+		return rte_flow_error_set
+			(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+			 "GTP PSC is not supported for group 0");
+	/* Nothing more to validate if no GTP PSC spec is provided. */
+	if (!item->spec)
+		return 0;
+	mask = item->mask ? item->mask : &rte_flow_item_gtp_psc_mask;
+	return mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
+					 (const uint8_t *)&nic_mask,
+					 sizeof(struct rte_flow_item_gtp_psc),
+					 MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
+}
+
 /**
  * Validate IPV4 item.
  * Use existing validation function mlx5_flow_validate_item_ipv4(), and
@@ -5219,6 +5279,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
 	int actions_n = 0;
 	uint8_t item_ipv6_proto = 0;
 	const struct rte_flow_item *gre_item = NULL;
+	const struct rte_flow_item *gtp_item = NULL;
 	const struct rte_flow_action_raw_decap *decap;
 	const struct rte_flow_action_raw_encap *encap;
 	const struct rte_flow_action_rss *rss;
@@ -5556,8 +5617,17 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
 						      error);
 			if (ret < 0)
 				return ret;
+			gtp_item = items;
 			last_item = MLX5_FLOW_LAYER_GTP;
 			break;
+		case RTE_FLOW_ITEM_TYPE_GTP_PSC:
+			ret = flow_dv_validate_item_gtp_psc(items, last_item,
+							    gtp_item, attr,
+							    error);
+			if (ret < 0)
+				return ret;
+			last_item = MLX5_FLOW_LAYER_GTP_PSC;
+			break;
 		case RTE_FLOW_ITEM_TYPE_ECPRI:
 			/* Capacity will be checked in the translate stage. */
 			ret = mlx5_flow_validate_item_ecpri(items, item_flags,
-- 
2.21.0
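
Not part of the patch, just for context: below is a minimal application-side
sketch of a pattern this validation is intended to accept, namely a GTP item
with the E flag set, directly followed by a GTP PSC item. The helper name and
all field values are illustrative assumptions built on the public rte_flow
API; the flow itself would additionally need to be created in a non-zero
group (or as a transfer flow) to pass the group-0 check above.

#include <rte_byteorder.h>
#include <rte_flow.h>

/* Illustrative match values only. */
static const struct rte_flow_item_gtp gtp_spec = {
	.v_pt_rsv_flags = 0x04,	/* E flag: an extension header follows. */
	.teid = RTE_BE32(1234),
};
static const struct rte_flow_item_gtp_psc psc_spec = {
	.qfi = 9,		/* QoS flow identifier to match. */
};

/* Hypothetical helper: fill a pattern where GTP PSC follows GTP. */
static void
build_gtp_psc_pattern(struct rte_flow_item pattern[6])
{
	pattern[0] = (struct rte_flow_item){ .type = RTE_FLOW_ITEM_TYPE_ETH };
	pattern[1] = (struct rte_flow_item){ .type = RTE_FLOW_ITEM_TYPE_IPV4 };
	pattern[2] = (struct rte_flow_item){ .type = RTE_FLOW_ITEM_TYPE_UDP };
	pattern[3] = (struct rte_flow_item){ .type = RTE_FLOW_ITEM_TYPE_GTP,
					     .spec = &gtp_spec };
	/* Default masks apply while .mask is left NULL. */
	pattern[4] = (struct rte_flow_item){ .type = RTE_FLOW_ITEM_TYPE_GTP_PSC,
					     .spec = &psc_spec };
	pattern[5] = (struct rte_flow_item){ .type = RTE_FLOW_ITEM_TYPE_END };
}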