From: Xueming Li <xuemingl@mellanox.com>
To: Wenzhuo Lu, Jingjing Wu, Thomas Monjalon, Nelio Laranjeiro,
	Adrien Mazarguil, Shahaf Shuler, Olivier Matz
Cc: Xueming Li <xuemingl@mellanox.com>, dev@dpdk.org
Date: Fri, 9 Mar 2018 19:29:21 +0800
Message-Id: <20180309112921.2105-22-xuemingl@mellanox.com>
X-Mailer: git-send-email 2.13.3
In-Reply-To: <20180309112921.2105-1-xuemingl@mellanox.com>
References: <20180309112921.2105-1-xuemingl@mellanox.com>
In-Reply-To: <20180226150947.107179-2-xuemingl@mellanox.com>
References: <20180226150947.107179-2-xuemingl@mellanox.com>
Subject: [dpdk-dev] [PATCH v1 21/21] net/mlx5: support MPLS-in-GRE and MPLS-in-UDP flow pattern

This patch adds support for the new tunnel types MPLS-in-GRE and MPLS-in-UDP.
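
Not part of this patch, for illustration only: a rule matching MPLS-in-UDP
traffic could be requested through the generic rte_flow API roughly as
sketched below. The helper name example_validate_mpls_in_udp, the label
value 0x123 and the queue index are arbitrary placeholders; for MPLS-in-GRE
the UDP item would be replaced by a GRE item.

#include <stdint.h>
#include <rte_flow.h>

/* Example only: validate an MPLS-in-UDP rule on @port_id and steer
 * matching packets to Rx queue 1.  Label 0x123 and the queue index
 * are arbitrary placeholder values.
 */
static int
example_validate_mpls_in_udp(uint16_t port_id)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item_mpls mpls_spec = {
		/* 20-bit label 0x123 in the upper bits of label_tc_s. */
		.label_tc_s = { 0x00, 0x12, 0x30 },
	};
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_UDP },
		{ .type = RTE_FLOW_ITEM_TYPE_MPLS, .spec = &mpls_spec },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_queue queue = { .index = 1 };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_error flow_err;

	/* The default MPLS mask (rte_flow_item_mpls_mask) matches the
	 * label bits only, which is what this patch supports.
	 */
	return rte_flow_validate(port_id, &attr, pattern, actions, &flow_err);
}
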
Signed-off-by: Xueming Li <xuemingl@mellanox.com>
---
 drivers/net/mlx5/mlx5_flow.c | 122 ++++++++++++++++++++++++++++++++++++-------
 1 file changed, 104 insertions(+), 18 deletions(-)

diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index bda1a58..00c705a 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -98,6 +98,11 @@ struct ibv_flow_spec_counter_action {
 		       const void *default_mask,
 		       void *data);
 
+static int
+mlx5_flow_create_mpls(const struct rte_flow_item *item,
+		      const void *default_mask,
+		      void *data);
+
 struct mlx5_flow_parse;
 
 static void
@@ -246,21 +251,29 @@ struct rte_flow {
 #define IS_TUNNEL(type) ( \
 	(type) == RTE_FLOW_ITEM_TYPE_VXLAN || \
 	(type) == RTE_FLOW_ITEM_TYPE_VXLAN_GPE || \
+	(type) == RTE_FLOW_ITEM_TYPE_MPLS || \
 	(type) == RTE_FLOW_ITEM_TYPE_GRE)
 
-const uint32_t rte_ptype[] = {
-	[RTE_FLOW_ITEM_TYPE_VXLAN] = RTE_PTYPE_TUNNEL_VXLAN |
-				     RTE_PTYPE_L4_UDP,
-	[RTE_FLOW_ITEM_TYPE_VXLAN_GPE] = RTE_PTYPE_TUNNEL_VXLAN_GPE |
-					 RTE_PTYPE_L4_UDP,
-	[RTE_FLOW_ITEM_TYPE_GRE] = RTE_PTYPE_TUNNEL_GRE,
+const uint32_t flow2ptype[] = {
+	[RTE_FLOW_ITEM_TYPE_VXLAN] = RTE_PTYPE_TUNNEL_VXLAN,
+	[RTE_FLOW_ITEM_TYPE_VXLAN_GPE] = RTE_PTYPE_TUNNEL_VXLAN_GPE,
+	[RTE_FLOW_ITEM_TYPE_GRE] = RTE_PTYPE_TUNNEL_GRE,
+	[RTE_FLOW_ITEM_TYPE_MPLS] = RTE_PTYPE_TUNNEL_MPLS_IN_GRE,
 };
 
 #define PTYPE_TUN(t) ((RTE_PTYPE_TUNNEL_MASK & (t)) >> 12)
-static const uint32_t ptype_flow_type[] = {
-	[PTYPE_TUN(RTE_PTYPE_TUNNEL_VXLAN)] = RTE_FLOW_ITEM_TYPE_VXLAN,
-	[PTYPE_TUN(RTE_PTYPE_TUNNEL_VXLAN_GPE)] = RTE_FLOW_ITEM_TYPE_VXLAN_GPE,
-	[PTYPE_TUN(RTE_PTYPE_TUNNEL_GRE)] = RTE_FLOW_ITEM_TYPE_GRE,
+
+const uint32_t ptype2ptype[] = {
+	[PTYPE_TUN(RTE_PTYPE_TUNNEL_VXLAN)] = RTE_PTYPE_TUNNEL_VXLAN |
+					      RTE_PTYPE_L4_UDP,
+	[PTYPE_TUN(RTE_PTYPE_TUNNEL_VXLAN_GPE)] = RTE_PTYPE_TUNNEL_VXLAN_GPE |
+						  RTE_PTYPE_L4_UDP,
+	[PTYPE_TUN(RTE_PTYPE_TUNNEL_GRE)] = RTE_PTYPE_TUNNEL_GRE,
+	[PTYPE_TUN(RTE_PTYPE_TUNNEL_MPLS_IN_GRE)] =
+		RTE_PTYPE_TUNNEL_MPLS_IN_GRE,
+	[PTYPE_TUN(RTE_PTYPE_TUNNEL_MPLS_IN_UDP)] =
+		RTE_PTYPE_TUNNEL_MPLS_IN_GRE |
+		RTE_PTYPE_L4_UDP,
 };
 
 /** Structure to generate a simple graph of layers supported by the NIC. */
@@ -395,7 +408,8 @@ struct mlx5_flow_items {
 	},
 	[RTE_FLOW_ITEM_TYPE_UDP] = {
 		.items = ITEMS(RTE_FLOW_ITEM_TYPE_VXLAN,
-			       RTE_FLOW_ITEM_TYPE_VXLAN_GPE),
+			       RTE_FLOW_ITEM_TYPE_VXLAN_GPE,
+			       RTE_FLOW_ITEM_TYPE_MPLS),
 		.actions = valid_actions,
 		.mask = &(const struct rte_flow_item_udp){
 			.hdr = {
@@ -423,7 +437,8 @@ struct mlx5_flow_items {
 	},
 	[RTE_FLOW_ITEM_TYPE_GRE] = {
 		.items = ITEMS(RTE_FLOW_ITEM_TYPE_IPV4,
-			       RTE_FLOW_ITEM_TYPE_IPV6),
+			       RTE_FLOW_ITEM_TYPE_IPV6,
+			       RTE_FLOW_ITEM_TYPE_MPLS),
 		.actions = valid_actions,
 		.mask = &(const struct rte_flow_item_gre){
 			.protocol = -1,
@@ -459,6 +474,19 @@ struct mlx5_flow_items {
 		.convert = mlx5_flow_create_vxlan_gpe,
 		.dst_sz = sizeof(struct ibv_flow_spec_tunnel),
 	},
+	[RTE_FLOW_ITEM_TYPE_MPLS] = {
+		.items = ITEMS(RTE_FLOW_ITEM_TYPE_ETH,
+			       RTE_FLOW_ITEM_TYPE_IPV4,
+			       RTE_FLOW_ITEM_TYPE_IPV6),
+		.actions = valid_actions,
+		.mask = &(const struct rte_flow_item_mpls){
+			.label_tc_s = "\xff\xff\xf0",
+		},
+		.default_mask = &rte_flow_item_mpls_mask,
+		.mask_sz = sizeof(struct rte_flow_item_mpls),
+		.convert = mlx5_flow_create_mpls,
+		.dst_sz = sizeof(struct ibv_flow_spec_tunnel),
+	},
 };
 
 /** Structure to pass to the conversion function.
  */
@@ -908,7 +936,9 @@ struct ibv_spec_header {
 			if (err)
 				goto exit_item_not_supported;
 			if (IS_TUNNEL(items->type)) {
-				if (parser->tunnel) {
+				if (parser->tunnel &&
+				    !(parser->tunnel == RTE_PTYPE_TUNNEL_GRE &&
+				      items->type == RTE_FLOW_ITEM_TYPE_MPLS)) {
 					rte_flow_error_set(error, ENOTSUP,
 							   RTE_FLOW_ERROR_TYPE_ITEM,
 							   items,
@@ -929,7 +959,7 @@ struct ibv_spec_header {
 						   items,
 						   "Cannot support tunnel inner RSS");
 			parser->inner = IBV_FLOW_SPEC_INNER;
-			parser->tunnel = rte_ptype[items->type];
+			parser->tunnel = flow2ptype[items->type];
 			parser->layer = HASH_RXQ_TUNNEL;
 		} else {
 			parser->layer = HASH_RXQ_ETH;
@@ -1726,7 +1756,7 @@ struct ibv_spec_header {
 
 	id.vni[0] = 0;
 	parser->inner = IBV_FLOW_SPEC_INNER;
-	parser->tunnel = rte_ptype[item->type];
+	parser->tunnel = ptype2ptype[PTYPE_TUN(RTE_PTYPE_TUNNEL_VXLAN)];
 	parser->out_layer = parser->layer;
 	parser->layer = HASH_RXQ_TUNNEL;
 	if (spec) {
@@ -1788,7 +1818,7 @@ struct ibv_spec_header {
 
 	id.vni[0] = 0;
 	parser->inner = IBV_FLOW_SPEC_INNER;
-	parser->tunnel = rte_ptype[item->type];
+	parser->tunnel = ptype2ptype[PTYPE_TUN(RTE_PTYPE_TUNNEL_VXLAN_GPE)];
 	parser->out_layer = parser->layer;
 	parser->layer = HASH_RXQ_TUNNEL;
 	if (spec) {
@@ -1827,6 +1857,62 @@ struct ibv_spec_header {
 }
 
 /**
+ * Convert MPLS item to Verbs specification.
+ * Tunnel types currently supported are MPLS-in-GRE and MPLS-in-UDP.
+ *
+ * @param item[in]
+ *   Item specification.
+ * @param default_mask[in]
+ *   Default bit-masks to use when item->mask is not provided.
+ * @param data[in, out]
+ *   User structure.
+ */
+static int
+mlx5_flow_create_mpls(const struct rte_flow_item *item,
+		      const void *default_mask,
+		      void *data)
+{
+	const struct rte_flow_item_mpls *spec = item->spec;
+	const struct rte_flow_item_mpls *mask = item->mask;
+	struct mlx5_flow_parse *parser = (struct mlx5_flow_parse *)data;
+	unsigned int size = sizeof(struct ibv_flow_spec_tunnel);
+	struct ibv_flow_spec_tunnel mpls = {
+		.type = parser->inner | IBV_FLOW_SPEC_VXLAN_TUNNEL,
+		.size = size,
+	};
+	union vni {
+		uint32_t vlan_id;
+		uint8_t vni[4];
+	} id;
+
+	id.vni[0] = 0;
+	parser->inner = IBV_FLOW_SPEC_INNER;
+	if (parser->layer == HASH_RXQ_UDPV4 ||
+	    parser->layer == HASH_RXQ_UDPV6) {
+		parser->tunnel =
+			ptype2ptype[PTYPE_TUN(RTE_PTYPE_TUNNEL_MPLS_IN_UDP)];
+		parser->out_layer = parser->layer;
+	} else {
+		parser->tunnel =
+			ptype2ptype[PTYPE_TUN(RTE_PTYPE_TUNNEL_MPLS_IN_GRE)];
+	}
+	parser->layer = HASH_RXQ_TUNNEL;
+	if (spec) {
+		if (!mask)
+			mask = default_mask;
+		memcpy(&id.vni[1], spec->label_tc_s, 3);
+		mpls.val.tunnel_id = id.vlan_id;
+		memcpy(&id.vni[1], mask->label_tc_s, 3);
+		mpls.mask.tunnel_id = id.vlan_id;
+		/* Remove unwanted bits from values. */
+		mpls.val.tunnel_id &= mpls.mask.tunnel_id;
+	}
+
+	mlx5_flow_create_copy(parser, &mpls, size);
+	return 0;
+}
+
+/**
  * Convert GRE item to Verbs specification.
  *
  * @param item[in]
@@ -1851,7 +1937,7 @@ struct ibv_spec_header {
 	(void)item;
 	(void)default_mask;
 	parser->inner = IBV_FLOW_SPEC_INNER;
-	parser->tunnel = rte_ptype[item->type];
+	parser->tunnel = ptype2ptype[PTYPE_TUN(RTE_PTYPE_TUNNEL_GRE)];
 	parser->out_layer = parser->layer;
 	parser->layer = HASH_RXQ_TUNNEL;
 	mlx5_flow_create_copy(parser, &tunnel, size);
@@ -2325,7 +2411,7 @@ struct rte_flow *
 	}
 	/* Keep same if more than one tunnel types left. */
 	if (types == 1)
-		rxq->tunnel = rte_ptype[ptype_flow_type[last]];
+		rxq->tunnel = ptype2ptype[last];
 	else if (types == 0)
 		/* No tunnel type left. */
 		rxq->tunnel = 0;
 }
-- 
1.8.3.1