From: Nelio Laranjeiro
To: dev@dpdk.org, Adrien Mazarguil, Yongseok Koh
Date: Wed, 27 Jun 2018 17:07:48 +0200
Message-Id: <723a4eb5db839302778ca3fb0894dbe24f5acd20.1530111623.git.nelio.laranjeiro@6wind.com>
X-Mailer: git-send-email 2.18.0
Subject: [dpdk-dev] [PATCH v2 16/20] net/mlx5: add flow VXLAN item

Signed-off-by: Nelio Laranjeiro
---
 drivers/net/mlx5/mlx5_flow.c | 242 +++++++++++++++++++++++++++++------
 1 file changed, 202 insertions(+), 40 deletions(-)

diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index eedf0c461..daf5b9b5a 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -35,18 +35,45 @@ extern const struct eth_dev_ops mlx5_dev_ops;
 
 extern const struct eth_dev_ops mlx5_dev_ops_isolate;
 
-/* Pattern Layer bits. */
+/* Pattern outer Layer bits. */
 #define MLX5_FLOW_LAYER_OUTER_L2 (1u << 0)
 #define MLX5_FLOW_LAYER_OUTER_L3_IPV4 (1u << 1)
 #define MLX5_FLOW_LAYER_OUTER_L3_IPV6 (1u << 2)
 #define MLX5_FLOW_LAYER_OUTER_L4_UDP (1u << 3)
 #define MLX5_FLOW_LAYER_OUTER_L4_TCP (1u << 4)
 #define MLX5_FLOW_LAYER_OUTER_VLAN (1u << 5)
-/* Masks. */
+
+/* Pattern inner Layer bits. */
+#define MLX5_FLOW_LAYER_INNER_L2 (1u << 6)
+#define MLX5_FLOW_LAYER_INNER_L3_IPV4 (1u << 7)
+#define MLX5_FLOW_LAYER_INNER_L3_IPV6 (1u << 8)
+#define MLX5_FLOW_LAYER_INNER_L4_UDP (1u << 9)
+#define MLX5_FLOW_LAYER_INNER_L4_TCP (1u << 10)
+#define MLX5_FLOW_LAYER_INNER_VLAN (1u << 11)
+
+/* Pattern tunnel Layer bits. */
+#define MLX5_FLOW_LAYER_VXLAN (1u << 12)
+
+/* Outer Masks. */
 #define MLX5_FLOW_LAYER_OUTER_L3 \
         (MLX5_FLOW_LAYER_OUTER_L3_IPV4 | MLX5_FLOW_LAYER_OUTER_L3_IPV6)
 #define MLX5_FLOW_LAYER_OUTER_L4 \
         (MLX5_FLOW_LAYER_OUTER_L4_UDP | MLX5_FLOW_LAYER_OUTER_L4_TCP)
+#define MLX5_FLOW_LAYER_OUTER \
+        (MLX5_FLOW_LAYER_OUTER_L2 | MLX5_FLOW_LAYER_OUTER_L3 | \
+         MLX5_FLOW_LAYER_OUTER_L4)
+
+/* Tunnel masks. */
+#define MLX5_FLOW_LAYER_TUNNEL MLX5_FLOW_LAYER_VXLAN
+
+/* Inner Masks. */
+#define MLX5_FLOW_LAYER_INNER_L3 \
+        (MLX5_FLOW_LAYER_INNER_L3_IPV4 | MLX5_FLOW_LAYER_INNER_L3_IPV6)
+#define MLX5_FLOW_LAYER_INNER_L4 \
+        (MLX5_FLOW_LAYER_INNER_L4_UDP | MLX5_FLOW_LAYER_INNER_L4_TCP)
+#define MLX5_FLOW_LAYER_INNER \
+        (MLX5_FLOW_LAYER_INNER_L2 | MLX5_FLOW_LAYER_INNER_L3 | \
+         MLX5_FLOW_LAYER_INNER_L4)
 
 /* Action fate on the packet. */
 #define MLX5_FLOW_FATE_DROP (1u << 0)
@@ -406,10 +433,14 @@ mlx5_flow_print(struct rte_flow *flow __rte_unused)
         LIST_FOREACH(verbs, &flow->verbs, next) {
                 uint32_t layers = flow->layers | verbs->layers;
 
-                fprintf(stdout, " layers: %s/%s/%s\n",
+                fprintf(stdout, " layers: %s/%s/%s/%s/%s/%s/%s\n",
                         layers & MLX5_FLOW_LAYER_OUTER_L2 ? "l2" : "-",
                         layers & MLX5_FLOW_LAYER_OUTER_L3 ? "l3" : "-",
-                        layers & MLX5_FLOW_LAYER_OUTER_L4 ? "l4" : "-");
+                        layers & MLX5_FLOW_LAYER_OUTER_L4 ? "l4" : "-",
+                        layers & MLX5_FLOW_LAYER_TUNNEL ? "T" : "-",
+                        layers & MLX5_FLOW_LAYER_INNER_L2 ? "l2" : "-",
+                        layers & MLX5_FLOW_LAYER_INNER_L3 ? "l3" : "-",
+                        layers & MLX5_FLOW_LAYER_INNER_L4 ? "l4" : "-");
                 if (verbs->attr) {
                         struct ibv_spec_header *hdr =
                                 (struct ibv_spec_header *)verbs->specs;
@@ -634,16 +665,18 @@ mlx5_flow_item_eth(const struct rte_flow_item *item, struct rte_flow *flow,
                 .src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
                 .type = RTE_BE16(0xffff),
         };
+        const uint32_t layers = mlx5_flow_layers(flow);
+        const int tunnel = !!(layers & MLX5_FLOW_LAYER_TUNNEL);
         const unsigned int size = sizeof(struct ibv_flow_spec_eth);
         struct ibv_flow_spec_eth eth = {
-                .type = IBV_FLOW_SPEC_ETH,
+                .type = IBV_FLOW_SPEC_ETH | (tunnel ? IBV_FLOW_SPEC_INNER : 0),
                 .size = size,
         };
-        const uint32_t layers = mlx5_flow_layers(flow);
         int ret;
 
         if (!flow->expand) {
-                if (layers & MLX5_FLOW_LAYER_OUTER_L2)
+                if (layers & (tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
+                              MLX5_FLOW_LAYER_OUTER_L2))
                         return rte_flow_error_set(error, ENOTSUP,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
@@ -658,7 +691,8 @@ mlx5_flow_item_eth(const struct rte_flow_item *item, struct rte_flow *flow,
                 if (ret)
                         return ret;
         }
-        mlx5_flow_layers_update(flow, MLX5_FLOW_LAYER_OUTER_L2);
+        mlx5_flow_layers_update(flow, tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
+                                      MLX5_FLOW_LAYER_OUTER_L2);
         if (size > flow_size)
                 return size;
         if (spec) {
@@ -694,7 +728,7 @@ mlx5_flow_item_vlan_update(struct ibv_flow_attr *attr,
                            struct ibv_flow_spec_eth *eth)
 {
         unsigned int i;
-        enum ibv_flow_spec_type search = IBV_FLOW_SPEC_ETH;
+        const enum ibv_flow_spec_type search = eth->type;
         struct ibv_spec_header *hdr = (struct ibv_spec_header *)
                 ((uint8_t *)attr + sizeof(struct ibv_flow_attr));
 
@@ -737,17 +771,20 @@ mlx5_flow_item_vlan(const struct rte_flow_item *item, struct rte_flow *flow,
                 .tci = RTE_BE16(0x0fff),
         };
         unsigned int size = sizeof(struct ibv_flow_spec_eth);
-        struct mlx5_flow_verbs *verbs = flow->cur_verbs;
+        const uint32_t layers = mlx5_flow_layers(flow);
+        const int tunnel = !!(layers & MLX5_FLOW_LAYER_TUNNEL);
         struct ibv_flow_spec_eth eth = {
-                .type = IBV_FLOW_SPEC_ETH,
+                .type = IBV_FLOW_SPEC_ETH | (tunnel ? IBV_FLOW_SPEC_INNER : 0),
                 .size = size,
         };
         int ret;
-        const uint32_t lm = MLX5_FLOW_LAYER_OUTER_L3 |
-                MLX5_FLOW_LAYER_OUTER_L4;
-        const uint32_t vlanm = MLX5_FLOW_LAYER_OUTER_VLAN;
-        const uint32_t l2m = MLX5_FLOW_LAYER_OUTER_L2;
-        const uint32_t layers = mlx5_flow_layers(flow);
+        const uint32_t lm = tunnel ? (MLX5_FLOW_LAYER_INNER_L3 |
+                                      MLX5_FLOW_LAYER_INNER_L4) :
+                (MLX5_FLOW_LAYER_OUTER_L3 | MLX5_FLOW_LAYER_OUTER_L4);
+        const uint32_t vlanm = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
+                MLX5_FLOW_LAYER_OUTER_VLAN;
+        const uint32_t l2m = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
+                MLX5_FLOW_LAYER_OUTER_L2;
 
         if (!flow->expand) {
                 if (layers & vlanm)
@@ -801,12 +838,17 @@ mlx5_flow_item_vlan(const struct rte_flow_item *item, struct rte_flow *flow,
                 if (size <= flow_size)
                         mlx5_flow_spec_verbs_add(flow, &eth, size);
         } else {
-                if (verbs->attr)
-                        mlx5_flow_item_vlan_update(verbs->attr, &eth);
+                if (flow->cur_verbs)
+                        mlx5_flow_item_vlan_update(flow->cur_verbs->attr,
+                                                   &eth);
                 size = 0; /**< Only an update is done in eth specification. */
         }
-        mlx5_flow_layers_update(flow, MLX5_FLOW_LAYER_OUTER_L2 |
-                                MLX5_FLOW_LAYER_OUTER_VLAN);
+        mlx5_flow_layers_update(flow,
+                                tunnel ?
+                                (MLX5_FLOW_LAYER_INNER_L2 |
+                                 MLX5_FLOW_LAYER_INNER_VLAN) :
+                                (MLX5_FLOW_LAYER_OUTER_L2 |
+                                 MLX5_FLOW_LAYER_OUTER_VLAN));
         return size;
 }
 
@@ -840,22 +882,26 @@ mlx5_flow_item_ipv4(const struct rte_flow_item *item, struct rte_flow *flow,
                         .next_proto_id = 0xff,
                 },
         };
+        const uint32_t layers = mlx5_flow_layers(flow);
+        const int tunnel = !!(layers & MLX5_FLOW_LAYER_TUNNEL);
         unsigned int size = sizeof(struct ibv_flow_spec_ipv4_ext);
         struct ibv_flow_spec_ipv4_ext ipv4 = {
-                .type = IBV_FLOW_SPEC_IPV4_EXT,
+                .type = IBV_FLOW_SPEC_IPV4_EXT |
+                        (tunnel ? IBV_FLOW_SPEC_INNER : 0),
                 .size = size,
         };
         int ret;
-        const uint32_t layers = mlx5_flow_layers(flow);
 
         if (!flow->expand) {
-                if (layers & MLX5_FLOW_LAYER_OUTER_L3)
+                if (layers & (tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
+                              MLX5_FLOW_LAYER_OUTER_L3))
                         return rte_flow_error_set(error, ENOTSUP,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "multiple L3 layers not"
                                                   " supported");
-                else if (layers & MLX5_FLOW_LAYER_OUTER_L4)
+                else if (layers & (tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
+                                   MLX5_FLOW_LAYER_OUTER_L4))
                         return rte_flow_error_set(error, ENOTSUP,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
@@ -870,7 +916,9 @@ mlx5_flow_item_ipv4(const struct rte_flow_item *item, struct rte_flow *flow,
                 if (ret < 0)
                         return ret;
         }
-        mlx5_flow_layers_update(flow, MLX5_FLOW_LAYER_OUTER_L3_IPV4);
+        mlx5_flow_layers_update(flow,
+                                tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
+                                MLX5_FLOW_LAYER_OUTER_L3_IPV4);
         if (size > flow_size)
                 return size;
         if (spec) {
@@ -931,22 +979,25 @@ mlx5_flow_item_ipv6(const struct rte_flow_item *item, struct rte_flow *flow,
                         .hop_limits = 0xff,
                 },
         };
+        const uint32_t layers = mlx5_flow_layers(flow);
+        const int tunnel = !!(layers & MLX5_FLOW_LAYER_TUNNEL);
         unsigned int size = sizeof(struct ibv_flow_spec_ipv6);
         struct ibv_flow_spec_ipv6 ipv6 = {
-                .type = IBV_FLOW_SPEC_IPV6,
+                .type = IBV_FLOW_SPEC_IPV6 | (tunnel ? IBV_FLOW_SPEC_INNER : 0),
                 .size = size,
         };
         int ret;
-        const uint32_t layers = mlx5_flow_layers(flow);
 
         if (!flow->expand) {
-                if (layers & MLX5_FLOW_LAYER_OUTER_L3)
+                if (layers & (tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
+                              MLX5_FLOW_LAYER_OUTER_L3))
                         return rte_flow_error_set(error, ENOTSUP,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "multiple L3 layers not"
                                                   " supported");
-                else if (layers & MLX5_FLOW_LAYER_OUTER_L4)
+                else if (layers & (tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
+                                   MLX5_FLOW_LAYER_OUTER_L4))
                         return rte_flow_error_set(error, ENOTSUP,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
@@ -961,7 +1012,9 @@ mlx5_flow_item_ipv6(const struct rte_flow_item *item, struct rte_flow *flow,
                 if (ret < 0)
                         return ret;
         }
-        mlx5_flow_layers_update(flow, MLX5_FLOW_LAYER_OUTER_L3_IPV6);
+        mlx5_flow_layers_update(flow,
+                                tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
+                                MLX5_FLOW_LAYER_OUTER_L3_IPV6);
         if (size > flow_size)
                 return size;
         if (spec) {
@@ -1029,22 +1082,25 @@ mlx5_flow_item_udp(const struct rte_flow_item *item, struct rte_flow *flow,
 {
         const struct rte_flow_item_udp *spec = item->spec;
         const struct rte_flow_item_udp *mask = item->mask;
+        const uint32_t layers = mlx5_flow_layers(flow);
+        const int tunnel = !!(layers & MLX5_FLOW_LAYER_TUNNEL);
         unsigned int size = sizeof(struct ibv_flow_spec_tcp_udp);
         struct ibv_flow_spec_tcp_udp udp = {
-                .type = IBV_FLOW_SPEC_UDP,
+                .type = IBV_FLOW_SPEC_UDP | (tunnel ? IBV_FLOW_SPEC_INNER : 0),
                 .size = size,
         };
         int ret;
-        const uint32_t layers = mlx5_flow_layers(flow);
 
         if (!flow->expand) {
-                if (!(layers & MLX5_FLOW_LAYER_OUTER_L3))
+                if (!(layers & (tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
+                                MLX5_FLOW_LAYER_OUTER_L3)))
                         return rte_flow_error_set(error, ENOTSUP,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "L3 is mandatory to filter"
                                                   " on L4");
-                if (layers & MLX5_FLOW_LAYER_OUTER_L4)
+                if (layers & (tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
+                              MLX5_FLOW_LAYER_OUTER_L4))
                         return rte_flow_error_set(error, ENOTSUP,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
@@ -1059,7 +1115,9 @@ mlx5_flow_item_udp(const struct rte_flow_item *item, struct rte_flow *flow,
                 if (ret < 0)
                         return ret;
         }
-        mlx5_flow_layers_update(flow, MLX5_FLOW_LAYER_OUTER_L4_UDP);
+        mlx5_flow_layers_update(flow,
+                                tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
+                                MLX5_FLOW_LAYER_OUTER_L4_UDP);
         if (size > flow_size)
                 return size;
         if (spec) {
@@ -1097,22 +1155,25 @@ mlx5_flow_item_tcp(const struct rte_flow_item *item, struct rte_flow *flow,
 {
         const struct rte_flow_item_tcp *spec = item->spec;
         const struct rte_flow_item_tcp *mask = item->mask;
+        const uint32_t layers = mlx5_flow_layers(flow);
+        const int tunnel = !!(layers & MLX5_FLOW_LAYER_TUNNEL);
         unsigned int size = sizeof(struct ibv_flow_spec_tcp_udp);
         struct ibv_flow_spec_tcp_udp tcp = {
-                .type = IBV_FLOW_SPEC_TCP,
+                .type = IBV_FLOW_SPEC_TCP | (tunnel ? IBV_FLOW_SPEC_INNER : 0),
                 .size = size,
         };
         int ret;
-        const uint32_t layers = mlx5_flow_layers(flow);
 
         if (!flow->expand) {
-                if (!(layers & MLX5_FLOW_LAYER_OUTER_L3))
+                if (!(layers & (tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
+                                MLX5_FLOW_LAYER_OUTER_L3)))
                         return rte_flow_error_set(error, ENOTSUP,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "L3 is mandatory to filter"
                                                   " on L4");
-                if (layers & MLX5_FLOW_LAYER_OUTER_L4)
+                if (layers & (tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
+                              MLX5_FLOW_LAYER_OUTER_L4))
                         return rte_flow_error_set(error, ENOTSUP,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
@@ -1127,7 +1188,9 @@ mlx5_flow_item_tcp(const struct rte_flow_item *item, struct rte_flow *flow,
                 if (ret < 0)
                         return ret;
         }
-        mlx5_flow_layers_update(flow, MLX5_FLOW_LAYER_OUTER_L4_TCP);
+        mlx5_flow_layers_update(flow,
+                                tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
+                                MLX5_FLOW_LAYER_OUTER_L4_TCP);
         if (size > flow_size)
                 return size;
         if (spec) {
@@ -1143,6 +1206,102 @@ mlx5_flow_item_tcp(const struct rte_flow_item *item, struct rte_flow *flow,
         return size;
 }
 
+/**
+ * Validate VXLAN layer and possibly create the Verbs specification.
+ *
+ * @param item[in]
+ *   Item specification.
+ * @param flow[in, out]
+ *   Pointer to flow structure.
+ * @param flow_size[in]
+ *   Size in bytes of the available space to store the flow information.
+ * @param error
+ *   Pointer to error structure.
+ *
+ * @return
+ *   size in bytes necessary for the conversion, a negative errno value
+ *   otherwise and rte_errno is set.
+ */
+static int
+mlx5_flow_item_vxlan(const struct rte_flow_item *item, struct rte_flow *flow,
+                     const size_t flow_size, struct rte_flow_error *error)
+{
+        const struct rte_flow_item_vxlan *spec = item->spec;
+        const struct rte_flow_item_vxlan *mask = item->mask;
+        const uint32_t layers = mlx5_flow_layers(flow);
+        unsigned int size = sizeof(struct ibv_flow_spec_tunnel);
+        struct ibv_flow_spec_tunnel vxlan = {
+                .type = IBV_FLOW_SPEC_VXLAN_TUNNEL,
+                .size = size,
+        };
+        int ret;
+        union vni {
+                uint32_t vlan_id;
+                uint8_t vni[4];
+        } id = { .vlan_id = 0, };
+
+        if (!flow->expand) {
+                if (layers & MLX5_FLOW_LAYER_TUNNEL)
+                        return rte_flow_error_set(error, ENOTSUP,
+                                                  RTE_FLOW_ERROR_TYPE_ITEM,
+                                                  item,
+                                                  "a tunnel is already"
+                                                  " present");
+                /*
+                 * Verify only UDPv4 is present as defined in
+                 * https://tools.ietf.org/html/rfc7348
+                 */
+                if (!(layers & MLX5_FLOW_LAYER_OUTER_L4_UDP))
+                        return rte_flow_error_set(error, ENOTSUP,
+                                                  RTE_FLOW_ERROR_TYPE_ITEM,
+                                                  item,
+                                                  "no outer UDP layer found");
+                if (!mask)
+                        mask = &rte_flow_item_vxlan_mask;
+                ret = mlx5_flow_item_validate
+                        (item, (const uint8_t *)mask,
+                         (const uint8_t *)&rte_flow_item_vxlan_mask,
+                         sizeof(struct rte_flow_item_vxlan), error);
+                if (ret < 0)
+                        return ret;
+        }
+        if (spec) {
+                memcpy(&id.vni[1], spec->vni, 3);
+                vxlan.val.tunnel_id = id.vlan_id;
+                memcpy(&id.vni[1], mask->vni, 3);
+                vxlan.mask.tunnel_id = id.vlan_id;
+                /* Remove unwanted bits from values. */
+                vxlan.val.tunnel_id &= vxlan.mask.tunnel_id;
+        }
+        if (!flow->expand) {
+                /*
+                 * Tunnel id 0 is equivalent to not adding a VXLAN layer: if
+                 * only this layer is defined in the Verbs specification, it
+                 * is interpreted as a wildcard and all packets will match
+                 * this rule; if it follows a full stack layer (ex: eth /
+                 * ipv4 / udp), all packets matching the layers before will
+                 * also match this rule.  To avoid such a situation, VNI 0
+                 * is currently refused.
+                 */
+                if (!vxlan.val.tunnel_id)
+                        return rte_flow_error_set(error, EINVAL,
+                                                  RTE_FLOW_ERROR_TYPE_ITEM,
+                                                  item,
+                                                  "VXLAN vni cannot be 0");
+                if (!(layers & MLX5_FLOW_LAYER_OUTER))
+                        return rte_flow_error_set(error, EINVAL,
+                                                  RTE_FLOW_ERROR_TYPE_ITEM,
+                                                  item,
+                                                  "VXLAN tunnel must be fully"
+                                                  " defined");
+        }
+        if (size <= flow_size)
+                mlx5_flow_spec_verbs_add(flow, &vxlan, size);
+        mlx5_flow_layers_update(flow, MLX5_FLOW_LAYER_VXLAN);
+        flow->ptype = RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L4_UDP;
+        return size;
+}
+
 /**
  * Validate items provided by the user.
  *
@@ -1191,6 +1350,9 @@ mlx5_flow_items(const struct rte_flow_item items[],
                 case RTE_FLOW_ITEM_TYPE_TCP:
                         ret = mlx5_flow_item_tcp(items, flow, remain, error);
                         break;
+                case RTE_FLOW_ITEM_TYPE_VXLAN:
+                        ret = mlx5_flow_item_vxlan(items, flow, remain, error);
+                        break;
                 default:
                         return rte_flow_error_set(error, ENOTSUP,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
-- 
2.18.0
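
A note on the VNI handling in mlx5_flow_item_vxlan() above: the handler copies
the 3-byte VXLAN VNI into bytes 1..3 of a 4-byte union and hands the resulting
32-bit value to Verbs as tunnel_id, then ANDs the value with the mask so only
the requested bits are matched. The short standalone sketch below mirrors that
packing outside the driver; the helper name vni_to_tunnel_id() and the sample
VNI values are illustrative only and do not exist in the driver.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Same layout as the anonymous "union vni" used in mlx5_flow_item_vxlan(). */
union vni {
        uint32_t vlan_id;
        uint8_t vni[4];
};

/* Hypothetical helper: pack a 3-byte VNI into a 32-bit Verbs tunnel id. */
static uint32_t
vni_to_tunnel_id(const uint8_t *vni)
{
        union vni id = { .vlan_id = 0, };

        /* Byte 0 stays zero; the 24-bit VNI occupies bytes 1..3. */
        memcpy(&id.vni[1], vni, 3);
        return id.vlan_id;
}

int
main(void)
{
        const uint8_t spec_vni[3] = { 0x12, 0x34, 0x56 }; /* item->spec->vni */
        const uint8_t mask_vni[3] = { 0xff, 0xff, 0xff }; /* item->mask->vni */
        uint32_t val = vni_to_tunnel_id(spec_vni);
        uint32_t mask = vni_to_tunnel_id(mask_vni);

        /* Same post-processing as the patch: drop value bits outside the mask. */
        val &= mask;
        printf("tunnel_id val=0x%08" PRIx32 " mask=0x%08" PRIx32 "\n",
               val, mask);
        return 0;
}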
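
The other recurring pattern in this patch is the tunnel test: each item handler
derives "tunnel" from the layer bits already collected and then checks and sets
the INNER_* rather than the OUTER_* bits (and ORs IBV_FLOW_SPEC_INNER into the
Verbs spec type). The sketch below shows only that selection logic with a
reduced set of the bit definitions (IPv6 bits omitted); ipv4_layer_allowed() is
a made-up stand-in for the validation performed by mlx5_flow_item_ipv4(), not
driver code.

#include <stdint.h>
#include <stdio.h>

/* Subset of the layer bits defined at the top of mlx5_flow.c. */
#define MLX5_FLOW_LAYER_OUTER_L3_IPV4 (1u << 1)
#define MLX5_FLOW_LAYER_OUTER_L4_UDP (1u << 3)
#define MLX5_FLOW_LAYER_INNER_L3_IPV4 (1u << 7)
#define MLX5_FLOW_LAYER_VXLAN (1u << 12)
#define MLX5_FLOW_LAYER_TUNNEL MLX5_FLOW_LAYER_VXLAN
#define MLX5_FLOW_LAYER_OUTER_L3 MLX5_FLOW_LAYER_OUTER_L3_IPV4
#define MLX5_FLOW_LAYER_INNER_L3 MLX5_FLOW_LAYER_INNER_L3_IPV4

/*
 * Hypothetical stand-in for the check done in mlx5_flow_item_ipv4(): once a
 * tunnel bit is present, test the inner L3 bit instead of the outer one.
 */
static int
ipv4_layer_allowed(uint32_t layers)
{
        const int tunnel = !!(layers & MLX5_FLOW_LAYER_TUNNEL);

        if (layers & (tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
                      MLX5_FLOW_LAYER_OUTER_L3))
                return 0; /* "multiple L3 layers not supported" */
        return 1;
}

int
main(void)
{
        /* eth / ipv4 / udp / vxlan already parsed: an inner IPv4 item passes. */
        uint32_t layers = MLX5_FLOW_LAYER_OUTER_L3_IPV4 |
                          MLX5_FLOW_LAYER_OUTER_L4_UDP |
                          MLX5_FLOW_LAYER_VXLAN;

        printf("inner ipv4 allowed: %d\n", ipv4_layer_allowed(layers));
        /* A second IPv4 on the inner side is refused. */
        layers |= MLX5_FLOW_LAYER_INNER_L3_IPV4;
        printf("second inner ipv4 allowed: %d\n", ipv4_layer_allowed(layers));
        return 0;
}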