From: Nelio Laranjeiro
To: dev@dpdk.org, Yongseok Koh
Cc: Adrien Mazarguil
Date: Wed, 11 Jul 2018 09:22:51 +0200
Subject: [dpdk-dev] [PATCH v3 18/21] net/mlx5: add flow VXLAN-GPE item

Add support for matching the VXLAN-GPE item in flow patterns and for
expanding RSS flows through VXLAN-GPE tunnels.  As an Rx queue can now be
used by flows matching different tunnel types, replace the single VXLAN
flow counter with a per tunnel type counter array: the queue tunnel ptype
is set when a single tunnel type uses the queue and cleared when several
tunnel types share it.

Signed-off-by: Nelio Laranjeiro
---
 drivers/net/mlx5/mlx5_flow.c | 219 ++++++++++++++++++++++++++++++++---
 drivers/net/mlx5/mlx5_rxtx.h |   5 +-
 2 files changed, 209 insertions(+), 15 deletions(-)

diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 95507059e..e06df0eb5 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -53,6 +53,7 @@ extern const struct eth_dev_ops mlx5_dev_ops_isolate;
 
 /* Pattern tunnel Layer bits. */
 #define MLX5_FLOW_LAYER_VXLAN (1u << 12)
+#define MLX5_FLOW_LAYER_VXLAN_GPE (1u << 13)
 
 /* Outer Masks. */
 #define MLX5_FLOW_LAYER_OUTER_L3 \
@@ -64,7 +65,8 @@ extern const struct eth_dev_ops mlx5_dev_ops_isolate;
	 MLX5_FLOW_LAYER_OUTER_L4)
 
 /* Tunnel Masks. */
-#define MLX5_FLOW_LAYER_TUNNEL MLX5_FLOW_LAYER_VXLAN
+#define MLX5_FLOW_LAYER_TUNNEL \
+	(MLX5_FLOW_LAYER_VXLAN | MLX5_FLOW_LAYER_VXLAN_GPE)
 
 /* Inner Masks. */
 #define MLX5_FLOW_LAYER_INNER_L3 \
@@ -102,6 +104,7 @@ enum mlx5_expansion {
	MLX5_EXPANSION_OUTER_IPV6_UDP,
	MLX5_EXPANSION_OUTER_IPV6_TCP,
	MLX5_EXPANSION_VXLAN,
+	MLX5_EXPANSION_VXLAN_GPE,
	MLX5_EXPANSION_ETH,
	MLX5_EXPANSION_IPV4,
	MLX5_EXPANSION_IPV4_UDP,
@@ -140,7 +143,8 @@ static const struct rte_flow_expand_node mlx5_support_expansion[] = {
				ETH_RSS_NONFRAG_IPV4_OTHER,
	},
	[MLX5_EXPANSION_OUTER_IPV4_UDP] = {
-		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VXLAN),
+		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VXLAN,
+						 MLX5_EXPANSION_VXLAN_GPE),
		.type = RTE_FLOW_ITEM_TYPE_UDP,
		.rss_types = ETH_RSS_NONFRAG_IPV4_UDP,
	},
@@ -157,7 +161,8 @@ static const struct rte_flow_expand_node mlx5_support_expansion[] = {
				ETH_RSS_NONFRAG_IPV6_OTHER,
	},
	[MLX5_EXPANSION_OUTER_IPV6_UDP] = {
-		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VXLAN),
+		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VXLAN,
+						 MLX5_EXPANSION_VXLAN_GPE),
		.type = RTE_FLOW_ITEM_TYPE_UDP,
		.rss_types = ETH_RSS_NONFRAG_IPV6_UDP,
	},
@@ -169,6 +174,12 @@ static const struct rte_flow_expand_node mlx5_support_expansion[] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH),
		.type = RTE_FLOW_ITEM_TYPE_VXLAN,
	},
+	[MLX5_EXPANSION_VXLAN_GPE] = {
+		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH,
+						 MLX5_EXPANSION_IPV4,
+						 MLX5_EXPANSION_IPV6),
+		.type = RTE_FLOW_ITEM_TYPE_VXLAN_GPE,
+	},
	[MLX5_EXPANSION_ETH] = {
		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4,
						 MLX5_EXPANSION_IPV6),
@@ -236,8 +247,6 @@ struct rte_flow {
	struct mlx5_flow_verbs *cur_verbs;
	/**< Current Verbs flow structure being filled. */
	struct rte_flow_action_rss rss;/**< RSS context. */
-	uint32_t tunnel_ptype;
-	/**< Store tunnel packet type data to store in Rx queue. */
	uint8_t key[MLX5_RSS_HASH_KEY_LEN]; /**< RSS hash key. */
	uint16_t (*queue)[]; /**< Destination queues to redirect traffic to. */
 };
@@ -304,6 +313,23 @@ static const uint32_t priority_map_5[][MLX5_PRIORITY_MAP_MAX] = {
	{ 9, 10, 11 }, { 12, 13, 14 },
 };
 
+/* Tunnel information. */
+struct mlx5_flow_tunnel_info {
+	uint32_t tunnel; /**< Tunnel bit (see MLX5_FLOW_*). */
+	uint32_t ptype; /**< Tunnel Ptype (see RTE_PTYPE_*). */
+};
+
+static struct mlx5_flow_tunnel_info tunnels_info[] = {
+	{
+		.tunnel = MLX5_FLOW_LAYER_VXLAN,
+		.ptype = RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L4_UDP,
+	},
+	{
+		.tunnel = MLX5_FLOW_LAYER_VXLAN_GPE,
+		.ptype = RTE_PTYPE_TUNNEL_VXLAN_GPE | RTE_PTYPE_L4_UDP,
+	},
+};
+
 /**
  * Discover the maximum number of priority available.
  *
@@ -1263,7 +1289,119 @@ mlx5_flow_item_vxlan(const struct rte_flow_item *item, struct rte_flow *flow,
		flow->cur_verbs->attr->priority = MLX5_PRIORITY_MAP_L2;
	}
	flow->layers |= MLX5_FLOW_LAYER_VXLAN;
-	flow->tunnel_ptype = RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L4_UDP;
+	return size;
+}
+
+/**
+ * Convert the @p item into a Verbs specification after ensuring the NIC
+ * will understand and process it correctly.
+ * If the necessary size for the conversion is greater than @p flow_size,
+ * nothing is written in @p flow; the validation is still performed.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ * @param[in] item
+ *   Item specification.
+ * @param[in, out] flow
+ *   Pointer to flow structure.
+ * @param[in] flow_size
+ *   Size in bytes of the available space in @p flow; if too small, nothing is
+ *   written.
+ * @param[out] error
+ *   Pointer to error structure.
+ *
+ * @return
+ *   On success, the number of bytes consumed/necessary; if the returned value
+ *   is less than or equal to @p flow_size, the @p item has been fully
+ *   converted, otherwise another call with the returned memory size is needed.
+ *   On error, a negative errno value is returned and rte_errno is set.
+ */
+static int
+mlx5_flow_item_vxlan_gpe(struct rte_eth_dev *dev,
+			 const struct rte_flow_item *item,
+			 struct rte_flow *flow, const size_t flow_size,
+			 struct rte_flow_error *error)
+{
+	const struct rte_flow_item_vxlan_gpe *spec = item->spec;
+	const struct rte_flow_item_vxlan_gpe *mask = item->mask;
+	unsigned int size = sizeof(struct ibv_flow_spec_tunnel);
+	struct ibv_flow_spec_tunnel vxlan_gpe = {
+		.type = IBV_FLOW_SPEC_VXLAN_TUNNEL,
+		.size = size,
+	};
+	int ret;
+	union vni {
+		uint32_t vlan_id;
+		uint8_t vni[4];
+	} id = { .vlan_id = 0, };
+
+	if (!((struct priv *)dev->data->dev_private)->config.l3_vxlan_en)
+		return rte_flow_error_set(error, ENOTSUP,
+					  RTE_FLOW_ERROR_TYPE_ITEM,
+					  item,
+					  "L3 VXLAN is not enabled by device"
+					  " parameter and/or not configured in"
+					  " firmware");
+	if (flow->layers & MLX5_FLOW_LAYER_TUNNEL)
+		return rte_flow_error_set(error, ENOTSUP,
+					  RTE_FLOW_ERROR_TYPE_ITEM,
+					  item,
+					  "a tunnel is already present");
+	/*
+	 * Verify an outer UDP layer is present as defined in
+	 * https://tools.ietf.org/html/rfc7348
+	 */
+	if (!(flow->layers & MLX5_FLOW_LAYER_OUTER_L4_UDP))
+		return rte_flow_error_set(error, ENOTSUP,
+					  RTE_FLOW_ERROR_TYPE_ITEM,
+					  item,
+					  "no outer UDP layer found");
+	if (!mask)
+		mask = &rte_flow_item_vxlan_gpe_mask;
+	ret = mlx5_flow_item_acceptable
+		(item, (const uint8_t *)mask,
+		 (const uint8_t *)&rte_flow_item_vxlan_gpe_mask,
+		 sizeof(struct rte_flow_item_vxlan_gpe), error);
+	if (ret < 0)
+		return ret;
+	if (spec) {
+		memcpy(&id.vni[1], spec->vni, 3);
+		vxlan_gpe.val.tunnel_id = id.vlan_id;
+		memcpy(&id.vni[1], mask->vni, 3);
+		vxlan_gpe.mask.tunnel_id = id.vlan_id;
+		if (spec->protocol)
+			return rte_flow_error_set
+				(error, EINVAL,
+				 RTE_FLOW_ERROR_TYPE_ITEM,
+				 item,
+				 "VXLAN-GPE protocol not supported");
+		/* Remove unwanted bits from values. */
+		vxlan_gpe.val.tunnel_id &= vxlan_gpe.mask.tunnel_id;
+	}
+	/*
+	 * Tunnel id 0 is equivalent to not adding a VXLAN-GPE layer: alone,
+	 * it is interpreted as a wildcard and all packets will match the
+	 * rule; after a full stack (e.g. eth / ipv4 / udp), all packets
+	 * matching the preceding layers will also match it.  To avoid such
+	 * a situation, VNI 0 is currently refused.
+	 */
+	if (!vxlan_gpe.val.tunnel_id)
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_ITEM,
+					  item,
+					  "VXLAN-GPE vni cannot be 0");
+	if (!(flow->layers & MLX5_FLOW_LAYER_OUTER))
+		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM,
+					  item,
+					  "VXLAN-GPE tunnel must be fully"
+					  " defined");
+	if (size <= flow_size) {
+		mlx5_flow_spec_verbs_add(flow, &vxlan_gpe, size);
+		flow->cur_verbs->attr->priority = MLX5_PRIORITY_MAP_L2;
+	}
+	flow->layers |= MLX5_FLOW_LAYER_VXLAN_GPE;
	return size;
 }
 
@@ -1293,7 +1431,8 @@ mlx5_flow_item_vxlan(const struct rte_flow_item *item, struct rte_flow *flow,
  *   On error, a negative errno value is returned and rte_errno is set.
  */
 static int
-mlx5_flow_items(const struct rte_flow_item pattern[],
+mlx5_flow_items(struct rte_eth_dev *dev,
+		const struct rte_flow_item pattern[],
		struct rte_flow *flow, const size_t flow_size,
		struct rte_flow_error *error)
 {
@@ -1328,6 +1467,10 @@ mlx5_flow_items(const struct rte_flow_item pattern[],
			ret = mlx5_flow_item_vxlan(pattern, flow, remain,
						   error);
			break;
+		case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
+			ret = mlx5_flow_item_vxlan_gpe(dev, pattern, flow,
+						       remain, error);
+			break;
		default:
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ITEM,
@@ -1908,7 +2051,8 @@ mlx5_flow_merge(struct rte_eth_dev *dev, struct rte_flow *flow,
			}
		}
		ret = mlx5_flow_items
-			((const struct rte_flow_item *)
+			(dev,
+			 (const struct rte_flow_item *)
			 &buf->entry[i].pattern[pattern_start_idx],
			 flow,
			 (size < flow_size) ? flow_size - size : 0, error);
@@ -1934,6 +2078,34 @@ mlx5_flow_merge(struct rte_eth_dev *dev, struct rte_flow *flow,
	return size;
 }
 
+/**
+ * Look up and set the tunnel ptype in the Rx queue data.  Only a single
+ * ptype can be used; if several tunnel types are in use on this queue, the
+ * tunnel ptype is cleared.
+ *
+ * @param rxq_ctrl
+ *   Rx queue to update.
+ */
+static void
+mlx5_flow_rxq_tunnel_ptype_update(struct mlx5_rxq_ctrl *rxq_ctrl)
+{
+	unsigned int i;
+	uint32_t tunnel_ptype = 0;
+
+	/* Look up the ptype to use. */
+	for (i = 0; i != MLX5_FLOW_TUNNEL; ++i) {
+		if (!rxq_ctrl->flow_tunnels_n[i])
+			continue;
+		if (!tunnel_ptype) {
+			tunnel_ptype = tunnels_info[i].ptype;
+		} else {
+			tunnel_ptype = 0;
+			break;
+		}
+	}
+	rxq_ctrl->rxq.tunnel = tunnel_ptype;
+}
+
 /**
  * Set the Rx queue flags (Mark/Flag and Tunnel Ptypes) according to the flow.
  *
@@ -1962,8 +2134,17 @@ mlx5_flow_rxq_flags_set(struct rte_eth_dev *dev, struct rte_flow *flow)
			rxq_ctrl->flow_mark_n++;
		}
		if (tunnel) {
-			rxq_ctrl->rxq.tunnel = flow->tunnel_ptype;
-			rxq_ctrl->flow_vxlan_n++;
+			unsigned int j;
+
+			/* Increase the counter matching the flow. */
+			for (j = 0; j != MLX5_FLOW_TUNNEL; ++j) {
+				if ((tunnels_info[j].tunnel & flow->layers) ==
+				    tunnels_info[j].tunnel) {
+					rxq_ctrl->flow_tunnels_n[j]++;
+					break;
+				}
+			}
+			mlx5_flow_rxq_tunnel_ptype_update(rxq_ctrl);
		}
	}
 }
@@ -1998,9 +2179,17 @@ mlx5_flow_rxq_flags_trim(struct rte_eth_dev *dev, struct rte_flow *flow)
			rxq_ctrl->rxq.mark = !!rxq_ctrl->flow_mark_n;
		}
		if (tunnel) {
-			rxq_ctrl->flow_vxlan_n++;
-			if (!rxq_ctrl->flow_vxlan_n)
-				rxq_ctrl->rxq.tunnel = 0;
+			unsigned int j;
+
+			/* Decrease the counter matching the flow. */
+			for (j = 0; j != MLX5_FLOW_TUNNEL; ++j) {
+				if ((tunnels_info[j].tunnel & flow->layers) ==
+				    tunnels_info[j].tunnel) {
+					rxq_ctrl->flow_tunnels_n[j]--;
+					break;
+				}
+			}
+			mlx5_flow_rxq_tunnel_ptype_update(rxq_ctrl);
		}
	}
 }
@@ -2020,6 +2209,7 @@ mlx5_flow_rxq_flags_clear(struct rte_eth_dev *dev)
 
	for (idx = 0, i = 0; idx != priv->rxqs_n; ++i) {
		struct mlx5_rxq_ctrl *rxq_ctrl;
+		unsigned int j;
 
		if (!(*priv->rxqs)[idx])
			continue;
@@ -2027,7 +2217,8 @@ mlx5_flow_rxq_flags_clear(struct rte_eth_dev *dev)
					struct mlx5_rxq_ctrl, rxq);
		rxq_ctrl->flow_mark_n = 0;
		rxq_ctrl->rxq.mark = 0;
-		rxq_ctrl->flow_vxlan_n = 0;
+		for (j = 0; j != MLX5_FLOW_TUNNEL; ++j)
+			rxq_ctrl->flow_tunnels_n[j] = 0;
		rxq_ctrl->rxq.tunnel = 0;
		++idx;
	}
diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h
index ae9b564dd..e97f5766c 100644
--- a/drivers/net/mlx5/mlx5_rxtx.h
+++ b/drivers/net/mlx5/mlx5_rxtx.h
@@ -34,6 +34,9 @@
 #include "mlx5_defs.h"
 #include "mlx5_prm.h"
 
+/* Number of tunnel types supported for matching. */
+#define MLX5_FLOW_TUNNEL 2
+
 struct mlx5_rxq_stats {
	unsigned int idx; /**< Mapping index. */
 #ifdef MLX5_PMD_SOFT_COUNTERS
@@ -139,7 +142,7 @@ struct mlx5_rxq_ctrl {
	unsigned int irq:1; /* Whether IRQ is enabled. */
	uint16_t idx; /* Queue index. */
	uint32_t flow_mark_n; /* Number of Mark/Flag flows using this Queue. */
-	uint32_t flow_vxlan_n; /* Number of VXLAN flows using this queue. */
+	uint32_t flow_tunnels_n[MLX5_FLOW_TUNNEL]; /* Per-tunnel-type flow counters. */
 };
 
 /* Indirection table. */
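
For reference, a minimal usage sketch (not part of the patch) of a flow rule
exercising the new item through the public rte_flow API.  The helper name
vxlan_gpe_flow_create() and the port/queue indices are hypothetical; it
assumes a started mlx5 port probed with the l3_vxlan_en=1 device parameter
(see the check in mlx5_flow_item_vxlan_gpe() above) and DPDK 18.08-era
headers.  It satisfies the validation above by fully defining the outer
stack and using a non-zero VNI:

#include <stdint.h>
#include <rte_flow.h>

static struct rte_flow *
vxlan_gpe_flow_create(uint16_t port_id, struct rte_flow_error *error)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	/* Match VNI 42; a non-zero protocol field would be refused. */
	struct rte_flow_item_vxlan_gpe gpe_spec = { .vni = { 0, 0, 42 } };
	struct rte_flow_item_vxlan_gpe gpe_mask = {
		.vni = { 0xff, 0xff, 0xff },
	};
	/* Fully defined outer stack: eth / ipv4 / udp / vxlan-gpe. */
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_UDP },
		{ .type = RTE_FLOW_ITEM_TYPE_VXLAN_GPE,
		  .spec = &gpe_spec, .mask = &gpe_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	/* Redirect matching packets to Rx queue 0. */
	struct rte_flow_action_queue queue = { .index = 0 };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	return rte_flow_create(port_id, &attr, pattern, actions, error);
}

-- 
2.18.0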