From: Nelio Laranjeiro
To: dev@dpdk.org, Yongseok Koh
Cc: Adrien Mazarguil
Date: Thu, 12 Jul 2018 11:31:02 +0200
Subject: [dpdk-dev] [PATCH v4 16/21] net/mlx5: support inner RSS computation

Signed-off-by: Nelio Laranjeiro
Acked-by: Yongseok Koh
---
 drivers/net/mlx5/mlx5_flow.c | 245 ++++++++++++++++++++++++++---------
 1 file changed, 185 insertions(+), 60 deletions(-)

diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 730360b22..84bd99b3e 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -35,18 +35,42 @@
 extern const struct eth_dev_ops mlx5_dev_ops;
 extern const struct eth_dev_ops mlx5_dev_ops_isolate;
 
-/* Pattern Layer bits. */
+/* Pattern outer Layer bits. */
 #define MLX5_FLOW_LAYER_OUTER_L2 (1u << 0)
 #define MLX5_FLOW_LAYER_OUTER_L3_IPV4 (1u << 1)
 #define MLX5_FLOW_LAYER_OUTER_L3_IPV6 (1u << 2)
 #define MLX5_FLOW_LAYER_OUTER_L4_UDP (1u << 3)
 #define MLX5_FLOW_LAYER_OUTER_L4_TCP (1u << 4)
 #define MLX5_FLOW_LAYER_OUTER_VLAN (1u << 5)
-/* Masks. */
+
+/* Pattern inner Layer bits. */
+#define MLX5_FLOW_LAYER_INNER_L2 (1u << 6)
+#define MLX5_FLOW_LAYER_INNER_L3_IPV4 (1u << 7)
+#define MLX5_FLOW_LAYER_INNER_L3_IPV6 (1u << 8)
+#define MLX5_FLOW_LAYER_INNER_L4_UDP (1u << 9)
+#define MLX5_FLOW_LAYER_INNER_L4_TCP (1u << 10)
+#define MLX5_FLOW_LAYER_INNER_VLAN (1u << 11)
+
+/* Outer Masks. */
 #define MLX5_FLOW_LAYER_OUTER_L3 \
 	(MLX5_FLOW_LAYER_OUTER_L3_IPV4 | MLX5_FLOW_LAYER_OUTER_L3_IPV6)
 #define MLX5_FLOW_LAYER_OUTER_L4 \
 	(MLX5_FLOW_LAYER_OUTER_L4_UDP | MLX5_FLOW_LAYER_OUTER_L4_TCP)
+#define MLX5_FLOW_LAYER_OUTER \
+	(MLX5_FLOW_LAYER_OUTER_L2 | MLX5_FLOW_LAYER_OUTER_L3 | \
+	 MLX5_FLOW_LAYER_OUTER_L4)
+
+/* Tunnel Masks. */
+#define MLX5_FLOW_LAYER_TUNNEL 0
+
+/* Inner Masks. */
+#define MLX5_FLOW_LAYER_INNER_L3 \
+	(MLX5_FLOW_LAYER_INNER_L3_IPV4 | MLX5_FLOW_LAYER_INNER_L3_IPV6)
+#define MLX5_FLOW_LAYER_INNER_L4 \
+	(MLX5_FLOW_LAYER_INNER_L4_UDP | MLX5_FLOW_LAYER_INNER_L4_TCP)
+#define MLX5_FLOW_LAYER_INNER \
+	(MLX5_FLOW_LAYER_INNER_L2 | MLX5_FLOW_LAYER_INNER_L3 | \
+	 MLX5_FLOW_LAYER_INNER_L4)
 
 /* Actions that modify the fate of matching traffic. */
 #define MLX5_FLOW_FATE_DROP (1u << 0)
@@ -66,6 +90,14 @@ extern const struct eth_dev_ops mlx5_dev_ops_isolate;
 
 enum mlx5_expansion {
 	MLX5_EXPANSION_ROOT,
+	MLX5_EXPANSION_ROOT_OUTER,
+	MLX5_EXPANSION_OUTER_ETH,
+	MLX5_EXPANSION_OUTER_IPV4,
+	MLX5_EXPANSION_OUTER_IPV4_UDP,
+	MLX5_EXPANSION_OUTER_IPV4_TCP,
+	MLX5_EXPANSION_OUTER_IPV6,
+	MLX5_EXPANSION_OUTER_IPV6_UDP,
+	MLX5_EXPANSION_OUTER_IPV6_TCP,
 	MLX5_EXPANSION_ETH,
 	MLX5_EXPANSION_IPV4,
 	MLX5_EXPANSION_IPV4_UDP,
@@ -83,6 +115,50 @@ static const struct rte_flow_expand_node mlx5_support_expansion[] = {
 						 MLX5_EXPANSION_IPV6),
 		.type = RTE_FLOW_ITEM_TYPE_END,
 	},
+	[MLX5_EXPANSION_ROOT_OUTER] = {
+		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_ETH,
+						 MLX5_EXPANSION_OUTER_IPV4,
+						 MLX5_EXPANSION_OUTER_IPV6),
+		.type = RTE_FLOW_ITEM_TYPE_END,
+	},
+	[MLX5_EXPANSION_OUTER_ETH] = {
+		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_IPV4,
+						 MLX5_EXPANSION_OUTER_IPV6),
+		.type = RTE_FLOW_ITEM_TYPE_ETH,
+		.rss_types = 0,
+	},
+	[MLX5_EXPANSION_OUTER_IPV4] = {
+		.next = RTE_FLOW_EXPAND_RSS_NEXT
+			(MLX5_EXPANSION_OUTER_IPV4_UDP,
+			 MLX5_EXPANSION_OUTER_IPV4_TCP),
+		.type = RTE_FLOW_ITEM_TYPE_IPV4,
+		.rss_types = ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
+			ETH_RSS_NONFRAG_IPV4_OTHER,
+	},
+	[MLX5_EXPANSION_OUTER_IPV4_UDP] = {
+		.type = RTE_FLOW_ITEM_TYPE_UDP,
+		.rss_types = ETH_RSS_NONFRAG_IPV4_UDP,
+	},
+	[MLX5_EXPANSION_OUTER_IPV4_TCP] = {
+		.type = RTE_FLOW_ITEM_TYPE_TCP,
+		.rss_types = ETH_RSS_NONFRAG_IPV4_TCP,
+	},
+	[MLX5_EXPANSION_OUTER_IPV6] = {
+		.next = RTE_FLOW_EXPAND_RSS_NEXT
+			(MLX5_EXPANSION_OUTER_IPV6_UDP,
+			 MLX5_EXPANSION_OUTER_IPV6_TCP),
+		.type = RTE_FLOW_ITEM_TYPE_IPV6,
+		.rss_types = ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 |
+			ETH_RSS_NONFRAG_IPV6_OTHER,
+	},
+	[MLX5_EXPANSION_OUTER_IPV6_UDP] = {
+		.type = RTE_FLOW_ITEM_TYPE_UDP,
+		.rss_types = ETH_RSS_NONFRAG_IPV6_UDP,
+	},
+	[MLX5_EXPANSION_OUTER_IPV6_TCP] = {
+		.type = RTE_FLOW_ITEM_TYPE_TCP,
+		.rss_types = ETH_RSS_NONFRAG_IPV6_TCP,
+	},
 	[MLX5_EXPANSION_ETH] = {
 		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4,
 						 MLX5_EXPANSION_IPV6),
@@ -453,6 +529,34 @@ mlx5_flow_spec_verbs_add(struct rte_flow *flow, void *src, unsigned int size)
 	verbs->size += size;
 }
 
+/**
+ * Adjust verbs hash fields according to the @p flow information.
+ *
+ * @param[in, out] flow.
+ *   Pointer to flow structure.
+ * @param[in] tunnel
+ *   1 when the hash field is for a tunnel item.
+ * @param[in] layer_types
+ *   ETH_RSS_* types.
+ * @param[in] hash_fields
+ *   Item hash fields.
+ */
+static void
+mlx5_flow_verbs_hashfields_adjust(struct rte_flow *flow, int tunnel __rte_unused,
+				  uint32_t layer_types, uint64_t hash_fields)
+{
+#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
+	hash_fields |= (tunnel ? IBV_RX_HASH_INNER : 0);
+	if (flow->rss.level == 2 && !tunnel)
+		hash_fields = 0;
+	else if (flow->rss.level < 2 && tunnel)
+		hash_fields = 0;
+#endif
+	if (!(flow->rss.types & layer_types))
+		hash_fields = 0;
+	flow->cur_verbs->hash_fields |= hash_fields;
+}
+
 /**
  * Convert the @p item into a Verbs specification after ensuring the NIC
  * will understand and process it correctly.
@@ -486,14 +590,16 @@ mlx5_flow_item_eth(const struct rte_flow_item *item, struct rte_flow *flow,
 		.src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
 		.type = RTE_BE16(0xffff),
 	};
+	const int tunnel = !!(flow->layers & MLX5_FLOW_LAYER_TUNNEL);
 	const unsigned int size = sizeof(struct ibv_flow_spec_eth);
 	struct ibv_flow_spec_eth eth = {
-		.type = IBV_FLOW_SPEC_ETH,
+		.type = IBV_FLOW_SPEC_ETH | (tunnel ? IBV_FLOW_SPEC_INNER : 0),
 		.size = size,
 	};
 	int ret;
 
-	if (flow->layers & MLX5_FLOW_LAYER_OUTER_L2)
+	if (flow->layers & (tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
+			    MLX5_FLOW_LAYER_OUTER_L2))
 		return rte_flow_error_set(error, ENOTSUP,
 					  RTE_FLOW_ERROR_TYPE_ITEM,
 					  item,
@@ -506,7 +612,8 @@ mlx5_flow_item_eth(const struct rte_flow_item *item, struct rte_flow *flow,
 			  error);
 	if (ret)
 		return ret;
-	flow->layers |= MLX5_FLOW_LAYER_OUTER_L2;
+	flow->layers |= tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
+		MLX5_FLOW_LAYER_OUTER_L2;
 	if (size > flow_size)
 		return size;
 	if (spec) {
@@ -543,7 +650,7 @@ mlx5_flow_item_vlan_update(struct ibv_flow_attr *attr,
 			   struct ibv_flow_spec_eth *eth)
 {
 	unsigned int i;
-	enum ibv_flow_spec_type search = IBV_FLOW_SPEC_ETH;
+	const enum ibv_flow_spec_type search = eth->type;
 	struct ibv_spec_header *hdr = (struct ibv_spec_header *)
 		((uint8_t *)attr + sizeof(struct ibv_flow_attr));
 
@@ -596,16 +703,19 @@ mlx5_flow_item_vlan(const struct rte_flow_item *item, struct rte_flow *flow,
 		.inner_type = RTE_BE16(0xffff),
 	};
 	unsigned int size = sizeof(struct ibv_flow_spec_eth);
-	struct mlx5_flow_verbs *verbs = flow->cur_verbs;
+	const int tunnel = !!(flow->layers & MLX5_FLOW_LAYER_TUNNEL);
 	struct ibv_flow_spec_eth eth = {
-		.type = IBV_FLOW_SPEC_ETH,
+		.type = IBV_FLOW_SPEC_ETH | (tunnel ? IBV_FLOW_SPEC_INNER : 0),
 		.size = size,
 	};
 	int ret;
-	const uint32_t l34m = MLX5_FLOW_LAYER_OUTER_L3 |
-			MLX5_FLOW_LAYER_OUTER_L4;
-	const uint32_t vlanm = MLX5_FLOW_LAYER_OUTER_VLAN;
-	const uint32_t l2m = MLX5_FLOW_LAYER_OUTER_L2;
+	const uint32_t l34m = tunnel ? (MLX5_FLOW_LAYER_INNER_L3 |
+					MLX5_FLOW_LAYER_INNER_L4) :
+		(MLX5_FLOW_LAYER_OUTER_L3 | MLX5_FLOW_LAYER_OUTER_L4);
+	const uint32_t vlanm = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
+		MLX5_FLOW_LAYER_OUTER_VLAN;
+	const uint32_t l2m = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
+		MLX5_FLOW_LAYER_OUTER_L2;
 
 	if (flow->layers & vlanm)
 		return rte_flow_error_set(error, ENOTSUP,
@@ -648,11 +758,14 @@ mlx5_flow_item_vlan(const struct rte_flow_item *item, struct rte_flow *flow,
 			mlx5_flow_spec_verbs_add(flow, &eth, size);
 		}
 	} else {
-		if (verbs->attr)
-			mlx5_flow_item_vlan_update(verbs->attr, &eth);
+		if (flow->cur_verbs)
+			mlx5_flow_item_vlan_update(flow->cur_verbs->attr,
+						   &eth);
 		size = 0; /* Only an update is done in eth specification. */
 	}
-	flow->layers |= MLX5_FLOW_LAYER_OUTER_L2 | MLX5_FLOW_LAYER_OUTER_VLAN;
+	flow->layers |= tunnel ?
+		(MLX5_FLOW_LAYER_INNER_L2 | MLX5_FLOW_LAYER_INNER_VLAN) :
+		(MLX5_FLOW_LAYER_OUTER_L2 | MLX5_FLOW_LAYER_OUTER_VLAN);
 	return size;
 }
 
@@ -692,19 +805,23 @@ mlx5_flow_item_ipv4(const struct rte_flow_item *item, struct rte_flow *flow,
 			.next_proto_id = 0xff,
 		},
 	};
+	const int tunnel = !!(flow->layers & MLX5_FLOW_LAYER_TUNNEL);
 	unsigned int size = sizeof(struct ibv_flow_spec_ipv4_ext);
 	struct ibv_flow_spec_ipv4_ext ipv4 = {
-		.type = IBV_FLOW_SPEC_IPV4_EXT,
+		.type = IBV_FLOW_SPEC_IPV4_EXT |
+			(tunnel ? IBV_FLOW_SPEC_INNER : 0),
 		.size = size,
 	};
 	int ret;
 
-	if (flow->layers & MLX5_FLOW_LAYER_OUTER_L3)
+	if (flow->layers & (tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
+			    MLX5_FLOW_LAYER_OUTER_L3))
 		return rte_flow_error_set(error, ENOTSUP,
 					  RTE_FLOW_ERROR_TYPE_ITEM,
 					  item,
 					  "multiple L3 layers not supported");
-	else if (flow->layers & MLX5_FLOW_LAYER_OUTER_L4)
+	else if (flow->layers & (tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
+				 MLX5_FLOW_LAYER_OUTER_L4))
 		return rte_flow_error_set(error, ENOTSUP,
 					  RTE_FLOW_ERROR_TYPE_ITEM,
 					  item,
@@ -717,7 +834,8 @@ mlx5_flow_item_ipv4(const struct rte_flow_item *item, struct rte_flow *flow,
 		 sizeof(struct rte_flow_item_ipv4), error);
 	if (ret < 0)
 		return ret;
-	flow->layers |= MLX5_FLOW_LAYER_OUTER_L3_IPV4;
+	flow->layers |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
+		MLX5_FLOW_LAYER_OUTER_L3_IPV4;
 	if (spec) {
 		ipv4.val = (struct ibv_flow_ipv4_ext_filter){
 			.src_ip = spec->hdr.src_addr,
@@ -740,14 +858,11 @@ mlx5_flow_item_ipv4(const struct rte_flow_item *item, struct rte_flow *flow,
 	flow->l3_protocol_en = !!ipv4.mask.proto;
 	flow->l3_protocol = ipv4.val.proto;
 	if (size <= flow_size) {
-		uint64_t hash_fields = IBV_RX_HASH_SRC_IPV4 |
-			IBV_RX_HASH_DST_IPV4;
-
-		if (!(flow->rss.types &
-		      (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
-		       ETH_RSS_NONFRAG_IPV4_OTHER)))
-			hash_fields = 0;
-		flow->cur_verbs->hash_fields |= hash_fields;
+		mlx5_flow_verbs_hashfields_adjust
+			(flow, tunnel,
+			 (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
+			  ETH_RSS_NONFRAG_IPV4_OTHER),
+			 (IBV_RX_HASH_SRC_IPV4 | IBV_RX_HASH_DST_IPV4));
 		flow->cur_verbs->attr->priority = MLX5_PRIORITY_MAP_L3;
 		mlx5_flow_spec_verbs_add(flow, &ipv4, size);
 	}
@@ -795,19 +910,22 @@ mlx5_flow_item_ipv6(const struct rte_flow_item *item, struct rte_flow *flow,
 			.hop_limits = 0xff,
 		},
 	};
+	const int tunnel = !!(flow->layers & MLX5_FLOW_LAYER_TUNNEL);
 	unsigned int size = sizeof(struct ibv_flow_spec_ipv6);
 	struct ibv_flow_spec_ipv6 ipv6 = {
-		.type = IBV_FLOW_SPEC_IPV6,
+		.type = IBV_FLOW_SPEC_IPV6 | (tunnel ? IBV_FLOW_SPEC_INNER : 0),
 		.size = size,
 	};
 	int ret;
 
-	if (flow->layers & MLX5_FLOW_LAYER_OUTER_L3)
+	if (flow->layers & (tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
+			    MLX5_FLOW_LAYER_OUTER_L3))
 		return rte_flow_error_set(error, ENOTSUP,
 					  RTE_FLOW_ERROR_TYPE_ITEM,
 					  item,
 					  "multiple L3 layers not supported");
-	else if (flow->layers & MLX5_FLOW_LAYER_OUTER_L4)
+	else if (flow->layers & (tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
+				 MLX5_FLOW_LAYER_OUTER_L4))
 		return rte_flow_error_set(error, ENOTSUP,
 					  RTE_FLOW_ERROR_TYPE_ITEM,
 					  item,
@@ -820,7 +938,8 @@ mlx5_flow_item_ipv6(const struct rte_flow_item *item, struct rte_flow *flow,
 		 sizeof(struct rte_flow_item_ipv6), error);
 	if (ret < 0)
 		return ret;
-	flow->layers |= MLX5_FLOW_LAYER_OUTER_L3_IPV6;
+	flow->layers |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
+		MLX5_FLOW_LAYER_OUTER_L3_IPV6;
 	if (spec) {
 		unsigned int i;
 		uint32_t vtc_flow_val;
@@ -863,13 +982,10 @@ mlx5_flow_item_ipv6(const struct rte_flow_item *item, struct rte_flow *flow,
 	flow->l3_protocol_en = !!ipv6.mask.next_hdr;
 	flow->l3_protocol = ipv6.val.next_hdr;
 	if (size <= flow_size) {
-		uint64_t hash_fields = IBV_RX_HASH_SRC_IPV6 |
-			IBV_RX_HASH_DST_IPV6;
-
-		if (!(flow->rss.types &
-		      (ETH_RSS_IPV6 | ETH_RSS_NONFRAG_IPV6_OTHER)))
-			hash_fields = 0;
-		flow->cur_verbs->hash_fields |= hash_fields;
+		mlx5_flow_verbs_hashfields_adjust
+			(flow, tunnel,
+			 (ETH_RSS_IPV6 | ETH_RSS_NONFRAG_IPV6_OTHER),
+			 (IBV_RX_HASH_SRC_IPV6 | IBV_RX_HASH_DST_IPV6));
 		flow->cur_verbs->attr->priority = MLX5_PRIORITY_MAP_L3;
 		mlx5_flow_spec_verbs_add(flow, &ipv6, size);
 	}
@@ -904,9 +1020,10 @@ mlx5_flow_item_udp(const struct rte_flow_item *item, struct rte_flow *flow,
 {
 	const struct rte_flow_item_udp *spec = item->spec;
 	const struct rte_flow_item_udp *mask = item->mask;
+	const int tunnel = !!(flow->layers & MLX5_FLOW_LAYER_TUNNEL);
 	unsigned int size = sizeof(struct ibv_flow_spec_tcp_udp);
 	struct ibv_flow_spec_tcp_udp udp = {
-		.type = IBV_FLOW_SPEC_UDP,
+		.type = IBV_FLOW_SPEC_UDP | (tunnel ? IBV_FLOW_SPEC_INNER : 0),
 		.size = size,
 	};
 	int ret;
@@ -917,13 +1034,15 @@ mlx5_flow_item_udp(const struct rte_flow_item *item, struct rte_flow *flow,
 					  item,
 					  "protocol filtering not compatible"
 					  " with UDP layer");
-	if (!(flow->layers & MLX5_FLOW_LAYER_OUTER_L3))
+	if (!(flow->layers & (tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
+			      MLX5_FLOW_LAYER_OUTER_L3)))
 		return rte_flow_error_set(error, ENOTSUP,
 					  RTE_FLOW_ERROR_TYPE_ITEM,
 					  item,
 					  "L3 is mandatory to filter"
 					  " on L4");
-	if (flow->layers & MLX5_FLOW_LAYER_OUTER_L4)
+	if (flow->layers & (tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
+			    MLX5_FLOW_LAYER_OUTER_L4))
 		return rte_flow_error_set(error, ENOTSUP,
 					  RTE_FLOW_ERROR_TYPE_ITEM,
 					  item,
@@ -937,7 +1056,8 @@ mlx5_flow_item_udp(const struct rte_flow_item *item, struct rte_flow *flow,
 		 sizeof(struct rte_flow_item_udp), error);
 	if (ret < 0)
 		return ret;
-	flow->layers |= MLX5_FLOW_LAYER_OUTER_L4_UDP;
+	flow->layers |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
+		MLX5_FLOW_LAYER_OUTER_L4_UDP;
 	if (spec) {
 		udp.val.dst_port = spec->hdr.dst_port;
 		udp.val.src_port = spec->hdr.src_port;
@@ -948,12 +1068,9 @@ mlx5_flow_item_udp(const struct rte_flow_item *item, struct rte_flow *flow,
 		udp.val.dst_port &= udp.mask.dst_port;
 	}
 	if (size <= flow_size) {
-		uint64_t hash_fields = IBV_RX_HASH_SRC_PORT_UDP |
-			IBV_RX_HASH_DST_PORT_UDP;
-
-		if (!(flow->rss.types & ETH_RSS_UDP))
-			hash_fields = 0;
-		flow->cur_verbs->hash_fields |= hash_fields;
+		mlx5_flow_verbs_hashfields_adjust(flow, tunnel, ETH_RSS_UDP,
+						  (IBV_RX_HASH_SRC_PORT_UDP |
+						   IBV_RX_HASH_DST_PORT_UDP));
 		flow->cur_verbs->attr->priority = MLX5_PRIORITY_MAP_L4;
 		mlx5_flow_spec_verbs_add(flow, &udp, size);
 	}
@@ -988,9 +1105,10 @@ mlx5_flow_item_tcp(const struct rte_flow_item *item, struct rte_flow *flow,
 {
 	const struct rte_flow_item_tcp *spec = item->spec;
 	const struct rte_flow_item_tcp *mask = item->mask;
+	const int tunnel = !!(flow->layers & MLX5_FLOW_LAYER_TUNNEL);
 	unsigned int size = sizeof(struct ibv_flow_spec_tcp_udp);
 	struct ibv_flow_spec_tcp_udp tcp = {
-		.type = IBV_FLOW_SPEC_TCP,
+		.type = IBV_FLOW_SPEC_TCP | (tunnel ? IBV_FLOW_SPEC_INNER : 0),
 		.size = size,
 	};
 	int ret;
@@ -1001,12 +1119,14 @@ mlx5_flow_item_tcp(const struct rte_flow_item *item, struct rte_flow *flow,
 					  item,
 					  "protocol filtering not compatible"
 					  " with TCP layer");
-	if (!(flow->layers & MLX5_FLOW_LAYER_OUTER_L3))
+	if (!(flow->layers & (tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
+			      MLX5_FLOW_LAYER_OUTER_L3)))
 		return rte_flow_error_set(error, ENOTSUP,
 					  RTE_FLOW_ERROR_TYPE_ITEM,
 					  item,
 					  "L3 is mandatory to filter on L4");
-	if (flow->layers & MLX5_FLOW_LAYER_OUTER_L4)
+	if (flow->layers & (tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
+			    MLX5_FLOW_LAYER_OUTER_L4))
 		return rte_flow_error_set(error, ENOTSUP,
 					  RTE_FLOW_ERROR_TYPE_ITEM,
 					  item,
@@ -1019,7 +1139,8 @@ mlx5_flow_item_tcp(const struct rte_flow_item *item, struct rte_flow *flow,
 		 sizeof(struct rte_flow_item_tcp), error);
 	if (ret < 0)
 		return ret;
-	flow->layers |= MLX5_FLOW_LAYER_OUTER_L4_TCP;
+	flow->layers |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
+		MLX5_FLOW_LAYER_OUTER_L4_TCP;
 	if (spec) {
 		tcp.val.dst_port = spec->hdr.dst_port;
 		tcp.val.src_port = spec->hdr.src_port;
@@ -1030,12 +1151,9 @@ mlx5_flow_item_tcp(const struct rte_flow_item *item, struct rte_flow *flow,
 		tcp.val.dst_port &= tcp.mask.dst_port;
 	}
 	if (size <= flow_size) {
-		uint64_t hash_fields = IBV_RX_HASH_SRC_PORT_TCP |
-			IBV_RX_HASH_DST_PORT_TCP;
-
-		if (!(flow->rss.types & ETH_RSS_TCP))
-			hash_fields = 0;
-		flow->cur_verbs->hash_fields |= hash_fields;
+		mlx5_flow_verbs_hashfields_adjust(flow, tunnel, ETH_RSS_TCP,
						  (IBV_RX_HASH_SRC_PORT_TCP |
						   IBV_RX_HASH_DST_PORT_TCP));
 		flow->cur_verbs->attr->priority = MLX5_PRIORITY_MAP_L4;
 		mlx5_flow_spec_verbs_add(flow, &tcp, size);
 	}
@@ -1261,7 +1379,11 @@ mlx5_flow_action_rss(struct rte_eth_dev *dev,
 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
 					  &rss->func,
 					  "RSS hash function not supported");
+#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
+	if (rss->level > 2)
+#else
 	if (rss->level > 1)
+#endif
 		return rte_flow_error_set(error, ENOTSUP,
 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
 					  &rss->level,
@@ -1301,6 +1423,7 @@ mlx5_flow_action_rss(struct rte_eth_dev *dev,
 	flow->rss.queue_num = rss->queue_num;
 	memcpy(flow->key, rss->key, MLX5_RSS_HASH_KEY_LEN);
 	flow->rss.types = rss->types;
+	flow->rss.level = rss->level;
 	flow->fate |= MLX5_FLOW_FATE_RSS;
 	return 0;
 }
@@ -1608,7 +1731,9 @@ mlx5_flow_merge(struct rte_eth_dev *dev, struct rte_flow *flow,
 		ret = rte_flow_expand_rss(buf, sizeof(expand_buffer.buffer),
 					  pattern, local_flow.rss.types,
 					  mlx5_support_expansion,
-					  MLX5_EXPANSION_ROOT);
+					  local_flow.rss.level < 2 ?
+					  MLX5_EXPANSION_ROOT :
+					  MLX5_EXPANSION_ROOT_OUTER);
 		assert(ret > 0 &&
 		       (unsigned int)ret < sizeof(expand_buffer.buffer));
 	} else {
@@ -1979,8 +2104,8 @@ mlx5_flow_list_create(struct rte_eth_dev *dev,
 			return NULL;
 		}
 	}
-	mlx5_flow_rxq_mark_set(dev, flow);
 	TAILQ_INSERT_TAIL(list, flow, next);
+	mlx5_flow_rxq_mark_set(dev, flow);
 	return flow;
 }
 
-- 
2.18.0
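
For context, the change above is what lets an application request RSS on the inner frame of tunnelled traffic through the generic rte_flow API: the driver now records rss->level, expands the pattern from MLX5_EXPANSION_ROOT_OUTER when the level is 2 or more, and selects inner instead of outer hash fields. The sketch below is illustrative only and is not part of the patch. It shows a flow rule whose RSS action uses level 2; the VXLAN pattern item relies on the tunnel item support added by the other patches of this series, and the function name, port id and queue list are made up.

#include <stdint.h>
#include <rte_common.h>
#include <rte_ethdev.h>
#include <rte_flow.h>

/*
 * Illustrative sketch: request RSS on the inner headers (level 2) of
 * VXLAN-encapsulated traffic.  With the patch above, mlx5 copies
 * rss->level into flow->rss.level and, for level 2, hashes on the inner
 * layers (IBV_RX_HASH_INNER when the Verbs tunnel support is available)
 * while the outer layers stay in the match.
 */
static struct rte_flow *
create_inner_rss_flow(uint16_t port_id, struct rte_flow_error *err)
{
	static const uint16_t queues[] = { 0, 1, 2, 3 };	/* made-up Rx queues */
	const struct rte_flow_attr attr = { .ingress = 1 };
	const struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_UDP },
		{ .type = RTE_FLOW_ITEM_TYPE_VXLAN },	/* needs tunnel item support */
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	const struct rte_flow_action_rss rss = {
		.func = RTE_ETH_HASH_FUNCTION_DEFAULT,
		.level = 2,			/* hash on inner headers */
		.types = ETH_RSS_IP | ETH_RSS_UDP,
		.queue_num = RTE_DIM(queues),
		.queue = queues,
	};
	const struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_RSS, .conf = &rss },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	return rte_flow_create(port_id, &attr, pattern, actions, err);
}

With level left at 0 or 1 the same rule keeps hashing on the outer headers, which is why mlx5_flow_merge() picks MLX5_EXPANSION_ROOT for levels below 2 and MLX5_EXPANSION_ROOT_OUTER otherwise.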