From: Nelio Laranjeiro
To: dev@dpdk.org, Yongseok Koh
Cc: Adrien Mazarguil
Date: Wed, 11 Jul 2018 09:22:49 +0200
Subject: [dpdk-dev] [PATCH v3 16/21] net/mlx5: support inner RSS computation
X-Mailer: git-send-email 2.18.0

Add support for RSS on the inner-most layer of tunnelled packets: split
the pattern layer flags into outer and inner sets, add outer-only nodes
to the RSS expansion graph and compute the Verbs hash fields according
to the RSS level requested by the application (level 2 selects the
inner headers).

Signed-off-by: Nelio Laranjeiro
---
 drivers/net/mlx5/mlx5_flow.c | 241 ++++++++++++++++++++++++++---------
 1 file changed, 180 insertions(+), 61 deletions(-)

diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index f2acac1f5..edceb17ed 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -35,18 +35,42 @@
 extern const struct eth_dev_ops mlx5_dev_ops;
 extern const struct eth_dev_ops mlx5_dev_ops_isolate;
 
-/* Pattern Layer bits. */
+/* Pattern outer Layer bits. */
 #define MLX5_FLOW_LAYER_OUTER_L2 (1u << 0)
 #define MLX5_FLOW_LAYER_OUTER_L3_IPV4 (1u << 1)
 #define MLX5_FLOW_LAYER_OUTER_L3_IPV6 (1u << 2)
 #define MLX5_FLOW_LAYER_OUTER_L4_UDP (1u << 3)
 #define MLX5_FLOW_LAYER_OUTER_L4_TCP (1u << 4)
 #define MLX5_FLOW_LAYER_OUTER_VLAN (1u << 5)
-/* Masks. */
+
+/* Pattern inner Layer bits. */
+#define MLX5_FLOW_LAYER_INNER_L2 (1u << 6)
+#define MLX5_FLOW_LAYER_INNER_L3_IPV4 (1u << 7)
+#define MLX5_FLOW_LAYER_INNER_L3_IPV6 (1u << 8)
+#define MLX5_FLOW_LAYER_INNER_L4_UDP (1u << 9)
+#define MLX5_FLOW_LAYER_INNER_L4_TCP (1u << 10)
+#define MLX5_FLOW_LAYER_INNER_VLAN (1u << 11)
+
+/* Outer Masks. */
 #define MLX5_FLOW_LAYER_OUTER_L3 \
         (MLX5_FLOW_LAYER_OUTER_L3_IPV4 | MLX5_FLOW_LAYER_OUTER_L3_IPV6)
 #define MLX5_FLOW_LAYER_OUTER_L4 \
         (MLX5_FLOW_LAYER_OUTER_L4_UDP | MLX5_FLOW_LAYER_OUTER_L4_TCP)
+#define MLX5_FLOW_LAYER_OUTER \
+        (MLX5_FLOW_LAYER_OUTER_L2 | MLX5_FLOW_LAYER_OUTER_L3 | \
+         MLX5_FLOW_LAYER_OUTER_L4)
+
+/* Tunnel Masks. */
+#define MLX5_FLOW_LAYER_TUNNEL 0
+
+/* Inner Masks. */
+#define MLX5_FLOW_LAYER_INNER_L3 \
+        (MLX5_FLOW_LAYER_INNER_L3_IPV4 | MLX5_FLOW_LAYER_INNER_L3_IPV6)
+#define MLX5_FLOW_LAYER_INNER_L4 \
+        (MLX5_FLOW_LAYER_INNER_L4_UDP | MLX5_FLOW_LAYER_INNER_L4_TCP)
+#define MLX5_FLOW_LAYER_INNER \
+        (MLX5_FLOW_LAYER_INNER_L2 | MLX5_FLOW_LAYER_INNER_L3 | \
+         MLX5_FLOW_LAYER_INNER_L4)
 
 /* Actions that modify the fate of matching traffic. */
 #define MLX5_FLOW_FATE_DROP (1u << 0)
@@ -66,6 +90,14 @@ extern const struct eth_dev_ops mlx5_dev_ops_isolate;
 
 enum mlx5_expansion {
         MLX5_EXPANSION_ROOT,
+        MLX5_EXPANSION_ROOT_OUTER,
+        MLX5_EXPANSION_OUTER_ETH,
+        MLX5_EXPANSION_OUTER_IPV4,
+        MLX5_EXPANSION_OUTER_IPV4_UDP,
+        MLX5_EXPANSION_OUTER_IPV4_TCP,
+        MLX5_EXPANSION_OUTER_IPV6,
+        MLX5_EXPANSION_OUTER_IPV6_UDP,
+        MLX5_EXPANSION_OUTER_IPV6_TCP,
         MLX5_EXPANSION_ETH,
         MLX5_EXPANSION_IPV4,
         MLX5_EXPANSION_IPV4_UDP,
@@ -83,6 +115,50 @@ static const struct rte_flow_expand_node mlx5_support_expansion[] = {
                                                  MLX5_EXPANSION_IPV6),
                 .type = RTE_FLOW_ITEM_TYPE_END,
         },
+        [MLX5_EXPANSION_ROOT_OUTER] = {
+                .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_ETH,
+                                                 MLX5_EXPANSION_OUTER_IPV4,
+                                                 MLX5_EXPANSION_OUTER_IPV6),
+                .type = RTE_FLOW_ITEM_TYPE_END,
+        },
+        [MLX5_EXPANSION_OUTER_ETH] = {
+                .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_IPV4,
+                                                 MLX5_EXPANSION_OUTER_IPV6),
+                .type = RTE_FLOW_ITEM_TYPE_ETH,
+                .rss_types = 0,
+        },
+        [MLX5_EXPANSION_OUTER_IPV4] = {
+                .next = RTE_FLOW_EXPAND_RSS_NEXT
+                        (MLX5_EXPANSION_OUTER_IPV4_UDP,
+                         MLX5_EXPANSION_OUTER_IPV4_TCP),
+                .type = RTE_FLOW_ITEM_TYPE_IPV4,
+                .rss_types = ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
+                        ETH_RSS_NONFRAG_IPV4_OTHER,
+        },
+        [MLX5_EXPANSION_OUTER_IPV4_UDP] = {
+                .type = RTE_FLOW_ITEM_TYPE_UDP,
+                .rss_types = ETH_RSS_NONFRAG_IPV4_UDP,
+        },
+        [MLX5_EXPANSION_OUTER_IPV4_TCP] = {
+                .type = RTE_FLOW_ITEM_TYPE_TCP,
+                .rss_types = ETH_RSS_NONFRAG_IPV4_TCP,
+        },
+        [MLX5_EXPANSION_OUTER_IPV6] = {
+                .next = RTE_FLOW_EXPAND_RSS_NEXT
+                        (MLX5_EXPANSION_OUTER_IPV6_UDP,
+                         MLX5_EXPANSION_OUTER_IPV6_TCP),
+                .type = RTE_FLOW_ITEM_TYPE_IPV6,
+                .rss_types = ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 |
+                        ETH_RSS_NONFRAG_IPV6_OTHER,
+        },
+        [MLX5_EXPANSION_OUTER_IPV6_UDP] = {
+                .type = RTE_FLOW_ITEM_TYPE_UDP,
+                .rss_types = ETH_RSS_NONFRAG_IPV6_UDP,
+        },
+        [MLX5_EXPANSION_OUTER_IPV6_TCP] = {
+                .type = RTE_FLOW_ITEM_TYPE_TCP,
+                .rss_types = ETH_RSS_NONFRAG_IPV6_TCP,
+        },
         [MLX5_EXPANSION_ETH] = {
                 .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4,
                                                  MLX5_EXPANSION_IPV6),
@@ -453,6 +529,32 @@ mlx5_flow_spec_verbs_add(struct rte_flow *flow, void *src, unsigned int size)
         verbs->size += size;
 }
 
+/**
+ * Adjust verbs hash fields according to the @p flow information.
+ *
+ * @param[in, out] flow.
+ *   Pointer to flow structure.
+ * @param[in] tunnel
+ *   1 when the hash field is for a tunnel item.
+ * @param[in] layer_types
+ *   ETH_RSS_* types.
+ * @param[in] hash_fields
+ *   Item hash fields.
+ */
+static void
+mlx5_flow_verbs_hashfields_adjust(struct rte_flow *flow, int tunnel,
+                                  uint32_t layer_types, uint64_t hash_fields)
+{
+        hash_fields |= (tunnel ? IBV_RX_HASH_INNER : 0);
+        if (!(flow->rss.types & layer_types))
+                hash_fields = 0;
+        if (flow->rss.level == 2 && !tunnel)
+                hash_fields = 0;
+        else if (flow->rss.level < 2 && tunnel)
+                hash_fields = 0;
+        flow->cur_verbs->hash_fields |= hash_fields;
+}
+
 /**
  * Convert the @p item into a Verbs specification after ensuring the NIC
  * will understand and process it correctly.
@@ -486,14 +588,16 @@ mlx5_flow_item_eth(const struct rte_flow_item *item, struct rte_flow *flow,
                 .src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
                 .type = RTE_BE16(0xffff),
         };
+        const int tunnel = !!(flow->layers & MLX5_FLOW_LAYER_TUNNEL);
         const unsigned int size = sizeof(struct ibv_flow_spec_eth);
         struct ibv_flow_spec_eth eth = {
-                .type = IBV_FLOW_SPEC_ETH,
+                .type = IBV_FLOW_SPEC_ETH | (tunnel ? IBV_FLOW_SPEC_INNER : 0),
                 .size = size,
         };
         int ret;
 
-        if (flow->layers & MLX5_FLOW_LAYER_OUTER_L2)
+        if (flow->layers & (tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
+                            MLX5_FLOW_LAYER_OUTER_L2))
                 return rte_flow_error_set(error, ENOTSUP,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
@@ -506,7 +610,8 @@ mlx5_flow_item_eth(const struct rte_flow_item *item, struct rte_flow *flow,
                               error);
         if (ret)
                 return ret;
-        flow->layers |= MLX5_FLOW_LAYER_OUTER_L2;
+        flow->layers |= tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
+                MLX5_FLOW_LAYER_OUTER_L2;
         if (size > flow_size)
                 return size;
         if (spec) {
@@ -543,7 +648,7 @@ mlx5_flow_item_vlan_update(struct ibv_flow_attr *attr,
                            struct ibv_flow_spec_eth *eth)
 {
         unsigned int i;
-        enum ibv_flow_spec_type search = IBV_FLOW_SPEC_ETH;
+        const enum ibv_flow_spec_type search = eth->type;
         struct ibv_spec_header *hdr = (struct ibv_spec_header *)
                 ((uint8_t *)attr + sizeof(struct ibv_flow_attr));
@@ -596,16 +701,19 @@ mlx5_flow_item_vlan(const struct rte_flow_item *item, struct rte_flow *flow,
                 .inner_type = RTE_BE16(0xffff),
         };
         unsigned int size = sizeof(struct ibv_flow_spec_eth);
-        struct mlx5_flow_verbs *verbs = flow->cur_verbs;
+        const int tunnel = !!(flow->layers & MLX5_FLOW_LAYER_TUNNEL);
         struct ibv_flow_spec_eth eth = {
-                .type = IBV_FLOW_SPEC_ETH,
+                .type = IBV_FLOW_SPEC_ETH | (tunnel ? IBV_FLOW_SPEC_INNER : 0),
                 .size = size,
         };
         int ret;
-        const uint32_t l34m = MLX5_FLOW_LAYER_OUTER_L3 |
-                MLX5_FLOW_LAYER_OUTER_L4;
-        const uint32_t vlanm = MLX5_FLOW_LAYER_OUTER_VLAN;
-        const uint32_t l2m = MLX5_FLOW_LAYER_OUTER_L2;
+        const uint32_t l34m = tunnel ? (MLX5_FLOW_LAYER_INNER_L3 |
+                                        MLX5_FLOW_LAYER_INNER_L4) :
+                (MLX5_FLOW_LAYER_OUTER_L3 | MLX5_FLOW_LAYER_OUTER_L4);
+        const uint32_t vlanm = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
+                MLX5_FLOW_LAYER_OUTER_VLAN;
+        const uint32_t l2m = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
+                MLX5_FLOW_LAYER_OUTER_L2;
 
         if (flow->layers & vlanm)
                 return rte_flow_error_set(error, ENOTSUP,
@@ -648,11 +756,14 @@ mlx5_flow_item_vlan(const struct rte_flow_item *item, struct rte_flow *flow,
                         mlx5_flow_spec_verbs_add(flow, &eth, size);
                 }
         } else {
-                if (verbs->attr)
-                        mlx5_flow_item_vlan_update(verbs->attr, &eth);
+                if (flow->cur_verbs)
+                        mlx5_flow_item_vlan_update(flow->cur_verbs->attr,
+                                                   &eth);
                 size = 0; /* Only an update is done in eth specification. */
         }
-        flow->layers |= MLX5_FLOW_LAYER_OUTER_L2 | MLX5_FLOW_LAYER_OUTER_VLAN;
+        flow->layers |= tunnel ?
+                (MLX5_FLOW_LAYER_INNER_L2 | MLX5_FLOW_LAYER_INNER_VLAN) :
+                (MLX5_FLOW_LAYER_OUTER_L2 | MLX5_FLOW_LAYER_OUTER_VLAN);
         return size;
 }
 
@@ -692,19 +803,23 @@ mlx5_flow_item_ipv4(const struct rte_flow_item *item, struct rte_flow *flow,
                         .next_proto_id = 0xff,
                 },
         };
+        const int tunnel = !!(flow->layers & MLX5_FLOW_LAYER_TUNNEL);
         unsigned int size = sizeof(struct ibv_flow_spec_ipv4_ext);
         struct ibv_flow_spec_ipv4_ext ipv4 = {
-                .type = IBV_FLOW_SPEC_IPV4_EXT,
+                .type = IBV_FLOW_SPEC_IPV4_EXT |
+                        (tunnel ? IBV_FLOW_SPEC_INNER : 0),
                 .size = size,
         };
         int ret;
 
-        if (flow->layers & MLX5_FLOW_LAYER_OUTER_L3)
+        if (flow->layers & (tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
+                            MLX5_FLOW_LAYER_OUTER_L3))
                 return rte_flow_error_set(error, ENOTSUP,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "multiple L3 layers not supported");
-        else if (flow->layers & MLX5_FLOW_LAYER_OUTER_L4)
+        else if (flow->layers & (tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
+                                 MLX5_FLOW_LAYER_OUTER_L4))
                 return rte_flow_error_set(error, ENOTSUP,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
@@ -717,7 +832,8 @@ mlx5_flow_item_ipv4(const struct rte_flow_item *item, struct rte_flow *flow,
                  sizeof(struct rte_flow_item_ipv4), error);
         if (ret < 0)
                 return ret;
-        flow->layers |= MLX5_FLOW_LAYER_OUTER_L3_IPV4;
+        flow->layers |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
+                MLX5_FLOW_LAYER_OUTER_L3_IPV4;
         if (spec) {
                 ipv4.val = (struct ibv_flow_ipv4_ext_filter){
                         .src_ip = spec->hdr.src_addr,
@@ -740,14 +856,11 @@ mlx5_flow_item_ipv4(const struct rte_flow_item *item, struct rte_flow *flow,
         flow->l3_protocol_en = !!ipv4.mask.proto;
         flow->l3_protocol = ipv4.val.proto;
         if (size <= flow_size) {
-                uint64_t hash_fields = IBV_RX_HASH_SRC_IPV4 |
-                        IBV_RX_HASH_DST_IPV4;
-
-                if (!(flow->rss.types &
-                      (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
-                       ETH_RSS_NONFRAG_IPV4_OTHER)))
-                        hash_fields = 0;
-                flow->cur_verbs->hash_fields |= hash_fields;
+                mlx5_flow_verbs_hashfields_adjust
+                        (flow, tunnel,
+                         (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
+                          ETH_RSS_NONFRAG_IPV4_OTHER),
+                         (IBV_RX_HASH_SRC_IPV4 | IBV_RX_HASH_DST_IPV4));
                 flow->cur_verbs->attr->priority = MLX5_PRIORITY_MAP_L3;
                 mlx5_flow_spec_verbs_add(flow, &ipv4, size);
         }
@@ -795,19 +908,22 @@ mlx5_flow_item_ipv6(const struct rte_flow_item *item, struct rte_flow *flow,
                         .hop_limits = 0xff,
                 },
         };
+        const int tunnel = !!(flow->layers & MLX5_FLOW_LAYER_TUNNEL);
         unsigned int size = sizeof(struct ibv_flow_spec_ipv6);
         struct ibv_flow_spec_ipv6 ipv6 = {
-                .type = IBV_FLOW_SPEC_IPV6,
+                .type = IBV_FLOW_SPEC_IPV6 | (tunnel ? IBV_FLOW_SPEC_INNER : 0),
                 .size = size,
         };
         int ret;
 
-        if (flow->layers & MLX5_FLOW_LAYER_OUTER_L3)
+        if (flow->layers & (tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
+                            MLX5_FLOW_LAYER_OUTER_L3))
                 return rte_flow_error_set(error, ENOTSUP,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "multiple L3 layers not supported");
-        else if (flow->layers & MLX5_FLOW_LAYER_OUTER_L4)
+        else if (flow->layers & (tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
+                                 MLX5_FLOW_LAYER_OUTER_L4))
                 return rte_flow_error_set(error, ENOTSUP,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
@@ -820,7 +936,8 @@ mlx5_flow_item_ipv6(const struct rte_flow_item *item, struct rte_flow *flow,
                  sizeof(struct rte_flow_item_ipv6), error);
         if (ret < 0)
                 return ret;
-        flow->layers |= MLX5_FLOW_LAYER_OUTER_L3_IPV6;
+        flow->layers |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
+                MLX5_FLOW_LAYER_OUTER_L3_IPV6;
         if (spec) {
                 unsigned int i;
                 uint32_t vtc_flow_val;
@@ -863,13 +980,10 @@ mlx5_flow_item_ipv6(const struct rte_flow_item *item, struct rte_flow *flow,
         flow->l3_protocol_en = !!ipv6.mask.next_hdr;
         flow->l3_protocol = ipv6.val.next_hdr;
         if (size <= flow_size) {
-                uint64_t hash_fields = IBV_RX_HASH_SRC_IPV6 |
-                        IBV_RX_HASH_DST_IPV6;
-
-                if (!(flow->rss.types &
-                      (ETH_RSS_IPV6 | ETH_RSS_NONFRAG_IPV6_OTHER)))
-                        hash_fields = 0;
-                flow->cur_verbs->hash_fields |= hash_fields;
+                mlx5_flow_verbs_hashfields_adjust
+                        (flow, tunnel,
+                         (ETH_RSS_IPV6 | ETH_RSS_NONFRAG_IPV6_OTHER),
+                         (IBV_RX_HASH_SRC_IPV6 | IBV_RX_HASH_DST_IPV6));
                 flow->cur_verbs->attr->priority = MLX5_PRIORITY_MAP_L3;
                 mlx5_flow_spec_verbs_add(flow, &ipv6, size);
         }
@@ -904,9 +1018,10 @@ mlx5_flow_item_udp(const struct rte_flow_item *item, struct rte_flow *flow,
 {
         const struct rte_flow_item_udp *spec = item->spec;
         const struct rte_flow_item_udp *mask = item->mask;
+        const int tunnel = !!(flow->layers & MLX5_FLOW_LAYER_TUNNEL);
         unsigned int size = sizeof(struct ibv_flow_spec_tcp_udp);
         struct ibv_flow_spec_tcp_udp udp = {
-                .type = IBV_FLOW_SPEC_UDP,
+                .type = IBV_FLOW_SPEC_UDP | (tunnel ? IBV_FLOW_SPEC_INNER : 0),
                 .size = size,
         };
         int ret;
@@ -917,13 +1032,15 @@ mlx5_flow_item_udp(const struct rte_flow_item *item, struct rte_flow *flow,
                                           item,
                                           "protocol filtering not compatible"
                                           " with UDP layer");
-        if (!(flow->layers & MLX5_FLOW_LAYER_OUTER_L3))
+        if (!(flow->layers & (tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
+                              MLX5_FLOW_LAYER_OUTER_L3)))
                 return rte_flow_error_set(error, ENOTSUP,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "L3 is mandatory to filter"
                                           " on L4");
-        if (flow->layers & MLX5_FLOW_LAYER_OUTER_L4)
+        if (flow->layers & (tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
+                            MLX5_FLOW_LAYER_OUTER_L4))
                 return rte_flow_error_set(error, ENOTSUP,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
@@ -937,7 +1054,8 @@ mlx5_flow_item_udp(const struct rte_flow_item *item, struct rte_flow *flow,
                  sizeof(struct rte_flow_item_udp), error);
         if (ret < 0)
                 return ret;
-        flow->layers |= MLX5_FLOW_LAYER_OUTER_L4_UDP;
+        flow->layers |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
+                MLX5_FLOW_LAYER_OUTER_L4_UDP;
         if (spec) {
                 udp.val.dst_port = spec->hdr.dst_port;
                 udp.val.src_port = spec->hdr.src_port;
@@ -948,12 +1066,9 @@ mlx5_flow_item_udp(const struct rte_flow_item *item, struct rte_flow *flow,
                 udp.val.dst_port &= udp.mask.dst_port;
         }
         if (size <= flow_size) {
-                uint64_t hash_fields = IBV_RX_HASH_SRC_PORT_UDP |
-                        IBV_RX_HASH_DST_PORT_UDP;
-
-                if (!(flow->rss.types & ETH_RSS_UDP))
-                        hash_fields = 0;
-                flow->cur_verbs->hash_fields |= hash_fields;
+                mlx5_flow_verbs_hashfields_adjust(flow, tunnel, ETH_RSS_UDP,
+                                                  (IBV_RX_HASH_SRC_PORT_UDP |
+                                                   IBV_RX_HASH_DST_PORT_UDP));
                 flow->cur_verbs->attr->priority = MLX5_PRIORITY_MAP_L4;
                 mlx5_flow_spec_verbs_add(flow, &udp, size);
         }
@@ -988,9 +1103,10 @@ mlx5_flow_item_tcp(const struct rte_flow_item *item, struct rte_flow *flow,
 {
         const struct rte_flow_item_tcp *spec = item->spec;
         const struct rte_flow_item_tcp *mask = item->mask;
+        const int tunnel = !!(flow->layers & MLX5_FLOW_LAYER_TUNNEL);
         unsigned int size = sizeof(struct ibv_flow_spec_tcp_udp);
         struct ibv_flow_spec_tcp_udp tcp = {
-                .type = IBV_FLOW_SPEC_TCP,
+                .type = IBV_FLOW_SPEC_TCP | (tunnel ? IBV_FLOW_SPEC_INNER : 0),
                 .size = size,
         };
         int ret;
@@ -1001,12 +1117,14 @@ mlx5_flow_item_tcp(const struct rte_flow_item *item, struct rte_flow *flow,
                                           item,
                                           "protocol filtering not compatible"
                                           " with TCP layer");
-        if (!(flow->layers & MLX5_FLOW_LAYER_OUTER_L3))
+        if (!(flow->layers & (tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
+                              MLX5_FLOW_LAYER_OUTER_L3)))
                 return rte_flow_error_set(error, ENOTSUP,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "L3 is mandatory to filter on L4");
-        if (flow->layers & MLX5_FLOW_LAYER_OUTER_L4)
+        if (flow->layers & (tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
+                            MLX5_FLOW_LAYER_OUTER_L4))
                 return rte_flow_error_set(error, ENOTSUP,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
@@ -1019,7 +1137,8 @@ mlx5_flow_item_tcp(const struct rte_flow_item *item, struct rte_flow *flow,
                  sizeof(struct rte_flow_item_tcp), error);
         if (ret < 0)
                 return ret;
-        flow->layers |= MLX5_FLOW_LAYER_OUTER_L4_TCP;
+        flow->layers |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
+                MLX5_FLOW_LAYER_OUTER_L4_TCP;
         if (spec) {
                 tcp.val.dst_port = spec->hdr.dst_port;
                 tcp.val.src_port = spec->hdr.src_port;
@@ -1030,12 +1149,9 @@ mlx5_flow_item_tcp(const struct rte_flow_item *item, struct rte_flow *flow,
                 tcp.val.dst_port &= tcp.mask.dst_port;
         }
         if (size <= flow_size) {
-                uint64_t hash_fields = IBV_RX_HASH_SRC_PORT_TCP |
-                        IBV_RX_HASH_DST_PORT_TCP;
-
-                if (!(flow->rss.types & ETH_RSS_TCP))
-                        hash_fields = 0;
-                flow->cur_verbs->hash_fields |= hash_fields;
+                mlx5_flow_verbs_hashfields_adjust(flow, tunnel, ETH_RSS_TCP,
+                                                  (IBV_RX_HASH_SRC_PORT_TCP |
+                                                   IBV_RX_HASH_DST_PORT_TCP));
                 flow->cur_verbs->attr->priority = MLX5_PRIORITY_MAP_L4;
                 mlx5_flow_spec_verbs_add(flow, &tcp, size);
         }
@@ -1261,7 +1377,7 @@ mlx5_flow_action_rss(struct rte_eth_dev *dev,
                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
                                           &rss->func,
                                           "RSS hash function not supported");
-        if (rss->level > 1)
+        if (rss->level > 2)
                 return rte_flow_error_set(error, ENOTSUP,
                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
                                           &rss->level,
@@ -1301,6 +1417,7 @@ mlx5_flow_action_rss(struct rte_eth_dev *dev,
         flow->rss.queue_num = rss->queue_num;
         memcpy(flow->key, rss->key, MLX5_RSS_HASH_KEY_LEN);
         flow->rss.types = rss->types;
+        flow->rss.level = rss->level;
         flow->fate |= MLX5_FLOW_FATE_RSS;
         return 0;
 }
@@ -1607,7 +1724,9 @@ mlx5_flow_merge(struct rte_eth_dev *dev, struct rte_flow *flow,
                 ret = rte_flow_expand_rss(buf, sizeof(expand_buffer.buffer),
                                           pattern, local_flow.rss.types,
                                           mlx5_support_expansion,
-                                          MLX5_EXPANSION_ROOT);
+                                          local_flow.rss.level < 2 ?
+                                          MLX5_EXPANSION_ROOT :
+                                          MLX5_EXPANSION_ROOT_OUTER);
                 assert(ret > 0 &&
                        (unsigned int)ret < sizeof(expand_buffer.buffer));
         } else {
@@ -1975,8 +2094,8 @@ mlx5_flow_list_create(struct rte_eth_dev *dev,
                         return NULL;
                 }
         }
-        mlx5_flow_rxq_mark_set(dev, flow);
         TAILQ_INSERT_TAIL(list, flow, next);
+        mlx5_flow_rxq_mark_set(dev, flow);
         return flow;
 }
-- 
2.18.0
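
[Editor's note, not part of the patch] A minimal sketch of how an application could request RSS on the inner-most headers once this change is in place, using the standard rte_flow API of this release. The port id, queue array and RSS type selection are illustrative placeholders, and matching on specific tunnel items is only enabled by later patches of the series.

/* Hypothetical usage example: ask the PMD to hash on the inner headers
 * of tunnelled packets by requesting RSS level 2. */
#include <rte_ethdev.h>
#include <rte_flow.h>

static struct rte_flow *
create_inner_rss_flow(uint16_t port_id, const uint16_t *queues,
                      uint32_t nb_queues, struct rte_flow_error *error)
{
        const struct rte_flow_attr attr = { .ingress = 1 };
        /* Match all Ethernet traffic; the PMD expands the pattern for RSS. */
        const struct rte_flow_item pattern[] = {
                { .type = RTE_FLOW_ITEM_TYPE_ETH },
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        const struct rte_flow_action_rss rss = {
                .func = RTE_ETH_HASH_FUNCTION_DEFAULT,
                .level = 2, /* hash on the inner-most layer */
                .types = ETH_RSS_IP | ETH_RSS_UDP | ETH_RSS_TCP,
                .queue_num = nb_queues,
                .queue = queues,
        };
        const struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_RSS, .conf = &rss },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };

        return rte_flow_create(port_id, &attr, pattern, actions, error);
}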