From mboxrd@z Thu Jan  1 00:00:00 1970
From: Viacheslav Ovsiienko <viacheslavo@mellanox.com>
To: dev@dpdk.org
Cc: matan@mellanox.com, rasland@mellanox.com, thomas@monjalon.net, orika@mellanox.com, Yongseok Koh
Date: Tue, 5 Nov 2019 08:01:39 +0000
Message-Id: <1572940915-29416-5-git-send-email-viacheslavo@mellanox.com>
X-Mailer: git-send-email 1.8.3.1
In-Reply-To: <1572940915-29416-1-git-send-email-viacheslavo@mellanox.com>
References: <1572940915-29416-1-git-send-email-viacheslavo@mellanox.com>
Subject: [dpdk-dev] [PATCH 04/20] net/mlx5: refactor flow structure

Some rte_flow fields that are local to subflows have been moved into the
mlx5_flow structure. The RSS attributes are grouped into the new
mlx5_flow_rss structure, and tag_resource is moved into the mlx5_flow_dv
structure.
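
For reference, below is a condensed sketch of the resulting layout. It is an
illustration only (field comments are trimmed, unrelated members are elided
with "..." and the MLX5_RSS_HASH_KEY_LEN value is assumed); the authoritative
definitions are in the mlx5_flow.h hunks of this patch.

    #include <stdint.h>

    #define MLX5_RSS_HASH_KEY_LEN 40        /* assumed here for illustration */

    /* RSS description, shared by all device subflows of one rte_flow. */
    struct mlx5_flow_rss {
            uint32_t level;
            uint32_t queue_num;                     /* entries in @p queue */
            uint64_t types;                         /* ETH_RSS_* hash types */
            uint16_t (*queue)[];                    /* destination Rx queues */
            uint8_t key[MLX5_RSS_HASH_KEY_LEN];     /* RSS hash key */
    };

    struct rte_flow {
            ...
            struct mlx5_flow_rss rss;       /* replaces rte_flow_action_rss rss,
                                             * key[] and (*queue)[]
                                             */
            ...
    };

    /* Device subflow: fields local to a subflow now live here. */
    struct mlx5_flow {
            ...
            uint64_t hash_fields;   /* moved from mlx5_flow_dv/mlx5_flow_verbs */
            uint8_t ingress;        /* moved from rte_flow */
            uint32_t group;         /* moved from rte_flow */
            uint8_t transfer;       /* moved from rte_flow */
            union {
                    struct mlx5_flow_dv dv;         /* now holds tag_resource */
                    struct mlx5_flow_verbs verbs;
            };
    };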
Signed-off-by: Yongseok Koh
Signed-off-by: Viacheslav Ovsiienko
Acked-by: Matan Azrad
---
 drivers/net/mlx5/mlx5_flow.c       | 18 +++++---
 drivers/net/mlx5/mlx5_flow.h       | 25 ++++++-----
 drivers/net/mlx5/mlx5_flow_dv.c    | 89 ++++++++++++++++++++------------------
 drivers/net/mlx5/mlx5_flow_verbs.c | 55 ++++++++++++-----------
 4 files changed, 105 insertions(+), 82 deletions(-)

diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 5408797..d1661f2 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -612,7 +612,7 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 	unsigned int i;
 
 	for (i = 0; i != flow->rss.queue_num; ++i) {
-		int idx = (*flow->queue)[i];
+		int idx = (*flow->rss.queue)[i];
 		struct mlx5_rxq_ctrl *rxq_ctrl =
 			container_of((*priv->rxqs)[idx],
 				     struct mlx5_rxq_ctrl, rxq);
@@ -676,7 +676,7 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 
 	assert(dev->data->dev_started);
 	for (i = 0; i != flow->rss.queue_num; ++i) {
-		int idx = (*flow->queue)[i];
+		int idx = (*flow->rss.queue)[i];
 		struct mlx5_rxq_ctrl *rxq_ctrl =
 			container_of((*priv->rxqs)[idx],
 				     struct mlx5_rxq_ctrl, rxq);
@@ -2815,13 +2815,20 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 		goto error_before_flow;
 	}
 	flow->drv_type = flow_get_drv_type(dev, attr);
-	flow->ingress = attr->ingress;
-	flow->transfer = attr->transfer;
 	if (hairpin_id != 0)
 		flow->hairpin_flow_id = hairpin_id;
 	assert(flow->drv_type > MLX5_FLOW_TYPE_MIN &&
 	       flow->drv_type < MLX5_FLOW_TYPE_MAX);
-	flow->queue = (void *)(flow + 1);
+	flow->rss.queue = (void *)(flow + 1);
+	if (rss) {
+		/*
+		 * The following information is required by
+		 * mlx5_flow_hashfields_adjust() in advance.
+		 */
+		flow->rss.level = rss->level;
+		/* RSS type 0 indicates default RSS type (ETH_RSS_IP). */
+		flow->rss.types = !rss->types ? ETH_RSS_IP : rss->types;
+	}
 	LIST_INIT(&flow->dev_flows);
 	if (rss && rss->types) {
 		unsigned int graph_root;
@@ -2861,6 +2868,7 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 		if (!dev_flow)
 			goto error;
 		dev_flow->flow = flow;
+		dev_flow->external = 0;
 		LIST_INSERT_HEAD(&flow->dev_flows, dev_flow, next);
 		ret = flow_drv_translate(dev, dev_flow, &attr_tx,
 					 items_tx.items,
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index 170192d..b9a9507 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -417,7 +417,6 @@ struct mlx5_flow_dv_push_vlan_action_resource {
 
 /* DV flows structure. */
 struct mlx5_flow_dv {
-	uint64_t hash_fields; /**< Fields that participate in the hash. */
 	struct mlx5_hrxq *hrxq; /**< Hash Rx queues. */
 	/* Flow DV api: */
 	struct mlx5_flow_dv_matcher *matcher; /**< Cache to matcher. */
@@ -436,6 +435,8 @@ struct mlx5_flow_dv {
 	/**< Structure for VF VLAN workaround. */
 	struct mlx5_flow_dv_push_vlan_action_resource *push_vlan_res;
 	/**< Pointer to push VLAN action resource in cache. */
+	struct mlx5_flow_dv_tag_resource *tag_resource;
+	/**< pointer to the tag action. */
 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
 	void *actions[MLX5_DV_MAX_NUMBER_OF_ACTIONS];
 	/**< Action list. */
@@ -460,11 +461,18 @@ struct mlx5_flow_verbs {
 	};
 	struct ibv_flow *flow; /**< Verbs flow pointer. */
 	struct mlx5_hrxq *hrxq; /**< Hash Rx queue object. */
-	uint64_t hash_fields; /**< Verbs hash Rx queue hash fields. */
 	struct mlx5_vf_vlan vf_vlan;
 	/**< Structure for VF VLAN workaround. */
 };
 
+struct mlx5_flow_rss {
+	uint32_t level;
+	uint32_t queue_num; /**< Number of entries in @p queue. */
+	uint64_t types; /**< Specific RSS hash types (see ETH_RSS_*). */
+	uint16_t (*queue)[]; /**< Destination queues to redirect traffic to. */
+	uint8_t key[MLX5_RSS_HASH_KEY_LEN]; /**< RSS hash key. */
+};
+
 /** Device flow structure. */
 struct mlx5_flow {
 	LIST_ENTRY(mlx5_flow) next;
@@ -473,6 +481,10 @@ struct mlx5_flow {
 	/**< Bit-fields of present layers, see MLX5_FLOW_LAYER_*. */
 	uint64_t actions;
 	/**< Bit-fields of detected actions, see MLX5_FLOW_ACTION_*. */
+	uint64_t hash_fields; /**< Verbs hash Rx queue hash fields. */
+	uint8_t ingress; /**< 1 if the flow is ingress. */
+	uint32_t group; /**< The group index. */
+	uint8_t transfer; /**< 1 if the flow is E-Switch flow. */
 	union {
 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
 		struct mlx5_flow_dv dv;
@@ -486,18 +498,11 @@ struct mlx5_flow {
 struct rte_flow {
 	TAILQ_ENTRY(rte_flow) next; /**< Pointer to the next flow structure. */
 	enum mlx5_flow_drv_type drv_type; /**< Driver type. */
+	struct mlx5_flow_rss rss; /**< RSS context. */
 	struct mlx5_flow_counter *counter; /**< Holds flow counter. */
-	struct mlx5_flow_dv_tag_resource *tag_resource;
-	/**< pointer to the tag action. */
-	struct rte_flow_action_rss rss;/**< RSS context. */
-	uint8_t key[MLX5_RSS_HASH_KEY_LEN]; /**< RSS hash key. */
-	uint16_t (*queue)[]; /**< Destination queues to redirect traffic to. */
 	LIST_HEAD(dev_flows, mlx5_flow) dev_flows;
 	/**< Device flows that are part of the flow. */
 	struct mlx5_fdir *fdir; /**< Pointer to associated FDIR if any. */
-	uint8_t ingress; /**< 1 if the flow is ingress. */
-	uint32_t group; /**< The group index. */
-	uint8_t transfer; /**< 1 if the flow is E-Switch flow. */
 	uint32_t hairpin_flow_id; /**< The flow id used for hairpin. */
 };
 
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index 930f088..019c9b3 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -1583,10 +1583,9 @@ struct field_modify_info modify_tcp[] = {
 	struct mlx5_priv *priv = dev->data->dev_private;
 	struct mlx5_ibv_shared *sh = priv->sh;
 	struct mlx5_flow_dv_encap_decap_resource *cache_resource;
-	struct rte_flow *flow = dev_flow->flow;
 	struct mlx5dv_dr_domain *domain;
 
-	resource->flags = flow->group ? 0 : 1;
+	resource->flags = dev_flow->group ? 0 : 1;
 	if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
 		domain = sh->fdb_domain;
 	else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
@@ -2745,7 +2744,7 @@ struct field_modify_info modify_tcp[] = {
 	else
 		ns = sh->rx_domain;
 	resource->flags =
-		dev_flow->flow->group ? 0 : MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL;
+		dev_flow->group ? 0 : MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL;
 	/* Lookup a matching resource from cache. */
 	LIST_FOREACH(cache_resource, &sh->modify_cmds, next) {
 		if (resource->ft_type == cache_resource->ft_type &&
@@ -4066,18 +4065,20 @@ struct field_modify_info modify_tcp[] = {
 		   const struct rte_flow_action actions[] __rte_unused,
 		   struct rte_flow_error *error)
 {
-	uint32_t size = sizeof(struct mlx5_flow);
-	struct mlx5_flow *flow;
+	size_t size = sizeof(struct mlx5_flow);
+	struct mlx5_flow *dev_flow;
 
-	flow = rte_calloc(__func__, 1, size, 0);
-	if (!flow) {
+	dev_flow = rte_calloc(__func__, 1, size, 0);
+	if (!dev_flow) {
 		rte_flow_error_set(error, ENOMEM,
 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
 				   "not enough memory to create flow");
 		return NULL;
 	}
-	flow->dv.value.size = MLX5_ST_SZ_BYTES(fte_match_param);
-	return flow;
+	dev_flow->dv.value.size = MLX5_ST_SZ_BYTES(fte_match_param);
+	dev_flow->ingress = attr->ingress;
+	dev_flow->transfer = attr->transfer;
+	return dev_flow;
 }
 
 #ifndef NDEBUG
@@ -5458,7 +5459,7 @@ struct field_modify_info modify_tcp[] = {
 				(void *)cache_resource,
 				rte_atomic32_read(&cache_resource->refcnt));
 			rte_atomic32_inc(&cache_resource->refcnt);
-			dev_flow->flow->tag_resource = cache_resource;
+			dev_flow->dv.tag_resource = cache_resource;
 			return 0;
 		}
 	}
@@ -5480,7 +5481,7 @@ struct field_modify_info modify_tcp[] = {
 	rte_atomic32_init(&cache_resource->refcnt);
 	rte_atomic32_inc(&cache_resource->refcnt);
 	LIST_INSERT_HEAD(&sh->tags, cache_resource, next);
-	dev_flow->flow->tag_resource = cache_resource;
+	dev_flow->dv.tag_resource = cache_resource;
 	DRV_LOG(DEBUG, "new tag resource %p: refcnt %d++",
 		(void *)cache_resource,
 		rte_atomic32_read(&cache_resource->refcnt));
@@ -5660,7 +5661,7 @@ struct field_modify_info modify_tcp[] = {
 				       &table, error);
 	if (ret)
 		return ret;
-	flow->group = table;
+	dev_flow->group = table;
 	if (attr->transfer)
 		res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
 	if (priority == MLX5_FLOW_PRIO_RSVD)
@@ -5697,47 +5698,50 @@ struct field_modify_info modify_tcp[] = {
 		case RTE_FLOW_ACTION_TYPE_FLAG:
 			tag_resource.tag =
 				mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT);
-			if (!flow->tag_resource)
+			if (!dev_flow->dv.tag_resource)
 				if (flow_dv_tag_resource_register
 				    (dev, &tag_resource, dev_flow, error))
 					return errno;
 			dev_flow->dv.actions[actions_n++] =
-					flow->tag_resource->action;
+					dev_flow->dv.tag_resource->action;
 			action_flags |= MLX5_FLOW_ACTION_FLAG;
 			break;
 		case RTE_FLOW_ACTION_TYPE_MARK:
 			tag_resource.tag = mlx5_flow_mark_set
 			      (((const struct rte_flow_action_mark *)
 			       (actions->conf))->id);
-			if (!flow->tag_resource)
+			if (!dev_flow->dv.tag_resource)
 				if (flow_dv_tag_resource_register
 				    (dev, &tag_resource, dev_flow, error))
					return errno;
 			dev_flow->dv.actions[actions_n++] =
-					flow->tag_resource->action;
+					dev_flow->dv.tag_resource->action;
 			action_flags |= MLX5_FLOW_ACTION_MARK;
 			break;
 		case RTE_FLOW_ACTION_TYPE_DROP:
 			action_flags |= MLX5_FLOW_ACTION_DROP;
 			break;
 		case RTE_FLOW_ACTION_TYPE_QUEUE:
+			assert(flow->rss.queue);
 			queue = actions->conf;
 			flow->rss.queue_num = 1;
-			(*flow->queue)[0] = queue->index;
+			(*flow->rss.queue)[0] = queue->index;
 			action_flags |= MLX5_FLOW_ACTION_QUEUE;
 			break;
 		case RTE_FLOW_ACTION_TYPE_RSS:
+			assert(flow->rss.queue);
 			rss = actions->conf;
-			if (flow->queue)
-				memcpy((*flow->queue), rss->queue,
+			if (flow->rss.queue)
+				memcpy((*flow->rss.queue), rss->queue,
 				       rss->queue_num * sizeof(uint16_t));
 			flow->rss.queue_num = rss->queue_num;
 			/* NULL RSS key indicates default RSS key. */
 			rss_key = !rss->key ? rss_hash_default_key : rss->key;
-			memcpy(flow->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
-			/* RSS type 0 indicates default RSS type ETH_RSS_IP. */
-			flow->rss.types = !rss->types ? ETH_RSS_IP : rss->types;
-			flow->rss.level = rss->level;
+			memcpy(flow->rss.key, rss_key, MLX5_RSS_HASH_KEY_LEN);
+			/*
+			 * rss->level and rss.types should be set in advance
+			 * when expanding items for RSS.
+			 */
 			action_flags |= MLX5_FLOW_ACTION_RSS;
 			break;
 		case RTE_FLOW_ACTION_TYPE_COUNT:
@@ -5748,7 +5752,7 @@ struct field_modify_info modify_tcp[] = {
 			flow->counter = flow_dv_counter_alloc(dev,
 							      count->shared,
 							      count->id,
-							      flow->group);
+							      dev_flow->group);
 			if (flow->counter == NULL)
 				goto cnt_err;
 			dev_flow->dv.actions[actions_n++] =
@@ -6046,9 +6050,10 @@ struct field_modify_info modify_tcp[] = {
 			mlx5_flow_tunnel_ip_check(items, next_protocol,
 						  &item_flags, &tunnel);
 			flow_dv_translate_item_ipv4(match_mask, match_value,
-						    items, tunnel, flow->group);
+						    items, tunnel,
+						    dev_flow->group);
 			matcher.priority = MLX5_PRIORITY_MAP_L3;
-			dev_flow->dv.hash_fields |=
+			dev_flow->hash_fields |=
 				mlx5_flow_hashfields_adjust
 					(dev_flow, tunnel,
 					 MLX5_IPV4_LAYER_TYPES,
@@ -6073,9 +6078,10 @@ struct field_modify_info modify_tcp[] = {
 			mlx5_flow_tunnel_ip_check(items, next_protocol,
 						  &item_flags, &tunnel);
 			flow_dv_translate_item_ipv6(match_mask, match_value,
-						    items, tunnel, flow->group);
+						    items, tunnel,
+						    dev_flow->group);
 			matcher.priority = MLX5_PRIORITY_MAP_L3;
-			dev_flow->dv.hash_fields |=
+			dev_flow->hash_fields |=
 				mlx5_flow_hashfields_adjust
 					(dev_flow, tunnel,
 					 MLX5_IPV6_LAYER_TYPES,
@@ -6100,7 +6106,7 @@ struct field_modify_info modify_tcp[] = {
 			flow_dv_translate_item_tcp(match_mask, match_value,
 						   items, tunnel);
 			matcher.priority = MLX5_PRIORITY_MAP_L4;
-			dev_flow->dv.hash_fields |=
+			dev_flow->hash_fields |=
 				mlx5_flow_hashfields_adjust
 					(dev_flow, tunnel, ETH_RSS_TCP,
 					 IBV_RX_HASH_SRC_PORT_TCP |
@@ -6112,7 +6118,7 @@ struct field_modify_info modify_tcp[] = {
 			flow_dv_translate_item_udp(match_mask, match_value,
 						   items, tunnel);
 			matcher.priority = MLX5_PRIORITY_MAP_L4;
-			dev_flow->dv.hash_fields |=
+			dev_flow->hash_fields |=
 				mlx5_flow_hashfields_adjust
 					(dev_flow, tunnel, ETH_RSS_UDP,
 					 IBV_RX_HASH_SRC_PORT_UDP |
@@ -6208,7 +6214,7 @@ struct field_modify_info modify_tcp[] = {
 	matcher.priority = mlx5_flow_adjust_priority(dev, priority,
 						     matcher.priority);
 	matcher.egress = attr->egress;
-	matcher.group = flow->group;
+	matcher.group = dev_flow->group;
 	matcher.transfer = attr->transfer;
 	if (flow_dv_matcher_register(dev, &matcher, dev_flow, error))
 		return -rte_errno;
@@ -6242,7 +6248,7 @@ struct field_modify_info modify_tcp[] = {
 		dv = &dev_flow->dv;
 		n = dv->actions_n;
 		if (dev_flow->actions & MLX5_FLOW_ACTION_DROP) {
-			if (flow->transfer) {
+			if (dev_flow->transfer) {
 				dv->actions[n++] = priv->sh->esw_drop_action;
 			} else {
 				dv->hrxq = mlx5_hrxq_drop_new(dev);
@@ -6260,15 +6266,18 @@ struct field_modify_info modify_tcp[] = {
 			   (MLX5_FLOW_ACTION_QUEUE | MLX5_FLOW_ACTION_RSS)) {
 			struct mlx5_hrxq *hrxq;
 
-			hrxq = mlx5_hrxq_get(dev, flow->key,
+			assert(flow->rss.queue);
+			hrxq = mlx5_hrxq_get(dev, flow->rss.key,
 					     MLX5_RSS_HASH_KEY_LEN,
-					     dv->hash_fields,
-					     (*flow->queue),
+					     dev_flow->hash_fields,
+					     (*flow->rss.queue),
 					     flow->rss.queue_num);
 			if (!hrxq) {
 				hrxq = mlx5_hrxq_new
-					(dev, flow->key, MLX5_RSS_HASH_KEY_LEN,
-					 dv->hash_fields, (*flow->queue),
+					(dev, flow->rss.key,
+					 MLX5_RSS_HASH_KEY_LEN,
+					 dev_flow->hash_fields,
+					 (*flow->rss.queue),
 					 flow->rss.queue_num,
 					 !!(dev_flow->layers &
 					    MLX5_FLOW_LAYER_TUNNEL));
@@ -6578,10 +6587,6 @@ struct field_modify_info modify_tcp[] = {
 		flow_dv_counter_release(dev, flow->counter);
 		flow->counter = NULL;
 	}
-	if (flow->tag_resource) {
-		flow_dv_tag_release(dev, flow->tag_resource);
-		flow->tag_resource = NULL;
-	}
 	while (!LIST_EMPTY(&flow->dev_flows)) {
 		dev_flow = LIST_FIRST(&flow->dev_flows);
 		LIST_REMOVE(dev_flow, next);
@@ -6597,6 +6602,8 @@ struct field_modify_info modify_tcp[] = {
 			flow_dv_port_id_action_resource_release(dev_flow);
 		if (dev_flow->dv.push_vlan_res)
 			flow_dv_push_vlan_action_resource_release(dev_flow);
+		if (dev_flow->dv.tag_resource)
+			flow_dv_tag_release(dev, dev_flow->dv.tag_resource);
 		rte_free(dev_flow);
 	}
 }
diff --git a/drivers/net/mlx5/mlx5_flow_verbs.c b/drivers/net/mlx5/mlx5_flow_verbs.c
index fd27f6c..3ab73c2 100644
--- a/drivers/net/mlx5/mlx5_flow_verbs.c
+++ b/drivers/net/mlx5/mlx5_flow_verbs.c
@@ -864,8 +864,8 @@
 	const struct rte_flow_action_queue *queue = action->conf;
 	struct rte_flow *flow = dev_flow->flow;
 
-	if (flow->queue)
-		(*flow->queue)[0] = queue->index;
+	if (flow->rss.queue)
+		(*flow->rss.queue)[0] = queue->index;
 	flow->rss.queue_num = 1;
 }
 
@@ -889,16 +889,17 @@
 	const uint8_t *rss_key;
 	struct rte_flow *flow = dev_flow->flow;
 
-	if (flow->queue)
-		memcpy((*flow->queue), rss->queue,
+	if (flow->rss.queue)
+		memcpy((*flow->rss.queue), rss->queue,
 		       rss->queue_num * sizeof(uint16_t));
 	flow->rss.queue_num = rss->queue_num;
 	/* NULL RSS key indicates default RSS key. */
 	rss_key = !rss->key ? rss_hash_default_key : rss->key;
-	memcpy(flow->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
-	/* RSS type 0 indicates default RSS type (ETH_RSS_IP). */
-	flow->rss.types = !rss->types ? ETH_RSS_IP : rss->types;
-	flow->rss.level = rss->level;
+	memcpy(flow->rss.key, rss_key, MLX5_RSS_HASH_KEY_LEN);
+	/*
+	 * rss->level and rss.types should be set in advance when expanding
+	 * items for RSS.
+	 */
 }
 
 /**
@@ -1365,22 +1366,23 @@
 		   const struct rte_flow_action actions[],
 		   struct rte_flow_error *error)
 {
-	uint32_t size = sizeof(struct mlx5_flow) + sizeof(struct ibv_flow_attr);
-	struct mlx5_flow *flow;
+	size_t size = sizeof(struct mlx5_flow) + sizeof(struct ibv_flow_attr);
+	struct mlx5_flow *dev_flow;
 
 	size += flow_verbs_get_actions_size(actions);
 	size += flow_verbs_get_items_size(items);
-	flow = rte_calloc(__func__, 1, size, 0);
-	if (!flow) {
+	dev_flow = rte_calloc(__func__, 1, size, 0);
+	if (!dev_flow) {
 		rte_flow_error_set(error, ENOMEM,
 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
 				   "not enough memory to create flow");
 		return NULL;
 	}
-	flow->verbs.attr = (void *)(flow + 1);
-	flow->verbs.specs =
-		(uint8_t *)(flow + 1) + sizeof(struct ibv_flow_attr);
-	return flow;
+	dev_flow->verbs.attr = (void *)(dev_flow + 1);
+	dev_flow->verbs.specs = (void *)(dev_flow->verbs.attr + 1);
+	dev_flow->ingress = attr->ingress;
+	dev_flow->transfer = attr->transfer;
+	return dev_flow;
 }
 
 /**
@@ -1486,7 +1488,7 @@
 			flow_verbs_translate_item_ipv4(dev_flow, items,
 						       item_flags);
 			subpriority = MLX5_PRIORITY_MAP_L3;
-			dev_flow->verbs.hash_fields |=
+			dev_flow->hash_fields |=
 				mlx5_flow_hashfields_adjust
 				(dev_flow, tunnel,
 				 MLX5_IPV4_LAYER_TYPES,
@@ -1498,7 +1500,7 @@
 			flow_verbs_translate_item_ipv6(dev_flow, items,
 						       item_flags);
 			subpriority = MLX5_PRIORITY_MAP_L3;
-			dev_flow->verbs.hash_fields |=
+			dev_flow->hash_fields |=
 				mlx5_flow_hashfields_adjust
 				(dev_flow, tunnel,
 				 MLX5_IPV6_LAYER_TYPES,
@@ -1510,7 +1512,7 @@
 			flow_verbs_translate_item_tcp(dev_flow, items,
 						      item_flags);
 			subpriority = MLX5_PRIORITY_MAP_L4;
-			dev_flow->verbs.hash_fields |=
+			dev_flow->hash_fields |=
 				mlx5_flow_hashfields_adjust
 				(dev_flow, tunnel, ETH_RSS_TCP,
 				 (IBV_RX_HASH_SRC_PORT_TCP |
@@ -1522,7 +1524,7 @@
 			flow_verbs_translate_item_udp(dev_flow, items,
 						      item_flags);
 			subpriority = MLX5_PRIORITY_MAP_L4;
-			dev_flow->verbs.hash_fields |=
+			dev_flow->hash_fields |=
 				mlx5_flow_hashfields_adjust
 				(dev_flow, tunnel, ETH_RSS_UDP,
 				 (IBV_RX_HASH_SRC_PORT_UDP |
@@ -1667,16 +1669,17 @@
 		} else {
 			struct mlx5_hrxq *hrxq;
 
-			hrxq = mlx5_hrxq_get(dev, flow->key,
+			assert(flow->rss.queue);
+			hrxq = mlx5_hrxq_get(dev, flow->rss.key,
 					     MLX5_RSS_HASH_KEY_LEN,
-					     verbs->hash_fields,
-					     (*flow->queue),
+					     dev_flow->hash_fields,
+					     (*flow->rss.queue),
 					     flow->rss.queue_num);
 			if (!hrxq)
-				hrxq = mlx5_hrxq_new(dev, flow->key,
+				hrxq = mlx5_hrxq_new(dev, flow->rss.key,
 						     MLX5_RSS_HASH_KEY_LEN,
-						     verbs->hash_fields,
-						     (*flow->queue),
+						     dev_flow->hash_fields,
+						     (*flow->rss.queue),
 						     flow->rss.queue_num,
 						     !!(dev_flow->layers &
 						     MLX5_FLOW_LAYER_TUNNEL));
-- 
1.8.3.1