From: Viacheslav Ovsiienko <viacheslavo@mellanox.com>
To: dev@dpdk.org
Cc: matan@mellanox.com, rasland@mellanox.com, thomas@monjalon.net,
 orika@mellanox.com, Yongseok Koh
Date: Wed, 6 Nov 2019 17:37:38 +0000
Message-Id: <1573061873-20898-5-git-send-email-viacheslavo@mellanox.com>
X-Mailer: git-send-email 1.8.3.1
In-Reply-To: <1573061873-20898-1-git-send-email-viacheslavo@mellanox.com>
References: <1572940915-29416-1-git-send-email-viacheslavo@mellanox.com>
 <1573061873-20898-1-git-send-email-viacheslavo@mellanox.com>
Subject: [dpdk-dev] [PATCH v2 04/19] net/mlx5: refactor flow structure

Some rte_flow fields that are local to subflows have been moved to the
mlx5_flow (device flow) structure, the RSS attributes are grouped into
the new mlx5_flow_rss structure, and tag_resource is moved to the
mlx5_flow_dv structure.
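
The resulting split is roughly the following (a simplified sketch for review
only: doxygen comments are paraphrased and members that do not move are
omitted; the exact definitions are in the mlx5_flow.h hunks below):

    /* RSS parameters, formerly spread over rte_flow. */
    struct mlx5_flow_rss {
            uint32_t level;                      /* RSS encapsulation level. */
            uint32_t queue_num;                  /* Entries in queue. */
            uint64_t types;                      /* ETH_RSS_* hash types. */
            uint16_t (*queue)[];                 /* Destination Rx queues. */
            uint8_t key[MLX5_RSS_HASH_KEY_LEN];  /* RSS hash key. */
    };

    struct rte_flow {
            /* ... */
            struct mlx5_flow_rss rss;     /* Replaces rss, key and queue. */
            /* ... tag_resource is removed from here. */
    };

    struct mlx5_flow {                    /* Per-subflow (device flow). */
            /* ... */
            uint64_t hash_fields;         /* Moved from mlx5_flow_dv/verbs. */
            uint8_t ingress;              /* Moved from rte_flow. */
            uint32_t group;               /* Moved from rte_flow. */
            uint8_t transfer;             /* Moved from rte_flow. */
            union {
                    struct mlx5_flow_dv dv;       /* Now holds tag_resource. */
                    struct mlx5_flow_verbs verbs;
            };
    };
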
Signed-off-by: Yongseok Koh
Signed-off-by: Viacheslav Ovsiienko
Acked-by: Matan Azrad
---
 drivers/net/mlx5/mlx5_flow.c       | 18 +++++---
 drivers/net/mlx5/mlx5_flow.h       | 25 ++++++-----
 drivers/net/mlx5/mlx5_flow_dv.c    | 89 ++++++++++++++++++++------------------
 drivers/net/mlx5/mlx5_flow_verbs.c | 55 ++++++++++++-----------
 4 files changed, 105 insertions(+), 82 deletions(-)

diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 5408797..d1661f2 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -612,7 +612,7 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 	unsigned int i;
 
 	for (i = 0; i != flow->rss.queue_num; ++i) {
-		int idx = (*flow->queue)[i];
+		int idx = (*flow->rss.queue)[i];
 		struct mlx5_rxq_ctrl *rxq_ctrl =
 			container_of((*priv->rxqs)[idx],
 				     struct mlx5_rxq_ctrl, rxq);
@@ -676,7 +676,7 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 
 	assert(dev->data->dev_started);
 	for (i = 0; i != flow->rss.queue_num; ++i) {
-		int idx = (*flow->queue)[i];
+		int idx = (*flow->rss.queue)[i];
 		struct mlx5_rxq_ctrl *rxq_ctrl =
 			container_of((*priv->rxqs)[idx],
 				     struct mlx5_rxq_ctrl, rxq);
@@ -2815,13 +2815,20 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 		goto error_before_flow;
 	}
 	flow->drv_type = flow_get_drv_type(dev, attr);
-	flow->ingress = attr->ingress;
-	flow->transfer = attr->transfer;
 	if (hairpin_id != 0)
 		flow->hairpin_flow_id = hairpin_id;
 	assert(flow->drv_type > MLX5_FLOW_TYPE_MIN &&
 	       flow->drv_type < MLX5_FLOW_TYPE_MAX);
-	flow->queue = (void *)(flow + 1);
+	flow->rss.queue = (void *)(flow + 1);
+	if (rss) {
+		/*
+		 * The following information is required by
+		 * mlx5_flow_hashfields_adjust() in advance.
+		 */
+		flow->rss.level = rss->level;
+		/* RSS type 0 indicates default RSS type (ETH_RSS_IP). */
+		flow->rss.types = !rss->types ? ETH_RSS_IP : rss->types;
+	}
 	LIST_INIT(&flow->dev_flows);
 	if (rss && rss->types) {
 		unsigned int graph_root;
@@ -2861,6 +2868,7 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 		if (!dev_flow)
 			goto error;
 		dev_flow->flow = flow;
+		dev_flow->external = 0;
 		LIST_INSERT_HEAD(&flow->dev_flows, dev_flow, next);
 		ret = flow_drv_translate(dev, dev_flow, &attr_tx,
 					 items_tx.items,
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index 170192d..b9a9507 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -417,7 +417,6 @@ struct mlx5_flow_dv_push_vlan_action_resource {
 
 /* DV flows structure. */
 struct mlx5_flow_dv {
-	uint64_t hash_fields; /**< Fields that participate in the hash. */
 	struct mlx5_hrxq *hrxq; /**< Hash Rx queues. */
 	/* Flow DV api: */
 	struct mlx5_flow_dv_matcher *matcher; /**< Cache to matcher. */
@@ -436,6 +435,8 @@ struct mlx5_flow_dv {
 	/**< Structure for VF VLAN workaround. */
 	struct mlx5_flow_dv_push_vlan_action_resource *push_vlan_res;
 	/**< Pointer to push VLAN action resource in cache. */
+	struct mlx5_flow_dv_tag_resource *tag_resource;
+	/**< pointer to the tag action. */
 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
 	void *actions[MLX5_DV_MAX_NUMBER_OF_ACTIONS];
 	/**< Action list. */
@@ -460,11 +461,18 @@ struct mlx5_flow_verbs {
 	};
 	struct ibv_flow *flow; /**< Verbs flow pointer. */
 	struct mlx5_hrxq *hrxq; /**< Hash Rx queue object. */
-	uint64_t hash_fields; /**< Verbs hash Rx queue hash fields. */
 	struct mlx5_vf_vlan vf_vlan;
 	/**< Structure for VF VLAN workaround. */
 };
 
+struct mlx5_flow_rss {
+	uint32_t level;
+	uint32_t queue_num; /**< Number of entries in @p queue. */
+	uint64_t types; /**< Specific RSS hash types (see ETH_RSS_*). */
+	uint16_t (*queue)[]; /**< Destination queues to redirect traffic to. */
+	uint8_t key[MLX5_RSS_HASH_KEY_LEN]; /**< RSS hash key. */
+};
+
 /** Device flow structure. */
 struct mlx5_flow {
 	LIST_ENTRY(mlx5_flow) next;
@@ -473,6 +481,10 @@ struct mlx5_flow {
 	/**< Bit-fields of present layers, see MLX5_FLOW_LAYER_*. */
 	uint64_t actions;
 	/**< Bit-fields of detected actions, see MLX5_FLOW_ACTION_*. */
+	uint64_t hash_fields; /**< Verbs hash Rx queue hash fields. */
+	uint8_t ingress; /**< 1 if the flow is ingress. */
+	uint32_t group; /**< The group index. */
+	uint8_t transfer; /**< 1 if the flow is E-Switch flow. */
 	union {
 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
 		struct mlx5_flow_dv dv;
@@ -486,18 +498,11 @@ struct mlx5_flow {
 struct rte_flow {
 	TAILQ_ENTRY(rte_flow) next;
 	/**< Pointer to the next flow structure. */
 	enum mlx5_flow_drv_type drv_type; /**< Driver type. */
+	struct mlx5_flow_rss rss; /**< RSS context. */
 	struct mlx5_flow_counter *counter; /**< Holds flow counter. */
-	struct mlx5_flow_dv_tag_resource *tag_resource;
-	/**< pointer to the tag action. */
-	struct rte_flow_action_rss rss;/**< RSS context. */
-	uint8_t key[MLX5_RSS_HASH_KEY_LEN]; /**< RSS hash key. */
-	uint16_t (*queue)[]; /**< Destination queues to redirect traffic to. */
 	LIST_HEAD(dev_flows, mlx5_flow) dev_flows;
 	/**< Device flows that are part of the flow. */
 	struct mlx5_fdir *fdir; /**< Pointer to associated FDIR if any. */
-	uint8_t ingress; /**< 1 if the flow is ingress. */
-	uint32_t group; /**< The group index. */
-	uint8_t transfer; /**< 1 if the flow is E-Switch flow. */
 	uint32_t hairpin_flow_id; /**< The flow id used for hairpin. */
 };
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index baa34a2..b7e8e0a 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -1585,10 +1585,9 @@ struct field_modify_info modify_tcp[] = {
 	struct mlx5_priv *priv = dev->data->dev_private;
 	struct mlx5_ibv_shared *sh = priv->sh;
 	struct mlx5_flow_dv_encap_decap_resource *cache_resource;
-	struct rte_flow *flow = dev_flow->flow;
 	struct mlx5dv_dr_domain *domain;
 
-	resource->flags = flow->group ? 0 : 1;
+	resource->flags = dev_flow->group ? 0 : 1;
 	if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
 		domain = sh->fdb_domain;
 	else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
@@ -2747,7 +2746,7 @@ struct field_modify_info modify_tcp[] = {
 	else
 		ns = sh->rx_domain;
 	resource->flags =
-		dev_flow->flow->group ? 0 : MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL;
+		dev_flow->group ? 0 : MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL;
 	/* Lookup a matching resource from cache. */
 	LIST_FOREACH(cache_resource, &sh->modify_cmds, next) {
 		if (resource->ft_type == cache_resource->ft_type &&
@@ -4068,18 +4067,20 @@ struct field_modify_info modify_tcp[] = {
 		const struct rte_flow_action actions[] __rte_unused,
 		struct rte_flow_error *error)
 {
-	uint32_t size = sizeof(struct mlx5_flow);
-	struct mlx5_flow *flow;
+	size_t size = sizeof(struct mlx5_flow);
+	struct mlx5_flow *dev_flow;
 
-	flow = rte_calloc(__func__, 1, size, 0);
-	if (!flow) {
+	dev_flow = rte_calloc(__func__, 1, size, 0);
+	if (!dev_flow) {
 		rte_flow_error_set(error, ENOMEM,
 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
 				   "not enough memory to create flow");
 		return NULL;
 	}
-	flow->dv.value.size = MLX5_ST_SZ_BYTES(fte_match_param);
-	return flow;
+	dev_flow->dv.value.size = MLX5_ST_SZ_BYTES(fte_match_param);
+	dev_flow->ingress = attr->ingress;
+	dev_flow->transfer = attr->transfer;
+	return dev_flow;
 }
 
 #ifndef NDEBUG
@@ -5460,7 +5461,7 @@ struct field_modify_info modify_tcp[] = {
 				(void *)cache_resource,
 				rte_atomic32_read(&cache_resource->refcnt));
 			rte_atomic32_inc(&cache_resource->refcnt);
-			dev_flow->flow->tag_resource = cache_resource;
+			dev_flow->dv.tag_resource = cache_resource;
 			return 0;
 		}
 	}
@@ -5482,7 +5483,7 @@ struct field_modify_info modify_tcp[] = {
 	rte_atomic32_init(&cache_resource->refcnt);
 	rte_atomic32_inc(&cache_resource->refcnt);
 	LIST_INSERT_HEAD(&sh->tags, cache_resource, next);
-	dev_flow->flow->tag_resource = cache_resource;
+	dev_flow->dv.tag_resource = cache_resource;
 	DRV_LOG(DEBUG, "new tag resource %p: refcnt %d++",
 		(void *)cache_resource,
 		rte_atomic32_read(&cache_resource->refcnt));
@@ -5662,7 +5663,7 @@ struct field_modify_info modify_tcp[] = {
 				       &table, error);
 	if (ret)
 		return ret;
-	flow->group = table;
+	dev_flow->group = table;
 	if (attr->transfer)
 		res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
 	if (priority == MLX5_FLOW_PRIO_RSVD)
@@ -5699,47 +5700,50 @@ struct field_modify_info modify_tcp[] = {
 		case RTE_FLOW_ACTION_TYPE_FLAG:
 			tag_resource.tag =
 				mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT);
-			if (!flow->tag_resource)
+			if (!dev_flow->dv.tag_resource)
 				if (flow_dv_tag_resource_register
 				    (dev, &tag_resource, dev_flow, error))
 					return errno;
 			dev_flow->dv.actions[actions_n++] =
-				flow->tag_resource->action;
+				dev_flow->dv.tag_resource->action;
 			action_flags |= MLX5_FLOW_ACTION_FLAG;
 			break;
 		case RTE_FLOW_ACTION_TYPE_MARK:
 			tag_resource.tag = mlx5_flow_mark_set
 			      (((const struct rte_flow_action_mark *)
 			       (actions->conf))->id);
-			if (!flow->tag_resource)
+			if (!dev_flow->dv.tag_resource)
 				if (flow_dv_tag_resource_register
 				    (dev, &tag_resource, dev_flow, error))
 					return errno;
 			dev_flow->dv.actions[actions_n++] =
-				flow->tag_resource->action;
+				dev_flow->dv.tag_resource->action;
 			action_flags |= MLX5_FLOW_ACTION_MARK;
 			break;
 		case RTE_FLOW_ACTION_TYPE_DROP:
 			action_flags |= MLX5_FLOW_ACTION_DROP;
 			break;
 		case RTE_FLOW_ACTION_TYPE_QUEUE:
+			assert(flow->rss.queue);
 			queue = actions->conf;
 			flow->rss.queue_num = 1;
-			(*flow->queue)[0] = queue->index;
+			(*flow->rss.queue)[0] = queue->index;
 			action_flags |= MLX5_FLOW_ACTION_QUEUE;
 			break;
 		case RTE_FLOW_ACTION_TYPE_RSS:
+			assert(flow->rss.queue);
 			rss = actions->conf;
-			if (flow->queue)
-				memcpy((*flow->queue), rss->queue,
+			if (flow->rss.queue)
+				memcpy((*flow->rss.queue), rss->queue,
 				       rss->queue_num * sizeof(uint16_t));
 			flow->rss.queue_num = rss->queue_num;
 			/* NULL RSS key indicates default RSS key. */
 			rss_key = !rss->key ? rss_hash_default_key : rss->key;
-			memcpy(flow->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
-			/* RSS type 0 indicates default RSS type ETH_RSS_IP. */
-			flow->rss.types = !rss->types ? ETH_RSS_IP : rss->types;
-			flow->rss.level = rss->level;
+			memcpy(flow->rss.key, rss_key, MLX5_RSS_HASH_KEY_LEN);
+			/*
+			 * rss->level and rss.types should be set in advance
+			 * when expanding items for RSS.
+			 */
 			action_flags |= MLX5_FLOW_ACTION_RSS;
 			break;
 		case RTE_FLOW_ACTION_TYPE_COUNT:
@@ -5750,7 +5754,7 @@ struct field_modify_info modify_tcp[] = {
 			flow->counter = flow_dv_counter_alloc(dev,
 							      count->shared,
 							      count->id,
-							      flow->group);
+							      dev_flow->group);
 			if (flow->counter == NULL)
 				goto cnt_err;
 			dev_flow->dv.actions[actions_n++] =
@@ -6048,9 +6052,10 @@ struct field_modify_info modify_tcp[] = {
 			mlx5_flow_tunnel_ip_check(items, next_protocol,
 						  &item_flags, &tunnel);
 			flow_dv_translate_item_ipv4(match_mask, match_value,
-						    items, tunnel, flow->group);
+						    items, tunnel,
+						    dev_flow->group);
 			matcher.priority = MLX5_PRIORITY_MAP_L3;
-			dev_flow->dv.hash_fields |=
+			dev_flow->hash_fields |=
 				mlx5_flow_hashfields_adjust
 					(dev_flow, tunnel,
 					 MLX5_IPV4_LAYER_TYPES,
@@ -6075,9 +6080,10 @@ struct field_modify_info modify_tcp[] = {
 			mlx5_flow_tunnel_ip_check(items, next_protocol,
 						  &item_flags, &tunnel);
 			flow_dv_translate_item_ipv6(match_mask, match_value,
-						    items, tunnel, flow->group);
+						    items, tunnel,
+						    dev_flow->group);
 			matcher.priority = MLX5_PRIORITY_MAP_L3;
-			dev_flow->dv.hash_fields |=
+			dev_flow->hash_fields |=
 				mlx5_flow_hashfields_adjust
 					(dev_flow, tunnel,
 					 MLX5_IPV6_LAYER_TYPES,
@@ -6102,7 +6108,7 @@ struct field_modify_info modify_tcp[] = {
 			flow_dv_translate_item_tcp(match_mask, match_value,
 						   items, tunnel);
 			matcher.priority = MLX5_PRIORITY_MAP_L4;
-			dev_flow->dv.hash_fields |=
+			dev_flow->hash_fields |=
 				mlx5_flow_hashfields_adjust
 					(dev_flow, tunnel, ETH_RSS_TCP,
 					 IBV_RX_HASH_SRC_PORT_TCP |
@@ -6114,7 +6120,7 @@ struct field_modify_info modify_tcp[] = {
 			flow_dv_translate_item_udp(match_mask, match_value,
 						   items, tunnel);
 			matcher.priority = MLX5_PRIORITY_MAP_L4;
-			dev_flow->dv.hash_fields |=
+			dev_flow->hash_fields |=
 				mlx5_flow_hashfields_adjust
 					(dev_flow, tunnel, ETH_RSS_UDP,
 					 IBV_RX_HASH_SRC_PORT_UDP |
@@ -6210,7 +6216,7 @@ struct field_modify_info modify_tcp[] = {
 	matcher.priority = mlx5_flow_adjust_priority(dev, priority,
 						     matcher.priority);
 	matcher.egress = attr->egress;
-	matcher.group = flow->group;
+	matcher.group = dev_flow->group;
 	matcher.transfer = attr->transfer;
 	if (flow_dv_matcher_register(dev, &matcher, dev_flow, error))
 		return -rte_errno;
@@ -6244,7 +6250,7 @@ struct field_modify_info modify_tcp[] = {
 		dv = &dev_flow->dv;
 		n = dv->actions_n;
 		if (dev_flow->actions & MLX5_FLOW_ACTION_DROP) {
-			if (flow->transfer) {
+			if (dev_flow->transfer) {
 				dv->actions[n++] = priv->sh->esw_drop_action;
 			} else {
 				dv->hrxq = mlx5_hrxq_drop_new(dev);
@@ -6262,15 +6268,18 @@ struct field_modify_info modify_tcp[] = {
 			   (MLX5_FLOW_ACTION_QUEUE | MLX5_FLOW_ACTION_RSS)) {
 			struct mlx5_hrxq *hrxq;
 
-			hrxq = mlx5_hrxq_get(dev, flow->key,
+			assert(flow->rss.queue);
+			hrxq = mlx5_hrxq_get(dev, flow->rss.key,
 					     MLX5_RSS_HASH_KEY_LEN,
-					     dv->hash_fields,
-					     (*flow->queue),
+					     dev_flow->hash_fields,
+					     (*flow->rss.queue),
 					     flow->rss.queue_num);
 			if (!hrxq) {
 				hrxq = mlx5_hrxq_new
-					(dev, flow->key, MLX5_RSS_HASH_KEY_LEN,
-					 dv->hash_fields, (*flow->queue),
+					(dev, flow->rss.key,
+					 MLX5_RSS_HASH_KEY_LEN,
+					 dev_flow->hash_fields,
+					 (*flow->rss.queue),
 					 flow->rss.queue_num,
 					 !!(dev_flow->layers &
 					    MLX5_FLOW_LAYER_TUNNEL));
@@ -6580,10 +6589,6 @@ struct field_modify_info modify_tcp[] = {
 		flow_dv_counter_release(dev, flow->counter);
 		flow->counter = NULL;
 	}
-	if (flow->tag_resource) {
-		flow_dv_tag_release(dev, flow->tag_resource);
-		flow->tag_resource = NULL;
-	}
 	while (!LIST_EMPTY(&flow->dev_flows)) {
 		dev_flow = LIST_FIRST(&flow->dev_flows);
 		LIST_REMOVE(dev_flow, next);
@@ -6599,6 +6604,8 @@ struct field_modify_info modify_tcp[] = {
 			flow_dv_port_id_action_resource_release(dev_flow);
 		if (dev_flow->dv.push_vlan_res)
 			flow_dv_push_vlan_action_resource_release(dev_flow);
+		if (dev_flow->dv.tag_resource)
+			flow_dv_tag_release(dev, dev_flow->dv.tag_resource);
 		rte_free(dev_flow);
 	}
 }
diff --git a/drivers/net/mlx5/mlx5_flow_verbs.c b/drivers/net/mlx5/mlx5_flow_verbs.c
index fd27f6c..3ab73c2 100644
--- a/drivers/net/mlx5/mlx5_flow_verbs.c
+++ b/drivers/net/mlx5/mlx5_flow_verbs.c
@@ -864,8 +864,8 @@
 	const struct rte_flow_action_queue *queue = action->conf;
 	struct rte_flow *flow = dev_flow->flow;
 
-	if (flow->queue)
-		(*flow->queue)[0] = queue->index;
+	if (flow->rss.queue)
+		(*flow->rss.queue)[0] = queue->index;
 	flow->rss.queue_num = 1;
 }
 
@@ -889,16 +889,17 @@
 	const uint8_t *rss_key;
 	struct rte_flow *flow = dev_flow->flow;
 
-	if (flow->queue)
-		memcpy((*flow->queue), rss->queue,
+	if (flow->rss.queue)
+		memcpy((*flow->rss.queue), rss->queue,
 		       rss->queue_num * sizeof(uint16_t));
 	flow->rss.queue_num = rss->queue_num;
 	/* NULL RSS key indicates default RSS key. */
 	rss_key = !rss->key ? rss_hash_default_key : rss->key;
-	memcpy(flow->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
-	/* RSS type 0 indicates default RSS type (ETH_RSS_IP). */
-	flow->rss.types = !rss->types ? ETH_RSS_IP : rss->types;
-	flow->rss.level = rss->level;
+	memcpy(flow->rss.key, rss_key, MLX5_RSS_HASH_KEY_LEN);
+	/*
+	 * rss->level and rss.types should be set in advance when expanding
+	 * items for RSS.
+	 */
 }
 
 /**
@@ -1365,22 +1366,23 @@
 			   const struct rte_flow_action actions[],
 			   struct rte_flow_error *error)
 {
-	uint32_t size = sizeof(struct mlx5_flow) + sizeof(struct ibv_flow_attr);
-	struct mlx5_flow *flow;
+	size_t size = sizeof(struct mlx5_flow) + sizeof(struct ibv_flow_attr);
+	struct mlx5_flow *dev_flow;
 
 	size += flow_verbs_get_actions_size(actions);
 	size += flow_verbs_get_items_size(items);
-	flow = rte_calloc(__func__, 1, size, 0);
-	if (!flow) {
+	dev_flow = rte_calloc(__func__, 1, size, 0);
+	if (!dev_flow) {
 		rte_flow_error_set(error, ENOMEM,
 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
 				   "not enough memory to create flow");
 		return NULL;
 	}
-	flow->verbs.attr = (void *)(flow + 1);
-	flow->verbs.specs =
-		(uint8_t *)(flow + 1) + sizeof(struct ibv_flow_attr);
-	return flow;
+	dev_flow->verbs.attr = (void *)(dev_flow + 1);
+	dev_flow->verbs.specs = (void *)(dev_flow->verbs.attr + 1);
+	dev_flow->ingress = attr->ingress;
+	dev_flow->transfer = attr->transfer;
+	return dev_flow;
 }
 
 /**
@@ -1486,7 +1488,7 @@
 			flow_verbs_translate_item_ipv4(dev_flow, items,
 						       item_flags);
 			subpriority = MLX5_PRIORITY_MAP_L3;
-			dev_flow->verbs.hash_fields |=
+			dev_flow->hash_fields |=
 				mlx5_flow_hashfields_adjust
 				(dev_flow, tunnel,
 				 MLX5_IPV4_LAYER_TYPES,
@@ -1498,7 +1500,7 @@
 			flow_verbs_translate_item_ipv6(dev_flow, items,
 						       item_flags);
 			subpriority = MLX5_PRIORITY_MAP_L3;
-			dev_flow->verbs.hash_fields |=
+			dev_flow->hash_fields |=
 				mlx5_flow_hashfields_adjust
 				(dev_flow, tunnel,
 				 MLX5_IPV6_LAYER_TYPES,
@@ -1510,7 +1512,7 @@
 			flow_verbs_translate_item_tcp(dev_flow, items,
 						      item_flags);
 			subpriority = MLX5_PRIORITY_MAP_L4;
-			dev_flow->verbs.hash_fields |=
+			dev_flow->hash_fields |=
 				mlx5_flow_hashfields_adjust
 				(dev_flow, tunnel, ETH_RSS_TCP,
 				 (IBV_RX_HASH_SRC_PORT_TCP |
@@ -1522,7 +1524,7 @@
 			flow_verbs_translate_item_udp(dev_flow, items,
 						      item_flags);
 			subpriority = MLX5_PRIORITY_MAP_L4;
-			dev_flow->verbs.hash_fields |=
+			dev_flow->hash_fields |=
 				mlx5_flow_hashfields_adjust
 				(dev_flow, tunnel, ETH_RSS_UDP,
 				 (IBV_RX_HASH_SRC_PORT_UDP |
@@ -1667,16 +1669,17 @@
 	} else {
 		struct mlx5_hrxq *hrxq;
 
-		hrxq = mlx5_hrxq_get(dev, flow->key,
+		assert(flow->rss.queue);
+		hrxq = mlx5_hrxq_get(dev, flow->rss.key,
 				     MLX5_RSS_HASH_KEY_LEN,
-				     verbs->hash_fields,
-				     (*flow->queue),
+				     dev_flow->hash_fields,
+				     (*flow->rss.queue),
 				     flow->rss.queue_num);
 		if (!hrxq)
-			hrxq = mlx5_hrxq_new(dev, flow->key,
+			hrxq = mlx5_hrxq_new(dev, flow->rss.key,
 					     MLX5_RSS_HASH_KEY_LEN,
-					     verbs->hash_fields,
-					     (*flow->queue),
+					     dev_flow->hash_fields,
+					     (*flow->rss.queue),
 					     flow->rss.queue_num,
 					     !!(dev_flow->layers &
 						MLX5_FLOW_LAYER_TUNNEL));
-- 
1.8.3.1