From: Li Zhang
To: dekelp@nvidia.com, orika@nvidia.com, viacheslavo@nvidia.com, matan@nvidia.com, shahafs@nvidia.com
Cc: dev@dpdk.org, thomas@monjalon.net, rasland@nvidia.com, roniba@nvidia.com
Date: Fri, 2 Apr 2021 18:16:23 +0300
Message-Id: <20210402151627.1531623-10-lizh@nvidia.com>
X-Mailer: git-send-email 2.21.0
In-Reply-To: <20210402151627.1531623-1-lizh@nvidia.com>
References: <20210331073632.1443011-1-lizh@nvidia.com> <20210402151627.1531623-1-lizh@nvidia.com>
Subject: [dpdk-dev] [PATCH v2 09/13] net/mlx5: init/uninit flow meter queue for WQE

Initialize and uninitialize the flow meter ASO SQ used to post WQEs.

Signed-off-by: Li Zhang
---
 drivers/net/mlx5/linux/mlx5_os.c | 17 ++++
 drivers/net/mlx5/meson.build | 2 +-
 drivers/net/mlx5/mlx5.c | 78 ++++++++++++++-
 drivers/net/mlx5/mlx5.h | 22 +++--
 drivers/net/mlx5/mlx5_flow.h | 4 +-
 .../mlx5/{mlx5_flow_age.c => mlx5_flow_aso.c} | 96 ++++++++++++++++---
 drivers/net/mlx5/mlx5_flow_dv.c | 7 +-
 drivers/net/mlx5/mlx5_flow_meter.c | 7 +-
 8 files changed, 201 insertions(+), 32 deletions(-)
 rename drivers/net/mlx5/{mlx5_flow_age.c => mlx5_flow_aso.c} (85%)

diff --git a/drivers/net/mlx5/linux/mlx5_os.c b/drivers/net/mlx5/linux/mlx5_os.c
index c3ae1972de..bd08fe55ab 100644
--- a/drivers/net/mlx5/linux/mlx5_os.c
+++ b/drivers/net/mlx5/linux/mlx5_os.c
@@ -741,6 +741,7 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
     int own_domain_id = 0;
     uint16_t port_id;
     unsigned int i;
+    uint32_t log_obj_size;
 #ifdef HAVE_MLX5DV_DR_DEVX_PORT
     struct mlx5dv_devx_port devx_port = { .comp_mask = 0 };
 #endif
@@ -1235,6 +1236,22 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
                      priv->mtr_color_reg);
         }
     }
+    if (config->hca_attr.qos.sup &&
+        config->hca_attr.qos.flow_meter_aso_sup) {
+        log_obj_size =
+            rte_log2_u32(MLX5_ASO_MTRS_PER_POOL >> 1);
+        if (log_obj_size >=
+            config->hca_attr.qos.log_meter_aso_granularity &&
+            log_obj_size <=
+            config->hca_attr.qos.log_meter_aso_max_alloc) {
+            sh->meter_aso_en = 1;
+            err = mlx5_aso_flow_mtrs_mng_init(priv);
+            if (err) {
+                err = -err;
+                goto error;
+            }
+        }
+    }
 #endif
 #ifdef HAVE_MLX5_DR_CREATE_ACTION_ASO
     if (config->hca_attr.flow_hit_aso &&
diff --git a/drivers/net/mlx5/meson.build b/drivers/net/mlx5/meson.build
index f2fafbdd05..89a16f8f3a 100644
--- a/drivers/net/mlx5/meson.build
+++ b/drivers/net/mlx5/meson.build
@@ -15,7 +15,7 @@ sources = files(
     'mlx5_flow.c',
     'mlx5_flow_meter.c',
     'mlx5_flow_dv.c',
-    'mlx5_flow_age.c',
+    'mlx5_flow_aso.c',
     'mlx5_mac.c',
     'mlx5_mr.c',
     'mlx5_rss.c',
diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index 147283d655..915cf995e0 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -381,7 +381,7 @@ mlx5_flow_aso_age_mng_init(struct mlx5_dev_ctx_shared *sh)
         rte_errno = ENOMEM;
         return -ENOMEM;
     }
-    err = mlx5_aso_queue_init(sh);
+    err = mlx5_aso_queue_init(sh, ASO_OPC_MOD_FLOW_HIT);
     if (err) {
         mlx5_free(sh->aso_age_mng);
         return -1;
@@ -403,8 +403,8 @@ mlx5_flow_aso_age_mng_close(struct mlx5_dev_ctx_shared *sh)
 {
     int i, j;

-    mlx5_aso_queue_stop(sh);
-    mlx5_aso_queue_uninit(sh);
+    mlx5_aso_flow_hit_queue_poll_stop(sh);
+    mlx5_aso_queue_uninit(sh, ASO_OPC_MOD_FLOW_HIT);
     if (sh->aso_age_mng->pools) {
         struct mlx5_aso_age_pool *pool;

@@ -542,6 +542,66 @@ mlx5_flow_counters_mng_close(struct mlx5_dev_ctx_shared *sh)
     memset(&sh->cmng, 0, sizeof(sh->cmng));
 }

+/**
+ * Initialize the aso flow meters management structure.
+ *
+ * @param[in] sh
+ *   Pointer to mlx5_dev_ctx_shared object to free
+ */
+int
+mlx5_aso_flow_mtrs_mng_init(struct mlx5_priv *priv)
+{
+    if (!priv->mtr_idx_tbl) {
+        priv->mtr_idx_tbl = mlx5_l3t_create(MLX5_L3T_TYPE_DWORD);
+        if (!priv->mtr_idx_tbl) {
+            DRV_LOG(ERR, "fail to create meter lookup table.");
+            rte_errno = ENOMEM;
+            return -ENOMEM;
+        }
+    }
+    if (!priv->sh->mtrmng) {
+        priv->sh->mtrmng = mlx5_malloc(MLX5_MEM_ZERO,
+            sizeof(*priv->sh->mtrmng),
+            RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
+        if (!priv->sh->mtrmng) {
+            DRV_LOG(ERR, "mlx5_aso_mtr_pools_mng allocation was failed.");
+            rte_errno = ENOMEM;
+            return -ENOMEM;
+        }
+        rte_spinlock_init(&priv->sh->mtrmng->mtrsl);
+        LIST_INIT(&priv->sh->mtrmng->meters);
+    }
+    return 0;
+}
+
+/**
+ * Close and release all the resources of
+ * the ASO flow meter management structure.
+ *
+ * @param[in] sh
+ *   Pointer to mlx5_dev_ctx_shared object to free.
+ */
+static void
+mlx5_aso_flow_mtrs_mng_close(struct mlx5_dev_ctx_shared *sh)
+{
+    struct mlx5_aso_mtr_pool *mtr_pool;
+    struct mlx5_aso_mtr_pools_mng *mtrmng = sh->mtrmng;
+    uint32_t idx;
+
+    mlx5_aso_queue_uninit(sh, ASO_OPC_MOD_POLICER);
+    idx = mtrmng->n_valid;
+    while (idx--) {
+        mtr_pool = mtrmng->pools[idx];
+        claim_zero(mlx5_devx_cmd_destroy
+            (mtr_pool->devx_obj));
+        mtrmng->n_valid--;
+        mlx5_free(mtr_pool);
+    }
+    mlx5_free(sh->mtrmng->pools);
+    mlx5_free(sh->mtrmng);
+    sh->mtrmng = NULL;
+}
+
 /* Send FLOW_AGED event if needed. */
 void
 mlx5_age_event_prepare(struct mlx5_dev_ctx_shared *sh)
@@ -1090,6 +1150,8 @@ mlx5_free_shared_dev_ctx(struct mlx5_dev_ctx_shared *sh)
         mlx5_flow_aso_age_mng_close(sh);
         sh->aso_age_mng = NULL;
     }
+    if (sh->mtrmng)
+        mlx5_aso_flow_mtrs_mng_close(sh);
     mlx5_flow_ipool_destroy(sh);
     mlx5_os_dev_shared_handler_uninstall(sh);
     if (sh->cnt_id_tbl) {
@@ -1294,6 +1356,8 @@ mlx5_dev_close(struct rte_eth_dev *dev)
     struct mlx5_priv *priv = dev->data->dev_private;
     unsigned int i;
     int ret;
+    uint32_t mtr_idx;
+    void *entry;

     if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
         /* Check if process_private released. */
@@ -1370,6 +1434,14 @@ mlx5_dev_close(struct rte_eth_dev *dev)
         close(priv->nl_socket_rdma);
     if (priv->vmwa_context)
         mlx5_vlan_vmwa_exit(priv->vmwa_context);
+    if (priv->mtr_idx_tbl) {
+        MLX5_L3T_FOREACH(priv->mtr_idx_tbl, i, entry) {
+            mtr_idx = *(uint32_t *)entry;
+            if (mtr_idx)
+                mlx5_flow_mtr_free(dev, mtr_idx);
+        }
+        mlx5_l3t_destroy(priv->mtr_idx_tbl);
+    }
     ret = mlx5_hrxq_verify(dev);
     if (ret)
         DRV_LOG(WARNING, "port %u some hash Rx queue still remain",
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 820633e4ef..89fbf1a9b9 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -491,8 +491,13 @@ struct mlx5_aso_devx_mr {
 };

 struct mlx5_aso_sq_elem {
-    struct mlx5_aso_age_pool *pool;
-    uint16_t burst_size;
+    union {
+        struct {
+            struct mlx5_aso_age_pool *pool;
+            uint16_t burst_size;
+        };
+        struct mlx5_aso_mtr *mtr;
+    };
 };

 struct mlx5_aso_sq {
@@ -764,7 +769,6 @@ struct mlx5_aso_mtr_pools_mng {
     volatile uint16_t n_valid; /* Number of valid pools. */
     uint16_t n; /* Number of pools. */
     rte_spinlock_t mtrsl; /* The ASO flow meter free list lock. */
-    struct mlx5_l3t_tbl *mtr_idx_tbl; /* Meter index lookup table. */
     struct aso_meter_list meters; /* Free ASO flow meter list. */
     struct mlx5_aso_sq sq; /*SQ using by ASO flow meter. */
     struct mlx5_aso_mtr_pool **pools; /* ASO flow meter pool array. */
@@ -1182,6 +1186,7 @@ struct mlx5_priv {
     uint8_t mtr_color_reg; /* Meter color match REG_C. */
     struct mlx5_mtr_profiles flow_meter_profiles; /* MTR profile list. */
     struct mlx5_legacy_flow_meters flow_meters; /* MTR list. */
+    struct mlx5_l3t_tbl *mtr_idx_tbl; /* Meter index lookup table. */
     uint8_t skip_default_rss_reta; /* Skip configuration of default reta. */
     uint8_t fdb_def_rule; /* Whether fdb jump to table 1 is configured. */
     struct mlx5_mp_id mp_id; /* ID of a multi-process process */
@@ -1244,6 +1249,7 @@ int mlx5_hairpin_cap_get(struct rte_eth_dev *dev,
 bool mlx5_flex_parser_ecpri_exist(struct rte_eth_dev *dev);
 int mlx5_flex_parser_ecpri_alloc(struct rte_eth_dev *dev);
 int mlx5_flow_aso_age_mng_init(struct mlx5_dev_ctx_shared *sh);
+int mlx5_aso_flow_mtrs_mng_init(struct mlx5_priv *priv);

 /* mlx5_ethdev.c */

@@ -1504,9 +1510,11 @@ eth_tx_burst_t mlx5_select_tx_function(struct rte_eth_dev *dev);

 /* mlx5_flow_aso.c */

-int mlx5_aso_queue_init(struct mlx5_dev_ctx_shared *sh);
-int mlx5_aso_queue_start(struct mlx5_dev_ctx_shared *sh);
-int mlx5_aso_queue_stop(struct mlx5_dev_ctx_shared *sh);
-void mlx5_aso_queue_uninit(struct mlx5_dev_ctx_shared *sh);
+int mlx5_aso_queue_init(struct mlx5_dev_ctx_shared *sh,
+        enum mlx5_access_aso_opc_mod aso_opc_mod);
+int mlx5_aso_flow_hit_queue_poll_start(struct mlx5_dev_ctx_shared *sh);
+int mlx5_aso_flow_hit_queue_poll_stop(struct mlx5_dev_ctx_shared *sh);
+void mlx5_aso_queue_uninit(struct mlx5_dev_ctx_shared *sh,
+        enum mlx5_access_aso_opc_mod aso_opc_mod);

 #endif /* RTE_PMD_MLX5_H_ */
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index 8debe1c845..9e146685c2 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -827,8 +827,8 @@ struct mlx5_flow {
 #define MLX5_FLOW_METER_DISABLE 0
 #define MLX5_FLOW_METER_ENABLE 1

-#define MLX5_ASO_CQE_RESPONSE_DELAY 10
-#define MLX5_MTR_POLL_CQE_TIMES 100000u
+#define MLX5_ASO_WQE_CQE_RESPONSE_DELAY 10u
+#define MLX5_MTR_POLL_WQE_CQE_TIMES 100000u

 #define MLX5_MAN_WIDTH 8
 /* Legacy Meter parameter structure. */
diff --git a/drivers/net/mlx5/mlx5_flow_age.c b/drivers/net/mlx5/mlx5_flow_aso.c
similarity index 85%
rename from drivers/net/mlx5/mlx5_flow_age.c
rename to drivers/net/mlx5/mlx5_flow_aso.c
index 00cb20dd62..067471ba0f 100644
--- a/drivers/net/mlx5/mlx5_flow_age.c
+++ b/drivers/net/mlx5/mlx5_flow_aso.c
@@ -144,7 +144,6 @@ mlx5_aso_destroy_sq(struct mlx5_aso_sq *sq)
 {
     mlx5_devx_sq_destroy(&sq->sq_obj);
     mlx5_aso_cq_destroy(&sq->cq);
-    mlx5_aso_devx_dereg_mr(&sq->mr);
     memset(sq, 0, sizeof(*sq));
 }

@@ -155,7 +154,7 @@ mlx5_aso_destroy_sq(struct mlx5_aso_sq *sq)
  *   ASO SQ to initialize.
  */
 static void
-mlx5_aso_init_sq(struct mlx5_aso_sq *sq)
+mlx5_aso_age_init_sq(struct mlx5_aso_sq *sq)
 {
     volatile struct mlx5_aso_wqe *restrict wqe;
     int i;
@@ -181,6 +180,39 @@ mlx5_aso_init_sq(struct mlx5_aso_sq *sq)
     }
 }

+/**
+ * Initialize Send Queue used for ASO flow meter access.
+ *
+ * @param[in] sq
+ *   ASO SQ to initialize.
+ */
+static void
+mlx5_aso_mtr_init_sq(struct mlx5_aso_sq *sq)
+{
+    volatile struct mlx5_aso_wqe *restrict wqe;
+    int i;
+    int size = 1 << sq->log_desc_n;
+    uint32_t idx;
+
+    /* All the next fields state should stay constant. */
+    for (i = 0, wqe = &sq->sq_obj.aso_wqes[0]; i < size; ++i, ++wqe) {
+        wqe->general_cseg.sq_ds = rte_cpu_to_be_32((sq->sqn << 8) |
+            (sizeof(*wqe) >> 4));
+        wqe->aso_cseg.operand_masks = RTE_BE32(0u |
+            (ASO_OPER_LOGICAL_OR << ASO_CSEG_COND_OPER_OFFSET) |
+            (ASO_OP_ALWAYS_TRUE << ASO_CSEG_COND_1_OPER_OFFSET) |
+            (ASO_OP_ALWAYS_TRUE << ASO_CSEG_COND_0_OPER_OFFSET) |
+            (BYTEWISE_64BYTE << ASO_CSEG_DATA_MASK_MODE_OFFSET));
+        wqe->general_cseg.flags = RTE_BE32(MLX5_COMP_ALWAYS <<
+            MLX5_COMP_MODE_OFFSET);
+        for (idx = 0; idx < MLX5_ASO_METERS_PER_WQE;
+            idx++)
+            wqe->aso_dseg.mtrs[idx].v_bo_sc_bbog_mm =
+                RTE_BE32((1 << ASO_DSEG_VALID_OFFSET) |
+                (MLX5_FLOW_COLOR_GREEN << ASO_DSEG_SC_OFFSET));
+    }
+}
+
 /**
  * Create Send Queue used for ASO access.
  *
@@ -216,13 +248,9 @@ mlx5_aso_sq_create(void *ctx, struct mlx5_aso_sq *sq, int socket,
     struct mlx5_devx_modify_sq_attr modify_attr = {
         .state = MLX5_SQC_STATE_RDY,
     };
-    uint32_t sq_desc_n = 1 << log_desc_n;
     uint16_t log_wqbb_n;
     int ret;

-    if (mlx5_aso_devx_reg_mr(ctx, (MLX5_ASO_AGE_ACTIONS_PER_POOL / 8) *
-                 sq_desc_n, &sq->mr, socket, pdn))
-        return -1;
     if (mlx5_aso_cq_create(ctx, &sq->cq, log_desc_n, socket,
                    mlx5_os_get_devx_uar_page_id(uar)))
         goto error;
@@ -247,7 +275,6 @@ mlx5_aso_sq_create(void *ctx, struct mlx5_aso_sq *sq, int socket,
     sq->tail = 0;
     sq->sqn = sq->sq_obj.sq->id;
     sq->uar_addr = mlx5_os_get_devx_uar_reg_addr(uar);
-    mlx5_aso_init_sq(sq);
     return 0;
 error:
     mlx5_aso_destroy_sq(sq);
@@ -264,11 +291,37 @@ mlx5_aso_sq_create(void *ctx, struct mlx5_aso_sq *sq, int socket,
  *   0 on success, a negative errno value otherwise and rte_errno is set.
  */
 int
-mlx5_aso_queue_init(struct mlx5_dev_ctx_shared *sh)
+mlx5_aso_queue_init(struct mlx5_dev_ctx_shared *sh,
+        enum mlx5_access_aso_opc_mod aso_opc_mod)
 {
-    return mlx5_aso_sq_create(sh->ctx, &sh->aso_age_mng->aso_sq, 0,
+    uint32_t sq_desc_n = 1 << MLX5_ASO_QUEUE_LOG_DESC;
+
+    switch (aso_opc_mod) {
+    case ASO_OPC_MOD_FLOW_HIT:
+        if (mlx5_aso_devx_reg_mr(sh->ctx,
+            (MLX5_ASO_AGE_ACTIONS_PER_POOL / 8) *
+            sq_desc_n, &sh->aso_age_mng->aso_sq.mr, 0, sh->pdn))
+            return -1;
+        if (mlx5_aso_sq_create(sh->ctx, &sh->aso_age_mng->aso_sq, 0,
+                sh->tx_uar, sh->pdn, MLX5_ASO_QUEUE_LOG_DESC,
+                sh->sq_ts_format)) {
+            mlx5_aso_devx_dereg_mr(&sh->aso_age_mng->aso_sq.mr);
+            return -1;
+        }
+        mlx5_aso_age_init_sq(&sh->aso_age_mng->aso_sq);
+        break;
+    case ASO_OPC_MOD_POLICER:
+        if (mlx5_aso_sq_create(sh->ctx, &sh->mtrmng->sq, 0,
                 sh->tx_uar, sh->pdn, MLX5_ASO_QUEUE_LOG_DESC,
-                sh->sq_ts_format);
+                sh->sq_ts_format))
+            return -1;
+        mlx5_aso_mtr_init_sq(&sh->mtrmng->sq);
+        break;
+    default:
+        DRV_LOG(ERR, "Unknown ASO operation mode");
+        return -1;
+    }
+    return 0;
 }

 /**
@@ -278,9 +331,24 @@ mlx5_aso_queue_init(struct mlx5_dev_ctx_shared *sh)
  *   Pointer to shared device context.
  */
 void
-mlx5_aso_queue_uninit(struct mlx5_dev_ctx_shared *sh)
+mlx5_aso_queue_uninit(struct mlx5_dev_ctx_shared *sh,
+        enum mlx5_access_aso_opc_mod aso_opc_mod)
 {
-    mlx5_aso_destroy_sq(&sh->aso_age_mng->aso_sq);
+    struct mlx5_aso_sq *sq;
+
+    switch (aso_opc_mod) {
+    case ASO_OPC_MOD_FLOW_HIT:
+        mlx5_aso_devx_dereg_mr(&sh->aso_age_mng->aso_sq.mr);
+        sq = &sh->aso_age_mng->aso_sq;
+        break;
+    case ASO_OPC_MOD_POLICER:
+        sq = &sh->mtrmng->sq;
+        break;
+    default:
+        DRV_LOG(ERR, "Unknown ASO operation mode");
+        return;
+    }
+    mlx5_aso_destroy_sq(sq);
 }

 /**
@@ -555,7 +623,7 @@ mlx5_flow_aso_alarm(void *arg)
  *   0 on success, a negative errno value otherwise and rte_errno is set.
  */
 int
-mlx5_aso_queue_start(struct mlx5_dev_ctx_shared *sh)
+mlx5_aso_flow_hit_queue_poll_start(struct mlx5_dev_ctx_shared *sh)
 {
     if (rte_eal_alarm_set(US_PER_S, mlx5_flow_aso_alarm, sh)) {
         DRV_LOG(ERR, "Cannot reinitialize ASO age alarm.");
@@ -574,7 +642,7 @@ mlx5_aso_queue_start(struct mlx5_dev_ctx_shared *sh)
  *   0 on success, a negative errno value otherwise and rte_errno is set.
  */
 int
-mlx5_aso_queue_stop(struct mlx5_dev_ctx_shared *sh)
+mlx5_aso_flow_hit_queue_poll_stop(struct mlx5_dev_ctx_shared *sh)
 {
     int retries = 1024;

diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index b2178dea18..03dcc2bfd4 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -5947,6 +5947,11 @@ flow_dv_mtr_container_resize(struct rte_eth_dev *dev)
         rte_errno = ENOMEM;
         return -ENOMEM;
     }
+    if (!mtrmng->n)
+        if (mlx5_aso_queue_init(priv->sh, ASO_OPC_MOD_POLICER)) {
+            mlx5_free(pools);
+            return -ENOMEM;
+        }
     if (old_pools)
         memcpy(pools, old_pools, mtrmng->n *
                sizeof(struct mlx5_aso_mtr_pool *));
@@ -10818,7 +10823,7 @@ flow_dv_aso_age_pools_resize(struct rte_eth_dev *dev)
         mlx5_free(old_pools);
     } else {
         /* First ASO flow hit allocation - starting ASO data-path. */
-        int ret = mlx5_aso_queue_start(priv->sh);
+        int ret = mlx5_aso_flow_hit_queue_poll_start(priv->sh);

         if (ret) {
             mlx5_free(pools);
diff --git a/drivers/net/mlx5/mlx5_flow_meter.c b/drivers/net/mlx5/mlx5_flow_meter.c
index bbfc8d885a..89d1d42e3f 100644
--- a/drivers/net/mlx5/mlx5_flow_meter.c
+++ b/drivers/net/mlx5/mlx5_flow_meter.c
@@ -811,7 +811,6 @@ mlx5_flow_meter_destroy(struct rte_eth_dev *dev, uint32_t meter_id,
             struct rte_mtr_error *error)
 {
     struct mlx5_priv *priv = dev->data->dev_private;
-    struct mlx5_aso_mtr_pools_mng *mtrmng = priv->sh->mtrmng;
     struct mlx5_flow_meter_info *fm;
     const struct rte_flow_attr attr = {
         .ingress = 1,
@@ -836,7 +835,7 @@ mlx5_flow_meter_destroy(struct rte_eth_dev *dev, uint32_t meter_id,
                 RTE_MTR_ERROR_TYPE_UNSPECIFIED,
                 NULL, "Meter object is being used.");
     if (priv->sh->meter_aso_en) {
-        if (mlx5_l3t_clear_entry(mtrmng->mtr_idx_tbl, meter_id))
+        if (mlx5_l3t_clear_entry(priv->mtr_idx_tbl, meter_id))
             return -rte_mtr_error_set(error, EBUSY,
                 RTE_MTR_ERROR_TYPE_UNSPECIFIED,
                 NULL, "Fail to delete ASO Meter in index table.");
@@ -1302,7 +1301,7 @@ mlx5_flow_meter_find(struct mlx5_priv *priv, uint32_t meter_id,
             rte_spinlock_unlock(&mtrmng->mtrsl);
             return NULL;
         }
-        if (mlx5_l3t_get_entry(mtrmng->mtr_idx_tbl, meter_id, &data) ||
+        if (mlx5_l3t_get_entry(priv->mtr_idx_tbl, meter_id, &data) ||
             !data.dword) {
             rte_spinlock_unlock(&mtrmng->mtrsl);
             return NULL;
@@ -1310,7 +1309,7 @@ mlx5_flow_meter_find(struct mlx5_priv *priv, uint32_t meter_id,
         if (mtr_idx)
             *mtr_idx = data.dword;
         aso_mtr = mlx5_aso_meter_by_idx(priv, data.dword);
-        mlx5_l3t_clear_entry(mtrmng->mtr_idx_tbl, meter_id);
+        mlx5_l3t_clear_entry(priv->mtr_idx_tbl, meter_id);
         if (meter_id == aso_mtr->fm.meter_id) {
             rte_spinlock_unlock(&mtrmng->mtrsl);
             return &aso_mtr->fm;
-- 
2.27.0
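
The functional core of this patch is that one generic ASO send-queue init/uninit entry point now dispatches on an opcode mode: the flow-hit (aging) path keeps its memory-region registration and polling alarm, while the new policer (flow meter) path creates a plain SQ whose WQEs carry the meter data inline. Below is a minimal standalone sketch of that dispatch shape; the types and helpers (dev_ctx, aso_sq, sq_create, and so on) are simplified stand-ins for illustration only, not the real mlx5 PMD definitions.

    /* Standalone sketch of the opcode-mode dispatch introduced by this patch.
     * All types and helpers here are simplified stand-ins, not the mlx5 ones.
     */
    #include <stdio.h>

    enum aso_opc_mod {
        ASO_OPC_MOD_FLOW_HIT,
        ASO_OPC_MOD_POLICER,
    };

    struct aso_sq {
        int created;
        int mr_registered; /* Only the flow-hit SQ owns a data MR. */
    };

    struct dev_ctx {
        struct aso_sq age_sq; /* Flow-hit (aging) SQ. */
        struct aso_sq mtr_sq; /* Flow-meter (policer) SQ. */
    };

    static int sq_create(struct aso_sq *sq) { sq->created = 1; return 0; }
    static void sq_destroy(struct aso_sq *sq) { sq->created = 0; }

    static int
    aso_queue_init(struct dev_ctx *sh, enum aso_opc_mod mode)
    {
        switch (mode) {
        case ASO_OPC_MOD_FLOW_HIT:
            /* Register the data MR first; only the aging SQ needs it. */
            sh->age_sq.mr_registered = 1;
            if (sq_create(&sh->age_sq)) {
                sh->age_sq.mr_registered = 0;
                return -1;
            }
            return 0;
        case ASO_OPC_MOD_POLICER:
            /* The meter SQ carries its data inline in the WQE: no MR. */
            return sq_create(&sh->mtr_sq);
        default:
            fprintf(stderr, "Unknown ASO operation mode\n");
            return -1;
        }
    }

    static void
    aso_queue_uninit(struct dev_ctx *sh, enum aso_opc_mod mode)
    {
        switch (mode) {
        case ASO_OPC_MOD_FLOW_HIT:
            sh->age_sq.mr_registered = 0;
            sq_destroy(&sh->age_sq);
            break;
        case ASO_OPC_MOD_POLICER:
            sq_destroy(&sh->mtr_sq);
            break;
        default:
            fprintf(stderr, "Unknown ASO operation mode\n");
            break;
        }
    }

    int
    main(void)
    {
        struct dev_ctx sh = { 0 };

        /* Mirrors the call sites added by this series: the aging path keeps
         * its MR, the meter path does not.
         */
        if (aso_queue_init(&sh, ASO_OPC_MOD_FLOW_HIT) ||
            aso_queue_init(&sh, ASO_OPC_MOD_POLICER))
            return 1;
        aso_queue_uninit(&sh, ASO_OPC_MOD_POLICER);
        aso_queue_uninit(&sh, ASO_OPC_MOD_FLOW_HIT);
        return 0;
    }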