From mboxrd@z Thu Jan 1 00:00:00 1970 Return-Path: Received: from dpdk.org (dpdk.org [92.243.14.124]) by inbox.dpdk.org (Postfix) with ESMTP id 4ED88A04D9 for ; Tue, 11 Aug 2020 10:59:53 +0200 (CEST) Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id 3CFE62AB; Tue, 11 Aug 2020 10:59:53 +0200 (CEST) Received: from git-send-mailer.rdmz.labs.mlnx (unknown [37.142.13.130]) by dpdk.org (Postfix) with ESMTP id D2C692AB for ; Tue, 11 Aug 2020 10:59:51 +0200 (CEST) From: Suanming Mou To: luca.boccassi@gmail.com Cc: stable@dpdk.org, rasland@nvidia.com Date: Tue, 11 Aug 2020 16:59:47 +0800 Message-Id: <1597136387-49015-1-git-send-email-suanmingm@mellanox.com> X-Mailer: git-send-email 1.8.3.1 Subject: [dpdk-stable] [PATCH 19.11] net/mlx5: fix counter query X-BeenThere: stable@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: patches for DPDK stable branches List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: stable-bounces@dpdk.org Sender: "stable" [ upstream commit e1293b10dea9bbae2cb2ebc47b5066df73275e2f ] Currently, the counter query requires the counter ID to be 4-aligned. In non-batch mode, the counter pool may get a counter ID that is not 4-aligned. In this case, the counter should be skipped, or the query will fail. Skip a counter whose ID is not 4-aligned when it would be the first counter in the non-batch counter pool, to avoid an invalid counter query. Once a new min_dcs ID in the pool is less than the skipped counters' IDs, the skipped counters will be returned to the pool free list for use. 
Fixes: 5382d28c2110 ("net/mlx5: accelerate DV flow counter transactions") Signed-off-by: Suanming Mou --- drivers/net/mlx5/mlx5.h | 4 +- drivers/net/mlx5/mlx5_flow.c | 6 +++ drivers/net/mlx5/mlx5_flow_dv.c | 94 ++++++++++++++++++++++++++++++++- drivers/net/mlx5/mlx5_prm.h | 3 ++ 4 files changed, 104 insertions(+), 3 deletions(-) diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h index 51f067b4b..07be509f6 100644 --- a/drivers/net/mlx5/mlx5.h +++ b/drivers/net/mlx5/mlx5.h @@ -486,7 +486,8 @@ struct mlx5_flow_counter { uint32_t shared:1; /**< Share counter ID with other flow rules. */ uint32_t batch: 1; /**< Whether the counter was allocated by batch command. */ - uint32_t ref_cnt:30; /**< Reference counter. */ + uint32_t ref_cnt:29; /**< Reference counter. */ + uint32_t skipped:1; /* This counter is skipped or not. */ uint32_t id; /**< Counter ID. */ union { /**< Holds the counters for the rule. */ #if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42) @@ -518,6 +519,7 @@ struct mlx5_flow_counter_pool { /* The devx object of the minimum counter ID. */ rte_atomic64_t query_gen; uint32_t n_counters: 16; /* Number of devx allocated counters. */ + uint32_t skip_cnt:1; /* Pool contains skipped counter. */ rte_spinlock_t sl; /* The pool lock. */ struct mlx5_counter_stats_raw *raw; struct mlx5_counter_stats_raw *raw_hw; /* The raw on HW working. */ diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c index a4bbb74fc..3868b1cf5 100644 --- a/drivers/net/mlx5/mlx5_flow.c +++ b/drivers/net/mlx5/mlx5_flow.c @@ -5594,6 +5594,11 @@ mlx5_flow_query_alarm(void *arg) goto set_alarm; dcs = (struct mlx5_devx_obj *)(uintptr_t)rte_atomic64_read (&pool->a64_dcs); + if (dcs->id & (MLX5_CNT_BATCH_QUERY_ID_ALIGNMENT - 1)) { + /* Pool without valid counter. */ + pool->raw_hw = NULL; + goto next_pool; + } offset = batch ? 
0 : dcs->id % MLX5_COUNTERS_PER_POOL; ret = mlx5_devx_cmd_flow_counter_query(dcs, 0, MLX5_COUNTERS_PER_POOL - offset, NULL, NULL, @@ -5611,6 +5616,7 @@ mlx5_flow_query_alarm(void *arg) pool->raw_hw->min_dcs_id = dcs->id; LIST_REMOVE(pool->raw_hw, next); sh->cmng.pending_queries++; +next_pool: pool_index++; if (pool_index >= rte_atomic16_read(&cont->n_valid)) { batch ^= 0x1; diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c index 387036d0b..60d6af04e 100644 --- a/drivers/net/mlx5/mlx5_flow_dv.c +++ b/drivers/net/mlx5/mlx5_flow_dv.c @@ -4101,6 +4101,63 @@ flow_dv_pool_create(struct rte_eth_dev *dev, struct mlx5_devx_obj *dcs, rte_atomic16_add(&cont->n_valid, 1); return cont; } +/** + * Restore skipped counters in the pool. + * + * As counter pool query requires the first counter dcs + * ID start with 4 alinged, if the pool counters with + * min_dcs ID are not aligned with 4, the counters will + * be skipped. + * Once other min_dcs ID less than these skipped counter + * dcs ID appears, the skipped counters will be safe to + * use. + * Should be called when min_dcs is updated. + * + * @param[in] pool + * Current counter pool. + * @param[in] last_min_dcs + * Last min_dcs. + */ +static void +flow_dv_counter_restore(struct mlx5_flow_counter_pool *pool, + struct mlx5_devx_obj *last_min_dcs) +{ + struct mlx5_flow_counter *cnt; + uint32_t offset, new_offset; + uint32_t skip_cnt = 0; + uint32_t i; + + if (!pool->skip_cnt) + return; + /* + * If last min_dcs is not valid. The skipped counter may even after + * last min_dcs, set the offset to the whole pool. + */ + if (last_min_dcs->id & (MLX5_CNT_BATCH_QUERY_ID_ALIGNMENT - 1)) + offset = MLX5_COUNTERS_PER_POOL; + else + offset = last_min_dcs->id % MLX5_COUNTERS_PER_POOL; + new_offset = pool->min_dcs->id % MLX5_COUNTERS_PER_POOL; + /* + * Check the counters from 1 to the last_min_dcs range. Counters + * before new min_dcs indicates pool still has skipped counters. 
+ * Counters be skipped after new min_dcs will be ready to use. + * Offset 0 counter must be empty or min_dcs, start from 1. + */ + for (i = 1; i < offset; i++) { + cnt = &pool->counters_raw[i]; + if (cnt->skipped) { + if (i > new_offset) { + cnt->skipped = 0; + TAILQ_INSERT_TAIL(&pool->counters, cnt, next); + } else { + skip_cnt++; + } + } + } + if (!skip_cnt) + pool->skip_cnt = 0; +} /** * Prepare a new counter and/or a new counter pool. @@ -4125,11 +4182,13 @@ flow_dv_counter_pool_prepare(struct rte_eth_dev *dev, struct mlx5_pools_container *cont; struct mlx5_flow_counter_pool *pool; struct mlx5_devx_obj *dcs = NULL; + struct mlx5_devx_obj *last_min_dcs; struct mlx5_flow_counter *cnt; uint32_t i; cont = MLX5_CNT_CONTAINER(priv->sh, batch, 0); if (!batch) { +retry: /* bulk_bitmap must be 0 for single counter allocation. */ dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0); if (!dcs) @@ -4142,13 +4201,44 @@ flow_dv_counter_pool_prepare(struct rte_eth_dev *dev, return NULL; } pool = TAILQ_FIRST(&cont->pool_list); - } else if (dcs->id < pool->min_dcs->id) { + } else if (((dcs->id < pool->min_dcs->id) || + pool->min_dcs->id & + (MLX5_CNT_BATCH_QUERY_ID_ALIGNMENT - 1)) && + !(dcs->id & + (MLX5_CNT_BATCH_QUERY_ID_ALIGNMENT - 1))) { + /* + * Update the pool min_dcs only if current dcs is + * valid and exist min_dcs is not valid or greater + * than new dcs. + */ + last_min_dcs = pool->min_dcs; rte_atomic64_set(&pool->a64_dcs, (int64_t)(uintptr_t)dcs); + /* + * Restore any skipped counters if the new min_dcs + * ID is smaller or min_dcs is not valid. + */ + if (dcs->id < last_min_dcs->id || + last_min_dcs->id & + (MLX5_CNT_BATCH_QUERY_ID_ALIGNMENT - 1)) + flow_dv_counter_restore(pool, last_min_dcs); } cnt = &pool->counters_raw[dcs->id % MLX5_COUNTERS_PER_POOL]; - TAILQ_INSERT_HEAD(&pool->counters, cnt, next); cnt->dcs = dcs; + /* + * If min_dcs is not valid, it means the new allocated dcs + * also fail to become the valid min_dcs, just skip it. 
+ * Or if min_dcs is valid, and new dcs ID is smaller than + * min_dcs, but not become the min_dcs, also skip it. + */ + if (pool->min_dcs->id & + (MLX5_CNT_BATCH_QUERY_ID_ALIGNMENT - 1) || + dcs->id < pool->min_dcs->id) { + cnt->skipped = 1; + pool->skip_cnt = 1; + goto retry; + } + TAILQ_INSERT_HEAD(&pool->counters, cnt, next); *cnt_free = cnt; return cont; } diff --git a/drivers/net/mlx5/mlx5_prm.h b/drivers/net/mlx5/mlx5_prm.h index 4c8671976..c82a854bd 100644 --- a/drivers/net/mlx5/mlx5_prm.h +++ b/drivers/net/mlx5/mlx5_prm.h @@ -725,6 +725,9 @@ enum { MLX5_MKC_ACCESS_MODE_MTT = 0x1, }; +/* The counter batch query requires ID align with 4. */ +#define MLX5_CNT_BATCH_QUERY_ID_ALIGNMENT 4 + /* Flow counters. */ struct mlx5_ifc_alloc_flow_counter_out_bits { u8 status[0x8]; -- 2.25.1