From mboxrd@z Thu Jan 1 00:00:00 1970
From: Matan Azrad <matan@mellanox.com>
To: dev@dpdk.org
Cc: Viacheslav Ovsiienko, Shahaf Shuler, Maxime Coquelin
Date: Thu, 2 Apr 2020 11:26:32 +0000
Message-Id: <1585826793-28709-4-git-send-email-matan@mellanox.com>
X-Mailer: git-send-email 1.8.3.1
In-Reply-To: <1585826793-28709-1-git-send-email-matan@mellanox.com>
References: <1585826793-28709-1-git-send-email-matan@mellanox.com>
Subject: [dpdk-dev] [PATCH 3/4] vdpa/mlx5: support virtio queue statistics get

Add support for the stats_get operation.

A DevX counter object is allocated per virtq in order to manage the
virtq statistics. The counter object is created before the virtq and
destroyed after it, so the statistics are valid only within the
lifetime of the virtq.

Signed-off-by: Matan Azrad <matan@mellanox.com>
Acked-by: Viacheslav Ovsiienko
---
 doc/guides/vdpadevs/features/mlx5.ini |  1 +
 drivers/vdpa/mlx5/mlx5_vdpa.c         | 28 ++++++++++++++++++++++
 drivers/vdpa/mlx5/mlx5_vdpa.h         | 16 +++++++++++++
 drivers/vdpa/mlx5/mlx5_vdpa_virtq.c   | 44 +++++++++++++++++++++++++++++++++++
 4 files changed, 89 insertions(+)

diff --git a/doc/guides/vdpadevs/features/mlx5.ini b/doc/guides/vdpadevs/features/mlx5.ini
index 1da9c1b..788d4e0 100644
--- a/doc/guides/vdpadevs/features/mlx5.ini
+++ b/doc/guides/vdpadevs/features/mlx5.ini
@@ -17,6 +17,7 @@ packed = Y
 proto mq = Y
 proto log shmfd = Y
 proto host notifier = Y
+queue statistics = Y
 Other kdrv = Y
 ARMv8 = Y
 Power8 = Y
diff --git a/drivers/vdpa/mlx5/mlx5_vdpa.c b/drivers/vdpa/mlx5/mlx5_vdpa.c
index fe17ced..91d8f96 100644
--- a/drivers/vdpa/mlx5/mlx5_vdpa.c
+++ b/drivers/vdpa/mlx5/mlx5_vdpa.c
@@ -274,6 +274,31 @@
 	return 0;
 }
 
+static int
+mlx5_vdpa_get_stats(int did, int qid, struct rte_vdpa_queue_stats *stats)
+{
+	struct mlx5_vdpa_priv *priv = mlx5_vdpa_find_priv_resource_by_did(did);
+
+	if (priv == NULL) {
+		DRV_LOG(ERR, "Invalid device id: %d.", did);
+		return -ENODEV;
+	}
+	if (!priv->configured) {
+		DRV_LOG(ERR, "Device %d was not configured.", did);
+		return -ENODATA;
+	}
+	if (qid >= (int)priv->nr_virtqs) {
+		DRV_LOG(ERR, "Too big vring id: %d.", qid);
+		return -E2BIG;
+	}
+	if (!priv->caps.queue_counters_valid) {
+		DRV_LOG(ERR, "Virtq statistics are not supported for device %d.",
+			did);
+		return -ENOTSUP;
+	}
+	return mlx5_vdpa_virtq_stats_get(priv, qid, stats);
+}
+
 static struct rte_vdpa_dev_ops mlx5_vdpa_ops = {
 	.get_queue_num = mlx5_vdpa_get_queue_num,
 	.get_features = mlx5_vdpa_get_vdpa_features,
@@ -286,6 +311,7 @@
 	.get_vfio_group_fd = NULL,
 	.get_vfio_device_fd = mlx5_vdpa_get_device_fd,
 	.get_notify_area = mlx5_vdpa_get_notify_area,
+	.get_stats = mlx5_vdpa_get_stats,
 };
 
 static struct ibv_device *
@@ -489,6 +515,8 @@
 		rte_errno = ENOTSUP;
 		goto error;
 	}
+	if (!attr.vdpa.queue_counters_valid)
+		DRV_LOG(DEBUG, "No capability to support virtq statistics.");
 	priv = rte_zmalloc("mlx5 vDPA device private", sizeof(*priv) +
 			   sizeof(struct mlx5_vdpa_virtq) *
 			   attr.vdpa.max_num_virtio_queues * 2,
diff --git a/drivers/vdpa/mlx5/mlx5_vdpa.h b/drivers/vdpa/mlx5/mlx5_vdpa.h
index fcc216a..35b2be1 100644
--- a/drivers/vdpa/mlx5/mlx5_vdpa.h
+++ b/drivers/vdpa/mlx5/mlx5_vdpa.h
@@ -76,6 +76,7 @@ struct mlx5_vdpa_virtq {
 	uint16_t vq_size;
 	struct mlx5_vdpa_priv *priv;
 	struct mlx5_devx_obj *virtq;
+	struct mlx5_devx_obj *counters;
 	struct mlx5_vdpa_event_qp eqp;
 	struct {
 		struct mlx5dv_devx_umem *obj;
@@ -352,4 +353,19 @@ int mlx5_vdpa_dirty_bitmap_set(struct mlx5_vdpa_priv *priv, uint64_t log_base,
  */
 int mlx5_vdpa_virtq_stop(struct mlx5_vdpa_priv *priv, int index);
 
+/**
+ * Get virtq statistics.
+ *
+ * @param[in] priv
+ *   The vdpa driver private structure.
+ * @param[in] qid
+ *   The virtq index.
+ * @param stats
+ *   The virtq statistics structure to fill.
+ *
+ * @return
+ *   0 on success and @p stats is updated, a negative value otherwise.
+ */
+int mlx5_vdpa_virtq_stats_get(struct mlx5_vdpa_priv *priv, int qid,
+			      struct rte_vdpa_queue_stats *stats);
 #endif /* RTE_PMD_MLX5_VDPA_H_ */
diff --git a/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c b/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c
index defb9e1..9fea6e9 100644
--- a/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c
+++ b/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c
@@ -72,6 +72,10 @@
 			rte_free(virtq->umems[i].buf);
 	}
 	memset(&virtq->umems, 0, sizeof(virtq->umems));
+	if (virtq->counters) {
+		claim_zero(mlx5_devx_cmd_destroy(virtq->counters));
+		virtq->counters = NULL;
+	}
 	if (virtq->eqp.fw_qp)
 		mlx5_vdpa_event_qp_destroy(&virtq->eqp);
 	return 0;
@@ -205,6 +209,16 @@
 		DRV_LOG(INFO, "Virtq %d is, for sure, working by poll mode, no"
 			" need event QPs and event mechanism.", index);
 	}
+	if (priv->caps.queue_counters_valid) {
+		virtq->counters = mlx5_devx_cmd_create_virtio_q_counters
+								(priv->ctx);
+		if (!virtq->counters) {
+			DRV_LOG(ERR, "Failed to create virtq counters for virtq"
+				" %d.", index);
+			goto error;
+		}
+		attr.counters_obj_id = virtq->counters->id;
+	}
 	/* Setup 3 UMEMs for each virtq. */
 	for (i = 0; i < RTE_DIM(virtq->umems); ++i) {
 		virtq->umems[i].size = priv->caps.umems[i].a * vq.size +
@@ -448,3 +462,33 @@
 	}
 	return 0;
 }
+
+int
+mlx5_vdpa_virtq_stats_get(struct mlx5_vdpa_priv *priv, int qid,
+			  struct rte_vdpa_queue_stats *stats)
+{
+	struct mlx5_vdpa_virtq *virtq = &priv->virtqs[qid];
+	struct mlx5_devx_virtio_q_couners_attr attr = {0};
+	int ret;
+
+	if (!virtq->virtq) {
+		DRV_LOG(ERR, "Failed to read virtq %d statistics - virtq "
+			"synchronization failed.", qid);
+		return -EINVAL;
+	}
+	MLX5_ASSERT(virtq->counters);
+	ret = mlx5_devx_cmd_query_virtio_q_counters(virtq->counters, &attr);
+	if (ret) {
+		DRV_LOG(ERR, "Failed to read virtq %d stats from HW.", qid);
+		return ret;
+	}
+	*stats = (struct rte_vdpa_queue_stats) {
+		.received_desc = attr.received_desc,
+		.completed_desc = attr.completed_desc,
+		.bad_desc = attr.bad_desc_errors,
+		.exceed_max_chain = attr.exceed_max_chain,
+		.invalid_buffer = attr.invalid_buffer,
+		.errors = attr.error_cqes,
+	};
+	return 0;
+}
-- 
1.8.3.1
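
As a usage sketch for reviewers, the snippet below shows how an application
could consume the new op once the device is configured. It is a minimal
sketch: the rte_vdpa_get_stats() wrapper name and the exact
struct rte_vdpa_queue_stats definition are assumed to come from earlier
patches in this series (not shown here), so they are illustrative; only the
statistic fields that mlx5_vdpa_virtq_stats_get() fills above are used.

#include <inttypes.h>
#include <stdio.h>

#include <rte_vdpa.h>

/*
 * Dump the HW counters of every virtq of a configured vDPA device.
 * Assumption: rte_vdpa_get_stats(did, qid, &stats) is the vhost-library
 * wrapper over the .get_stats driver op added by this patch, and the
 * rte_vdpa_queue_stats fields are uint64_t; both are defined by earlier
 * patches in this series and are illustrative here.
 */
static void
dump_vdpa_queue_stats(int did, int nr_virtqs)
{
	struct rte_vdpa_queue_stats stats;
	int qid;

	for (qid = 0; qid < nr_virtqs; qid++) {
		/* 0 on success, a negative errno value otherwise. */
		if (rte_vdpa_get_stats(did, qid, &stats) != 0)
			continue;
		printf("virtq %d: received_desc=%" PRIu64
		       " completed_desc=%" PRIu64 " bad_desc=%" PRIu64
		       " exceed_max_chain=%" PRIu64 " invalid_buffer=%" PRIu64
		       " errors=%" PRIu64 "\n", qid,
		       stats.received_desc, stats.completed_desc,
		       stats.bad_desc, stats.exceed_max_chain,
		       stats.invalid_buffer, stats.errors);
	}
}

Note that, per the commit message, the counter object lives only as long as
the virtq, so the counters reset when the virtq is destroyed and re-created.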