From: Viacheslav Ovsiienko <viacheslavo@mellanox.com>
To: dev@dpdk.org
Cc: matan@mellanox.com, rasland@mellanox.com, thomas@monjalon.net, ferruh.yigit@intel.com
Date: Fri, 10 Jul 2020 09:48:42 +0000
Message-Id: <1594374530-24659-8-git-send-email-viacheslavo@mellanox.com>
X-Mailer: git-send-email 1.8.3.1
In-Reply-To: <1594374530-24659-1-git-send-email-viacheslavo@mellanox.com>
References: <1594374530-24659-1-git-send-email-viacheslavo@mellanox.com>
Subject: [dpdk-dev] [PATCH v1 08/16] net/mlx5: allocate packet pacing context

This patch allocates the Packet Pacing context from the kernel,
configures it according to the requested send scheduling granularity,
and assigns it to the Clock Queue.

Signed-off-by: Viacheslav Ovsiienko <viacheslavo@mellanox.com>
---
 drivers/net/mlx5/mlx5.h      |  2 ++
 drivers/net/mlx5/mlx5_txpp.c | 71 ++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 73 insertions(+)

diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index a1956cc..c1eafed 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -568,6 +568,8 @@ struct mlx5_dev_txpp {
 	struct mlx5dv_devx_event_channel *echan; /* Event Channel. */
 	struct mlx5_txpp_wq clock_queue; /* Clock Queue. */
 	struct mlx5_txpp_wq rearm_queue; /* Clock Queue. */
+	struct mlx5dv_pp *pp; /* Packet pacing context. */
+	uint16_t pp_id; /* Packet pacing context index. */
 };
 
 /*
diff --git a/drivers/net/mlx5/mlx5_txpp.c b/drivers/net/mlx5/mlx5_txpp.c
index 34ac493..ebc24ba 100644
--- a/drivers/net/mlx5/mlx5_txpp.c
+++ b/drivers/net/mlx5/mlx5_txpp.c
@@ -6,6 +6,7 @@
 #include
 #include
 #include
+#include
 
 #include "mlx5.h"
 #include "mlx5_rxtx.h"
@@ -49,6 +50,69 @@
 }
 
 static void
+mlx5_txpp_free_pp_index(struct mlx5_dev_ctx_shared *sh)
+{
+	if (sh->txpp.pp) {
+		mlx5_glue->dv_free_pp(sh->txpp.pp);
+		sh->txpp.pp = NULL;
+		sh->txpp.pp_id = 0;
+	}
+}
+
+/* Allocate Packet Pacing index from kernel via mlx5dv call. */
+static int
+mlx5_txpp_alloc_pp_index(struct mlx5_dev_ctx_shared *sh)
+{
+#ifdef HAVE_MLX5DV_PP_ALLOC
+	uint32_t pp[MLX5_ST_SZ_DW(set_pp_rate_limit_context)];
+	uint64_t rate;
+
+	MLX5_ASSERT(!sh->txpp.pp);
+	memset(&pp, 0, sizeof(pp));
+	rate = NS_PER_S / sh->txpp.tick;
+	if (rate * sh->txpp.tick != NS_PER_S)
+		DRV_LOG(WARNING, "Packet pacing frequency is not precise.");
+	if (sh->txpp.test) {
+		uint32_t len;
+
+		len = RTE_MAX(MLX5_TXPP_TEST_PKT_SIZE,
+			      (size_t)RTE_ETHER_MIN_LEN);
+		MLX5_SET(set_pp_rate_limit_context, &pp,
+			 burst_upper_bound, len);
+		MLX5_SET(set_pp_rate_limit_context, &pp,
+			 typical_packet_size, len);
+		/* Convert packets per second into kilobits. */
+		rate = (rate * len) / (1000ul / CHAR_BIT);
+		DRV_LOG(INFO, "Packet pacing rate set to %" PRIu64, rate);
+	}
+	MLX5_SET(set_pp_rate_limit_context, &pp, rate_limit, rate);
+	MLX5_SET(set_pp_rate_limit_context, &pp, rate_mode,
+		 sh->txpp.test ? MLX5_DATA_RATE : MLX5_WQE_RATE);
+	sh->txpp.pp = mlx5_glue->dv_alloc_pp
+				(sh->ctx, sizeof(pp), &pp,
+				 MLX5DV_PP_ALLOC_FLAGS_DEDICATED_INDEX);
+	if (sh->txpp.pp == NULL) {
+		DRV_LOG(ERR, "Failed to allocate packet pacing index.");
+		rte_errno = errno;
+		return -errno;
+	}
+	if (!sh->txpp.pp->index) {
+		DRV_LOG(ERR, "Zero packet pacing index allocated.");
+		mlx5_txpp_free_pp_index(sh);
+		rte_errno = ENOTSUP;
+		return -ENOTSUP;
+	}
+	sh->txpp.pp_id = sh->txpp.pp->index;
+	return 0;
+#else
+	RTE_SET_USED(sh);
+	DRV_LOG(ERR, "Allocating pacing index is not supported.");
+	rte_errno = ENOTSUP;
+	return -ENOTSUP;
+#endif
+}
+
+static void
 mlx5_txpp_destroy_send_queue(struct mlx5_txpp_wq *wq)
 {
 	if (wq->sq)
@@ -457,6 +521,7 @@
 	}
 	sq_attr.state = MLX5_SQC_STATE_RST;
 	sq_attr.cqn = wq->cq->id;
+	sq_attr.packet_pacing_rate_limit_index = sh->txpp.pp_id;
 	sq_attr.wq_attr.cd_slave = 1;
 	sq_attr.wq_attr.uar_page = sh->tx_uar->page_id;
 	sq_attr.wq_attr.wq_type = MLX5_WQ_TYPE_CYCLIC;
@@ -503,6 +568,7 @@
  * - Clock CQ/SQ
  * - Rearm CQ/SQ
  * - attaches rearm interrupt handler
+ * - starts Clock Queue
  *
  * Returns 0 on success, negative otherwise
  */
@@ -520,6 +586,9 @@
 	ret = mlx5_txpp_create_eqn(sh);
 	if (ret)
 		goto exit;
+	ret = mlx5_txpp_alloc_pp_index(sh);
+	if (ret)
+		goto exit;
 	ret = mlx5_txpp_create_clock_queue(sh);
 	if (ret)
 		goto exit;
@@ -530,6 +599,7 @@
 	if (ret) {
 		mlx5_txpp_destroy_rearm_queue(sh);
 		mlx5_txpp_destroy_clock_queue(sh);
+		mlx5_txpp_free_pp_index(sh);
 		mlx5_txpp_destroy_eqn(sh);
 		sh->txpp.tick = 0;
 		sh->txpp.test = 0;
@@ -550,6 +620,7 @@
 {
 	mlx5_txpp_destroy_rearm_queue(sh);
 	mlx5_txpp_destroy_clock_queue(sh);
+	mlx5_txpp_free_pp_index(sh);
 	mlx5_txpp_destroy_eqn(sh);
 	sh->txpp.tick = 0;
 	sh->txpp.test = 0;
-- 
1.8.3.1
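
[Editor's note: the following is a minimal standalone sketch of the rate
derivation performed in mlx5_txpp_alloc_pp_index() above, not part of the
patch. The tick and packet-length values are illustrative assumptions; the
real code takes them from sh->txpp.tick and MLX5_TXPP_TEST_PKT_SIZE.]

#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>
#include <limits.h>

#define NS_PER_S 1000000000ul

int
main(void)
{
	uint64_t tick = 500;  /* assumed scheduling granularity, nanoseconds */
	uint64_t len = 1500;  /* assumed test packet length, bytes */
	uint64_t rate;

	/* WQE rate mode: one scheduled send per tick. */
	rate = NS_PER_S / tick;
	if (rate * tick != NS_PER_S)
		printf("warning: packet pacing frequency is not precise\n");
	printf("WQE rate: %" PRIu64 " packets/sec\n", rate);
	/* Data rate (test) mode: convert packets/sec into kilobits/sec. */
	rate = (rate * len) / (1000ul / CHAR_BIT);
	printf("data rate: %" PRIu64 " kbits/sec\n", rate);
	return 0;
}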