From: Savinay Dharmappa <savinay.dharmappa@intel.com>
To: cristian.dumitrescu@intel.com, jasvinder.singh@intel.com, dev@dpdk.org
Cc: savinay.dharmappa@intel.com
Date: Wed, 30 Sep 2020 20:24:28 +0100
Message-Id: <20200930192434.47793-4-savinay.dharmappa@intel.com>
X-Mailer: git-send-email 2.17.1
In-Reply-To: <20200930192434.47793-1-savinay.dharmappa@intel.com>
References: <1600332159-26018-1-git-send-email-savinay.dharmappa@intel.com>
 <20200930192434.47793-1-savinay.dharmappa@intel.com>
Subject: [dpdk-dev] [PATCH v5 3/9] sched: add dynamic config of subport bandwidth

This patch adds support for dynamic configuration of subport bandwidth.
A new API, rte_dynamic_sched_subport_config(), configures a subport and
attaches a subport bandwidth profile to it; when the subport is already
configured, only its bandwidth profile is updated at runtime.

Signed-off-by: Savinay Dharmappa <savinay.dharmappa@intel.com>
Signed-off-by: Jasvinder Singh <jasvinder.singh@intel.com>
---
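Usage sketch for reviewers (illustration only, not part of the patch; the
subport/profile IDs, the helper name and the params structure below are
placeholders, and the port is assumed to have been created with
rte_sched_port_config() with its subport bandwidth profiles already set up):

#include <stddef.h>
#include <rte_sched.h>

/* Hypothetical identifiers; a real application takes these from its own
 * port configuration. */
#define APP_SUBPORT_ID      0
#define APP_NEW_PROFILE_ID  1

static int
app_reconfigure_subport(struct rte_sched_port *port,
	struct rte_sched_subport_params *subport_params)
{
	int ret;

	/* First-time configuration: pass the subport parameters; a newly
	 * created subport is internally attached to bandwidth profile 0. */
	ret = rte_dynamic_sched_subport_config(port, APP_SUBPORT_ID,
		subport_params, 0);
	if (ret != 0)
		return ret;

	/* Runtime update: the subport already exists, so params is NULL
	 * and only its bandwidth profile is switched. */
	return rte_dynamic_sched_subport_config(port, APP_SUBPORT_ID,
		NULL, APP_NEW_PROFILE_ID);
}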
 lib/librte_sched/rte_sched.c | 219 +++++++++++++++++++++++++++++++++++
 lib/librte_sched/rte_sched.h |  23 ++++
 2 files changed, 242 insertions(+)

diff --git a/lib/librte_sched/rte_sched.c b/lib/librte_sched/rte_sched.c
index 895b40d72..54940c451 100644
--- a/lib/librte_sched/rte_sched.c
+++ b/lib/librte_sched/rte_sched.c
@@ -174,6 +174,8 @@ struct rte_sched_subport {
 	/* Statistics */
 	struct rte_sched_subport_stats stats __rte_cache_aligned;
 
+	/* subport profile */
+	uint32_t profile;
 	/* Subport pipes */
 	uint32_t n_pipes_per_subport_enabled;
 	uint32_t n_pipe_profiles;
@@ -1155,6 +1157,49 @@ rte_sched_free_memory(struct rte_sched_port *port, uint32_t n_subports)
 	rte_free(port);
 }
 
+static int
+rte_sched_subport_profile_config(struct rte_sched_port *port,
+	uint32_t subport_id,
+	uint32_t subport_profile_id)
+{
+	int i;
+	struct rte_sched_subport_profile *params;
+	struct rte_sched_subport *s;
+
+	/* Subport profile exceeds the max limit */
+	if (subport_profile_id >= port->n_max_subport_profiles) {
+		RTE_LOG(ERR, SCHED, "%s: "
+			"Number of subport profile exceeds the max limit\n",
+			__func__);
+		return -EINVAL;
+	}
+
+	params = port->subport_profiles + subport_profile_id;
+
+	s = port->subports[subport_id];
+
+	s->tb_credits = params->tb_size / 2;
+
+	s->tc_time = port->time + params->tc_period;
+
+	for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++)
+		if (s->qsize[i])
+			s->tc_credits[i] =
+				params->tc_credits_per_period[i];
+		else
+			params->tc_credits_per_period[i] = 0;
+
+#ifdef RTE_SCHED_SUBPORT_TC_OV
+	s->tc_ov_wm_max = rte_sched_time_ms_to_bytes(params->tc_period,
+			s->pipe_tc_be_rate_max);
+#endif
+	s->profile = subport_profile_id;
+
+	rte_sched_port_log_subport_profile(port, subport_profile_id);
+
+	return 0;
+}
+
 int
 rte_sched_subport_config(struct rte_sched_port *port,
 	uint32_t subport_id,
@@ -1344,6 +1389,180 @@ rte_sched_subport_config(struct rte_sched_port *port,
 	return 0;
 }
 
+int
+rte_dynamic_sched_subport_config(struct rte_sched_port *port,
+	uint32_t subport_id,
+	struct rte_sched_subport_params *params,
+	uint32_t subport_profile_id)
+{
+	struct rte_sched_subport *s = NULL;
+	uint32_t n_subports = subport_id;
+	uint32_t n_subport_pipe_queues, i;
+	uint32_t size0, size1, bmp_mem_size;
+	int status;
+
+	/* Check user parameters */
+	if (port == NULL) {
+		RTE_LOG(ERR, SCHED,
+			"%s: Incorrect value for parameter port\n", __func__);
+		return 0;
+	}
+
+	if (subport_id >= port->n_subports_per_port) {
+		RTE_LOG(ERR, SCHED,
+			"%s: Incorrect value for subport id\n", __func__);
+
+		rte_sched_free_memory(port, n_subports);
+		return -EINVAL;
+	}
+
+	if (port->subports[subport_id] == NULL) {
+
+		status = rte_sched_subport_check_params(params,
+			port->n_pipes_per_subport,
+			port->rate);
+
+		if (status != 0) {
+			RTE_LOG(NOTICE, SCHED,
+				"%s: Port scheduler params check failed (%d)\n",
+				__func__, status);
+
+			rte_sched_free_memory(port, n_subports);
+			return -EINVAL;
+		}
+
+		/* Determine the amount of memory to allocate */
+		size0 = sizeof(struct rte_sched_subport);
+		size1 = rte_sched_subport_get_array_base(params,
+			e_RTE_SCHED_SUBPORT_ARRAY_TOTAL);
+
+		/* Allocate memory to store the data structures */
+		s = rte_zmalloc_socket("subport_params", size0 + size1,
+			RTE_CACHE_LINE_SIZE, port->socket);
+		if (s == NULL) {
+			RTE_LOG(ERR, SCHED,
+				"%s: Memory allocation fails\n", __func__);
+
+			rte_sched_free_memory(port, n_subports);
+			return -ENOMEM;
+		}
+
+		n_subports++;
+
+		/* Port */
+		port->subports[subport_id] = s;
+
+		s->tb_time = port->time;
+
+		subport_profile_id = 0;
+
+		/* compile time checks */
+		RTE_BUILD_BUG_ON(RTE_SCHED_PORT_N_GRINDERS == 0);
+		RTE_BUILD_BUG_ON(RTE_SCHED_PORT_N_GRINDERS &
+			(RTE_SCHED_PORT_N_GRINDERS - 1));
+
+		/* User parameters */
+		s->n_pipes_per_subport_enabled =
+			params->n_pipes_per_subport_enabled;
+		memcpy(s->qsize, params->qsize, sizeof(params->qsize));
+		s->n_pipe_profiles = params->n_pipe_profiles;
+		s->n_max_pipe_profiles = params->n_max_pipe_profiles;
+
+#ifdef RTE_SCHED_RED
+		for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) {
+			uint32_t j;
+
+			for (j = 0; j < RTE_COLORS; j++) {
+			/* if min/max are both zero, then RED is disabled */
+				if ((params->red_params[i][j].min_th |
+				     params->red_params[i][j].max_th) == 0) {
+					continue;
+				}
+
+				if (rte_red_config_init(&s->red_config[i][j],
+				    params->red_params[i][j].wq_log2,
+				    params->red_params[i][j].min_th,
+				    params->red_params[i][j].max_th,
+				    params->red_params[i][j].maxp_inv) != 0) {
+					rte_sched_free_memory(port, n_subports);
+
+					RTE_LOG(NOTICE, SCHED,
+					"%s: RED configuration init fails\n",
+					__func__);
+					return -EINVAL;
+				}
+			}
+		}
+#endif
+
+		/* Scheduling loop detection */
+		s->pipe_loop = RTE_SCHED_PIPE_INVALID;
+		s->pipe_exhaustion = 0;
+
+		/* Grinders */
+		s->busy_grinders = 0;
+
+		/* Queue base calculation */
+		rte_sched_subport_config_qsize(s);
+
+		/* Large data structures */
+		s->pipe = (struct rte_sched_pipe *)
+			(s->memory + rte_sched_subport_get_array_base(params,
+			e_RTE_SCHED_SUBPORT_ARRAY_PIPE));
+		s->queue = (struct rte_sched_queue *)
+			(s->memory + rte_sched_subport_get_array_base(params,
+			e_RTE_SCHED_SUBPORT_ARRAY_QUEUE));
+		s->queue_extra = (struct rte_sched_queue_extra *)
+			(s->memory + rte_sched_subport_get_array_base(params,
+			e_RTE_SCHED_SUBPORT_ARRAY_QUEUE_EXTRA));
+		s->pipe_profiles = (struct rte_sched_pipe_profile *)
+			(s->memory + rte_sched_subport_get_array_base(params,
+			e_RTE_SCHED_SUBPORT_ARRAY_PIPE_PROFILES));
+		s->bmp_array = s->memory +
+			rte_sched_subport_get_array_base(params,
+			e_RTE_SCHED_SUBPORT_ARRAY_BMP_ARRAY);
+		s->queue_array = (struct rte_mbuf **)
+			(s->memory + rte_sched_subport_get_array_base(params,
+			e_RTE_SCHED_SUBPORT_ARRAY_QUEUE_ARRAY));
+
+		/* Pipe profile table */
+		rte_sched_subport_config_pipe_profile_table(s, params,
+			port->rate);
+
+		/* Bitmap */
+		n_subport_pipe_queues = rte_sched_subport_pipe_queues(s);
+		bmp_mem_size = rte_bitmap_get_memory_footprint(
+			n_subport_pipe_queues);
+		s->bmp = rte_bitmap_init(n_subport_pipe_queues, s->bmp_array,
+			bmp_mem_size);
+		if (s->bmp == NULL) {
+			RTE_LOG(ERR, SCHED,
+				"%s: Subport bitmap init error\n", __func__);
+
+			rte_sched_free_memory(port, n_subports);
+			return -EINVAL;
+		}
+
+		for (i = 0; i < RTE_SCHED_PORT_N_GRINDERS; i++)
+			s->grinder_base_bmp_pos[i] = RTE_SCHED_PIPE_INVALID;
+
+#ifdef RTE_SCHED_SUBPORT_TC_OV
+		/* TC oversubscription */
+		s->tc_ov_wm_min = port->mtu;
+		s->tc_ov_wm = s->tc_ov_wm_max;
+		s->tc_ov_period_id = 0;
+		s->tc_ov = 0;
+		s->tc_ov_n = 0;
+		s->tc_ov_rate = 0;
+#endif
+	}
+
+	rte_sched_subport_profile_config(port, subport_id, subport_profile_id);
+
+	return 0;
+}
+
 int
 rte_sched_pipe_config(struct rte_sched_port *port,
 	uint32_t subport_id,

diff --git a/lib/librte_sched/rte_sched.h b/lib/librte_sched/rte_sched.h
index 3d823692c..3ecb0e9c3 100644
--- a/lib/librte_sched/rte_sched.h
+++ b/lib/librte_sched/rte_sched.h
@@ -376,6 +376,29 @@ rte_sched_subport_config(struct rte_sched_port *port,
 	uint32_t subport_id,
 	struct rte_sched_subport_params *params);
 
+/**
+ * Hierarchical scheduler subport configuration.
+ * Note that this function is also used at runtime
+ * to configure subport bandwidth profile.
+ * @param port
+ *   Handle to port scheduler instance
+ * @param subport_id
+ *   Subport ID
+ * @param params
+ *   Subport configuration parameters.
+ *   This parameter should be set to NULL, to
+ *   configure the subport bandwidth profile
+ *   at runtime.
+ * @param subport_profile_id
+ *   ID of profile configured for subport
+ * @return
+ *   0 upon success, error code otherwise
+ */
+int
+rte_dynamic_sched_subport_config(struct rte_sched_port *port,
+	uint32_t subport_id,
+	struct rte_sched_subport_params *params,
+	uint32_t subport_profile_id);
 /**
  * Hierarchical scheduler pipe configuration
  *
-- 
2.17.1