From: Lukasz Krakowiak <lukaszx.krakowiak@intel.com>
To: cristian.dumitrescu@intel.com
Cc: dev@dpdk.org, Jasvinder Singh, Abraham Tovar, Lukasz Krakowiak
Date: Tue, 28 May 2019 14:05:32 +0200
Message-Id: <20190528120553.2992-7-lukaszx.krakowiak@intel.com>
In-Reply-To: <20190528120553.2992-1-lukaszx.krakowiak@intel.com>
References: <20190528120553.2992-1-lukaszx.krakowiak@intel.com>
Subject: [dpdk-dev] [PATCH 06/27] sched: update subport config API

From: Jasvinder Singh

Update the subport configuration API implementation of the scheduler to
allow flexible configuration of pipe traffic classes and queues, and
subport-level configuration of the pipe parameters.
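For illustration only (not part of the patch): a minimal sketch of
configuring one subport through the updated API. The port handle,
subport_id, profiles table and all numeric values are placeholders;
the parameter fields and macros are the ones used by this series:

	struct rte_sched_subport_params sp = {
		.tb_rate = 1250000000,   /* bytes/s, must not exceed port rate */
		.tb_size = 1000000,      /* bytes, non-zero */
		.tc_period = 10,         /* ms, non-zero */
		.n_subport_pipes = 4096, /* non-zero power of 2 */
		.pipe_profiles = profiles,     /* caller-provided table */
		.n_pipe_profiles = n_profiles, /* <= RTE_SCHED_PIPE_PROFILES_PER_SUBPORT */
	};
	uint32_t i;

	for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++)
		sp.tc_rate[i] = sp.tb_rate;  /* per-TC rate <= tb_rate */

	/* Non-zero sizes must be powers of 2; queues of an unused TC may
	 * stay 0, and the best-effort TC must end up with 1, 2, 4 or 8
	 * backing queues.
	 */
	for (i = 0; i < RTE_SCHED_QUEUES_PER_PIPE; i++)
		sp.qsize[i] = 64;

	int status = rte_sched_subport_config(port, subport_id, &sp);
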
Signed-off-by: Jasvinder Singh
Signed-off-by: Abraham Tovar
Signed-off-by: Lukasz Krakowiak
---
 lib/librte_sched/rte_sched.c | 325 ++++++++++++++++++++++++++++++-----
 1 file changed, 283 insertions(+), 42 deletions(-)

diff --git a/lib/librte_sched/rte_sched.c b/lib/librte_sched/rte_sched.c
index 39a6165e3..020c028fd 100644
--- a/lib/librte_sched/rte_sched.c
+++ b/lib/librte_sched/rte_sched.c
@@ -495,24 +495,72 @@ rte_sched_port_get_array_base(struct rte_sched_port_params *params, enum rte_sch
 	return base;
 }
 
-uint32_t
-rte_sched_port_get_memory_footprint(struct rte_sched_port_params *params)
+static uint32_t
+rte_sched_subport_get_array_base(struct rte_sched_subport_params *params,
+	enum rte_sched_subport_array array)
 {
-	uint32_t size0, size1;
-	int status;
+	uint32_t n_subport_pipes = params->n_subport_pipes;
+	uint32_t n_subport_queues = RTE_SCHED_QUEUES_PER_PIPE * n_subport_pipes;
 
-	status = rte_sched_port_check_params(params);
-	if (status != 0) {
-		RTE_LOG(NOTICE, SCHED,
-			"Port scheduler params check failed (%d)\n", status);
+	uint32_t size_pipe = n_subport_pipes * sizeof(struct rte_sched_pipe);
+	uint32_t size_queue = n_subport_queues * sizeof(struct rte_sched_queue);
+	uint32_t size_queue_extra
+		= n_subport_queues * sizeof(struct rte_sched_queue_extra);
+	uint32_t size_pipe_profiles = RTE_SCHED_PIPE_PROFILES_PER_SUBPORT *
+		sizeof(struct rte_sched_pipe_profile);
+	uint32_t size_bmp_array =
+		rte_bitmap_get_memory_footprint(n_subport_queues);
+	uint32_t size_per_pipe_queue_array, size_queue_array;
 
-		return 0;
+	uint32_t base, i;
+
+	size_per_pipe_queue_array = 0;
+	for (i = 0; i < RTE_SCHED_QUEUES_PER_PIPE; i++) {
+		size_per_pipe_queue_array +=
+			params->qsize[i] * sizeof(struct rte_mbuf *);
 	}
+	size_queue_array = n_subport_pipes * size_per_pipe_queue_array;
 
-	size0 = sizeof(struct rte_sched_port);
-	size1 = rte_sched_port_get_array_base(params, e_RTE_SCHED_PORT_ARRAY_TOTAL);
+	base = 0;
 
-	return size0 + size1;
+	if (array == e_RTE_SCHED_SUBPORT_ARRAY_PIPE)
+		return base;
+	base += RTE_CACHE_LINE_ROUNDUP(size_pipe);
+
+	if (array == e_RTE_SCHED_SUBPORT_ARRAY_QUEUE)
+		return base;
+	base += RTE_CACHE_LINE_ROUNDUP(size_queue);
+
+	if (array == e_RTE_SCHED_SUBPORT_ARRAY_QUEUE_EXTRA)
+		return base;
+	base += RTE_CACHE_LINE_ROUNDUP(size_queue_extra);
+
+	if (array == e_RTE_SCHED_SUBPORT_ARRAY_PIPE_PROFILES)
+		return base;
+	base += RTE_CACHE_LINE_ROUNDUP(size_pipe_profiles);
+
+	if (array == e_RTE_SCHED_SUBPORT_ARRAY_BMP_ARRAY)
+		return base;
+	base += RTE_CACHE_LINE_ROUNDUP(size_bmp_array);
+
+	if (array == e_RTE_SCHED_SUBPORT_ARRAY_QUEUE_ARRAY)
+		return base;
+	base += RTE_CACHE_LINE_ROUNDUP(size_queue_array);
+
+	return base;
+}
+
+static void
+rte_sched_subport_config_qsize(struct rte_sched_subport *subport)
+{
+	uint32_t i;
+
+	subport->qsize_add[0] = 0;
+
+	for (i = 1; i < RTE_SCHED_QUEUES_PER_PIPE; i++)
+		subport->qsize_add[i] =
+			subport->qsize_add[i-1] + subport->qsize[i-1];
+
+	subport->qsize_sum = subport->qsize_add[15] + subport->qsize[15];
 }
 
 static void
@@ -621,6 +669,120 @@ rte_sched_pipe_profile_convert(struct rte_sched_pipe_params *src,
 	}
 }
 
+static int
+rte_sched_subport_check_params(struct rte_sched_subport_params *params,
+	uint32_t rate)
+{
+	uint32_t i, j;
+
+	/* Check user parameters */
+	if (params == NULL)
+		return -1;
+
+	if (params->tb_rate == 0 || params->tb_rate > rate)
+		return -2;
+
+	if (params->tb_size == 0)
+		return -3;
+
+	for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++)
+		if (params->tc_rate[i] > params->tb_rate) {
+			printf("traffic class %u, tc_rate %u, tb_rate %u\n", i,
+				params->tc_rate[i], params->tb_rate);
+			return -4;
+		}
+
+	if (params->tc_period == 0)
+		return -6;
+
+	/* n_subport_pipes: non-zero, power of 2 */
+	if (params->n_subport_pipes == 0 ||
+	    !rte_is_power_of_2(params->n_subport_pipes))
+		return -7;
+
+	/* qsize: power of 2, if non-zero
+	 * no bigger than 32K (due to 16-bit read/write pointers)
+	 */
+	for (i = 0, j = 0; i < RTE_SCHED_QUEUES_PER_PIPE; i++) {
+		uint32_t tc_rate = params->tc_rate[j];
+		uint16_t qsize = params->qsize[i];
+
+		if (((qsize == 0) &&
+			((tc_rate != 0) &&
+			(j != RTE_SCHED_TRAFFIC_CLASS_BE))) ||
+			((qsize != 0) && !rte_is_power_of_2(qsize)))
+			return -8;
+
+		if (j < RTE_SCHED_TRAFFIC_CLASS_BE)
+			j++;
+	}
+
+	/* WRR queues: 1, 2, 4, 8 */
+	uint32_t wrr_queues = 0;
+	for (i = 0; i < RTE_SCHED_WRR_QUEUES_PER_PIPE; i++) {
+		if (params->qsize[RTE_SCHED_TRAFFIC_CLASS_BE + i])
+			wrr_queues++;
+	}
+	if (params->tc_rate[RTE_SCHED_TRAFFIC_CLASS_BE] &&
+	    (wrr_queues != 1 && wrr_queues != 2 &&
+	    wrr_queues != 4 && wrr_queues != 8))
+		return -9;
+
+	/* pipe_profiles and n_pipe_profiles */
+	if (params->pipe_profiles == NULL ||
+	    params->n_pipe_profiles == 0 ||
+	    params->n_pipe_profiles > RTE_SCHED_PIPE_PROFILES_PER_SUBPORT)
+		return -10;
+
+	return 0;
+}
+
+static uint32_t
+rte_sched_subport_get_memory_footprint(struct rte_sched_port *port,
+	uint32_t subport_id, struct rte_sched_subport_params *params)
+{
+	uint32_t size0, size1;
+	int status;
+
+	if (port == NULL ||
+	    subport_id >= port->n_subports_per_port)
+		return 0;
+
+	status = rte_sched_subport_check_params(params, port->rate);
+	if (status != 0) {
+		RTE_LOG(NOTICE, SCHED,
+			"Port scheduler params check failed (%d)\n", status);
+
+		return 0;
+	}
+
+	size0 = sizeof(struct rte_sched_subport);
+	size1 = rte_sched_subport_get_array_base(params,
+		e_RTE_SCHED_SUBPORT_ARRAY_TOTAL);
+
+	return size0 + size1;
+}
+
+uint32_t
+rte_sched_port_get_memory_footprint(struct rte_sched_port_params *params)
+{
+	uint32_t size0, size1;
+	int status;
+
+	status = rte_sched_port_check_params(params);
+	if (status != 0) {
+		RTE_LOG(NOTICE, SCHED,
+			"Port scheduler params check failed (%d)\n", status);
+
+		return 0;
+	}
+
+	size0 = sizeof(struct rte_sched_port);
+	size1 = rte_sched_port_get_array_base(params,
+		e_RTE_SCHED_PORT_ARRAY_TOTAL);
+
+	return size0 + size1;
+}
+
 struct rte_sched_port *
 rte_sched_port_config(struct rte_sched_port_params *params)
 {
@@ -710,12 +872,12 @@ rte_sched_port_free(struct rte_sched_port *port)
 static void
 rte_sched_port_log_subport_config(struct rte_sched_port *port, uint32_t i)
 {
-	struct rte_sched_subport *s = port->subport + i;
+	struct rte_sched_subport *s = port->subports[i];
 
 	RTE_LOG(DEBUG, SCHED, "Low level config for subport %u:\n"
 		"	Token bucket: period = %u, credits per period = %u, size = %u\n"
-		"	Traffic classes: period = %u, credits per period = [%u, %u, %u, %u]\n"
-		"	Traffic class 3 oversubscription: wm min = %u, wm max = %u\n",
+		"	Traffic classes: period = %u, credits per period = [%u, %u, %u, %u, %u, %u, %u, %u, %u]\n"
+		"	Traffic class BE oversubscription: wm min = %u, wm max = %u\n",
 		i,
 
 		/* Token bucket */
@@ -729,8 +891,13 @@ rte_sched_port_log_subport_config(struct rte_sched_port *port, uint32_t i)
 		s->tc_credits_per_period[1],
 		s->tc_credits_per_period[2],
 		s->tc_credits_per_period[3],
+		s->tc_credits_per_period[4],
+		s->tc_credits_per_period[5],
+		s->tc_credits_per_period[6],
+		s->tc_credits_per_period[7],
+		s->tc_credits_per_period[8],
 
-		/* Traffic class 3 oversubscription */
+		/* Traffic class BE oversubscription */
 		s->tc_ov_wm_min,
 		s->tc_ov_wm_max);
 }
@@ -740,32 +907,21 @@ rte_sched_subport_config(struct rte_sched_port *port,
 	uint32_t subport_id,
 	struct rte_sched_subport_params *params)
 {
-	struct rte_sched_subport *s;
-	uint32_t i;
+	struct rte_sched_subport *s = NULL;
+	uint32_t mem_size, bmp_mem_size, n_subport_queues, n_subport_pipes_log2, i;
 
-	/* Check user parameters */
-	if (port == NULL ||
-	    subport_id >= port->n_subports_per_port ||
-	    params == NULL)
+	/* Check user parameters. Determine the amount of memory to allocate */
+	mem_size = rte_sched_subport_get_memory_footprint(port,
+		subport_id, params);
+	if (mem_size == 0)
 		return -1;
 
-	if (params->tb_rate == 0 || params->tb_rate > port->rate)
+	/* Allocate memory to store the data structures */
+	s = rte_zmalloc_socket("subport_params", mem_size, RTE_CACHE_LINE_SIZE,
+		port->socket);
+	if (s == NULL)
 		return -2;
 
-	if (params->tb_size == 0)
-		return -3;
-
-	for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) {
-		if (params->tc_rate[i] == 0 ||
-		    params->tc_rate[i] > params->tb_rate)
-			return -4;
-	}
-
-	if (params->tc_period == 0)
-		return -5;
-
-	s = port->subport + subport_id;
-
 	/* Token Bucket (TB) */
 	if (params->tb_rate == port->rate) {
 		s->tb_credits_per_period = 1;
@@ -784,19 +940,104 @@ rte_sched_subport_config(struct rte_sched_port *port,
 	/* Traffic Classes (TCs) */
 	s->tc_period = rte_sched_time_ms_to_bytes(params->tc_period, port->rate);
 	for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) {
-		s->tc_credits_per_period[i]
-			= rte_sched_time_ms_to_bytes(params->tc_period,
-				params->tc_rate[i]);
+		if (params->qsize[i])
+			s->tc_credits_per_period[i]
+				= rte_sched_time_ms_to_bytes(params->tc_period,
+					params->tc_rate[i]);
 	}
 	s->tc_time = port->time + s->tc_period;
 	for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++)
-		s->tc_credits[i] = s->tc_credits_per_period[i];
+		if (params->qsize[i])
+			s->tc_credits[i] = s->tc_credits_per_period[i];
+
+	/* compile time checks */
+	RTE_BUILD_BUG_ON(RTE_SCHED_PORT_N_GRINDERS == 0);
+	RTE_BUILD_BUG_ON(RTE_SCHED_PORT_N_GRINDERS &
+		(RTE_SCHED_PORT_N_GRINDERS - 1));
+
+	/* User parameters */
+	s->n_subport_pipes = params->n_subport_pipes;
+	n_subport_pipes_log2 = __builtin_ctz(params->n_subport_pipes);
+	memcpy(s->qsize, params->qsize, sizeof(params->qsize));
+	s->n_pipe_profiles = params->n_pipe_profiles;
+
+#ifdef RTE_SCHED_RED
+	for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) {
+		uint32_t j;
+
+		for (j = 0; j < RTE_COLORS; j++) {
+			/* if min/max are both zero, then RED is disabled */
+			if ((params->red_params[i][j].min_th |
+				params->red_params[i][j].max_th) == 0) {
+				continue;
+			}
+
+			if (rte_red_config_init(&s->red_config[i][j],
+				params->red_params[i][j].wq_log2,
+				params->red_params[i][j].min_th,
+				params->red_params[i][j].max_th,
+				params->red_params[i][j].maxp_inv) != 0) {
+				rte_free(s);
+				return -3;
+			}
+		}
+	}
+#endif
+
+	/* Scheduling loop detection */
+	s->pipe_loop = RTE_SCHED_PIPE_INVALID;
+	s->pipe_exhaustion = 0;
+
+	/* Grinders */
+	s->busy_grinders = 0;
+
+	/* Queue base calculation */
+	rte_sched_subport_config_qsize(s);
+
+	/* Large data structures */
+	s->pipe = (struct rte_sched_pipe *)
+		(s->memory + rte_sched_subport_get_array_base(params,
+		e_RTE_SCHED_SUBPORT_ARRAY_PIPE));
+	s->queue = (struct rte_sched_queue *)
+		(s->memory + rte_sched_subport_get_array_base(params,
+		e_RTE_SCHED_SUBPORT_ARRAY_QUEUE));
+	s->queue_extra = (struct rte_sched_queue_extra *)
+		(s->memory + rte_sched_subport_get_array_base(params,
+		e_RTE_SCHED_SUBPORT_ARRAY_QUEUE_EXTRA));
+	s->pipe_profiles = (struct rte_sched_pipe_profile *)
+		(s->memory + rte_sched_subport_get_array_base(params,
+		e_RTE_SCHED_SUBPORT_ARRAY_PIPE_PROFILES));
+	s->bmp_array = s->memory + rte_sched_subport_get_array_base(params,
+		e_RTE_SCHED_SUBPORT_ARRAY_BMP_ARRAY);
+	s->queue_array = (struct rte_mbuf **)
+		(s->memory + rte_sched_subport_get_array_base(params,
+		e_RTE_SCHED_SUBPORT_ARRAY_QUEUE_ARRAY));
+
+	/* Bitmap */
+	n_subport_queues = rte_sched_subport_queues(s);
+	bmp_mem_size = rte_bitmap_get_memory_footprint(n_subport_queues);
+	s->bmp = rte_bitmap_init(n_subport_queues, s->bmp_array,
+		bmp_mem_size);
+	if (s->bmp == NULL) {
+		RTE_LOG(ERR, SCHED, "Subport bitmap init error\n");
+		rte_free(s);
+		return -4;
+	}
+
+	for (i = 0; i < RTE_SCHED_PORT_N_GRINDERS; i++)
+		s->grinder_base_bmp_pos[i] = RTE_SCHED_PIPE_INVALID;
+
+	/* Port */
+	port->subports[subport_id] = s;
+
+	if (n_subport_pipes_log2 > port->n_max_subport_pipes_log2)
+		port->n_max_subport_pipes_log2 = n_subport_pipes_log2;
 
 #ifdef RTE_SCHED_SUBPORT_TC_OV
 	/* TC oversubscription */
 	s->tc_ov_wm_min = port->mtu;
 	s->tc_ov_wm_max = rte_sched_time_ms_to_bytes(params->tc_period,
-		port->pipe_tc3_rate_max);
+		s->pipe_tc_be_rate_max);
 	s->tc_ov_wm = s->tc_ov_wm_max;
 	s->tc_ov_period_id = 0;
 	s->tc_ov = 0;
-- 
2.20.1
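
A short illustration of the queue-array layout set up above:
rte_sched_subport_config_qsize() builds a prefix sum over the 16 per-pipe
queue sizes, so with all qsize[] entries set to 64, qsize_add[] comes out
as {0, 64, 128, ..., 960} and qsize_sum as 1024, i.e. each pipe owns 1024
mbuf-pointer slots in s->queue_array. Assuming the lookup keeps the shape
of the existing port-level rte_sched_port_qbase() helper, a subport-level
equivalent would reduce to:

	/* Sketch only, not part of the patch: base of queue qpos (0..15)
	 * of pipe pindex within the subport's queue array; qsize_sum and
	 * qsize_add[] count mbuf pointers, matching the pointer arithmetic.
	 */
	static inline struct rte_mbuf **
	subport_qbase(struct rte_sched_subport *s, uint32_t pindex, uint32_t qpos)
	{
		return s->queue_array + pindex * s->qsize_sum + s->qsize_add[qpos];
	}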