From mboxrd@z Thu Jan 1 00:00:00 1970
From: Jasvinder Singh
To: dev@dpdk.org
Cc: cristian.dumitrescu@intel.com, Abraham Tovar, Lukasz Krakowiak
Date: Tue, 25 Jun 2019 16:31:52 +0100
Message-Id: <20190625153217.24301-4-jasvinder.singh@intel.com>
X-Mailer: git-send-email 2.21.0
In-Reply-To: <20190625153217.24301-1-jasvinder.singh@intel.com>
References: <20190528120553.2992-2-lukaszx.krakowiak@intel.com>
 <20190625153217.24301-1-jasvinder.singh@intel.com>
MIME-Version: 1.0
Content-Transfer-Encoding: 8bit
Subject: [dpdk-dev] [PATCH v2 03/28] sched: update internal data structures

Update internal data structures of the scheduler to allow configuration
flexibility for pipe traffic classes and queues, and subport-level
configuration of the pipe parameters.
Signed-off-by: Jasvinder Singh
Signed-off-by: Abraham Tovar
Signed-off-by: Lukasz Krakowiak
---
 lib/librte_sched/rte_sched.c | 162 +++++++++++++++++++++++------------
 1 file changed, 109 insertions(+), 53 deletions(-)

diff --git a/lib/librte_sched/rte_sched.c b/lib/librte_sched/rte_sched.c
index a60ddf97e..c81d59947 100644
--- a/lib/librte_sched/rte_sched.c
+++ b/lib/librte_sched/rte_sched.c
@@ -37,6 +37,8 @@
 
 #define RTE_SCHED_TB_RATE_CONFIG_ERR          (1e-7)
 #define RTE_SCHED_WRR_SHIFT                   3
+#define RTE_SCHED_MAX_QUEUES_PER_TC           RTE_SCHED_BE_QUEUES_PER_PIPE
+#define RTE_SCHED_TRAFFIC_CLASS_BE            (RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE - 1)
 #define RTE_SCHED_GRINDER_PCACHE_SIZE         (64 / RTE_SCHED_QUEUES_PER_PIPE)
 #define RTE_SCHED_PIPE_INVALID                UINT32_MAX
 #define RTE_SCHED_BMP_POS_INVALID             UINT32_MAX
@@ -46,6 +48,52 @@
  */
 #define RTE_SCHED_TIME_SHIFT                  8
 
+enum grinder_state {
+	e_GRINDER_PREFETCH_PIPE = 0,
+	e_GRINDER_PREFETCH_TC_QUEUE_ARRAYS,
+	e_GRINDER_PREFETCH_MBUF,
+	e_GRINDER_READ_MBUF
+};
+
+struct rte_sched_grinder {
+	/* Pipe cache */
+	uint16_t pcache_qmask[RTE_SCHED_GRINDER_PCACHE_SIZE];
+	uint32_t pcache_qindex[RTE_SCHED_GRINDER_PCACHE_SIZE];
+	uint32_t pcache_w;
+	uint32_t pcache_r;
+
+	/* Current pipe */
+	uint32_t pindex;
+	struct rte_sched_pipe *pipe;
+	struct rte_sched_pipe_profile *pipe_params;
+	struct rte_sched_subport *subport;
+
+	/* Grinder state*/
+	enum grinder_state state;
+	uint32_t productive;
+
+	/* TC cache */
+	uint8_t tccache_qmask[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
+	uint32_t tccache_qindex[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
+	uint32_t tccache_w;
+	uint32_t tccache_r;
+
+	/* Current TC */
+	uint32_t tc_index;
+	uint32_t qpos;
+	struct rte_sched_queue *queue[RTE_SCHED_MAX_QUEUES_PER_TC];
+	struct rte_mbuf **qbase[RTE_SCHED_MAX_QUEUES_PER_TC];
+	uint32_t qindex[RTE_SCHED_MAX_QUEUES_PER_TC];
+	uint16_t qsize;
+	uint32_t qmask;
+	struct rte_mbuf *pkt;
+
+	/* WRR */
+	uint16_t wrr_tokens[RTE_SCHED_BE_QUEUES_PER_PIPE];
+	uint16_t wrr_mask[RTE_SCHED_BE_QUEUES_PER_PIPE];
+	uint8_t wrr_cost[RTE_SCHED_BE_QUEUES_PER_PIPE];
+};
+
 struct rte_sched_subport {
 	/* Token bucket (TB) */
 	uint64_t tb_time; /* time of last update */
@@ -71,7 +119,42 @@ struct rte_sched_subport {
 
 	/* Statistics */
 	struct rte_sched_subport_stats stats;
-};
+
+	/* Subport Pipes*/
+	uint32_t n_subport_pipes;
+
+	uint16_t qsize[RTE_SCHED_QUEUES_PER_PIPE];
+	uint32_t n_pipe_profiles;
+	uint32_t n_max_pipe_profiles;
+	uint32_t pipe_tc_be_rate_max;
+#ifdef RTE_SCHED_RED
+	struct rte_red_config red_config[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE][RTE_COLORS];
+#endif
+
+	/* Scheduling loop detection */
+	uint32_t pipe_loop;
+	uint32_t pipe_exhaustion;
+
+	/* Bitmap */
+	struct rte_bitmap *bmp;
+	uint32_t grinder_base_bmp_pos[RTE_SCHED_PORT_N_GRINDERS] __rte_aligned_16;
+
+	/* Grinders */
+	struct rte_sched_grinder grinder[RTE_SCHED_PORT_N_GRINDERS];
+	uint32_t busy_grinders;
+
+	/* Queue base calculation */
+	uint32_t qsize_add[RTE_SCHED_QUEUES_PER_PIPE];
+	uint32_t qsize_sum;
+
+	struct rte_sched_pipe *pipe;
+	struct rte_sched_queue *queue;
+	struct rte_sched_queue_extra *queue_extra;
+	struct rte_sched_pipe_profile *pipe_profiles;
+	uint8_t *bmp_array;
+	struct rte_mbuf **queue_array;
+	uint8_t memory[0] __rte_cache_aligned;
+} __rte_cache_aligned;
 
 struct rte_sched_pipe_profile {
 	/* Token bucket (TB) */
@@ -84,8 +167,10 @@ struct rte_sched_pipe_profile {
 	uint32_t tc_credits_per_period[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
 	uint8_t tc_ov_weight;
 
-	/* Pipe queues */
-	uint8_t wrr_cost[RTE_SCHED_QUEUES_PER_PIPE];
+	/* Pipe best-effort traffic class queues */
+	uint8_t n_be_queues;
+
+	uint8_t wrr_cost[RTE_SCHED_BE_QUEUES_PER_PIPE];
 };
 
 struct rte_sched_pipe {
@@ -100,8 +185,10 @@ struct rte_sched_pipe {
 	uint64_t tc_time; /* time of next update */
 	uint32_t tc_credits[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
 
+	uint8_t n_be_queues; /* Best effort traffic class queues */
+
 	/* Weighted Round Robin (WRR) */
-	uint8_t wrr_tokens[RTE_SCHED_QUEUES_PER_PIPE];
+	uint8_t wrr_tokens[RTE_SCHED_BE_QUEUES_PER_PIPE];
 
 	/* TC oversubscription */
 	uint32_t tc_ov_credits;
@@ -121,55 +208,12 @@ struct rte_sched_queue_extra {
 #endif
 };
 
-enum grinder_state {
-	e_GRINDER_PREFETCH_PIPE = 0,
-	e_GRINDER_PREFETCH_TC_QUEUE_ARRAYS,
-	e_GRINDER_PREFETCH_MBUF,
-	e_GRINDER_READ_MBUF
-};
-
-struct rte_sched_grinder {
-	/* Pipe cache */
-	uint16_t pcache_qmask[RTE_SCHED_GRINDER_PCACHE_SIZE];
-	uint32_t pcache_qindex[RTE_SCHED_GRINDER_PCACHE_SIZE];
-	uint32_t pcache_w;
-	uint32_t pcache_r;
-
-	/* Current pipe */
-	enum grinder_state state;
-	uint32_t productive;
-	uint32_t pindex;
-	struct rte_sched_subport *subport;
-	struct rte_sched_pipe *pipe;
-	struct rte_sched_pipe_profile *pipe_params;
-
-	/* TC cache */
-	uint8_t tccache_qmask[4];
-	uint32_t tccache_qindex[4];
-	uint32_t tccache_w;
-	uint32_t tccache_r;
-
-	/* Current TC */
-	uint32_t tc_index;
-	struct rte_sched_queue *queue[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
-	struct rte_mbuf **qbase[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
-	uint32_t qindex[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
-	uint16_t qsize;
-	uint32_t qmask;
-	uint32_t qpos;
-	struct rte_mbuf *pkt;
-
-	/* WRR */
-	uint16_t wrr_tokens[RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS];
-	uint16_t wrr_mask[RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS];
-	uint8_t wrr_cost[RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS];
-};
-
 struct rte_sched_port {
 	/* User parameters */
 	uint32_t n_subports_per_port;
 	uint32_t n_pipes_per_subport;
 	uint32_t n_pipes_per_subport_log2;
+	int socket;
 	uint32_t rate;
 	uint32_t mtu;
 	uint32_t frame_overhead;
@@ -199,6 +243,9 @@ struct rte_sched_port {
 	uint32_t busy_grinders;
 	struct rte_mbuf **pkts_out;
 	uint32_t n_pkts_out;
+	uint32_t subport_id;
+
+	uint32_t max_subport_pipes_log2; /* Max number of subport pipes */
 
 	/* Queue base calculation */
 	uint32_t qsize_add[RTE_SCHED_QUEUES_PER_PIPE];
@@ -212,6 +259,7 @@ struct rte_sched_port {
 	struct rte_sched_pipe_profile *pipe_profiles;
 	uint8_t *bmp_array;
 	struct rte_mbuf **queue_array;
+	struct rte_sched_subport *subports[0];
 	uint8_t memory[0] __rte_cache_aligned;
 } __rte_cache_aligned;
 
@@ -226,6 +274,16 @@ enum rte_sched_port_array {
 	e_RTE_SCHED_PORT_ARRAY_TOTAL,
 };
 
+enum rte_sched_subport_array {
+	e_RTE_SCHED_SUBPORT_ARRAY_PIPE = 0,
+	e_RTE_SCHED_SUBPORT_ARRAY_QUEUE,
+	e_RTE_SCHED_SUBPORT_ARRAY_QUEUE_EXTRA,
+	e_RTE_SCHED_SUBPORT_ARRAY_PIPE_PROFILES,
+	e_RTE_SCHED_SUBPORT_ARRAY_BMP_ARRAY,
+	e_RTE_SCHED_SUBPORT_ARRAY_QUEUE_ARRAY,
+	e_RTE_SCHED_SUBPORT_ARRAY_TOTAL,
+};
+
 #ifdef RTE_SCHED_COLLECT_STATS
 
 static inline uint32_t
@@ -483,7 +541,7 @@ rte_sched_port_log_pipe_profile(struct rte_sched_port *port, uint32_t i)
 		" Token bucket: period = %u, credits per period = %u, size = %u\n"
 		" Traffic classes: period = %u, credits per period = [%u, %u, %u, %u]\n"
 		" Traffic class 3 oversubscription: weight = %hhu\n"
-		" WRR cost: [%hhu, %hhu, %hhu, %hhu], [%hhu, %hhu, %hhu, %hhu], [%hhu, %hhu, %hhu, %hhu], [%hhu, %hhu, %hhu, %hhu]\n",
+		" WRR cost: [%hhu, %hhu, %hhu, %hhu], [%hhu, %hhu, %hhu, %hhu],\n",
 		i,
 
 		/* Token bucket */
@@ -502,10 +560,8 @@ rte_sched_port_log_pipe_profile(struct rte_sched_port *port, uint32_t i)
 		p->tc_ov_weight,
 
 		/* WRR */
-		p->wrr_cost[ 0], p->wrr_cost[ 1], p->wrr_cost[ 2], p->wrr_cost[ 3],
-		p->wrr_cost[ 4], p->wrr_cost[ 5], p->wrr_cost[ 6], p->wrr_cost[ 7],
-		p->wrr_cost[ 8], p->wrr_cost[ 9], p->wrr_cost[10], p->wrr_cost[11],
-		p->wrr_cost[12], p->wrr_cost[13], p->wrr_cost[14], p->wrr_cost[15]);
+		p->wrr_cost[0], p->wrr_cost[1], p->wrr_cost[2], p->wrr_cost[3],
+		p->wrr_cost[4], p->wrr_cost[5], p->wrr_cost[6], p->wrr_cost[7]);
 }
 
 static inline uint64_t
-- 
2.21.0
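[Editor's illustration, not part of the patch]

For readers who want the gist of the structural change without reading the whole
diff: the two new macros encode the convention that the best-effort (BE) traffic
class is the last traffic class of a pipe, and the WRR state kept in
struct rte_sched_pipe_profile, struct rte_sched_pipe and struct rte_sched_grinder
is now sized for the BE queues only rather than for every queue of the pipe. The
sketch below is a minimal standalone illustration of that macro arithmetic; the
two *_PER_PIPE values are assumptions chosen only so the example compiles and
prints something, since their real definitions live elsewhere in the librte_sched
headers touched by this series.

/*
 * Standalone sketch, not part of the patch: it only mirrors the macro
 * relationship introduced above. The two *_PER_PIPE values are assumed
 * here purely for illustration.
 */
#include <stdio.h>
#include <stdint.h>

#define RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE 9 /* assumed value */
#define RTE_SCHED_BE_QUEUES_PER_PIPE       8 /* assumed value */

/* The two macros added by this patch, copied from the diff above. */
#define RTE_SCHED_MAX_QUEUES_PER_TC RTE_SCHED_BE_QUEUES_PER_PIPE
#define RTE_SCHED_TRAFFIC_CLASS_BE  (RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE - 1)

int main(void)
{
	/* WRR state is now sized for the best-effort queues only. */
	uint8_t wrr_cost[RTE_SCHED_BE_QUEUES_PER_PIPE] = {0};

	printf("best-effort TC index: %d\n", RTE_SCHED_TRAFFIC_CLASS_BE);
	printf("max queues per TC   : %d\n", RTE_SCHED_MAX_QUEUES_PER_TC);
	printf("WRR entries per pipe: %zu\n",
	       sizeof(wrr_cost) / sizeof(wrr_cost[0]));
	return 0;
}

With the assumed values this prints a BE traffic class index of 8 and 8 WRR
entries, i.e. the same dimension used for the resized wrr_cost[], wrr_tokens[]
and wrr_mask[] arrays in the structures above.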