From: Jasvinder Singh <jasvinder.singh@intel.com>
To: dev@dpdk.org
Cc: cristian.dumitrescu@intel.com,
Abraham Tovar <abrahamx.tovar@intel.com>,
Lukasz Krakowiak <lukaszx.krakowiak@intel.com>
Subject: [dpdk-dev] [PATCH v3 10/11] examples/qos_sched: add tc and queue config flexibility
Date: Thu, 11 Jul 2019 11:26:58 +0100
Message-ID: <20190711102659.59001-11-jasvinder.singh@intel.com>
In-Reply-To: <20190711102659.59001-1-jasvinder.singh@intel.com>
Update the qos_sched sample app to support flexible configuration of
pipe traffic classes and queues.
Signed-off-by: Jasvinder Singh <jasvinder.singh@intel.com>
Signed-off-by: Abraham Tovar <abrahamx.tovar@intel.com>
Signed-off-by: Lukasz Krakowiak <lukaszx.krakowiak@intel.com>
---
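A note for reviewers (not part of the patch): with the flexible queue
configuration, the app no longer reads the traffic class from a dedicated
IP field; both queue and traffic class are derived from the destination
IP host byte. A minimal sketch of that mapping, using a made-up helper
name, is shown below (active_queues[]/n_active_queues are filled by
cfg_load_port() from the non-zero "queue sizes" entries):

#include <stdint.h>
#include <rte_sched.h>  /* RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE */

/* Illustration only: pick one of the configured queues from the
 * destination IP host byte and derive the traffic class from the queue
 * index; all best-effort queues map to the last (lowest priority) TC.
 */
static inline void
map_host_field_to_qos(uint16_t host_field, const uint32_t *active_queues,
        uint32_t n_active_queues, uint32_t *queue, uint32_t *traffic_class)
{
        *queue = active_queues[(host_field >> 8) % n_active_queues];
        *traffic_class = (*queue > RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE - 1) ?
                (RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE - 1) : *queue;
}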
examples/qos_sched/app_thread.c | 9 +-
examples/qos_sched/cfg_file.c | 119 +++++---
examples/qos_sched/init.c | 63 +++-
examples/qos_sched/main.h | 4 +
examples/qos_sched/profile.cfg | 66 +++-
examples/qos_sched/profile_ov.cfg | 54 +++-
examples/qos_sched/stats.c | 483 +++++++++++++++++-------------
7 files changed, 517 insertions(+), 281 deletions(-)
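The cfg_file.c changes below add the new "tc 4 rate" .. "tc 12 rate"
entries one by one, mirroring the existing "tc 0 rate" .. "tc 3 rate"
handling. Purely as a summary (not part of the patch; the helper name is
hypothetical), the same parsing can be expressed as a loop over the
traffic classes:

#include <stdio.h>
#include <stdlib.h>
#include <rte_cfgfile.h>
#include <rte_sched.h>

/* Illustration only: read "tc N rate" for N = 0..12 from a config
 * section into a tc_rate[] array, as cfg_load_pipe()/cfg_load_subport()
 * now do entry by entry.
 */
static void
load_tc_rates(struct rte_cfgfile *cfg, const char *sec_name, uint32_t *tc_rate)
{
        unsigned int tc;

        for (tc = 0; tc < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; tc++) {
                char key[32];
                const char *entry;

                snprintf(key, sizeof(key), "tc %u rate", tc);
                entry = rte_cfgfile_get_entry(cfg, sec_name, key);
                if (entry)
                        tc_rate[tc] = (uint32_t)atoi(entry);
        }
}

The patch itself keeps the explicit per-entry form for consistency with
the surrounding code.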
diff --git a/examples/qos_sched/app_thread.c b/examples/qos_sched/app_thread.c
index e14b275e3..1ce3639ee 100644
--- a/examples/qos_sched/app_thread.c
+++ b/examples/qos_sched/app_thread.c
@@ -20,13 +20,11 @@
* QoS parameters are encoded as follows:
* Outer VLAN ID defines subport
* Inner VLAN ID defines pipe
- * Destination IP 0.0.XXX.0 defines traffic class
* Destination IP host (0.0.0.XXX) defines queue
* Values below define offset to each field from start of frame
*/
#define SUBPORT_OFFSET 7
#define PIPE_OFFSET 9
-#define TC_OFFSET 20
#define QUEUE_OFFSET 20
#define COLOR_OFFSET 19
@@ -40,10 +38,9 @@ get_pkt_sched(struct rte_mbuf *m, uint32_t *subport, uint32_t *pipe,
(port_params.n_subports_per_port - 1); /* Outer VLAN ID*/
*pipe = (rte_be_to_cpu_16(pdata[PIPE_OFFSET]) & 0x0FFF) &
(port_params.n_pipes_per_subport - 1); /* Inner VLAN ID */
- *traffic_class = (pdata[QUEUE_OFFSET] & 0x0F) &
- (RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE - 1); /* Destination IP */
- *queue = ((pdata[QUEUE_OFFSET] >> 8) & 0x0F) &
- (RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS - 1) ; /* Destination IP */
+ *queue = active_queues[(pdata[QUEUE_OFFSET] >> 8) % n_active_queues];
+ *traffic_class = (*queue > (RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE - 1) ?
+ (RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE - 1) : *queue); /* Destination IP */
*color = pdata[COLOR_OFFSET] & 0x03; /* Destination IP */
return 0;
diff --git a/examples/qos_sched/cfg_file.c b/examples/qos_sched/cfg_file.c
index 76ffffc4b..522de1aea 100644
--- a/examples/qos_sched/cfg_file.c
+++ b/examples/qos_sched/cfg_file.c
@@ -29,6 +29,9 @@ cfg_load_port(struct rte_cfgfile *cfg, struct rte_sched_port_params *port_params
if (!cfg || !port_params)
return -1;
+ memset(active_queues, 0, sizeof(active_queues));
+ n_active_queues = 0;
+
entry = rte_cfgfile_get_entry(cfg, "port", "frame overhead");
if (entry)
port_params->frame_overhead = (uint32_t)atoi(entry);
@@ -45,8 +48,12 @@ cfg_load_port(struct rte_cfgfile *cfg, struct rte_sched_port_params *port_params
if (entry) {
char *next;
- for(j = 0; j < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; j++) {
+ for (j = 0; j < RTE_SCHED_QUEUES_PER_PIPE; j++) {
port_params->qsize[j] = (uint16_t)strtol(entry, &next, 10);
+ if (port_params->qsize[j] != 0) {
+ active_queues[n_active_queues] = j;
+ n_active_queues++;
+ }
if (next == NULL)
break;
entry = next;
@@ -173,46 +180,52 @@ cfg_load_pipe(struct rte_cfgfile *cfg, struct rte_sched_pipe_params *pipe_params
if (entry)
pipe_params[j].tc_rate[3] = (uint32_t)atoi(entry);
+ entry = rte_cfgfile_get_entry(cfg, pipe_name, "tc 4 rate");
+ if (entry)
+ pipe_params[j].tc_rate[4] = (uint32_t)atoi(entry);
+
+ entry = rte_cfgfile_get_entry(cfg, pipe_name, "tc 5 rate");
+ if (entry)
+ pipe_params[j].tc_rate[5] = (uint32_t)atoi(entry);
+
+ entry = rte_cfgfile_get_entry(cfg, pipe_name, "tc 6 rate");
+ if (entry)
+ pipe_params[j].tc_rate[6] = (uint32_t)atoi(entry);
+
+ entry = rte_cfgfile_get_entry(cfg, pipe_name, "tc 7 rate");
+ if (entry)
+ pipe_params[j].tc_rate[7] = (uint32_t)atoi(entry);
+
+ entry = rte_cfgfile_get_entry(cfg, pipe_name, "tc 8 rate");
+ if (entry)
+ pipe_params[j].tc_rate[8] = (uint32_t)atoi(entry);
+
+ entry = rte_cfgfile_get_entry(cfg, pipe_name, "tc 9 rate");
+ if (entry)
+ pipe_params[j].tc_rate[9] = (uint32_t)atoi(entry);
+
+ entry = rte_cfgfile_get_entry(cfg, pipe_name, "tc 10 rate");
+ if (entry)
+ pipe_params[j].tc_rate[10] = (uint32_t)atoi(entry);
+
+ entry = rte_cfgfile_get_entry(cfg, pipe_name, "tc 11 rate");
+ if (entry)
+ pipe_params[j].tc_rate[11] = (uint32_t)atoi(entry);
+
+ entry = rte_cfgfile_get_entry(cfg, pipe_name, "tc 12 rate");
+ if (entry)
+ pipe_params[j].tc_rate[12] = (uint32_t)atoi(entry);
+
#ifdef RTE_SCHED_SUBPORT_TC_OV
- entry = rte_cfgfile_get_entry(cfg, pipe_name, "tc 3 oversubscription weight");
+ entry = rte_cfgfile_get_entry(cfg, pipe_name, "tc 12 oversubscription weight");
if (entry)
pipe_params[j].tc_ov_weight = (uint8_t)atoi(entry);
#endif
- entry = rte_cfgfile_get_entry(cfg, pipe_name, "tc 0 wrr weights");
+ entry = rte_cfgfile_get_entry(cfg, pipe_name, "tc 12 wrr weights");
if (entry) {
- for(i = 0; i < RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS; i++) {
- pipe_params[j].wrr_weights[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE*0 + i] =
- (uint8_t)strtol(entry, &next, 10);
- if (next == NULL)
- break;
- entry = next;
- }
- }
- entry = rte_cfgfile_get_entry(cfg, pipe_name, "tc 1 wrr weights");
- if (entry) {
- for(i = 0; i < RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS; i++) {
- pipe_params[j].wrr_weights[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE*1 + i] =
- (uint8_t)strtol(entry, &next, 10);
- if (next == NULL)
- break;
- entry = next;
- }
- }
- entry = rte_cfgfile_get_entry(cfg, pipe_name, "tc 2 wrr weights");
- if (entry) {
- for(i = 0; i < RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS; i++) {
- pipe_params[j].wrr_weights[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE*2 + i] =
- (uint8_t)strtol(entry, &next, 10);
- if (next == NULL)
- break;
- entry = next;
- }
- }
- entry = rte_cfgfile_get_entry(cfg, pipe_name, "tc 3 wrr weights");
- if (entry) {
- for(i = 0; i < RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS; i++) {
- pipe_params[j].wrr_weights[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE*3 + i] =
+ for (i = 0; i < RTE_SCHED_BE_QUEUES_PER_PIPE; i++) {
+ pipe_params[j].wrr_weights[i] =
(uint8_t)strtol(entry, &next, 10);
if (next == NULL)
break;
@@ -267,6 +280,42 @@ cfg_load_subport(struct rte_cfgfile *cfg, struct rte_sched_subport_params *subpo
if (entry)
subport_params[i].tc_rate[3] = (uint32_t)atoi(entry);
+ entry = rte_cfgfile_get_entry(cfg, sec_name, "tc 4 rate");
+ if (entry)
+ subport_params[i].tc_rate[4] = (uint32_t)atoi(entry);
+
+ entry = rte_cfgfile_get_entry(cfg, sec_name, "tc 5 rate");
+ if (entry)
+ subport_params[i].tc_rate[5] = (uint32_t)atoi(entry);
+
+ entry = rte_cfgfile_get_entry(cfg, sec_name, "tc 6 rate");
+ if (entry)
+ subport_params[i].tc_rate[6] = (uint32_t)atoi(entry);
+
+ entry = rte_cfgfile_get_entry(cfg, sec_name, "tc 7 rate");
+ if (entry)
+ subport_params[i].tc_rate[7] = (uint32_t)atoi(entry);
+
+ entry = rte_cfgfile_get_entry(cfg, sec_name, "tc 8 rate");
+ if (entry)
+ subport_params[i].tc_rate[8] = (uint32_t)atoi(entry);
+
+ entry = rte_cfgfile_get_entry(cfg, sec_name, "tc 9 rate");
+ if (entry)
+ subport_params[i].tc_rate[9] = (uint32_t)atoi(entry);
+
+ entry = rte_cfgfile_get_entry(cfg, sec_name, "tc 10 rate");
+ if (entry)
+ subport_params[i].tc_rate[10] = (uint32_t)atoi(entry);
+
+ entry = rte_cfgfile_get_entry(cfg, sec_name, "tc 11 rate");
+ if (entry)
+ subport_params[i].tc_rate[11] = (uint32_t)atoi(entry);
+
+ entry = rte_cfgfile_get_entry(cfg, sec_name, "tc 12 rate");
+ if (entry)
+ subport_params[i].tc_rate[12] = (uint32_t)atoi(entry);
+
int n_entries = rte_cfgfile_section_num_entries(cfg, sec_name);
struct rte_cfgfile_entry entries[n_entries];
diff --git a/examples/qos_sched/init.c b/examples/qos_sched/init.c
index 6b63d4e0e..5fd2a38e4 100644
--- a/examples/qos_sched/init.c
+++ b/examples/qos_sched/init.c
@@ -170,17 +170,20 @@ static struct rte_sched_subport_params subport_params[MAX_SCHED_SUBPORTS] = {
.tb_rate = 1250000000,
.tb_size = 1000000,
- .tc_rate = {1250000000, 1250000000, 1250000000, 1250000000},
+ .tc_rate = {1250000000, 1250000000, 1250000000, 1250000000,
+ 1250000000, 1250000000, 1250000000, 1250000000, 1250000000,
+ 1250000000, 1250000000, 1250000000, 1250000000},
.tc_period = 10,
},
};
-static struct rte_sched_pipe_params pipe_profiles[RTE_SCHED_PIPE_PROFILES_PER_PORT] = {
+static struct rte_sched_pipe_params pipe_profiles[MAX_SCHED_PIPE_PROFILES] = {
{ /* Profile #0 */
.tb_rate = 305175,
.tb_size = 1000000,
- .tc_rate = {305175, 305175, 305175, 305175},
+ .tc_rate = {305175, 305175, 305175, 305175, 305175, 305175,
+ 305175, 305175, 305175, 305175, 305175, 305175, 305175},
.tc_period = 40,
#ifdef RTE_SCHED_SUBPORT_TC_OV
.tc_ov_weight = 1,
@@ -198,9 +201,10 @@ struct rte_sched_port_params port_params = {
.frame_overhead = RTE_SCHED_FRAME_OVERHEAD_DEFAULT,
.n_subports_per_port = 1,
.n_pipes_per_subport = 4096,
- .qsize = {64, 64, 64, 64},
+ .qsize = {64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64},
.pipe_profiles = pipe_profiles,
.n_pipe_profiles = sizeof(pipe_profiles) / sizeof(struct rte_sched_pipe_params),
+ .n_max_pipe_profiles = MAX_SCHED_PIPE_PROFILES,
#ifdef RTE_SCHED_RED
.red_params = {
@@ -222,8 +226,53 @@ struct rte_sched_port_params port_params = {
/* Traffic Class 3 - Colors Green / Yellow / Red */
[3][0] = {.min_th = 48, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
[3][1] = {.min_th = 40, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
- [3][2] = {.min_th = 32, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9}
- }
+ [3][2] = {.min_th = 32, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
+
+ /* Traffic Class 4 - Colors Green / Yellow / Red */
+ [4][0] = {.min_th = 48, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
+ [4][1] = {.min_th = 40, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
+ [4][2] = {.min_th = 32, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
+
+ /* Traffic Class 5 - Colors Green / Yellow / Red */
+ [5][0] = {.min_th = 48, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
+ [5][1] = {.min_th = 40, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
+ [5][2] = {.min_th = 32, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
+
+ /* Traffic Class 6 - Colors Green / Yellow / Red */
+ [6][0] = {.min_th = 48, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
+ [6][1] = {.min_th = 40, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
+ [6][2] = {.min_th = 32, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
+
+ /* Traffic Class 7 - Colors Green / Yellow / Red */
+ [7][0] = {.min_th = 48, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
+ [7][1] = {.min_th = 40, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
+ [7][2] = {.min_th = 32, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
+
+ /* Traffic Class 8 - Colors Green / Yellow / Red */
+ [8][0] = {.min_th = 48, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
+ [8][1] = {.min_th = 40, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
+ [8][2] = {.min_th = 32, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
+
+ /* Traffic Class 9 - Colors Green / Yellow / Red */
+ [9][0] = {.min_th = 48, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
+ [9][1] = {.min_th = 40, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
+ [9][2] = {.min_th = 32, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
+
+ /* Traffic Class 10 - Colors Green / Yellow / Red */
+ [10][0] = {.min_th = 48, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
+ [10][1] = {.min_th = 40, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
+ [10][2] = {.min_th = 32, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
+
+ /* Traffic Class 11 - Colors Green / Yellow / Red */
+ [11][0] = {.min_th = 48, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
+ [11][1] = {.min_th = 40, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
+ [11][2] = {.min_th = 32, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
+
+ /* Traffic Class 12 - Colors Green / Yellow / Red */
+ [12][0] = {.min_th = 48, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
+ [12][1] = {.min_th = 40, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
+ [12][2] = {.min_th = 32, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
+ },
#endif /* RTE_SCHED_RED */
};
@@ -255,7 +304,7 @@ app_init_sched_port(uint32_t portid, uint32_t socketid)
subport, err);
}
- for (pipe = 0; pipe < port_params.n_pipes_per_subport; pipe ++) {
+ for (pipe = 0; pipe < port_params.n_pipes_per_subport; pipe++) {
if (app_pipe_to_profile[subport][pipe] != -1) {
err = rte_sched_pipe_config(port, subport, pipe,
app_pipe_to_profile[subport][pipe]);
diff --git a/examples/qos_sched/main.h b/examples/qos_sched/main.h
index 8a2741c58..d8f890b64 100644
--- a/examples/qos_sched/main.h
+++ b/examples/qos_sched/main.h
@@ -50,6 +50,7 @@ extern "C" {
#define MAX_DATA_STREAMS (APP_MAX_LCORE/2)
#define MAX_SCHED_SUBPORTS 8
#define MAX_SCHED_PIPES 4096
+#define MAX_SCHED_PIPE_PROFILES 256
#ifndef APP_COLLECT_STAT
#define APP_COLLECT_STAT 1
@@ -147,6 +148,9 @@ extern struct burst_conf burst_conf;
extern struct ring_thresh rx_thresh;
extern struct ring_thresh tx_thresh;
+uint32_t active_queues[RTE_SCHED_QUEUES_PER_PIPE];
+uint32_t n_active_queues;
+
extern struct rte_sched_port_params port_params;
int app_parse_args(int argc, char **argv);
diff --git a/examples/qos_sched/profile.cfg b/examples/qos_sched/profile.cfg
index f5b704cc6..55fd7d1e0 100644
--- a/examples/qos_sched/profile.cfg
+++ b/examples/qos_sched/profile.cfg
@@ -1,6 +1,6 @@
; BSD LICENSE
;
-; Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+; Copyright(c) 2010-2019 Intel Corporation. All rights reserved.
; All rights reserved.
;
; Redistribution and use in source and binary forms, with or without
@@ -33,12 +33,12 @@
; 10GbE output port:
; * Single subport (subport 0):
; - Subport rate set to 100% of port rate
-; - Each of the 4 traffic classes has rate set to 100% of port rate
+; - Each of the 13 traffic classes has rate set to 100% of port rate
; * 4K pipes per subport 0 (pipes 0 .. 4095) with identical configuration:
; - Pipe rate set to 1/4K of port rate
-; - Each of the 4 traffic classes has rate set to 100% of pipe rate
-; - Within each traffic class, the byte-level WRR weights for the 4 queues
-; are set to 1:1:1:1
+; - Each of the 13 traffic classes has rate set to 100% of pipe rate
+; - Within the lowest priority traffic class (best-effort), the byte-level
+;   WRR weights for the 4 queues are set to 1:1:1:1
;
; For more details, please refer to chapter "Quality of Service (QoS) Framework"
; of Data Plane Development Kit (DPDK) Programmer's Guide.
@@ -48,7 +48,7 @@
frame overhead = 24
number of subports per port = 1
number of pipes per subport = 4096
-queue sizes = 64 64 64 64
+queue sizes = 64 64 64 64 64 64 64 64 64 64 64 64 64 64 64 64
; Subport configuration
[subport 0]
@@ -59,6 +59,16 @@ tc 0 rate = 1250000000 ; Bytes per second
tc 1 rate = 1250000000 ; Bytes per second
tc 2 rate = 1250000000 ; Bytes per second
tc 3 rate = 1250000000 ; Bytes per second
+tc 4 rate = 1250000000 ; Bytes per second
+tc 5 rate = 1250000000 ; Bytes per second
+tc 6 rate = 1250000000 ; Bytes per second
+tc 7 rate = 1250000000 ; Bytes per second
+tc 8 rate = 1250000000 ; Bytes per second
+tc 9 rate = 1250000000 ; Bytes per second
+tc 10 rate = 1250000000 ; Bytes per second
+tc 11 rate = 1250000000 ; Bytes per second
+tc 12 rate = 1250000000 ; Bytes per second
+
tc period = 10 ; Milliseconds
pipe 0-4095 = 0 ; These pipes are configured with pipe profile 0
@@ -72,14 +82,21 @@ tc 0 rate = 305175 ; Bytes per second
tc 1 rate = 305175 ; Bytes per second
tc 2 rate = 305175 ; Bytes per second
tc 3 rate = 305175 ; Bytes per second
-tc period = 40 ; Milliseconds
+tc 4 rate = 305175 ; Bytes per second
+tc 5 rate = 305175 ; Bytes per second
+tc 6 rate = 305175 ; Bytes per second
+tc 7 rate = 305175 ; Bytes per second
+tc 8 rate = 305175 ; Bytes per second
+tc 9 rate = 305175 ; Bytes per second
+tc 10 rate = 305175 ; Bytes per second
+tc 11 rate = 305175 ; Bytes per second
+tc 12 rate = 305175 ; Bytes per second
+
+tc period = 40 ; Milliseconds
-tc 3 oversubscription weight = 1
+tc 12 oversubscription weight = 1
-tc 0 wrr weights = 1 1 1 1
-tc 1 wrr weights = 1 1 1 1
-tc 2 wrr weights = 1 1 1 1
-tc 3 wrr weights = 1 1 1 1
+tc 12 wrr weights = 1 1 1 1
; RED params per traffic class and color (Green / Yellow / Red)
[red]
@@ -102,3 +119,28 @@ tc 3 wred min = 48 40 32
tc 3 wred max = 64 64 64
tc 3 wred inv prob = 10 10 10
tc 3 wred weight = 9 9 9
+
+tc 4 wred min = 48 40 32
+tc 4 wred max = 64 64 64
+tc 4 wred inv prob = 10 10 10
+tc 4 wred weight = 9 9 9
+
+tc 5 wred min = 48 40 32
+tc 5 wred max = 64 64 64
+tc 5 wred inv prob = 10 10 10
+tc 5 wred weight = 9 9 9
+
+tc 6 wred min = 48 40 32
+tc 6 wred max = 64 64 64
+tc 6 wred inv prob = 10 10 10
+tc 6 wred weight = 9 9 9
+
+tc 7 wred min = 48 40 32
+tc 7 wred max = 64 64 64
+tc 7 wred inv prob = 10 10 10
+tc 7 wred weight = 9 9 9
+
+tc 8 wred min = 48 40 32
+tc 8 wred max = 64 64 64
+tc 8 wred inv prob = 10 10 10
+tc 8 wred weight = 9 9 9
diff --git a/examples/qos_sched/profile_ov.cfg b/examples/qos_sched/profile_ov.cfg
index 33000df9e..d5d9b321e 100644
--- a/examples/qos_sched/profile_ov.cfg
+++ b/examples/qos_sched/profile_ov.cfg
@@ -1,6 +1,6 @@
; BSD LICENSE
;
-; Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+; Copyright(c) 2010-2019 Intel Corporation. All rights reserved.
; All rights reserved.
;
; Redistribution and use in source and binary forms, with or without
@@ -34,7 +34,7 @@
frame overhead = 24
number of subports per port = 1
number of pipes per subport = 32
-queue sizes = 64 64 64 64
+queue sizes = 64 64 64 64 64 64 64 64 64 64 64 64 64 64 64 64
; Subport configuration
[subport 0]
@@ -45,6 +45,15 @@ tc 0 rate = 8400000 ; Bytes per second
tc 1 rate = 8400000 ; Bytes per second
tc 2 rate = 8400000 ; Bytes per second
tc 3 rate = 8400000 ; Bytes per second
+tc 4 rate = 8400000 ; Bytes per second
+tc 5 rate = 8400000 ; Bytes per second
+tc 6 rate = 8400000 ; Bytes per second
+tc 7 rate = 8400000 ; Bytes per second
+tc 8 rate = 8400000 ; Bytes per second
+tc 9 rate = 8400000 ; Bytes per second
+tc 10 rate = 8400000 ; Bytes per second
+tc 11 rate = 8400000 ; Bytes per second
+tc 12 rate = 8400000 ; Bytes per second
tc period = 10 ; Milliseconds
pipe 0-31 = 0 ; These pipes are configured with pipe profile 0
@@ -58,14 +67,20 @@ tc 0 rate = 16800000 ; Bytes per second
tc 1 rate = 16800000 ; Bytes per second
tc 2 rate = 16800000 ; Bytes per second
tc 3 rate = 16800000 ; Bytes per second
+tc 4 rate = 16800000 ; Bytes per second
+tc 5 rate = 16800000 ; Bytes per second
+tc 6 rate = 16800000 ; Bytes per second
+tc 7 rate = 16800000 ; Bytes per second
+tc 8 rate = 16800000 ; Bytes per second
+tc 9 rate = 16800000 ; Bytes per second
+tc 10 rate = 16800000 ; Bytes per second
+tc 11 rate = 16800000 ; Bytes per second
+tc 12 rate = 16800000 ; Bytes per second
tc period = 28 ; Milliseconds
-tc 3 oversubscription weight = 1
+tc 12 oversubscription weight = 1
-tc 0 wrr weights = 1 1 1 1
-tc 1 wrr weights = 1 1 1 1
-tc 2 wrr weights = 1 1 1 1
-tc 3 wrr weights = 1 1 1 1
+tc 12 wrr weights = 1 1 1 1
; RED params per traffic class and color (Green / Yellow / Red)
[red]
@@ -88,3 +103,28 @@ tc 3 wred min = 48 40 32
tc 3 wred max = 64 64 64
tc 3 wred inv prob = 10 10 10
tc 3 wred weight = 9 9 9
+
+tc 4 wred min = 48 40 32
+tc 4 wred max = 64 64 64
+tc 4 wred inv prob = 10 10 10
+tc 4 wred weight = 9 9 9
+
+tc 5 wred min = 48 40 32
+tc 5 wred max = 64 64 64
+tc 5 wred inv prob = 10 10 10
+tc 5 wred weight = 9 9 9
+
+tc 6 wred min = 48 40 32
+tc 6 wred max = 64 64 64
+tc 6 wred inv prob = 10 10 10
+tc 6 wred weight = 9 9 9
+
+tc 7 wred min = 48 40 32
+tc 7 wred max = 64 64 64
+tc 7 wred inv prob = 10 10 10
+tc 7 wred weight = 9 9 9
+
+tc 8 wred min = 48 40 32
+tc 8 wred max = 64 64 64
+tc 8 wred inv prob = 10 10 10
+tc 8 wred weight = 9 9 9
diff --git a/examples/qos_sched/stats.c b/examples/qos_sched/stats.c
index 8193d964c..4f5fdda47 100644
--- a/examples/qos_sched/stats.c
+++ b/examples/qos_sched/stats.c
@@ -11,278 +11,333 @@ int
qavg_q(uint16_t port_id, uint32_t subport_id, uint32_t pipe_id, uint8_t tc,
uint8_t q)
{
- struct rte_sched_queue_stats stats;
- struct rte_sched_port *port;
- uint16_t qlen;
- uint32_t queue_id, count, i;
- uint32_t average;
-
- for (i = 0; i < nb_pfc; i++) {
- if (qos_conf[i].tx_port == port_id)
- break;
- }
- if (i == nb_pfc || subport_id >= port_params.n_subports_per_port || pipe_id >= port_params.n_pipes_per_subport
- || tc >= RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE || q >= RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS)
- return -1;
-
- port = qos_conf[i].sched_port;
-
- queue_id = RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE * RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS * (subport_id * port_params.n_pipes_per_subport + pipe_id);
- queue_id = queue_id + (tc * RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS + q);
-
- average = 0;
-
- for (count = 0; count < qavg_ntimes; count++) {
- rte_sched_queue_read_stats(port, queue_id, &stats, &qlen);
- average += qlen;
- usleep(qavg_period);
- }
-
- average /= qavg_ntimes;
-
- printf("\nAverage queue size: %" PRIu32 " bytes.\n\n", average);
-
- return 0;
+ struct rte_sched_queue_stats stats;
+ struct rte_sched_port *port;
+ uint16_t qlen;
+ uint32_t count, i, queue_id = 0;
+ uint32_t average;
+
+ for (i = 0; i < nb_pfc; i++) {
+ if (qos_conf[i].tx_port == port_id)
+ break;
+ }
+
+ if (i == nb_pfc || subport_id >= port_params.n_subports_per_port ||
+ pipe_id >= port_params.n_pipes_per_subport ||
+ tc >= RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE ||
+ q >= RTE_SCHED_BE_QUEUES_PER_PIPE ||
+ (tc < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE - 1 && q > 0))
+ return -1;
+
+ port = qos_conf[i].sched_port;
+ for (i = 0; i < subport_id; i++)
+ queue_id += port_params.n_pipes_per_subport *
+ RTE_SCHED_QUEUES_PER_PIPE;
+ if (tc < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE - 1)
+ queue_id += pipe_id * RTE_SCHED_QUEUES_PER_PIPE + tc;
+ else
+ queue_id += pipe_id * RTE_SCHED_QUEUES_PER_PIPE + tc + q;
+
+ average = 0;
+ for (count = 0; count < qavg_ntimes; count++) {
+ rte_sched_queue_read_stats(port, queue_id, &stats, &qlen);
+ average += qlen;
+ usleep(qavg_period);
+ }
+
+ average /= qavg_ntimes;
+
+ printf("\nAverage queue size: %" PRIu32 " bytes.\n\n", average);
+
+ return 0;
}
int
qavg_tcpipe(uint16_t port_id, uint32_t subport_id, uint32_t pipe_id,
- uint8_t tc)
+ uint8_t tc)
{
- struct rte_sched_queue_stats stats;
- struct rte_sched_port *port;
- uint16_t qlen;
- uint32_t queue_id, count, i;
- uint32_t average, part_average;
+ struct rte_sched_queue_stats stats;
+ struct rte_sched_port *port;
+ uint16_t qlen;
+ uint32_t count, i, queue_id = 0;
+ uint32_t average, part_average;
+
+ for (i = 0; i < nb_pfc; i++) {
+ if (qos_conf[i].tx_port == port_id)
+ break;
+ }
+
+ if (i == nb_pfc || subport_id >= port_params.n_subports_per_port ||
+ pipe_id >= port_params.n_pipes_per_subport ||
+ tc >= RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE)
+ return -1;
+
+ port = qos_conf[i].sched_port;
- for (i = 0; i < nb_pfc; i++) {
- if (qos_conf[i].tx_port == port_id)
- break;
- }
- if (i == nb_pfc || subport_id >= port_params.n_subports_per_port || pipe_id >= port_params.n_pipes_per_subport
- || tc >= RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE)
- return -1;
+ for (i = 0; i < subport_id; i++)
+ queue_id += port_params.n_pipes_per_subport * RTE_SCHED_QUEUES_PER_PIPE;
- port = qos_conf[i].sched_port;
+ queue_id += pipe_id * RTE_SCHED_QUEUES_PER_PIPE + tc;
- queue_id = RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE * RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS * (subport_id * port_params.n_pipes_per_subport + pipe_id);
+ average = 0;
- average = 0;
+ for (count = 0; count < qavg_ntimes; count++) {
+ part_average = 0;
- for (count = 0; count < qavg_ntimes; count++) {
- part_average = 0;
- for (i = 0; i < RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS; i++) {
- rte_sched_queue_read_stats(port, queue_id + (tc * RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS + i), &stats, &qlen);
- part_average += qlen;
- }
- average += part_average / RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS;
- usleep(qavg_period);
- }
+ if (tc < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE - 1) {
+ rte_sched_queue_read_stats(port, queue_id, &stats, &qlen);
+ part_average += qlen;
+ } else {
+ for (i = 0; i < RTE_SCHED_BE_QUEUES_PER_PIPE; i++) {
+ rte_sched_queue_read_stats(port, queue_id + i, &stats, &qlen);
+ part_average += qlen;
+ }
+ average += part_average / RTE_SCHED_BE_QUEUES_PER_PIPE;
+ }
+ usleep(qavg_period);
+ }
- average /= qavg_ntimes;
+ average /= qavg_ntimes;
- printf("\nAverage queue size: %" PRIu32 " bytes.\n\n", average);
+ printf("\nAverage queue size: %" PRIu32 " bytes.\n\n", average);
- return 0;
+ return 0;
}
int
qavg_pipe(uint16_t port_id, uint32_t subport_id, uint32_t pipe_id)
{
- struct rte_sched_queue_stats stats;
- struct rte_sched_port *port;
- uint16_t qlen;
- uint32_t queue_id, count, i;
- uint32_t average, part_average;
+ struct rte_sched_queue_stats stats;
+ struct rte_sched_port *port;
+ uint16_t qlen;
+ uint32_t count, i, queue_id = 0;
+ uint32_t average, part_average;
- for (i = 0; i < nb_pfc; i++) {
- if (qos_conf[i].tx_port == port_id)
- break;
- }
- if (i == nb_pfc || subport_id >= port_params.n_subports_per_port || pipe_id >= port_params.n_pipes_per_subport)
- return -1;
+ for (i = 0; i < nb_pfc; i++) {
+ if (qos_conf[i].tx_port == port_id)
+ break;
+ }
- port = qos_conf[i].sched_port;
+ if (i == nb_pfc ||
+ subport_id >= port_params.n_subports_per_port ||
+ pipe_id >= port_params.n_pipes_per_subport)
+ return -1;
- queue_id = RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE * RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS * (subport_id * port_params.n_pipes_per_subport + pipe_id);
+ port = qos_conf[i].sched_port;
- average = 0;
+ for (i = 0; i < subport_id; i++)
+ queue_id += port_params.n_pipes_per_subport *
+ RTE_SCHED_QUEUES_PER_PIPE;
- for (count = 0; count < qavg_ntimes; count++) {
- part_average = 0;
- for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE * RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS; i++) {
- rte_sched_queue_read_stats(port, queue_id + i, &stats, &qlen);
- part_average += qlen;
- }
- average += part_average / (RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE * RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS);
- usleep(qavg_period);
- }
+ queue_id += pipe_id * RTE_SCHED_QUEUES_PER_PIPE;
- average /= qavg_ntimes;
+ average = 0;
- printf("\nAverage queue size: %" PRIu32 " bytes.\n\n", average);
+ for (count = 0; count < qavg_ntimes; count++) {
+ part_average = 0;
+ for (i = 0; i < RTE_SCHED_QUEUES_PER_PIPE; i++) {
+ rte_sched_queue_read_stats(port, queue_id + i, &stats, &qlen);
+ part_average += qlen;
+ }
+ average += part_average / RTE_SCHED_QUEUES_PER_PIPE;
+ usleep(qavg_period);
+ }
- return 0;
+ average /= qavg_ntimes;
+
+ printf("\nAverage queue size: %" PRIu32 " bytes.\n\n", average);
+
+ return 0;
}
int
qavg_tcsubport(uint16_t port_id, uint32_t subport_id, uint8_t tc)
{
- struct rte_sched_queue_stats stats;
- struct rte_sched_port *port;
- uint16_t qlen;
- uint32_t queue_id, count, i, j;
- uint32_t average, part_average;
-
- for (i = 0; i < nb_pfc; i++) {
- if (qos_conf[i].tx_port == port_id)
- break;
- }
- if (i == nb_pfc || subport_id >= port_params.n_subports_per_port || tc >= RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE)
- return -1;
-
- port = qos_conf[i].sched_port;
-
- average = 0;
-
- for (count = 0; count < qavg_ntimes; count++) {
- part_average = 0;
- for (i = 0; i < port_params.n_pipes_per_subport; i++) {
- queue_id = RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE * RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS * (subport_id * port_params.n_pipes_per_subport + i);
-
- for (j = 0; j < RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS; j++) {
- rte_sched_queue_read_stats(port, queue_id + (tc * RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS + j), &stats, &qlen);
- part_average += qlen;
- }
- }
-
- average += part_average / (port_params.n_pipes_per_subport * RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS);
- usleep(qavg_period);
- }
-
- average /= qavg_ntimes;
-
- printf("\nAverage queue size: %" PRIu32 " bytes.\n\n", average);
-
- return 0;
+ struct rte_sched_queue_stats stats;
+ struct rte_sched_port *port;
+ uint16_t qlen;
+ uint32_t queue_id, count, i, j, subport_queue_id = 0;
+ uint32_t average, part_average;
+
+ for (i = 0; i < nb_pfc; i++) {
+ if (qos_conf[i].tx_port == port_id)
+ break;
+ }
+
+ if (i == nb_pfc ||
+ subport_id >= port_params.n_subports_per_port ||
+ tc >= RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE)
+ return -1;
+
+ port = qos_conf[i].sched_port;
+
+ for (i = 0; i < subport_id; i++)
+ subport_queue_id += port_params.n_pipes_per_subport * RTE_SCHED_QUEUES_PER_PIPE;
+
+ average = 0;
+
+ for (count = 0; count < qavg_ntimes; count++) {
+ part_average = 0;
+ for (i = 0; i < port_params.n_pipes_per_subport; i++) {
+ if (tc < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE - 1) {
+ queue_id = subport_queue_id + i * RTE_SCHED_QUEUES_PER_PIPE + tc;
+ rte_sched_queue_read_stats(port, queue_id, &stats, &qlen);
+ part_average += qlen;
+ } else {
+ for (j = 0; j < RTE_SCHED_BE_QUEUES_PER_PIPE; j++) {
+ queue_id = subport_queue_id +
+ i * RTE_SCHED_QUEUES_PER_PIPE + tc + j;
+ rte_sched_queue_read_stats(port, queue_id, &stats, &qlen);
+ part_average += qlen;
+ }
+ }
+ }
+
+ if (tc < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE - 1)
+ average += part_average / (port_params.n_pipes_per_subport);
+ else
+ average += part_average / (port_params.n_pipes_per_subport * RTE_SCHED_BE_QUEUES_PER_PIPE);
+
+ usleep(qavg_period);
+ }
+
+ average /= qavg_ntimes;
+
+ printf("\nAverage queue size: %" PRIu32 " bytes.\n\n", average);
+
+ return 0;
}
int
qavg_subport(uint16_t port_id, uint32_t subport_id)
{
- struct rte_sched_queue_stats stats;
- struct rte_sched_port *port;
- uint16_t qlen;
- uint32_t queue_id, count, i, j;
- uint32_t average, part_average;
+ struct rte_sched_queue_stats stats;
+ struct rte_sched_port *port;
+ uint16_t qlen;
+ uint32_t queue_id, count, i, j, subport_queue_id = 0;
+ uint32_t average, part_average;
+
+ for (i = 0; i < nb_pfc; i++) {
+ if (qos_conf[i].tx_port == port_id)
+ break;
+ }
+
+ if (i == nb_pfc ||
+ subport_id >= port_params.n_subports_per_port)
+ return -1;
- for (i = 0; i < nb_pfc; i++) {
- if (qos_conf[i].tx_port == port_id)
- break;
- }
- if (i == nb_pfc || subport_id >= port_params.n_subports_per_port)
- return -1;
+ port = qos_conf[i].sched_port;
- port = qos_conf[i].sched_port;
+ for (i = 0; i < subport_id; i++)
+ subport_queue_id += port_params.n_pipes_per_subport * RTE_SCHED_QUEUES_PER_PIPE;
- average = 0;
+ average = 0;
- for (count = 0; count < qavg_ntimes; count++) {
- part_average = 0;
- for (i = 0; i < port_params.n_pipes_per_subport; i++) {
- queue_id = RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE * RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS * (subport_id * port_params.n_pipes_per_subport + i);
+ for (count = 0; count < qavg_ntimes; count++) {
+ part_average = 0;
+ for (i = 0; i < port_params.n_pipes_per_subport; i++) {
+ queue_id = subport_queue_id + i * RTE_SCHED_QUEUES_PER_PIPE;
- for (j = 0; j < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE * RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS; j++) {
- rte_sched_queue_read_stats(port, queue_id + j, &stats, &qlen);
- part_average += qlen;
- }
- }
+ for (j = 0; j < RTE_SCHED_QUEUES_PER_PIPE; j++) {
+ rte_sched_queue_read_stats(port, queue_id + j, &stats, &qlen);
+ part_average += qlen;
+ }
+ }
- average += part_average / (port_params.n_pipes_per_subport * RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE * RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS);
- usleep(qavg_period);
- }
+ average += part_average / (port_params.n_pipes_per_subport * RTE_SCHED_QUEUES_PER_PIPE);
+ usleep(qavg_period);
+ }
- average /= qavg_ntimes;
+ average /= qavg_ntimes;
- printf("\nAverage queue size: %" PRIu32 " bytes.\n\n", average);
+ printf("\nAverage queue size: %" PRIu32 " bytes.\n\n", average);
- return 0;
+ return 0;
}
int
subport_stat(uint16_t port_id, uint32_t subport_id)
{
- struct rte_sched_subport_stats stats;
- struct rte_sched_port *port;
- uint32_t tc_ov[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
- uint8_t i;
-
- for (i = 0; i < nb_pfc; i++) {
- if (qos_conf[i].tx_port == port_id)
- break;
- }
- if (i == nb_pfc || subport_id >= port_params.n_subports_per_port)
- return -1;
-
- port = qos_conf[i].sched_port;
+ struct rte_sched_subport_stats stats;
+ struct rte_sched_port *port;
+ uint32_t tc_ov[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
+ uint8_t i;
+
+ for (i = 0; i < nb_pfc; i++) {
+ if (qos_conf[i].tx_port == port_id)
+ break;
+ }
+
+ if (i == nb_pfc || subport_id >= port_params.n_subports_per_port)
+ return -1;
+
+ port = qos_conf[i].sched_port;
memset (tc_ov, 0, sizeof(tc_ov));
- rte_sched_subport_read_stats(port, subport_id, &stats, tc_ov);
+ rte_sched_subport_read_stats(port, subport_id, &stats, tc_ov);
- printf("\n");
- printf("+----+-------------+-------------+-------------+-------------+-------------+\n");
- printf("| TC | Pkts OK |Pkts Dropped | Bytes OK |Bytes Dropped| OV Status |\n");
- printf("+----+-------------+-------------+-------------+-------------+-------------+\n");
+ printf("\n");
+ printf("+----+-------------+-------------+-------------+-------------+-------------+\n");
+ printf("| TC | Pkts OK |Pkts Dropped | Bytes OK |Bytes Dropped| OV Status |\n");
+ printf("+----+-------------+-------------+-------------+-------------+-------------+\n");
- for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) {
- printf("| %d | %11" PRIu32 " | %11" PRIu32 " | %11" PRIu32 " | %11" PRIu32 " | %11" PRIu32 " |\n", i,
- stats.n_pkts_tc[i], stats.n_pkts_tc_dropped[i],
- stats.n_bytes_tc[i], stats.n_bytes_tc_dropped[i], tc_ov[i]);
- printf("+----+-------------+-------------+-------------+-------------+-------------+\n");
- }
- printf("\n");
+ for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) {
+ printf("| %d | %11" PRIu32 " | %11" PRIu32 " | %11" PRIu32 " | %11" PRIu32 " | %11" PRIu32 " |\n", i,
+ stats.n_pkts_tc[i], stats.n_pkts_tc_dropped[i],
+ stats.n_bytes_tc[i], stats.n_bytes_tc_dropped[i], tc_ov[i]);
+ printf("+----+-------------+-------------+-------------+-------------+-------------+\n");
+ }
+ printf("\n");
- return 0;
+ return 0;
}
int
pipe_stat(uint16_t port_id, uint32_t subport_id, uint32_t pipe_id)
{
- struct rte_sched_queue_stats stats;
- struct rte_sched_port *port;
- uint16_t qlen;
- uint8_t i, j;
- uint32_t queue_id;
-
- for (i = 0; i < nb_pfc; i++) {
- if (qos_conf[i].tx_port == port_id)
- break;
- }
- if (i == nb_pfc || subport_id >= port_params.n_subports_per_port || pipe_id >= port_params.n_pipes_per_subport)
- return -1;
-
- port = qos_conf[i].sched_port;
-
- queue_id = RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE * RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS * (subport_id * port_params.n_pipes_per_subport + pipe_id);
-
- printf("\n");
- printf("+----+-------+-------------+-------------+-------------+-------------+-------------+\n");
- printf("| TC | Queue | Pkts OK |Pkts Dropped | Bytes OK |Bytes Dropped| Length |\n");
- printf("+----+-------+-------------+-------------+-------------+-------------+-------------+\n");
-
- for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) {
- for (j = 0; j < RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS; j++) {
-
- rte_sched_queue_read_stats(port, queue_id + (i * RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS + j), &stats, &qlen);
-
- printf("| %d | %d | %11" PRIu32 " | %11" PRIu32 " | %11" PRIu32 " | %11" PRIu32 " | %11i |\n", i, j,
- stats.n_pkts, stats.n_pkts_dropped, stats.n_bytes, stats.n_bytes_dropped, qlen);
- printf("+----+-------+-------------+-------------+-------------+-------------+-------------+\n");
- }
- if (i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE - 1)
- printf("+----+-------+-------------+-------------+-------------+-------------+-------------+\n");
- }
- printf("\n");
-
- return 0;
+ struct rte_sched_queue_stats stats;
+ struct rte_sched_port *port;
+ uint16_t qlen;
+ uint8_t i, j;
+ uint32_t queue_id = 0;
+
+ for (i = 0; i < nb_pfc; i++) {
+ if (qos_conf[i].tx_port == port_id)
+ break;
+ }
+
+ if (i == nb_pfc ||
+ subport_id >= port_params.n_subports_per_port ||
+ pipe_id >= port_params.n_pipes_per_subport)
+ return -1;
+
+ port = qos_conf[i].sched_port;
+ for (i = 0; i < subport_id; i++)
+ queue_id += port_params.n_pipes_per_subport * RTE_SCHED_QUEUES_PER_PIPE;
+
+ queue_id += pipe_id * RTE_SCHED_QUEUES_PER_PIPE;
+
+ printf("\n");
+ printf("+----+-------+-------------+-------------+-------------+-------------+-------------+\n");
+ printf("| TC | Queue | Pkts OK |Pkts Dropped | Bytes OK |Bytes Dropped| Length |\n");
+ printf("+----+-------+-------------+-------------+-------------+-------------+-------------+\n");
+
+ for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) {
+ if (i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE - 1) {
+ rte_sched_queue_read_stats(port, queue_id + i, &stats, &qlen);
+ printf("| %d | %d | %11" PRIu32 " | %11" PRIu32 " | %11" PRIu32 " | %11" PRIu32 " | %11i |\n", i, 0,
+ stats.n_pkts, stats.n_pkts_dropped, stats.n_bytes, stats.n_bytes_dropped, qlen);
+ printf("+----+-------+-------------+-------------+-------------+-------------+-------------+\n");
+ } else {
+ for (j = 0; j < RTE_SCHED_BE_QUEUES_PER_PIPE; j++) {
+ rte_sched_queue_read_stats(port, queue_id + i + j, &stats, &qlen);
+ printf("| %d | %d | %11" PRIu32 " | %11" PRIu32 " | %11" PRIu32 " | %11" PRIu32 " | %11i |\n", i, j,
+ stats.n_pkts, stats.n_pkts_dropped, stats.n_bytes, stats.n_bytes_dropped, qlen);
+ printf("+----+-------+-------------+-------------+-------------+-------------+-------------+\n");
+ }
+ }
+ }
+ printf("\n");
+
+ return 0;
}
--
2.21.0