* [PATCH v1] sched: enable/disable TC OV at runtime
From: Megha Ajmera @ 2022-03-10 19:54 UTC (permalink / raw)
To: dev, john.mcnamara, jasvinder.singh, cristian.dumitrescu,
ferruh.yigit, sham.singh.thakur
Added a new API to enable or disable TC oversubscription for the
best-effort traffic class at the subport level.
By default, TC OV is disabled for each subport.
Signed-off-by: Megha Ajmera <megha.ajmera@intel.com>
---
lib/sched/rte_sched.c | 182 +++++++++++++++++++++++++++++++++++-------
lib/sched/rte_sched.h | 18 +++++
lib/sched/version.map | 3 +
3 files changed, 172 insertions(+), 31 deletions(-)
diff --git a/lib/sched/rte_sched.c b/lib/sched/rte_sched.c
index ec74bee939..1d05089d00 100644
--- a/lib/sched/rte_sched.c
+++ b/lib/sched/rte_sched.c
@@ -155,6 +155,7 @@ struct rte_sched_subport {
uint64_t tc_credits[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
/* TC oversubscription */
+ uint8_t is_tc_ov_enabled;
uint64_t tc_ov_wm;
uint64_t tc_ov_wm_min;
uint64_t tc_ov_wm_max;
@@ -1165,6 +1166,45 @@ rte_sched_cman_config(struct rte_sched_port *port,
}
#endif
+int
+rte_sched_subport_tc_ov_config(struct rte_sched_port *port,
+ uint32_t subport_id,
+ bool tc_ov_enable)
+{
+ struct rte_sched_subport *s;
+ struct rte_sched_subport_profile *profile;
+
+ if (port == NULL) {
+ RTE_LOG(ERR, SCHED,
+ "%s: Incorrect value for parameter port\n", __func__);
+ return -EINVAL;
+ }
+
+ if (subport_id >= port->n_subports_per_port) {
+ RTE_LOG(ERR, SCHED,
+ "%s: Incorrect value for parameter subport id\n", __func__);
+ return -EINVAL;
+ }
+
+ s = port->subports[subport_id];
+ s->is_tc_ov_enabled = tc_ov_enable;
+
+ if (s->is_tc_ov_enabled) {
+ /* TC oversubscription */
+ s->tc_ov_wm_min = port->mtu;
+ s->tc_ov_period_id = 0;
+ s->tc_ov = 0;
+ s->tc_ov_n = 0;
+ s->tc_ov_rate = 0;
+
+ profile = port->subport_profiles + s->profile;
+ s->tc_ov_wm_max = rte_sched_time_ms_to_bytes(profile->tc_period,
+ s->pipe_tc_be_rate_max);
+ s->tc_ov_wm = s->tc_ov_wm_max;
+ }
+ return 0;
+}
+
int
rte_sched_subport_config(struct rte_sched_port *port,
uint32_t subport_id,
@@ -1317,12 +1357,8 @@ rte_sched_subport_config(struct rte_sched_port *port,
for (i = 0; i < RTE_SCHED_PORT_N_GRINDERS; i++)
s->grinder_base_bmp_pos[i] = RTE_SCHED_PIPE_INVALID;
- /* TC oversubscription */
- s->tc_ov_wm_min = port->mtu;
- s->tc_ov_period_id = 0;
- s->tc_ov = 0;
- s->tc_ov_n = 0;
- s->tc_ov_rate = 0;
+ /* TC over-subscription is disabled by default */
+ s->is_tc_ov_enabled = 0;
}
{
@@ -1342,9 +1378,6 @@ rte_sched_subport_config(struct rte_sched_port *port,
else
profile->tc_credits_per_period[i] = 0;
- s->tc_ov_wm_max = rte_sched_time_ms_to_bytes(profile->tc_period,
- s->pipe_tc_be_rate_max);
- s->tc_ov_wm = s->tc_ov_wm_max;
s->profile = subport_profile_id;
}
@@ -1417,17 +1450,20 @@ rte_sched_pipe_config(struct rte_sched_port *port,
double pipe_tc_be_rate =
(double) params->tc_credits_per_period[RTE_SCHED_TRAFFIC_CLASS_BE]
/ (double) params->tc_period;
- uint32_t tc_be_ov = s->tc_ov;
- /* Unplug pipe from its subport */
- s->tc_ov_n -= params->tc_ov_weight;
- s->tc_ov_rate -= pipe_tc_be_rate;
- s->tc_ov = s->tc_ov_rate > subport_tc_be_rate;
+ if (s->is_tc_ov_enabled) {
+ uint32_t tc_be_ov = s->tc_ov;
- if (s->tc_ov != tc_be_ov) {
- RTE_LOG(DEBUG, SCHED,
- "Subport %u Best-effort TC oversubscription is OFF (%.4lf >= %.4lf)\n",
- subport_id, subport_tc_be_rate, s->tc_ov_rate);
+ /* Unplug pipe from its subport */
+ s->tc_ov_n -= params->tc_ov_weight;
+ s->tc_ov_rate -= pipe_tc_be_rate;
+ s->tc_ov = s->tc_ov_rate > subport_tc_be_rate;
+
+ if (s->tc_ov != tc_be_ov) {
+ RTE_LOG(DEBUG, SCHED,
+ "Subport %u Best-effort TC oversubscription is OFF (%.4lf >= %.4lf)\n",
+ subport_id, subport_tc_be_rate, s->tc_ov_rate);
+ }
}
/* Reset the pipe */
@@ -1460,19 +1496,22 @@ rte_sched_pipe_config(struct rte_sched_port *port,
double pipe_tc_be_rate =
(double) params->tc_credits_per_period[RTE_SCHED_TRAFFIC_CLASS_BE]
/ (double) params->tc_period;
- uint32_t tc_be_ov = s->tc_ov;
- s->tc_ov_n += params->tc_ov_weight;
- s->tc_ov_rate += pipe_tc_be_rate;
- s->tc_ov = s->tc_ov_rate > subport_tc_be_rate;
+ if (s->is_tc_ov_enabled) {
+ uint32_t tc_be_ov = s->tc_ov;
- if (s->tc_ov != tc_be_ov) {
- RTE_LOG(DEBUG, SCHED,
- "Subport %u Best effort TC oversubscription is ON (%.4lf < %.4lf)\n",
- subport_id, subport_tc_be_rate, s->tc_ov_rate);
+ s->tc_ov_n += params->tc_ov_weight;
+ s->tc_ov_rate += pipe_tc_be_rate;
+ s->tc_ov = s->tc_ov_rate > subport_tc_be_rate;
+
+ if (s->tc_ov != tc_be_ov) {
+ RTE_LOG(DEBUG, SCHED,
+ "Subport %u Best effort TC oversubscription is ON (%.4lf < %.4lf)\n",
+ subport_id, subport_tc_be_rate, s->tc_ov_rate);
+ }
+ p->tc_ov_period_id = s->tc_ov_period_id;
+ p->tc_ov_credits = s->tc_ov_wm;
}
- p->tc_ov_period_id = s->tc_ov_period_id;
- p->tc_ov_credits = s->tc_ov_wm;
}
return 0;
@@ -2318,6 +2357,45 @@ grinder_credits_update(struct rte_sched_port *port,
pipe->tb_credits = RTE_MIN(pipe->tb_credits, params->tb_size);
pipe->tb_time += n_periods * params->tb_period;
+ /* Subport TCs */
+ if (unlikely(port->time >= subport->tc_time)) {
+ for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++)
+ subport->tc_credits[i] = sp->tc_credits_per_period[i];
+
+ subport->tc_time = port->time + sp->tc_period;
+ }
+
+ /* Pipe TCs */
+ if (unlikely(port->time >= pipe->tc_time)) {
+ for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++)
+ pipe->tc_credits[i] = params->tc_credits_per_period[i];
+ pipe->tc_time = port->time + params->tc_period;
+ }
+}
+
+static inline void
+grinder_credits_update_with_tc_ov(struct rte_sched_port *port,
+ struct rte_sched_subport *subport, uint32_t pos)
+{
+ struct rte_sched_grinder *grinder = subport->grinder + pos;
+ struct rte_sched_pipe *pipe = grinder->pipe;
+ struct rte_sched_pipe_profile *params = grinder->pipe_params;
+ struct rte_sched_subport_profile *sp = grinder->subport_params;
+ uint64_t n_periods;
+ uint32_t i;
+
+ /* Subport TB */
+ n_periods = (port->time - subport->tb_time) / sp->tb_period;
+ subport->tb_credits += n_periods * sp->tb_credits_per_period;
+ subport->tb_credits = RTE_MIN(subport->tb_credits, sp->tb_size);
+ subport->tb_time += n_periods * sp->tb_period;
+
+ /* Pipe TB */
+ n_periods = (port->time - pipe->tb_time) / params->tb_period;
+ pipe->tb_credits += n_periods * params->tb_credits_per_period;
+ pipe->tb_credits = RTE_MIN(pipe->tb_credits, params->tb_size);
+ pipe->tb_time += n_periods * params->tb_period;
+
/* Subport TCs */
if (unlikely(port->time >= subport->tc_time)) {
subport->tc_ov_wm =
@@ -2348,6 +2426,39 @@ grinder_credits_update(struct rte_sched_port *port,
static inline int
grinder_credits_check(struct rte_sched_port *port,
struct rte_sched_subport *subport, uint32_t pos)
+{
+ struct rte_sched_grinder *grinder = subport->grinder + pos;
+ struct rte_sched_pipe *pipe = grinder->pipe;
+ struct rte_mbuf *pkt = grinder->pkt;
+ uint32_t tc_index = grinder->tc_index;
+ uint64_t pkt_len = pkt->pkt_len + port->frame_overhead;
+ uint64_t subport_tb_credits = subport->tb_credits;
+ uint64_t subport_tc_credits = subport->tc_credits[tc_index];
+ uint64_t pipe_tb_credits = pipe->tb_credits;
+ uint64_t pipe_tc_credits = pipe->tc_credits[tc_index];
+ int enough_credits;
+
+ /* Check pipe and subport credits */
+ enough_credits = (pkt_len <= subport_tb_credits) &&
+ (pkt_len <= subport_tc_credits) &&
+ (pkt_len <= pipe_tb_credits) &&
+ (pkt_len <= pipe_tc_credits);
+
+ if (!enough_credits)
+ return 0;
+
+ /* Update pipe and subport credits */
+ subport->tb_credits -= pkt_len;
+ subport->tc_credits[tc_index] -= pkt_len;
+ pipe->tb_credits -= pkt_len;
+ pipe->tc_credits[tc_index] -= pkt_len;
+
+ return 1;
+}
+
+static inline int
+grinder_credits_check_with_tc_ov(struct rte_sched_port *port,
+ struct rte_sched_subport *subport, uint32_t pos)
{
struct rte_sched_grinder *grinder = subport->grinder + pos;
struct rte_sched_pipe *pipe = grinder->pipe;
@@ -2403,8 +2514,13 @@ grinder_schedule(struct rte_sched_port *port,
uint32_t pkt_len = pkt->pkt_len + port->frame_overhead;
uint32_t be_tc_active;
- if (!grinder_credits_check(port, subport, pos))
- return 0;
+ if (unlikely(subport->is_tc_ov_enabled)) {
+ if (!grinder_credits_check_with_tc_ov(port, subport, pos))
+ return 0;
+ } else {
+ if (!grinder_credits_check(port, subport, pos))
+ return 0;
+ }
/* Advance port time */
port->time += pkt_len;
@@ -2770,7 +2886,11 @@ grinder_handle(struct rte_sched_port *port,
subport->profile;
grinder_prefetch_tc_queue_arrays(subport, pos);
- grinder_credits_update(port, subport, pos);
+
+ if (unlikely(subport->is_tc_ov_enabled))
+ grinder_credits_update_with_tc_ov(port, subport, pos);
+ else
+ grinder_credits_update(port, subport, pos);
grinder->state = e_GRINDER_PREFETCH_MBUF;
return 0;
diff --git a/lib/sched/rte_sched.h b/lib/sched/rte_sched.h
index 5ece64e527..94febe1d94 100644
--- a/lib/sched/rte_sched.h
+++ b/lib/sched/rte_sched.h
@@ -579,6 +579,24 @@ rte_sched_port_enqueue(struct rte_sched_port *port, struct rte_mbuf **pkts, uint
int
rte_sched_port_dequeue(struct rte_sched_port *port, struct rte_mbuf **pkts, uint32_t n_pkts);
+/**
+ * Hierarchical scheduler subport TC OV enable/disable config.
+ * Note that this function is safe to call at runtime
+ * to enable/disable TC OV for a subport.
+ *
+ * @param port
+ * Handle to port scheduler instance
+ * @param subport_id
+ * Subport ID
+ * @param tc_ov_enable
+ * Boolean flag to enable/disable TC OV
+ * @return
+ * 0 upon success, error code otherwise
+ */
+__rte_experimental
+int
+rte_sched_subport_tc_ov_config(struct rte_sched_port *port, uint32_t subport_id, bool tc_ov_enable);
+
#ifdef __cplusplus
}
#endif
diff --git a/lib/sched/version.map b/lib/sched/version.map
index d22c07fc9f..c6e994d8df 100644
--- a/lib/sched/version.map
+++ b/lib/sched/version.map
@@ -34,4 +34,7 @@ EXPERIMENTAL {
# added in 21.11
rte_pie_rt_data_init;
rte_pie_config_init;
+
+ # added in 22.03
+ rte_sched_subport_tc_ov_config;
};
--
2.25.1
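
For reference, a minimal usage sketch of the new call. The helper name and the
plain printf() error handling are illustrative only; it assumes the port was
created with rte_sched_port_config() and the subport was configured with
rte_sched_subport_config(), which (with this patch applied) leaves TC OV
disabled by default.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #include <rte_sched.h>

    /* Toggle best-effort TC oversubscription for one subport at runtime. */
    static int
    app_set_tc_ov(struct rte_sched_port *port, uint32_t subport_id, bool enable)
    {
            int ret;

            ret = rte_sched_subport_tc_ov_config(port, subport_id, enable);
            if (ret != 0)
                    printf("subport %u: TC OV config failed (%d)\n",
                           subport_id, ret);

            return ret;
    }

    /* e.g. app_set_tc_ov(port, 0, true); after subport/pipe configuration */

On the datapath side, the flag selects grinder_credits_check_with_tc_ov() and
grinder_credits_update_with_tc_ov() only for subports that enabled it, so
subports that never turn it on keep the cheaper non-OV credit path.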
* Re: [PATCH v1] sched: enable/disable TC OV at runtime
From: Stephen Hemminger @ 2022-03-10 20:23 UTC (permalink / raw)
To: Megha Ajmera
Cc: dev, john.mcnamara, jasvinder.singh, cristian.dumitrescu,
ferruh.yigit, sham.singh.thakur
On Thu, 10 Mar 2022 19:54:26 +0000
Megha Ajmera <megha.ajmera@intel.com> wrote:
> diff --git a/lib/sched/rte_sched.c b/lib/sched/rte_sched.c
> index ec74bee939..1d05089d00 100644
> --- a/lib/sched/rte_sched.c
> +++ b/lib/sched/rte_sched.c
> @@ -155,6 +155,7 @@ struct rte_sched_subport {
> uint64_t tc_credits[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
>
> /* TC oversubscription */
> + uint8_t is_tc_ov_enabled;
> uint64_t tc_ov_wm;
> uint64_t tc_ov_wm_min;
> uint64_t tc_ov_wm_max;
Putting the field there creates a hole in the structure.
Put it after tc_ov and fill an existing hole.

This is the pahole output for the current code; it looks like this
struct could use some work to be better packed and aligned
(a sketch of the suggested placement follows the pahole output below).
struct rte_sched_port {
uint32_t n_subports_per_port; /* 0 4 */
uint32_t n_pipes_per_subport; /* 4 4 */
uint32_t n_pipes_per_subport_log2; /* 8 4 */
uint16_t pipe_queue[13]; /* 12 26 */
uint8_t pipe_tc[16]; /* 38 16 */
uint8_t tc_queue[16]; /* 54 16 */
/* XXX 2 bytes hole, try to pack */
/* --- cacheline 1 boundary (64 bytes) was 8 bytes ago --- */
uint32_t n_subport_profiles; /* 72 4 */
uint32_t n_max_subport_profiles; /* 76 4 */
uint64_t rate; /* 80 8 */
uint32_t mtu; /* 88 4 */
uint32_t frame_overhead; /* 92 4 */
int socket; /* 96 4 */
/* XXX 4 bytes hole, try to pack */
uint64_t time_cpu_cycles; /* 104 8 */
uint64_t time_cpu_bytes; /* 112 8 */
uint64_t time; /* 120 8 */
/* --- cacheline 2 boundary (128 bytes) --- */
struct rte_reciprocal inv_cycles_per_byte; /* 128 8 */
/* XXX last struct has 2 bytes of padding */
uint64_t cycles_per_byte; /* 136 8 */
struct rte_mbuf * * pkts_out; /* 144 8 */
uint32_t n_pkts_out; /* 152 4 */
uint32_t subport_id; /* 156 4 */
struct rte_sched_subport_profile * subport_profiles; /* 160 8 */
/* XXX 24 bytes hole, try to pack */
/* --- cacheline 3 boundary (192 bytes) --- */
struct rte_sched_subport * subports[] __attribute__((__aligned__(64))); /* 192 0 */
/* size: 192, cachelines: 3, members: 22 */
/* sum members: 162, holes: 3, sum holes: 30 */
/* paddings: 1, sum paddings: 2 */
/* forced alignments: 1, forced holes: 1, sum forced holes: 24 */
} __attribute__((__aligned__(64)));
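
The pahole above is for struct rte_sched_port; the new flag itself lives in
struct rte_sched_subport. A sketch of the placement being suggested there:
the member order and types around the flag are assumed, only the field names
come from the patch context.

    #include <stdint.h>

    /* Illustrative fragment of struct rte_sched_subport, not a drop-in diff */
    struct subport_tc_ov_fragment {
            /* TC oversubscription */
            uint64_t tc_ov_wm;
            uint64_t tc_ov_wm_min;
            uint64_t tc_ov_wm_max;
            uint8_t tc_ov_period_id;
            uint8_t tc_ov;
            uint8_t is_tc_ov_enabled; /* fits in padding before the next 4-byte member */
            uint32_t tc_ov_n;
            double tc_ov_rate;
    };

Placed like this, the flag consumes one byte of existing padding instead of
forcing 7 bytes of new padding ahead of the uint64_t tc_ov_wm member.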
* RE: [PATCH v1] sched: enable/disable TC OV at runtime
From: Ajmera, Megha @ 2022-03-11 6:24 UTC (permalink / raw)
To: Stephen Hemminger
Cc: dev, Mcnamara, John, Singh, Jasvinder, Dumitrescu, Cristian,
Yigit, Ferruh, Thakur, Sham Singh
>
> > diff --git a/lib/sched/rte_sched.c b/lib/sched/rte_sched.c index
> > ec74bee939..1d05089d00 100644
> > --- a/lib/sched/rte_sched.c
> > +++ b/lib/sched/rte_sched.c
> > @@ -155,6 +155,7 @@ struct rte_sched_subport {
> > uint64_t tc_credits[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
> >
> > /* TC oversubscription */
> > + uint8_t is_tc_ov_enabled;
> > uint64_t tc_ov_wm;
> > uint64_t tc_ov_wm_min;
> > uint64_t tc_ov_wm_max;
>
> Putting the field there creates a hole in the structure.
> Put it after tc_ov and fill an existing hole.
>
> This is pahole of current code, looks like this struct could use some work to be
> better packed and aligned.
>
Thanks Stephen for pointing this out. I agree some work is needed to pack this structure better.
Can we take this up in a separate patch in a later release, as it requires more performance test runs to see the impact?
* Re: [PATCH v1] sched: enable/disable TC OV at runtime
From: Stephen Hemminger @ 2022-03-11 17:13 UTC (permalink / raw)
To: Ajmera, Megha
Cc: dev, Mcnamara, John, Singh, Jasvinder, Dumitrescu, Cristian,
Yigit, Ferruh, Thakur, Sham Singh
On Fri, 11 Mar 2022 06:24:35 +0000
"Ajmera, Megha" <megha.ajmera@intel.com> wrote:
> >
> > > diff --git a/lib/sched/rte_sched.c b/lib/sched/rte_sched.c index
> > > ec74bee939..1d05089d00 100644
> > > --- a/lib/sched/rte_sched.c
> > > +++ b/lib/sched/rte_sched.c
> > > @@ -155,6 +155,7 @@ struct rte_sched_subport {
> > > uint64_t tc_credits[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
> > >
> > > /* TC oversubscription */
> > > + uint8_t is_tc_ov_enabled;
> > > uint64_t tc_ov_wm;
> > > uint64_t tc_ov_wm_min;
> > > uint64_t tc_ov_wm_max;
> >
> > Putting the field there creates a hole in the structure.
> > Put it after tc_ov and fill an existing hole.
> >
> > This is pahole of current code, looks like this struct could use some work to be
> > better packed and aligned.
> >
>
> Thanks Stephen for pointing this out. I agree there is some work needed to pack this structure better.
> Can we take this up in a separate patch in later release as it requires more performance test runs to see the impact ?
>
Please add the new field in the existing hole.