* [PATCH] Revert "sched: enable traffic class oversubscription unconditionally"
@ 2022-03-14 9:59 Megha Ajmera
2022-03-14 10:32 ` Kevin Traynor
2022-03-14 10:47 ` [PATCH] " Dumitrescu, Cristian
0 siblings, 2 replies; 8+ messages in thread
From: Megha Ajmera @ 2022-03-14 9:59 UTC (permalink / raw)
To: dev, john.mcnamara, jasvinder.singh, cristian.dumitrescu,
sham.singh.thakur
This reverts commit d91c4b1bb5a938734fe8e66da8f965304919f38e.
When enabling TC OV unconditionally, it is observed the performance
drops by ~20% hence reverting this commit.
Signed-off-by: Megha Ajmera <megha.ajmera@intel.com>
---
config/rte_config.h | 1 +
drivers/net/softnic/rte_eth_softnic_tm.c | 18 +++++
examples/qos_sched/init.c | 2 +
lib/sched/rte_sched.c | 90 ++++++++++++++++++++++++
4 files changed, 111 insertions(+)
diff --git a/config/rte_config.h b/config/rte_config.h
index 8eb29c1525..de6fea5b67 100644
--- a/config/rte_config.h
+++ b/config/rte_config.h
@@ -90,6 +90,7 @@
/* rte_sched defines */
#undef RTE_SCHED_CMAN
+#undef RTE_SCHED_SUBPORT_TC_OV
/* rte_graph defines */
#define RTE_GRAPH_BURST_SIZE 256
diff --git a/drivers/net/softnic/rte_eth_softnic_tm.c b/drivers/net/softnic/rte_eth_softnic_tm.c
index 6a7766ba1c..e74092ce7f 100644
--- a/drivers/net/softnic/rte_eth_softnic_tm.c
+++ b/drivers/net/softnic/rte_eth_softnic_tm.c
@@ -595,9 +595,15 @@ static const struct rte_tm_level_capabilities tm_level_cap[] = {
.sched_sp_n_priorities_max = 1,
.sched_wfq_n_children_per_group_max = UINT32_MAX,
.sched_wfq_n_groups_max = 1,
+#ifdef RTE_SCHED_SUBPORT_TC_OV
.sched_wfq_weight_max = UINT32_MAX,
.sched_wfq_packet_mode_supported = 0,
.sched_wfq_byte_mode_supported = 1,
+#else
+ .sched_wfq_weight_max = 1,
+ .sched_wfq_packet_mode_supported = 0,
+ .sched_wfq_byte_mode_supported = 1,
+#endif
.stats_mask = STATS_MASK_DEFAULT,
} },
@@ -2822,6 +2828,8 @@ pmd_tm_hierarchy_commit(struct rte_eth_dev *dev,
return 0;
}
+#ifdef RTE_SCHED_SUBPORT_TC_OV
+
static int
update_pipe_weight(struct rte_eth_dev *dev, struct tm_node *np, uint32_t weight)
{
@@ -2859,6 +2867,8 @@ update_pipe_weight(struct rte_eth_dev *dev, struct tm_node *np, uint32_t weight)
return 0;
}
+#endif
+
static int
update_queue_weight(struct rte_eth_dev *dev,
struct tm_node *nq, uint32_t weight)
@@ -2973,6 +2983,7 @@ pmd_tm_node_parent_update(struct rte_eth_dev *dev,
rte_strerror(EINVAL));
/* fall-through */
case TM_NODE_LEVEL_PIPE:
+#ifdef RTE_SCHED_SUBPORT_TC_OV
if (update_pipe_weight(dev, n, weight))
return -rte_tm_error_set(error,
EINVAL,
@@ -2980,6 +2991,13 @@ pmd_tm_node_parent_update(struct rte_eth_dev *dev,
NULL,
rte_strerror(EINVAL));
return 0;
+#else
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_WEIGHT,
+ NULL,
+ rte_strerror(EINVAL));
+#endif
/* fall-through */
case TM_NODE_LEVEL_TC:
return -rte_tm_error_set(error,
diff --git a/examples/qos_sched/init.c b/examples/qos_sched/init.c
index 8a0fb8a374..3c1f0bc680 100644
--- a/examples/qos_sched/init.c
+++ b/examples/qos_sched/init.c
@@ -183,7 +183,9 @@ static struct rte_sched_pipe_params pipe_profiles[MAX_SCHED_PIPE_PROFILES] = {
.tc_rate = {305175, 305175, 305175, 305175, 305175, 305175,
305175, 305175, 305175, 305175, 305175, 305175, 305175},
.tc_period = 40,
+#ifdef RTE_SCHED_SUBPORT_TC_OV
.tc_ov_weight = 1,
+#endif
.wrr_weights = {1, 1, 1, 1},
},
diff --git a/lib/sched/rte_sched.c b/lib/sched/rte_sched.c
index ec74bee939..7298df9e90 100644
--- a/lib/sched/rte_sched.c
+++ b/lib/sched/rte_sched.c
@@ -1317,12 +1317,14 @@ rte_sched_subport_config(struct rte_sched_port *port,
for (i = 0; i < RTE_SCHED_PORT_N_GRINDERS; i++)
s->grinder_base_bmp_pos[i] = RTE_SCHED_PIPE_INVALID;
+#ifdef RTE_SCHED_SUBPORT_TC_OV
/* TC oversubscription */
s->tc_ov_wm_min = port->mtu;
s->tc_ov_period_id = 0;
s->tc_ov = 0;
s->tc_ov_n = 0;
s->tc_ov_rate = 0;
+#endif
}
{
@@ -1342,9 +1344,11 @@ rte_sched_subport_config(struct rte_sched_port *port,
else
profile->tc_credits_per_period[i] = 0;
+#ifdef RTE_SCHED_SUBPORT_TC_OV
s->tc_ov_wm_max = rte_sched_time_ms_to_bytes(profile->tc_period,
s->pipe_tc_be_rate_max);
s->tc_ov_wm = s->tc_ov_wm_max;
+#endif
s->profile = subport_profile_id;
}
@@ -2251,6 +2255,50 @@ rte_sched_port_enqueue(struct rte_sched_port *port, struct rte_mbuf **pkts,
return result;
}
+#ifndef RTE_SCHED_SUBPORT_TC_OV
+
+static inline void
+grinder_credits_update(struct rte_sched_port *port,
+ struct rte_sched_subport *subport, uint32_t pos)
+{
+ struct rte_sched_grinder *grinder = subport->grinder + pos;
+ struct rte_sched_pipe *pipe = grinder->pipe;
+ struct rte_sched_pipe_profile *params = grinder->pipe_params;
+ struct rte_sched_subport_profile *sp = grinder->subport_params;
+ uint64_t n_periods;
+ uint32_t i;
+
+ /* Subport TB */
+ n_periods = (port->time - subport->tb_time) / sp->tb_period;
+ subport->tb_credits += n_periods * sp->tb_credits_per_period;
+ subport->tb_credits = RTE_MIN(subport->tb_credits, sp->tb_size);
+ subport->tb_time += n_periods * sp->tb_period;
+
+ /* Pipe TB */
+ n_periods = (port->time - pipe->tb_time) / params->tb_period;
+ pipe->tb_credits += n_periods * params->tb_credits_per_period;
+ pipe->tb_credits = RTE_MIN(pipe->tb_credits, params->tb_size);
+ pipe->tb_time += n_periods * params->tb_period;
+
+ /* Subport TCs */
+ if (unlikely(port->time >= subport->tc_time)) {
+ for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++)
+ subport->tc_credits[i] = sp->tc_credits_per_period[i];
+
+ subport->tc_time = port->time + sp->tc_period;
+ }
+
+ /* Pipe TCs */
+ if (unlikely(port->time >= pipe->tc_time)) {
+ for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++)
+ pipe->tc_credits[i] = params->tc_credits_per_period[i];
+
+ pipe->tc_time = port->time + params->tc_period;
+ }
+}
+
+#else
+
static inline uint64_t
grinder_tc_ov_credits_update(struct rte_sched_port *port,
struct rte_sched_subport *subport, uint32_t pos)
@@ -2345,6 +2393,46 @@ grinder_credits_update(struct rte_sched_port *port,
}
}
+#endif /* RTE_SCHED_SUBPORT_TC_OV */
+
+
+#ifndef RTE_SCHED_SUBPORT_TC_OV
+
+static inline int
+grinder_credits_check(struct rte_sched_port *port,
+ struct rte_sched_subport *subport, uint32_t pos)
+{
+ struct rte_sched_grinder *grinder = subport->grinder + pos;
+ struct rte_sched_pipe *pipe = grinder->pipe;
+ struct rte_mbuf *pkt = grinder->pkt;
+ uint32_t tc_index = grinder->tc_index;
+ uint64_t pkt_len = pkt->pkt_len + port->frame_overhead;
+ uint64_t subport_tb_credits = subport->tb_credits;
+ uint64_t subport_tc_credits = subport->tc_credits[tc_index];
+ uint64_t pipe_tb_credits = pipe->tb_credits;
+ uint64_t pipe_tc_credits = pipe->tc_credits[tc_index];
+ int enough_credits;
+
+ /* Check queue credits */
+ enough_credits = (pkt_len <= subport_tb_credits) &&
+ (pkt_len <= subport_tc_credits) &&
+ (pkt_len <= pipe_tb_credits) &&
+ (pkt_len <= pipe_tc_credits);
+
+ if (!enough_credits)
+ return 0;
+
+ /* Update port credits */
+ subport->tb_credits -= pkt_len;
+ subport->tc_credits[tc_index] -= pkt_len;
+ pipe->tb_credits -= pkt_len;
+ pipe->tc_credits[tc_index] -= pkt_len;
+
+ return 1;
+}
+
+#else
+
static inline int
grinder_credits_check(struct rte_sched_port *port,
struct rte_sched_subport *subport, uint32_t pos)
@@ -2391,6 +2479,8 @@ grinder_credits_check(struct rte_sched_port *port,
return 1;
}
+#endif /* RTE_SCHED_SUBPORT_TC_OV */
+
static inline int
grinder_schedule(struct rte_sched_port *port,
--
2.25.1
^ permalink raw reply [flat|nested] 8+ messages in thread
* Re: [PATCH] Revert "sched: enable traffic class oversubscription unconditionally"
2022-03-14 9:59 [PATCH] Revert "sched: enable traffic class oversubscription unconditionally" Megha Ajmera
@ 2022-03-14 10:32 ` Kevin Traynor
2022-03-14 12:27 ` [PATCH v2] " Megha Ajmera
2022-03-14 10:47 ` [PATCH] " Dumitrescu, Cristian
1 sibling, 1 reply; 8+ messages in thread
From: Kevin Traynor @ 2022-03-14 10:32 UTC (permalink / raw)
To: Megha Ajmera, dev, john.mcnamara, jasvinder.singh,
cristian.dumitrescu, sham.singh.thakur
On 14/03/2022 09:59, Megha Ajmera wrote:
> This reverts commit d91c4b1bb5a938734fe8e66da8f965304919f38e.
>
> When enabling TC OV unconditionally, it is observed the performance
> drops by ~20% hence reverting this commit.
>
Fixes: d91c4b1bb5a9 ("sched: enable traffic class oversubscription unconditionally")
> Signed-off-by: Megha Ajmera <megha.ajmera@intel.com>
> ---
Even though there is already a reference to the commit, it is better to
add the DPDK style 'Fixes:' tag too for consistency and stable
maintainers. Thanks.
^ permalink raw reply [flat|nested] 8+ messages in thread
* RE: [PATCH] Revert "sched: enable traffic class oversubscription unconditionally"
2022-03-14 9:59 [PATCH] Revert "sched: enable traffic class oversubscription unconditionally" Megha Ajmera
2022-03-14 10:32 ` Kevin Traynor
@ 2022-03-14 10:47 ` Dumitrescu, Cristian
1 sibling, 0 replies; 8+ messages in thread
From: Dumitrescu, Cristian @ 2022-03-14 10:47 UTC (permalink / raw)
To: Ajmera, Megha, dev, Mcnamara, John, Singh, Jasvinder, Thakur, Sham Singh
> -----Original Message-----
> From: Ajmera, Megha <megha.ajmera@intel.com>
> Sent: Monday, March 14, 2022 9:59 AM
> To: dev@dpdk.org; Mcnamara, John <john.mcnamara@intel.com>; Singh,
> Jasvinder <jasvinder.singh@intel.com>; Dumitrescu, Cristian
> <cristian.dumitrescu@intel.com>; Thakur, Sham Singh
> <sham.singh.thakur@intel.com>
> Subject: [PATCH] Revert "sched: enable traffic class oversubscription
> unconditionally"
>
> This reverts commit d91c4b1bb5a938734fe8e66da8f965304919f38e.
>
> When enabling TC OV unconditionally, it is observed the performance
> drops by ~20% hence reverting this commit.
>
> Signed-off-by: Megha Ajmera <megha.ajmera@intel.com>
> ---
Acked-by: Cristian Dumitrescu <cristian.dumitrescu@intel.com>
We are reworking the original patch into a run-time configurable option and will resend the improved version shortly during the 22.07 development cycle.
^ permalink raw reply [flat|nested] 8+ messages in thread
* [PATCH v2] Revert "sched: enable traffic class oversubscription unconditionally"
2022-03-14 10:32 ` Kevin Traynor
@ 2022-03-14 12:27 ` Megha Ajmera
2022-03-15 11:22 ` Thomas Monjalon
0 siblings, 1 reply; 8+ messages in thread
From: Megha Ajmera @ 2022-03-14 12:27 UTC (permalink / raw)
To: dev, john.mcnamara, jasvinder.singh, cristian.dumitrescu,
sham.singh.thakur
This reverts commit d91c4b1bb5a938734fe8e66da8f965304919f38e.
When enabling TC OV unconditionally, it is observed the performance
drops by ~20% hence reverting this commit.
Fixes: d91c4b1bb5a9 ("sched: enable traffic class oversubscription unconditionally")
Signed-off-by: Megha Ajmera <megha.ajmera@intel.com>
---
config/rte_config.h | 1 +
drivers/net/softnic/rte_eth_softnic_tm.c | 18 +++++
examples/qos_sched/init.c | 2 +
lib/sched/rte_sched.c | 90 ++++++++++++++++++++++++
4 files changed, 111 insertions(+)
diff --git a/config/rte_config.h b/config/rte_config.h
index 8eb29c1525..de6fea5b67 100644
--- a/config/rte_config.h
+++ b/config/rte_config.h
@@ -90,6 +90,7 @@
/* rte_sched defines */
#undef RTE_SCHED_CMAN
+#undef RTE_SCHED_SUBPORT_TC_OV
/* rte_graph defines */
#define RTE_GRAPH_BURST_SIZE 256
diff --git a/drivers/net/softnic/rte_eth_softnic_tm.c b/drivers/net/softnic/rte_eth_softnic_tm.c
index 6a7766ba1c..e74092ce7f 100644
--- a/drivers/net/softnic/rte_eth_softnic_tm.c
+++ b/drivers/net/softnic/rte_eth_softnic_tm.c
@@ -595,9 +595,15 @@ static const struct rte_tm_level_capabilities tm_level_cap[] = {
.sched_sp_n_priorities_max = 1,
.sched_wfq_n_children_per_group_max = UINT32_MAX,
.sched_wfq_n_groups_max = 1,
+#ifdef RTE_SCHED_SUBPORT_TC_OV
.sched_wfq_weight_max = UINT32_MAX,
.sched_wfq_packet_mode_supported = 0,
.sched_wfq_byte_mode_supported = 1,
+#else
+ .sched_wfq_weight_max = 1,
+ .sched_wfq_packet_mode_supported = 0,
+ .sched_wfq_byte_mode_supported = 1,
+#endif
.stats_mask = STATS_MASK_DEFAULT,
} },
@@ -2822,6 +2828,8 @@ pmd_tm_hierarchy_commit(struct rte_eth_dev *dev,
return 0;
}
+#ifdef RTE_SCHED_SUBPORT_TC_OV
+
static int
update_pipe_weight(struct rte_eth_dev *dev, struct tm_node *np, uint32_t weight)
{
@@ -2859,6 +2867,8 @@ update_pipe_weight(struct rte_eth_dev *dev, struct tm_node *np, uint32_t weight)
return 0;
}
+#endif
+
static int
update_queue_weight(struct rte_eth_dev *dev,
struct tm_node *nq, uint32_t weight)
@@ -2973,6 +2983,7 @@ pmd_tm_node_parent_update(struct rte_eth_dev *dev,
rte_strerror(EINVAL));
/* fall-through */
case TM_NODE_LEVEL_PIPE:
+#ifdef RTE_SCHED_SUBPORT_TC_OV
if (update_pipe_weight(dev, n, weight))
return -rte_tm_error_set(error,
EINVAL,
@@ -2980,6 +2991,13 @@ pmd_tm_node_parent_update(struct rte_eth_dev *dev,
NULL,
rte_strerror(EINVAL));
return 0;
+#else
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_WEIGHT,
+ NULL,
+ rte_strerror(EINVAL));
+#endif
/* fall-through */
case TM_NODE_LEVEL_TC:
return -rte_tm_error_set(error,
diff --git a/examples/qos_sched/init.c b/examples/qos_sched/init.c
index 8a0fb8a374..3c1f0bc680 100644
--- a/examples/qos_sched/init.c
+++ b/examples/qos_sched/init.c
@@ -183,7 +183,9 @@ static struct rte_sched_pipe_params pipe_profiles[MAX_SCHED_PIPE_PROFILES] = {
.tc_rate = {305175, 305175, 305175, 305175, 305175, 305175,
305175, 305175, 305175, 305175, 305175, 305175, 305175},
.tc_period = 40,
+#ifdef RTE_SCHED_SUBPORT_TC_OV
.tc_ov_weight = 1,
+#endif
.wrr_weights = {1, 1, 1, 1},
},
diff --git a/lib/sched/rte_sched.c b/lib/sched/rte_sched.c
index ec74bee939..7298df9e90 100644
--- a/lib/sched/rte_sched.c
+++ b/lib/sched/rte_sched.c
@@ -1317,12 +1317,14 @@ rte_sched_subport_config(struct rte_sched_port *port,
for (i = 0; i < RTE_SCHED_PORT_N_GRINDERS; i++)
s->grinder_base_bmp_pos[i] = RTE_SCHED_PIPE_INVALID;
+#ifdef RTE_SCHED_SUBPORT_TC_OV
/* TC oversubscription */
s->tc_ov_wm_min = port->mtu;
s->tc_ov_period_id = 0;
s->tc_ov = 0;
s->tc_ov_n = 0;
s->tc_ov_rate = 0;
+#endif
}
{
@@ -1342,9 +1344,11 @@ rte_sched_subport_config(struct rte_sched_port *port,
else
profile->tc_credits_per_period[i] = 0;
+#ifdef RTE_SCHED_SUBPORT_TC_OV
s->tc_ov_wm_max = rte_sched_time_ms_to_bytes(profile->tc_period,
s->pipe_tc_be_rate_max);
s->tc_ov_wm = s->tc_ov_wm_max;
+#endif
s->profile = subport_profile_id;
}
@@ -2251,6 +2255,50 @@ rte_sched_port_enqueue(struct rte_sched_port *port, struct rte_mbuf **pkts,
return result;
}
+#ifndef RTE_SCHED_SUBPORT_TC_OV
+
+static inline void
+grinder_credits_update(struct rte_sched_port *port,
+ struct rte_sched_subport *subport, uint32_t pos)
+{
+ struct rte_sched_grinder *grinder = subport->grinder + pos;
+ struct rte_sched_pipe *pipe = grinder->pipe;
+ struct rte_sched_pipe_profile *params = grinder->pipe_params;
+ struct rte_sched_subport_profile *sp = grinder->subport_params;
+ uint64_t n_periods;
+ uint32_t i;
+
+ /* Subport TB */
+ n_periods = (port->time - subport->tb_time) / sp->tb_period;
+ subport->tb_credits += n_periods * sp->tb_credits_per_period;
+ subport->tb_credits = RTE_MIN(subport->tb_credits, sp->tb_size);
+ subport->tb_time += n_periods * sp->tb_period;
+
+ /* Pipe TB */
+ n_periods = (port->time - pipe->tb_time) / params->tb_period;
+ pipe->tb_credits += n_periods * params->tb_credits_per_period;
+ pipe->tb_credits = RTE_MIN(pipe->tb_credits, params->tb_size);
+ pipe->tb_time += n_periods * params->tb_period;
+
+ /* Subport TCs */
+ if (unlikely(port->time >= subport->tc_time)) {
+ for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++)
+ subport->tc_credits[i] = sp->tc_credits_per_period[i];
+
+ subport->tc_time = port->time + sp->tc_period;
+ }
+
+ /* Pipe TCs */
+ if (unlikely(port->time >= pipe->tc_time)) {
+ for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++)
+ pipe->tc_credits[i] = params->tc_credits_per_period[i];
+
+ pipe->tc_time = port->time + params->tc_period;
+ }
+}
+
+#else
+
static inline uint64_t
grinder_tc_ov_credits_update(struct rte_sched_port *port,
struct rte_sched_subport *subport, uint32_t pos)
@@ -2345,6 +2393,46 @@ grinder_credits_update(struct rte_sched_port *port,
}
}
+#endif /* RTE_SCHED_SUBPORT_TC_OV */
+
+
+#ifndef RTE_SCHED_SUBPORT_TC_OV
+
+static inline int
+grinder_credits_check(struct rte_sched_port *port,
+ struct rte_sched_subport *subport, uint32_t pos)
+{
+ struct rte_sched_grinder *grinder = subport->grinder + pos;
+ struct rte_sched_pipe *pipe = grinder->pipe;
+ struct rte_mbuf *pkt = grinder->pkt;
+ uint32_t tc_index = grinder->tc_index;
+ uint64_t pkt_len = pkt->pkt_len + port->frame_overhead;
+ uint64_t subport_tb_credits = subport->tb_credits;
+ uint64_t subport_tc_credits = subport->tc_credits[tc_index];
+ uint64_t pipe_tb_credits = pipe->tb_credits;
+ uint64_t pipe_tc_credits = pipe->tc_credits[tc_index];
+ int enough_credits;
+
+ /* Check queue credits */
+ enough_credits = (pkt_len <= subport_tb_credits) &&
+ (pkt_len <= subport_tc_credits) &&
+ (pkt_len <= pipe_tb_credits) &&
+ (pkt_len <= pipe_tc_credits);
+
+ if (!enough_credits)
+ return 0;
+
+ /* Update port credits */
+ subport->tb_credits -= pkt_len;
+ subport->tc_credits[tc_index] -= pkt_len;
+ pipe->tb_credits -= pkt_len;
+ pipe->tc_credits[tc_index] -= pkt_len;
+
+ return 1;
+}
+
+#else
+
static inline int
grinder_credits_check(struct rte_sched_port *port,
struct rte_sched_subport *subport, uint32_t pos)
@@ -2391,6 +2479,8 @@ grinder_credits_check(struct rte_sched_port *port,
return 1;
}
+#endif /* RTE_SCHED_SUBPORT_TC_OV */
+
static inline int
grinder_schedule(struct rte_sched_port *port,
--
2.25.1
^ permalink raw reply [flat|nested] 8+ messages in thread
* Re: [PATCH v2] Revert "sched: enable traffic class oversubscription unconditionally"
2022-03-14 12:27 ` [PATCH v2] " Megha Ajmera
@ 2022-03-15 11:22 ` Thomas Monjalon
2022-03-15 17:25 ` Dumitrescu, Cristian
0 siblings, 1 reply; 8+ messages in thread
From: Thomas Monjalon @ 2022-03-15 11:22 UTC (permalink / raw)
To: Megha Ajmera
Cc: dev, john.mcnamara, jasvinder.singh, cristian.dumitrescu,
sham.singh.thakur, david.marchand
14/03/2022 13:27, Megha Ajmera:
> This reverts commit d91c4b1bb5a938734fe8e66da8f965304919f38e.
>
> When enabling TC OV unconditionally, it is observed the performance
> drops by ~20% hence reverting this commit.
>
> Fixes: d91c4b1bb5a9 ("sched: enable traffic class oversubscription
> unconditionally")
>
> Signed-off-by: Megha Ajmera <megha.ajmera@intel.com>
Repeating what was suggested yesterday in a private email.
Sorry Megha, I don't know why you were not Cc'ed by your Intel colleagues.
David and I suggested to drop the code which was enabled
by the compilation flag RTE_SCHED_SUBPORT_TC_OV,
which was kind of dead code before enabling it unconditionally.
This way you maintain the performance of the default compilation,
and you can re-introduce the feature, if proven useful,
in the next release with a runtime option.
^ permalink raw reply [flat|nested] 8+ messages in thread
* RE: [PATCH v2] Revert "sched: enable traffic class oversubscription unconditionally"
2022-03-15 11:22 ` Thomas Monjalon
@ 2022-03-15 17:25 ` Dumitrescu, Cristian
2022-03-15 17:25 ` Singh, Jasvinder
0 siblings, 1 reply; 8+ messages in thread
From: Dumitrescu, Cristian @ 2022-03-15 17:25 UTC (permalink / raw)
To: Thomas Monjalon, Ajmera, Megha
Cc: dev, Mcnamara, John, Singh, Jasvinder, Thakur, Sham Singh,
david.marchand
Hi Thomas,
> -----Original Message-----
> From: Thomas Monjalon <thomas@monjalon.net>
> Sent: Tuesday, March 15, 2022 11:23 AM
> To: Ajmera, Megha <megha.ajmera@intel.com>
> Cc: dev@dpdk.org; Mcnamara, John <john.mcnamara@intel.com>; Singh,
> Jasvinder <jasvinder.singh@intel.com>; Dumitrescu, Cristian
> <cristian.dumitrescu@intel.com>; Thakur, Sham Singh
> <sham.singh.thakur@intel.com>; david.marchand@redhat.com
> Subject: Re: [PATCH v2] Revert "sched: enable traffic class oversubscription
> unconditionally"
>
> 14/03/2022 13:27, Megha Ajmera:
> > This reverts commit d91c4b1bb5a938734fe8e66da8f965304919f38e.
> >
> > When enabling TC OV unconditionally, it is observed the performance
> > drops by ~20% hence reverting this commit.
> >
> > Fixes: d91c4b1bb5a9 ("sched: enable traffic class oversubscription
> > unconditionally")
> >
> > Signed-off-by: Megha Ajmera <megha.ajmera@intel.com>
>
> Repeating what was suggested yesterday in a private email.
> Sorry Megha, I don't know why you were not Cc'ed by your Intel colleagues.
>
> David and I suggested to drop the code which was enabled
> by the compilation flag RTE_SCHED_SUBPORT_TC_OV,
> which was kind of dead code before enabling it unconditionally.
> This way you maintain the performance of the default compilation,
> and you can re-introduce the feature, if proven useful,
> in the next release with a runtime option.
>
>
After talking with Jasvinder and a few other folks, we think the best option at this point is to keep the code as it is now in the repository, so no further change at this point.
There is a small performance glitch that we plan to fix shortly after the 22.03 release by making the Traffic Class Oversubscription feature conditionally enabled at run-time as opposed to enabled unconditionally. Megha already sent a patch on this, which is under review.
Is this OK with you?
Thank you for your help, we don't want to delay the release in any way!
Regards,
Cristian
^ permalink raw reply [flat|nested] 8+ messages in thread
* RE: [PATCH v2] Revert "sched: enable traffic class oversubscription unconditionally"
2022-03-15 17:25 ` Dumitrescu, Cristian
@ 2022-03-15 17:25 ` Singh, Jasvinder
2022-03-15 17:35 ` Thomas Monjalon
0 siblings, 1 reply; 8+ messages in thread
From: Singh, Jasvinder @ 2022-03-15 17:25 UTC (permalink / raw)
To: Dumitrescu, Cristian, Thomas Monjalon, Ajmera, Megha
Cc: dev, Mcnamara, John, Thakur, Sham Singh, david.marchand
> -----Original Message-----
> From: Dumitrescu, Cristian <cristian.dumitrescu@intel.com>
> Sent: Tuesday, March 15, 2022 5:25 PM
> To: Thomas Monjalon <thomas@monjalon.net>; Ajmera, Megha
> <megha.ajmera@intel.com>
> Cc: dev@dpdk.org; Mcnamara, John <john.mcnamara@intel.com>; Singh,
> Jasvinder <jasvinder.singh@intel.com>; Thakur, Sham Singh
> <sham.singh.thakur@intel.com>; david.marchand@redhat.com
> Subject: RE: [PATCH v2] Revert "sched: enable traffic class oversubscription
> unconditionally"
>
> Hi Thomas,
>
> > -----Original Message-----
> > From: Thomas Monjalon <thomas@monjalon.net>
> > Sent: Tuesday, March 15, 2022 11:23 AM
> > To: Ajmera, Megha <megha.ajmera@intel.com>
> > Cc: dev@dpdk.org; Mcnamara, John <john.mcnamara@intel.com>; Singh,
> > Jasvinder <jasvinder.singh@intel.com>; Dumitrescu, Cristian
> > <cristian.dumitrescu@intel.com>; Thakur, Sham Singh
> > <sham.singh.thakur@intel.com>; david.marchand@redhat.com
> > Subject: Re: [PATCH v2] Revert "sched: enable traffic class
> > oversubscription unconditionally"
> >
> > 14/03/2022 13:27, Megha Ajmera:
> > > This reverts commit d91c4b1bb5a938734fe8e66da8f965304919f38e.
> > >
> > > When enabling TC OV unconditionally, it is observed the performance
> > > drops by ~20% hence reverting this commit.
> > >
> > > Fixes: d91c4b1bb5a9 ("sched: enable traffic class oversubscription
> > > unconditionally")
> > >
> > > Signed-off-by: Megha Ajmera <megha.ajmera@intel.com>
> >
> > Repeating what was suggested yesterday in a private email.
> > Sorry Megha, I don't know why you were not Cc'ed by your Intel
> colleagues.
> >
> > David and I suggested to drop the code which was enabled by the
> > compilation flag RTE_SCHED_SUBPORT_TC_OV, which was kind of dead
> code
> > before enabling it unconditionally.
> > This way you maintain the performance of the default compilation, and
> > you can re-introduce the feature, if proven useful, in the next
> > release with a runtime option.
> >
> >
>
> After talking with Jasvinder and a few other folks, we think the best option at
> this point is to keep the code as it is now in the repository, so no further
> change at this point.
>
> There is a small performance glitch that we plan to fix shortly after the 22.03
> release by making the Traffic Class Oversubscription feature conditionally
> enabled at run-time as opposed to enabled unconditionally. Megha already
> sent a patch on this, which is under review.
>
> Is this OK with you?
>
> Thank you for your help, we don't want to delay the release in any way!
>
> Regards,
> Cristian
+1
^ permalink raw reply [flat|nested] 8+ messages in thread
* Re: [PATCH v2] Revert "sched: enable traffic class oversubscription unconditionally"
2022-03-15 17:25 ` Singh, Jasvinder
@ 2022-03-15 17:35 ` Thomas Monjalon
0 siblings, 0 replies; 8+ messages in thread
From: Thomas Monjalon @ 2022-03-15 17:35 UTC (permalink / raw)
To: Dumitrescu, Cristian, Ajmera, Megha, Singh, Jasvinder
Cc: dev, Mcnamara, John, Thakur, Sham Singh, david.marchand
15/03/2022 18:25, Singh, Jasvinder:
> From: Dumitrescu, Cristian <cristian.dumitrescu@intel.com>
> > From: Thomas Monjalon <thomas@monjalon.net>
> > > 14/03/2022 13:27, Megha Ajmera:
> > > > This reverts commit d91c4b1bb5a938734fe8e66da8f965304919f38e.
> > > >
> > > > When enabling TC OV unconditionally, it is observed the performance
> > > > drops by ~20% hence reverting this commit.
> > > >
> > > > Fixes: d91c4b1bb5a9 ("sched: enable traffic class oversubscription
> > > > unconditionally")
> > > >
> > > > Signed-off-by: Megha Ajmera <megha.ajmera@intel.com>
> > >
> > > Repeating what was suggested yesterday in a private email.
> > > Sorry Megha, I don't know why you were not Cc'ed by your Intel
> > colleagues.
> > >
> > > David and I suggested to drop the code which was enabled by the
> > > compilation flag RTE_SCHED_SUBPORT_TC_OV, which was kind of dead
> > code
> > > before enabling it unconditionally.
> > > This way you maintain the performance of the default compilation, and
> > > you can re-introduce the feature, if proven useful, in the next
> > > release with a runtime option.
> > >
> > >
> >
> > After talking with Jasvinder and a few other folks, we think the best option at
> > this point is to keep the code as it is now in the repository, so no further
> > change at this point.
> >
> > There is a small performance glitch that we plan to fix shortly after the 22.03
> > release by making the Traffic Class Oversubscription feature conditionally
> > enabled at run-time as opposed to enabled unconditionally. Megha already
> > sent a patch on this, which is under review.
> >
> > Is this OK with you?
> >
> > Thank you for your help, we don't want to delay the release in any way!
>
> +1
OK so I mark this revert as rejected.
Thanks.
^ permalink raw reply [flat|nested] 8+ messages in thread
end of thread, other threads:[~2022-03-15 17:35 UTC | newest]
Thread overview: 8+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2022-03-14 9:59 [PATCH] Revert "sched: enable traffic class oversubscription unconditionally" Megha Ajmera
2022-03-14 10:32 ` Kevin Traynor
2022-03-14 12:27 ` [PATCH v2] " Megha Ajmera
2022-03-15 11:22 ` Thomas Monjalon
2022-03-15 17:25 ` Dumitrescu, Cristian
2022-03-15 17:25 ` Singh, Jasvinder
2022-03-15 17:35 ` Thomas Monjalon
2022-03-14 10:47 ` [PATCH] " Dumitrescu, Cristian
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).