* [dpdk-dev] [PATCH v2 1/7] rte_sched: make RED optional at runtime
@ 2015-02-05 6:04 Stephen Hemminger
2015-02-05 6:04 ` [dpdk-dev] [PATCH v2 2/7] rte_sched: expand scheduler hierarchy for more VLAN's Stephen Hemminger
` (2 more replies)
0 siblings, 3 replies; 7+ messages in thread
From: Stephen Hemminger @ 2015-02-05 6:04 UTC (permalink / raw)
To: dev; +Cc: Stephen Hemminger
From: Stephen Hemminger <shemming@brocade.com>
Want to be able to build with RTE_SCHED_RED enabled but
allow disabling RED on a per-queue basis at runtime.
RED is disabled unless min/max thresholds set.
Signed-off-by: Stephen Hemminger <stephen@networkplumber.org>
---
lib/librte_sched/rte_sched.c | 9 +++++++++
1 file changed, 9 insertions(+)
diff --git a/lib/librte_sched/rte_sched.c b/lib/librte_sched/rte_sched.c
index 95dee27..6928c98 100644
--- a/lib/librte_sched/rte_sched.c
+++ b/lib/librte_sched/rte_sched.c
@@ -636,6 +636,12 @@ rte_sched_port_config(struct rte_sched_port_params *params)
uint32_t j;
for (j = 0; j < e_RTE_METER_COLORS; j++) {
+ /* if min/max are both zero, then RED is disabled */
+ if ((params->red_params[i][j].min_th |
+ params->red_params[i][j].max_th) == 0) {
+ continue;
+ }
+
if (rte_red_config_init(&port->red_config[i][j],
params->red_params[i][j].wq_log2,
params->red_params[i][j].min_th,
@@ -1069,6 +1075,9 @@ rte_sched_port_red_drop(struct rte_sched_port *port, struct rte_mbuf *pkt, uint3
color = rte_sched_port_pkt_read_color(pkt);
red_cfg = &port->red_config[tc_index][color];
+ if ( (red_cfg->min_th | red_cfg->max_th) == 0)
+ return 0;
+
qe = port->queue_extra + qindex;
red = &qe->red;
--
2.1.4
^ permalink raw reply [flat|nested] 7+ messages in thread
* [dpdk-dev] [PATCH v2 2/7] rte_sched: expand scheduler hierarchy for more VLAN's
2015-02-05 6:04 [dpdk-dev] [PATCH v2 1/7] rte_sched: make RED optional at runtime Stephen Hemminger
@ 2015-02-05 6:04 ` Stephen Hemminger
2015-02-05 9:57 ` Ananyev, Konstantin
2015-02-20 18:18 ` Dumitrescu, Cristian
2015-02-05 6:04 ` [dpdk-dev] [PATCH v2 3/7] rte_sched: keep track of RED drops Stephen Hemminger
2015-02-20 17:54 ` [dpdk-dev] [PATCH v2 1/7] rte_sched: make RED optional at runtime Dumitrescu, Cristian
2 siblings, 2 replies; 7+ messages in thread
From: Stephen Hemminger @ 2015-02-05 6:04 UTC (permalink / raw)
To: dev; +Cc: Stephen Hemminger
From: Stephen Hemminger <shemming@brocade.com>
The QoS subport is limited to 8 bits in original code.
But customers demanded ability to support full number of VLAN's (4096)
therefore use the full part of the tag field of mbuf.
Resize the pipe as well to allow for more pipes in future and
avoid expensive bitfield access.
Signed-off-by: Stephen Hemminger <stephen@networkplumber.org>
---
v2 use tag area rather than claiming reserved bit which isn't documented
lib/librte_mbuf/rte_mbuf.h | 5 ++++-
lib/librte_sched/rte_sched.h | 38 ++++++++++++++++++++++++--------------
2 files changed, 28 insertions(+), 15 deletions(-)
diff --git a/lib/librte_mbuf/rte_mbuf.h b/lib/librte_mbuf/rte_mbuf.h
index 16059c6..8f0c3a4 100644
--- a/lib/librte_mbuf/rte_mbuf.h
+++ b/lib/librte_mbuf/rte_mbuf.h
@@ -258,7 +258,10 @@ struct rte_mbuf {
/**< First 4 flexible bytes or FD ID, dependent on
PKT_RX_FDIR_* flag in ol_flags. */
} fdir; /**< Filter identifier if FDIR enabled */
- uint32_t sched; /**< Hierarchical scheduler */
+ struct {
+ uint32_t lo;
+ uint32_t hi;
+ } sched; /**< Hierarchical scheduler */
uint32_t usr; /**< User defined tags. See @rte_distributor_process */
} hash; /**< hash information */
diff --git a/lib/librte_sched/rte_sched.h b/lib/librte_sched/rte_sched.h
index e6bba22..dda287f 100644
--- a/lib/librte_sched/rte_sched.h
+++ b/lib/librte_sched/rte_sched.h
@@ -195,16 +195,20 @@ struct rte_sched_port_params {
#endif
};
-/** Path through the scheduler hierarchy used by the scheduler enqueue operation to
-identify the destination queue for the current packet. Stored in the field hash.sched
-of struct rte_mbuf of each packet, typically written by the classification stage and read by
-scheduler enqueue.*/
+/*
+ * Path through the scheduler hierarchy used by the scheduler enqueue
+ * operation to identify the destination queue for the current
+ * packet. Stored in the field pkt.hash.sched of struct rte_mbuf of
+ * each packet, typically written by the classification stage and read
+ * by scheduler enqueue.
+ */
struct rte_sched_port_hierarchy {
- uint32_t queue:2; /**< Queue ID (0 .. 3) */
- uint32_t traffic_class:2; /**< Traffic class ID (0 .. 3)*/
- uint32_t pipe:20; /**< Pipe ID */
- uint32_t subport:6; /**< Subport ID */
- uint32_t color:2; /**< Color */
+ uint16_t queue:2; /**< Queue ID (0 .. 3) */
+ uint16_t traffic_class:2; /**< Traffic class ID (0 .. 3)*/
+ uint16_t color:2; /**< Color */
+ uint16_t unused:10;
+ uint16_t subport; /**< Subport ID */
+ uint32_t pipe; /**< Pipe ID */
};
/*
@@ -350,12 +354,15 @@ rte_sched_queue_read_stats(struct rte_sched_port *port,
*/
static inline void
rte_sched_port_pkt_write(struct rte_mbuf *pkt,
- uint32_t subport, uint32_t pipe, uint32_t traffic_class, uint32_t queue, enum rte_meter_color color)
+ uint32_t subport, uint32_t pipe,
+ uint32_t traffic_class,
+ uint32_t queue, enum rte_meter_color color)
{
- struct rte_sched_port_hierarchy *sched = (struct rte_sched_port_hierarchy *) &pkt->hash.sched;
+ struct rte_sched_port_hierarchy *sched
+ = (struct rte_sched_port_hierarchy *) &pkt->hash.sched;
- sched->color = (uint32_t) color;
sched->subport = subport;
+ sched->color = (uint32_t) color;
sched->pipe = pipe;
sched->traffic_class = traffic_class;
sched->queue = queue;
@@ -379,9 +386,12 @@ rte_sched_port_pkt_write(struct rte_mbuf *pkt,
*
*/
static inline void
-rte_sched_port_pkt_read_tree_path(struct rte_mbuf *pkt, uint32_t *subport, uint32_t *pipe, uint32_t *traffic_class, uint32_t *queue)
+rte_sched_port_pkt_read_tree_path(struct rte_mbuf *pkt, uint32_t *subport,
+ uint32_t *pipe, uint32_t *traffic_class,
+ uint32_t *queue)
{
- struct rte_sched_port_hierarchy *sched = (struct rte_sched_port_hierarchy *) &pkt->hash.sched;
+ struct rte_sched_port_hierarchy *sched
+ = (struct rte_sched_port_hierarchy *) &pkt->hash.sched;
*subport = sched->subport;
*pipe = sched->pipe;
--
2.1.4
^ permalink raw reply [flat|nested] 7+ messages in thread
* [dpdk-dev] [PATCH v2 3/7] rte_sched: keep track of RED drops
2015-02-05 6:04 [dpdk-dev] [PATCH v2 1/7] rte_sched: make RED optional at runtime Stephen Hemminger
2015-02-05 6:04 ` [dpdk-dev] [PATCH v2 2/7] rte_sched: expand scheduler hierarchy for more VLAN's Stephen Hemminger
@ 2015-02-05 6:04 ` Stephen Hemminger
2015-02-20 18:22 ` Dumitrescu, Cristian
2015-02-20 17:54 ` [dpdk-dev] [PATCH v2 1/7] rte_sched: make RED optional at runtime Dumitrescu, Cristian
2 siblings, 1 reply; 7+ messages in thread
From: Stephen Hemminger @ 2015-02-05 6:04 UTC (permalink / raw)
To: dev; +Cc: Stephen Hemminger
From: Stephen Hemminger <shemming@brocade.com>
Add new statistic to keep track of drops due to RED.
Signed-off-by: Stephen Hemminger <stephen@networkplumber.org>
---
lib/librte_sched/rte_sched.c | 28 +++++++++++++++++++++++-----
lib/librte_sched/rte_sched.h | 6 ++++++
2 files changed, 29 insertions(+), 5 deletions(-)
diff --git a/lib/librte_sched/rte_sched.c b/lib/librte_sched/rte_sched.c
index 6928c98..8cb8bf1 100644
--- a/lib/librte_sched/rte_sched.c
+++ b/lib/librte_sched/rte_sched.c
@@ -1028,7 +1028,9 @@ rte_sched_port_update_subport_stats(struct rte_sched_port *port, uint32_t qindex
}
static inline void
-rte_sched_port_update_subport_stats_on_drop(struct rte_sched_port *port, uint32_t qindex, struct rte_mbuf *pkt)
+rte_sched_port_update_subport_stats_on_drop(struct rte_sched_port *port,
+ uint32_t qindex,
+ struct rte_mbuf *pkt, uint32_t red)
{
struct rte_sched_subport *s = port->subport + (qindex / rte_sched_port_queues_per_subport(port));
uint32_t tc_index = (qindex >> 2) & 0x3;
@@ -1036,6 +1038,9 @@ rte_sched_port_update_subport_stats_on_drop(struct rte_sched_port *port, uint32_
s->stats.n_pkts_tc_dropped[tc_index] += 1;
s->stats.n_bytes_tc_dropped[tc_index] += pkt_len;
+#ifdef RTE_SCHED_RED
+ s->stats.n_pkts_red_dropped[tc_index] += red;
+#endif
}
static inline void
@@ -1049,13 +1054,18 @@ rte_sched_port_update_queue_stats(struct rte_sched_port *port, uint32_t qindex,
}
static inline void
-rte_sched_port_update_queue_stats_on_drop(struct rte_sched_port *port, uint32_t qindex, struct rte_mbuf *pkt)
+rte_sched_port_update_queue_stats_on_drop(struct rte_sched_port *port,
+ uint32_t qindex,
+ struct rte_mbuf *pkt, uint32_t red)
{
struct rte_sched_queue_extra *qe = port->queue_extra + qindex;
uint32_t pkt_len = pkt->pkt_len;
qe->stats.n_pkts_dropped += 1;
qe->stats.n_bytes_dropped += pkt_len;
+#ifdef RTE_SCHED_RED
+ qe->stats.n_pkts_red_dropped += red;
+#endif
}
#endif /* RTE_SCHED_COLLECT_STATS */
@@ -1206,12 +1216,20 @@ rte_sched_port_enqueue_qwa(struct rte_sched_port *port, uint32_t qindex, struct
qlen = q->qw - q->qr;
/* Drop the packet (and update drop stats) when queue is full */
- if (unlikely(rte_sched_port_red_drop(port, pkt, qindex, qlen) || (qlen >= qsize))) {
+ if (unlikely(rte_sched_port_red_drop(port, pkt, qindex, qlen))) {
+#ifdef RTE_SCHED_COLLECT_STATS
+ rte_sched_port_update_subport_stats_on_drop(port, qindex, pkt, 1);
+ rte_sched_port_update_queue_stats_on_drop(port, qindex, pkt, 1);
+#endif
rte_pktmbuf_free(pkt);
+ }
+
+ if (qlen >= qsize) {
#ifdef RTE_SCHED_COLLECT_STATS
- rte_sched_port_update_subport_stats_on_drop(port, qindex, pkt);
- rte_sched_port_update_queue_stats_on_drop(port, qindex, pkt);
+ rte_sched_port_update_subport_stats_on_drop(port, qindex, pkt, 0);
+ rte_sched_port_update_queue_stats_on_drop(port, qindex, pkt, 0);
#endif
+ rte_pktmbuf_free(pkt);
return 0;
}
diff --git a/lib/librte_sched/rte_sched.h b/lib/librte_sched/rte_sched.h
index dda287f..e9bf18a 100644
--- a/lib/librte_sched/rte_sched.h
+++ b/lib/librte_sched/rte_sched.h
@@ -140,6 +140,9 @@ struct rte_sched_subport_stats {
subport for each traffic class*/
uint32_t n_bytes_tc_dropped[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE]; /**< Number of bytes dropped by the current
subport for each traffic class due to subport queues being full or congested */
+#ifdef RTE_SCHED_RED
+ uint32_t n_pkts_red_dropped[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE]; /**< Number of packets dropped by red */
+#endif
};
/** Pipe configuration parameters. The period and credits_per_period parameters are measured
@@ -168,6 +171,9 @@ struct rte_sched_queue_stats {
/* Packets */
uint32_t n_pkts; /**< Number of packets successfully written to current queue */
uint32_t n_pkts_dropped; /**< Number of packets dropped due to current queue being full or congested */
+#ifdef RTE_SCHED_RED
+ uint32_t n_pkts_red_dropped;
+#endif
/* Bytes */
uint32_t n_bytes; /**< Number of bytes successfully written to current queue */
--
2.1.4
^ permalink raw reply [flat|nested] 7+ messages in thread
* Re: [dpdk-dev] [PATCH v2 2/7] rte_sched: expand scheduler hierarchy for more VLAN's
2015-02-05 6:04 ` [dpdk-dev] [PATCH v2 2/7] rte_sched: expand scheduler hierarchy for more VLAN's Stephen Hemminger
@ 2015-02-05 9:57 ` Ananyev, Konstantin
2015-02-20 18:18 ` Dumitrescu, Cristian
1 sibling, 0 replies; 7+ messages in thread
From: Ananyev, Konstantin @ 2015-02-05 9:57 UTC (permalink / raw)
To: Stephen Hemminger, dev; +Cc: Stephen Hemminger
> -----Original Message-----
> From: dev [mailto:dev-bounces@dpdk.org] On Behalf Of Stephen Hemminger
> Sent: Thursday, February 05, 2015 6:05 AM
> To: dev@dpdk.org
> Cc: Stephen Hemminger
> Subject: [dpdk-dev] [PATCH v2 2/7] rte_sched: expand scheduler hierarchy for more VLAN's
>
> From: Stephen Hemminger <shemming@brocade.com>
>
> The QoS subport is limited to 8 bits in original code.
> But customers demanded ability to support full number of VLAN's (4096)
> therefore use the full part of the tag field of mbuf.
>
> Resize the pipe as well to allow for more pipes in future and
> avoid expensive bitfield access.
>
> Signed-off-by: Stephen Hemminger <stephen@networkplumber.org>
> ---
> v2 use tag area rather than claiming reserved bit which isn't documented
>
> lib/librte_mbuf/rte_mbuf.h | 5 ++++-
> lib/librte_sched/rte_sched.h | 38 ++++++++++++++++++++++++--------------
> 2 files changed, 28 insertions(+), 15 deletions(-)
Acked-by: Konstantin Ananyev <konstantin.ananyev@intel.com>
^ permalink raw reply [flat|nested] 7+ messages in thread
* Re: [dpdk-dev] [PATCH v2 1/7] rte_sched: make RED optional at runtime
2015-02-05 6:04 [dpdk-dev] [PATCH v2 1/7] rte_sched: make RED optional at runtime Stephen Hemminger
2015-02-05 6:04 ` [dpdk-dev] [PATCH v2 2/7] rte_sched: expand scheduler hierarchy for more VLAN's Stephen Hemminger
2015-02-05 6:04 ` [dpdk-dev] [PATCH v2 3/7] rte_sched: keep track of RED drops Stephen Hemminger
@ 2015-02-20 17:54 ` Dumitrescu, Cristian
2 siblings, 0 replies; 7+ messages in thread
From: Dumitrescu, Cristian @ 2015-02-20 17:54 UTC (permalink / raw)
To: Stephen Hemminger, dev; +Cc: Stephen Hemminger
> -----Original Message-----
> From: dev [mailto:dev-bounces@dpdk.org] On Behalf Of Stephen
> Hemminger
> Sent: Thursday, February 5, 2015 6:05 AM
> To: dev@dpdk.org
> Cc: Stephen Hemminger
> Subject: [dpdk-dev] [PATCH v2 1/7] rte_sched: make RED optional at runtime
>
> From: Stephen Hemminger <shemming@brocade.com>
>
> Want to be able to build with RTE_SCHED_RED enabled but
> allow disabling RED on a per-queue basis at runtime.
>
> RED is disabled unless min/max thresholds set.
>
> Signed-off-by: Stephen Hemminger <stephen@networkplumber.org>
> ---
> lib/librte_sched/rte_sched.c | 9 +++++++++
> 1 file changed, 9 insertions(+)
>
> diff --git a/lib/librte_sched/rte_sched.c b/lib/librte_sched/rte_sched.c
> index 95dee27..6928c98 100644
> --- a/lib/librte_sched/rte_sched.c
> +++ b/lib/librte_sched/rte_sched.c
> @@ -636,6 +636,12 @@ rte_sched_port_config(struct
> rte_sched_port_params *params)
> uint32_t j;
>
> for (j = 0; j < e_RTE_METER_COLORS; j++) {
> + /* if min/max are both zero, then RED is disabled */
> + if ((params->red_params[i][j].min_th |
> + params->red_params[i][j].max_th) == 0) {
> + continue;
> + }
> +
> if (rte_red_config_init(&port->red_config[i][j],
> params->red_params[i][j].wq_log2,
> params->red_params[i][j].min_th,
> @@ -1069,6 +1075,9 @@ rte_sched_port_red_drop(struct rte_sched_port
> *port, struct rte_mbuf *pkt, uint3
> color = rte_sched_port_pkt_read_color(pkt);
> red_cfg = &port->red_config[tc_index][color];
>
> + if ( (red_cfg->min_th | red_cfg->max_th) == 0)
> + return 0;
> +
> qe = port->queue_extra + qindex;
> red = &qe->red;
>
> --
> 2.1.4
Acked-by: Cristian Dumitrescu <cristian.dumitrescu@intel.com>
I agree with this approach of leaving RED as a library option that can be enabled/disabled at build-time in order to avoid any performance penalty when RED is not required.
On enqueue side (when RED is enabled):
-Makes sense to avoid the RED processing when RED is not enabled for the current queue;
On dequeue side (when RED is enabled):
-Not sure what's best when RED is not enabled for the current queue: whether to go ahead anyway and apply the timestamp, as it is harmless (but might trigger a cache miss), or to check first (which might also trigger a cache miss). Makes sense to go with the former option, which is implemented by this patch.
--------------------------------------------------------------
Intel Shannon Limited
Registered in Ireland
Registered Office: Collinstown Industrial Park, Leixlip, County Kildare
Registered Number: 308263
Business address: Dromore House, East Park, Shannon, Co. Clare
This e-mail and any attachments may contain confidential material for the sole use of the intended recipient(s). Any review or distribution by others is strictly prohibited. If you are not the intended recipient, please contact the sender and delete all copies.
^ permalink raw reply [flat|nested] 7+ messages in thread
* Re: [dpdk-dev] [PATCH v2 2/7] rte_sched: expand scheduler hierarchy for more VLAN's
2015-02-05 6:04 ` [dpdk-dev] [PATCH v2 2/7] rte_sched: expand scheduler hierarchy for more VLAN's Stephen Hemminger
2015-02-05 9:57 ` Ananyev, Konstantin
@ 2015-02-20 18:18 ` Dumitrescu, Cristian
1 sibling, 0 replies; 7+ messages in thread
From: Dumitrescu, Cristian @ 2015-02-20 18:18 UTC (permalink / raw)
To: Stephen Hemminger, dev; +Cc: Stephen Hemminger
> -----Original Message-----
> From: dev [mailto:dev-bounces@dpdk.org] On Behalf Of Stephen
> Hemminger
> Sent: Thursday, February 5, 2015 6:05 AM
> To: dev@dpdk.org
> Cc: Stephen Hemminger
> Subject: [dpdk-dev] [PATCH v2 2/7] rte_sched: expand scheduler hierarchy
> for more VLAN's
>
> From: Stephen Hemminger <shemming@brocade.com>
>
> The QoS subport is limited to 8 bits in original code.
> But customers demanded ability to support full number of VLAN's (4096)
> therefore use the full part of the tag field of mbuf.
>
> Resize the pipe as well to allow for more pipes in future and
> avoid expensive bitfield access.
>
> Signed-off-by: Stephen Hemminger <stephen@networkplumber.org>
> ---
> v2 use tag area rather than claiming reserved bit which isn't documented
>
> lib/librte_mbuf/rte_mbuf.h | 5 ++++-
> lib/librte_sched/rte_sched.h | 38 ++++++++++++++++++++++++-------------
> -
> 2 files changed, 28 insertions(+), 15 deletions(-)
>
> diff --git a/lib/librte_mbuf/rte_mbuf.h b/lib/librte_mbuf/rte_mbuf.h
> index 16059c6..8f0c3a4 100644
> --- a/lib/librte_mbuf/rte_mbuf.h
> +++ b/lib/librte_mbuf/rte_mbuf.h
> @@ -258,7 +258,10 @@ struct rte_mbuf {
> /**< First 4 flexible bytes or FD ID, dependent on
> PKT_RX_FDIR_* flag in ol_flags. */
> } fdir; /**< Filter identifier if FDIR enabled */
> - uint32_t sched; /**< Hierarchical scheduler */
> + struct {
> + uint32_t lo;
> + uint32_t hi;
> + } sched; /**< Hierarchical scheduler */
> uint32_t usr; /**< User defined tags. See
> @rte_distributor_process */
> } hash; /**< hash information */
>
> diff --git a/lib/librte_sched/rte_sched.h b/lib/librte_sched/rte_sched.h
> index e6bba22..dda287f 100644
> --- a/lib/librte_sched/rte_sched.h
> +++ b/lib/librte_sched/rte_sched.h
> @@ -195,16 +195,20 @@ struct rte_sched_port_params {
> #endif
> };
>
> -/** Path through the scheduler hierarchy used by the scheduler enqueue
> operation to
> -identify the destination queue for the current packet. Stored in the field
> hash.sched
> -of struct rte_mbuf of each packet, typically written by the classification
> stage and read by
> -scheduler enqueue.*/
> +/*
> + * Path through the scheduler hierarchy used by the scheduler enqueue
> + * operation to identify the destination queue for the current
> + * packet. Stored in the field pkt.hash.sched of struct rte_mbuf of
> + * each packet, typically written by the classification stage and read
> + * by scheduler enqueue.
> + */
> struct rte_sched_port_hierarchy {
> - uint32_t queue:2; /**< Queue ID (0 .. 3) */
> - uint32_t traffic_class:2; /**< Traffic class ID (0 .. 3)*/
> - uint32_t pipe:20; /**< Pipe ID */
> - uint32_t subport:6; /**< Subport ID */
> - uint32_t color:2; /**< Color */
> + uint16_t queue:2; /**< Queue ID (0 .. 3) */
> + uint16_t traffic_class:2; /**< Traffic class ID (0 .. 3)*/
> + uint16_t color:2; /**< Color */
> + uint16_t unused:10;
> + uint16_t subport; /**< Subport ID */
> + uint32_t pipe; /**< Pipe ID */
> };
Extending the number of bits allocated for mbuf->sched makes sense to me. I agree with this partitioning.
>
> /*
> @@ -350,12 +354,15 @@ rte_sched_queue_read_stats(struct
> rte_sched_port *port,
> */
> static inline void
> rte_sched_port_pkt_write(struct rte_mbuf *pkt,
> - uint32_t subport, uint32_t pipe, uint32_t traffic_class, uint32_t
> queue, enum rte_meter_color color)
> + uint32_t subport, uint32_t pipe,
> + uint32_t traffic_class,
> + uint32_t queue, enum rte_meter_color color)
> {
> - struct rte_sched_port_hierarchy *sched = (struct
> rte_sched_port_hierarchy *) &pkt->hash.sched;
> + struct rte_sched_port_hierarchy *sched
> + = (struct rte_sched_port_hierarchy *) &pkt->hash.sched;
>
> - sched->color = (uint32_t) color;
> sched->subport = subport;
> + sched->color = (uint32_t) color;
> sched->pipe = pipe;
> sched->traffic_class = traffic_class;
> sched->queue = queue;
> @@ -379,9 +386,12 @@ rte_sched_port_pkt_write(struct rte_mbuf *pkt,
> *
> */
> static inline void
> -rte_sched_port_pkt_read_tree_path(struct rte_mbuf *pkt, uint32_t
> *subport, uint32_t *pipe, uint32_t *traffic_class, uint32_t *queue)
> +rte_sched_port_pkt_read_tree_path(struct rte_mbuf *pkt, uint32_t
> *subport,
> + uint32_t *pipe, uint32_t *traffic_class,
> + uint32_t *queue)
> {
> - struct rte_sched_port_hierarchy *sched = (struct
> rte_sched_port_hierarchy *) &pkt->hash.sched;
> + struct rte_sched_port_hierarchy *sched
> + = (struct rte_sched_port_hierarchy *) &pkt->hash.sched;
>
> *subport = sched->subport;
> *pipe = sched->pipe;
> --
> 2.1.4
The functions used to access the mbuf->sched field are very slow.
Functions rte_sched_port_pkt_write(), rte_sched_port_pkt_read_tree_path(), rte_sched_port_pkt_read_color() are accessing the bitfields directly, and gcc seems to do a particularly bad job at optimizing the code that makes use of bitfields. Although less readable, a more performant alternative is to implement these functions with bit shifting, masking and or-ing operations as opposed to accessing the bit fields, as it seems to save dozens of cycles per packet.
Stephen, based on a previous conversation we had a while ago, would you be OK to do this now and resubmit this patch?
--------------------------------------------------------------
Intel Shannon Limited
Registered in Ireland
Registered Office: Collinstown Industrial Park, Leixlip, County Kildare
Registered Number: 308263
Business address: Dromore House, East Park, Shannon, Co. Clare
This e-mail and any attachments may contain confidential material for the sole use of the intended recipient(s). Any review or distribution by others is strictly prohibited. If you are not the intended recipient, please contact the sender and delete all copies.
^ permalink raw reply [flat|nested] 7+ messages in thread
* Re: [dpdk-dev] [PATCH v2 3/7] rte_sched: keep track of RED drops
2015-02-05 6:04 ` [dpdk-dev] [PATCH v2 3/7] rte_sched: keep track of RED drops Stephen Hemminger
@ 2015-02-20 18:22 ` Dumitrescu, Cristian
0 siblings, 0 replies; 7+ messages in thread
From: Dumitrescu, Cristian @ 2015-02-20 18:22 UTC (permalink / raw)
To: Stephen Hemminger, dev; +Cc: Stephen Hemminger
> -----Original Message-----
> From: dev [mailto:dev-bounces@dpdk.org] On Behalf Of Stephen
> Hemminger
> Sent: Thursday, February 5, 2015 6:05 AM
> To: dev@dpdk.org
> Cc: Stephen Hemminger
> Subject: [dpdk-dev] [PATCH v2 3/7] rte_sched: keep track of RED drops
>
> From: Stephen Hemminger <shemming@brocade.com>
>
> Add new statistic to keep track of drops due to RED.
>
> Signed-off-by: Stephen Hemminger <stephen@networkplumber.org>
> ---
> lib/librte_sched/rte_sched.c | 28 +++++++++++++++++++++++-----
> lib/librte_sched/rte_sched.h | 6 ++++++
> 2 files changed, 29 insertions(+), 5 deletions(-)
>
> diff --git a/lib/librte_sched/rte_sched.c b/lib/librte_sched/rte_sched.c
> index 6928c98..8cb8bf1 100644
> --- a/lib/librte_sched/rte_sched.c
> +++ b/lib/librte_sched/rte_sched.c
> @@ -1028,7 +1028,9 @@ rte_sched_port_update_subport_stats(struct
> rte_sched_port *port, uint32_t qindex
> }
>
> static inline void
> -rte_sched_port_update_subport_stats_on_drop(struct rte_sched_port
> *port, uint32_t qindex, struct rte_mbuf *pkt)
> +rte_sched_port_update_subport_stats_on_drop(struct rte_sched_port
> *port,
> + uint32_t qindex,
> + struct rte_mbuf *pkt, uint32_t red)
> {
> struct rte_sched_subport *s = port->subport + (qindex /
> rte_sched_port_queues_per_subport(port));
> uint32_t tc_index = (qindex >> 2) & 0x3;
> @@ -1036,6 +1038,9 @@
> rte_sched_port_update_subport_stats_on_drop(struct rte_sched_port
> *port, uint32_
>
> s->stats.n_pkts_tc_dropped[tc_index] += 1;
> s->stats.n_bytes_tc_dropped[tc_index] += pkt_len;
> +#ifdef RTE_SCHED_RED
> + s->stats.n_pkts_red_dropped[tc_index] += red;
> +#endif
> }
>
> static inline void
> @@ -1049,13 +1054,18 @@ rte_sched_port_update_queue_stats(struct
> rte_sched_port *port, uint32_t qindex,
> }
>
> static inline void
> -rte_sched_port_update_queue_stats_on_drop(struct rte_sched_port
> *port, uint32_t qindex, struct rte_mbuf *pkt)
> +rte_sched_port_update_queue_stats_on_drop(struct rte_sched_port
> *port,
> + uint32_t qindex,
> + struct rte_mbuf *pkt, uint32_t red)
> {
> struct rte_sched_queue_extra *qe = port->queue_extra + qindex;
> uint32_t pkt_len = pkt->pkt_len;
>
> qe->stats.n_pkts_dropped += 1;
> qe->stats.n_bytes_dropped += pkt_len;
> +#ifdef RTE_SCHED_RED
> + qe->stats.n_pkts_red_dropped += red;
> +#endif
> }
>
> #endif /* RTE_SCHED_COLLECT_STATS */
> @@ -1206,12 +1216,20 @@ rte_sched_port_enqueue_qwa(struct
> rte_sched_port *port, uint32_t qindex, struct
> qlen = q->qw - q->qr;
>
> /* Drop the packet (and update drop stats) when queue is full */
> - if (unlikely(rte_sched_port_red_drop(port, pkt, qindex, qlen) ||
> (qlen >= qsize))) {
> + if (unlikely(rte_sched_port_red_drop(port, pkt, qindex, qlen))) {
> +#ifdef RTE_SCHED_COLLECT_STATS
> + rte_sched_port_update_subport_stats_on_drop(port,
> qindex, pkt, 1);
> + rte_sched_port_update_queue_stats_on_drop(port,
> qindex, pkt, 1);
> +#endif
> rte_pktmbuf_free(pkt);
> + }
> +
> + if (qlen >= qsize) {
> #ifdef RTE_SCHED_COLLECT_STATS
> - rte_sched_port_update_subport_stats_on_drop(port,
> qindex, pkt);
> - rte_sched_port_update_queue_stats_on_drop(port,
> qindex, pkt);
> + rte_sched_port_update_subport_stats_on_drop(port,
> qindex, pkt, 0);
> + rte_sched_port_update_queue_stats_on_drop(port,
> qindex, pkt, 0);
> #endif
> + rte_pktmbuf_free(pkt);
> return 0;
> }
>
> diff --git a/lib/librte_sched/rte_sched.h b/lib/librte_sched/rte_sched.h
> index dda287f..e9bf18a 100644
> --- a/lib/librte_sched/rte_sched.h
> +++ b/lib/librte_sched/rte_sched.h
> @@ -140,6 +140,9 @@ struct rte_sched_subport_stats {
> subport for each traffic class*/
> uint32_t
> n_bytes_tc_dropped[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE]; /**<
> Number of bytes dropped by the current
> subport for each traffic class due to subport queues
> being full or congested */
> +#ifdef RTE_SCHED_RED
> + uint32_t
> n_pkts_red_dropped[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE]; /**<
> Number of packets dropped by red */
> +#endif
> };
>
> /** Pipe configuration parameters. The period and credits_per_period
> parameters are measured
> @@ -168,6 +171,9 @@ struct rte_sched_queue_stats {
> /* Packets */
> uint32_t n_pkts; /**< Number of packets successfully
> written to current queue */
> uint32_t n_pkts_dropped; /**< Number of packets dropped due
> to current queue being full or congested */
> +#ifdef RTE_SCHED_RED
> + uint32_t n_pkts_red_dropped;
> +#endif
>
> /* Bytes */
> uint32_t n_bytes; /**< Number of bytes successfully written
> to current queue */
> --
> 2.1.4
Acked-by: Cristian Dumitrescu <cristian.dumitrescu@intel.com>
--------------------------------------------------------------
Intel Shannon Limited
Registered in Ireland
Registered Office: Collinstown Industrial Park, Leixlip, County Kildare
Registered Number: 308263
Business address: Dromore House, East Park, Shannon, Co. Clare
This e-mail and any attachments may contain confidential material for the sole use of the intended recipient(s). Any review or distribution by others is strictly prohibited. If you are not the intended recipient, please contact the sender and delete all copies.
^ permalink raw reply [flat|nested] 7+ messages in thread
end of thread, other threads:[~2015-02-20 18:22 UTC | newest]
Thread overview: 7+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2015-02-05 6:04 [dpdk-dev] [PATCH v2 1/7] rte_sched: make RED optional at runtime Stephen Hemminger
2015-02-05 6:04 ` [dpdk-dev] [PATCH v2 2/7] rte_sched: expand scheduler hierarchy for more VLAN's Stephen Hemminger
2015-02-05 9:57 ` Ananyev, Konstantin
2015-02-20 18:18 ` Dumitrescu, Cristian
2015-02-05 6:04 ` [dpdk-dev] [PATCH v2 3/7] rte_sched: keep track of RED drops Stephen Hemminger
2015-02-20 18:22 ` Dumitrescu, Cristian
2015-02-20 17:54 ` [dpdk-dev] [PATCH v2 1/7] rte_sched: make RED optional at runtime Dumitrescu, Cristian
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).