DPDK patches and discussions
From: "Mattias Rönnblom" <mattias.ronnblom@ericsson.com>
To: <jerinj@marvell.com>
Cc: dev@dpdk.org, stefan.sundkvist@ericsson.com,
	Ola.Liljedahl@arm.com,
	"Mattias Rönnblom" <mattias.ronnblom@ericsson.com>
Subject: [dpdk-dev] [PATCH 3/8] event/dsw: extend statistics
Date: Mon, 9 Mar 2020 07:51:01 +0100	[thread overview]
Message-ID: <20200309065106.23800-4-mattias.ronnblom@ericsson.com> (raw)
In-Reply-To: <20200309065106.23800-1-mattias.ronnblom@ericsson.com>

Extend DSW xstats.

To allow visualization of migrations, track the number of flow
immigrations in "port_<N>_immigrations". The "port_<N>_migrations"
statistic retains its legacy semantics, but is renamed
"port_<N>_emigrations".
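
For example (an illustrative listing only; the names follow the
"port_%u_..." format strings registered in dsw_xstats.c), a two-port
device now exposes counter pairs such as:

  port_0_emigrations
  port_0_immigrations
  port_1_emigrations
  port_1_immigrations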

Also expose the number of events currently being processed (i.e.,
pending releases) on a particular port, as
"port_<N>_pending_releases".
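
As a usage sketch (not part of this patch; the helper name and the
device/port ids are made up for illustration), the new counters can
be read through the standard eventdev xstats API:

#include <inttypes.h>
#include <stdio.h>

#include <rte_common.h>
#include <rte_eventdev.h>

/* Hypothetical helper: print the per-port DSW counters this patch
 * adds, looking them up by the "port_<N>_..." xstats names.
 */
static void
dsw_print_migration_xstats(uint8_t dev_id, uint8_t port_id)
{
	static const char *const fmts[] = {
		"port_%u_emigrations",
		"port_%u_immigrations",
		"port_%u_pending_releases",
	};
	unsigned int i;

	for (i = 0; i < RTE_DIM(fmts); i++) {
		char name[RTE_EVENT_DEV_XSTATS_NAME_SIZE];
		unsigned int id;
		uint64_t value;

		snprintf(name, sizeof(name), fmts[i],
			 (unsigned int)port_id);
		value = rte_event_dev_xstats_by_name_get(dev_id, name, &id);
		printf("%s: %" PRIu64 "\n", name, value);
	}
}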

Signed-off-by: Mattias Rönnblom <mattias.ronnblom@ericsson.com>
---
 drivers/event/dsw/dsw_evdev.h  |  16 ++--
 drivers/event/dsw/dsw_event.c  | 131 +++++++++++++++++----------------
 drivers/event/dsw/dsw_xstats.c |  17 +++--
 3 files changed, 91 insertions(+), 73 deletions(-)

diff --git a/drivers/event/dsw/dsw_evdev.h b/drivers/event/dsw/dsw_evdev.h
index dc44bce81..2c7f9efa3 100644
--- a/drivers/event/dsw/dsw_evdev.h
+++ b/drivers/event/dsw/dsw_evdev.h
@@ -162,18 +162,20 @@ struct dsw_port {
 	uint64_t total_busy_cycles;
 
 	/* For the ctl interface and flow migration mechanism. */
-	uint64_t next_migration;
+	uint64_t next_emigration;
 	uint64_t migration_interval;
 	enum dsw_migration_state migration_state;
 
-	uint64_t migration_start;
-	uint64_t migrations;
-	uint64_t migration_latency;
+	uint64_t emigration_start;
+	uint64_t emigrations;
+	uint64_t emigration_latency;
 
-	uint8_t migration_target_port_id;
-	struct dsw_queue_flow migration_target_qf;
+	uint8_t emigration_target_port_id;
+	struct dsw_queue_flow emigration_target_qf;
 	uint8_t cfm_cnt;
 
+	uint64_t immigrations;
+
 	uint16_t paused_flows_len;
 	struct dsw_queue_flow paused_flows[DSW_MAX_PAUSED_FLOWS];
 
@@ -187,11 +189,13 @@ struct dsw_port {
 	uint16_t seen_events_idx;
 	struct dsw_queue_flow seen_events[DSW_MAX_EVENTS_RECORDED];
 
+	uint64_t enqueue_calls;
 	uint64_t new_enqueued;
 	uint64_t forward_enqueued;
 	uint64_t release_enqueued;
 	uint64_t queue_enqueued[DSW_MAX_QUEUES];
 
+	uint64_t dequeue_calls;
 	uint64_t dequeued;
 	uint64_t queue_dequeued[DSW_MAX_QUEUES];
 
diff --git a/drivers/event/dsw/dsw_event.c b/drivers/event/dsw/dsw_event.c
index 7f1f29218..69cff7aa2 100644
--- a/drivers/event/dsw/dsw_event.c
+++ b/drivers/event/dsw/dsw_event.c
@@ -385,12 +385,12 @@ dsw_retrieve_port_loads(struct dsw_evdev *dsw, int16_t *port_loads,
 }
 
 static bool
-dsw_select_migration_target(struct dsw_evdev *dsw,
-			    struct dsw_port *source_port,
-			    struct dsw_queue_flow_burst *bursts,
-			    uint16_t num_bursts, int16_t *port_loads,
-			    int16_t max_load, struct dsw_queue_flow *target_qf,
-			    uint8_t *target_port_id)
+dsw_select_emigration_target(struct dsw_evdev *dsw,
+			     struct dsw_port *source_port,
+			     struct dsw_queue_flow_burst *bursts,
+			     uint16_t num_bursts, int16_t *port_loads,
+			     int16_t max_load, struct dsw_queue_flow *target_qf,
+			     uint8_t *target_port_id)
 {
 	uint16_t source_load = port_loads[source_port->id];
 	uint16_t i;
@@ -598,39 +598,39 @@ dsw_port_flush_paused_events(struct dsw_evdev *dsw,
 }
 
 static void
-dsw_port_migration_stats(struct dsw_port *port)
+dsw_port_emigration_stats(struct dsw_port *port)
 {
-	uint64_t migration_latency;
+	uint64_t emigration_latency;
 
-	migration_latency = (rte_get_timer_cycles() - port->migration_start);
-	port->migration_latency += migration_latency;
-	port->migrations++;
+	emigration_latency = (rte_get_timer_cycles() - port->emigration_start);
+	port->emigration_latency += emigration_latency;
+	port->emigrations++;
 }
 
 static void
-dsw_port_end_migration(struct dsw_evdev *dsw, struct dsw_port *port)
+dsw_port_end_emigration(struct dsw_evdev *dsw, struct dsw_port *port)
 {
-	uint8_t queue_id = port->migration_target_qf.queue_id;
-	uint16_t flow_hash = port->migration_target_qf.flow_hash;
+	uint8_t queue_id = port->emigration_target_qf.queue_id;
+	uint16_t flow_hash = port->emigration_target_qf.flow_hash;
 
 	port->migration_state = DSW_MIGRATION_STATE_IDLE;
 	port->seen_events_len = 0;
 
-	dsw_port_migration_stats(port);
+	dsw_port_emigration_stats(port);
 
 	if (dsw->queues[queue_id].schedule_type != RTE_SCHED_TYPE_PARALLEL) {
 		dsw_port_remove_paused_flow(port, queue_id, flow_hash);
 		dsw_port_flush_paused_events(dsw, port, queue_id, flow_hash);
 	}
 
-	DSW_LOG_DP_PORT(DEBUG, port->id, "Migration completed for queue_id "
+	DSW_LOG_DP_PORT(DEBUG, port->id, "Emigration completed for queue_id "
 			"%d flow_hash %d.\n", queue_id, flow_hash);
 }
 
 static void
-dsw_port_consider_migration(struct dsw_evdev *dsw,
-			    struct dsw_port *source_port,
-			    uint64_t now)
+dsw_port_consider_emigration(struct dsw_evdev *dsw,
+			     struct dsw_port *source_port,
+			     uint64_t now)
 {
 	bool any_port_below_limit;
 	struct dsw_queue_flow *seen_events = source_port->seen_events;
@@ -640,7 +640,7 @@ dsw_port_consider_migration(struct dsw_evdev *dsw,
 	int16_t source_port_load;
 	int16_t port_loads[dsw->num_ports];
 
-	if (now < source_port->next_migration)
+	if (now < source_port->next_emigration)
 		return;
 
 	if (dsw->num_ports == 1)
@@ -649,25 +649,25 @@ dsw_port_consider_migration(struct dsw_evdev *dsw,
 	if (seen_events_len < DSW_MAX_EVENTS_RECORDED)
 		return;
 
-	DSW_LOG_DP_PORT(DEBUG, source_port->id, "Considering migration.\n");
+	DSW_LOG_DP_PORT(DEBUG, source_port->id, "Considering emigration.\n");
 
 	/* Randomize interval to avoid having all threads considering
-	 * migration at the same in point in time, which might lead to
-	 * all choosing the same target port.
+	 * emigration at the same point in time, which might lead to
+	 * all choosing the same target port.
 	 */
-	source_port->next_migration = now +
+	source_port->next_emigration = now +
 		source_port->migration_interval / 2 +
 		rte_rand() % source_port->migration_interval;
 
 	if (source_port->migration_state != DSW_MIGRATION_STATE_IDLE) {
 		DSW_LOG_DP_PORT(DEBUG, source_port->id,
-				"Migration already in progress.\n");
+				"Emigration already in progress.\n");
 		return;
 	}
 
 	/* For simplicity, avoid migration in the unlikely case there
 	 * is still events to consume in the in_buffer (from the last
-	 * migration).
+	 * emigration).
 	 */
 	if (source_port->in_buffer_len > 0) {
 		DSW_LOG_DP_PORT(DEBUG, source_port->id, "There are still "
@@ -719,52 +719,56 @@ dsw_port_consider_migration(struct dsw_evdev *dsw,
 	}
 
 	/* The strategy is to first try to find a flow to move to a
-	 * port with low load (below the migration-attempt
+	 * port with low load (below the emigration-attempt
 	 * threshold). If that fails, we try to find a port which is
 	 * below the max threshold, and also less loaded than this
 	 * port is.
 	 */
-	if (!dsw_select_migration_target(dsw, source_port, bursts, num_bursts,
-					 port_loads,
-					 DSW_MIN_SOURCE_LOAD_FOR_MIGRATION,
-					 &source_port->migration_target_qf,
-					 &source_port->migration_target_port_id)
+	if (!dsw_select_emigration_target(dsw, source_port, bursts, num_bursts,
+				      port_loads,
+				      DSW_MIN_SOURCE_LOAD_FOR_MIGRATION,
+				      &source_port->emigration_target_qf,
+				      &source_port->emigration_target_port_id)
 	    &&
-	    !dsw_select_migration_target(dsw, source_port, bursts, num_bursts,
-					 port_loads,
-					 DSW_MAX_TARGET_LOAD_FOR_MIGRATION,
-					 &source_port->migration_target_qf,
-				       &source_port->migration_target_port_id))
+	    !dsw_select_emigration_target(dsw, source_port, bursts, num_bursts,
+				      port_loads,
+				      DSW_MAX_TARGET_LOAD_FOR_MIGRATION,
+				      &source_port->emigration_target_qf,
+				      &source_port->emigration_target_port_id))
 		return;
 
 	DSW_LOG_DP_PORT(DEBUG, source_port->id, "Migrating queue_id %d "
 			"flow_hash %d from port %d to port %d.\n",
-			source_port->migration_target_qf.queue_id,
-			source_port->migration_target_qf.flow_hash,
-			source_port->id, source_port->migration_target_port_id);
+			source_port->emigration_target_qf.queue_id,
+			source_port->emigration_target_qf.flow_hash,
+			source_port->id,
+			source_port->emigration_target_port_id);
 
 	/* We have a winner. */
 
 	source_port->migration_state = DSW_MIGRATION_STATE_PAUSING;
-	source_port->migration_start = rte_get_timer_cycles();
+	source_port->emigration_start = rte_get_timer_cycles();
 
 	/* No need to go through the whole pause procedure for
 	 * parallel queues, since atomic/ordered semantics need not to
 	 * be maintained.
 	 */
 
-	if (dsw->queues[source_port->migration_target_qf.queue_id].schedule_type
-	    == RTE_SCHED_TYPE_PARALLEL) {
-		uint8_t queue_id = source_port->migration_target_qf.queue_id;
-		uint16_t flow_hash = source_port->migration_target_qf.flow_hash;
-		uint8_t dest_port_id = source_port->migration_target_port_id;
+	if (dsw->queues[source_port->emigration_target_qf.queue_id].
+	    schedule_type == RTE_SCHED_TYPE_PARALLEL) {
+		uint8_t queue_id =
+			source_port->emigration_target_qf.queue_id;
+		uint16_t flow_hash =
+			source_port->emigration_target_qf.flow_hash;
+		uint8_t dest_port_id =
+			source_port->emigration_target_port_id;
 
 		/* Single byte-sized stores are always atomic. */
 		dsw->queues[queue_id].flow_to_port_map[flow_hash] =
 			dest_port_id;
 		rte_smp_wmb();
 
-		dsw_port_end_migration(dsw, source_port);
+		dsw_port_end_emigration(dsw, source_port);
 
 		return;
 	}
@@ -775,12 +779,12 @@ dsw_port_consider_migration(struct dsw_evdev *dsw,
 	dsw_port_flush_out_buffers(dsw, source_port);
 
 	dsw_port_add_paused_flow(source_port,
-				 source_port->migration_target_qf.queue_id,
-				 source_port->migration_target_qf.flow_hash);
+				 source_port->emigration_target_qf.queue_id,
+				 source_port->emigration_target_qf.flow_hash);
 
 	dsw_port_ctl_broadcast(dsw, source_port, DSW_CTL_PAUS_REQ,
-			       source_port->migration_target_qf.queue_id,
-			       source_port->migration_target_qf.flow_hash);
+			       source_port->emigration_target_qf.queue_id,
+			       source_port->emigration_target_qf.flow_hash);
 	source_port->cfm_cnt = 0;
 }
 
@@ -808,6 +812,9 @@ dsw_port_handle_unpause_flow(struct dsw_evdev *dsw, struct dsw_port *port,
 
 	rte_smp_rmb();
 
+	if (dsw_schedule(dsw, queue_id, paused_flow_hash) == port->id)
+		port->immigrations++;
+
 	dsw_port_ctl_enqueue(&dsw->ports[originating_port_id], &cfm);
 
 	dsw_port_flush_paused_events(dsw, port, queue_id, paused_flow_hash);
@@ -816,10 +823,10 @@ dsw_port_handle_unpause_flow(struct dsw_evdev *dsw, struct dsw_port *port,
 #define FORWARD_BURST_SIZE (32)
 
 static void
-dsw_port_forward_migrated_flow(struct dsw_port *source_port,
-			       struct rte_event_ring *dest_ring,
-			       uint8_t queue_id,
-			       uint16_t flow_hash)
+dsw_port_forward_emigrated_flow(struct dsw_port *source_port,
+				struct rte_event_ring *dest_ring,
+				uint8_t queue_id,
+				uint16_t flow_hash)
 {
 	uint16_t events_left;
 
@@ -868,9 +875,9 @@ static void
 dsw_port_move_migrating_flow(struct dsw_evdev *dsw,
 			     struct dsw_port *source_port)
 {
-	uint8_t queue_id = source_port->migration_target_qf.queue_id;
-	uint16_t flow_hash = source_port->migration_target_qf.flow_hash;
-	uint8_t dest_port_id = source_port->migration_target_port_id;
+	uint8_t queue_id = source_port->emigration_target_qf.queue_id;
+	uint16_t flow_hash = source_port->emigration_target_qf.flow_hash;
+	uint8_t dest_port_id = source_port->emigration_target_port_id;
 	struct dsw_port *dest_port = &dsw->ports[dest_port_id];
 
 	dsw_port_flush_out_buffers(dsw, source_port);
@@ -880,8 +887,8 @@ dsw_port_move_migrating_flow(struct dsw_evdev *dsw,
 	dsw->queues[queue_id].flow_to_port_map[flow_hash] =
 		dest_port_id;
 
-	dsw_port_forward_migrated_flow(source_port, dest_port->in_ring,
-				       queue_id, flow_hash);
+	dsw_port_forward_emigrated_flow(source_port, dest_port->in_ring,
+					queue_id, flow_hash);
 
 	/* Flow table update and migration destination port's enqueues
 	 * must be seen before the control message.
@@ -907,7 +914,7 @@ dsw_port_handle_confirm(struct dsw_evdev *dsw, struct dsw_port *port)
 			port->migration_state = DSW_MIGRATION_STATE_FORWARDING;
 			break;
 		case DSW_MIGRATION_STATE_UNPAUSING:
-			dsw_port_end_migration(dsw, port);
+			dsw_port_end_emigration(dsw, port);
 			break;
 		default:
 			RTE_ASSERT(0);
@@ -987,7 +994,7 @@ dsw_port_bg_process(struct dsw_evdev *dsw, struct dsw_port *port)
 
 		dsw_port_consider_load_update(port, now);
 
-		dsw_port_consider_migration(dsw, port, now);
+		dsw_port_consider_emigration(dsw, port, now);
 
 		port->ops_since_bg_task = 0;
 	}
diff --git a/drivers/event/dsw/dsw_xstats.c b/drivers/event/dsw/dsw_xstats.c
index c3f5db89c..d332a57b6 100644
--- a/drivers/event/dsw/dsw_xstats.c
+++ b/drivers/event/dsw/dsw_xstats.c
@@ -84,16 +84,17 @@ dsw_xstats_port_get_queue_dequeued(struct dsw_evdev *dsw, uint8_t port_id,
 	return dsw->ports[port_id].queue_dequeued[queue_id];
 }
 
-DSW_GEN_PORT_ACCESS_FN(migrations)
+DSW_GEN_PORT_ACCESS_FN(emigrations)
+DSW_GEN_PORT_ACCESS_FN(immigrations)
 
 static uint64_t
 dsw_xstats_port_get_migration_latency(struct dsw_evdev *dsw, uint8_t port_id,
 				      uint8_t queue_id __rte_unused)
 {
-	uint64_t total_latency = dsw->ports[port_id].migration_latency;
-	uint64_t num_migrations = dsw->ports[port_id].migrations;
+	uint64_t total_latency = dsw->ports[port_id].emigration_latency;
+	uint64_t num_emigrations = dsw->ports[port_id].emigrations;
 
-	return num_migrations > 0 ? total_latency / num_migrations : 0;
+	return num_emigrations > 0 ? total_latency / num_emigrations : 0;
 }
 
 static uint64_t
@@ -110,6 +111,8 @@ dsw_xstats_port_get_event_proc_latency(struct dsw_evdev *dsw, uint8_t port_id,
 
 DSW_GEN_PORT_ACCESS_FN(inflight_credits)
 
+DSW_GEN_PORT_ACCESS_FN(pending_releases)
+
 static uint64_t
 dsw_xstats_port_get_load(struct dsw_evdev *dsw, uint8_t port_id,
 			 uint8_t queue_id __rte_unused)
@@ -136,14 +139,18 @@ static struct dsw_xstats_port dsw_port_xstats[] = {
 	  false },
 	{ "port_%u_queue_%u_dequeued", dsw_xstats_port_get_queue_dequeued,
 	  true },
-	{ "port_%u_migrations", dsw_xstats_port_get_migrations,
+	{ "port_%u_emigrations", dsw_xstats_port_get_emigrations,
 	  false },
 	{ "port_%u_migration_latency", dsw_xstats_port_get_migration_latency,
 	  false },
+	{ "port_%u_immigrations", dsw_xstats_port_get_immigrations,
+	  false },
 	{ "port_%u_event_proc_latency", dsw_xstats_port_get_event_proc_latency,
 	  false },
 	{ "port_%u_inflight_credits", dsw_xstats_port_get_inflight_credits,
 	  false },
+	{ "port_%u_pending_releases", dsw_xstats_port_get_pending_releases,
+	  false },
 	{ "port_%u_load", dsw_xstats_port_get_load,
 	  false },
 	{ "port_%u_last_bg", dsw_xstats_port_get_last_bg,
-- 
2.17.1


Thread overview: 17+ messages
2020-03-09  6:50 [dpdk-dev] [PATCH 0/8] DSW performance and statistics improvements Mattias Rönnblom
2020-03-09  6:50 ` [dpdk-dev] [PATCH 1/8] event/dsw: reduce latency in low-load situations Mattias Rönnblom
2020-03-09  6:51 ` [dpdk-dev] [PATCH 2/8] event/dsw: reduce max flows to speed up load balancing Mattias Rönnblom
2020-03-09  6:51 ` Mattias Rönnblom [this message]
2020-03-09  6:51 ` [dpdk-dev] [PATCH 4/8] event/dsw: improve migration mechanism Mattias Rönnblom
2020-03-09  6:51 ` [dpdk-dev] [PATCH 5/8] event/dsw: avoid migration waves in large systems Mattias Rönnblom
2020-03-09  6:51 ` [dpdk-dev] [PATCH 6/8] event/dsw: remove redundant control ring poll Mattias Rönnblom
2020-03-09  6:51 ` [dpdk-dev] [PATCH 7/8] event/dsw: remove unnecessary read barrier Mattias Rönnblom
2020-03-09  6:51 ` [dpdk-dev] [PATCH 8/8] event/dsw: add port busy cycles xstats Mattias Rönnblom
2020-04-04 14:35 ` [dpdk-dev] [EXT] [PATCH 0/8] DSW performance and statistics improvements Jerin Jacob Kollanukkaran
2020-04-15 16:37   ` David Marchand
2020-04-15 17:39     ` Mattias Rönnblom
2020-04-15 17:45       ` [dpdk-dev] [dpdk-ci] " Thomas Monjalon
2020-04-15 18:09         ` Mattias Rönnblom
2020-04-15 18:15           ` [dpdk-dev] [PATCH v2] event/dsw: fix gcc 4.8 false positive warning Mattias Rönnblom
2020-04-15 19:45             ` David Marchand
2020-04-16  6:15               ` Mattias Rönnblom
