patches for DPDK stable branches
From: luca.boccassi@gmail.com
To: "Mattias Rönnblom" <mattias.ronnblom@ericsson.com>
Cc: dpdk stable <stable@dpdk.org>
Subject: patch 'event/dsw: fix flow migration' has been queued to stable release 20.11.7
Date: Thu,  3 Nov 2022 09:26:36 +0000
Message-ID: <20221103092758.1099402-18-luca.boccassi@gmail.com>
In-Reply-To: <20221103092758.1099402-1-luca.boccassi@gmail.com>

Hi,

FYI, your patch has been queued to stable release 20.11.7

Note it hasn't been pushed to http://dpdk.org/browse/dpdk-stable yet.
It will be pushed if I get no objections before 11/05/22, so please
shout if you have any.

Also note that after the patch there's a diff of the upstream commit vs the
patch applied to the branch. This will indicate if there was any rebasing
needed to apply to the stable branch. If there were code changes for rebasing
(i.e. not only metadata diffs), please double-check that the rebase was
done correctly.
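
For example, assuming your tree already has the upstream dpdk history and
you add the queue repository below as a remote (the remote name here is
arbitrary), the backport can be compared against the original commit with
something like:

  git remote add stable-queue https://github.com/kevintraynor/dpdk-stable
  git fetch stable-queue
  git range-diff 70cb0278a4c52a857fb56cda2183e2ee3fa2633a^! \
                 c69260f49962d781417a0d17256d2386b46b4d3d^!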

Queued patches are on a temporary branch at:
https://github.com/kevintraynor/dpdk-stable

This queued commit can be viewed at:
https://github.com/kevintraynor/dpdk-stable/commit/c69260f49962d781417a0d17256d2386b46b4d3d

Thanks.

Luca Boccassi

---
From c69260f49962d781417a0d17256d2386b46b4d3d Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Mattias=20R=C3=B6nnblom?= <mattias.ronnblom@ericsson.com>
Date: Thu, 7 Jul 2022 13:43:25 +0200
Subject: [PATCH] event/dsw: fix flow migration
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

[ upstream commit 70cb0278a4c52a857fb56cda2183e2ee3fa2633a ]

Fix bug in flow migration, which under certain conditions causes
reordering and violation of atomicity guarantees.

The issue occurs when the processing of a flow (on an atomic queue)
has resulted in events enqueued to a flow currently being migrated,
and the former (producer) flow is also selected for migration. The
events are buffered ("paused") on the originating port, and released
(forwarded) when the migration has completed. However, at the time of
"unpausing" the latter (consumer) flow, processing of the producer
flow on the port to which it was migrated may have already produced
events, for the same paused flow. This constitutes a race condition,
and depending on which port wins, reordering may have been introduced.

This patch forbids migration when a port has paused events, since
those events may have been the result of processing a to-be-migrated
flow.

This patch also disallows processing events pertaining to a flow under
migration, for the same reason. A new buffer is introduced, which
holds such not-yet-processed events dequeued from the port's input
ring. Such events are forwarded to the target port as a part of the
migration process.

The 'forwarding' migration state is eliminated, and instead background
processing is only performed if there are no unreleased events on the
port.

The bug is primarily triggered in situations where multiple flows are
migrated as one transaction, but may occur even if only a single flow
is migrated (e.g., with older DSW versions, which do not support
multi-flow migration).

Fixes: f6257b22e767 ("event/dsw: add load balancing")

Signed-off-by: Mattias Rönnblom <mattias.ronnblom@ericsson.com>
---
 drivers/event/dsw/dsw_evdev.h |   8 +-
 drivers/event/dsw/dsw_event.c | 315 ++++++++++++++++++++++++----------
 2 files changed, 232 insertions(+), 91 deletions(-)

diff --git a/drivers/event/dsw/dsw_evdev.h b/drivers/event/dsw/dsw_evdev.h
index 6513d35ee7..ceabf9557d 100644
--- a/drivers/event/dsw/dsw_evdev.h
+++ b/drivers/event/dsw/dsw_evdev.h
@@ -126,7 +126,6 @@ struct dsw_queue_flow {
 enum dsw_migration_state {
 	DSW_MIGRATION_STATE_IDLE,
 	DSW_MIGRATION_STATE_PAUSING,
-	DSW_MIGRATION_STATE_FORWARDING,
 	DSW_MIGRATION_STATE_UNPAUSING
 };
 
@@ -190,6 +189,13 @@ struct dsw_port {
 	uint16_t paused_events_len;
 	struct rte_event paused_events[DSW_MAX_EVENTS];
 
+	uint16_t emigrating_events_len;
+	/* Buffer for not-yet-processed events pertaining to a flow
+	 * emigrating from this port. These events will be forwarded
+	 * to the target port.
+	 */
+	struct rte_event emigrating_events[DSW_MAX_EVENTS];
+
 	uint16_t seen_events_len;
 	uint16_t seen_events_idx;
 	struct dsw_queue_flow seen_events[DSW_MAX_EVENTS_RECORDED];
diff --git a/drivers/event/dsw/dsw_event.c b/drivers/event/dsw/dsw_event.c
index 8b81dc5c56..76c89056a1 100644
--- a/drivers/event/dsw/dsw_event.c
+++ b/drivers/event/dsw/dsw_event.c
@@ -234,6 +234,15 @@ dsw_port_is_flow_paused(struct dsw_port *port, uint8_t queue_id,
 					queue_id, flow_hash);
 }
 
+static __rte_always_inline bool
+dsw_port_is_flow_migrating(struct dsw_port *port, uint8_t queue_id,
+			   uint16_t flow_hash)
+{
+	return dsw_is_queue_flow_in_ary(port->emigration_target_qfs,
+					port->emigration_targets_len,
+					queue_id, flow_hash);
+}
+
 static void
 dsw_port_add_paused_flows(struct dsw_port *port, struct dsw_queue_flow *qfs,
 			  uint8_t qfs_len)
@@ -268,9 +277,19 @@ dsw_port_remove_paused_flow(struct dsw_port *port,
 				port->paused_flows[i] =
 					port->paused_flows[last_idx];
 			port->paused_flows_len--;
-			break;
+
+			DSW_LOG_DP_PORT(DEBUG, port->id,
+					"Unpausing queue_id %d flow_hash %d.\n",
+					target_qf->queue_id,
+					target_qf->flow_hash);
+
+			return;
 		}
 	}
+
+	DSW_LOG_DP_PORT(ERR, port->id,
+			"Failed to unpause queue_id %d flow_hash %d.\n",
+			target_qf->queue_id, target_qf->flow_hash);
 }
 
 static void
@@ -281,7 +300,6 @@ dsw_port_remove_paused_flows(struct dsw_port *port,
 
 	for (i = 0; i < qfs_len; i++)
 		dsw_port_remove_paused_flow(port, &qfs[i]);
-
 }
 
 static void
@@ -434,14 +452,15 @@ dsw_is_serving_port(struct dsw_evdev *dsw, uint8_t port_id, uint8_t queue_id)
 
 static bool
 dsw_select_emigration_target(struct dsw_evdev *dsw,
-			    struct dsw_queue_flow_burst *bursts,
-			    uint16_t num_bursts, uint8_t source_port_id,
-			    int16_t *port_loads, uint16_t num_ports,
-			    uint8_t *target_port_ids,
-			    struct dsw_queue_flow *target_qfs,
-			    uint8_t *targets_len)
+			     struct dsw_port *source_port,
+			     struct dsw_queue_flow_burst *bursts,
+			     uint16_t num_bursts,
+			     int16_t *port_loads, uint16_t num_ports,
+			     uint8_t *target_port_ids,
+			     struct dsw_queue_flow *target_qfs,
+			     uint8_t *targets_len)
 {
-	int16_t source_port_load = port_loads[source_port_id];
+	int16_t source_port_load = port_loads[source_port->id];
 	struct dsw_queue_flow *candidate_qf = NULL;
 	uint8_t candidate_port_id = 0;
 	int16_t candidate_weight = -1;
@@ -466,7 +485,7 @@ dsw_select_emigration_target(struct dsw_evdev *dsw,
 		for (port_id = 0; port_id < num_ports; port_id++) {
 			int16_t weight;
 
-			if (port_id == source_port_id)
+			if (port_id == source_port->id)
 				continue;
 
 			if (!dsw_is_serving_port(dsw, port_id, qf->queue_id))
@@ -488,7 +507,7 @@ dsw_select_emigration_target(struct dsw_evdev *dsw,
 	if (candidate_weight < 0)
 		return false;
 
-	DSW_LOG_DP_PORT(DEBUG, source_port_id, "Selected queue_id %d "
+	DSW_LOG_DP_PORT(DEBUG, source_port->id, "Selected queue_id %d "
 			"flow_hash %d (with flow load %d) for migration "
 			"to port %d.\n", candidate_qf->queue_id,
 			candidate_qf->flow_hash,
@@ -496,7 +515,7 @@ dsw_select_emigration_target(struct dsw_evdev *dsw,
 			candidate_port_id);
 
 	port_loads[candidate_port_id] += candidate_flow_load;
-	port_loads[source_port_id] -= candidate_flow_load;
+	port_loads[source_port->id] -= candidate_flow_load;
 
 	target_port_ids[*targets_len] = candidate_port_id;
 	target_qfs[*targets_len] = *candidate_qf;
@@ -522,8 +541,8 @@ dsw_select_emigration_targets(struct dsw_evdev *dsw,
 	for (i = 0; i < DSW_MAX_FLOWS_PER_MIGRATION; i++) {
 		bool found;
 
-		found = dsw_select_emigration_target(dsw, bursts, num_bursts,
-						     source_port->id,
+		found = dsw_select_emigration_target(dsw, source_port,
+						     bursts, num_bursts,
 						     port_loads, dsw->num_ports,
 						     target_port_ids,
 						     target_qfs,
@@ -603,6 +622,7 @@ dsw_port_buffer_paused(struct dsw_port *port,
 	port->paused_events_len++;
 }
 
+
 static void
 dsw_port_buffer_non_paused(struct dsw_evdev *dsw, struct dsw_port *source_port,
 			   uint8_t dest_port_id, const struct rte_event *event)
@@ -674,40 +694,39 @@ dsw_port_buffer_event(struct dsw_evdev *dsw, struct dsw_port *source_port,
 }
 
 static void
-dsw_port_flush_paused_events(struct dsw_evdev *dsw,
-			     struct dsw_port *source_port,
-			     const struct dsw_queue_flow *qf)
+dsw_port_flush_no_longer_paused_events(struct dsw_evdev *dsw,
+				       struct dsw_port *source_port)
 {
 	uint16_t paused_events_len = source_port->paused_events_len;
 	struct rte_event paused_events[paused_events_len];
-	uint8_t dest_port_id;
 	uint16_t i;
 
 	if (paused_events_len == 0)
 		return;
 
-	if (dsw_port_is_flow_paused(source_port, qf->queue_id, qf->flow_hash))
-		return;
-
 	rte_memcpy(paused_events, source_port->paused_events,
 		   paused_events_len * sizeof(struct rte_event));
 
 	source_port->paused_events_len = 0;
 
-	dest_port_id = dsw_schedule(dsw, qf->queue_id, qf->flow_hash);
-
 	for (i = 0; i < paused_events_len; i++) {
 		struct rte_event *event = &paused_events[i];
 		uint16_t flow_hash;
 
 		flow_hash = dsw_flow_id_hash(event->flow_id);
 
-		if (event->queue_id == qf->queue_id &&
-		    flow_hash == qf->flow_hash)
+		if (dsw_port_is_flow_paused(source_port, event->queue_id,
+					    flow_hash))
+			dsw_port_buffer_paused(source_port, event);
+		else {
+			uint8_t dest_port_id;
+
+			dest_port_id = dsw_schedule(dsw, event->queue_id,
+						    flow_hash);
+
 			dsw_port_buffer_non_paused(dsw, source_port,
 						   dest_port_id, event);
-		else
-			dsw_port_buffer_paused(source_port, event);
+		}
 	}
 }
 
@@ -750,11 +769,6 @@ dsw_port_end_emigration(struct dsw_evdev *dsw, struct dsw_port *port,
 		DSW_LOG_DP_PORT(DEBUG, port->id, "Migration completed for "
 				"queue_id %d flow_hash %d.\n", queue_id,
 				flow_hash);
-
-		if (queue_schedule_type == RTE_SCHED_TYPE_ATOMIC) {
-			dsw_port_remove_paused_flow(port, qf);
-			dsw_port_flush_paused_events(dsw, port, qf);
-		}
 	}
 
 	finished = port->emigration_targets_len - left_qfs_len;
@@ -821,11 +835,32 @@ dsw_port_consider_emigration(struct dsw_evdev *dsw,
 	if (dsw->num_ports == 1)
 		return;
 
-	if (seen_events_len < DSW_MAX_EVENTS_RECORDED)
-		return;
-
 	DSW_LOG_DP_PORT(DEBUG, source_port->id, "Considering emigration.\n");
 
+	if (seen_events_len < DSW_MAX_EVENTS_RECORDED) {
+		DSW_LOG_DP_PORT(DEBUG, source_port->id, "Not enough events "
+				"are recorded to allow for a migration.\n");
+		return;
+	}
+
+	/* A flow migration cannot be initiated if there are paused
+	 * events, since some/all of those events may have been
+	 * produced as a result of processing the flow(s) selected for
+	 * migration. Moving such a flow would potentially introduce
+	 * reordering, since processing the migrated flow on the
+	 * receiving port may commence before the to-be-enqueued-to
+	 * flows are unpaused, leading to paused events on the second
+	 * port as well, destined for the same paused flow(s). When
+	 * those flows are unpaused, the resulting events are
+	 * delivered to the owning port in an undefined order.
+	 */
+	if (source_port->paused_events_len > 0) {
+		DSW_LOG_DP_PORT(DEBUG, source_port->id, "There are "
+				"events in the pause buffer.\n");
+		return;
+	}
+
 	/* Randomize interval to avoid having all threads considering
 	 * emigration at the same in point in time, which might lead
 	 * to all choosing the same target port.
@@ -921,9 +956,8 @@ dsw_port_consider_emigration(struct dsw_evdev *dsw,
 }
 
 static void
-dsw_port_flush_paused_events(struct dsw_evdev *dsw,
-			     struct dsw_port *source_port,
-			     const struct dsw_queue_flow *qf);
+dsw_port_flush_no_longer_paused_events(struct dsw_evdev *dsw,
+				       struct dsw_port *source_port);
 
 static void
 dsw_port_handle_unpause_flows(struct dsw_evdev *dsw, struct dsw_port *port,
@@ -948,62 +982,123 @@ dsw_port_handle_unpause_flows(struct dsw_evdev *dsw, struct dsw_port *port,
 
 		if (dsw_schedule(dsw, qf->queue_id, qf->flow_hash) == port->id)
 			port->immigrations++;
+	}
+
+	dsw_port_flush_no_longer_paused_events(dsw, port);
+}
+
+static void
+dsw_port_buffer_in_buffer(struct dsw_port *port,
+			  const struct rte_event *event)
+
+{
+	RTE_ASSERT(port->in_buffer_start == 0);
+
+	port->in_buffer[port->in_buffer_len] = *event;
+	port->in_buffer_len++;
+}
 
-		dsw_port_flush_paused_events(dsw, port, qf);
+static void
+dsw_port_forward_emigrated_event(struct dsw_evdev *dsw,
+				 struct dsw_port *source_port,
+				 struct rte_event *event)
+{
+	uint16_t i;
+
+	for (i = 0; i < source_port->emigration_targets_len; i++) {
+		struct dsw_queue_flow *qf =
+			&source_port->emigration_target_qfs[i];
+		uint8_t dest_port_id =
+			source_port->emigration_target_port_ids[i];
+		struct dsw_port *dest_port = &dsw->ports[dest_port_id];
+
+		if (event->queue_id == qf->queue_id &&
+		    dsw_flow_id_hash(event->flow_id) == qf->flow_hash) {
+			/* No need to care about bursting forwarded
+			 * events (to the destination port's in_ring),
+			 * since migration doesn't happen very often,
+			 * and also the majority of the dequeued
+			 * events will likely *not* be forwarded.
+			 */
+			while (rte_event_ring_enqueue_burst(dest_port->in_ring,
+							    event, 1,
+							    NULL) != 1)
+				rte_pause();
+			return;
+		}
 	}
+
+	/* Event did not belong to the emigrated flows */
+	dsw_port_buffer_in_buffer(source_port, event);
+}
+
+static void
+dsw_port_stash_migrating_event(struct dsw_port *port,
+			       const struct rte_event *event)
+{
+	port->emigrating_events[port->emigrating_events_len] = *event;
+	port->emigrating_events_len++;
 }
 
-#define FORWARD_BURST_SIZE (32)
+#define DRAIN_DEQUEUE_BURST_SIZE (32)
 
 static void
-dsw_port_forward_emigrated_flow(struct dsw_port *source_port,
-				struct rte_event_ring *dest_ring,
-				uint8_t queue_id,
-				uint16_t flow_hash)
+dsw_port_drain_in_ring(struct dsw_port *source_port)
 {
-	uint16_t events_left;
+	uint16_t num_events;
+	uint16_t dequeued;
 
 	/* Control ring message should been seen before the ring count
 	 * is read on the port's in_ring.
 	 */
 	rte_smp_rmb();
 
-	events_left = rte_event_ring_count(source_port->in_ring);
+	num_events = rte_event_ring_count(source_port->in_ring);
 
-	while (events_left > 0) {
-		uint16_t in_burst_size =
-			RTE_MIN(FORWARD_BURST_SIZE, events_left);
-		struct rte_event in_burst[in_burst_size];
-		uint16_t in_len;
+	for (dequeued = 0; dequeued < num_events; ) {
+		uint16_t burst_size = RTE_MIN(DRAIN_DEQUEUE_BURST_SIZE,
+					      num_events - dequeued);
+		struct rte_event events[burst_size];
+		uint16_t len;
 		uint16_t i;
 
-		in_len = rte_event_ring_dequeue_burst(source_port->in_ring,
-						      in_burst,
-						      in_burst_size, NULL);
-		/* No need to care about bursting forwarded events (to
-		 * the destination port's in_ring), since migration
-		 * doesn't happen very often, and also the majority of
-		 * the dequeued events will likely *not* be forwarded.
-		 */
-		for (i = 0; i < in_len; i++) {
-			struct rte_event *e = &in_burst[i];
-			if (e->queue_id == queue_id &&
-			    dsw_flow_id_hash(e->flow_id) == flow_hash) {
-				while (rte_event_ring_enqueue_burst(dest_ring,
-								    e, 1,
-								    NULL) != 1)
-					rte_pause();
-			} else {
-				uint16_t last_idx = source_port->in_buffer_len;
-				source_port->in_buffer[last_idx] = *e;
-				source_port->in_buffer_len++;
-			}
+		len = rte_event_ring_dequeue_burst(source_port->in_ring,
+						   events, burst_size,
+						   NULL);
+
+		for (i = 0; i < len; i++) {
+			struct rte_event *event = &events[i];
+			uint16_t flow_hash;
+
+			flow_hash = dsw_flow_id_hash(event->flow_id);
+
+			if (unlikely(dsw_port_is_flow_migrating(source_port,
+								event->queue_id,
+								flow_hash)))
+				dsw_port_stash_migrating_event(source_port,
+							       event);
+			else
+				dsw_port_buffer_in_buffer(source_port, event);
 		}
 
-		events_left -= in_len;
+		dequeued += len;
 	}
 }
 
+static void
+dsw_port_forward_emigrated_flows(struct dsw_evdev *dsw,
+				 struct dsw_port *source_port)
+{
+	uint16_t i;
+
+	for (i = 0; i < source_port->emigrating_events_len; i++) {
+		struct rte_event *event = &source_port->emigrating_events[i];
+
+		dsw_port_forward_emigrated_event(dsw, source_port, event);
+	}
+	source_port->emigrating_events_len = 0;
+}
+
 static void
 dsw_port_move_emigrating_flows(struct dsw_evdev *dsw,
 			       struct dsw_port *source_port)
@@ -1012,22 +1107,27 @@ dsw_port_move_emigrating_flows(struct dsw_evdev *dsw,
 
 	dsw_port_flush_out_buffers(dsw, source_port);
 
-	rte_smp_wmb();
-
 	for (i = 0; i < source_port->emigration_targets_len; i++) {
 		struct dsw_queue_flow *qf =
 			&source_port->emigration_target_qfs[i];
 		uint8_t dest_port_id =
 			source_port->emigration_target_port_ids[i];
-		struct dsw_port *dest_port = &dsw->ports[dest_port_id];
 
 		dsw->queues[qf->queue_id].flow_to_port_map[qf->flow_hash] =
-			dest_port_id;
-
-		dsw_port_forward_emigrated_flow(source_port, dest_port->in_ring,
-						qf->queue_id, qf->flow_hash);
+		    dest_port_id;
 	}
 
+	rte_smp_wmb();
+
+	dsw_port_drain_in_ring(source_port);
+	dsw_port_forward_emigrated_flows(dsw, source_port);
+
+	dsw_port_remove_paused_flows(source_port,
+				     source_port->emigration_target_qfs,
+				     source_port->emigration_targets_len);
+
+	dsw_port_flush_no_longer_paused_events(dsw, source_port);
+
 	/* Flow table update and migration destination port's enqueues
 	 * must be seen before the control message.
 	 */
@@ -1048,9 +1148,7 @@ dsw_port_handle_confirm(struct dsw_evdev *dsw, struct dsw_port *port)
 	if (port->cfm_cnt == (dsw->num_ports-1)) {
 		switch (port->migration_state) {
 		case DSW_MIGRATION_STATE_PAUSING:
-			DSW_LOG_DP_PORT(DEBUG, port->id, "Going into forwarding "
-					"migration state.\n");
-			port->migration_state = DSW_MIGRATION_STATE_FORWARDING;
+			dsw_port_move_emigrating_flows(dsw, port);
 			break;
 		case DSW_MIGRATION_STATE_UNPAUSING:
 			dsw_port_end_emigration(dsw, port,
@@ -1090,18 +1188,18 @@ dsw_port_ctl_process(struct dsw_evdev *dsw, struct dsw_port *port)
 static void
 dsw_port_note_op(struct dsw_port *port, uint16_t num_events)
 {
-	/* To pull the control ring reasonably often on busy ports,
-	 * each dequeued/enqueued event is considered an 'op' too.
-	 */
 	port->ops_since_bg_task += (num_events+1);
 }
 
 static void
 dsw_port_bg_process(struct dsw_evdev *dsw, struct dsw_port *port)
 {
-	if (unlikely(port->migration_state == DSW_MIGRATION_STATE_FORWARDING &&
-		     port->pending_releases == 0))
-		dsw_port_move_emigrating_flows(dsw, port);
+	/* For simplicity (in the migration logic), avoid all
+	 * background processing in case event processing is in
+	 * progress.
+	 */
+	if (port->pending_releases > 0)
+		return;
 
 	/* Polling the control ring is relatively inexpensive, and
 	 * polling it often helps bringing down migration latency, so
@@ -1161,7 +1259,7 @@ dsw_event_enqueue_burst_generic(struct dsw_port *source_port,
 	uint16_t i;
 
 	DSW_LOG_DP_PORT(DEBUG, source_port->id, "Attempting to enqueue %d "
-			"events to port %d.\n", events_len, source_port->id);
+			"events.\n", events_len);
 
 	dsw_port_bg_process(dsw, source_port);
 
@@ -1344,6 +1442,38 @@ dsw_port_dequeue_burst(struct dsw_port *port, struct rte_event *events,
 	return rte_event_ring_dequeue_burst(port->in_ring, events, num, NULL);
 }
 
+static void
+dsw_port_stash_migrating_events(struct dsw_port *port,
+				struct rte_event *events, uint16_t *num)
+{
+	uint16_t i;
+
+	/* The assumption here - performance-wise - is that events
+	 * belonging to migrating flows are relatively rare.
+	 */
+	for (i = 0; i < (*num); ) {
+		struct rte_event *event = &events[i];
+		uint16_t flow_hash;
+
+		flow_hash = dsw_flow_id_hash(event->flow_id);
+
+		if (unlikely(dsw_port_is_flow_migrating(port, event->queue_id,
+							flow_hash))) {
+			uint16_t left;
+
+			dsw_port_stash_migrating_event(port, event);
+
+			(*num)--;
+			left = *num - i;
+
+			if (left > 0)
+				memmove(event, event + 1,
+					left * sizeof(struct rte_event));
+		} else
+			i++;
+	}
+}
+
 uint16_t
 dsw_event_dequeue_burst(void *port, struct rte_event *events, uint16_t num,
 			uint64_t wait __rte_unused)
@@ -1361,6 +1491,11 @@ dsw_event_dequeue_burst(void *port, struct rte_event *events, uint16_t num,
 
 	dequeued = dsw_port_dequeue_burst(source_port, events, num);
 
+	if (unlikely(source_port->migration_state ==
+		     DSW_MIGRATION_STATE_PAUSING))
+		dsw_port_stash_migrating_events(source_port, events,
+						&dequeued);
+
 	source_port->pending_releases = dequeued;
 
 	dsw_port_load_record(source_port, dequeued);
-- 
2.34.1

---
  Diff of the applied patch vs upstream commit (please double-check if non-empty):
---
--- -	2022-11-03 09:27:26.560230939 +0000
+++ 0018-event-dsw-fix-flow-migration.patch	2022-11-03 09:27:25.325421511 +0000
@@ -1 +1 @@
-From 70cb0278a4c52a857fb56cda2183e2ee3fa2633a Mon Sep 17 00:00:00 2001
+From c69260f49962d781417a0d17256d2386b46b4d3d Mon Sep 17 00:00:00 2001
@@ -8,0 +9,2 @@
+[ upstream commit 70cb0278a4c52a857fb56cda2183e2ee3fa2633a ]
+
@@ -42 +43,0 @@
-Cc: stable@dpdk.org
@@ -51 +52 @@
-index c907c00c78..df7dcc5577 100644
+index 6513d35ee7..ceabf9557d 100644
@@ -54 +55 @@
-@@ -128,7 +128,6 @@ struct dsw_queue_flow {
+@@ -126,7 +126,6 @@ struct dsw_queue_flow {
@@ -62 +63 @@
-@@ -192,6 +191,13 @@ struct dsw_port {
+@@ -190,6 +189,13 @@ struct dsw_port {
@@ -77 +78 @@
-index 340561b4e6..9932caf2ee 100644
+index 8b81dc5c56..76c89056a1 100644
@@ -80 +81 @@
-@@ -238,6 +238,15 @@ dsw_port_is_flow_paused(struct dsw_port *port, uint8_t queue_id,
+@@ -234,6 +234,15 @@ dsw_port_is_flow_paused(struct dsw_port *port, uint8_t queue_id,
@@ -96 +97 @@
-@@ -272,9 +281,19 @@ dsw_port_remove_paused_flow(struct dsw_port *port,
+@@ -268,9 +277,19 @@ dsw_port_remove_paused_flow(struct dsw_port *port,
@@ -117 +118 @@
-@@ -285,7 +304,6 @@ dsw_port_remove_paused_flows(struct dsw_port *port,
+@@ -281,7 +300,6 @@ dsw_port_remove_paused_flows(struct dsw_port *port,
@@ -125 +126 @@
-@@ -440,14 +458,15 @@ dsw_is_serving_port(struct dsw_evdev *dsw, uint8_t port_id, uint8_t queue_id)
+@@ -434,14 +452,15 @@ dsw_is_serving_port(struct dsw_evdev *dsw, uint8_t port_id, uint8_t queue_id)
@@ -148 +149 @@
-@@ -472,7 +491,7 @@ dsw_select_emigration_target(struct dsw_evdev *dsw,
+@@ -466,7 +485,7 @@ dsw_select_emigration_target(struct dsw_evdev *dsw,
@@ -157 +158 @@
-@@ -494,7 +513,7 @@ dsw_select_emigration_target(struct dsw_evdev *dsw,
+@@ -488,7 +507,7 @@ dsw_select_emigration_target(struct dsw_evdev *dsw,
@@ -166 +167 @@
-@@ -502,7 +521,7 @@ dsw_select_emigration_target(struct dsw_evdev *dsw,
+@@ -496,7 +515,7 @@ dsw_select_emigration_target(struct dsw_evdev *dsw,
@@ -175 +176 @@
-@@ -528,8 +547,8 @@ dsw_select_emigration_targets(struct dsw_evdev *dsw,
+@@ -522,8 +541,8 @@ dsw_select_emigration_targets(struct dsw_evdev *dsw,
@@ -186 +187 @@
-@@ -609,6 +628,7 @@ dsw_port_buffer_paused(struct dsw_port *port,
+@@ -603,6 +622,7 @@ dsw_port_buffer_paused(struct dsw_port *port,
@@ -194 +195 @@
-@@ -680,40 +700,39 @@ dsw_port_buffer_event(struct dsw_evdev *dsw, struct dsw_port *source_port,
+@@ -674,40 +694,39 @@ dsw_port_buffer_event(struct dsw_evdev *dsw, struct dsw_port *source_port,
@@ -247 +248 @@
-@@ -756,11 +775,6 @@ dsw_port_end_emigration(struct dsw_evdev *dsw, struct dsw_port *port,
+@@ -750,11 +769,6 @@ dsw_port_end_emigration(struct dsw_evdev *dsw, struct dsw_port *port,
@@ -259 +260 @@
-@@ -827,11 +841,32 @@ dsw_port_consider_emigration(struct dsw_evdev *dsw,
+@@ -821,11 +835,32 @@ dsw_port_consider_emigration(struct dsw_evdev *dsw,
@@ -295 +296 @@
-@@ -928,9 +963,8 @@ dsw_port_consider_emigration(struct dsw_evdev *dsw,
+@@ -921,9 +956,8 @@ dsw_port_consider_emigration(struct dsw_evdev *dsw,
@@ -307 +308 @@
-@@ -955,62 +989,123 @@ dsw_port_handle_unpause_flows(struct dsw_evdev *dsw, struct dsw_port *port,
+@@ -948,62 +982,123 @@ dsw_port_handle_unpause_flows(struct dsw_evdev *dsw, struct dsw_port *port,
@@ -466 +467 @@
-@@ -1019,22 +1114,27 @@ dsw_port_move_emigrating_flows(struct dsw_evdev *dsw,
+@@ -1012,22 +1107,27 @@ dsw_port_move_emigrating_flows(struct dsw_evdev *dsw,
@@ -501 +502 @@
-@@ -1055,9 +1155,7 @@ dsw_port_handle_confirm(struct dsw_evdev *dsw, struct dsw_port *port)
+@@ -1048,9 +1148,7 @@ dsw_port_handle_confirm(struct dsw_evdev *dsw, struct dsw_port *port)
@@ -512 +513 @@
-@@ -1097,18 +1195,18 @@ dsw_port_ctl_process(struct dsw_evdev *dsw, struct dsw_port *port)
+@@ -1090,18 +1188,18 @@ dsw_port_ctl_process(struct dsw_evdev *dsw, struct dsw_port *port)
@@ -537 +538 @@
-@@ -1168,7 +1266,7 @@ dsw_event_enqueue_burst_generic(struct dsw_port *source_port,
+@@ -1161,7 +1259,7 @@ dsw_event_enqueue_burst_generic(struct dsw_port *source_port,
@@ -546 +547 @@
-@@ -1352,6 +1450,38 @@ dsw_port_dequeue_burst(struct dsw_port *port, struct rte_event *events,
+@@ -1344,6 +1442,38 @@ dsw_port_dequeue_burst(struct dsw_port *port, struct rte_event *events,
@@ -585 +586 @@
-@@ -1369,6 +1499,11 @@ dsw_event_dequeue_burst(void *port, struct rte_event *events, uint16_t num,
+@@ -1361,6 +1491,11 @@ dsw_event_dequeue_burst(void *port, struct rte_event *events, uint16_t num,
