DPDK patches and discussions
 help / color / mirror / Atom feed
From: <pbhagavatula@marvell.com>
To: <jerinj@marvell.com>
Cc: <dev@dpdk.org>, Pavan Nikhilesh <pbhagavatula@marvell.com>
Subject: [PATCH v2 4/6] examples/l3fwd: clean up worker state before exit
Date: Fri, 13 May 2022 21:37:17 +0530	[thread overview]
Message-ID: <20220513160719.10558-4-pbhagavatula@marvell.com> (raw)
In-Reply-To: <20220513160719.10558-1-pbhagavatula@marvell.com>

From: Pavan Nikhilesh <pbhagavatula@marvell.com>

Event ports are configured to implicitly release the scheduler contexts
currently held in the next call to rte_event_dequeue_burst().
A worker core might still hold a scheduling context during exit, as the
next call to rte_event_dequeue_burst() is never made.
This might lead to a deadlock depending on the worker exit timing, especially
when there are very few flows.

Add a cleanup function to release any scheduling contexts still held by the
worker, using RTE_EVENT_OP_RELEASE.

Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
---
 examples/l3fwd/l3fwd_em.c    | 32 ++++++++++++++++++++++----------
 examples/l3fwd/l3fwd_event.c | 34 ++++++++++++++++++++++++++++++++++
 examples/l3fwd/l3fwd_event.h |  5 +++++
 examples/l3fwd/l3fwd_fib.c   | 10 ++++++++--
 examples/l3fwd/l3fwd_lpm.c   | 32 ++++++++++++++++++++++----------
 5 files changed, 91 insertions(+), 22 deletions(-)

diff --git a/examples/l3fwd/l3fwd_em.c b/examples/l3fwd/l3fwd_em.c
index 24d0910fe0..6f8d94f120 100644
--- a/examples/l3fwd/l3fwd_em.c
+++ b/examples/l3fwd/l3fwd_em.c
@@ -653,6 +653,7 @@ em_event_loop_single(struct l3fwd_event_resources *evt_rsrc,
 	const uint8_t tx_q_id = evt_rsrc->evq.event_q_id[
 		evt_rsrc->evq.nb_queues - 1];
 	const uint8_t event_d_id = evt_rsrc->event_d_id;
+	uint8_t deq = 0, enq = 0;
 	struct lcore_conf *lconf;
 	unsigned int lcore_id;
 	struct rte_event ev;
@@ -665,7 +666,9 @@ em_event_loop_single(struct l3fwd_event_resources *evt_rsrc,
 
 	RTE_LOG(INFO, L3FWD, "entering %s on lcore %u\n", __func__, lcore_id);
 	while (!force_quit) {
-		if (!rte_event_dequeue_burst(event_d_id, event_p_id, &ev, 1, 0))
+		deq = rte_event_dequeue_burst(event_d_id, event_p_id, &ev, 1,
+					      0);
+		if (!deq)
 			continue;
 
 		struct rte_mbuf *mbuf = ev.mbuf;
@@ -684,19 +687,22 @@ em_event_loop_single(struct l3fwd_event_resources *evt_rsrc,
 		if (flags & L3FWD_EVENT_TX_ENQ) {
 			ev.queue_id = tx_q_id;
 			ev.op = RTE_EVENT_OP_FORWARD;
-			while (rte_event_enqueue_burst(event_d_id, event_p_id,
-						&ev, 1) && !force_quit)
-				;
+			do {
+				enq = rte_event_enqueue_burst(
+					event_d_id, event_p_id, &ev, 1);
+			} while (!enq && !force_quit);
 		}
 
 		if (flags & L3FWD_EVENT_TX_DIRECT) {
 			rte_event_eth_tx_adapter_txq_set(mbuf, 0);
-			while (!rte_event_eth_tx_adapter_enqueue(event_d_id,
-						event_p_id, &ev, 1, 0) &&
-					!force_quit)
-				;
+			do {
+				enq = rte_event_eth_tx_adapter_enqueue(
+					event_d_id, event_p_id, &ev, 1, 0);
+			} while (!enq && !force_quit);
 		}
 	}
+
+	l3fwd_event_worker_cleanup(event_d_id, event_p_id, &ev, enq, deq, 0);
 }
 
 static __rte_always_inline void
@@ -709,9 +715,9 @@ em_event_loop_burst(struct l3fwd_event_resources *evt_rsrc,
 	const uint8_t event_d_id = evt_rsrc->event_d_id;
 	const uint16_t deq_len = evt_rsrc->deq_depth;
 	struct rte_event events[MAX_PKT_BURST];
+	int i, nb_enq = 0, nb_deq = 0;
 	struct lcore_conf *lconf;
 	unsigned int lcore_id;
-	int i, nb_enq, nb_deq;
 
 	if (event_p_id < 0)
 		return;
@@ -769,6 +775,9 @@ em_event_loop_burst(struct l3fwd_event_resources *evt_rsrc,
 						nb_deq - nb_enq, 0);
 		}
 	}
+
+	l3fwd_event_worker_cleanup(event_d_id, event_p_id, events, nb_enq,
+				   nb_deq, 0);
 }
 
 static __rte_always_inline void
@@ -832,9 +841,9 @@ em_event_loop_vector(struct l3fwd_event_resources *evt_rsrc,
 	const uint8_t event_d_id = evt_rsrc->event_d_id;
 	const uint16_t deq_len = evt_rsrc->deq_depth;
 	struct rte_event events[MAX_PKT_BURST];
+	int i, nb_enq = 0, nb_deq = 0;
 	struct lcore_conf *lconf;
 	unsigned int lcore_id;
-	int i, nb_enq, nb_deq;
 
 	if (event_p_id < 0)
 		return;
@@ -887,6 +896,9 @@ em_event_loop_vector(struct l3fwd_event_resources *evt_rsrc,
 					nb_deq - nb_enq, 0);
 		}
 	}
+
+	l3fwd_event_worker_cleanup(event_d_id, event_p_id, events, nb_enq,
+				   nb_deq, 1);
 }
 
 int __rte_noinline
diff --git a/examples/l3fwd/l3fwd_event.c b/examples/l3fwd/l3fwd_event.c
index 7a401290f8..a14a21b414 100644
--- a/examples/l3fwd/l3fwd_event.c
+++ b/examples/l3fwd/l3fwd_event.c
@@ -287,3 +287,37 @@ l3fwd_event_resource_setup(struct rte_eth_conf *port_conf)
 		fib_event_loop[evt_rsrc->vector_enabled][evt_rsrc->tx_mode_q]
 			      [evt_rsrc->has_burst];
 }
+
+static void
+l3fwd_event_vector_array_free(struct rte_event events[], uint16_t num)
+{
+	uint16_t i;
+
+	for (i = 0; i < num; i++) {
+		rte_pktmbuf_free_bulk(events[i].vec->mbufs,
+				      events[i].vec->nb_elem);
+		rte_mempool_put(rte_mempool_from_obj(events[i].vec),
+				events[i].vec);
+	}
+}
+
+void
+l3fwd_event_worker_cleanup(uint8_t event_d_id, uint8_t event_p_id,
+			   struct rte_event events[], uint16_t nb_enq,
+			   uint16_t nb_deq, uint8_t is_vector)
+{
+	int i;
+
+	if (nb_deq) {
+		if (is_vector)
+			l3fwd_event_vector_array_free(events + nb_enq,
+						      nb_deq - nb_enq);
+		else
+			for (i = nb_enq; i < nb_deq; i++)
+				rte_pktmbuf_free(events[i].mbuf);
+
+		for (i = 0; i < nb_deq; i++)
+			events[i].op = RTE_EVENT_OP_RELEASE;
+		rte_event_enqueue_burst(event_d_id, event_p_id, events, nb_deq);
+	}
+}
diff --git a/examples/l3fwd/l3fwd_event.h b/examples/l3fwd/l3fwd_event.h
index f139632016..b93841a16f 100644
--- a/examples/l3fwd/l3fwd_event.h
+++ b/examples/l3fwd/l3fwd_event.h
@@ -103,10 +103,15 @@ event_vector_txq_set(struct rte_event_vector *vec, uint16_t txq)
 	}
 }
 
+
+
 struct l3fwd_event_resources *l3fwd_get_eventdev_rsrc(void);
 void l3fwd_event_resource_setup(struct rte_eth_conf *port_conf);
 int l3fwd_get_free_event_port(struct l3fwd_event_resources *eventdev_rsrc);
 void l3fwd_event_set_generic_ops(struct l3fwd_event_setup_ops *ops);
 void l3fwd_event_set_internal_port_ops(struct l3fwd_event_setup_ops *ops);
+void l3fwd_event_worker_cleanup(uint8_t event_d_id, uint8_t event_p_id,
+				struct rte_event events[], uint16_t nb_enq,
+				uint16_t nb_deq, uint8_t is_vector);
 
 #endif /* __L3FWD_EVENTDEV_H__ */
diff --git a/examples/l3fwd/l3fwd_fib.c b/examples/l3fwd/l3fwd_fib.c
index 6e0054b4cb..26d0767ae2 100644
--- a/examples/l3fwd/l3fwd_fib.c
+++ b/examples/l3fwd/l3fwd_fib.c
@@ -252,9 +252,9 @@ fib_event_loop(struct l3fwd_event_resources *evt_rsrc,
 	const uint8_t event_d_id = evt_rsrc->event_d_id;
 	const uint16_t deq_len = evt_rsrc->deq_depth;
 	struct rte_event events[MAX_PKT_BURST];
+	int i, nb_enq = 0, nb_deq = 0;
 	struct lcore_conf *lconf;
 	unsigned int lcore_id;
-	int nb_enq, nb_deq, i;
 
 	uint32_t ipv4_arr[MAX_PKT_BURST];
 	uint8_t ipv6_arr[MAX_PKT_BURST][RTE_FIB6_IPV6_ADDR_SIZE];
@@ -370,6 +370,9 @@ fib_event_loop(struct l3fwd_event_resources *evt_rsrc,
 						nb_deq - nb_enq, 0);
 		}
 	}
+
+	l3fwd_event_worker_cleanup(event_d_id, event_p_id, events, nb_enq,
+				   nb_deq, 0);
 }
 
 int __rte_noinline
@@ -491,7 +494,7 @@ fib_event_loop_vector(struct l3fwd_event_resources *evt_rsrc,
 	const uint8_t event_d_id = evt_rsrc->event_d_id;
 	const uint16_t deq_len = evt_rsrc->deq_depth;
 	struct rte_event events[MAX_PKT_BURST];
-	int nb_enq, nb_deq, i;
+	int nb_enq = 0, nb_deq = 0, i;
 
 	if (event_p_id < 0)
 		return;
@@ -538,6 +541,9 @@ fib_event_loop_vector(struct l3fwd_event_resources *evt_rsrc,
 					nb_deq - nb_enq, 0);
 		}
 	}
+
+	l3fwd_event_worker_cleanup(event_d_id, event_p_id, events, nb_enq,
+				   nb_deq, 1);
 }
 
 int __rte_noinline
diff --git a/examples/l3fwd/l3fwd_lpm.c b/examples/l3fwd/l3fwd_lpm.c
index bec22c44cd..501fc5db5e 100644
--- a/examples/l3fwd/l3fwd_lpm.c
+++ b/examples/l3fwd/l3fwd_lpm.c
@@ -273,6 +273,7 @@ lpm_event_loop_single(struct l3fwd_event_resources *evt_rsrc,
 	const uint8_t tx_q_id = evt_rsrc->evq.event_q_id[
 		evt_rsrc->evq.nb_queues - 1];
 	const uint8_t event_d_id = evt_rsrc->event_d_id;
+	uint8_t enq = 0, deq = 0;
 	struct lcore_conf *lconf;
 	unsigned int lcore_id;
 	struct rte_event ev;
@@ -285,7 +286,9 @@ lpm_event_loop_single(struct l3fwd_event_resources *evt_rsrc,
 
 	RTE_LOG(INFO, L3FWD, "entering %s on lcore %u\n", __func__, lcore_id);
 	while (!force_quit) {
-		if (!rte_event_dequeue_burst(event_d_id, event_p_id, &ev, 1, 0))
+		deq = rte_event_dequeue_burst(event_d_id, event_p_id, &ev, 1,
+					      0);
+		if (!deq)
 			continue;
 
 		if (lpm_process_event_pkt(lconf, ev.mbuf) == BAD_PORT) {
@@ -296,19 +299,22 @@ lpm_event_loop_single(struct l3fwd_event_resources *evt_rsrc,
 		if (flags & L3FWD_EVENT_TX_ENQ) {
 			ev.queue_id = tx_q_id;
 			ev.op = RTE_EVENT_OP_FORWARD;
-			while (rte_event_enqueue_burst(event_d_id, event_p_id,
-						&ev, 1) && !force_quit)
-				;
+			do {
+				enq = rte_event_enqueue_burst(
+					event_d_id, event_p_id, &ev, 1);
+			} while (!enq && !force_quit);
 		}
 
 		if (flags & L3FWD_EVENT_TX_DIRECT) {
 			rte_event_eth_tx_adapter_txq_set(ev.mbuf, 0);
-			while (!rte_event_eth_tx_adapter_enqueue(event_d_id,
-						event_p_id, &ev, 1, 0) &&
-					!force_quit)
-				;
+			do {
+				enq = rte_event_eth_tx_adapter_enqueue(
+					event_d_id, event_p_id, &ev, 1, 0);
+			} while (!enq && !force_quit);
 		}
 	}
+
+	l3fwd_event_worker_cleanup(event_d_id, event_p_id, &ev, enq, deq, 0);
 }
 
 static __rte_always_inline void
@@ -321,9 +327,9 @@ lpm_event_loop_burst(struct l3fwd_event_resources *evt_rsrc,
 	const uint8_t event_d_id = evt_rsrc->event_d_id;
 	const uint16_t deq_len = evt_rsrc->deq_depth;
 	struct rte_event events[MAX_PKT_BURST];
+	int i, nb_enq = 0, nb_deq = 0;
 	struct lcore_conf *lconf;
 	unsigned int lcore_id;
-	int i, nb_enq, nb_deq;
 
 	if (event_p_id < 0)
 		return;
@@ -375,6 +381,9 @@ lpm_event_loop_burst(struct l3fwd_event_resources *evt_rsrc,
 						nb_deq - nb_enq, 0);
 		}
 	}
+
+	l3fwd_event_worker_cleanup(event_d_id, event_p_id, events, nb_enq,
+				   nb_deq, 0);
 }
 
 static __rte_always_inline void
@@ -459,9 +468,9 @@ lpm_event_loop_vector(struct l3fwd_event_resources *evt_rsrc,
 	const uint8_t event_d_id = evt_rsrc->event_d_id;
 	const uint16_t deq_len = evt_rsrc->deq_depth;
 	struct rte_event events[MAX_PKT_BURST];
+	int i, nb_enq = 0, nb_deq = 0;
 	struct lcore_conf *lconf;
 	unsigned int lcore_id;
-	int i, nb_enq, nb_deq;
 
 	if (event_p_id < 0)
 		return;
@@ -510,6 +519,9 @@ lpm_event_loop_vector(struct l3fwd_event_resources *evt_rsrc,
 					nb_deq - nb_enq, 0);
 		}
 	}
+
+	l3fwd_event_worker_cleanup(event_d_id, event_p_id, events, nb_enq,
+				   nb_deq, 1);
 }
 
 int __rte_noinline
-- 
2.25.1


  parent reply	other threads:[~2022-05-13 16:07 UTC|newest]

Thread overview: 17+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2022-04-26 21:14 [PATCH 1/6] app/eventdev: simplify signal handling and teardown Pavan Nikhilesh
2022-04-26 21:14 ` [PATCH 2/6] app/eventdev: clean up worker state before exit Pavan Nikhilesh
2022-05-13 13:40   ` Jerin Jacob
2022-04-26 21:14 ` [PATCH 3/6] examples/eventdev: " Pavan Nikhilesh
2022-04-26 21:14 ` [PATCH 4/6] examples/l3fwd: " Pavan Nikhilesh
2022-04-26 21:14 ` [PATCH 5/6] examples/l2fwd-event: " Pavan Nikhilesh
2022-04-26 21:14 ` [PATCH 6/6] examples/ipsec-secgw: cleanup " Pavan Nikhilesh
2022-05-13 13:41   ` Jerin Jacob
2022-05-13 11:49 ` [PATCH 1/6] app/eventdev: simplify signal handling and teardown Jerin Jacob
2022-05-13 13:39 ` Jerin Jacob
2022-05-13 16:07 ` [PATCH v2 " pbhagavatula
2022-05-13 16:07   ` [PATCH v2 2/6] app/eventdev: clean up worker state before exit pbhagavatula
2022-05-13 16:07   ` [PATCH v2 3/6] examples/eventdev: " pbhagavatula
2022-05-13 16:07   ` pbhagavatula [this message]
2022-05-13 16:07   ` [PATCH v2 5/6] examples/l2fwd-event: " pbhagavatula
2022-05-13 16:07   ` [PATCH v2 6/6] examples/ipsec-secgw: cleanup " pbhagavatula
2022-05-16 16:46   ` [PATCH v2 1/6] app/eventdev: simplify signal handling and teardown Jerin Jacob

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20220513160719.10558-4-pbhagavatula@marvell.com \
    --to=pbhagavatula@marvell.com \
    --cc=dev@dpdk.org \
    --cc=jerinj@marvell.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).