* [PATCH 1/6] app/eventdev: simplify signal handling and teardown
@ 2022-04-26 21:14 Pavan Nikhilesh
2022-04-26 21:14 ` [PATCH 2/6] app/eventdev: clean up worker state before exit Pavan Nikhilesh
` (7 more replies)
0 siblings, 8 replies; 17+ messages in thread
From: Pavan Nikhilesh @ 2022-04-26 21:14 UTC (permalink / raw)
To: jerinj; +Cc: dev, Pavan Nikhilesh
Remove rte_*_dev calls from the signal handler callback; the handler
now only requests the exit.
Split Ethernet device teardown into Rx and Tx sections, and wait for
workers to finish after disabling Rx so that they can complete
processing of the packets they currently hold.
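The resulting shutdown sequence keeps the signal handler
async-signal-safe and moves all device teardown into main(). A
simplified sketch of the new flow (illustration only, condensed from
the code in this patch):

    static void
    signal_handler(int signum)
    {
        if (signum == SIGINT || signum == SIGTERM) {
            /* Only request exit; no rte_*_dev calls here. */
            *(int *)test->test_priv = true;
            rte_wmb();
        }
    }

    /* In main(), once the test has been launched: */
    if (test->ops.ethdev_rx_stop)
        test->ops.ethdev_rx_stop(test, &opt);  /* stop Rx first */
    rte_eal_mp_wait_lcore();                   /* let workers drain */
    if (test->ops.ethdev_destroy)
        test->ops.ethdev_destroy(test, &opt);  /* then stop Tx */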
Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
---
app/test-eventdev/evt_main.c | 58 +++++++++---------------
app/test-eventdev/evt_test.h | 3 ++
app/test-eventdev/test_perf_atq.c | 1 +
app/test-eventdev/test_perf_common.c | 20 +++++++-
app/test-eventdev/test_perf_common.h | 4 +-
app/test-eventdev/test_perf_queue.c | 1 +
app/test-eventdev/test_pipeline_atq.c | 1 +
app/test-eventdev/test_pipeline_common.c | 19 +++++++-
app/test-eventdev/test_pipeline_common.h | 5 +-
app/test-eventdev/test_pipeline_queue.c | 1 +
10 files changed, 72 insertions(+), 41 deletions(-)
diff --git a/app/test-eventdev/evt_main.c b/app/test-eventdev/evt_main.c
index a7d6b0c1cf..c5d63061bf 100644
--- a/app/test-eventdev/evt_main.c
+++ b/app/test-eventdev/evt_main.c
@@ -19,11 +19,7 @@ struct evt_test *test;
static void
signal_handler(int signum)
{
- int i;
- static uint8_t once;
-
- if ((signum == SIGINT || signum == SIGTERM) && !once) {
- once = true;
+ if (signum == SIGINT || signum == SIGTERM) {
printf("\nSignal %d received, preparing to exit...\n",
signum);
@@ -31,36 +27,7 @@ signal_handler(int signum)
/* request all lcores to exit from the main loop */
*(int *)test->test_priv = true;
rte_wmb();
-
- if (test->ops.ethdev_destroy)
- test->ops.ethdev_destroy(test, &opt);
-
- if (test->ops.cryptodev_destroy)
- test->ops.cryptodev_destroy(test, &opt);
-
- rte_eal_mp_wait_lcore();
-
- if (test->ops.test_result)
- test->ops.test_result(test, &opt);
-
- if (opt.prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR) {
- RTE_ETH_FOREACH_DEV(i)
- rte_eth_dev_close(i);
- }
-
- if (test->ops.eventdev_destroy)
- test->ops.eventdev_destroy(test, &opt);
-
- if (test->ops.mempool_destroy)
- test->ops.mempool_destroy(test, &opt);
-
- if (test->ops.test_destroy)
- test->ops.test_destroy(test, &opt);
}
-
- /* exit with the expected status */
- signal(signum, SIG_DFL);
- kill(getpid(), signum);
}
}
@@ -189,10 +156,29 @@ main(int argc, char **argv)
}
}
+ if (test->ops.ethdev_rx_stop)
+ test->ops.ethdev_rx_stop(test, &opt);
+
+ if (test->ops.cryptodev_destroy)
+ test->ops.cryptodev_destroy(test, &opt);
+
rte_eal_mp_wait_lcore();
- /* Print the test result */
- ret = test->ops.test_result(test, &opt);
+ if (test->ops.test_result)
+ test->ops.test_result(test, &opt);
+
+ if (test->ops.ethdev_destroy)
+ test->ops.ethdev_destroy(test, &opt);
+
+ if (test->ops.eventdev_destroy)
+ test->ops.eventdev_destroy(test, &opt);
+
+ if (test->ops.mempool_destroy)
+ test->ops.mempool_destroy(test, &opt);
+
+ if (test->ops.test_destroy)
+ test->ops.test_destroy(test, &opt);
+
nocap:
if (ret == EVT_TEST_SUCCESS) {
printf("Result: "CLGRN"%s"CLNRM"\n", "Success");
diff --git a/app/test-eventdev/evt_test.h b/app/test-eventdev/evt_test.h
index 50fa474ec2..1049f99ddc 100644
--- a/app/test-eventdev/evt_test.h
+++ b/app/test-eventdev/evt_test.h
@@ -41,6 +41,8 @@ typedef void (*evt_test_eventdev_destroy_t)
(struct evt_test *test, struct evt_options *opt);
typedef void (*evt_test_ethdev_destroy_t)
(struct evt_test *test, struct evt_options *opt);
+typedef void (*evt_test_ethdev_rx_stop_t)(struct evt_test *test,
+ struct evt_options *opt);
typedef void (*evt_test_cryptodev_destroy_t)
(struct evt_test *test, struct evt_options *opt);
typedef void (*evt_test_mempool_destroy_t)
@@ -60,6 +62,7 @@ struct evt_test_ops {
evt_test_launch_lcores_t launch_lcores;
evt_test_result_t test_result;
evt_test_eventdev_destroy_t eventdev_destroy;
+ evt_test_ethdev_rx_stop_t ethdev_rx_stop;
evt_test_ethdev_destroy_t ethdev_destroy;
evt_test_cryptodev_destroy_t cryptodev_destroy;
evt_test_mempool_destroy_t mempool_destroy;
diff --git a/app/test-eventdev/test_perf_atq.c b/app/test-eventdev/test_perf_atq.c
index 67ff681666..bac3ea602f 100644
--- a/app/test-eventdev/test_perf_atq.c
+++ b/app/test-eventdev/test_perf_atq.c
@@ -343,6 +343,7 @@ static const struct evt_test_ops perf_atq = {
.test_setup = perf_test_setup,
.ethdev_setup = perf_ethdev_setup,
.cryptodev_setup = perf_cryptodev_setup,
+ .ethdev_rx_stop = perf_ethdev_rx_stop,
.mempool_setup = perf_mempool_setup,
.eventdev_setup = perf_atq_eventdev_setup,
.launch_lcores = perf_atq_launch_lcores,
diff --git a/app/test-eventdev/test_perf_common.c b/app/test-eventdev/test_perf_common.c
index 9d1f4a4567..e93b0e7272 100644
--- a/app/test-eventdev/test_perf_common.c
+++ b/app/test-eventdev/test_perf_common.c
@@ -1087,7 +1087,8 @@ perf_ethdev_setup(struct evt_test *test, struct evt_options *opt)
return 0;
}
-void perf_ethdev_destroy(struct evt_test *test, struct evt_options *opt)
+void
+perf_ethdev_rx_stop(struct evt_test *test, struct evt_options *opt)
{
uint16_t i;
RTE_SET_USED(test);
@@ -1095,6 +1096,23 @@ void perf_ethdev_destroy(struct evt_test *test, struct evt_options *opt)
if (opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR) {
RTE_ETH_FOREACH_DEV(i) {
rte_event_eth_rx_adapter_stop(i);
+ rte_event_eth_rx_adapter_queue_del(i, i, -1);
+ rte_eth_dev_rx_queue_stop(i, 0);
+ }
+ }
+}
+
+void
+perf_ethdev_destroy(struct evt_test *test, struct evt_options *opt)
+{
+ uint16_t i;
+ RTE_SET_USED(test);
+
+ if (opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR) {
+ RTE_ETH_FOREACH_DEV (i) {
+ rte_event_eth_tx_adapter_stop(i);
+ rte_event_eth_tx_adapter_queue_del(i, i, -1);
+ rte_eth_dev_tx_queue_stop(i, 0);
rte_eth_dev_stop(i);
}
}
diff --git a/app/test-eventdev/test_perf_common.h b/app/test-eventdev/test_perf_common.h
index ea0907d61a..e504bb1df9 100644
--- a/app/test-eventdev/test_perf_common.h
+++ b/app/test-eventdev/test_perf_common.h
@@ -12,10 +12,11 @@
#include <rte_cryptodev.h>
#include <rte_cycles.h>
#include <rte_ethdev.h>
-#include <rte_eventdev.h>
#include <rte_event_crypto_adapter.h>
#include <rte_event_eth_rx_adapter.h>
+#include <rte_event_eth_tx_adapter.h>
#include <rte_event_timer_adapter.h>
+#include <rte_eventdev.h>
#include <rte_lcore.h>
#include <rte_malloc.h>
#include <rte_mempool.h>
@@ -181,6 +182,7 @@ void perf_test_destroy(struct evt_test *test, struct evt_options *opt);
void perf_eventdev_destroy(struct evt_test *test, struct evt_options *opt);
void perf_cryptodev_destroy(struct evt_test *test, struct evt_options *opt);
void perf_ethdev_destroy(struct evt_test *test, struct evt_options *opt);
+void perf_ethdev_rx_stop(struct evt_test *test, struct evt_options *opt);
void perf_mempool_destroy(struct evt_test *test, struct evt_options *opt);
#endif /* _TEST_PERF_COMMON_ */
diff --git a/app/test-eventdev/test_perf_queue.c b/app/test-eventdev/test_perf_queue.c
index dcf6d82947..108f1742a7 100644
--- a/app/test-eventdev/test_perf_queue.c
+++ b/app/test-eventdev/test_perf_queue.c
@@ -360,6 +360,7 @@ static const struct evt_test_ops perf_queue = {
.mempool_setup = perf_mempool_setup,
.ethdev_setup = perf_ethdev_setup,
.cryptodev_setup = perf_cryptodev_setup,
+ .ethdev_rx_stop = perf_ethdev_rx_stop,
.eventdev_setup = perf_queue_eventdev_setup,
.launch_lcores = perf_queue_launch_lcores,
.eventdev_destroy = perf_eventdev_destroy,
diff --git a/app/test-eventdev/test_pipeline_atq.c b/app/test-eventdev/test_pipeline_atq.c
index 84dd4f44e3..79218502ba 100644
--- a/app/test-eventdev/test_pipeline_atq.c
+++ b/app/test-eventdev/test_pipeline_atq.c
@@ -772,6 +772,7 @@ static const struct evt_test_ops pipeline_atq = {
.ethdev_setup = pipeline_ethdev_setup,
.eventdev_setup = pipeline_atq_eventdev_setup,
.launch_lcores = pipeline_atq_launch_lcores,
+ .ethdev_rx_stop = pipeline_ethdev_rx_stop,
.eventdev_destroy = pipeline_eventdev_destroy,
.mempool_destroy = pipeline_mempool_destroy,
.ethdev_destroy = pipeline_ethdev_destroy,
diff --git a/app/test-eventdev/test_pipeline_common.c b/app/test-eventdev/test_pipeline_common.c
index ddaa9f3fdb..d994c91678 100644
--- a/app/test-eventdev/test_pipeline_common.c
+++ b/app/test-eventdev/test_pipeline_common.c
@@ -505,6 +505,22 @@ pipeline_event_tx_adapter_setup(struct evt_options *opt,
return ret;
}
+void
+pipeline_ethdev_rx_stop(struct evt_test *test, struct evt_options *opt)
+{
+ uint16_t i, j;
+ RTE_SET_USED(test);
+
+ if (opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR) {
+ RTE_ETH_FOREACH_DEV (i) {
+ rte_event_eth_rx_adapter_stop(i);
+ rte_event_eth_rx_adapter_queue_del(i, i, -1);
+ for (j = 0; j < opt->eth_queues; j++)
+ rte_eth_dev_rx_queue_stop(i, j);
+ }
+ }
+}
+
void
pipeline_ethdev_destroy(struct evt_test *test, struct evt_options *opt)
{
@@ -513,8 +529,9 @@ pipeline_ethdev_destroy(struct evt_test *test, struct evt_options *opt)
RTE_SET_USED(opt);
RTE_ETH_FOREACH_DEV(i) {
- rte_event_eth_rx_adapter_stop(i);
rte_event_eth_tx_adapter_stop(i);
+ rte_event_eth_tx_adapter_queue_del(i, i, -1);
+ rte_eth_dev_tx_queue_stop(i, 0);
rte_eth_dev_stop(i);
}
}
diff --git a/app/test-eventdev/test_pipeline_common.h b/app/test-eventdev/test_pipeline_common.h
index d69e2f8a3e..c979c33772 100644
--- a/app/test-eventdev/test_pipeline_common.h
+++ b/app/test-eventdev/test_pipeline_common.h
@@ -12,16 +12,16 @@
#include <rte_cycles.h>
#include <rte_ethdev.h>
-#include <rte_eventdev.h>
#include <rte_event_eth_rx_adapter.h>
#include <rte_event_eth_tx_adapter.h>
+#include <rte_eventdev.h>
#include <rte_lcore.h>
#include <rte_malloc.h>
#include <rte_mempool.h>
#include <rte_prefetch.h>
-#include <rte_spinlock.h>
#include <rte_service.h>
#include <rte_service_component.h>
+#include <rte_spinlock.h>
#include "evt_common.h"
#include "evt_options.h"
@@ -186,6 +186,7 @@ void pipeline_opt_dump(struct evt_options *opt, uint8_t nb_queues);
void pipeline_test_destroy(struct evt_test *test, struct evt_options *opt);
void pipeline_eventdev_destroy(struct evt_test *test, struct evt_options *opt);
void pipeline_ethdev_destroy(struct evt_test *test, struct evt_options *opt);
+void pipeline_ethdev_rx_stop(struct evt_test *test, struct evt_options *opt);
void pipeline_mempool_destroy(struct evt_test *test, struct evt_options *opt);
#endif /* _TEST_PIPELINE_COMMON_ */
diff --git a/app/test-eventdev/test_pipeline_queue.c b/app/test-eventdev/test_pipeline_queue.c
index f6cc3e358e..343f8f3b1d 100644
--- a/app/test-eventdev/test_pipeline_queue.c
+++ b/app/test-eventdev/test_pipeline_queue.c
@@ -798,6 +798,7 @@ static const struct evt_test_ops pipeline_queue = {
.ethdev_setup = pipeline_ethdev_setup,
.eventdev_setup = pipeline_queue_eventdev_setup,
.launch_lcores = pipeline_queue_launch_lcores,
+ .ethdev_rx_stop = pipeline_ethdev_rx_stop,
.eventdev_destroy = pipeline_eventdev_destroy,
.mempool_destroy = pipeline_mempool_destroy,
.ethdev_destroy = pipeline_ethdev_destroy,
--
2.25.1
* [PATCH 2/6] app/eventdev: clean up worker state before exit
2022-04-26 21:14 [PATCH 1/6] app/eventdev: simplify signal handling and teardown Pavan Nikhilesh
@ 2022-04-26 21:14 ` Pavan Nikhilesh
2022-05-13 13:40 ` Jerin Jacob
2022-04-26 21:14 ` [PATCH 3/6] examples/eventdev: " Pavan Nikhilesh
` (6 subsequent siblings)
7 siblings, 1 reply; 17+ messages in thread
From: Pavan Nikhilesh @ 2022-04-26 21:14 UTC (permalink / raw)
To: jerinj; +Cc: dev, Pavan Nikhilesh
Event ports are configured to implicitly release the scheduler
contexts they currently hold in the next call to
rte_event_dequeue_burst().
A worker core might still hold a scheduling context during exit, as
the next call to rte_event_dequeue_burst() is never made.
This might lead to a deadlock depending on the worker exit timing,
especially when there are very few flows.
Add a cleanup function that releases any scheduling contexts held by
the worker using RTE_EVENT_OP_RELEASE.
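A minimal sketch of the cleanup pattern (condensed from the
perf_worker_cleanup() helper added in this patch): events that were
dequeued but not enqueued back are returned to the mempool, and every
dequeued slot is then flushed with RTE_EVENT_OP_RELEASE so the port
drops the associated scheduling contexts:

    if (nb_deq) {
        /* Free events that were dequeued but never forwarded. */
        for (i = nb_enq; i < nb_deq; i++)
            rte_mempool_put(pool, events[i].event_ptr);

        /* Release the scheduling contexts still held by the port. */
        for (i = 0; i < nb_deq; i++)
            events[i].op = RTE_EVENT_OP_RELEASE;
        rte_event_enqueue_burst(dev_id, port_id, events, nb_deq);
    }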
Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
---
app/test-eventdev/test_perf_atq.c | 31 +++--
app/test-eventdev/test_perf_common.c | 17 +++
app/test-eventdev/test_perf_common.h | 3 +
app/test-eventdev/test_perf_queue.c | 30 +++--
app/test-eventdev/test_pipeline_atq.c | 134 ++++++++++++---------
app/test-eventdev/test_pipeline_common.c | 39 ++++++
app/test-eventdev/test_pipeline_common.h | 59 ++++++---
app/test-eventdev/test_pipeline_queue.c | 145 ++++++++++++++---------
8 files changed, 304 insertions(+), 154 deletions(-)
diff --git a/app/test-eventdev/test_perf_atq.c b/app/test-eventdev/test_perf_atq.c
index bac3ea602f..5a0b190384 100644
--- a/app/test-eventdev/test_perf_atq.c
+++ b/app/test-eventdev/test_perf_atq.c
@@ -37,13 +37,14 @@ atq_fwd_event(struct rte_event *const ev, uint8_t *const sched_type_list,
static int
perf_atq_worker(void *arg, const int enable_fwd_latency)
{
- PERF_WORKER_INIT;
+ uint16_t enq = 0, deq = 0;
struct rte_event ev;
+ PERF_WORKER_INIT;
while (t->done == false) {
- uint16_t event = rte_event_dequeue_burst(dev, port, &ev, 1, 0);
+ deq = rte_event_dequeue_burst(dev, port, &ev, 1, 0);
- if (!event) {
+ if (!deq) {
rte_pause();
continue;
}
@@ -78,24 +79,29 @@ perf_atq_worker(void *arg, const int enable_fwd_latency)
bufs, sz, cnt);
} else {
atq_fwd_event(&ev, sched_type_list, nb_stages);
- while (rte_event_enqueue_burst(dev, port, &ev, 1) != 1)
- rte_pause();
+ do {
+ enq = rte_event_enqueue_burst(dev, port, &ev,
+ 1);
+ } while (!enq && !t->done);
}
}
+
+ perf_worker_cleanup(pool, dev, port, &ev, enq, deq);
+
return 0;
}
static int
perf_atq_worker_burst(void *arg, const int enable_fwd_latency)
{
- PERF_WORKER_INIT;
- uint16_t i;
/* +1 to avoid prefetch out of array check */
struct rte_event ev[BURST_SIZE + 1];
+ uint16_t enq = 0, nb_rx = 0;
+ PERF_WORKER_INIT;
+ uint16_t i;
while (t->done == false) {
- uint16_t const nb_rx = rte_event_dequeue_burst(dev, port, ev,
- BURST_SIZE, 0);
+ nb_rx = rte_event_dequeue_burst(dev, port, ev, BURST_SIZE, 0);
if (!nb_rx) {
rte_pause();
@@ -146,14 +152,15 @@ perf_atq_worker_burst(void *arg, const int enable_fwd_latency)
}
}
- uint16_t enq;
-
enq = rte_event_enqueue_burst(dev, port, ev, nb_rx);
- while (enq < nb_rx) {
+ while ((enq < nb_rx) && !t->done) {
enq += rte_event_enqueue_burst(dev, port,
ev + enq, nb_rx - enq);
}
}
+
+ perf_worker_cleanup(pool, dev, port, ev, enq, nb_rx);
+
return 0;
}
diff --git a/app/test-eventdev/test_perf_common.c b/app/test-eventdev/test_perf_common.c
index e93b0e7272..f673a9fddd 100644
--- a/app/test-eventdev/test_perf_common.c
+++ b/app/test-eventdev/test_perf_common.c
@@ -985,6 +985,23 @@ perf_opt_dump(struct evt_options *opt, uint8_t nb_queues)
evt_dump("prod_enq_burst_sz", "%d", opt->prod_enq_burst_sz);
}
+void
+perf_worker_cleanup(struct rte_mempool *const pool, uint8_t dev_id,
+ uint8_t port_id, struct rte_event events[], uint16_t nb_enq,
+ uint16_t nb_deq)
+{
+ int i;
+
+ if (nb_deq) {
+ for (i = nb_enq; i < nb_deq; i++)
+ rte_mempool_put(pool, events[i].event_ptr);
+
+ for (i = 0; i < nb_deq; i++)
+ events[i].op = RTE_EVENT_OP_RELEASE;
+ rte_event_enqueue_burst(dev_id, port_id, events, nb_deq);
+ }
+}
+
void
perf_eventdev_destroy(struct evt_test *test, struct evt_options *opt)
{
diff --git a/app/test-eventdev/test_perf_common.h b/app/test-eventdev/test_perf_common.h
index e504bb1df9..f6bfc73be0 100644
--- a/app/test-eventdev/test_perf_common.h
+++ b/app/test-eventdev/test_perf_common.h
@@ -184,5 +184,8 @@ void perf_cryptodev_destroy(struct evt_test *test, struct evt_options *opt);
void perf_ethdev_destroy(struct evt_test *test, struct evt_options *opt);
void perf_ethdev_rx_stop(struct evt_test *test, struct evt_options *opt);
void perf_mempool_destroy(struct evt_test *test, struct evt_options *opt);
+void perf_worker_cleanup(struct rte_mempool *const pool, uint8_t dev_id,
+ uint8_t port_id, struct rte_event events[],
+ uint16_t nb_enq, uint16_t nb_deq);
#endif /* _TEST_PERF_COMMON_ */
diff --git a/app/test-eventdev/test_perf_queue.c b/app/test-eventdev/test_perf_queue.c
index 108f1742a7..b498cacef6 100644
--- a/app/test-eventdev/test_perf_queue.c
+++ b/app/test-eventdev/test_perf_queue.c
@@ -39,13 +39,14 @@ fwd_event(struct rte_event *const ev, uint8_t *const sched_type_list,
static int
perf_queue_worker(void *arg, const int enable_fwd_latency)
{
- PERF_WORKER_INIT;
+ uint16_t enq = 0, deq = 0;
struct rte_event ev;
+ PERF_WORKER_INIT;
while (t->done == false) {
- uint16_t event = rte_event_dequeue_burst(dev, port, &ev, 1, 0);
+ deq = rte_event_dequeue_burst(dev, port, &ev, 1, 0);
- if (!event) {
+ if (!deq) {
rte_pause();
continue;
}
@@ -80,24 +81,29 @@ perf_queue_worker(void *arg, const int enable_fwd_latency)
&ev, w, bufs, sz, cnt);
} else {
fwd_event(&ev, sched_type_list, nb_stages);
- while (rte_event_enqueue_burst(dev, port, &ev, 1) != 1)
- rte_pause();
+ do {
+ enq = rte_event_enqueue_burst(dev, port, &ev,
+ 1);
+ } while (!enq && !t->done);
}
}
+
+ perf_worker_cleanup(pool, dev, port, &ev, enq, deq);
+
return 0;
}
static int
perf_queue_worker_burst(void *arg, const int enable_fwd_latency)
{
- PERF_WORKER_INIT;
- uint16_t i;
/* +1 to avoid prefetch out of array check */
struct rte_event ev[BURST_SIZE + 1];
+ uint16_t enq = 0, nb_rx = 0;
+ PERF_WORKER_INIT;
+ uint16_t i;
while (t->done == false) {
- uint16_t const nb_rx = rte_event_dequeue_burst(dev, port, ev,
- BURST_SIZE, 0);
+ nb_rx = rte_event_dequeue_burst(dev, port, ev, BURST_SIZE, 0);
if (!nb_rx) {
rte_pause();
@@ -147,14 +153,16 @@ perf_queue_worker_burst(void *arg, const int enable_fwd_latency)
}
}
- uint16_t enq;
enq = rte_event_enqueue_burst(dev, port, ev, nb_rx);
- while (enq < nb_rx) {
+ while (enq < nb_rx && !t->done) {
enq += rte_event_enqueue_burst(dev, port,
ev + enq, nb_rx - enq);
}
}
+
+ perf_worker_cleanup(pool, dev, port, ev, enq, nb_rx);
+
return 0;
}
diff --git a/app/test-eventdev/test_pipeline_atq.c b/app/test-eventdev/test_pipeline_atq.c
index 79218502ba..4b10197127 100644
--- a/app/test-eventdev/test_pipeline_atq.c
+++ b/app/test-eventdev/test_pipeline_atq.c
@@ -21,18 +21,20 @@ static __rte_noinline int
pipeline_atq_worker_single_stage_tx(void *arg)
{
PIPELINE_WORKER_SINGLE_STAGE_INIT;
+ uint8_t enq = 0, deq = 0;
while (t->done == false) {
- uint16_t event = rte_event_dequeue_burst(dev, port, &ev, 1, 0);
+ deq = rte_event_dequeue_burst(dev, port, &ev, 1, 0);
- if (!event) {
+ if (!deq) {
rte_pause();
continue;
}
- pipeline_event_tx(dev, port, &ev);
+ deq = pipeline_event_tx(dev, port, &ev, t);
w->processed_pkts++;
}
+ pipeline_worker_cleanup(dev, port, &ev, enq, deq);
return 0;
}
@@ -42,20 +44,22 @@ pipeline_atq_worker_single_stage_fwd(void *arg)
{
PIPELINE_WORKER_SINGLE_STAGE_INIT;
const uint8_t *tx_queue = t->tx_evqueue_id;
+ uint8_t enq = 0, deq = 0;
while (t->done == false) {
- uint16_t event = rte_event_dequeue_burst(dev, port, &ev, 1, 0);
+ deq = rte_event_dequeue_burst(dev, port, &ev, 1, 0);
- if (!event) {
+ if (!deq) {
rte_pause();
continue;
}
ev.queue_id = tx_queue[ev.mbuf->port];
pipeline_fwd_event(&ev, RTE_SCHED_TYPE_ATOMIC);
- pipeline_event_enqueue(dev, port, &ev);
+ enq = pipeline_event_enqueue(dev, port, &ev, t);
w->processed_pkts++;
}
+ pipeline_worker_cleanup(dev, port, &ev, enq, deq);
return 0;
}
@@ -64,10 +68,10 @@ static __rte_noinline int
pipeline_atq_worker_single_stage_burst_tx(void *arg)
{
PIPELINE_WORKER_SINGLE_STAGE_BURST_INIT;
+ uint16_t nb_rx = 0, nb_tx = 0;
while (t->done == false) {
- uint16_t nb_rx = rte_event_dequeue_burst(dev, port, ev,
- BURST_SIZE, 0);
+ nb_rx = rte_event_dequeue_burst(dev, port, ev, BURST_SIZE, 0);
if (!nb_rx) {
rte_pause();
@@ -79,9 +83,10 @@ pipeline_atq_worker_single_stage_burst_tx(void *arg)
rte_event_eth_tx_adapter_txq_set(ev[i].mbuf, 0);
}
- pipeline_event_tx_burst(dev, port, ev, nb_rx);
- w->processed_pkts += nb_rx;
+ nb_tx = pipeline_event_tx_burst(dev, port, ev, nb_rx, t);
+ w->processed_pkts += nb_tx;
}
+ pipeline_worker_cleanup(dev, port, ev, nb_tx, nb_rx);
return 0;
}
@@ -91,10 +96,10 @@ pipeline_atq_worker_single_stage_burst_fwd(void *arg)
{
PIPELINE_WORKER_SINGLE_STAGE_BURST_INIT;
const uint8_t *tx_queue = t->tx_evqueue_id;
+ uint16_t nb_rx = 0, nb_tx = 0;
while (t->done == false) {
- uint16_t nb_rx = rte_event_dequeue_burst(dev, port, ev,
- BURST_SIZE, 0);
+ nb_rx = rte_event_dequeue_burst(dev, port, ev, BURST_SIZE, 0);
if (!nb_rx) {
rte_pause();
@@ -108,9 +113,10 @@ pipeline_atq_worker_single_stage_burst_fwd(void *arg)
pipeline_fwd_event(&ev[i], RTE_SCHED_TYPE_ATOMIC);
}
- pipeline_event_enqueue_burst(dev, port, ev, nb_rx);
- w->processed_pkts += nb_rx;
+ nb_tx = pipeline_event_enqueue_burst(dev, port, ev, nb_rx, t);
+ w->processed_pkts += nb_tx;
}
+ pipeline_worker_cleanup(dev, port, ev, nb_tx, nb_rx);
return 0;
}
@@ -119,19 +125,21 @@ static __rte_noinline int
pipeline_atq_worker_single_stage_tx_vector(void *arg)
{
PIPELINE_WORKER_SINGLE_STAGE_INIT;
+ uint8_t enq = 0, deq = 0;
uint16_t vector_sz;
while (!t->done) {
- uint16_t event = rte_event_dequeue_burst(dev, port, &ev, 1, 0);
+ deq = rte_event_dequeue_burst(dev, port, &ev, 1, 0);
- if (!event) {
+ if (!deq) {
rte_pause();
continue;
}
vector_sz = ev.vec->nb_elem;
- pipeline_event_tx_vector(dev, port, &ev);
+ enq = pipeline_event_tx_vector(dev, port, &ev, t);
w->processed_pkts += vector_sz;
}
+ pipeline_worker_cleanup(dev, port, &ev, enq, deq);
return 0;
}
@@ -141,12 +149,13 @@ pipeline_atq_worker_single_stage_fwd_vector(void *arg)
{
PIPELINE_WORKER_SINGLE_STAGE_INIT;
const uint8_t *tx_queue = t->tx_evqueue_id;
+ uint8_t enq = 0, deq = 0;
uint16_t vector_sz;
while (!t->done) {
- uint16_t event = rte_event_dequeue_burst(dev, port, &ev, 1, 0);
+ deq = rte_event_dequeue_burst(dev, port, &ev, 1, 0);
- if (!event) {
+ if (!deq) {
rte_pause();
continue;
}
@@ -155,9 +164,10 @@ pipeline_atq_worker_single_stage_fwd_vector(void *arg)
ev.queue_id = tx_queue[ev.vec->port];
ev.vec->queue = 0;
pipeline_fwd_event_vector(&ev, RTE_SCHED_TYPE_ATOMIC);
- pipeline_event_enqueue(dev, port, &ev);
+ enq = pipeline_event_enqueue(dev, port, &ev, t);
w->processed_pkts += vector_sz;
}
+ pipeline_worker_cleanup(dev, port, &ev, enq, deq);
return 0;
}
@@ -166,11 +176,11 @@ static __rte_noinline int
pipeline_atq_worker_single_stage_burst_tx_vector(void *arg)
{
PIPELINE_WORKER_SINGLE_STAGE_BURST_INIT;
+ uint16_t nb_rx = 0, nb_tx = 0;
uint16_t vector_sz;
while (!t->done) {
- uint16_t nb_rx =
- rte_event_dequeue_burst(dev, port, ev, BURST_SIZE, 0);
+ nb_rx = rte_event_dequeue_burst(dev, port, ev, BURST_SIZE, 0);
if (!nb_rx) {
rte_pause();
@@ -182,9 +192,10 @@ pipeline_atq_worker_single_stage_burst_tx_vector(void *arg)
ev[i].vec->queue = 0;
}
- pipeline_event_tx_burst(dev, port, ev, nb_rx);
+ nb_tx = pipeline_event_tx_burst(dev, port, ev, nb_rx, t);
w->processed_pkts += vector_sz;
}
+ pipeline_worker_cleanup(dev, port, ev, nb_tx, nb_rx);
return 0;
}
@@ -194,11 +205,11 @@ pipeline_atq_worker_single_stage_burst_fwd_vector(void *arg)
{
PIPELINE_WORKER_SINGLE_STAGE_BURST_INIT;
const uint8_t *tx_queue = t->tx_evqueue_id;
+ uint16_t nb_rx = 0, nb_tx = 0;
uint16_t vector_sz;
while (!t->done) {
- uint16_t nb_rx =
- rte_event_dequeue_burst(dev, port, ev, BURST_SIZE, 0);
+ nb_rx = rte_event_dequeue_burst(dev, port, ev, BURST_SIZE, 0);
if (!nb_rx) {
rte_pause();
@@ -214,9 +225,10 @@ pipeline_atq_worker_single_stage_burst_fwd_vector(void *arg)
RTE_SCHED_TYPE_ATOMIC);
}
- pipeline_event_enqueue_burst(dev, port, ev, nb_rx);
+ nb_tx = pipeline_event_enqueue_burst(dev, port, ev, nb_rx, t);
w->processed_pkts += vector_sz;
}
+ pipeline_worker_cleanup(dev, port, ev, nb_tx, nb_rx);
return 0;
}
@@ -225,11 +237,12 @@ static __rte_noinline int
pipeline_atq_worker_multi_stage_tx(void *arg)
{
PIPELINE_WORKER_MULTI_STAGE_INIT;
+ uint8_t enq = 0, deq = 0;
while (t->done == false) {
- uint16_t event = rte_event_dequeue_burst(dev, port, &ev, 1, 0);
+ deq = rte_event_dequeue_burst(dev, port, &ev, 1, 0);
- if (!event) {
+ if (!deq) {
rte_pause();
continue;
}
@@ -237,15 +250,16 @@ pipeline_atq_worker_multi_stage_tx(void *arg)
cq_id = ev.sub_event_type % nb_stages;
if (cq_id == last_queue) {
- pipeline_event_tx(dev, port, &ev);
+ enq = pipeline_event_tx(dev, port, &ev, t);
w->processed_pkts++;
continue;
}
ev.sub_event_type++;
pipeline_fwd_event(&ev, sched_type_list[cq_id]);
- pipeline_event_enqueue(dev, port, &ev);
+ enq = pipeline_event_enqueue(dev, port, &ev, t);
}
+ pipeline_worker_cleanup(dev, port, &ev, enq, deq);
return 0;
}
@@ -255,11 +269,12 @@ pipeline_atq_worker_multi_stage_fwd(void *arg)
{
PIPELINE_WORKER_MULTI_STAGE_INIT;
const uint8_t *tx_queue = t->tx_evqueue_id;
+ uint8_t enq = 0, deq = 0;
while (t->done == false) {
- uint16_t event = rte_event_dequeue_burst(dev, port, &ev, 1, 0);
+ deq = rte_event_dequeue_burst(dev, port, &ev, 1, 0);
- if (!event) {
+ if (!deq) {
rte_pause();
continue;
}
@@ -275,8 +290,9 @@ pipeline_atq_worker_multi_stage_fwd(void *arg)
pipeline_fwd_event(&ev, sched_type_list[cq_id]);
}
- pipeline_event_enqueue(dev, port, &ev);
+ enq = pipeline_event_enqueue(dev, port, &ev, t);
}
+ pipeline_worker_cleanup(dev, port, &ev, enq, deq);
return 0;
}
@@ -285,10 +301,10 @@ static __rte_noinline int
pipeline_atq_worker_multi_stage_burst_tx(void *arg)
{
PIPELINE_WORKER_MULTI_STAGE_BURST_INIT;
+ uint16_t nb_rx = 0, nb_tx = 0;
while (t->done == false) {
- uint16_t nb_rx = rte_event_dequeue_burst(dev, port, ev,
- BURST_SIZE, 0);
+ nb_rx = rte_event_dequeue_burst(dev, port, ev, BURST_SIZE, 0);
if (!nb_rx) {
rte_pause();
@@ -300,7 +316,7 @@ pipeline_atq_worker_multi_stage_burst_tx(void *arg)
cq_id = ev[i].sub_event_type % nb_stages;
if (cq_id == last_queue) {
- pipeline_event_tx(dev, port, &ev[i]);
+ pipeline_event_tx(dev, port, &ev[i], t);
ev[i].op = RTE_EVENT_OP_RELEASE;
w->processed_pkts++;
continue;
@@ -310,8 +326,9 @@ pipeline_atq_worker_multi_stage_burst_tx(void *arg)
pipeline_fwd_event(&ev[i], sched_type_list[cq_id]);
}
- pipeline_event_enqueue_burst(dev, port, ev, nb_rx);
+ nb_tx = pipeline_event_enqueue_burst(dev, port, ev, nb_rx, t);
}
+ pipeline_worker_cleanup(dev, port, ev, nb_tx, nb_rx);
return 0;
}
@@ -321,10 +338,10 @@ pipeline_atq_worker_multi_stage_burst_fwd(void *arg)
{
PIPELINE_WORKER_MULTI_STAGE_BURST_INIT;
const uint8_t *tx_queue = t->tx_evqueue_id;
+ uint16_t nb_rx = 0, nb_tx = 0;
while (t->done == false) {
- uint16_t nb_rx = rte_event_dequeue_burst(dev, port, ev,
- BURST_SIZE, 0);
+ nb_rx = rte_event_dequeue_burst(dev, port, ev, BURST_SIZE, 0);
if (!nb_rx) {
rte_pause();
@@ -347,8 +364,9 @@ pipeline_atq_worker_multi_stage_burst_fwd(void *arg)
}
}
- pipeline_event_enqueue_burst(dev, port, ev, nb_rx);
+ nb_tx = pipeline_event_enqueue_burst(dev, port, ev, nb_rx, t);
}
+ pipeline_worker_cleanup(dev, port, ev, nb_tx, nb_rx);
return 0;
}
@@ -357,12 +375,13 @@ static __rte_noinline int
pipeline_atq_worker_multi_stage_tx_vector(void *arg)
{
PIPELINE_WORKER_MULTI_STAGE_INIT;
+ uint8_t enq = 0, deq = 0;
uint16_t vector_sz;
while (!t->done) {
- uint16_t event = rte_event_dequeue_burst(dev, port, &ev, 1, 0);
+ deq = rte_event_dequeue_burst(dev, port, &ev, 1, 0);
- if (!event) {
+ if (!deq) {
rte_pause();
continue;
}
@@ -371,15 +390,16 @@ pipeline_atq_worker_multi_stage_tx_vector(void *arg)
if (cq_id == last_queue) {
vector_sz = ev.vec->nb_elem;
- pipeline_event_tx_vector(dev, port, &ev);
+ enq = pipeline_event_tx_vector(dev, port, &ev, t);
w->processed_pkts += vector_sz;
continue;
}
ev.sub_event_type++;
pipeline_fwd_event_vector(&ev, sched_type_list[cq_id]);
- pipeline_event_enqueue(dev, port, &ev);
+ enq = pipeline_event_enqueue(dev, port, &ev, t);
}
+ pipeline_worker_cleanup(dev, port, &ev, enq, deq);
return 0;
}
@@ -389,12 +409,13 @@ pipeline_atq_worker_multi_stage_fwd_vector(void *arg)
{
PIPELINE_WORKER_MULTI_STAGE_INIT;
const uint8_t *tx_queue = t->tx_evqueue_id;
+ uint8_t enq = 0, deq = 0;
uint16_t vector_sz;
while (!t->done) {
- uint16_t event = rte_event_dequeue_burst(dev, port, &ev, 1, 0);
+ deq = rte_event_dequeue_burst(dev, port, &ev, 1, 0);
- if (!event) {
+ if (!deq) {
rte_pause();
continue;
}
@@ -406,14 +427,15 @@ pipeline_atq_worker_multi_stage_fwd_vector(void *arg)
ev.vec->queue = 0;
vector_sz = ev.vec->nb_elem;
pipeline_fwd_event_vector(&ev, RTE_SCHED_TYPE_ATOMIC);
- pipeline_event_enqueue(dev, port, &ev);
+ enq = pipeline_event_enqueue(dev, port, &ev, t);
w->processed_pkts += vector_sz;
} else {
ev.sub_event_type++;
pipeline_fwd_event_vector(&ev, sched_type_list[cq_id]);
- pipeline_event_enqueue(dev, port, &ev);
+ enq = pipeline_event_enqueue(dev, port, &ev, t);
}
}
+ pipeline_worker_cleanup(dev, port, &ev, enq, deq);
return 0;
}
@@ -422,11 +444,11 @@ static __rte_noinline int
pipeline_atq_worker_multi_stage_burst_tx_vector(void *arg)
{
PIPELINE_WORKER_MULTI_STAGE_BURST_INIT;
+ uint16_t nb_rx = 0, nb_tx = 0;
uint16_t vector_sz;
while (!t->done) {
- uint16_t nb_rx =
- rte_event_dequeue_burst(dev, port, ev, BURST_SIZE, 0);
+ nb_rx = rte_event_dequeue_burst(dev, port, ev, BURST_SIZE, 0);
if (!nb_rx) {
rte_pause();
@@ -438,7 +460,7 @@ pipeline_atq_worker_multi_stage_burst_tx_vector(void *arg)
if (cq_id == last_queue) {
vector_sz = ev[i].vec->nb_elem;
- pipeline_event_tx_vector(dev, port, &ev[i]);
+ pipeline_event_tx_vector(dev, port, &ev[i], t);
ev[i].op = RTE_EVENT_OP_RELEASE;
w->processed_pkts += vector_sz;
continue;
@@ -449,8 +471,9 @@ pipeline_atq_worker_multi_stage_burst_tx_vector(void *arg)
sched_type_list[cq_id]);
}
- pipeline_event_enqueue_burst(dev, port, ev, nb_rx);
+ nb_tx = pipeline_event_enqueue_burst(dev, port, ev, nb_rx, t);
}
+ pipeline_worker_cleanup(dev, port, ev, nb_tx, nb_rx);
return 0;
}
@@ -460,11 +483,11 @@ pipeline_atq_worker_multi_stage_burst_fwd_vector(void *arg)
{
PIPELINE_WORKER_MULTI_STAGE_BURST_INIT;
const uint8_t *tx_queue = t->tx_evqueue_id;
+ uint16_t nb_rx = 0, nb_tx = 0;
uint16_t vector_sz;
while (!t->done) {
- uint16_t nb_rx =
- rte_event_dequeue_burst(dev, port, ev, BURST_SIZE, 0);
+ nb_rx = rte_event_dequeue_burst(dev, port, ev, BURST_SIZE, 0);
if (!nb_rx) {
rte_pause();
@@ -488,8 +511,9 @@ pipeline_atq_worker_multi_stage_burst_fwd_vector(void *arg)
}
}
- pipeline_event_enqueue_burst(dev, port, ev, nb_rx);
+ nb_tx = pipeline_event_enqueue_burst(dev, port, ev, nb_rx, t);
}
+ pipeline_worker_cleanup(dev, port, ev, nb_tx, nb_rx);
return 0;
}
diff --git a/app/test-eventdev/test_pipeline_common.c b/app/test-eventdev/test_pipeline_common.c
index d994c91678..a8dd070000 100644
--- a/app/test-eventdev/test_pipeline_common.c
+++ b/app/test-eventdev/test_pipeline_common.c
@@ -505,6 +505,45 @@ pipeline_event_tx_adapter_setup(struct evt_options *opt,
return ret;
}
+static void
+pipeline_vector_array_free(struct rte_event events[], uint16_t num)
+{
+ uint16_t i;
+
+ for (i = 0; i < num; i++) {
+ rte_pktmbuf_free_bulk(events[i].vec->mbufs,
+ events[i].vec->nb_elem);
+ rte_mempool_put(rte_mempool_from_obj(events[i].vec),
+ events[i].vec);
+ }
+}
+
+void
+pipeline_worker_cleanup(uint8_t dev, uint8_t port, struct rte_event ev[],
+ uint16_t enq, uint16_t deq)
+{
+ int i;
+
+ if (!(deq - enq))
+ return;
+
+ if (deq) {
+ for (i = enq; i < deq; i++) {
+ if (ev[i].op == RTE_EVENT_OP_RELEASE)
+ continue;
+ if (ev[i].event_type & RTE_EVENT_TYPE_VECTOR)
+ pipeline_vector_array_free(&ev[i], 1);
+ else
+ rte_pktmbuf_free(ev[i].mbuf);
+ }
+
+ for (i = 0; i < deq; i++)
+ ev[i].op = RTE_EVENT_OP_RELEASE;
+
+ rte_event_enqueue_burst(dev, port, ev, deq);
+ }
+}
+
void
pipeline_ethdev_rx_stop(struct evt_test *test, struct evt_options *opt)
{
diff --git a/app/test-eventdev/test_pipeline_common.h b/app/test-eventdev/test_pipeline_common.h
index c979c33772..a6443faea4 100644
--- a/app/test-eventdev/test_pipeline_common.h
+++ b/app/test-eventdev/test_pipeline_common.h
@@ -109,59 +109,80 @@ pipeline_fwd_event_vector(struct rte_event *ev, uint8_t sched)
ev->sched_type = sched;
}
-static __rte_always_inline void
+static __rte_always_inline uint8_t
pipeline_event_tx(const uint8_t dev, const uint8_t port,
- struct rte_event * const ev)
+ struct rte_event *const ev, struct test_pipeline *t)
{
+ uint8_t enq;
+
rte_event_eth_tx_adapter_txq_set(ev->mbuf, 0);
- while (!rte_event_eth_tx_adapter_enqueue(dev, port, ev, 1, 0))
- rte_pause();
+ do {
+ enq = rte_event_eth_tx_adapter_enqueue(dev, port, ev, 1, 0);
+ } while (!enq && !t->done);
+
+ return enq;
}
-static __rte_always_inline void
+static __rte_always_inline uint8_t
pipeline_event_tx_vector(const uint8_t dev, const uint8_t port,
- struct rte_event *const ev)
+ struct rte_event *const ev, struct test_pipeline *t)
{
+ uint8_t enq;
+
ev->vec->queue = 0;
+ do {
+ enq = rte_event_eth_tx_adapter_enqueue(dev, port, ev, 1, 0);
+ } while (!enq && !t->done);
- while (!rte_event_eth_tx_adapter_enqueue(dev, port, ev, 1, 0))
- rte_pause();
+ return enq;
}
-static __rte_always_inline void
+static __rte_always_inline uint16_t
pipeline_event_tx_burst(const uint8_t dev, const uint8_t port,
- struct rte_event *ev, const uint16_t nb_rx)
+ struct rte_event *ev, const uint16_t nb_rx,
+ struct test_pipeline *t)
{
uint16_t enq;
enq = rte_event_eth_tx_adapter_enqueue(dev, port, ev, nb_rx, 0);
- while (enq < nb_rx) {
+ while (enq < nb_rx && !t->done) {
enq += rte_event_eth_tx_adapter_enqueue(dev, port,
ev + enq, nb_rx - enq, 0);
}
+
+ return enq;
}
-static __rte_always_inline void
+static __rte_always_inline uint8_t
pipeline_event_enqueue(const uint8_t dev, const uint8_t port,
- struct rte_event *ev)
+ struct rte_event *ev, struct test_pipeline *t)
{
- while (rte_event_enqueue_burst(dev, port, ev, 1) != 1)
- rte_pause();
+ uint8_t enq;
+
+ do {
+ enq = rte_event_enqueue_burst(dev, port, ev, 1);
+ } while (!enq && !t->done);
+
+ return enq;
}
-static __rte_always_inline void
+static __rte_always_inline uint16_t
pipeline_event_enqueue_burst(const uint8_t dev, const uint8_t port,
- struct rte_event *ev, const uint16_t nb_rx)
+ struct rte_event *ev, const uint16_t nb_rx,
+ struct test_pipeline *t)
{
uint16_t enq;
enq = rte_event_enqueue_burst(dev, port, ev, nb_rx);
- while (enq < nb_rx) {
+ while (enq < nb_rx && !t->done) {
enq += rte_event_enqueue_burst(dev, port,
ev + enq, nb_rx - enq);
}
+
+ return enq;
}
+
static inline int
pipeline_nb_event_ports(struct evt_options *opt)
{
@@ -188,5 +209,7 @@ void pipeline_eventdev_destroy(struct evt_test *test, struct evt_options *opt);
void pipeline_ethdev_destroy(struct evt_test *test, struct evt_options *opt);
void pipeline_ethdev_rx_stop(struct evt_test *test, struct evt_options *opt);
void pipeline_mempool_destroy(struct evt_test *test, struct evt_options *opt);
+void pipeline_worker_cleanup(uint8_t dev, uint8_t port, struct rte_event ev[],
+ uint16_t enq, uint16_t deq);
#endif /* _TEST_PIPELINE_COMMON_ */
diff --git a/app/test-eventdev/test_pipeline_queue.c b/app/test-eventdev/test_pipeline_queue.c
index 343f8f3b1d..e989396474 100644
--- a/app/test-eventdev/test_pipeline_queue.c
+++ b/app/test-eventdev/test_pipeline_queue.c
@@ -21,24 +21,27 @@ static __rte_noinline int
pipeline_queue_worker_single_stage_tx(void *arg)
{
PIPELINE_WORKER_SINGLE_STAGE_INIT;
+ uint8_t enq = 0, deq = 0;
while (t->done == false) {
- uint16_t event = rte_event_dequeue_burst(dev, port, &ev, 1, 0);
+ deq = rte_event_dequeue_burst(dev, port, &ev, 1, 0);
- if (!event) {
+ if (!deq) {
rte_pause();
continue;
}
if (ev.sched_type == RTE_SCHED_TYPE_ATOMIC) {
- pipeline_event_tx(dev, port, &ev);
+ enq = pipeline_event_tx(dev, port, &ev, t);
+ ev.op = RTE_EVENT_OP_RELEASE;
w->processed_pkts++;
} else {
ev.queue_id++;
pipeline_fwd_event(&ev, RTE_SCHED_TYPE_ATOMIC);
- pipeline_event_enqueue(dev, port, &ev);
+ enq = pipeline_event_enqueue(dev, port, &ev, t);
}
}
+ pipeline_worker_cleanup(dev, port, &ev, enq, deq);
return 0;
}
@@ -48,11 +51,12 @@ pipeline_queue_worker_single_stage_fwd(void *arg)
{
PIPELINE_WORKER_SINGLE_STAGE_INIT;
const uint8_t *tx_queue = t->tx_evqueue_id;
+ uint8_t enq = 0, deq = 0;
while (t->done == false) {
- uint16_t event = rte_event_dequeue_burst(dev, port, &ev, 1, 0);
+ deq = rte_event_dequeue_burst(dev, port, &ev, 1, 0);
- if (!event) {
+ if (!deq) {
rte_pause();
continue;
}
@@ -60,9 +64,10 @@ pipeline_queue_worker_single_stage_fwd(void *arg)
ev.queue_id = tx_queue[ev.mbuf->port];
rte_event_eth_tx_adapter_txq_set(ev.mbuf, 0);
pipeline_fwd_event(&ev, RTE_SCHED_TYPE_ATOMIC);
- pipeline_event_enqueue(dev, port, &ev);
+ enq = pipeline_event_enqueue(dev, port, &ev, t);
w->processed_pkts++;
}
+ pipeline_worker_cleanup(dev, port, &ev, enq, deq);
return 0;
}
@@ -71,10 +76,10 @@ static __rte_noinline int
pipeline_queue_worker_single_stage_burst_tx(void *arg)
{
PIPELINE_WORKER_SINGLE_STAGE_BURST_INIT;
+ uint16_t nb_rx = 0, nb_tx = 0;
while (t->done == false) {
- uint16_t nb_rx = rte_event_dequeue_burst(dev, port, ev,
- BURST_SIZE, 0);
+ nb_rx = rte_event_dequeue_burst(dev, port, ev, BURST_SIZE, 0);
if (!nb_rx) {
rte_pause();
@@ -84,17 +89,18 @@ pipeline_queue_worker_single_stage_burst_tx(void *arg)
for (i = 0; i < nb_rx; i++) {
rte_prefetch0(ev[i + 1].mbuf);
if (ev[i].sched_type == RTE_SCHED_TYPE_ATOMIC) {
- pipeline_event_tx(dev, port, &ev[i]);
+ pipeline_event_tx(dev, port, &ev[i], t);
+ ev[i].op = RTE_EVENT_OP_RELEASE;
w->processed_pkts++;
} else {
ev[i].queue_id++;
pipeline_fwd_event(&ev[i],
RTE_SCHED_TYPE_ATOMIC);
- pipeline_event_enqueue_burst(dev, port, ev,
- nb_rx);
}
}
+ nb_tx = pipeline_event_enqueue_burst(dev, port, ev, nb_rx, t);
}
+ pipeline_worker_cleanup(dev, port, ev, nb_tx, nb_rx);
return 0;
}
@@ -104,10 +110,10 @@ pipeline_queue_worker_single_stage_burst_fwd(void *arg)
{
PIPELINE_WORKER_SINGLE_STAGE_BURST_INIT;
const uint8_t *tx_queue = t->tx_evqueue_id;
+ uint16_t nb_rx = 0, nb_tx = 0;
while (t->done == false) {
- uint16_t nb_rx = rte_event_dequeue_burst(dev, port, ev,
- BURST_SIZE, 0);
+ nb_rx = rte_event_dequeue_burst(dev, port, ev, BURST_SIZE, 0);
if (!nb_rx) {
rte_pause();
@@ -121,9 +127,10 @@ pipeline_queue_worker_single_stage_burst_fwd(void *arg)
pipeline_fwd_event(&ev[i], RTE_SCHED_TYPE_ATOMIC);
}
- pipeline_event_enqueue_burst(dev, port, ev, nb_rx);
+ nb_tx = pipeline_event_enqueue_burst(dev, port, ev, nb_rx, t);
w->processed_pkts += nb_rx;
}
+ pipeline_worker_cleanup(dev, port, ev, nb_tx, nb_rx);
return 0;
}
@@ -132,26 +139,29 @@ static __rte_noinline int
pipeline_queue_worker_single_stage_tx_vector(void *arg)
{
PIPELINE_WORKER_SINGLE_STAGE_INIT;
+ uint8_t enq = 0, deq = 0;
uint16_t vector_sz;
while (!t->done) {
- uint16_t event = rte_event_dequeue_burst(dev, port, &ev, 1, 0);
+ deq = rte_event_dequeue_burst(dev, port, &ev, 1, 0);
- if (!event) {
+ if (!deq) {
rte_pause();
continue;
}
if (ev.sched_type == RTE_SCHED_TYPE_ATOMIC) {
vector_sz = ev.vec->nb_elem;
- pipeline_event_tx_vector(dev, port, &ev);
+ enq = pipeline_event_tx_vector(dev, port, &ev, t);
+ ev.op = RTE_EVENT_OP_RELEASE;
w->processed_pkts += vector_sz;
} else {
ev.queue_id++;
pipeline_fwd_event_vector(&ev, RTE_SCHED_TYPE_ATOMIC);
- pipeline_event_enqueue(dev, port, &ev);
+ enq = pipeline_event_enqueue(dev, port, &ev, t);
}
}
+ pipeline_worker_cleanup(dev, port, &ev, enq, deq);
return 0;
}
@@ -161,12 +171,13 @@ pipeline_queue_worker_single_stage_fwd_vector(void *arg)
{
PIPELINE_WORKER_SINGLE_STAGE_INIT;
const uint8_t *tx_queue = t->tx_evqueue_id;
+ uint8_t enq = 0, deq = 0;
uint16_t vector_sz;
while (!t->done) {
- uint16_t event = rte_event_dequeue_burst(dev, port, &ev, 1, 0);
+ deq = rte_event_dequeue_burst(dev, port, &ev, 1, 0);
- if (!event) {
+ if (!deq) {
rte_pause();
continue;
}
@@ -175,9 +186,10 @@ pipeline_queue_worker_single_stage_fwd_vector(void *arg)
ev.vec->queue = 0;
vector_sz = ev.vec->nb_elem;
pipeline_fwd_event_vector(&ev, RTE_SCHED_TYPE_ATOMIC);
- pipeline_event_enqueue(dev, port, &ev);
+ enq = pipeline_event_enqueue(dev, port, &ev, t);
w->processed_pkts += vector_sz;
}
+ pipeline_worker_cleanup(dev, port, &ev, enq, deq);
return 0;
}
@@ -186,11 +198,11 @@ static __rte_noinline int
pipeline_queue_worker_single_stage_burst_tx_vector(void *arg)
{
PIPELINE_WORKER_SINGLE_STAGE_BURST_INIT;
+ uint16_t nb_rx = 0, nb_tx = 0;
uint16_t vector_sz;
while (!t->done) {
- uint16_t nb_rx =
- rte_event_dequeue_burst(dev, port, ev, BURST_SIZE, 0);
+ nb_rx = rte_event_dequeue_burst(dev, port, ev, BURST_SIZE, 0);
if (!nb_rx) {
rte_pause();
@@ -200,7 +212,7 @@ pipeline_queue_worker_single_stage_burst_tx_vector(void *arg)
for (i = 0; i < nb_rx; i++) {
if (ev[i].sched_type == RTE_SCHED_TYPE_ATOMIC) {
vector_sz = ev[i].vec->nb_elem;
- pipeline_event_tx_vector(dev, port, &ev[i]);
+ pipeline_event_tx_vector(dev, port, &ev[i], t);
ev[i].op = RTE_EVENT_OP_RELEASE;
w->processed_pkts += vector_sz;
} else {
@@ -210,8 +222,9 @@ pipeline_queue_worker_single_stage_burst_tx_vector(void *arg)
}
}
- pipeline_event_enqueue_burst(dev, port, ev, nb_rx);
+ nb_tx = pipeline_event_enqueue_burst(dev, port, ev, nb_rx, t);
}
+ pipeline_worker_cleanup(dev, port, ev, nb_tx, nb_rx);
return 0;
}
@@ -221,11 +234,11 @@ pipeline_queue_worker_single_stage_burst_fwd_vector(void *arg)
{
PIPELINE_WORKER_SINGLE_STAGE_BURST_INIT;
const uint8_t *tx_queue = t->tx_evqueue_id;
+ uint16_t nb_rx = 0, nb_tx = 0;
uint16_t vector_sz;
while (!t->done) {
- uint16_t nb_rx =
- rte_event_dequeue_burst(dev, port, ev, BURST_SIZE, 0);
+ nb_rx = rte_event_dequeue_burst(dev, port, ev, BURST_SIZE, 0);
if (!nb_rx) {
rte_pause();
@@ -241,9 +254,10 @@ pipeline_queue_worker_single_stage_burst_fwd_vector(void *arg)
RTE_SCHED_TYPE_ATOMIC);
}
- pipeline_event_enqueue_burst(dev, port, ev, nb_rx);
+ nb_tx = pipeline_event_enqueue_burst(dev, port, ev, nb_rx, t);
w->processed_pkts += vector_sz;
}
+ pipeline_worker_cleanup(dev, port, ev, nb_tx, nb_rx);
return 0;
}
@@ -253,11 +267,12 @@ pipeline_queue_worker_multi_stage_tx(void *arg)
{
PIPELINE_WORKER_MULTI_STAGE_INIT;
const uint8_t *tx_queue = t->tx_evqueue_id;
+ uint8_t enq = 0, deq = 0;
while (t->done == false) {
- uint16_t event = rte_event_dequeue_burst(dev, port, &ev, 1, 0);
+ deq = rte_event_dequeue_burst(dev, port, &ev, 1, 0);
- if (!event) {
+ if (!deq) {
rte_pause();
continue;
}
@@ -265,7 +280,8 @@ pipeline_queue_worker_multi_stage_tx(void *arg)
cq_id = ev.queue_id % nb_stages;
if (ev.queue_id == tx_queue[ev.mbuf->port]) {
- pipeline_event_tx(dev, port, &ev);
+ enq = pipeline_event_tx(dev, port, &ev, t);
+ ev.op = RTE_EVENT_OP_RELEASE;
w->processed_pkts++;
continue;
}
@@ -274,8 +290,9 @@ pipeline_queue_worker_multi_stage_tx(void *arg)
pipeline_fwd_event(&ev, cq_id != last_queue ?
sched_type_list[cq_id] :
RTE_SCHED_TYPE_ATOMIC);
- pipeline_event_enqueue(dev, port, &ev);
+ enq = pipeline_event_enqueue(dev, port, &ev, t);
}
+ pipeline_worker_cleanup(dev, port, &ev, enq, deq);
return 0;
}
@@ -285,11 +302,12 @@ pipeline_queue_worker_multi_stage_fwd(void *arg)
{
PIPELINE_WORKER_MULTI_STAGE_INIT;
const uint8_t *tx_queue = t->tx_evqueue_id;
+ uint8_t enq = 0, deq = 0;
while (t->done == false) {
- uint16_t event = rte_event_dequeue_burst(dev, port, &ev, 1, 0);
+ deq = rte_event_dequeue_burst(dev, port, &ev, 1, 0);
- if (!event) {
+ if (!deq) {
rte_pause();
continue;
}
@@ -300,14 +318,15 @@ pipeline_queue_worker_multi_stage_fwd(void *arg)
ev.queue_id = tx_queue[ev.mbuf->port];
rte_event_eth_tx_adapter_txq_set(ev.mbuf, 0);
pipeline_fwd_event(&ev, RTE_SCHED_TYPE_ATOMIC);
- pipeline_event_enqueue(dev, port, &ev);
+ enq = pipeline_event_enqueue(dev, port, &ev, t);
w->processed_pkts++;
} else {
ev.queue_id++;
pipeline_fwd_event(&ev, sched_type_list[cq_id]);
- pipeline_event_enqueue(dev, port, &ev);
+ enq = pipeline_event_enqueue(dev, port, &ev, t);
}
}
+ pipeline_worker_cleanup(dev, port, &ev, enq, deq);
return 0;
}
@@ -317,10 +336,10 @@ pipeline_queue_worker_multi_stage_burst_tx(void *arg)
{
PIPELINE_WORKER_MULTI_STAGE_BURST_INIT;
const uint8_t *tx_queue = t->tx_evqueue_id;
+ uint16_t nb_rx = 0, nb_tx = 0;
while (t->done == false) {
- uint16_t nb_rx = rte_event_dequeue_burst(dev, port, ev,
- BURST_SIZE, 0);
+ nb_rx = rte_event_dequeue_burst(dev, port, ev, BURST_SIZE, 0);
if (!nb_rx) {
rte_pause();
@@ -332,7 +351,8 @@ pipeline_queue_worker_multi_stage_burst_tx(void *arg)
cq_id = ev[i].queue_id % nb_stages;
if (ev[i].queue_id == tx_queue[ev[i].mbuf->port]) {
- pipeline_event_tx(dev, port, &ev[i]);
+ pipeline_event_tx(dev, port, &ev[i], t);
+ ev[i].op = RTE_EVENT_OP_RELEASE;
w->processed_pkts++;
continue;
}
@@ -341,9 +361,10 @@ pipeline_queue_worker_multi_stage_burst_tx(void *arg)
pipeline_fwd_event(&ev[i], cq_id != last_queue ?
sched_type_list[cq_id] :
RTE_SCHED_TYPE_ATOMIC);
- pipeline_event_enqueue_burst(dev, port, ev, nb_rx);
}
+ nb_tx = pipeline_event_enqueue_burst(dev, port, ev, nb_rx, t);
}
+ pipeline_worker_cleanup(dev, port, ev, nb_tx, nb_rx);
return 0;
}
@@ -353,11 +374,11 @@ pipeline_queue_worker_multi_stage_burst_fwd(void *arg)
{
PIPELINE_WORKER_MULTI_STAGE_BURST_INIT;
const uint8_t *tx_queue = t->tx_evqueue_id;
+ uint16_t nb_rx = 0, nb_tx = 0;
while (t->done == false) {
uint16_t processed_pkts = 0;
- uint16_t nb_rx = rte_event_dequeue_burst(dev, port, ev,
- BURST_SIZE, 0);
+ nb_rx = rte_event_dequeue_burst(dev, port, ev, BURST_SIZE, 0);
if (!nb_rx) {
rte_pause();
@@ -381,9 +402,10 @@ pipeline_queue_worker_multi_stage_burst_fwd(void *arg)
}
}
- pipeline_event_enqueue_burst(dev, port, ev, nb_rx);
+ nb_tx = pipeline_event_enqueue_burst(dev, port, ev, nb_rx, t);
w->processed_pkts += processed_pkts;
}
+ pipeline_worker_cleanup(dev, port, ev, nb_tx, nb_rx);
return 0;
}
@@ -393,12 +415,13 @@ pipeline_queue_worker_multi_stage_tx_vector(void *arg)
{
PIPELINE_WORKER_MULTI_STAGE_INIT;
const uint8_t *tx_queue = t->tx_evqueue_id;
+ uint8_t enq = 0, deq = 0;
uint16_t vector_sz;
while (!t->done) {
- uint16_t event = rte_event_dequeue_burst(dev, port, &ev, 1, 0);
+ deq = rte_event_dequeue_burst(dev, port, &ev, 1, 0);
- if (!event) {
+ if (!deq) {
rte_pause();
continue;
}
@@ -407,8 +430,9 @@ pipeline_queue_worker_multi_stage_tx_vector(void *arg)
if (ev.queue_id == tx_queue[ev.vec->port]) {
vector_sz = ev.vec->nb_elem;
- pipeline_event_tx_vector(dev, port, &ev);
+ enq = pipeline_event_tx_vector(dev, port, &ev, t);
w->processed_pkts += vector_sz;
+ ev.op = RTE_EVENT_OP_RELEASE;
continue;
}
@@ -416,8 +440,9 @@ pipeline_queue_worker_multi_stage_tx_vector(void *arg)
pipeline_fwd_event_vector(&ev, cq_id != last_queue
? sched_type_list[cq_id]
: RTE_SCHED_TYPE_ATOMIC);
- pipeline_event_enqueue(dev, port, &ev);
+ enq = pipeline_event_enqueue(dev, port, &ev, t);
}
+ pipeline_worker_cleanup(dev, port, &ev, enq, deq);
return 0;
}
@@ -427,12 +452,13 @@ pipeline_queue_worker_multi_stage_fwd_vector(void *arg)
{
PIPELINE_WORKER_MULTI_STAGE_INIT;
const uint8_t *tx_queue = t->tx_evqueue_id;
+ uint8_t enq = 0, deq = 0;
uint16_t vector_sz;
while (!t->done) {
- uint16_t event = rte_event_dequeue_burst(dev, port, &ev, 1, 0);
+ deq = rte_event_dequeue_burst(dev, port, &ev, 1, 0);
- if (!event) {
+ if (!deq) {
rte_pause();
continue;
}
@@ -449,8 +475,9 @@ pipeline_queue_worker_multi_stage_fwd_vector(void *arg)
pipeline_fwd_event_vector(&ev, sched_type_list[cq_id]);
}
- pipeline_event_enqueue(dev, port, &ev);
+ enq = pipeline_event_enqueue(dev, port, &ev, t);
}
+ pipeline_worker_cleanup(dev, port, &ev, enq, deq);
return 0;
}
@@ -460,11 +487,11 @@ pipeline_queue_worker_multi_stage_burst_tx_vector(void *arg)
{
PIPELINE_WORKER_MULTI_STAGE_BURST_INIT;
const uint8_t *tx_queue = t->tx_evqueue_id;
+ uint16_t nb_rx = 0, nb_tx = 0;
uint16_t vector_sz;
while (!t->done) {
- uint16_t nb_rx =
- rte_event_dequeue_burst(dev, port, ev, BURST_SIZE, 0);
+ nb_rx = rte_event_dequeue_burst(dev, port, ev, BURST_SIZE, 0);
if (!nb_rx) {
rte_pause();
@@ -476,7 +503,7 @@ pipeline_queue_worker_multi_stage_burst_tx_vector(void *arg)
if (ev[i].queue_id == tx_queue[ev[i].vec->port]) {
vector_sz = ev[i].vec->nb_elem;
- pipeline_event_tx_vector(dev, port, &ev[i]);
+ pipeline_event_tx_vector(dev, port, &ev[i], t);
ev[i].op = RTE_EVENT_OP_RELEASE;
w->processed_pkts += vector_sz;
continue;
@@ -489,8 +516,9 @@ pipeline_queue_worker_multi_stage_burst_tx_vector(void *arg)
: RTE_SCHED_TYPE_ATOMIC);
}
- pipeline_event_enqueue_burst(dev, port, ev, nb_rx);
+ nb_tx = pipeline_event_enqueue_burst(dev, port, ev, nb_rx, t);
}
+ pipeline_worker_cleanup(dev, port, ev, nb_tx, nb_rx);
return 0;
}
@@ -500,11 +528,11 @@ pipeline_queue_worker_multi_stage_burst_fwd_vector(void *arg)
{
PIPELINE_WORKER_MULTI_STAGE_BURST_INIT;
const uint8_t *tx_queue = t->tx_evqueue_id;
+ uint16_t nb_rx = 0, nb_tx = 0;
uint16_t vector_sz;
while (!t->done) {
- uint16_t nb_rx =
- rte_event_dequeue_burst(dev, port, ev, BURST_SIZE, 0);
+ nb_rx = rte_event_dequeue_burst(dev, port, ev, BURST_SIZE, 0);
if (!nb_rx) {
rte_pause();
@@ -527,8 +555,9 @@ pipeline_queue_worker_multi_stage_burst_fwd_vector(void *arg)
}
}
- pipeline_event_enqueue_burst(dev, port, ev, nb_rx);
+ nb_tx = pipeline_event_enqueue_burst(dev, port, ev, nb_rx, t);
}
+ pipeline_worker_cleanup(dev, port, ev, nb_tx, nb_rx);
return 0;
}
--
2.25.1
* [PATCH 3/6] examples/eventdev: clean up worker state before exit
2022-04-26 21:14 [PATCH 1/6] app/eventdev: simplify signal handling and teardown Pavan Nikhilesh
2022-04-26 21:14 ` [PATCH 2/6] app/eventdev: clean up worker state before exit Pavan Nikhilesh
@ 2022-04-26 21:14 ` Pavan Nikhilesh
2022-04-26 21:14 ` [PATCH 4/6] examples/l3fwd: " Pavan Nikhilesh
` (5 subsequent siblings)
7 siblings, 0 replies; 17+ messages in thread
From: Pavan Nikhilesh @ 2022-04-26 21:14 UTC (permalink / raw)
To: jerinj, Harry van Haaren; +Cc: dev, Pavan Nikhilesh
Event ports are configured to implicitly release the scheduler
contexts they currently hold in the next call to
rte_event_dequeue_burst().
A worker core might still hold a scheduling context during exit, as
the next call to rte_event_dequeue_burst() is never made.
This might lead to a deadlock depending on the worker exit timing,
especially when there are very few flows.
Add a cleanup function that releases any scheduling contexts held by
the worker using RTE_EVENT_OP_RELEASE.
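For the non-burst workers, which hold at most one event at a time,
the same idea reduces to releasing the last dequeued event on exit.
A sketch of the pattern used below (ev.u64 being non-zero indicates
that a valid event is still held):

    if (ev.u64) {
        ev.op = RTE_EVENT_OP_RELEASE;
        rte_event_enqueue_burst(dev, port, &ev, 1);
    }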
Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
---
examples/eventdev_pipeline/pipeline_common.h | 22 ++++++
.../pipeline_worker_generic.c | 23 +++---
.../eventdev_pipeline/pipeline_worker_tx.c | 79 ++++++++++++-------
3 files changed, 87 insertions(+), 37 deletions(-)
diff --git a/examples/eventdev_pipeline/pipeline_common.h b/examples/eventdev_pipeline/pipeline_common.h
index b12eb281e1..9899b257b0 100644
--- a/examples/eventdev_pipeline/pipeline_common.h
+++ b/examples/eventdev_pipeline/pipeline_common.h
@@ -140,5 +140,27 @@ schedule_devices(unsigned int lcore_id)
}
}
+static inline void
+worker_cleanup(uint8_t dev_id, uint8_t port_id, struct rte_event events[],
+ uint16_t nb_enq, uint16_t nb_deq)
+{
+ int i;
+
+ if (!(nb_deq - nb_enq))
+ return;
+
+ if (nb_deq) {
+ for (i = nb_enq; i < nb_deq; i++) {
+ if (events[i].op == RTE_EVENT_OP_RELEASE)
+ continue;
+ rte_pktmbuf_free(events[i].mbuf);
+ }
+
+ for (i = 0; i < nb_deq; i++)
+ events[i].op = RTE_EVENT_OP_RELEASE;
+ rte_event_enqueue_burst(dev_id, port_id, events, nb_deq);
+ }
+}
+
void set_worker_generic_setup_data(struct setup_data *caps, bool burst);
void set_worker_tx_enq_setup_data(struct setup_data *caps, bool burst);
diff --git a/examples/eventdev_pipeline/pipeline_worker_generic.c b/examples/eventdev_pipeline/pipeline_worker_generic.c
index ce1e92d59e..c564c808e2 100644
--- a/examples/eventdev_pipeline/pipeline_worker_generic.c
+++ b/examples/eventdev_pipeline/pipeline_worker_generic.c
@@ -16,6 +16,7 @@ worker_generic(void *arg)
uint8_t port_id = data->port_id;
size_t sent = 0, received = 0;
unsigned int lcore_id = rte_lcore_id();
+ uint16_t nb_rx = 0, nb_tx = 0;
while (!fdata->done) {
@@ -27,8 +28,7 @@ worker_generic(void *arg)
continue;
}
- const uint16_t nb_rx = rte_event_dequeue_burst(dev_id, port_id,
- &ev, 1, 0);
+ nb_rx = rte_event_dequeue_burst(dev_id, port_id, &ev, 1, 0);
if (nb_rx == 0) {
rte_pause();
@@ -47,11 +47,14 @@ worker_generic(void *arg)
work();
- while (rte_event_enqueue_burst(dev_id, port_id, &ev, 1) != 1)
- rte_pause();
+ do {
+ nb_tx = rte_event_enqueue_burst(dev_id, port_id, &ev,
+ 1);
+ } while (!nb_tx && !fdata->done);
sent++;
}
+ worker_cleanup(dev_id, port_id, &ev, nb_tx, nb_rx);
if (!cdata.quiet)
printf(" worker %u thread done. RX=%zu TX=%zu\n",
rte_lcore_id(), received, sent);
@@ -69,10 +72,9 @@ worker_generic_burst(void *arg)
uint8_t port_id = data->port_id;
size_t sent = 0, received = 0;
unsigned int lcore_id = rte_lcore_id();
+ uint16_t i, nb_rx = 0, nb_tx = 0;
while (!fdata->done) {
- uint16_t i;
-
if (fdata->cap.scheduler)
fdata->cap.scheduler(lcore_id);
@@ -81,8 +83,8 @@ worker_generic_burst(void *arg)
continue;
}
- const uint16_t nb_rx = rte_event_dequeue_burst(dev_id, port_id,
- events, RTE_DIM(events), 0);
+ nb_rx = rte_event_dequeue_burst(dev_id, port_id, events,
+ RTE_DIM(events), 0);
if (nb_rx == 0) {
rte_pause();
@@ -103,8 +105,7 @@ worker_generic_burst(void *arg)
work();
}
- uint16_t nb_tx = rte_event_enqueue_burst(dev_id, port_id,
- events, nb_rx);
+ nb_tx = rte_event_enqueue_burst(dev_id, port_id, events, nb_rx);
while (nb_tx < nb_rx && !fdata->done)
nb_tx += rte_event_enqueue_burst(dev_id, port_id,
events + nb_tx,
@@ -112,6 +113,8 @@ worker_generic_burst(void *arg)
sent += nb_tx;
}
+ worker_cleanup(dev_id, port_id, events, nb_tx, nb_rx);
+
if (!cdata.quiet)
printf(" worker %u thread done. RX=%zu TX=%zu\n",
rte_lcore_id(), received, sent);
diff --git a/examples/eventdev_pipeline/pipeline_worker_tx.c b/examples/eventdev_pipeline/pipeline_worker_tx.c
index 473940f8c7..a82e064c1c 100644
--- a/examples/eventdev_pipeline/pipeline_worker_tx.c
+++ b/examples/eventdev_pipeline/pipeline_worker_tx.c
@@ -18,21 +18,22 @@ static __rte_always_inline void
worker_event_enqueue(const uint8_t dev, const uint8_t port,
struct rte_event *ev)
{
- while (rte_event_enqueue_burst(dev, port, ev, 1) != 1)
+ while (!rte_event_enqueue_burst(dev, port, ev, 1) && !fdata->done)
rte_pause();
}
-static __rte_always_inline void
+static __rte_always_inline uint16_t
worker_event_enqueue_burst(const uint8_t dev, const uint8_t port,
- struct rte_event *ev, const uint16_t nb_rx)
+ struct rte_event *ev, const uint16_t nb_rx)
{
uint16_t enq;
enq = rte_event_enqueue_burst(dev, port, ev, nb_rx);
- while (enq < nb_rx) {
+ while (enq < nb_rx && !fdata->done)
enq += rte_event_enqueue_burst(dev, port,
ev + enq, nb_rx - enq);
- }
+
+ return enq;
}
static __rte_always_inline void
@@ -40,7 +41,8 @@ worker_tx_pkt(const uint8_t dev, const uint8_t port, struct rte_event *ev)
{
exchange_mac(ev->mbuf);
rte_event_eth_tx_adapter_txq_set(ev->mbuf, 0);
- while (!rte_event_eth_tx_adapter_enqueue(dev, port, ev, 1, 0))
+ while (!rte_event_eth_tx_adapter_enqueue(dev, port, ev, 1, 0) &&
+ !fdata->done)
rte_pause();
}
@@ -76,6 +78,11 @@ worker_do_tx_single(void *arg)
}
}
+ if (ev.u64) {
+ ev.op = RTE_EVENT_OP_RELEASE;
+ rte_event_enqueue_burst(dev, port, &ev, 1);
+ }
+
if (!cdata.quiet)
printf(" worker %u thread done. RX=%zu FWD=%zu TX=%zu\n",
rte_lcore_id(), received, fwd, tx);
@@ -111,6 +118,11 @@ worker_do_tx_single_atq(void *arg)
}
}
+ if (ev.u64) {
+ ev.op = RTE_EVENT_OP_RELEASE;
+ rte_event_enqueue_burst(dev, port, &ev, 1);
+ }
+
if (!cdata.quiet)
printf(" worker %u thread done. RX=%zu FWD=%zu TX=%zu\n",
rte_lcore_id(), received, fwd, tx);
@@ -126,11 +138,10 @@ worker_do_tx_single_burst(void *arg)
const uint8_t dev = data->dev_id;
const uint8_t port = data->port_id;
size_t fwd = 0, received = 0, tx = 0;
+ uint16_t nb_tx = 0, nb_rx = 0, i;
while (!fdata->done) {
- uint16_t i;
- uint16_t nb_rx = rte_event_dequeue_burst(dev, port, ev,
- BATCH_SIZE, 0);
+ nb_rx = rte_event_dequeue_burst(dev, port, ev, BATCH_SIZE, 0);
if (!nb_rx) {
rte_pause();
@@ -153,10 +164,12 @@ worker_do_tx_single_burst(void *arg)
work();
}
- worker_event_enqueue_burst(dev, port, ev, nb_rx);
- fwd += nb_rx;
+ nb_tx = worker_event_enqueue_burst(dev, port, ev, nb_rx);
+ fwd += nb_tx;
}
+ worker_cleanup(dev, port, ev, nb_tx, nb_rx);
+
if (!cdata.quiet)
printf(" worker %u thread done. RX=%zu FWD=%zu TX=%zu\n",
rte_lcore_id(), received, fwd, tx);
@@ -172,11 +185,10 @@ worker_do_tx_single_burst_atq(void *arg)
const uint8_t dev = data->dev_id;
const uint8_t port = data->port_id;
size_t fwd = 0, received = 0, tx = 0;
+ uint16_t i, nb_rx = 0, nb_tx = 0;
while (!fdata->done) {
- uint16_t i;
- uint16_t nb_rx = rte_event_dequeue_burst(dev, port, ev,
- BATCH_SIZE, 0);
+ nb_rx = rte_event_dequeue_burst(dev, port, ev, BATCH_SIZE, 0);
if (!nb_rx) {
rte_pause();
@@ -197,10 +209,12 @@ worker_do_tx_single_burst_atq(void *arg)
work();
}
- worker_event_enqueue_burst(dev, port, ev, nb_rx);
- fwd += nb_rx;
+ nb_tx = worker_event_enqueue_burst(dev, port, ev, nb_rx);
+ fwd += nb_tx;
}
+ worker_cleanup(dev, port, ev, nb_tx, nb_rx);
+
if (!cdata.quiet)
printf(" worker %u thread done. RX=%zu FWD=%zu TX=%zu\n",
rte_lcore_id(), received, fwd, tx);
@@ -251,6 +265,11 @@ worker_do_tx(void *arg)
fwd++;
}
+ if (ev.u64) {
+ ev.op = RTE_EVENT_OP_RELEASE;
+ rte_event_enqueue_burst(dev, port, &ev, 1);
+ }
+
if (!cdata.quiet)
printf(" worker %u thread done. RX=%zu FWD=%zu TX=%zu\n",
rte_lcore_id(), received, fwd, tx);
@@ -297,6 +316,11 @@ worker_do_tx_atq(void *arg)
fwd++;
}
+ if (ev.u64) {
+ ev.op = RTE_EVENT_OP_RELEASE;
+ rte_event_enqueue_burst(dev, port, &ev, 1);
+ }
+
if (!cdata.quiet)
printf(" worker %u thread done. RX=%zu FWD=%zu TX=%zu\n",
rte_lcore_id(), received, fwd, tx);
@@ -314,11 +338,10 @@ worker_do_tx_burst(void *arg)
uint8_t port = data->port_id;
uint8_t lst_qid = cdata.num_stages - 1;
size_t fwd = 0, received = 0, tx = 0;
+ uint16_t i, nb_rx = 0, nb_tx = 0;
while (!fdata->done) {
- uint16_t i;
- const uint16_t nb_rx = rte_event_dequeue_burst(dev, port,
- ev, BATCH_SIZE, 0);
+ nb_rx = rte_event_dequeue_burst(dev, port, ev, BATCH_SIZE, 0);
if (nb_rx == 0) {
rte_pause();
@@ -347,11 +370,13 @@ worker_do_tx_burst(void *arg)
}
work();
}
- worker_event_enqueue_burst(dev, port, ev, nb_rx);
- fwd += nb_rx;
+ nb_tx = worker_event_enqueue_burst(dev, port, ev, nb_rx);
+ fwd += nb_tx;
}
+ worker_cleanup(dev, port, ev, nb_tx, nb_rx);
+
if (!cdata.quiet)
printf(" worker %u thread done. RX=%zu FWD=%zu TX=%zu\n",
rte_lcore_id(), received, fwd, tx);
@@ -369,12 +394,10 @@ worker_do_tx_burst_atq(void *arg)
uint8_t port = data->port_id;
uint8_t lst_qid = cdata.num_stages - 1;
size_t fwd = 0, received = 0, tx = 0;
+ uint16_t i, nb_rx = 0, nb_tx = 0;
while (!fdata->done) {
- uint16_t i;
-
- const uint16_t nb_rx = rte_event_dequeue_burst(dev, port,
- ev, BATCH_SIZE, 0);
+ nb_rx = rte_event_dequeue_burst(dev, port, ev, BATCH_SIZE, 0);
if (nb_rx == 0) {
rte_pause();
@@ -402,10 +425,12 @@ worker_do_tx_burst_atq(void *arg)
work();
}
- worker_event_enqueue_burst(dev, port, ev, nb_rx);
- fwd += nb_rx;
+ nb_tx = worker_event_enqueue_burst(dev, port, ev, nb_rx);
+ fwd += nb_tx;
}
+ worker_cleanup(dev, port, ev, nb_tx, nb_rx);
+
if (!cdata.quiet)
printf(" worker %u thread done. RX=%zu FWD=%zu TX=%zu\n",
rte_lcore_id(), received, fwd, tx);
--
2.25.1
* [PATCH 4/6] examples/l3fwd: clean up worker state before exit
2022-04-26 21:14 [PATCH 1/6] app/eventdev: simplify signal handling and teardown Pavan Nikhilesh
2022-04-26 21:14 ` [PATCH 2/6] app/eventdev: clean up worker state before exit Pavan Nikhilesh
2022-04-26 21:14 ` [PATCH 3/6] examples/eventdev: " Pavan Nikhilesh
@ 2022-04-26 21:14 ` Pavan Nikhilesh
2022-04-26 21:14 ` [PATCH 5/6] examples/l2fwd-event: " Pavan Nikhilesh
` (4 subsequent siblings)
7 siblings, 0 replies; 17+ messages in thread
From: Pavan Nikhilesh @ 2022-04-26 21:14 UTC (permalink / raw)
To: jerinj; +Cc: dev, Pavan Nikhilesh
Event ports are configured to implicitly release the scheduler contexts
currently held in the next call to rte_event_dequeue_burst().
A worker core might still hold a scheduling context during exit, as the
next call to rte_event_dequeue_burst() is never made.
This might lead to a deadlock, depending on the worker exit timing,
especially when there are very few flows.
Add a cleanup function to release any scheduling contexts held by the
worker by using RTE_EVENT_OP_RELEASE.
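The pattern, in short: free the mbufs of any events that were dequeued
but never forwarded, then enqueue every dequeued slot back with
RTE_EVENT_OP_RELEASE so the port gives up the contexts it still holds.
A minimal non-vector sketch (the helper name is illustrative; the patch
adds l3fwd_event_worker_cleanup() below, which also handles event
vectors):
#include <rte_eventdev.h>
#include <rte_mbuf.h>
static void
worker_release_held_contexts(uint8_t dev_id, uint8_t port_id,
			     struct rte_event events[], uint16_t nb_enq,
			     uint16_t nb_deq)
{
	uint16_t i;
	if (!nb_deq)
		return;
	/* Events in [nb_enq, nb_deq) were dequeued but never forwarded;
	 * free their mbufs so they are not leaked.
	 */
	for (i = nb_enq; i < nb_deq; i++)
		rte_pktmbuf_free(events[i].mbuf);
	/* Turn every dequeued slot into a release so the port drops any
	 * scheduling contexts it still holds.
	 */
	for (i = 0; i < nb_deq; i++)
		events[i].op = RTE_EVENT_OP_RELEASE;
	rte_event_enqueue_burst(dev_id, port_id, events, nb_deq);
}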
Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
---
examples/l3fwd/l3fwd_em.c | 32 ++++++++++++++++++++++----------
examples/l3fwd/l3fwd_event.c | 34 ++++++++++++++++++++++++++++++++++
examples/l3fwd/l3fwd_event.h | 5 +++++
examples/l3fwd/l3fwd_fib.c | 10 ++++++++--
examples/l3fwd/l3fwd_lpm.c | 32 ++++++++++++++++++++++----------
5 files changed, 91 insertions(+), 22 deletions(-)
diff --git a/examples/l3fwd/l3fwd_em.c b/examples/l3fwd/l3fwd_em.c
index 24d0910fe0..6f8d94f120 100644
--- a/examples/l3fwd/l3fwd_em.c
+++ b/examples/l3fwd/l3fwd_em.c
@@ -653,6 +653,7 @@ em_event_loop_single(struct l3fwd_event_resources *evt_rsrc,
const uint8_t tx_q_id = evt_rsrc->evq.event_q_id[
evt_rsrc->evq.nb_queues - 1];
const uint8_t event_d_id = evt_rsrc->event_d_id;
+ uint8_t deq = 0, enq = 0;
struct lcore_conf *lconf;
unsigned int lcore_id;
struct rte_event ev;
@@ -665,7 +666,9 @@ em_event_loop_single(struct l3fwd_event_resources *evt_rsrc,
RTE_LOG(INFO, L3FWD, "entering %s on lcore %u\n", __func__, lcore_id);
while (!force_quit) {
- if (!rte_event_dequeue_burst(event_d_id, event_p_id, &ev, 1, 0))
+ deq = rte_event_dequeue_burst(event_d_id, event_p_id, &ev, 1,
+ 0);
+ if (!deq)
continue;
struct rte_mbuf *mbuf = ev.mbuf;
@@ -684,19 +687,22 @@ em_event_loop_single(struct l3fwd_event_resources *evt_rsrc,
if (flags & L3FWD_EVENT_TX_ENQ) {
ev.queue_id = tx_q_id;
ev.op = RTE_EVENT_OP_FORWARD;
- while (rte_event_enqueue_burst(event_d_id, event_p_id,
- &ev, 1) && !force_quit)
- ;
+ do {
+ enq = rte_event_enqueue_burst(
+ event_d_id, event_p_id, &ev, 1);
+ } while (!enq && !force_quit);
}
if (flags & L3FWD_EVENT_TX_DIRECT) {
rte_event_eth_tx_adapter_txq_set(mbuf, 0);
- while (!rte_event_eth_tx_adapter_enqueue(event_d_id,
- event_p_id, &ev, 1, 0) &&
- !force_quit)
- ;
+ do {
+ enq = rte_event_eth_tx_adapter_enqueue(
+ event_d_id, event_p_id, &ev, 1, 0);
+ } while (!enq && !force_quit);
}
}
+
+ l3fwd_event_worker_cleanup(event_d_id, event_p_id, &ev, enq, deq, 0);
}
static __rte_always_inline void
@@ -709,9 +715,9 @@ em_event_loop_burst(struct l3fwd_event_resources *evt_rsrc,
const uint8_t event_d_id = evt_rsrc->event_d_id;
const uint16_t deq_len = evt_rsrc->deq_depth;
struct rte_event events[MAX_PKT_BURST];
+ int i, nb_enq = 0, nb_deq = 0;
struct lcore_conf *lconf;
unsigned int lcore_id;
- int i, nb_enq, nb_deq;
if (event_p_id < 0)
return;
@@ -769,6 +775,9 @@ em_event_loop_burst(struct l3fwd_event_resources *evt_rsrc,
nb_deq - nb_enq, 0);
}
}
+
+ l3fwd_event_worker_cleanup(event_d_id, event_p_id, events, nb_enq,
+ nb_deq, 0);
}
static __rte_always_inline void
@@ -832,9 +841,9 @@ em_event_loop_vector(struct l3fwd_event_resources *evt_rsrc,
const uint8_t event_d_id = evt_rsrc->event_d_id;
const uint16_t deq_len = evt_rsrc->deq_depth;
struct rte_event events[MAX_PKT_BURST];
+ int i, nb_enq = 0, nb_deq = 0;
struct lcore_conf *lconf;
unsigned int lcore_id;
- int i, nb_enq, nb_deq;
if (event_p_id < 0)
return;
@@ -887,6 +896,9 @@ em_event_loop_vector(struct l3fwd_event_resources *evt_rsrc,
nb_deq - nb_enq, 0);
}
}
+
+ l3fwd_event_worker_cleanup(event_d_id, event_p_id, events, nb_enq,
+ nb_deq, 1);
}
int __rte_noinline
diff --git a/examples/l3fwd/l3fwd_event.c b/examples/l3fwd/l3fwd_event.c
index 7a401290f8..a14a21b414 100644
--- a/examples/l3fwd/l3fwd_event.c
+++ b/examples/l3fwd/l3fwd_event.c
@@ -287,3 +287,37 @@ l3fwd_event_resource_setup(struct rte_eth_conf *port_conf)
fib_event_loop[evt_rsrc->vector_enabled][evt_rsrc->tx_mode_q]
[evt_rsrc->has_burst];
}
+
+static void
+l3fwd_event_vector_array_free(struct rte_event events[], uint16_t num)
+{
+ uint16_t i;
+
+ for (i = 0; i < num; i++) {
+ rte_pktmbuf_free_bulk(events[i].vec->mbufs,
+ events[i].vec->nb_elem);
+ rte_mempool_put(rte_mempool_from_obj(events[i].vec),
+ events[i].vec);
+ }
+}
+
+void
+l3fwd_event_worker_cleanup(uint8_t event_d_id, uint8_t event_p_id,
+ struct rte_event events[], uint16_t nb_enq,
+ uint16_t nb_deq, uint8_t is_vector)
+{
+ int i;
+
+ if (nb_deq) {
+ if (is_vector)
+ l3fwd_event_vector_array_free(events + nb_enq,
+ nb_deq - nb_enq);
+ else
+ for (i = nb_enq; i < nb_deq; i++)
+ rte_pktmbuf_free(events[i].mbuf);
+
+ for (i = 0; i < nb_deq; i++)
+ events[i].op = RTE_EVENT_OP_RELEASE;
+ rte_event_enqueue_burst(event_d_id, event_p_id, events, nb_deq);
+ }
+}
diff --git a/examples/l3fwd/l3fwd_event.h b/examples/l3fwd/l3fwd_event.h
index f139632016..b93841a16f 100644
--- a/examples/l3fwd/l3fwd_event.h
+++ b/examples/l3fwd/l3fwd_event.h
@@ -103,10 +103,15 @@ event_vector_txq_set(struct rte_event_vector *vec, uint16_t txq)
}
}
+
+
struct l3fwd_event_resources *l3fwd_get_eventdev_rsrc(void);
void l3fwd_event_resource_setup(struct rte_eth_conf *port_conf);
int l3fwd_get_free_event_port(struct l3fwd_event_resources *eventdev_rsrc);
void l3fwd_event_set_generic_ops(struct l3fwd_event_setup_ops *ops);
void l3fwd_event_set_internal_port_ops(struct l3fwd_event_setup_ops *ops);
+void l3fwd_event_worker_cleanup(uint8_t event_d_id, uint8_t event_p_id,
+ struct rte_event events[], uint16_t nb_enq,
+ uint16_t nb_deq, uint8_t is_vector);
#endif /* __L3FWD_EVENTDEV_H__ */
diff --git a/examples/l3fwd/l3fwd_fib.c b/examples/l3fwd/l3fwd_fib.c
index 6e0054b4cb..26d0767ae2 100644
--- a/examples/l3fwd/l3fwd_fib.c
+++ b/examples/l3fwd/l3fwd_fib.c
@@ -252,9 +252,9 @@ fib_event_loop(struct l3fwd_event_resources *evt_rsrc,
const uint8_t event_d_id = evt_rsrc->event_d_id;
const uint16_t deq_len = evt_rsrc->deq_depth;
struct rte_event events[MAX_PKT_BURST];
+ int i, nb_enq = 0, nb_deq = 0;
struct lcore_conf *lconf;
unsigned int lcore_id;
- int nb_enq, nb_deq, i;
uint32_t ipv4_arr[MAX_PKT_BURST];
uint8_t ipv6_arr[MAX_PKT_BURST][RTE_FIB6_IPV6_ADDR_SIZE];
@@ -370,6 +370,9 @@ fib_event_loop(struct l3fwd_event_resources *evt_rsrc,
nb_deq - nb_enq, 0);
}
}
+
+ l3fwd_event_worker_cleanup(event_d_id, event_p_id, events, nb_enq,
+ nb_deq, 0);
}
int __rte_noinline
@@ -491,7 +494,7 @@ fib_event_loop_vector(struct l3fwd_event_resources *evt_rsrc,
const uint8_t event_d_id = evt_rsrc->event_d_id;
const uint16_t deq_len = evt_rsrc->deq_depth;
struct rte_event events[MAX_PKT_BURST];
- int nb_enq, nb_deq, i;
+ int nb_enq = 0, nb_deq = 0, i;
if (event_p_id < 0)
return;
@@ -538,6 +541,9 @@ fib_event_loop_vector(struct l3fwd_event_resources *evt_rsrc,
nb_deq - nb_enq, 0);
}
}
+
+ l3fwd_event_worker_cleanup(event_d_id, event_p_id, events, nb_enq,
+ nb_deq, 1);
}
int __rte_noinline
diff --git a/examples/l3fwd/l3fwd_lpm.c b/examples/l3fwd/l3fwd_lpm.c
index bec22c44cd..501fc5db5e 100644
--- a/examples/l3fwd/l3fwd_lpm.c
+++ b/examples/l3fwd/l3fwd_lpm.c
@@ -273,6 +273,7 @@ lpm_event_loop_single(struct l3fwd_event_resources *evt_rsrc,
const uint8_t tx_q_id = evt_rsrc->evq.event_q_id[
evt_rsrc->evq.nb_queues - 1];
const uint8_t event_d_id = evt_rsrc->event_d_id;
+ uint8_t enq = 0, deq = 0;
struct lcore_conf *lconf;
unsigned int lcore_id;
struct rte_event ev;
@@ -285,7 +286,9 @@ lpm_event_loop_single(struct l3fwd_event_resources *evt_rsrc,
RTE_LOG(INFO, L3FWD, "entering %s on lcore %u\n", __func__, lcore_id);
while (!force_quit) {
- if (!rte_event_dequeue_burst(event_d_id, event_p_id, &ev, 1, 0))
+ deq = rte_event_dequeue_burst(event_d_id, event_p_id, &ev, 1,
+ 0);
+ if (!deq)
continue;
if (lpm_process_event_pkt(lconf, ev.mbuf) == BAD_PORT) {
@@ -296,19 +299,22 @@ lpm_event_loop_single(struct l3fwd_event_resources *evt_rsrc,
if (flags & L3FWD_EVENT_TX_ENQ) {
ev.queue_id = tx_q_id;
ev.op = RTE_EVENT_OP_FORWARD;
- while (rte_event_enqueue_burst(event_d_id, event_p_id,
- &ev, 1) && !force_quit)
- ;
+ do {
+ enq = rte_event_enqueue_burst(
+ event_d_id, event_p_id, &ev, 1);
+ } while (!enq && !force_quit);
}
if (flags & L3FWD_EVENT_TX_DIRECT) {
rte_event_eth_tx_adapter_txq_set(ev.mbuf, 0);
- while (!rte_event_eth_tx_adapter_enqueue(event_d_id,
- event_p_id, &ev, 1, 0) &&
- !force_quit)
- ;
+ do {
+ enq = rte_event_eth_tx_adapter_enqueue(
+ event_d_id, event_p_id, &ev, 1, 0);
+ } while (!enq && !force_quit);
}
}
+
+ l3fwd_event_worker_cleanup(event_d_id, event_p_id, &ev, enq, deq, 0);
}
static __rte_always_inline void
@@ -321,9 +327,9 @@ lpm_event_loop_burst(struct l3fwd_event_resources *evt_rsrc,
const uint8_t event_d_id = evt_rsrc->event_d_id;
const uint16_t deq_len = evt_rsrc->deq_depth;
struct rte_event events[MAX_PKT_BURST];
+ int i, nb_enq = 0, nb_deq = 0;
struct lcore_conf *lconf;
unsigned int lcore_id;
- int i, nb_enq, nb_deq;
if (event_p_id < 0)
return;
@@ -375,6 +381,9 @@ lpm_event_loop_burst(struct l3fwd_event_resources *evt_rsrc,
nb_deq - nb_enq, 0);
}
}
+
+ l3fwd_event_worker_cleanup(event_d_id, event_p_id, events, nb_enq,
+ nb_deq, 0);
}
static __rte_always_inline void
@@ -459,9 +468,9 @@ lpm_event_loop_vector(struct l3fwd_event_resources *evt_rsrc,
const uint8_t event_d_id = evt_rsrc->event_d_id;
const uint16_t deq_len = evt_rsrc->deq_depth;
struct rte_event events[MAX_PKT_BURST];
+ int i, nb_enq = 0, nb_deq = 0;
struct lcore_conf *lconf;
unsigned int lcore_id;
- int i, nb_enq, nb_deq;
if (event_p_id < 0)
return;
@@ -510,6 +519,9 @@ lpm_event_loop_vector(struct l3fwd_event_resources *evt_rsrc,
nb_deq - nb_enq, 0);
}
}
+
+ l3fwd_event_worker_cleanup(event_d_id, event_p_id, events, nb_enq,
+ nb_deq, 1);
}
int __rte_noinline
--
2.25.1
* [PATCH 5/6] examples/l2fwd-event: clean up worker state before exit
2022-04-26 21:14 [PATCH 1/6] app/eventdev: simplify signal handling and teardown Pavan Nikhilesh
` (2 preceding siblings ...)
2022-04-26 21:14 ` [PATCH 4/6] examples/l3fwd: " Pavan Nikhilesh
@ 2022-04-26 21:14 ` Pavan Nikhilesh
2022-04-26 21:14 ` [PATCH 6/6] examples/ipsec-secgw: cleanup " Pavan Nikhilesh
` (3 subsequent siblings)
7 siblings, 0 replies; 17+ messages in thread
From: Pavan Nikhilesh @ 2022-04-26 21:14 UTC (permalink / raw)
To: jerinj, Sunil Kumar Kori, Pavan Nikhilesh; +Cc: dev
Event ports are configured to implicitly release the scheduler contexts
currently held in the next call to rte_event_dequeue_burst().
A worker core might still hold a scheduling context during exit, as the
next call to rte_event_dequeue_burst() is never made.
This might lead to a deadlock, depending on the worker exit timing,
especially when there are very few flows.
Add a cleanup function to release any scheduling contexts held by the
worker by using RTE_EVENT_OP_RELEASE.
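The vector path needs one extra step: an unforwarded event may carry a
struct rte_event_vector rather than a single mbuf, so its packet array
is freed in bulk and the vector object itself is returned to its
mempool. A sketch of just that step, mirroring the static helper added
below:
#include <rte_eventdev.h>
#include <rte_mbuf.h>
#include <rte_mempool.h>
static void
event_vector_array_free(struct rte_event events[], uint16_t num)
{
	uint16_t i;
	for (i = 0; i < num; i++) {
		/* Free all packets carried by this vector... */
		rte_pktmbuf_free_bulk(events[i].vec->mbufs,
				      events[i].vec->nb_elem);
		/* ...then return the vector itself to the mempool it
		 * was allocated from.
		 */
		rte_mempool_put(rte_mempool_from_obj(events[i].vec),
				events[i].vec);
	}
}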
Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
---
examples/l2fwd-event/l2fwd_common.c | 34 +++++++++++++++++++++++++++++
examples/l2fwd-event/l2fwd_common.h | 3 +++
examples/l2fwd-event/l2fwd_event.c | 31 ++++++++++++++++----------
3 files changed, 56 insertions(+), 12 deletions(-)
diff --git a/examples/l2fwd-event/l2fwd_common.c b/examples/l2fwd-event/l2fwd_common.c
index cf3d1b8aaf..15bfe790a0 100644
--- a/examples/l2fwd-event/l2fwd_common.c
+++ b/examples/l2fwd-event/l2fwd_common.c
@@ -114,3 +114,37 @@ l2fwd_event_init_ports(struct l2fwd_resources *rsrc)
return nb_ports_available;
}
+
+static void
+l2fwd_event_vector_array_free(struct rte_event events[], uint16_t num)
+{
+ uint16_t i;
+
+ for (i = 0; i < num; i++) {
+ rte_pktmbuf_free_bulk(events[i].vec->mbufs,
+ events[i].vec->nb_elem);
+ rte_mempool_put(rte_mempool_from_obj(events[i].vec),
+ events[i].vec);
+ }
+}
+
+void
+l2fwd_event_worker_cleanup(uint8_t event_d_id, uint8_t port_id,
+ struct rte_event events[], uint16_t nb_enq,
+ uint16_t nb_deq, uint8_t is_vector)
+{
+ int i;
+
+ if (nb_deq) {
+ if (is_vector)
+ l2fwd_event_vector_array_free(events + nb_enq,
+ nb_deq - nb_enq);
+ else
+ for (i = nb_enq; i < nb_deq; i++)
+ rte_pktmbuf_free(events[i].mbuf);
+
+ for (i = 0; i < nb_deq; i++)
+ events[i].op = RTE_EVENT_OP_RELEASE;
+ rte_event_enqueue_burst(event_d_id, port_id, events, nb_deq);
+ }
+}
diff --git a/examples/l2fwd-event/l2fwd_common.h b/examples/l2fwd-event/l2fwd_common.h
index 396e238c6a..bff3b65abf 100644
--- a/examples/l2fwd-event/l2fwd_common.h
+++ b/examples/l2fwd-event/l2fwd_common.h
@@ -140,5 +140,8 @@ l2fwd_get_rsrc(void)
}
int l2fwd_event_init_ports(struct l2fwd_resources *rsrc);
+void l2fwd_event_worker_cleanup(uint8_t event_d_id, uint8_t port_id,
+ struct rte_event events[], uint16_t nb_enq,
+ uint16_t nb_deq, uint8_t is_vector);
#endif /* __L2FWD_COMMON_H__ */
diff --git a/examples/l2fwd-event/l2fwd_event.c b/examples/l2fwd-event/l2fwd_event.c
index 6df3cdfeab..63450537fe 100644
--- a/examples/l2fwd-event/l2fwd_event.c
+++ b/examples/l2fwd-event/l2fwd_event.c
@@ -193,6 +193,7 @@ l2fwd_event_loop_single(struct l2fwd_resources *rsrc,
evt_rsrc->evq.nb_queues - 1];
const uint64_t timer_period = rsrc->timer_period;
const uint8_t event_d_id = evt_rsrc->event_d_id;
+ uint8_t enq = 0, deq = 0;
struct rte_event ev;
if (port_id < 0)
@@ -203,26 +204,28 @@ l2fwd_event_loop_single(struct l2fwd_resources *rsrc,
while (!rsrc->force_quit) {
/* Read packet from eventdev */
- if (!rte_event_dequeue_burst(event_d_id, port_id, &ev, 1, 0))
+ deq = rte_event_dequeue_burst(event_d_id, port_id, &ev, 1, 0);
+ if (!deq)
continue;
l2fwd_event_fwd(rsrc, &ev, tx_q_id, timer_period, flags);
if (flags & L2FWD_EVENT_TX_ENQ) {
- while (rte_event_enqueue_burst(event_d_id, port_id,
- &ev, 1) &&
- !rsrc->force_quit)
- ;
+ do {
+ enq = rte_event_enqueue_burst(event_d_id,
+ port_id, &ev, 1);
+ } while (!enq && !rsrc->force_quit);
}
if (flags & L2FWD_EVENT_TX_DIRECT) {
- while (!rte_event_eth_tx_adapter_enqueue(event_d_id,
- port_id,
- &ev, 1, 0) &&
- !rsrc->force_quit)
- ;
+ do {
+ enq = rte_event_eth_tx_adapter_enqueue(
+ event_d_id, port_id, &ev, 1, 0);
+ } while (!enq && !rsrc->force_quit);
}
}
+
+ l2fwd_event_worker_cleanup(event_d_id, port_id, &ev, enq, deq, 0);
}
static __rte_always_inline void
@@ -237,7 +240,7 @@ l2fwd_event_loop_burst(struct l2fwd_resources *rsrc,
const uint8_t event_d_id = evt_rsrc->event_d_id;
const uint8_t deq_len = evt_rsrc->deq_depth;
struct rte_event ev[MAX_PKT_BURST];
- uint16_t nb_rx, nb_tx;
+ uint16_t nb_rx = 0, nb_tx = 0;
uint8_t i;
if (port_id < 0)
@@ -280,6 +283,8 @@ l2fwd_event_loop_burst(struct l2fwd_resources *rsrc,
ev + nb_tx, nb_rx - nb_tx, 0);
}
}
+
+ l2fwd_event_worker_cleanup(event_d_id, port_id, ev, nb_rx, nb_tx, 0);
}
static __rte_always_inline void
@@ -419,7 +424,7 @@ l2fwd_event_loop_vector(struct l2fwd_resources *rsrc, const uint32_t flags)
const uint8_t event_d_id = evt_rsrc->event_d_id;
const uint8_t deq_len = evt_rsrc->deq_depth;
struct rte_event ev[MAX_PKT_BURST];
- uint16_t nb_rx, nb_tx;
+ uint16_t nb_rx = 0, nb_tx = 0;
uint8_t i;
if (port_id < 0)
@@ -462,6 +467,8 @@ l2fwd_event_loop_vector(struct l2fwd_resources *rsrc, const uint32_t flags)
nb_rx - nb_tx, 0);
}
}
+
+ l2fwd_event_worker_cleanup(event_d_id, port_id, ev, nb_rx, nb_tx, 1);
}
static void __rte_noinline
--
2.25.1
* [PATCH 6/6] examples/ipsec-secgw: cleanup worker state before exit
2022-04-26 21:14 [PATCH 1/6] app/eventdev: simplify signal handling and teardown Pavan Nikhilesh
` (3 preceding siblings ...)
2022-04-26 21:14 ` [PATCH 5/6] examples/l2fwd-event: " Pavan Nikhilesh
@ 2022-04-26 21:14 ` Pavan Nikhilesh
2022-05-13 13:41 ` Jerin Jacob
2022-05-13 11:49 ` [PATCH 1/6] app/eventdev: simplify signal handling and teardown Jerin Jacob
` (2 subsequent siblings)
7 siblings, 1 reply; 17+ messages in thread
From: Pavan Nikhilesh @ 2022-04-26 21:14 UTC (permalink / raw)
To: jerinj, Radu Nicolau, Akhil Goyal; +Cc: dev, Pavan Nikhilesh
Event ports are configured to implicitly release the scheduler contexts
currently held in the next call to rte_event_dequeue_burst().
A worker core might still hold a scheduling context during exit, as the
next call to rte_event_dequeue_burst() is never made.
This might lead to a deadlock, depending on the worker exit timing,
especially when there are very few flows.
Add a cleanup function to release any scheduling contexts held by the
worker by using RTE_EVENT_OP_RELEASE.
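These workers dequeue one event at a time, so on exit at most one
scheduling context can still be held; a non-zero ev.u64 marks that the
last dequeue returned a valid event. A minimal sketch of that exit step
(the helper name is hypothetical; the hunks below inline the same
check):
#include <rte_eventdev.h>
static void
worker_release_single(uint8_t dev_id, uint8_t port_id,
		      struct rte_event *ev)
{
	/* ev->u64 == 0 means no event is currently held. */
	if (!ev->u64)
		return;
	/* Release the held scheduling context back to the device. */
	ev->op = RTE_EVENT_OP_RELEASE;
	rte_event_enqueue_burst(dev_id, port_id, ev, 1);
}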
Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
---
examples/ipsec-secgw/ipsec_worker.c | 40 ++++++++++++++++++++---------
1 file changed, 28 insertions(+), 12 deletions(-)
diff --git a/examples/ipsec-secgw/ipsec_worker.c b/examples/ipsec-secgw/ipsec_worker.c
index 8639426c5c..3df5acf384 100644
--- a/examples/ipsec-secgw/ipsec_worker.c
+++ b/examples/ipsec-secgw/ipsec_worker.c
@@ -749,7 +749,7 @@ ipsec_wrkr_non_burst_int_port_drv_mode(struct eh_event_link_info *links,
uint8_t nb_links)
{
struct port_drv_mode_data data[RTE_MAX_ETHPORTS];
- unsigned int nb_rx = 0;
+ unsigned int nb_rx = 0, nb_tx;
struct rte_mbuf *pkt;
struct rte_event ev;
uint32_t lcore_id;
@@ -847,11 +847,19 @@ ipsec_wrkr_non_burst_int_port_drv_mode(struct eh_event_link_info *links,
* directly enqueued to the adapter and it would be
* internally submitted to the eth device.
*/
- rte_event_eth_tx_adapter_enqueue(links[0].eventdev_id,
- links[0].event_port_id,
- &ev, /* events */
- 1, /* nb_events */
- 0 /* flags */);
+ nb_tx = rte_event_eth_tx_adapter_enqueue(links[0].eventdev_id,
+ links[0].event_port_id,
+ &ev, /* events */
+ 1, /* nb_events */
+ 0 /* flags */);
+ if (!nb_tx)
+ rte_pktmbuf_free(ev.mbuf);
+ }
+
+ if (ev.u64) {
+ ev.op = RTE_EVENT_OP_RELEASE;
+ rte_event_enqueue_burst(links[0].eventdev_id,
+ links[0].event_port_id, &ev, 1);
}
}
@@ -864,7 +872,7 @@ ipsec_wrkr_non_burst_int_port_app_mode(struct eh_event_link_info *links,
uint8_t nb_links)
{
struct lcore_conf_ev_tx_int_port_wrkr lconf;
- unsigned int nb_rx = 0;
+ unsigned int nb_rx = 0, nb_tx;
struct rte_event ev;
uint32_t lcore_id;
int32_t socket_id;
@@ -952,11 +960,19 @@ ipsec_wrkr_non_burst_int_port_app_mode(struct eh_event_link_info *links,
* directly enqueued to the adapter and it would be
* internally submitted to the eth device.
*/
- rte_event_eth_tx_adapter_enqueue(links[0].eventdev_id,
- links[0].event_port_id,
- &ev, /* events */
- 1, /* nb_events */
- 0 /* flags */);
+ nb_tx = rte_event_eth_tx_adapter_enqueue(links[0].eventdev_id,
+ links[0].event_port_id,
+ &ev, /* events */
+ 1, /* nb_events */
+ 0 /* flags */);
+ if (!nb_tx)
+ rte_pktmbuf_free(ev.mbuf);
+ }
+
+ if (ev.u64) {
+ ev.op = RTE_EVENT_OP_RELEASE;
+ rte_event_enqueue_burst(links[0].eventdev_id,
+ links[0].event_port_id, &ev, 1);
}
}
--
2.25.1
* Re: [PATCH 1/6] app/eventdev: simplify signal handling and teardown
2022-04-26 21:14 [PATCH 1/6] app/eventdev: simplify signal handling and teardown Pavan Nikhilesh
` (4 preceding siblings ...)
2022-04-26 21:14 ` [PATCH 6/6] examples/ipsec-secgw: cleanup " Pavan Nikhilesh
@ 2022-05-13 11:49 ` Jerin Jacob
2022-05-13 13:39 ` Jerin Jacob
2022-05-13 16:07 ` [PATCH v2 " pbhagavatula
7 siblings, 0 replies; 17+ messages in thread
From: Jerin Jacob @ 2022-05-13 11:49 UTC (permalink / raw)
To: Pavan Nikhilesh; +Cc: Jerin Jacob, dpdk-dev
On Wed, Apr 27, 2022 at 2:44 AM Pavan Nikhilesh
<pbhagavatula@marvell.com> wrote:
>
> Remove rte_*_dev calls from signal handler callback.
> Split ethernet device teardown into Rx and Tx sections, wait for
> workers to finish processing after disabling Rx to allow workers
> to complete processing currently held packets.
>
> Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
The changes look good to me.
Just to make sure it does not break any SW drivers, could you test the
series with the SW drivers and share the test command used?
> ---
> app/test-eventdev/evt_main.c | 58 +++++++++---------------
> app/test-eventdev/evt_test.h | 3 ++
> app/test-eventdev/test_perf_atq.c | 1 +
> app/test-eventdev/test_perf_common.c | 20 +++++++-
> app/test-eventdev/test_perf_common.h | 4 +-
> app/test-eventdev/test_perf_queue.c | 1 +
> app/test-eventdev/test_pipeline_atq.c | 1 +
> app/test-eventdev/test_pipeline_common.c | 19 +++++++-
> app/test-eventdev/test_pipeline_common.h | 5 +-
> app/test-eventdev/test_pipeline_queue.c | 1 +
> 10 files changed, 72 insertions(+), 41 deletions(-)
>
> diff --git a/app/test-eventdev/evt_main.c b/app/test-eventdev/evt_main.c
> index a7d6b0c1cf..c5d63061bf 100644
> --- a/app/test-eventdev/evt_main.c
> +++ b/app/test-eventdev/evt_main.c
> @@ -19,11 +19,7 @@ struct evt_test *test;
> static void
> signal_handler(int signum)
> {
> - int i;
> - static uint8_t once;
> -
> - if ((signum == SIGINT || signum == SIGTERM) && !once) {
> - once = true;
> + if (signum == SIGINT || signum == SIGTERM) {
> printf("\nSignal %d received, preparing to exit...\n",
> signum);
>
> @@ -31,36 +27,7 @@ signal_handler(int signum)
> /* request all lcores to exit from the main loop */
> *(int *)test->test_priv = true;
> rte_wmb();
> -
> - if (test->ops.ethdev_destroy)
> - test->ops.ethdev_destroy(test, &opt);
> -
> - if (test->ops.cryptodev_destroy)
> - test->ops.cryptodev_destroy(test, &opt);
> -
> - rte_eal_mp_wait_lcore();
> -
> - if (test->ops.test_result)
> - test->ops.test_result(test, &opt);
> -
> - if (opt.prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR) {
> - RTE_ETH_FOREACH_DEV(i)
> - rte_eth_dev_close(i);
> - }
> -
> - if (test->ops.eventdev_destroy)
> - test->ops.eventdev_destroy(test, &opt);
> -
> - if (test->ops.mempool_destroy)
> - test->ops.mempool_destroy(test, &opt);
> -
> - if (test->ops.test_destroy)
> - test->ops.test_destroy(test, &opt);
> }
> -
> - /* exit with the expected status */
> - signal(signum, SIG_DFL);
> - kill(getpid(), signum);
> }
> }
>
> @@ -189,10 +156,29 @@ main(int argc, char **argv)
> }
> }
>
> + if (test->ops.ethdev_rx_stop)
> + test->ops.ethdev_rx_stop(test, &opt);
> +
> + if (test->ops.cryptodev_destroy)
> + test->ops.cryptodev_destroy(test, &opt);
> +
> rte_eal_mp_wait_lcore();
>
> - /* Print the test result */
> - ret = test->ops.test_result(test, &opt);
> + if (test->ops.test_result)
> + test->ops.test_result(test, &opt);
> +
> + if (test->ops.ethdev_destroy)
> + test->ops.ethdev_destroy(test, &opt);
> +
> + if (test->ops.eventdev_destroy)
> + test->ops.eventdev_destroy(test, &opt);
> +
> + if (test->ops.mempool_destroy)
> + test->ops.mempool_destroy(test, &opt);
> +
> + if (test->ops.test_destroy)
> + test->ops.test_destroy(test, &opt);
> +
> nocap:
> if (ret == EVT_TEST_SUCCESS) {
> printf("Result: "CLGRN"%s"CLNRM"\n", "Success");
> diff --git a/app/test-eventdev/evt_test.h b/app/test-eventdev/evt_test.h
> index 50fa474ec2..1049f99ddc 100644
> --- a/app/test-eventdev/evt_test.h
> +++ b/app/test-eventdev/evt_test.h
> @@ -41,6 +41,8 @@ typedef void (*evt_test_eventdev_destroy_t)
> (struct evt_test *test, struct evt_options *opt);
> typedef void (*evt_test_ethdev_destroy_t)
> (struct evt_test *test, struct evt_options *opt);
> +typedef void (*evt_test_ethdev_rx_stop_t)(struct evt_test *test,
> + struct evt_options *opt);
> typedef void (*evt_test_cryptodev_destroy_t)
> (struct evt_test *test, struct evt_options *opt);
> typedef void (*evt_test_mempool_destroy_t)
> @@ -60,6 +62,7 @@ struct evt_test_ops {
> evt_test_launch_lcores_t launch_lcores;
> evt_test_result_t test_result;
> evt_test_eventdev_destroy_t eventdev_destroy;
> + evt_test_ethdev_rx_stop_t ethdev_rx_stop;
> evt_test_ethdev_destroy_t ethdev_destroy;
> evt_test_cryptodev_destroy_t cryptodev_destroy;
> evt_test_mempool_destroy_t mempool_destroy;
> diff --git a/app/test-eventdev/test_perf_atq.c b/app/test-eventdev/test_perf_atq.c
> index 67ff681666..bac3ea602f 100644
> --- a/app/test-eventdev/test_perf_atq.c
> +++ b/app/test-eventdev/test_perf_atq.c
> @@ -343,6 +343,7 @@ static const struct evt_test_ops perf_atq = {
> .test_setup = perf_test_setup,
> .ethdev_setup = perf_ethdev_setup,
> .cryptodev_setup = perf_cryptodev_setup,
> + .ethdev_rx_stop = perf_ethdev_rx_stop,
> .mempool_setup = perf_mempool_setup,
> .eventdev_setup = perf_atq_eventdev_setup,
> .launch_lcores = perf_atq_launch_lcores,
> diff --git a/app/test-eventdev/test_perf_common.c b/app/test-eventdev/test_perf_common.c
> index 9d1f4a4567..e93b0e7272 100644
> --- a/app/test-eventdev/test_perf_common.c
> +++ b/app/test-eventdev/test_perf_common.c
> @@ -1087,7 +1087,8 @@ perf_ethdev_setup(struct evt_test *test, struct evt_options *opt)
> return 0;
> }
>
> -void perf_ethdev_destroy(struct evt_test *test, struct evt_options *opt)
> +void
> +perf_ethdev_rx_stop(struct evt_test *test, struct evt_options *opt)
> {
> uint16_t i;
> RTE_SET_USED(test);
> @@ -1095,6 +1096,23 @@ void perf_ethdev_destroy(struct evt_test *test, struct evt_options *opt)
> if (opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR) {
> RTE_ETH_FOREACH_DEV(i) {
> rte_event_eth_rx_adapter_stop(i);
> + rte_event_eth_rx_adapter_queue_del(i, i, -1);
> + rte_eth_dev_rx_queue_stop(i, 0);
> + }
> + }
> +}
> +
> +void
> +perf_ethdev_destroy(struct evt_test *test, struct evt_options *opt)
> +{
> + uint16_t i;
> + RTE_SET_USED(test);
> +
> + if (opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR) {
> + RTE_ETH_FOREACH_DEV (i) {
> + rte_event_eth_tx_adapter_stop(i);
> + rte_event_eth_tx_adapter_queue_del(i, i, -1);
> + rte_eth_dev_tx_queue_stop(i, 0);
> rte_eth_dev_stop(i);
> }
> }
> diff --git a/app/test-eventdev/test_perf_common.h b/app/test-eventdev/test_perf_common.h
> index ea0907d61a..e504bb1df9 100644
> --- a/app/test-eventdev/test_perf_common.h
> +++ b/app/test-eventdev/test_perf_common.h
> @@ -12,10 +12,11 @@
> #include <rte_cryptodev.h>
> #include <rte_cycles.h>
> #include <rte_ethdev.h>
> -#include <rte_eventdev.h>
> #include <rte_event_crypto_adapter.h>
> #include <rte_event_eth_rx_adapter.h>
> +#include <rte_event_eth_tx_adapter.h>
> #include <rte_event_timer_adapter.h>
> +#include <rte_eventdev.h>
> #include <rte_lcore.h>
> #include <rte_malloc.h>
> #include <rte_mempool.h>
> @@ -181,6 +182,7 @@ void perf_test_destroy(struct evt_test *test, struct evt_options *opt);
> void perf_eventdev_destroy(struct evt_test *test, struct evt_options *opt);
> void perf_cryptodev_destroy(struct evt_test *test, struct evt_options *opt);
> void perf_ethdev_destroy(struct evt_test *test, struct evt_options *opt);
> +void perf_ethdev_rx_stop(struct evt_test *test, struct evt_options *opt);
> void perf_mempool_destroy(struct evt_test *test, struct evt_options *opt);
>
> #endif /* _TEST_PERF_COMMON_ */
> diff --git a/app/test-eventdev/test_perf_queue.c b/app/test-eventdev/test_perf_queue.c
> index dcf6d82947..108f1742a7 100644
> --- a/app/test-eventdev/test_perf_queue.c
> +++ b/app/test-eventdev/test_perf_queue.c
> @@ -360,6 +360,7 @@ static const struct evt_test_ops perf_queue = {
> .mempool_setup = perf_mempool_setup,
> .ethdev_setup = perf_ethdev_setup,
> .cryptodev_setup = perf_cryptodev_setup,
> + .ethdev_rx_stop = perf_ethdev_rx_stop,
> .eventdev_setup = perf_queue_eventdev_setup,
> .launch_lcores = perf_queue_launch_lcores,
> .eventdev_destroy = perf_eventdev_destroy,
> diff --git a/app/test-eventdev/test_pipeline_atq.c b/app/test-eventdev/test_pipeline_atq.c
> index 84dd4f44e3..79218502ba 100644
> --- a/app/test-eventdev/test_pipeline_atq.c
> +++ b/app/test-eventdev/test_pipeline_atq.c
> @@ -772,6 +772,7 @@ static const struct evt_test_ops pipeline_atq = {
> .ethdev_setup = pipeline_ethdev_setup,
> .eventdev_setup = pipeline_atq_eventdev_setup,
> .launch_lcores = pipeline_atq_launch_lcores,
> + .ethdev_rx_stop = pipeline_ethdev_rx_stop,
> .eventdev_destroy = pipeline_eventdev_destroy,
> .mempool_destroy = pipeline_mempool_destroy,
> .ethdev_destroy = pipeline_ethdev_destroy,
> diff --git a/app/test-eventdev/test_pipeline_common.c b/app/test-eventdev/test_pipeline_common.c
> index ddaa9f3fdb..d994c91678 100644
> --- a/app/test-eventdev/test_pipeline_common.c
> +++ b/app/test-eventdev/test_pipeline_common.c
> @@ -505,6 +505,22 @@ pipeline_event_tx_adapter_setup(struct evt_options *opt,
> return ret;
> }
>
> +void
> +pipeline_ethdev_rx_stop(struct evt_test *test, struct evt_options *opt)
> +{
> + uint16_t i, j;
> + RTE_SET_USED(test);
> +
> + if (opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR) {
> + RTE_ETH_FOREACH_DEV (i) {
> + rte_event_eth_rx_adapter_stop(i);
> + rte_event_eth_rx_adapter_queue_del(i, i, -1);
> + for (j = 0; j < opt->eth_queues; j++)
> + rte_eth_dev_rx_queue_stop(i, j);
> + }
> + }
> +}
> +
> void
> pipeline_ethdev_destroy(struct evt_test *test, struct evt_options *opt)
> {
> @@ -513,8 +529,9 @@ pipeline_ethdev_destroy(struct evt_test *test, struct evt_options *opt)
> RTE_SET_USED(opt);
>
> RTE_ETH_FOREACH_DEV(i) {
> - rte_event_eth_rx_adapter_stop(i);
> rte_event_eth_tx_adapter_stop(i);
> + rte_event_eth_tx_adapter_queue_del(i, i, -1);
> + rte_eth_dev_tx_queue_stop(i, 0);
> rte_eth_dev_stop(i);
> }
> }
> diff --git a/app/test-eventdev/test_pipeline_common.h b/app/test-eventdev/test_pipeline_common.h
> index d69e2f8a3e..c979c33772 100644
> --- a/app/test-eventdev/test_pipeline_common.h
> +++ b/app/test-eventdev/test_pipeline_common.h
> @@ -12,16 +12,16 @@
>
> #include <rte_cycles.h>
> #include <rte_ethdev.h>
> -#include <rte_eventdev.h>
> #include <rte_event_eth_rx_adapter.h>
> #include <rte_event_eth_tx_adapter.h>
> +#include <rte_eventdev.h>
> #include <rte_lcore.h>
> #include <rte_malloc.h>
> #include <rte_mempool.h>
> #include <rte_prefetch.h>
> -#include <rte_spinlock.h>
> #include <rte_service.h>
> #include <rte_service_component.h>
> +#include <rte_spinlock.h>
>
> #include "evt_common.h"
> #include "evt_options.h"
> @@ -186,6 +186,7 @@ void pipeline_opt_dump(struct evt_options *opt, uint8_t nb_queues);
> void pipeline_test_destroy(struct evt_test *test, struct evt_options *opt);
> void pipeline_eventdev_destroy(struct evt_test *test, struct evt_options *opt);
> void pipeline_ethdev_destroy(struct evt_test *test, struct evt_options *opt);
> +void pipeline_ethdev_rx_stop(struct evt_test *test, struct evt_options *opt);
> void pipeline_mempool_destroy(struct evt_test *test, struct evt_options *opt);
>
> #endif /* _TEST_PIPELINE_COMMON_ */
> diff --git a/app/test-eventdev/test_pipeline_queue.c b/app/test-eventdev/test_pipeline_queue.c
> index f6cc3e358e..343f8f3b1d 100644
> --- a/app/test-eventdev/test_pipeline_queue.c
> +++ b/app/test-eventdev/test_pipeline_queue.c
> @@ -798,6 +798,7 @@ static const struct evt_test_ops pipeline_queue = {
> .ethdev_setup = pipeline_ethdev_setup,
> .eventdev_setup = pipeline_queue_eventdev_setup,
> .launch_lcores = pipeline_queue_launch_lcores,
> + .ethdev_rx_stop = pipeline_ethdev_rx_stop,
> .eventdev_destroy = pipeline_eventdev_destroy,
> .mempool_destroy = pipeline_mempool_destroy,
> .ethdev_destroy = pipeline_ethdev_destroy,
> --
> 2.25.1
>
* Re: [PATCH 1/6] app/eventdev: simplify signal handling and teardown
2022-04-26 21:14 [PATCH 1/6] app/eventdev: simplify signal handling and teardown Pavan Nikhilesh
` (5 preceding siblings ...)
2022-05-13 11:49 ` [PATCH 1/6] app/eventdev: simplify signal handling and teardown Jerin Jacob
@ 2022-05-13 13:39 ` Jerin Jacob
2022-05-13 16:07 ` [PATCH v2 " pbhagavatula
7 siblings, 0 replies; 17+ messages in thread
From: Jerin Jacob @ 2022-05-13 13:39 UTC (permalink / raw)
To: Pavan Nikhilesh; +Cc: Jerin Jacob, dpdk-dev
On Wed, Apr 27, 2022 at 2:44 AM Pavan Nikhilesh
<pbhagavatula@marvell.com> wrote:
>
> Remove rte_*_dev calls from signal handler callback.
Please add the reason.
> Split ethernet device teardown into Rx and Tx sections, wait for
> workers to finish processing after disabling Rx to allow workers
> to complete processing currently held packets.
>
> Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
> ---
> app/test-eventdev/evt_main.c | 58 +++++++++---------------
> app/test-eventdev/evt_test.h | 3 ++
> app/test-eventdev/test_perf_atq.c | 1 +
> app/test-eventdev/test_perf_common.c | 20 +++++++-
> app/test-eventdev/test_perf_common.h | 4 +-
> app/test-eventdev/test_perf_queue.c | 1 +
> app/test-eventdev/test_pipeline_atq.c | 1 +
> app/test-eventdev/test_pipeline_common.c | 19 +++++++-
> app/test-eventdev/test_pipeline_common.h | 5 +-
> app/test-eventdev/test_pipeline_queue.c | 1 +
> 10 files changed, 72 insertions(+), 41 deletions(-)
>
> diff --git a/app/test-eventdev/evt_main.c b/app/test-eventdev/evt_main.c
> index a7d6b0c1cf..c5d63061bf 100644
> --- a/app/test-eventdev/evt_main.c
> +++ b/app/test-eventdev/evt_main.c
> @@ -19,11 +19,7 @@ struct evt_test *test;
> static void
> signal_handler(int signum)
> {
> - int i;
> - static uint8_t once;
> -
> - if ((signum == SIGINT || signum == SIGTERM) && !once) {
> - once = true;
> + if (signum == SIGINT || signum == SIGTERM) {
> printf("\nSignal %d received, preparing to exit...\n",
> signum);
>
> @@ -31,36 +27,7 @@ signal_handler(int signum)
> /* request all lcores to exit from the main loop */
> *(int *)test->test_priv = true;
> rte_wmb();
> -
> - if (test->ops.ethdev_destroy)
> - test->ops.ethdev_destroy(test, &opt);
> -
> - if (test->ops.cryptodev_destroy)
> - test->ops.cryptodev_destroy(test, &opt);
> -
> - rte_eal_mp_wait_lcore();
> -
> - if (test->ops.test_result)
> - test->ops.test_result(test, &opt);
> -
> - if (opt.prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR) {
> - RTE_ETH_FOREACH_DEV(i)
> - rte_eth_dev_close(i);
> - }
> -
> - if (test->ops.eventdev_destroy)
> - test->ops.eventdev_destroy(test, &opt);
> -
> - if (test->ops.mempool_destroy)
> - test->ops.mempool_destroy(test, &opt);
> -
> - if (test->ops.test_destroy)
> - test->ops.test_destroy(test, &opt);
> }
> -
> - /* exit with the expected status */
> - signal(signum, SIG_DFL);
> - kill(getpid(), signum);
> }
> }
>
> @@ -189,10 +156,29 @@ main(int argc, char **argv)
> }
> }
>
> + if (test->ops.ethdev_rx_stop)
> + test->ops.ethdev_rx_stop(test, &opt);
> +
> + if (test->ops.cryptodev_destroy)
> + test->ops.cryptodev_destroy(test, &opt);
> +
> rte_eal_mp_wait_lcore();
>
> - /* Print the test result */
> - ret = test->ops.test_result(test, &opt);
> + if (test->ops.test_result)
> + test->ops.test_result(test, &opt);
> +
> + if (test->ops.ethdev_destroy)
> + test->ops.ethdev_destroy(test, &opt);
> +
> + if (test->ops.eventdev_destroy)
> + test->ops.eventdev_destroy(test, &opt);
> +
> + if (test->ops.mempool_destroy)
> + test->ops.mempool_destroy(test, &opt);
> +
> + if (test->ops.test_destroy)
> + test->ops.test_destroy(test, &opt);
> +
> nocap:
> if (ret == EVT_TEST_SUCCESS) {
> printf("Result: "CLGRN"%s"CLNRM"\n", "Success");
> diff --git a/app/test-eventdev/evt_test.h b/app/test-eventdev/evt_test.h
> index 50fa474ec2..1049f99ddc 100644
> --- a/app/test-eventdev/evt_test.h
> +++ b/app/test-eventdev/evt_test.h
> @@ -41,6 +41,8 @@ typedef void (*evt_test_eventdev_destroy_t)
> (struct evt_test *test, struct evt_options *opt);
> typedef void (*evt_test_ethdev_destroy_t)
> (struct evt_test *test, struct evt_options *opt);
> +typedef void (*evt_test_ethdev_rx_stop_t)(struct evt_test *test,
> + struct evt_options *opt);
> typedef void (*evt_test_cryptodev_destroy_t)
> (struct evt_test *test, struct evt_options *opt);
> typedef void (*evt_test_mempool_destroy_t)
> @@ -60,6 +62,7 @@ struct evt_test_ops {
> evt_test_launch_lcores_t launch_lcores;
> evt_test_result_t test_result;
> evt_test_eventdev_destroy_t eventdev_destroy;
> + evt_test_ethdev_rx_stop_t ethdev_rx_stop;
> evt_test_ethdev_destroy_t ethdev_destroy;
> evt_test_cryptodev_destroy_t cryptodev_destroy;
> evt_test_mempool_destroy_t mempool_destroy;
> diff --git a/app/test-eventdev/test_perf_atq.c b/app/test-eventdev/test_perf_atq.c
> index 67ff681666..bac3ea602f 100644
> --- a/app/test-eventdev/test_perf_atq.c
> +++ b/app/test-eventdev/test_perf_atq.c
> @@ -343,6 +343,7 @@ static const struct evt_test_ops perf_atq = {
> .test_setup = perf_test_setup,
> .ethdev_setup = perf_ethdev_setup,
> .cryptodev_setup = perf_cryptodev_setup,
> + .ethdev_rx_stop = perf_ethdev_rx_stop,
> .mempool_setup = perf_mempool_setup,
> .eventdev_setup = perf_atq_eventdev_setup,
> .launch_lcores = perf_atq_launch_lcores,
> diff --git a/app/test-eventdev/test_perf_common.c b/app/test-eventdev/test_perf_common.c
> index 9d1f4a4567..e93b0e7272 100644
> --- a/app/test-eventdev/test_perf_common.c
> +++ b/app/test-eventdev/test_perf_common.c
> @@ -1087,7 +1087,8 @@ perf_ethdev_setup(struct evt_test *test, struct evt_options *opt)
> return 0;
> }
>
> -void perf_ethdev_destroy(struct evt_test *test, struct evt_options *opt)
> +void
> +perf_ethdev_rx_stop(struct evt_test *test, struct evt_options *opt)
> {
> uint16_t i;
> RTE_SET_USED(test);
> @@ -1095,6 +1096,23 @@ void perf_ethdev_destroy(struct evt_test *test, struct evt_options *opt)
> if (opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR) {
> RTE_ETH_FOREACH_DEV(i) {
> rte_event_eth_rx_adapter_stop(i);
> + rte_event_eth_rx_adapter_queue_del(i, i, -1);
> + rte_eth_dev_rx_queue_stop(i, 0);
> + }
> + }
> +}
> +
> +void
> +perf_ethdev_destroy(struct evt_test *test, struct evt_options *opt)
> +{
> + uint16_t i;
> + RTE_SET_USED(test);
> +
> + if (opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR) {
> + RTE_ETH_FOREACH_DEV (i) {
> + rte_event_eth_tx_adapter_stop(i);
> + rte_event_eth_tx_adapter_queue_del(i, i, -1);
> + rte_eth_dev_tx_queue_stop(i, 0);
> rte_eth_dev_stop(i);
> }
> }
> diff --git a/app/test-eventdev/test_perf_common.h b/app/test-eventdev/test_perf_common.h
> index ea0907d61a..e504bb1df9 100644
> --- a/app/test-eventdev/test_perf_common.h
> +++ b/app/test-eventdev/test_perf_common.h
> @@ -12,10 +12,11 @@
> #include <rte_cryptodev.h>
> #include <rte_cycles.h>
> #include <rte_ethdev.h>
> -#include <rte_eventdev.h>
> #include <rte_event_crypto_adapter.h>
> #include <rte_event_eth_rx_adapter.h>
> +#include <rte_event_eth_tx_adapter.h>
> #include <rte_event_timer_adapter.h>
> +#include <rte_eventdev.h>
> #include <rte_lcore.h>
> #include <rte_malloc.h>
> #include <rte_mempool.h>
> @@ -181,6 +182,7 @@ void perf_test_destroy(struct evt_test *test, struct evt_options *opt);
> void perf_eventdev_destroy(struct evt_test *test, struct evt_options *opt);
> void perf_cryptodev_destroy(struct evt_test *test, struct evt_options *opt);
> void perf_ethdev_destroy(struct evt_test *test, struct evt_options *opt);
> +void perf_ethdev_rx_stop(struct evt_test *test, struct evt_options *opt);
> void perf_mempool_destroy(struct evt_test *test, struct evt_options *opt);
>
> #endif /* _TEST_PERF_COMMON_ */
> diff --git a/app/test-eventdev/test_perf_queue.c b/app/test-eventdev/test_perf_queue.c
> index dcf6d82947..108f1742a7 100644
> --- a/app/test-eventdev/test_perf_queue.c
> +++ b/app/test-eventdev/test_perf_queue.c
> @@ -360,6 +360,7 @@ static const struct evt_test_ops perf_queue = {
> .mempool_setup = perf_mempool_setup,
> .ethdev_setup = perf_ethdev_setup,
> .cryptodev_setup = perf_cryptodev_setup,
> + .ethdev_rx_stop = perf_ethdev_rx_stop,
> .eventdev_setup = perf_queue_eventdev_setup,
> .launch_lcores = perf_queue_launch_lcores,
> .eventdev_destroy = perf_eventdev_destroy,
> diff --git a/app/test-eventdev/test_pipeline_atq.c b/app/test-eventdev/test_pipeline_atq.c
> index 84dd4f44e3..79218502ba 100644
> --- a/app/test-eventdev/test_pipeline_atq.c
> +++ b/app/test-eventdev/test_pipeline_atq.c
> @@ -772,6 +772,7 @@ static const struct evt_test_ops pipeline_atq = {
> .ethdev_setup = pipeline_ethdev_setup,
> .eventdev_setup = pipeline_atq_eventdev_setup,
> .launch_lcores = pipeline_atq_launch_lcores,
> + .ethdev_rx_stop = pipeline_ethdev_rx_stop,
> .eventdev_destroy = pipeline_eventdev_destroy,
> .mempool_destroy = pipeline_mempool_destroy,
> .ethdev_destroy = pipeline_ethdev_destroy,
> diff --git a/app/test-eventdev/test_pipeline_common.c b/app/test-eventdev/test_pipeline_common.c
> index ddaa9f3fdb..d994c91678 100644
> --- a/app/test-eventdev/test_pipeline_common.c
> +++ b/app/test-eventdev/test_pipeline_common.c
> @@ -505,6 +505,22 @@ pipeline_event_tx_adapter_setup(struct evt_options *opt,
> return ret;
> }
>
> +void
> +pipeline_ethdev_rx_stop(struct evt_test *test, struct evt_options *opt)
> +{
> + uint16_t i, j;
> + RTE_SET_USED(test);
> +
> + if (opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR) {
> + RTE_ETH_FOREACH_DEV (i) {
> + rte_event_eth_rx_adapter_stop(i);
> + rte_event_eth_rx_adapter_queue_del(i, i, -1);
> + for (j = 0; j < opt->eth_queues; j++)
> + rte_eth_dev_rx_queue_stop(i, j);
> + }
> + }
> +}
> +
> void
> pipeline_ethdev_destroy(struct evt_test *test, struct evt_options *opt)
> {
> @@ -513,8 +529,9 @@ pipeline_ethdev_destroy(struct evt_test *test, struct evt_options *opt)
> RTE_SET_USED(opt);
>
> RTE_ETH_FOREACH_DEV(i) {
> - rte_event_eth_rx_adapter_stop(i);
> rte_event_eth_tx_adapter_stop(i);
> + rte_event_eth_tx_adapter_queue_del(i, i, -1);
> + rte_eth_dev_tx_queue_stop(i, 0);
> rte_eth_dev_stop(i);
> }
> }
> diff --git a/app/test-eventdev/test_pipeline_common.h b/app/test-eventdev/test_pipeline_common.h
> index d69e2f8a3e..c979c33772 100644
> --- a/app/test-eventdev/test_pipeline_common.h
> +++ b/app/test-eventdev/test_pipeline_common.h
> @@ -12,16 +12,16 @@
>
> #include <rte_cycles.h>
> #include <rte_ethdev.h>
> -#include <rte_eventdev.h>
> #include <rte_event_eth_rx_adapter.h>
> #include <rte_event_eth_tx_adapter.h>
> +#include <rte_eventdev.h>
> #include <rte_lcore.h>
> #include <rte_malloc.h>
> #include <rte_mempool.h>
> #include <rte_prefetch.h>
> -#include <rte_spinlock.h>
> #include <rte_service.h>
> #include <rte_service_component.h>
> +#include <rte_spinlock.h>
>
> #include "evt_common.h"
> #include "evt_options.h"
> @@ -186,6 +186,7 @@ void pipeline_opt_dump(struct evt_options *opt, uint8_t nb_queues);
> void pipeline_test_destroy(struct evt_test *test, struct evt_options *opt);
> void pipeline_eventdev_destroy(struct evt_test *test, struct evt_options *opt);
> void pipeline_ethdev_destroy(struct evt_test *test, struct evt_options *opt);
> +void pipeline_ethdev_rx_stop(struct evt_test *test, struct evt_options *opt);
> void pipeline_mempool_destroy(struct evt_test *test, struct evt_options *opt);
>
> #endif /* _TEST_PIPELINE_COMMON_ */
> diff --git a/app/test-eventdev/test_pipeline_queue.c b/app/test-eventdev/test_pipeline_queue.c
> index f6cc3e358e..343f8f3b1d 100644
> --- a/app/test-eventdev/test_pipeline_queue.c
> +++ b/app/test-eventdev/test_pipeline_queue.c
> @@ -798,6 +798,7 @@ static const struct evt_test_ops pipeline_queue = {
> .ethdev_setup = pipeline_ethdev_setup,
> .eventdev_setup = pipeline_queue_eventdev_setup,
> .launch_lcores = pipeline_queue_launch_lcores,
> + .ethdev_rx_stop = pipeline_ethdev_rx_stop,
> .eventdev_destroy = pipeline_eventdev_destroy,
> .mempool_destroy = pipeline_mempool_destroy,
> .ethdev_destroy = pipeline_ethdev_destroy,
> --
> 2.25.1
>
* Re: [PATCH 2/6] app/eventdev: clean up worker state before exit
2022-04-26 21:14 ` [PATCH 2/6] app/eventdev: clean up worker state before exit Pavan Nikhilesh
@ 2022-05-13 13:40 ` Jerin Jacob
0 siblings, 0 replies; 17+ messages in thread
From: Jerin Jacob @ 2022-05-13 13:40 UTC (permalink / raw)
To: Pavan Nikhilesh; +Cc: Jerin Jacob, dpdk-dev
On Wed, Apr 27, 2022 at 2:44 AM Pavan Nikhilesh
<pbhagavatula@marvell.com> wrote:
>
> Event ports are configured to implicitly release the scheduler contexts
> currently held in the next call to rte_event_dequeue_burst().
> A worker core might still hold a scheduling context during exit, as the
> next call to rte_event_dequeue_burst() is never made.
> This might lead to a deadlock, depending on the worker exit timing,
> especially when there are very few flows.
>
> Add a cleanup function to release any scheduling contexts held by the
> worker by using RTE_EVENT_OP_RELEASE.
>
> Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
Acked-by: Jerin Jacob <jerinj@marvell.com>
> ---
> app/test-eventdev/test_perf_atq.c | 31 +++--
> app/test-eventdev/test_perf_common.c | 17 +++
> app/test-eventdev/test_perf_common.h | 3 +
> app/test-eventdev/test_perf_queue.c | 30 +++--
> app/test-eventdev/test_pipeline_atq.c | 134 ++++++++++++---------
> app/test-eventdev/test_pipeline_common.c | 39 ++++++
> app/test-eventdev/test_pipeline_common.h | 59 ++++++---
> app/test-eventdev/test_pipeline_queue.c | 145 ++++++++++++++---------
> 8 files changed, 304 insertions(+), 154 deletions(-)
>
> diff --git a/app/test-eventdev/test_perf_atq.c b/app/test-eventdev/test_perf_atq.c
> index bac3ea602f..5a0b190384 100644
> --- a/app/test-eventdev/test_perf_atq.c
> +++ b/app/test-eventdev/test_perf_atq.c
> @@ -37,13 +37,14 @@ atq_fwd_event(struct rte_event *const ev, uint8_t *const sched_type_list,
> static int
> perf_atq_worker(void *arg, const int enable_fwd_latency)
> {
> - PERF_WORKER_INIT;
> + uint16_t enq = 0, deq = 0;
> struct rte_event ev;
> + PERF_WORKER_INIT;
>
> while (t->done == false) {
> - uint16_t event = rte_event_dequeue_burst(dev, port, &ev, 1, 0);
> + deq = rte_event_dequeue_burst(dev, port, &ev, 1, 0);
>
> - if (!event) {
> + if (!deq) {
> rte_pause();
> continue;
> }
> @@ -78,24 +79,29 @@ perf_atq_worker(void *arg, const int enable_fwd_latency)
> bufs, sz, cnt);
> } else {
> atq_fwd_event(&ev, sched_type_list, nb_stages);
> - while (rte_event_enqueue_burst(dev, port, &ev, 1) != 1)
> - rte_pause();
> + do {
> + enq = rte_event_enqueue_burst(dev, port, &ev,
> + 1);
> + } while (!enq && !t->done);
> }
> }
> +
> + perf_worker_cleanup(pool, dev, port, &ev, enq, deq);
> +
> return 0;
> }
>
> static int
> perf_atq_worker_burst(void *arg, const int enable_fwd_latency)
> {
> - PERF_WORKER_INIT;
> - uint16_t i;
> /* +1 to avoid prefetch out of array check */
> struct rte_event ev[BURST_SIZE + 1];
> + uint16_t enq = 0, nb_rx = 0;
> + PERF_WORKER_INIT;
> + uint16_t i;
>
> while (t->done == false) {
> - uint16_t const nb_rx = rte_event_dequeue_burst(dev, port, ev,
> - BURST_SIZE, 0);
> + nb_rx = rte_event_dequeue_burst(dev, port, ev, BURST_SIZE, 0);
>
> if (!nb_rx) {
> rte_pause();
> @@ -146,14 +152,15 @@ perf_atq_worker_burst(void *arg, const int enable_fwd_latency)
> }
> }
>
> - uint16_t enq;
> -
> enq = rte_event_enqueue_burst(dev, port, ev, nb_rx);
> - while (enq < nb_rx) {
> + while ((enq < nb_rx) && !t->done) {
> enq += rte_event_enqueue_burst(dev, port,
> ev + enq, nb_rx - enq);
> }
> }
> +
> + perf_worker_cleanup(pool, dev, port, ev, enq, nb_rx);
> +
> return 0;
> }
>
> diff --git a/app/test-eventdev/test_perf_common.c b/app/test-eventdev/test_perf_common.c
> index e93b0e7272..f673a9fddd 100644
> --- a/app/test-eventdev/test_perf_common.c
> +++ b/app/test-eventdev/test_perf_common.c
> @@ -985,6 +985,23 @@ perf_opt_dump(struct evt_options *opt, uint8_t nb_queues)
> evt_dump("prod_enq_burst_sz", "%d", opt->prod_enq_burst_sz);
> }
>
> +void
> +perf_worker_cleanup(struct rte_mempool *const pool, uint8_t dev_id,
> + uint8_t port_id, struct rte_event events[], uint16_t nb_enq,
> + uint16_t nb_deq)
> +{
> + int i;
> +
> + if (nb_deq) {
> + for (i = nb_enq; i < nb_deq; i++)
> + rte_mempool_put(pool, events[i].event_ptr);
> +
> + for (i = 0; i < nb_deq; i++)
> + events[i].op = RTE_EVENT_OP_RELEASE;
> + rte_event_enqueue_burst(dev_id, port_id, events, nb_deq);
> + }
> +}
> +
> void
> perf_eventdev_destroy(struct evt_test *test, struct evt_options *opt)
> {
> diff --git a/app/test-eventdev/test_perf_common.h b/app/test-eventdev/test_perf_common.h
> index e504bb1df9..f6bfc73be0 100644
> --- a/app/test-eventdev/test_perf_common.h
> +++ b/app/test-eventdev/test_perf_common.h
> @@ -184,5 +184,8 @@ void perf_cryptodev_destroy(struct evt_test *test, struct evt_options *opt);
> void perf_ethdev_destroy(struct evt_test *test, struct evt_options *opt);
> void perf_ethdev_rx_stop(struct evt_test *test, struct evt_options *opt);
> void perf_mempool_destroy(struct evt_test *test, struct evt_options *opt);
> +void perf_worker_cleanup(struct rte_mempool *const pool, uint8_t dev_id,
> + uint8_t port_id, struct rte_event events[],
> + uint16_t nb_enq, uint16_t nb_deq);
>
> #endif /* _TEST_PERF_COMMON_ */
> diff --git a/app/test-eventdev/test_perf_queue.c b/app/test-eventdev/test_perf_queue.c
> index 108f1742a7..b498cacef6 100644
> --- a/app/test-eventdev/test_perf_queue.c
> +++ b/app/test-eventdev/test_perf_queue.c
> @@ -39,13 +39,14 @@ fwd_event(struct rte_event *const ev, uint8_t *const sched_type_list,
> static int
> perf_queue_worker(void *arg, const int enable_fwd_latency)
> {
> - PERF_WORKER_INIT;
> + uint16_t enq = 0, deq = 0;
> struct rte_event ev;
> + PERF_WORKER_INIT;
>
> while (t->done == false) {
> - uint16_t event = rte_event_dequeue_burst(dev, port, &ev, 1, 0);
> + deq = rte_event_dequeue_burst(dev, port, &ev, 1, 0);
>
> - if (!event) {
> + if (!deq) {
> rte_pause();
> continue;
> }
> @@ -80,24 +81,29 @@ perf_queue_worker(void *arg, const int enable_fwd_latency)
> &ev, w, bufs, sz, cnt);
> } else {
> fwd_event(&ev, sched_type_list, nb_stages);
> - while (rte_event_enqueue_burst(dev, port, &ev, 1) != 1)
> - rte_pause();
> + do {
> + enq = rte_event_enqueue_burst(dev, port, &ev,
> + 1);
> + } while (!enq && !t->done);
> }
> }
> +
> + perf_worker_cleanup(pool, dev, port, &ev, enq, deq);
> +
> return 0;
> }
>
> static int
> perf_queue_worker_burst(void *arg, const int enable_fwd_latency)
> {
> - PERF_WORKER_INIT;
> - uint16_t i;
> /* +1 to avoid prefetch out of array check */
> struct rte_event ev[BURST_SIZE + 1];
> + uint16_t enq = 0, nb_rx = 0;
> + PERF_WORKER_INIT;
> + uint16_t i;
>
> while (t->done == false) {
> - uint16_t const nb_rx = rte_event_dequeue_burst(dev, port, ev,
> - BURST_SIZE, 0);
> + nb_rx = rte_event_dequeue_burst(dev, port, ev, BURST_SIZE, 0);
>
> if (!nb_rx) {
> rte_pause();
> @@ -147,14 +153,16 @@ perf_queue_worker_burst(void *arg, const int enable_fwd_latency)
> }
> }
>
> - uint16_t enq;
>
> enq = rte_event_enqueue_burst(dev, port, ev, nb_rx);
> - while (enq < nb_rx) {
> + while (enq < nb_rx && !t->done) {
> enq += rte_event_enqueue_burst(dev, port,
> ev + enq, nb_rx - enq);
> }
> }
> +
> + perf_worker_cleanup(pool, dev, port, ev, enq, nb_rx);
> +
> return 0;
> }
>
> diff --git a/app/test-eventdev/test_pipeline_atq.c b/app/test-eventdev/test_pipeline_atq.c
> index 79218502ba..4b10197127 100644
> --- a/app/test-eventdev/test_pipeline_atq.c
> +++ b/app/test-eventdev/test_pipeline_atq.c
> @@ -21,18 +21,20 @@ static __rte_noinline int
> pipeline_atq_worker_single_stage_tx(void *arg)
> {
> PIPELINE_WORKER_SINGLE_STAGE_INIT;
> + uint8_t enq = 0, deq = 0;
>
> while (t->done == false) {
> - uint16_t event = rte_event_dequeue_burst(dev, port, &ev, 1, 0);
> + deq = rte_event_dequeue_burst(dev, port, &ev, 1, 0);
>
> - if (!event) {
> + if (!deq) {
> rte_pause();
> continue;
> }
>
> - pipeline_event_tx(dev, port, &ev);
> + deq = pipeline_event_tx(dev, port, &ev, t);
> w->processed_pkts++;
> }
> + pipeline_worker_cleanup(dev, port, &ev, enq, deq);
>
> return 0;
> }
> @@ -42,20 +44,22 @@ pipeline_atq_worker_single_stage_fwd(void *arg)
> {
> PIPELINE_WORKER_SINGLE_STAGE_INIT;
> const uint8_t *tx_queue = t->tx_evqueue_id;
> + uint8_t enq = 0, deq = 0;
>
> while (t->done == false) {
> - uint16_t event = rte_event_dequeue_burst(dev, port, &ev, 1, 0);
> + deq = rte_event_dequeue_burst(dev, port, &ev, 1, 0);
>
> - if (!event) {
> + if (!deq) {
> rte_pause();
> continue;
> }
>
> ev.queue_id = tx_queue[ev.mbuf->port];
> pipeline_fwd_event(&ev, RTE_SCHED_TYPE_ATOMIC);
> - pipeline_event_enqueue(dev, port, &ev);
> + enq = pipeline_event_enqueue(dev, port, &ev, t);
> w->processed_pkts++;
> }
> + pipeline_worker_cleanup(dev, port, &ev, enq, deq);
>
> return 0;
> }
> @@ -64,10 +68,10 @@ static __rte_noinline int
> pipeline_atq_worker_single_stage_burst_tx(void *arg)
> {
> PIPELINE_WORKER_SINGLE_STAGE_BURST_INIT;
> + uint16_t nb_rx = 0, nb_tx = 0;
>
> while (t->done == false) {
> - uint16_t nb_rx = rte_event_dequeue_burst(dev, port, ev,
> - BURST_SIZE, 0);
> + nb_rx = rte_event_dequeue_burst(dev, port, ev, BURST_SIZE, 0);
>
> if (!nb_rx) {
> rte_pause();
> @@ -79,9 +83,10 @@ pipeline_atq_worker_single_stage_burst_tx(void *arg)
> rte_event_eth_tx_adapter_txq_set(ev[i].mbuf, 0);
> }
>
> - pipeline_event_tx_burst(dev, port, ev, nb_rx);
> - w->processed_pkts += nb_rx;
> + nb_tx = pipeline_event_tx_burst(dev, port, ev, nb_rx, t);
> + w->processed_pkts += nb_tx;
> }
> + pipeline_worker_cleanup(dev, port, ev, nb_tx, nb_rx);
>
> return 0;
> }
> @@ -91,10 +96,10 @@ pipeline_atq_worker_single_stage_burst_fwd(void *arg)
> {
> PIPELINE_WORKER_SINGLE_STAGE_BURST_INIT;
> const uint8_t *tx_queue = t->tx_evqueue_id;
> + uint16_t nb_rx = 0, nb_tx = 0;
>
> while (t->done == false) {
> - uint16_t nb_rx = rte_event_dequeue_burst(dev, port, ev,
> - BURST_SIZE, 0);
> + nb_rx = rte_event_dequeue_burst(dev, port, ev, BURST_SIZE, 0);
>
> if (!nb_rx) {
> rte_pause();
> @@ -108,9 +113,10 @@ pipeline_atq_worker_single_stage_burst_fwd(void *arg)
> pipeline_fwd_event(&ev[i], RTE_SCHED_TYPE_ATOMIC);
> }
>
> - pipeline_event_enqueue_burst(dev, port, ev, nb_rx);
> - w->processed_pkts += nb_rx;
> + nb_tx = pipeline_event_enqueue_burst(dev, port, ev, nb_rx, t);
> + w->processed_pkts += nb_tx;
> }
> + pipeline_worker_cleanup(dev, port, ev, nb_tx, nb_rx);
>
> return 0;
> }
> @@ -119,19 +125,21 @@ static __rte_noinline int
> pipeline_atq_worker_single_stage_tx_vector(void *arg)
> {
> PIPELINE_WORKER_SINGLE_STAGE_INIT;
> + uint8_t enq = 0, deq = 0;
> uint16_t vector_sz;
>
> while (!t->done) {
> - uint16_t event = rte_event_dequeue_burst(dev, port, &ev, 1, 0);
> + deq = rte_event_dequeue_burst(dev, port, &ev, 1, 0);
>
> - if (!event) {
> + if (!deq) {
> rte_pause();
> continue;
> }
> vector_sz = ev.vec->nb_elem;
> - pipeline_event_tx_vector(dev, port, &ev);
> + enq = pipeline_event_tx_vector(dev, port, &ev, t);
> w->processed_pkts += vector_sz;
> }
> + pipeline_worker_cleanup(dev, port, &ev, enq, deq);
>
> return 0;
> }
> @@ -141,12 +149,13 @@ pipeline_atq_worker_single_stage_fwd_vector(void *arg)
> {
> PIPELINE_WORKER_SINGLE_STAGE_INIT;
> const uint8_t *tx_queue = t->tx_evqueue_id;
> + uint8_t enq = 0, deq = 0;
> uint16_t vector_sz;
>
> while (!t->done) {
> - uint16_t event = rte_event_dequeue_burst(dev, port, &ev, 1, 0);
> + deq = rte_event_dequeue_burst(dev, port, &ev, 1, 0);
>
> - if (!event) {
> + if (!deq) {
> rte_pause();
> continue;
> }
> @@ -155,9 +164,10 @@ pipeline_atq_worker_single_stage_fwd_vector(void *arg)
> ev.queue_id = tx_queue[ev.vec->port];
> ev.vec->queue = 0;
> pipeline_fwd_event_vector(&ev, RTE_SCHED_TYPE_ATOMIC);
> - pipeline_event_enqueue(dev, port, &ev);
> + enq = pipeline_event_enqueue(dev, port, &ev, t);
> w->processed_pkts += vector_sz;
> }
> + pipeline_worker_cleanup(dev, port, &ev, enq, deq);
>
> return 0;
> }
> @@ -166,11 +176,11 @@ static __rte_noinline int
> pipeline_atq_worker_single_stage_burst_tx_vector(void *arg)
> {
> PIPELINE_WORKER_SINGLE_STAGE_BURST_INIT;
> + uint16_t nb_rx = 0, nb_tx = 0;
> uint16_t vector_sz;
>
> while (!t->done) {
> - uint16_t nb_rx =
> - rte_event_dequeue_burst(dev, port, ev, BURST_SIZE, 0);
> + nb_rx = rte_event_dequeue_burst(dev, port, ev, BURST_SIZE, 0);
>
> if (!nb_rx) {
> rte_pause();
> @@ -182,9 +192,10 @@ pipeline_atq_worker_single_stage_burst_tx_vector(void *arg)
> ev[i].vec->queue = 0;
> }
>
> - pipeline_event_tx_burst(dev, port, ev, nb_rx);
> + nb_tx = pipeline_event_tx_burst(dev, port, ev, nb_rx, t);
> w->processed_pkts += vector_sz;
> }
> + pipeline_worker_cleanup(dev, port, ev, nb_tx, nb_rx);
>
> return 0;
> }
> @@ -194,11 +205,11 @@ pipeline_atq_worker_single_stage_burst_fwd_vector(void *arg)
> {
> PIPELINE_WORKER_SINGLE_STAGE_BURST_INIT;
> const uint8_t *tx_queue = t->tx_evqueue_id;
> + uint16_t nb_rx = 0, nb_tx = 0;
> uint16_t vector_sz;
>
> while (!t->done) {
> - uint16_t nb_rx =
> - rte_event_dequeue_burst(dev, port, ev, BURST_SIZE, 0);
> + nb_rx = rte_event_dequeue_burst(dev, port, ev, BURST_SIZE, 0);
>
> if (!nb_rx) {
> rte_pause();
> @@ -214,9 +225,10 @@ pipeline_atq_worker_single_stage_burst_fwd_vector(void *arg)
> RTE_SCHED_TYPE_ATOMIC);
> }
>
> - pipeline_event_enqueue_burst(dev, port, ev, nb_rx);
> + nb_tx = pipeline_event_enqueue_burst(dev, port, ev, nb_rx, t);
> w->processed_pkts += vector_sz;
> }
> + pipeline_worker_cleanup(dev, port, ev, nb_tx, nb_rx);
>
> return 0;
> }
> @@ -225,11 +237,12 @@ static __rte_noinline int
> pipeline_atq_worker_multi_stage_tx(void *arg)
> {
> PIPELINE_WORKER_MULTI_STAGE_INIT;
> + uint8_t enq = 0, deq = 0;
>
> while (t->done == false) {
> - uint16_t event = rte_event_dequeue_burst(dev, port, &ev, 1, 0);
> + deq = rte_event_dequeue_burst(dev, port, &ev, 1, 0);
>
> - if (!event) {
> + if (!deq) {
> rte_pause();
> continue;
> }
> @@ -237,15 +250,16 @@ pipeline_atq_worker_multi_stage_tx(void *arg)
> cq_id = ev.sub_event_type % nb_stages;
>
> if (cq_id == last_queue) {
> - pipeline_event_tx(dev, port, &ev);
> + enq = pipeline_event_tx(dev, port, &ev, t);
> w->processed_pkts++;
> continue;
> }
>
> ev.sub_event_type++;
> pipeline_fwd_event(&ev, sched_type_list[cq_id]);
> - pipeline_event_enqueue(dev, port, &ev);
> + enq = pipeline_event_enqueue(dev, port, &ev, t);
> }
> + pipeline_worker_cleanup(dev, port, &ev, enq, deq);
>
> return 0;
> }
> @@ -255,11 +269,12 @@ pipeline_atq_worker_multi_stage_fwd(void *arg)
> {
> PIPELINE_WORKER_MULTI_STAGE_INIT;
> const uint8_t *tx_queue = t->tx_evqueue_id;
> + uint8_t enq = 0, deq = 0;
>
> while (t->done == false) {
> - uint16_t event = rte_event_dequeue_burst(dev, port, &ev, 1, 0);
> + deq = rte_event_dequeue_burst(dev, port, &ev, 1, 0);
>
> - if (!event) {
> + if (!deq) {
> rte_pause();
> continue;
> }
> @@ -275,8 +290,9 @@ pipeline_atq_worker_multi_stage_fwd(void *arg)
> pipeline_fwd_event(&ev, sched_type_list[cq_id]);
> }
>
> - pipeline_event_enqueue(dev, port, &ev);
> + enq = pipeline_event_enqueue(dev, port, &ev, t);
> }
> + pipeline_worker_cleanup(dev, port, &ev, enq, deq);
>
> return 0;
> }
> @@ -285,10 +301,10 @@ static __rte_noinline int
> pipeline_atq_worker_multi_stage_burst_tx(void *arg)
> {
> PIPELINE_WORKER_MULTI_STAGE_BURST_INIT;
> + uint16_t nb_rx = 0, nb_tx = 0;
>
> while (t->done == false) {
> - uint16_t nb_rx = rte_event_dequeue_burst(dev, port, ev,
> - BURST_SIZE, 0);
> + nb_rx = rte_event_dequeue_burst(dev, port, ev, BURST_SIZE, 0);
>
> if (!nb_rx) {
> rte_pause();
> @@ -300,7 +316,7 @@ pipeline_atq_worker_multi_stage_burst_tx(void *arg)
> cq_id = ev[i].sub_event_type % nb_stages;
>
> if (cq_id == last_queue) {
> - pipeline_event_tx(dev, port, &ev[i]);
> + pipeline_event_tx(dev, port, &ev[i], t);
> ev[i].op = RTE_EVENT_OP_RELEASE;
> w->processed_pkts++;
> continue;
> @@ -310,8 +326,9 @@ pipeline_atq_worker_multi_stage_burst_tx(void *arg)
> pipeline_fwd_event(&ev[i], sched_type_list[cq_id]);
> }
>
> - pipeline_event_enqueue_burst(dev, port, ev, nb_rx);
> + nb_tx = pipeline_event_enqueue_burst(dev, port, ev, nb_rx, t);
> }
> + pipeline_worker_cleanup(dev, port, ev, nb_tx, nb_rx);
>
> return 0;
> }
> @@ -321,10 +338,10 @@ pipeline_atq_worker_multi_stage_burst_fwd(void *arg)
> {
> PIPELINE_WORKER_MULTI_STAGE_BURST_INIT;
> const uint8_t *tx_queue = t->tx_evqueue_id;
> + uint16_t nb_rx = 0, nb_tx = 0;
>
> while (t->done == false) {
> - uint16_t nb_rx = rte_event_dequeue_burst(dev, port, ev,
> - BURST_SIZE, 0);
> + nb_rx = rte_event_dequeue_burst(dev, port, ev, BURST_SIZE, 0);
>
> if (!nb_rx) {
> rte_pause();
> @@ -347,8 +364,9 @@ pipeline_atq_worker_multi_stage_burst_fwd(void *arg)
> }
> }
>
> - pipeline_event_enqueue_burst(dev, port, ev, nb_rx);
> + nb_tx = pipeline_event_enqueue_burst(dev, port, ev, nb_rx, t);
> }
> + pipeline_worker_cleanup(dev, port, ev, nb_tx, nb_rx);
>
> return 0;
> }
> @@ -357,12 +375,13 @@ static __rte_noinline int
> pipeline_atq_worker_multi_stage_tx_vector(void *arg)
> {
> PIPELINE_WORKER_MULTI_STAGE_INIT;
> + uint8_t enq = 0, deq = 0;
> uint16_t vector_sz;
>
> while (!t->done) {
> - uint16_t event = rte_event_dequeue_burst(dev, port, &ev, 1, 0);
> + deq = rte_event_dequeue_burst(dev, port, &ev, 1, 0);
>
> - if (!event) {
> + if (!deq) {
> rte_pause();
> continue;
> }
> @@ -371,15 +390,16 @@ pipeline_atq_worker_multi_stage_tx_vector(void *arg)
>
> if (cq_id == last_queue) {
> vector_sz = ev.vec->nb_elem;
> - pipeline_event_tx_vector(dev, port, &ev);
> + enq = pipeline_event_tx_vector(dev, port, &ev, t);
> w->processed_pkts += vector_sz;
> continue;
> }
>
> ev.sub_event_type++;
> pipeline_fwd_event_vector(&ev, sched_type_list[cq_id]);
> - pipeline_event_enqueue(dev, port, &ev);
> + enq = pipeline_event_enqueue(dev, port, &ev, t);
> }
> + pipeline_worker_cleanup(dev, port, &ev, enq, deq);
>
> return 0;
> }
> @@ -389,12 +409,13 @@ pipeline_atq_worker_multi_stage_fwd_vector(void *arg)
> {
> PIPELINE_WORKER_MULTI_STAGE_INIT;
> const uint8_t *tx_queue = t->tx_evqueue_id;
> + uint8_t enq = 0, deq = 0;
> uint16_t vector_sz;
>
> while (!t->done) {
> - uint16_t event = rte_event_dequeue_burst(dev, port, &ev, 1, 0);
> + deq = rte_event_dequeue_burst(dev, port, &ev, 1, 0);
>
> - if (!event) {
> + if (!deq) {
> rte_pause();
> continue;
> }
> @@ -406,14 +427,15 @@ pipeline_atq_worker_multi_stage_fwd_vector(void *arg)
> ev.vec->queue = 0;
> vector_sz = ev.vec->nb_elem;
> pipeline_fwd_event_vector(&ev, RTE_SCHED_TYPE_ATOMIC);
> - pipeline_event_enqueue(dev, port, &ev);
> + enq = pipeline_event_enqueue(dev, port, &ev, t);
> w->processed_pkts += vector_sz;
> } else {
> ev.sub_event_type++;
> pipeline_fwd_event_vector(&ev, sched_type_list[cq_id]);
> - pipeline_event_enqueue(dev, port, &ev);
> + enq = pipeline_event_enqueue(dev, port, &ev, t);
> }
> }
> + pipeline_worker_cleanup(dev, port, &ev, enq, deq);
>
> return 0;
> }
> @@ -422,11 +444,11 @@ static __rte_noinline int
> pipeline_atq_worker_multi_stage_burst_tx_vector(void *arg)
> {
> PIPELINE_WORKER_MULTI_STAGE_BURST_INIT;
> + uint16_t nb_rx = 0, nb_tx = 0;
> uint16_t vector_sz;
>
> while (!t->done) {
> - uint16_t nb_rx =
> - rte_event_dequeue_burst(dev, port, ev, BURST_SIZE, 0);
> + nb_rx = rte_event_dequeue_burst(dev, port, ev, BURST_SIZE, 0);
>
> if (!nb_rx) {
> rte_pause();
> @@ -438,7 +460,7 @@ pipeline_atq_worker_multi_stage_burst_tx_vector(void *arg)
>
> if (cq_id == last_queue) {
> vector_sz = ev[i].vec->nb_elem;
> - pipeline_event_tx_vector(dev, port, &ev[i]);
> + pipeline_event_tx_vector(dev, port, &ev[i], t);
> ev[i].op = RTE_EVENT_OP_RELEASE;
> w->processed_pkts += vector_sz;
> continue;
> @@ -449,8 +471,9 @@ pipeline_atq_worker_multi_stage_burst_tx_vector(void *arg)
> sched_type_list[cq_id]);
> }
>
> - pipeline_event_enqueue_burst(dev, port, ev, nb_rx);
> + nb_tx = pipeline_event_enqueue_burst(dev, port, ev, nb_rx, t);
> }
> + pipeline_worker_cleanup(dev, port, ev, nb_tx, nb_rx);
>
> return 0;
> }
> @@ -460,11 +483,11 @@ pipeline_atq_worker_multi_stage_burst_fwd_vector(void *arg)
> {
> PIPELINE_WORKER_MULTI_STAGE_BURST_INIT;
> const uint8_t *tx_queue = t->tx_evqueue_id;
> + uint16_t nb_rx = 0, nb_tx = 0;
> uint16_t vector_sz;
>
> while (!t->done) {
> - uint16_t nb_rx =
> - rte_event_dequeue_burst(dev, port, ev, BURST_SIZE, 0);
> + nb_rx = rte_event_dequeue_burst(dev, port, ev, BURST_SIZE, 0);
>
> if (!nb_rx) {
> rte_pause();
> @@ -488,8 +511,9 @@ pipeline_atq_worker_multi_stage_burst_fwd_vector(void *arg)
> }
> }
>
> - pipeline_event_enqueue_burst(dev, port, ev, nb_rx);
> + nb_tx = pipeline_event_enqueue_burst(dev, port, ev, nb_rx, t);
> }
> + pipeline_worker_cleanup(dev, port, ev, nb_tx, nb_rx);
>
> return 0;
> }
> diff --git a/app/test-eventdev/test_pipeline_common.c b/app/test-eventdev/test_pipeline_common.c
> index d994c91678..a8dd070000 100644
> --- a/app/test-eventdev/test_pipeline_common.c
> +++ b/app/test-eventdev/test_pipeline_common.c
> @@ -505,6 +505,45 @@ pipeline_event_tx_adapter_setup(struct evt_options *opt,
> return ret;
> }
>
> +static void
> +pipeline_vector_array_free(struct rte_event events[], uint16_t num)
> +{
> + uint16_t i;
> +
> + for (i = 0; i < num; i++) {
> + rte_pktmbuf_free_bulk(events[i].vec->mbufs,
> + events[i].vec->nb_elem);
> + rte_mempool_put(rte_mempool_from_obj(events[i].vec),
> + events[i].vec);
> + }
> +}
> +
> +void
> +pipeline_worker_cleanup(uint8_t dev, uint8_t port, struct rte_event ev[],
> + uint16_t enq, uint16_t deq)
> +{
> + int i;
> +
> + if (!(deq - enq))
> + return;
> +
> + if (deq) {
> + for (i = enq; i < deq; i++) {
> + if (ev[i].op == RTE_EVENT_OP_RELEASE)
> + continue;
> + if (ev[i].event_type & RTE_EVENT_TYPE_VECTOR)
> + pipeline_vector_array_free(&ev[i], 1);
> + else
> + rte_pktmbuf_free(ev[i].mbuf);
> + }
> +
> + for (i = 0; i < deq; i++)
> + ev[i].op = RTE_EVENT_OP_RELEASE;
> +
> + rte_event_enqueue_burst(dev, port, ev, deq);
> + }
> +}
> +
> void
> pipeline_ethdev_rx_stop(struct evt_test *test, struct evt_options *opt)
> {
> diff --git a/app/test-eventdev/test_pipeline_common.h b/app/test-eventdev/test_pipeline_common.h
> index c979c33772..a6443faea4 100644
> --- a/app/test-eventdev/test_pipeline_common.h
> +++ b/app/test-eventdev/test_pipeline_common.h
> @@ -109,59 +109,80 @@ pipeline_fwd_event_vector(struct rte_event *ev, uint8_t sched)
> ev->sched_type = sched;
> }
>
> -static __rte_always_inline void
> +static __rte_always_inline uint8_t
> pipeline_event_tx(const uint8_t dev, const uint8_t port,
> - struct rte_event * const ev)
> + struct rte_event *const ev, struct test_pipeline *t)
> {
> + uint8_t enq;
> +
> rte_event_eth_tx_adapter_txq_set(ev->mbuf, 0);
> - while (!rte_event_eth_tx_adapter_enqueue(dev, port, ev, 1, 0))
> - rte_pause();
> + do {
> + enq = rte_event_eth_tx_adapter_enqueue(dev, port, ev, 1, 0);
> + } while (!enq && !t->done);
> +
> + return enq;
> }
>
> -static __rte_always_inline void
> +static __rte_always_inline uint8_t
> pipeline_event_tx_vector(const uint8_t dev, const uint8_t port,
> - struct rte_event *const ev)
> + struct rte_event *const ev, struct test_pipeline *t)
> {
> + uint8_t enq;
> +
> ev->vec->queue = 0;
> + do {
> + enq = rte_event_eth_tx_adapter_enqueue(dev, port, ev, 1, 0);
> + } while (!enq && !t->done);
>
> - while (!rte_event_eth_tx_adapter_enqueue(dev, port, ev, 1, 0))
> - rte_pause();
> + return enq;
> }
>
> -static __rte_always_inline void
> +static __rte_always_inline uint16_t
> pipeline_event_tx_burst(const uint8_t dev, const uint8_t port,
> - struct rte_event *ev, const uint16_t nb_rx)
> + struct rte_event *ev, const uint16_t nb_rx,
> + struct test_pipeline *t)
> {
> uint16_t enq;
>
> enq = rte_event_eth_tx_adapter_enqueue(dev, port, ev, nb_rx, 0);
> - while (enq < nb_rx) {
> + while (enq < nb_rx && !t->done) {
> enq += rte_event_eth_tx_adapter_enqueue(dev, port,
> ev + enq, nb_rx - enq, 0);
> }
> +
> + return enq;
> }
>
> -static __rte_always_inline void
> +static __rte_always_inline uint8_t
> pipeline_event_enqueue(const uint8_t dev, const uint8_t port,
> - struct rte_event *ev)
> + struct rte_event *ev, struct test_pipeline *t)
> {
> - while (rte_event_enqueue_burst(dev, port, ev, 1) != 1)
> - rte_pause();
> + uint8_t enq;
> +
> + do {
> + enq = rte_event_enqueue_burst(dev, port, ev, 1);
> + } while (!enq && !t->done);
> +
> + return enq;
> }
>
> -static __rte_always_inline void
> +static __rte_always_inline uint16_t
> pipeline_event_enqueue_burst(const uint8_t dev, const uint8_t port,
> - struct rte_event *ev, const uint16_t nb_rx)
> + struct rte_event *ev, const uint16_t nb_rx,
> + struct test_pipeline *t)
> {
> uint16_t enq;
>
> enq = rte_event_enqueue_burst(dev, port, ev, nb_rx);
> - while (enq < nb_rx) {
> + while (enq < nb_rx && !t->done) {
> enq += rte_event_enqueue_burst(dev, port,
> ev + enq, nb_rx - enq);
> }
> +
> + return enq;
> }
>
> +
> static inline int
> pipeline_nb_event_ports(struct evt_options *opt)
> {
> @@ -188,5 +209,7 @@ void pipeline_eventdev_destroy(struct evt_test *test, struct evt_options *opt);
> void pipeline_ethdev_destroy(struct evt_test *test, struct evt_options *opt);
> void pipeline_ethdev_rx_stop(struct evt_test *test, struct evt_options *opt);
> void pipeline_mempool_destroy(struct evt_test *test, struct evt_options *opt);
> +void pipeline_worker_cleanup(uint8_t dev, uint8_t port, struct rte_event ev[],
> + uint16_t enq, uint16_t deq);
>
> #endif /* _TEST_PIPELINE_COMMON_ */
> diff --git a/app/test-eventdev/test_pipeline_queue.c b/app/test-eventdev/test_pipeline_queue.c
> index 343f8f3b1d..e989396474 100644
> --- a/app/test-eventdev/test_pipeline_queue.c
> +++ b/app/test-eventdev/test_pipeline_queue.c
> @@ -21,24 +21,27 @@ static __rte_noinline int
> pipeline_queue_worker_single_stage_tx(void *arg)
> {
> PIPELINE_WORKER_SINGLE_STAGE_INIT;
> + uint8_t enq = 0, deq = 0;
>
> while (t->done == false) {
> - uint16_t event = rte_event_dequeue_burst(dev, port, &ev, 1, 0);
> + deq = rte_event_dequeue_burst(dev, port, &ev, 1, 0);
>
> - if (!event) {
> + if (!deq) {
> rte_pause();
> continue;
> }
>
> if (ev.sched_type == RTE_SCHED_TYPE_ATOMIC) {
> - pipeline_event_tx(dev, port, &ev);
> + enq = pipeline_event_tx(dev, port, &ev, t);
> + ev.op = RTE_EVENT_OP_RELEASE;
> w->processed_pkts++;
> } else {
> ev.queue_id++;
> pipeline_fwd_event(&ev, RTE_SCHED_TYPE_ATOMIC);
> - pipeline_event_enqueue(dev, port, &ev);
> + enq = pipeline_event_enqueue(dev, port, &ev, t);
> }
> }
> + pipeline_worker_cleanup(dev, port, &ev, enq, deq);
>
> return 0;
> }
> @@ -48,11 +51,12 @@ pipeline_queue_worker_single_stage_fwd(void *arg)
> {
> PIPELINE_WORKER_SINGLE_STAGE_INIT;
> const uint8_t *tx_queue = t->tx_evqueue_id;
> + uint8_t enq = 0, deq = 0;
>
> while (t->done == false) {
> - uint16_t event = rte_event_dequeue_burst(dev, port, &ev, 1, 0);
> + deq = rte_event_dequeue_burst(dev, port, &ev, 1, 0);
>
> - if (!event) {
> + if (!deq) {
> rte_pause();
> continue;
> }
> @@ -60,9 +64,10 @@ pipeline_queue_worker_single_stage_fwd(void *arg)
> ev.queue_id = tx_queue[ev.mbuf->port];
> rte_event_eth_tx_adapter_txq_set(ev.mbuf, 0);
> pipeline_fwd_event(&ev, RTE_SCHED_TYPE_ATOMIC);
> - pipeline_event_enqueue(dev, port, &ev);
> + enq = pipeline_event_enqueue(dev, port, &ev, t);
> w->processed_pkts++;
> }
> + pipeline_worker_cleanup(dev, port, &ev, enq, deq);
>
> return 0;
> }
> @@ -71,10 +76,10 @@ static __rte_noinline int
> pipeline_queue_worker_single_stage_burst_tx(void *arg)
> {
> PIPELINE_WORKER_SINGLE_STAGE_BURST_INIT;
> + uint16_t nb_rx = 0, nb_tx = 0;
>
> while (t->done == false) {
> - uint16_t nb_rx = rte_event_dequeue_burst(dev, port, ev,
> - BURST_SIZE, 0);
> + nb_rx = rte_event_dequeue_burst(dev, port, ev, BURST_SIZE, 0);
>
> if (!nb_rx) {
> rte_pause();
> @@ -84,17 +89,18 @@ pipeline_queue_worker_single_stage_burst_tx(void *arg)
> for (i = 0; i < nb_rx; i++) {
> rte_prefetch0(ev[i + 1].mbuf);
> if (ev[i].sched_type == RTE_SCHED_TYPE_ATOMIC) {
> - pipeline_event_tx(dev, port, &ev[i]);
> + pipeline_event_tx(dev, port, &ev[i], t);
> + ev[i].op = RTE_EVENT_OP_RELEASE;
> w->processed_pkts++;
> } else {
> ev[i].queue_id++;
> pipeline_fwd_event(&ev[i],
> RTE_SCHED_TYPE_ATOMIC);
> - pipeline_event_enqueue_burst(dev, port, ev,
> - nb_rx);
> }
> }
> + nb_tx = pipeline_event_enqueue_burst(dev, port, ev, nb_rx, t);
> }
> + pipeline_worker_cleanup(dev, port, ev, nb_tx, nb_rx);
>
> return 0;
> }
> @@ -104,10 +110,10 @@ pipeline_queue_worker_single_stage_burst_fwd(void *arg)
> {
> PIPELINE_WORKER_SINGLE_STAGE_BURST_INIT;
> const uint8_t *tx_queue = t->tx_evqueue_id;
> + uint16_t nb_rx = 0, nb_tx = 0;
>
> while (t->done == false) {
> - uint16_t nb_rx = rte_event_dequeue_burst(dev, port, ev,
> - BURST_SIZE, 0);
> + nb_rx = rte_event_dequeue_burst(dev, port, ev, BURST_SIZE, 0);
>
> if (!nb_rx) {
> rte_pause();
> @@ -121,9 +127,10 @@ pipeline_queue_worker_single_stage_burst_fwd(void *arg)
> pipeline_fwd_event(&ev[i], RTE_SCHED_TYPE_ATOMIC);
> }
>
> - pipeline_event_enqueue_burst(dev, port, ev, nb_rx);
> + nb_tx = pipeline_event_enqueue_burst(dev, port, ev, nb_rx, t);
> w->processed_pkts += nb_rx;
> }
> + pipeline_worker_cleanup(dev, port, ev, nb_tx, nb_rx);
>
> return 0;
> }
> @@ -132,26 +139,29 @@ static __rte_noinline int
> pipeline_queue_worker_single_stage_tx_vector(void *arg)
> {
> PIPELINE_WORKER_SINGLE_STAGE_INIT;
> + uint8_t enq = 0, deq = 0;
> uint16_t vector_sz;
>
> while (!t->done) {
> - uint16_t event = rte_event_dequeue_burst(dev, port, &ev, 1, 0);
> + deq = rte_event_dequeue_burst(dev, port, &ev, 1, 0);
>
> - if (!event) {
> + if (!deq) {
> rte_pause();
> continue;
> }
>
> if (ev.sched_type == RTE_SCHED_TYPE_ATOMIC) {
> vector_sz = ev.vec->nb_elem;
> - pipeline_event_tx_vector(dev, port, &ev);
> + enq = pipeline_event_tx_vector(dev, port, &ev, t);
> + ev.op = RTE_EVENT_OP_RELEASE;
> w->processed_pkts += vector_sz;
> } else {
> ev.queue_id++;
> pipeline_fwd_event_vector(&ev, RTE_SCHED_TYPE_ATOMIC);
> - pipeline_event_enqueue(dev, port, &ev);
> + enq = pipeline_event_enqueue(dev, port, &ev, t);
> }
> }
> + pipeline_worker_cleanup(dev, port, &ev, enq, deq);
>
> return 0;
> }
> @@ -161,12 +171,13 @@ pipeline_queue_worker_single_stage_fwd_vector(void *arg)
> {
> PIPELINE_WORKER_SINGLE_STAGE_INIT;
> const uint8_t *tx_queue = t->tx_evqueue_id;
> + uint8_t enq = 0, deq = 0;
> uint16_t vector_sz;
>
> while (!t->done) {
> - uint16_t event = rte_event_dequeue_burst(dev, port, &ev, 1, 0);
> + deq = rte_event_dequeue_burst(dev, port, &ev, 1, 0);
>
> - if (!event) {
> + if (!deq) {
> rte_pause();
> continue;
> }
> @@ -175,9 +186,10 @@ pipeline_queue_worker_single_stage_fwd_vector(void *arg)
> ev.vec->queue = 0;
> vector_sz = ev.vec->nb_elem;
> pipeline_fwd_event_vector(&ev, RTE_SCHED_TYPE_ATOMIC);
> - pipeline_event_enqueue(dev, port, &ev);
> + enq = pipeline_event_enqueue(dev, port, &ev, t);
> w->processed_pkts += vector_sz;
> }
> + pipeline_worker_cleanup(dev, port, &ev, enq, deq);
>
> return 0;
> }
> @@ -186,11 +198,11 @@ static __rte_noinline int
> pipeline_queue_worker_single_stage_burst_tx_vector(void *arg)
> {
> PIPELINE_WORKER_SINGLE_STAGE_BURST_INIT;
> + uint16_t nb_rx = 0, nb_tx = 0;
> uint16_t vector_sz;
>
> while (!t->done) {
> - uint16_t nb_rx =
> - rte_event_dequeue_burst(dev, port, ev, BURST_SIZE, 0);
> + nb_rx = rte_event_dequeue_burst(dev, port, ev, BURST_SIZE, 0);
>
> if (!nb_rx) {
> rte_pause();
> @@ -200,7 +212,7 @@ pipeline_queue_worker_single_stage_burst_tx_vector(void *arg)
> for (i = 0; i < nb_rx; i++) {
> if (ev[i].sched_type == RTE_SCHED_TYPE_ATOMIC) {
> vector_sz = ev[i].vec->nb_elem;
> - pipeline_event_tx_vector(dev, port, &ev[i]);
> + pipeline_event_tx_vector(dev, port, &ev[i], t);
> ev[i].op = RTE_EVENT_OP_RELEASE;
> w->processed_pkts += vector_sz;
> } else {
> @@ -210,8 +222,9 @@ pipeline_queue_worker_single_stage_burst_tx_vector(void *arg)
> }
> }
>
> - pipeline_event_enqueue_burst(dev, port, ev, nb_rx);
> + nb_tx = pipeline_event_enqueue_burst(dev, port, ev, nb_rx, t);
> }
> + pipeline_worker_cleanup(dev, port, ev, nb_tx, nb_rx);
>
> return 0;
> }
> @@ -221,11 +234,11 @@ pipeline_queue_worker_single_stage_burst_fwd_vector(void *arg)
> {
> PIPELINE_WORKER_SINGLE_STAGE_BURST_INIT;
> const uint8_t *tx_queue = t->tx_evqueue_id;
> + uint16_t nb_rx = 0, nb_tx = 0;
> uint16_t vector_sz;
>
> while (!t->done) {
> - uint16_t nb_rx =
> - rte_event_dequeue_burst(dev, port, ev, BURST_SIZE, 0);
> + nb_rx = rte_event_dequeue_burst(dev, port, ev, BURST_SIZE, 0);
>
> if (!nb_rx) {
> rte_pause();
> @@ -241,9 +254,10 @@ pipeline_queue_worker_single_stage_burst_fwd_vector(void *arg)
> RTE_SCHED_TYPE_ATOMIC);
> }
>
> - pipeline_event_enqueue_burst(dev, port, ev, nb_rx);
> + nb_tx = pipeline_event_enqueue_burst(dev, port, ev, nb_rx, t);
> w->processed_pkts += vector_sz;
> }
> + pipeline_worker_cleanup(dev, port, ev, nb_tx, nb_rx);
>
> return 0;
> }
> @@ -253,11 +267,12 @@ pipeline_queue_worker_multi_stage_tx(void *arg)
> {
> PIPELINE_WORKER_MULTI_STAGE_INIT;
> const uint8_t *tx_queue = t->tx_evqueue_id;
> + uint8_t enq = 0, deq = 0;
>
> while (t->done == false) {
> - uint16_t event = rte_event_dequeue_burst(dev, port, &ev, 1, 0);
> + deq = rte_event_dequeue_burst(dev, port, &ev, 1, 0);
>
> - if (!event) {
> + if (!deq) {
> rte_pause();
> continue;
> }
> @@ -265,7 +280,8 @@ pipeline_queue_worker_multi_stage_tx(void *arg)
> cq_id = ev.queue_id % nb_stages;
>
> if (ev.queue_id == tx_queue[ev.mbuf->port]) {
> - pipeline_event_tx(dev, port, &ev);
> + enq = pipeline_event_tx(dev, port, &ev, t);
> + ev.op = RTE_EVENT_OP_RELEASE;
> w->processed_pkts++;
> continue;
> }
> @@ -274,8 +290,9 @@ pipeline_queue_worker_multi_stage_tx(void *arg)
> pipeline_fwd_event(&ev, cq_id != last_queue ?
> sched_type_list[cq_id] :
> RTE_SCHED_TYPE_ATOMIC);
> - pipeline_event_enqueue(dev, port, &ev);
> + enq = pipeline_event_enqueue(dev, port, &ev, t);
> }
> + pipeline_worker_cleanup(dev, port, &ev, enq, deq);
>
> return 0;
> }
> @@ -285,11 +302,12 @@ pipeline_queue_worker_multi_stage_fwd(void *arg)
> {
> PIPELINE_WORKER_MULTI_STAGE_INIT;
> const uint8_t *tx_queue = t->tx_evqueue_id;
> + uint8_t enq = 0, deq = 0;
>
> while (t->done == false) {
> - uint16_t event = rte_event_dequeue_burst(dev, port, &ev, 1, 0);
> + deq = rte_event_dequeue_burst(dev, port, &ev, 1, 0);
>
> - if (!event) {
> + if (!deq) {
> rte_pause();
> continue;
> }
> @@ -300,14 +318,15 @@ pipeline_queue_worker_multi_stage_fwd(void *arg)
> ev.queue_id = tx_queue[ev.mbuf->port];
> rte_event_eth_tx_adapter_txq_set(ev.mbuf, 0);
> pipeline_fwd_event(&ev, RTE_SCHED_TYPE_ATOMIC);
> - pipeline_event_enqueue(dev, port, &ev);
> + enq = pipeline_event_enqueue(dev, port, &ev, t);
> w->processed_pkts++;
> } else {
> ev.queue_id++;
> pipeline_fwd_event(&ev, sched_type_list[cq_id]);
> - pipeline_event_enqueue(dev, port, &ev);
> + enq = pipeline_event_enqueue(dev, port, &ev, t);
> }
> }
> + pipeline_worker_cleanup(dev, port, &ev, enq, deq);
>
> return 0;
> }
> @@ -317,10 +336,10 @@ pipeline_queue_worker_multi_stage_burst_tx(void *arg)
> {
> PIPELINE_WORKER_MULTI_STAGE_BURST_INIT;
> const uint8_t *tx_queue = t->tx_evqueue_id;
> + uint16_t nb_rx = 0, nb_tx = 0;
>
> while (t->done == false) {
> - uint16_t nb_rx = rte_event_dequeue_burst(dev, port, ev,
> - BURST_SIZE, 0);
> + nb_rx = rte_event_dequeue_burst(dev, port, ev, BURST_SIZE, 0);
>
> if (!nb_rx) {
> rte_pause();
> @@ -332,7 +351,8 @@ pipeline_queue_worker_multi_stage_burst_tx(void *arg)
> cq_id = ev[i].queue_id % nb_stages;
>
> if (ev[i].queue_id == tx_queue[ev[i].mbuf->port]) {
> - pipeline_event_tx(dev, port, &ev[i]);
> + pipeline_event_tx(dev, port, &ev[i], t);
> + ev[i].op = RTE_EVENT_OP_RELEASE;
> w->processed_pkts++;
> continue;
> }
> @@ -341,9 +361,10 @@ pipeline_queue_worker_multi_stage_burst_tx(void *arg)
> pipeline_fwd_event(&ev[i], cq_id != last_queue ?
> sched_type_list[cq_id] :
> RTE_SCHED_TYPE_ATOMIC);
> - pipeline_event_enqueue_burst(dev, port, ev, nb_rx);
> }
> + nb_tx = pipeline_event_enqueue_burst(dev, port, ev, nb_rx, t);
> }
> + pipeline_worker_cleanup(dev, port, ev, nb_tx, nb_rx);
>
> return 0;
> }
> @@ -353,11 +374,11 @@ pipeline_queue_worker_multi_stage_burst_fwd(void *arg)
> {
> PIPELINE_WORKER_MULTI_STAGE_BURST_INIT;
> const uint8_t *tx_queue = t->tx_evqueue_id;
> + uint16_t nb_rx = 0, nb_tx = 0;
>
> while (t->done == false) {
> uint16_t processed_pkts = 0;
> - uint16_t nb_rx = rte_event_dequeue_burst(dev, port, ev,
> - BURST_SIZE, 0);
> + nb_rx = rte_event_dequeue_burst(dev, port, ev, BURST_SIZE, 0);
>
> if (!nb_rx) {
> rte_pause();
> @@ -381,9 +402,10 @@ pipeline_queue_worker_multi_stage_burst_fwd(void *arg)
> }
> }
>
> - pipeline_event_enqueue_burst(dev, port, ev, nb_rx);
> + nb_tx = pipeline_event_enqueue_burst(dev, port, ev, nb_rx, t);
> w->processed_pkts += processed_pkts;
> }
> + pipeline_worker_cleanup(dev, port, ev, nb_tx, nb_rx);
>
> return 0;
> }
> @@ -393,12 +415,13 @@ pipeline_queue_worker_multi_stage_tx_vector(void *arg)
> {
> PIPELINE_WORKER_MULTI_STAGE_INIT;
> const uint8_t *tx_queue = t->tx_evqueue_id;
> + uint8_t enq = 0, deq = 0;
> uint16_t vector_sz;
>
> while (!t->done) {
> - uint16_t event = rte_event_dequeue_burst(dev, port, &ev, 1, 0);
> + deq = rte_event_dequeue_burst(dev, port, &ev, 1, 0);
>
> - if (!event) {
> + if (!deq) {
> rte_pause();
> continue;
> }
> @@ -407,8 +430,9 @@ pipeline_queue_worker_multi_stage_tx_vector(void *arg)
>
> if (ev.queue_id == tx_queue[ev.vec->port]) {
> vector_sz = ev.vec->nb_elem;
> - pipeline_event_tx_vector(dev, port, &ev);
> + enq = pipeline_event_tx_vector(dev, port, &ev, t);
> w->processed_pkts += vector_sz;
> + ev.op = RTE_EVENT_OP_RELEASE;
> continue;
> }
>
> @@ -416,8 +440,9 @@ pipeline_queue_worker_multi_stage_tx_vector(void *arg)
> pipeline_fwd_event_vector(&ev, cq_id != last_queue
> ? sched_type_list[cq_id]
> : RTE_SCHED_TYPE_ATOMIC);
> - pipeline_event_enqueue(dev, port, &ev);
> + enq = pipeline_event_enqueue(dev, port, &ev, t);
> }
> + pipeline_worker_cleanup(dev, port, &ev, enq, deq);
>
> return 0;
> }
> @@ -427,12 +452,13 @@ pipeline_queue_worker_multi_stage_fwd_vector(void *arg)
> {
> PIPELINE_WORKER_MULTI_STAGE_INIT;
> const uint8_t *tx_queue = t->tx_evqueue_id;
> + uint8_t enq = 0, deq = 0;
> uint16_t vector_sz;
>
> while (!t->done) {
> - uint16_t event = rte_event_dequeue_burst(dev, port, &ev, 1, 0);
> + deq = rte_event_dequeue_burst(dev, port, &ev, 1, 0);
>
> - if (!event) {
> + if (!deq) {
> rte_pause();
> continue;
> }
> @@ -449,8 +475,9 @@ pipeline_queue_worker_multi_stage_fwd_vector(void *arg)
> pipeline_fwd_event_vector(&ev, sched_type_list[cq_id]);
> }
>
> - pipeline_event_enqueue(dev, port, &ev);
> + enq = pipeline_event_enqueue(dev, port, &ev, t);
> }
> + pipeline_worker_cleanup(dev, port, &ev, enq, deq);
>
> return 0;
> }
> @@ -460,11 +487,11 @@ pipeline_queue_worker_multi_stage_burst_tx_vector(void *arg)
> {
> PIPELINE_WORKER_MULTI_STAGE_BURST_INIT;
> const uint8_t *tx_queue = t->tx_evqueue_id;
> + uint16_t nb_rx = 0, nb_tx = 0;
> uint16_t vector_sz;
>
> while (!t->done) {
> - uint16_t nb_rx =
> - rte_event_dequeue_burst(dev, port, ev, BURST_SIZE, 0);
> + nb_rx = rte_event_dequeue_burst(dev, port, ev, BURST_SIZE, 0);
>
> if (!nb_rx) {
> rte_pause();
> @@ -476,7 +503,7 @@ pipeline_queue_worker_multi_stage_burst_tx_vector(void *arg)
>
> if (ev[i].queue_id == tx_queue[ev[i].vec->port]) {
> vector_sz = ev[i].vec->nb_elem;
> - pipeline_event_tx_vector(dev, port, &ev[i]);
> + pipeline_event_tx_vector(dev, port, &ev[i], t);
> ev[i].op = RTE_EVENT_OP_RELEASE;
> w->processed_pkts += vector_sz;
> continue;
> @@ -489,8 +516,9 @@ pipeline_queue_worker_multi_stage_burst_tx_vector(void *arg)
> : RTE_SCHED_TYPE_ATOMIC);
> }
>
> - pipeline_event_enqueue_burst(dev, port, ev, nb_rx);
> + nb_tx = pipeline_event_enqueue_burst(dev, port, ev, nb_rx, t);
> }
> + pipeline_worker_cleanup(dev, port, ev, nb_tx, nb_rx);
>
> return 0;
> }
> @@ -500,11 +528,11 @@ pipeline_queue_worker_multi_stage_burst_fwd_vector(void *arg)
> {
> PIPELINE_WORKER_MULTI_STAGE_BURST_INIT;
> const uint8_t *tx_queue = t->tx_evqueue_id;
> + uint16_t nb_rx = 0, nb_tx = 0;
> uint16_t vector_sz;
>
> while (!t->done) {
> - uint16_t nb_rx =
> - rte_event_dequeue_burst(dev, port, ev, BURST_SIZE, 0);
> + nb_rx = rte_event_dequeue_burst(dev, port, ev, BURST_SIZE, 0);
>
> if (!nb_rx) {
> rte_pause();
> @@ -527,8 +555,9 @@ pipeline_queue_worker_multi_stage_burst_fwd_vector(void *arg)
> }
> }
>
> - pipeline_event_enqueue_burst(dev, port, ev, nb_rx);
> + nb_tx = pipeline_event_enqueue_burst(dev, port, ev, nb_rx, t);
> }
> + pipeline_worker_cleanup(dev, port, ev, nb_tx, nb_rx);
>
> return 0;
> }
> --
> 2.25.1
>
^ permalink raw reply [flat|nested] 17+ messages in thread
* Re: [PATCH 6/6] examples/ipsec-secgw: cleanup worker state before exit
2022-04-26 21:14 ` [PATCH 6/6] examples/ipsec-secgw: cleanup " Pavan Nikhilesh
@ 2022-05-13 13:41 ` Jerin Jacob
0 siblings, 0 replies; 17+ messages in thread
From: Jerin Jacob @ 2022-05-13 13:41 UTC (permalink / raw)
To: Pavan Nikhilesh; +Cc: Jerin Jacob, Radu Nicolau, Akhil Goyal, dpdk-dev
On Wed, Apr 27, 2022 at 2:45 AM Pavan Nikhilesh
<pbhagavatula@marvell.com> wrote:
>
> Event ports are configured to implicitly release, on the next call to
> rte_event_dequeue_burst(), any scheduler contexts they currently hold.
> A worker core might still hold a scheduling context during exit, as the
> next call to rte_event_dequeue_burst() is never made.
> This might lead to a deadlock, depending on the worker exit timing,
> especially when there are very few flows.
>
> Add a cleanup function to release any scheduling contexts held by the
> worker by using RTE_EVENT_OP_RELEASE.
>
> Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
Please fix the following checkpatch errors in this series:
### app/eventdev: simplify signal handling and teardown
WARNING:SPACING: space prohibited between function name and open parenthesis '('
#163: FILE: app/test-eventdev/test_perf_common.c:1112:
+ RTE_ETH_FOREACH_DEV (i) {
WARNING:SPACING: space prohibited between function name and open parenthesis '('
#234: FILE: app/test-eventdev/test_pipeline_common.c:515:
+ RTE_ETH_FOREACH_DEV (i) {
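For reference, checkpatch expects no space between the macro name and the
opening parenthesis, so the flagged lines should read (illustrative
fragment only):
	RTE_ETH_FOREACH_DEV(i) {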
> ---
> examples/ipsec-secgw/ipsec_worker.c | 40 ++++++++++++++++++++---------
> 1 file changed, 28 insertions(+), 12 deletions(-)
>
> diff --git a/examples/ipsec-secgw/ipsec_worker.c b/examples/ipsec-secgw/ipsec_worker.c
> index 8639426c5c..3df5acf384 100644
> --- a/examples/ipsec-secgw/ipsec_worker.c
> +++ b/examples/ipsec-secgw/ipsec_worker.c
> @@ -749,7 +749,7 @@ ipsec_wrkr_non_burst_int_port_drv_mode(struct eh_event_link_info *links,
> uint8_t nb_links)
> {
> struct port_drv_mode_data data[RTE_MAX_ETHPORTS];
> - unsigned int nb_rx = 0;
> + unsigned int nb_rx = 0, nb_tx;
> struct rte_mbuf *pkt;
> struct rte_event ev;
> uint32_t lcore_id;
> @@ -847,11 +847,19 @@ ipsec_wrkr_non_burst_int_port_drv_mode(struct eh_event_link_info *links,
> * directly enqueued to the adapter and it would be
> * internally submitted to the eth device.
> */
> - rte_event_eth_tx_adapter_enqueue(links[0].eventdev_id,
> - links[0].event_port_id,
> - &ev, /* events */
> - 1, /* nb_events */
> - 0 /* flags */);
> + nb_tx = rte_event_eth_tx_adapter_enqueue(links[0].eventdev_id,
> + links[0].event_port_id,
> + &ev, /* events */
> + 1, /* nb_events */
> + 0 /* flags */);
> + if (!nb_tx)
> + rte_pktmbuf_free(ev.mbuf);
> + }
> +
> + if (ev.u64) {
> + ev.op = RTE_EVENT_OP_RELEASE;
> + rte_event_enqueue_burst(links[0].eventdev_id,
> + links[0].event_port_id, &ev, 1);
> }
> }
>
> @@ -864,7 +872,7 @@ ipsec_wrkr_non_burst_int_port_app_mode(struct eh_event_link_info *links,
> uint8_t nb_links)
> {
> struct lcore_conf_ev_tx_int_port_wrkr lconf;
> - unsigned int nb_rx = 0;
> + unsigned int nb_rx = 0, nb_tx;
> struct rte_event ev;
> uint32_t lcore_id;
> int32_t socket_id;
> @@ -952,11 +960,19 @@ ipsec_wrkr_non_burst_int_port_app_mode(struct eh_event_link_info *links,
> * directly enqueued to the adapter and it would be
> * internally submitted to the eth device.
> */
> - rte_event_eth_tx_adapter_enqueue(links[0].eventdev_id,
> - links[0].event_port_id,
> - &ev, /* events */
> - 1, /* nb_events */
> - 0 /* flags */);
> + nb_tx = rte_event_eth_tx_adapter_enqueue(links[0].eventdev_id,
> + links[0].event_port_id,
> + &ev, /* events */
> + 1, /* nb_events */
> + 0 /* flags */);
> + if (!nb_tx)
> + rte_pktmbuf_free(ev.mbuf);
> + }
> +
> + if (ev.u64) {
> + ev.op = RTE_EVENT_OP_RELEASE;
> + rte_event_enqueue_burst(links[0].eventdev_id,
> + links[0].event_port_id, &ev, 1);
> }
> }
>
> --
> 2.25.1
>
^ permalink raw reply [flat|nested] 17+ messages in thread
* [PATCH v2 1/6] app/eventdev: simplify signal handling and teardown
2022-04-26 21:14 [PATCH 1/6] app/eventdev: simplify signal handling and teardown Pavan Nikhilesh
` (6 preceding siblings ...)
2022-05-13 13:39 ` Jerin Jacob
@ 2022-05-13 16:07 ` pbhagavatula
2022-05-13 16:07 ` [PATCH v2 2/6] app/eventdev: clean up worker state before exit pbhagavatula
` (5 more replies)
7 siblings, 6 replies; 17+ messages in thread
From: pbhagavatula @ 2022-05-13 16:07 UTC (permalink / raw)
To: jerinj; +Cc: dev, Pavan Nikhilesh
From: Pavan Nikhilesh <pbhagavatula@marvell.com>
Remove rte_*_dev calls from the signal handler callback, as signal
handlers are supposed to be lightweight.
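A minimal sketch of the resulting handler (mirroring the diff below, with
the printf omitted): it only flags the exit request and defers all device
teardown to the main lcore.
	static void
	signal_handler(int signum)
	{
		if (signum == SIGINT || signum == SIGTERM) {
			/* Only request exit here; all rte_*_dev teardown
			 * now runs in main() after the worker loop.
			 */
			*(int *)test->test_priv = true;
			rte_wmb();
		}
	}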
Split ethernet device teardown into Rx and Tx sections; disable Rx first
and wait for workers to finish processing, so that they can complete work
on any packets they currently hold.
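A condensed view of the resulting teardown order in main(), taken from
the diff below (the cryptodev teardown between steps 1 and 2 is elided):
	if (test->ops.ethdev_rx_stop)		/* 1. stop Rx */
		test->ops.ethdev_rx_stop(test, &opt);

	rte_eal_mp_wait_lcore();		/* 2. workers drain held packets */

	if (test->ops.ethdev_destroy)		/* 3. stop Tx, close devices */
		test->ops.ethdev_destroy(test, &opt);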
Verified SW event device on ARM64 using the following command:
./build/app/dpdk-test-eventdev -l 7-23 -s 0xf00 --vdev=event_sw0
-a 0002:02:00.0 -- --prod_type_ethdev --nb_pkts=0 --verbose 2
--test=pipeline_queue --stlist=o --wlcores 16-23
Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
---
app/test-eventdev/evt_main.c | 58 +++++++++---------------
app/test-eventdev/evt_test.h | 3 ++
app/test-eventdev/test_perf_atq.c | 1 +
app/test-eventdev/test_perf_common.c | 20 +++++++-
app/test-eventdev/test_perf_common.h | 4 +-
app/test-eventdev/test_perf_queue.c | 1 +
app/test-eventdev/test_pipeline_atq.c | 1 +
app/test-eventdev/test_pipeline_common.c | 19 +++++++-
app/test-eventdev/test_pipeline_common.h | 5 +-
app/test-eventdev/test_pipeline_queue.c | 1 +
10 files changed, 72 insertions(+), 41 deletions(-)
diff --git a/app/test-eventdev/evt_main.c b/app/test-eventdev/evt_main.c
index a7d6b0c1cf..c5d63061bf 100644
--- a/app/test-eventdev/evt_main.c
+++ b/app/test-eventdev/evt_main.c
@@ -19,11 +19,7 @@ struct evt_test *test;
static void
signal_handler(int signum)
{
- int i;
- static uint8_t once;
-
- if ((signum == SIGINT || signum == SIGTERM) && !once) {
- once = true;
+ if (signum == SIGINT || signum == SIGTERM) {
printf("\nSignal %d received, preparing to exit...\n",
signum);
@@ -31,36 +27,7 @@ signal_handler(int signum)
/* request all lcores to exit from the main loop */
*(int *)test->test_priv = true;
rte_wmb();
-
- if (test->ops.ethdev_destroy)
- test->ops.ethdev_destroy(test, &opt);
-
- if (test->ops.cryptodev_destroy)
- test->ops.cryptodev_destroy(test, &opt);
-
- rte_eal_mp_wait_lcore();
-
- if (test->ops.test_result)
- test->ops.test_result(test, &opt);
-
- if (opt.prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR) {
- RTE_ETH_FOREACH_DEV(i)
- rte_eth_dev_close(i);
- }
-
- if (test->ops.eventdev_destroy)
- test->ops.eventdev_destroy(test, &opt);
-
- if (test->ops.mempool_destroy)
- test->ops.mempool_destroy(test, &opt);
-
- if (test->ops.test_destroy)
- test->ops.test_destroy(test, &opt);
}
-
- /* exit with the expected status */
- signal(signum, SIG_DFL);
- kill(getpid(), signum);
}
}
@@ -189,10 +156,29 @@ main(int argc, char **argv)
}
}
+ if (test->ops.ethdev_rx_stop)
+ test->ops.ethdev_rx_stop(test, &opt);
+
+ if (test->ops.cryptodev_destroy)
+ test->ops.cryptodev_destroy(test, &opt);
+
rte_eal_mp_wait_lcore();
- /* Print the test result */
- ret = test->ops.test_result(test, &opt);
+ if (test->ops.test_result)
+ test->ops.test_result(test, &opt);
+
+ if (test->ops.ethdev_destroy)
+ test->ops.ethdev_destroy(test, &opt);
+
+ if (test->ops.eventdev_destroy)
+ test->ops.eventdev_destroy(test, &opt);
+
+ if (test->ops.mempool_destroy)
+ test->ops.mempool_destroy(test, &opt);
+
+ if (test->ops.test_destroy)
+ test->ops.test_destroy(test, &opt);
+
nocap:
if (ret == EVT_TEST_SUCCESS) {
printf("Result: "CLGRN"%s"CLNRM"\n", "Success");
diff --git a/app/test-eventdev/evt_test.h b/app/test-eventdev/evt_test.h
index 50fa474ec2..1049f99ddc 100644
--- a/app/test-eventdev/evt_test.h
+++ b/app/test-eventdev/evt_test.h
@@ -41,6 +41,8 @@ typedef void (*evt_test_eventdev_destroy_t)
(struct evt_test *test, struct evt_options *opt);
typedef void (*evt_test_ethdev_destroy_t)
(struct evt_test *test, struct evt_options *opt);
+typedef void (*evt_test_ethdev_rx_stop_t)(struct evt_test *test,
+ struct evt_options *opt);
typedef void (*evt_test_cryptodev_destroy_t)
(struct evt_test *test, struct evt_options *opt);
typedef void (*evt_test_mempool_destroy_t)
@@ -60,6 +62,7 @@ struct evt_test_ops {
evt_test_launch_lcores_t launch_lcores;
evt_test_result_t test_result;
evt_test_eventdev_destroy_t eventdev_destroy;
+ evt_test_ethdev_rx_stop_t ethdev_rx_stop;
evt_test_ethdev_destroy_t ethdev_destroy;
evt_test_cryptodev_destroy_t cryptodev_destroy;
evt_test_mempool_destroy_t mempool_destroy;
diff --git a/app/test-eventdev/test_perf_atq.c b/app/test-eventdev/test_perf_atq.c
index 67ff681666..bac3ea602f 100644
--- a/app/test-eventdev/test_perf_atq.c
+++ b/app/test-eventdev/test_perf_atq.c
@@ -343,6 +343,7 @@ static const struct evt_test_ops perf_atq = {
.test_setup = perf_test_setup,
.ethdev_setup = perf_ethdev_setup,
.cryptodev_setup = perf_cryptodev_setup,
+ .ethdev_rx_stop = perf_ethdev_rx_stop,
.mempool_setup = perf_mempool_setup,
.eventdev_setup = perf_atq_eventdev_setup,
.launch_lcores = perf_atq_launch_lcores,
diff --git a/app/test-eventdev/test_perf_common.c b/app/test-eventdev/test_perf_common.c
index 9d1f4a4567..4cf16b4267 100644
--- a/app/test-eventdev/test_perf_common.c
+++ b/app/test-eventdev/test_perf_common.c
@@ -1087,7 +1087,8 @@ perf_ethdev_setup(struct evt_test *test, struct evt_options *opt)
return 0;
}
-void perf_ethdev_destroy(struct evt_test *test, struct evt_options *opt)
+void
+perf_ethdev_rx_stop(struct evt_test *test, struct evt_options *opt)
{
uint16_t i;
RTE_SET_USED(test);
@@ -1095,6 +1096,23 @@ void perf_ethdev_destroy(struct evt_test *test, struct evt_options *opt)
if (opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR) {
RTE_ETH_FOREACH_DEV(i) {
rte_event_eth_rx_adapter_stop(i);
+ rte_event_eth_rx_adapter_queue_del(i, i, -1);
+ rte_eth_dev_rx_queue_stop(i, 0);
+ }
+ }
+}
+
+void
+perf_ethdev_destroy(struct evt_test *test, struct evt_options *opt)
+{
+ uint16_t i;
+ RTE_SET_USED(test);
+
+ if (opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR) {
+ RTE_ETH_FOREACH_DEV(i) {
+ rte_event_eth_tx_adapter_stop(i);
+ rte_event_eth_tx_adapter_queue_del(i, i, -1);
+ rte_eth_dev_tx_queue_stop(i, 0);
rte_eth_dev_stop(i);
}
}
diff --git a/app/test-eventdev/test_perf_common.h b/app/test-eventdev/test_perf_common.h
index ea0907d61a..e504bb1df9 100644
--- a/app/test-eventdev/test_perf_common.h
+++ b/app/test-eventdev/test_perf_common.h
@@ -12,10 +12,11 @@
#include <rte_cryptodev.h>
#include <rte_cycles.h>
#include <rte_ethdev.h>
-#include <rte_eventdev.h>
#include <rte_event_crypto_adapter.h>
#include <rte_event_eth_rx_adapter.h>
+#include <rte_event_eth_tx_adapter.h>
#include <rte_event_timer_adapter.h>
+#include <rte_eventdev.h>
#include <rte_lcore.h>
#include <rte_malloc.h>
#include <rte_mempool.h>
@@ -181,6 +182,7 @@ void perf_test_destroy(struct evt_test *test, struct evt_options *opt);
void perf_eventdev_destroy(struct evt_test *test, struct evt_options *opt);
void perf_cryptodev_destroy(struct evt_test *test, struct evt_options *opt);
void perf_ethdev_destroy(struct evt_test *test, struct evt_options *opt);
+void perf_ethdev_rx_stop(struct evt_test *test, struct evt_options *opt);
void perf_mempool_destroy(struct evt_test *test, struct evt_options *opt);
#endif /* _TEST_PERF_COMMON_ */
diff --git a/app/test-eventdev/test_perf_queue.c b/app/test-eventdev/test_perf_queue.c
index dcf6d82947..108f1742a7 100644
--- a/app/test-eventdev/test_perf_queue.c
+++ b/app/test-eventdev/test_perf_queue.c
@@ -360,6 +360,7 @@ static const struct evt_test_ops perf_queue = {
.mempool_setup = perf_mempool_setup,
.ethdev_setup = perf_ethdev_setup,
.cryptodev_setup = perf_cryptodev_setup,
+ .ethdev_rx_stop = perf_ethdev_rx_stop,
.eventdev_setup = perf_queue_eventdev_setup,
.launch_lcores = perf_queue_launch_lcores,
.eventdev_destroy = perf_eventdev_destroy,
diff --git a/app/test-eventdev/test_pipeline_atq.c b/app/test-eventdev/test_pipeline_atq.c
index 84dd4f44e3..79218502ba 100644
--- a/app/test-eventdev/test_pipeline_atq.c
+++ b/app/test-eventdev/test_pipeline_atq.c
@@ -772,6 +772,7 @@ static const struct evt_test_ops pipeline_atq = {
.ethdev_setup = pipeline_ethdev_setup,
.eventdev_setup = pipeline_atq_eventdev_setup,
.launch_lcores = pipeline_atq_launch_lcores,
+ .ethdev_rx_stop = pipeline_ethdev_rx_stop,
.eventdev_destroy = pipeline_eventdev_destroy,
.mempool_destroy = pipeline_mempool_destroy,
.ethdev_destroy = pipeline_ethdev_destroy,
diff --git a/app/test-eventdev/test_pipeline_common.c b/app/test-eventdev/test_pipeline_common.c
index ddaa9f3fdb..29b64014d7 100644
--- a/app/test-eventdev/test_pipeline_common.c
+++ b/app/test-eventdev/test_pipeline_common.c
@@ -505,6 +505,22 @@ pipeline_event_tx_adapter_setup(struct evt_options *opt,
return ret;
}
+void
+pipeline_ethdev_rx_stop(struct evt_test *test, struct evt_options *opt)
+{
+ uint16_t i, j;
+ RTE_SET_USED(test);
+
+ if (opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR) {
+ RTE_ETH_FOREACH_DEV(i) {
+ rte_event_eth_rx_adapter_stop(i);
+ rte_event_eth_rx_adapter_queue_del(i, i, -1);
+ for (j = 0; j < opt->eth_queues; j++)
+ rte_eth_dev_rx_queue_stop(i, j);
+ }
+ }
+}
+
void
pipeline_ethdev_destroy(struct evt_test *test, struct evt_options *opt)
{
@@ -513,8 +529,9 @@ pipeline_ethdev_destroy(struct evt_test *test, struct evt_options *opt)
RTE_SET_USED(opt);
RTE_ETH_FOREACH_DEV(i) {
- rte_event_eth_rx_adapter_stop(i);
rte_event_eth_tx_adapter_stop(i);
+ rte_event_eth_tx_adapter_queue_del(i, i, -1);
+ rte_eth_dev_tx_queue_stop(i, 0);
rte_eth_dev_stop(i);
}
}
diff --git a/app/test-eventdev/test_pipeline_common.h b/app/test-eventdev/test_pipeline_common.h
index d69e2f8a3e..c979c33772 100644
--- a/app/test-eventdev/test_pipeline_common.h
+++ b/app/test-eventdev/test_pipeline_common.h
@@ -12,16 +12,16 @@
#include <rte_cycles.h>
#include <rte_ethdev.h>
-#include <rte_eventdev.h>
#include <rte_event_eth_rx_adapter.h>
#include <rte_event_eth_tx_adapter.h>
+#include <rte_eventdev.h>
#include <rte_lcore.h>
#include <rte_malloc.h>
#include <rte_mempool.h>
#include <rte_prefetch.h>
-#include <rte_spinlock.h>
#include <rte_service.h>
#include <rte_service_component.h>
+#include <rte_spinlock.h>
#include "evt_common.h"
#include "evt_options.h"
@@ -186,6 +186,7 @@ void pipeline_opt_dump(struct evt_options *opt, uint8_t nb_queues);
void pipeline_test_destroy(struct evt_test *test, struct evt_options *opt);
void pipeline_eventdev_destroy(struct evt_test *test, struct evt_options *opt);
void pipeline_ethdev_destroy(struct evt_test *test, struct evt_options *opt);
+void pipeline_ethdev_rx_stop(struct evt_test *test, struct evt_options *opt);
void pipeline_mempool_destroy(struct evt_test *test, struct evt_options *opt);
#endif /* _TEST_PIPELINE_COMMON_ */
diff --git a/app/test-eventdev/test_pipeline_queue.c b/app/test-eventdev/test_pipeline_queue.c
index f6cc3e358e..343f8f3b1d 100644
--- a/app/test-eventdev/test_pipeline_queue.c
+++ b/app/test-eventdev/test_pipeline_queue.c
@@ -798,6 +798,7 @@ static const struct evt_test_ops pipeline_queue = {
.ethdev_setup = pipeline_ethdev_setup,
.eventdev_setup = pipeline_queue_eventdev_setup,
.launch_lcores = pipeline_queue_launch_lcores,
+ .ethdev_rx_stop = pipeline_ethdev_rx_stop,
.eventdev_destroy = pipeline_eventdev_destroy,
.mempool_destroy = pipeline_mempool_destroy,
.ethdev_destroy = pipeline_ethdev_destroy,
--
2.25.1
^ permalink raw reply [flat|nested] 17+ messages in thread
* [PATCH v2 2/6] app/eventdev: clean up worker state before exit
2022-05-13 16:07 ` [PATCH v2 " pbhagavatula
@ 2022-05-13 16:07 ` pbhagavatula
2022-05-13 16:07 ` [PATCH v2 3/6] examples/eventdev: " pbhagavatula
` (4 subsequent siblings)
5 siblings, 0 replies; 17+ messages in thread
From: pbhagavatula @ 2022-05-13 16:07 UTC (permalink / raw)
To: jerinj; +Cc: dev, Pavan Nikhilesh
From: Pavan Nikhilesh <pbhagavatula@marvell.com>
Event ports are configured to implicitly release, on the next call to
rte_event_dequeue_burst(), any scheduler contexts they currently hold.
A worker core might still hold a scheduling context during exit, as the
next call to rte_event_dequeue_burst() is never made.
This might lead to a deadlock, depending on the worker exit timing,
especially when there are very few flows.
Add a cleanup function to release any scheduling contexts held by the
worker by using RTE_EVENT_OP_RELEASE.
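A minimal sketch of the idea, mirroring perf_worker_cleanup() in the diff
below: mark every event still held as a release and enqueue the batch so
the port gives up its scheduling contexts.
	for (i = 0; i < nb_deq; i++)
		events[i].op = RTE_EVENT_OP_RELEASE;
	rte_event_enqueue_burst(dev_id, port_id, events, nb_deq);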
Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
Acked-by: Jerin Jacob <jerinj@marvell.com>
---
app/test-eventdev/test_perf_atq.c | 31 +++--
app/test-eventdev/test_perf_common.c | 17 +++
app/test-eventdev/test_perf_common.h | 3 +
app/test-eventdev/test_perf_queue.c | 30 +++--
app/test-eventdev/test_pipeline_atq.c | 134 ++++++++++++---------
app/test-eventdev/test_pipeline_common.c | 39 ++++++
app/test-eventdev/test_pipeline_common.h | 59 ++++++---
app/test-eventdev/test_pipeline_queue.c | 145 ++++++++++++++---------
8 files changed, 304 insertions(+), 154 deletions(-)
diff --git a/app/test-eventdev/test_perf_atq.c b/app/test-eventdev/test_perf_atq.c
index bac3ea602f..5a0b190384 100644
--- a/app/test-eventdev/test_perf_atq.c
+++ b/app/test-eventdev/test_perf_atq.c
@@ -37,13 +37,14 @@ atq_fwd_event(struct rte_event *const ev, uint8_t *const sched_type_list,
static int
perf_atq_worker(void *arg, const int enable_fwd_latency)
{
- PERF_WORKER_INIT;
+ uint16_t enq = 0, deq = 0;
struct rte_event ev;
+ PERF_WORKER_INIT;
while (t->done == false) {
- uint16_t event = rte_event_dequeue_burst(dev, port, &ev, 1, 0);
+ deq = rte_event_dequeue_burst(dev, port, &ev, 1, 0);
- if (!event) {
+ if (!deq) {
rte_pause();
continue;
}
@@ -78,24 +79,29 @@ perf_atq_worker(void *arg, const int enable_fwd_latency)
bufs, sz, cnt);
} else {
atq_fwd_event(&ev, sched_type_list, nb_stages);
- while (rte_event_enqueue_burst(dev, port, &ev, 1) != 1)
- rte_pause();
+ do {
+ enq = rte_event_enqueue_burst(dev, port, &ev,
+ 1);
+ } while (!enq && !t->done);
}
}
+
+ perf_worker_cleanup(pool, dev, port, &ev, enq, deq);
+
return 0;
}
static int
perf_atq_worker_burst(void *arg, const int enable_fwd_latency)
{
- PERF_WORKER_INIT;
- uint16_t i;
/* +1 to avoid prefetch out of array check */
struct rte_event ev[BURST_SIZE + 1];
+ uint16_t enq = 0, nb_rx = 0;
+ PERF_WORKER_INIT;
+ uint16_t i;
while (t->done == false) {
- uint16_t const nb_rx = rte_event_dequeue_burst(dev, port, ev,
- BURST_SIZE, 0);
+ nb_rx = rte_event_dequeue_burst(dev, port, ev, BURST_SIZE, 0);
if (!nb_rx) {
rte_pause();
@@ -146,14 +152,15 @@ perf_atq_worker_burst(void *arg, const int enable_fwd_latency)
}
}
- uint16_t enq;
-
enq = rte_event_enqueue_burst(dev, port, ev, nb_rx);
- while (enq < nb_rx) {
+ while ((enq < nb_rx) && !t->done) {
enq += rte_event_enqueue_burst(dev, port,
ev + enq, nb_rx - enq);
}
}
+
+ perf_worker_cleanup(pool, dev, port, ev, enq, nb_rx);
+
return 0;
}
diff --git a/app/test-eventdev/test_perf_common.c b/app/test-eventdev/test_perf_common.c
index 4cf16b4267..b51a100425 100644
--- a/app/test-eventdev/test_perf_common.c
+++ b/app/test-eventdev/test_perf_common.c
@@ -985,6 +985,23 @@ perf_opt_dump(struct evt_options *opt, uint8_t nb_queues)
evt_dump("prod_enq_burst_sz", "%d", opt->prod_enq_burst_sz);
}
+void
+perf_worker_cleanup(struct rte_mempool *const pool, uint8_t dev_id,
+ uint8_t port_id, struct rte_event events[], uint16_t nb_enq,
+ uint16_t nb_deq)
+{
+ int i;
+
+ if (nb_deq) {
+ for (i = nb_enq; i < nb_deq; i++)
+ rte_mempool_put(pool, events[i].event_ptr);
+
+ for (i = 0; i < nb_deq; i++)
+ events[i].op = RTE_EVENT_OP_RELEASE;
+ rte_event_enqueue_burst(dev_id, port_id, events, nb_deq);
+ }
+}
+
void
perf_eventdev_destroy(struct evt_test *test, struct evt_options *opt)
{
diff --git a/app/test-eventdev/test_perf_common.h b/app/test-eventdev/test_perf_common.h
index e504bb1df9..f6bfc73be0 100644
--- a/app/test-eventdev/test_perf_common.h
+++ b/app/test-eventdev/test_perf_common.h
@@ -184,5 +184,8 @@ void perf_cryptodev_destroy(struct evt_test *test, struct evt_options *opt);
void perf_ethdev_destroy(struct evt_test *test, struct evt_options *opt);
void perf_ethdev_rx_stop(struct evt_test *test, struct evt_options *opt);
void perf_mempool_destroy(struct evt_test *test, struct evt_options *opt);
+void perf_worker_cleanup(struct rte_mempool *const pool, uint8_t dev_id,
+ uint8_t port_id, struct rte_event events[],
+ uint16_t nb_enq, uint16_t nb_deq);
#endif /* _TEST_PERF_COMMON_ */
diff --git a/app/test-eventdev/test_perf_queue.c b/app/test-eventdev/test_perf_queue.c
index 108f1742a7..b498cacef6 100644
--- a/app/test-eventdev/test_perf_queue.c
+++ b/app/test-eventdev/test_perf_queue.c
@@ -39,13 +39,14 @@ fwd_event(struct rte_event *const ev, uint8_t *const sched_type_list,
static int
perf_queue_worker(void *arg, const int enable_fwd_latency)
{
- PERF_WORKER_INIT;
+ uint16_t enq = 0, deq = 0;
struct rte_event ev;
+ PERF_WORKER_INIT;
while (t->done == false) {
- uint16_t event = rte_event_dequeue_burst(dev, port, &ev, 1, 0);
+ deq = rte_event_dequeue_burst(dev, port, &ev, 1, 0);
- if (!event) {
+ if (!deq) {
rte_pause();
continue;
}
@@ -80,24 +81,29 @@ perf_queue_worker(void *arg, const int enable_fwd_latency)
&ev, w, bufs, sz, cnt);
} else {
fwd_event(&ev, sched_type_list, nb_stages);
- while (rte_event_enqueue_burst(dev, port, &ev, 1) != 1)
- rte_pause();
+ do {
+ enq = rte_event_enqueue_burst(dev, port, &ev,
+ 1);
+ } while (!enq && !t->done);
}
}
+
+ perf_worker_cleanup(pool, dev, port, &ev, enq, deq);
+
return 0;
}
static int
perf_queue_worker_burst(void *arg, const int enable_fwd_latency)
{
- PERF_WORKER_INIT;
- uint16_t i;
/* +1 to avoid prefetch out of array check */
struct rte_event ev[BURST_SIZE + 1];
+ uint16_t enq = 0, nb_rx = 0;
+ PERF_WORKER_INIT;
+ uint16_t i;
while (t->done == false) {
- uint16_t const nb_rx = rte_event_dequeue_burst(dev, port, ev,
- BURST_SIZE, 0);
+ nb_rx = rte_event_dequeue_burst(dev, port, ev, BURST_SIZE, 0);
if (!nb_rx) {
rte_pause();
@@ -147,14 +153,16 @@ perf_queue_worker_burst(void *arg, const int enable_fwd_latency)
}
}
- uint16_t enq;
enq = rte_event_enqueue_burst(dev, port, ev, nb_rx);
- while (enq < nb_rx) {
+ while (enq < nb_rx && !t->done) {
enq += rte_event_enqueue_burst(dev, port,
ev + enq, nb_rx - enq);
}
}
+
+ perf_worker_cleanup(pool, dev, port, ev, enq, nb_rx);
+
return 0;
}
diff --git a/app/test-eventdev/test_pipeline_atq.c b/app/test-eventdev/test_pipeline_atq.c
index 79218502ba..4b10197127 100644
--- a/app/test-eventdev/test_pipeline_atq.c
+++ b/app/test-eventdev/test_pipeline_atq.c
@@ -21,18 +21,20 @@ static __rte_noinline int
pipeline_atq_worker_single_stage_tx(void *arg)
{
PIPELINE_WORKER_SINGLE_STAGE_INIT;
+ uint8_t enq = 0, deq = 0;
while (t->done == false) {
- uint16_t event = rte_event_dequeue_burst(dev, port, &ev, 1, 0);
+ deq = rte_event_dequeue_burst(dev, port, &ev, 1, 0);
- if (!event) {
+ if (!deq) {
rte_pause();
continue;
}
- pipeline_event_tx(dev, port, &ev);
+ deq = pipeline_event_tx(dev, port, &ev, t);
w->processed_pkts++;
}
+ pipeline_worker_cleanup(dev, port, &ev, enq, deq);
return 0;
}
@@ -42,20 +44,22 @@ pipeline_atq_worker_single_stage_fwd(void *arg)
{
PIPELINE_WORKER_SINGLE_STAGE_INIT;
const uint8_t *tx_queue = t->tx_evqueue_id;
+ uint8_t enq = 0, deq = 0;
while (t->done == false) {
- uint16_t event = rte_event_dequeue_burst(dev, port, &ev, 1, 0);
+ deq = rte_event_dequeue_burst(dev, port, &ev, 1, 0);
- if (!event) {
+ if (!deq) {
rte_pause();
continue;
}
ev.queue_id = tx_queue[ev.mbuf->port];
pipeline_fwd_event(&ev, RTE_SCHED_TYPE_ATOMIC);
- pipeline_event_enqueue(dev, port, &ev);
+ enq = pipeline_event_enqueue(dev, port, &ev, t);
w->processed_pkts++;
}
+ pipeline_worker_cleanup(dev, port, &ev, enq, deq);
return 0;
}
@@ -64,10 +68,10 @@ static __rte_noinline int
pipeline_atq_worker_single_stage_burst_tx(void *arg)
{
PIPELINE_WORKER_SINGLE_STAGE_BURST_INIT;
+ uint16_t nb_rx = 0, nb_tx = 0;
while (t->done == false) {
- uint16_t nb_rx = rte_event_dequeue_burst(dev, port, ev,
- BURST_SIZE, 0);
+ nb_rx = rte_event_dequeue_burst(dev, port, ev, BURST_SIZE, 0);
if (!nb_rx) {
rte_pause();
@@ -79,9 +83,10 @@ pipeline_atq_worker_single_stage_burst_tx(void *arg)
rte_event_eth_tx_adapter_txq_set(ev[i].mbuf, 0);
}
- pipeline_event_tx_burst(dev, port, ev, nb_rx);
- w->processed_pkts += nb_rx;
+ nb_tx = pipeline_event_tx_burst(dev, port, ev, nb_rx, t);
+ w->processed_pkts += nb_tx;
}
+ pipeline_worker_cleanup(dev, port, ev, nb_tx, nb_rx);
return 0;
}
@@ -91,10 +96,10 @@ pipeline_atq_worker_single_stage_burst_fwd(void *arg)
{
PIPELINE_WORKER_SINGLE_STAGE_BURST_INIT;
const uint8_t *tx_queue = t->tx_evqueue_id;
+ uint16_t nb_rx = 0, nb_tx = 0;
while (t->done == false) {
- uint16_t nb_rx = rte_event_dequeue_burst(dev, port, ev,
- BURST_SIZE, 0);
+ nb_rx = rte_event_dequeue_burst(dev, port, ev, BURST_SIZE, 0);
if (!nb_rx) {
rte_pause();
@@ -108,9 +113,10 @@ pipeline_atq_worker_single_stage_burst_fwd(void *arg)
pipeline_fwd_event(&ev[i], RTE_SCHED_TYPE_ATOMIC);
}
- pipeline_event_enqueue_burst(dev, port, ev, nb_rx);
- w->processed_pkts += nb_rx;
+ nb_tx = pipeline_event_enqueue_burst(dev, port, ev, nb_rx, t);
+ w->processed_pkts += nb_tx;
}
+ pipeline_worker_cleanup(dev, port, ev, nb_tx, nb_rx);
return 0;
}
@@ -119,19 +125,21 @@ static __rte_noinline int
pipeline_atq_worker_single_stage_tx_vector(void *arg)
{
PIPELINE_WORKER_SINGLE_STAGE_INIT;
+ uint8_t enq = 0, deq = 0;
uint16_t vector_sz;
while (!t->done) {
- uint16_t event = rte_event_dequeue_burst(dev, port, &ev, 1, 0);
+ deq = rte_event_dequeue_burst(dev, port, &ev, 1, 0);
- if (!event) {
+ if (!deq) {
rte_pause();
continue;
}
vector_sz = ev.vec->nb_elem;
- pipeline_event_tx_vector(dev, port, &ev);
+ enq = pipeline_event_tx_vector(dev, port, &ev, t);
w->processed_pkts += vector_sz;
}
+ pipeline_worker_cleanup(dev, port, &ev, enq, deq);
return 0;
}
@@ -141,12 +149,13 @@ pipeline_atq_worker_single_stage_fwd_vector(void *arg)
{
PIPELINE_WORKER_SINGLE_STAGE_INIT;
const uint8_t *tx_queue = t->tx_evqueue_id;
+ uint8_t enq = 0, deq = 0;
uint16_t vector_sz;
while (!t->done) {
- uint16_t event = rte_event_dequeue_burst(dev, port, &ev, 1, 0);
+ deq = rte_event_dequeue_burst(dev, port, &ev, 1, 0);
- if (!event) {
+ if (!deq) {
rte_pause();
continue;
}
@@ -155,9 +164,10 @@ pipeline_atq_worker_single_stage_fwd_vector(void *arg)
ev.queue_id = tx_queue[ev.vec->port];
ev.vec->queue = 0;
pipeline_fwd_event_vector(&ev, RTE_SCHED_TYPE_ATOMIC);
- pipeline_event_enqueue(dev, port, &ev);
+ enq = pipeline_event_enqueue(dev, port, &ev, t);
w->processed_pkts += vector_sz;
}
+ pipeline_worker_cleanup(dev, port, &ev, enq, deq);
return 0;
}
@@ -166,11 +176,11 @@ static __rte_noinline int
pipeline_atq_worker_single_stage_burst_tx_vector(void *arg)
{
PIPELINE_WORKER_SINGLE_STAGE_BURST_INIT;
+ uint16_t nb_rx = 0, nb_tx = 0;
uint16_t vector_sz;
while (!t->done) {
- uint16_t nb_rx =
- rte_event_dequeue_burst(dev, port, ev, BURST_SIZE, 0);
+ nb_rx = rte_event_dequeue_burst(dev, port, ev, BURST_SIZE, 0);
if (!nb_rx) {
rte_pause();
@@ -182,9 +192,10 @@ pipeline_atq_worker_single_stage_burst_tx_vector(void *arg)
ev[i].vec->queue = 0;
}
- pipeline_event_tx_burst(dev, port, ev, nb_rx);
+ nb_tx = pipeline_event_tx_burst(dev, port, ev, nb_rx, t);
w->processed_pkts += vector_sz;
}
+ pipeline_worker_cleanup(dev, port, ev, nb_tx, nb_rx);
return 0;
}
@@ -194,11 +205,11 @@ pipeline_atq_worker_single_stage_burst_fwd_vector(void *arg)
{
PIPELINE_WORKER_SINGLE_STAGE_BURST_INIT;
const uint8_t *tx_queue = t->tx_evqueue_id;
+ uint16_t nb_rx = 0, nb_tx = 0;
uint16_t vector_sz;
while (!t->done) {
- uint16_t nb_rx =
- rte_event_dequeue_burst(dev, port, ev, BURST_SIZE, 0);
+ nb_rx = rte_event_dequeue_burst(dev, port, ev, BURST_SIZE, 0);
if (!nb_rx) {
rte_pause();
@@ -214,9 +225,10 @@ pipeline_atq_worker_single_stage_burst_fwd_vector(void *arg)
RTE_SCHED_TYPE_ATOMIC);
}
- pipeline_event_enqueue_burst(dev, port, ev, nb_rx);
+ nb_tx = pipeline_event_enqueue_burst(dev, port, ev, nb_rx, t);
w->processed_pkts += vector_sz;
}
+ pipeline_worker_cleanup(dev, port, ev, nb_tx, nb_rx);
return 0;
}
@@ -225,11 +237,12 @@ static __rte_noinline int
pipeline_atq_worker_multi_stage_tx(void *arg)
{
PIPELINE_WORKER_MULTI_STAGE_INIT;
+ uint8_t enq = 0, deq = 0;
while (t->done == false) {
- uint16_t event = rte_event_dequeue_burst(dev, port, &ev, 1, 0);
+ deq = rte_event_dequeue_burst(dev, port, &ev, 1, 0);
- if (!event) {
+ if (!deq) {
rte_pause();
continue;
}
@@ -237,15 +250,16 @@ pipeline_atq_worker_multi_stage_tx(void *arg)
cq_id = ev.sub_event_type % nb_stages;
if (cq_id == last_queue) {
- pipeline_event_tx(dev, port, &ev);
+ enq = pipeline_event_tx(dev, port, &ev, t);
w->processed_pkts++;
continue;
}
ev.sub_event_type++;
pipeline_fwd_event(&ev, sched_type_list[cq_id]);
- pipeline_event_enqueue(dev, port, &ev);
+ enq = pipeline_event_enqueue(dev, port, &ev, t);
}
+ pipeline_worker_cleanup(dev, port, &ev, enq, deq);
return 0;
}
@@ -255,11 +269,12 @@ pipeline_atq_worker_multi_stage_fwd(void *arg)
{
PIPELINE_WORKER_MULTI_STAGE_INIT;
const uint8_t *tx_queue = t->tx_evqueue_id;
+ uint8_t enq = 0, deq = 0;
while (t->done == false) {
- uint16_t event = rte_event_dequeue_burst(dev, port, &ev, 1, 0);
+ deq = rte_event_dequeue_burst(dev, port, &ev, 1, 0);
- if (!event) {
+ if (!deq) {
rte_pause();
continue;
}
@@ -275,8 +290,9 @@ pipeline_atq_worker_multi_stage_fwd(void *arg)
pipeline_fwd_event(&ev, sched_type_list[cq_id]);
}
- pipeline_event_enqueue(dev, port, &ev);
+ enq = pipeline_event_enqueue(dev, port, &ev, t);
}
+ pipeline_worker_cleanup(dev, port, &ev, enq, deq);
return 0;
}
@@ -285,10 +301,10 @@ static __rte_noinline int
pipeline_atq_worker_multi_stage_burst_tx(void *arg)
{
PIPELINE_WORKER_MULTI_STAGE_BURST_INIT;
+ uint16_t nb_rx = 0, nb_tx = 0;
while (t->done == false) {
- uint16_t nb_rx = rte_event_dequeue_burst(dev, port, ev,
- BURST_SIZE, 0);
+ nb_rx = rte_event_dequeue_burst(dev, port, ev, BURST_SIZE, 0);
if (!nb_rx) {
rte_pause();
@@ -300,7 +316,7 @@ pipeline_atq_worker_multi_stage_burst_tx(void *arg)
cq_id = ev[i].sub_event_type % nb_stages;
if (cq_id == last_queue) {
- pipeline_event_tx(dev, port, &ev[i]);
+ pipeline_event_tx(dev, port, &ev[i], t);
ev[i].op = RTE_EVENT_OP_RELEASE;
w->processed_pkts++;
continue;
@@ -310,8 +326,9 @@ pipeline_atq_worker_multi_stage_burst_tx(void *arg)
pipeline_fwd_event(&ev[i], sched_type_list[cq_id]);
}
- pipeline_event_enqueue_burst(dev, port, ev, nb_rx);
+ nb_tx = pipeline_event_enqueue_burst(dev, port, ev, nb_rx, t);
}
+ pipeline_worker_cleanup(dev, port, ev, nb_tx, nb_rx);
return 0;
}
@@ -321,10 +338,10 @@ pipeline_atq_worker_multi_stage_burst_fwd(void *arg)
{
PIPELINE_WORKER_MULTI_STAGE_BURST_INIT;
const uint8_t *tx_queue = t->tx_evqueue_id;
+ uint16_t nb_rx = 0, nb_tx = 0;
while (t->done == false) {
- uint16_t nb_rx = rte_event_dequeue_burst(dev, port, ev,
- BURST_SIZE, 0);
+ nb_rx = rte_event_dequeue_burst(dev, port, ev, BURST_SIZE, 0);
if (!nb_rx) {
rte_pause();
@@ -347,8 +364,9 @@ pipeline_atq_worker_multi_stage_burst_fwd(void *arg)
}
}
- pipeline_event_enqueue_burst(dev, port, ev, nb_rx);
+ nb_tx = pipeline_event_enqueue_burst(dev, port, ev, nb_rx, t);
}
+ pipeline_worker_cleanup(dev, port, ev, nb_tx, nb_rx);
return 0;
}
@@ -357,12 +375,13 @@ static __rte_noinline int
pipeline_atq_worker_multi_stage_tx_vector(void *arg)
{
PIPELINE_WORKER_MULTI_STAGE_INIT;
+ uint8_t enq = 0, deq = 0;
uint16_t vector_sz;
while (!t->done) {
- uint16_t event = rte_event_dequeue_burst(dev, port, &ev, 1, 0);
+ deq = rte_event_dequeue_burst(dev, port, &ev, 1, 0);
- if (!event) {
+ if (!deq) {
rte_pause();
continue;
}
@@ -371,15 +390,16 @@ pipeline_atq_worker_multi_stage_tx_vector(void *arg)
if (cq_id == last_queue) {
vector_sz = ev.vec->nb_elem;
- pipeline_event_tx_vector(dev, port, &ev);
+ enq = pipeline_event_tx_vector(dev, port, &ev, t);
w->processed_pkts += vector_sz;
continue;
}
ev.sub_event_type++;
pipeline_fwd_event_vector(&ev, sched_type_list[cq_id]);
- pipeline_event_enqueue(dev, port, &ev);
+ enq = pipeline_event_enqueue(dev, port, &ev, t);
}
+ pipeline_worker_cleanup(dev, port, &ev, enq, deq);
return 0;
}
@@ -389,12 +409,13 @@ pipeline_atq_worker_multi_stage_fwd_vector(void *arg)
{
PIPELINE_WORKER_MULTI_STAGE_INIT;
const uint8_t *tx_queue = t->tx_evqueue_id;
+ uint8_t enq = 0, deq = 0;
uint16_t vector_sz;
while (!t->done) {
- uint16_t event = rte_event_dequeue_burst(dev, port, &ev, 1, 0);
+ deq = rte_event_dequeue_burst(dev, port, &ev, 1, 0);
- if (!event) {
+ if (!deq) {
rte_pause();
continue;
}
@@ -406,14 +427,15 @@ pipeline_atq_worker_multi_stage_fwd_vector(void *arg)
ev.vec->queue = 0;
vector_sz = ev.vec->nb_elem;
pipeline_fwd_event_vector(&ev, RTE_SCHED_TYPE_ATOMIC);
- pipeline_event_enqueue(dev, port, &ev);
+ enq = pipeline_event_enqueue(dev, port, &ev, t);
w->processed_pkts += vector_sz;
} else {
ev.sub_event_type++;
pipeline_fwd_event_vector(&ev, sched_type_list[cq_id]);
- pipeline_event_enqueue(dev, port, &ev);
+ enq = pipeline_event_enqueue(dev, port, &ev, t);
}
}
+ pipeline_worker_cleanup(dev, port, &ev, enq, deq);
return 0;
}
@@ -422,11 +444,11 @@ static __rte_noinline int
pipeline_atq_worker_multi_stage_burst_tx_vector(void *arg)
{
PIPELINE_WORKER_MULTI_STAGE_BURST_INIT;
+ uint16_t nb_rx = 0, nb_tx = 0;
uint16_t vector_sz;
while (!t->done) {
- uint16_t nb_rx =
- rte_event_dequeue_burst(dev, port, ev, BURST_SIZE, 0);
+ nb_rx = rte_event_dequeue_burst(dev, port, ev, BURST_SIZE, 0);
if (!nb_rx) {
rte_pause();
@@ -438,7 +460,7 @@ pipeline_atq_worker_multi_stage_burst_tx_vector(void *arg)
if (cq_id == last_queue) {
vector_sz = ev[i].vec->nb_elem;
- pipeline_event_tx_vector(dev, port, &ev[i]);
+ pipeline_event_tx_vector(dev, port, &ev[i], t);
ev[i].op = RTE_EVENT_OP_RELEASE;
w->processed_pkts += vector_sz;
continue;
@@ -449,8 +471,9 @@ pipeline_atq_worker_multi_stage_burst_tx_vector(void *arg)
sched_type_list[cq_id]);
}
- pipeline_event_enqueue_burst(dev, port, ev, nb_rx);
+ nb_tx = pipeline_event_enqueue_burst(dev, port, ev, nb_rx, t);
}
+ pipeline_worker_cleanup(dev, port, ev, nb_tx, nb_rx);
return 0;
}
@@ -460,11 +483,11 @@ pipeline_atq_worker_multi_stage_burst_fwd_vector(void *arg)
{
PIPELINE_WORKER_MULTI_STAGE_BURST_INIT;
const uint8_t *tx_queue = t->tx_evqueue_id;
+ uint16_t nb_rx = 0, nb_tx = 0;
uint16_t vector_sz;
while (!t->done) {
- uint16_t nb_rx =
- rte_event_dequeue_burst(dev, port, ev, BURST_SIZE, 0);
+ nb_rx = rte_event_dequeue_burst(dev, port, ev, BURST_SIZE, 0);
if (!nb_rx) {
rte_pause();
@@ -488,8 +511,9 @@ pipeline_atq_worker_multi_stage_burst_fwd_vector(void *arg)
}
}
- pipeline_event_enqueue_burst(dev, port, ev, nb_rx);
+ nb_tx = pipeline_event_enqueue_burst(dev, port, ev, nb_rx, t);
}
+ pipeline_worker_cleanup(dev, port, ev, nb_tx, nb_rx);
return 0;
}
diff --git a/app/test-eventdev/test_pipeline_common.c b/app/test-eventdev/test_pipeline_common.c
index 29b64014d7..d8e80903b2 100644
--- a/app/test-eventdev/test_pipeline_common.c
+++ b/app/test-eventdev/test_pipeline_common.c
@@ -505,6 +505,45 @@ pipeline_event_tx_adapter_setup(struct evt_options *opt,
return ret;
}
+static void
+pipeline_vector_array_free(struct rte_event events[], uint16_t num)
+{
+ uint16_t i;
+
+ for (i = 0; i < num; i++) {
+ rte_pktmbuf_free_bulk(events[i].vec->mbufs,
+ events[i].vec->nb_elem);
+ rte_mempool_put(rte_mempool_from_obj(events[i].vec),
+ events[i].vec);
+ }
+}
+
+void
+pipeline_worker_cleanup(uint8_t dev, uint8_t port, struct rte_event ev[],
+ uint16_t enq, uint16_t deq)
+{
+ int i;
+
+ if (!(deq - enq))
+ return;
+
+ if (deq) {
+ for (i = enq; i < deq; i++) {
+ if (ev[i].op == RTE_EVENT_OP_RELEASE)
+ continue;
+ if (ev[i].event_type & RTE_EVENT_TYPE_VECTOR)
+ pipeline_vector_array_free(&ev[i], 1);
+ else
+ rte_pktmbuf_free(ev[i].mbuf);
+ }
+
+ for (i = 0; i < deq; i++)
+ ev[i].op = RTE_EVENT_OP_RELEASE;
+
+ rte_event_enqueue_burst(dev, port, ev, deq);
+ }
+}
+
void
pipeline_ethdev_rx_stop(struct evt_test *test, struct evt_options *opt)
{
diff --git a/app/test-eventdev/test_pipeline_common.h b/app/test-eventdev/test_pipeline_common.h
index c979c33772..a6443faea4 100644
--- a/app/test-eventdev/test_pipeline_common.h
+++ b/app/test-eventdev/test_pipeline_common.h
@@ -109,59 +109,80 @@ pipeline_fwd_event_vector(struct rte_event *ev, uint8_t sched)
ev->sched_type = sched;
}
-static __rte_always_inline void
+static __rte_always_inline uint8_t
pipeline_event_tx(const uint8_t dev, const uint8_t port,
- struct rte_event * const ev)
+ struct rte_event *const ev, struct test_pipeline *t)
{
+ uint8_t enq;
+
rte_event_eth_tx_adapter_txq_set(ev->mbuf, 0);
- while (!rte_event_eth_tx_adapter_enqueue(dev, port, ev, 1, 0))
- rte_pause();
+ do {
+ enq = rte_event_eth_tx_adapter_enqueue(dev, port, ev, 1, 0);
+ } while (!enq && !t->done);
+
+ return enq;
}
-static __rte_always_inline void
+static __rte_always_inline uint8_t
pipeline_event_tx_vector(const uint8_t dev, const uint8_t port,
- struct rte_event *const ev)
+ struct rte_event *const ev, struct test_pipeline *t)
{
+ uint8_t enq;
+
ev->vec->queue = 0;
+ do {
+ enq = rte_event_eth_tx_adapter_enqueue(dev, port, ev, 1, 0);
+ } while (!enq && !t->done);
- while (!rte_event_eth_tx_adapter_enqueue(dev, port, ev, 1, 0))
- rte_pause();
+ return enq;
}
-static __rte_always_inline void
+static __rte_always_inline uint16_t
pipeline_event_tx_burst(const uint8_t dev, const uint8_t port,
- struct rte_event *ev, const uint16_t nb_rx)
+ struct rte_event *ev, const uint16_t nb_rx,
+ struct test_pipeline *t)
{
uint16_t enq;
enq = rte_event_eth_tx_adapter_enqueue(dev, port, ev, nb_rx, 0);
- while (enq < nb_rx) {
+ while (enq < nb_rx && !t->done) {
enq += rte_event_eth_tx_adapter_enqueue(dev, port,
ev + enq, nb_rx - enq, 0);
}
+
+ return enq;
}
-static __rte_always_inline void
+static __rte_always_inline uint8_t
pipeline_event_enqueue(const uint8_t dev, const uint8_t port,
- struct rte_event *ev)
+ struct rte_event *ev, struct test_pipeline *t)
{
- while (rte_event_enqueue_burst(dev, port, ev, 1) != 1)
- rte_pause();
+ uint8_t enq;
+
+ do {
+ enq = rte_event_enqueue_burst(dev, port, ev, 1);
+ } while (!enq && !t->done);
+
+ return enq;
}
-static __rte_always_inline void
+static __rte_always_inline uint16_t
pipeline_event_enqueue_burst(const uint8_t dev, const uint8_t port,
- struct rte_event *ev, const uint16_t nb_rx)
+ struct rte_event *ev, const uint16_t nb_rx,
+ struct test_pipeline *t)
{
uint16_t enq;
enq = rte_event_enqueue_burst(dev, port, ev, nb_rx);
- while (enq < nb_rx) {
+ while (enq < nb_rx && !t->done) {
enq += rte_event_enqueue_burst(dev, port,
ev + enq, nb_rx - enq);
}
+
+ return enq;
}
+
static inline int
pipeline_nb_event_ports(struct evt_options *opt)
{
@@ -188,5 +209,7 @@ void pipeline_eventdev_destroy(struct evt_test *test, struct evt_options *opt);
void pipeline_ethdev_destroy(struct evt_test *test, struct evt_options *opt);
void pipeline_ethdev_rx_stop(struct evt_test *test, struct evt_options *opt);
void pipeline_mempool_destroy(struct evt_test *test, struct evt_options *opt);
+void pipeline_worker_cleanup(uint8_t dev, uint8_t port, struct rte_event ev[],
+ uint16_t enq, uint16_t deq);
#endif /* _TEST_PIPELINE_COMMON_ */
diff --git a/app/test-eventdev/test_pipeline_queue.c b/app/test-eventdev/test_pipeline_queue.c
index 343f8f3b1d..e989396474 100644
--- a/app/test-eventdev/test_pipeline_queue.c
+++ b/app/test-eventdev/test_pipeline_queue.c
@@ -21,24 +21,27 @@ static __rte_noinline int
pipeline_queue_worker_single_stage_tx(void *arg)
{
PIPELINE_WORKER_SINGLE_STAGE_INIT;
+ uint8_t enq = 0, deq = 0;
while (t->done == false) {
- uint16_t event = rte_event_dequeue_burst(dev, port, &ev, 1, 0);
+ deq = rte_event_dequeue_burst(dev, port, &ev, 1, 0);
- if (!event) {
+ if (!deq) {
rte_pause();
continue;
}
if (ev.sched_type == RTE_SCHED_TYPE_ATOMIC) {
- pipeline_event_tx(dev, port, &ev);
+ enq = pipeline_event_tx(dev, port, &ev, t);
+ ev.op = RTE_EVENT_OP_RELEASE;
w->processed_pkts++;
} else {
ev.queue_id++;
pipeline_fwd_event(&ev, RTE_SCHED_TYPE_ATOMIC);
- pipeline_event_enqueue(dev, port, &ev);
+ enq = pipeline_event_enqueue(dev, port, &ev, t);
}
}
+ pipeline_worker_cleanup(dev, port, &ev, enq, deq);
return 0;
}
@@ -48,11 +51,12 @@ pipeline_queue_worker_single_stage_fwd(void *arg)
{
PIPELINE_WORKER_SINGLE_STAGE_INIT;
const uint8_t *tx_queue = t->tx_evqueue_id;
+ uint8_t enq = 0, deq = 0;
while (t->done == false) {
- uint16_t event = rte_event_dequeue_burst(dev, port, &ev, 1, 0);
+ deq = rte_event_dequeue_burst(dev, port, &ev, 1, 0);
- if (!event) {
+ if (!deq) {
rte_pause();
continue;
}
@@ -60,9 +64,10 @@ pipeline_queue_worker_single_stage_fwd(void *arg)
ev.queue_id = tx_queue[ev.mbuf->port];
rte_event_eth_tx_adapter_txq_set(ev.mbuf, 0);
pipeline_fwd_event(&ev, RTE_SCHED_TYPE_ATOMIC);
- pipeline_event_enqueue(dev, port, &ev);
+ enq = pipeline_event_enqueue(dev, port, &ev, t);
w->processed_pkts++;
}
+ pipeline_worker_cleanup(dev, port, &ev, enq, deq);
return 0;
}
@@ -71,10 +76,10 @@ static __rte_noinline int
pipeline_queue_worker_single_stage_burst_tx(void *arg)
{
PIPELINE_WORKER_SINGLE_STAGE_BURST_INIT;
+ uint16_t nb_rx = 0, nb_tx = 0;
while (t->done == false) {
- uint16_t nb_rx = rte_event_dequeue_burst(dev, port, ev,
- BURST_SIZE, 0);
+ nb_rx = rte_event_dequeue_burst(dev, port, ev, BURST_SIZE, 0);
if (!nb_rx) {
rte_pause();
@@ -84,17 +89,18 @@ pipeline_queue_worker_single_stage_burst_tx(void *arg)
for (i = 0; i < nb_rx; i++) {
rte_prefetch0(ev[i + 1].mbuf);
if (ev[i].sched_type == RTE_SCHED_TYPE_ATOMIC) {
- pipeline_event_tx(dev, port, &ev[i]);
+ pipeline_event_tx(dev, port, &ev[i], t);
+ ev[i].op = RTE_EVENT_OP_RELEASE;
w->processed_pkts++;
} else {
ev[i].queue_id++;
pipeline_fwd_event(&ev[i],
RTE_SCHED_TYPE_ATOMIC);
- pipeline_event_enqueue_burst(dev, port, ev,
- nb_rx);
}
}
+ nb_tx = pipeline_event_enqueue_burst(dev, port, ev, nb_rx, t);
}
+ pipeline_worker_cleanup(dev, port, ev, nb_tx, nb_rx);
return 0;
}
@@ -104,10 +110,10 @@ pipeline_queue_worker_single_stage_burst_fwd(void *arg)
{
PIPELINE_WORKER_SINGLE_STAGE_BURST_INIT;
const uint8_t *tx_queue = t->tx_evqueue_id;
+ uint16_t nb_rx = 0, nb_tx = 0;
while (t->done == false) {
- uint16_t nb_rx = rte_event_dequeue_burst(dev, port, ev,
- BURST_SIZE, 0);
+ nb_rx = rte_event_dequeue_burst(dev, port, ev, BURST_SIZE, 0);
if (!nb_rx) {
rte_pause();
@@ -121,9 +127,10 @@ pipeline_queue_worker_single_stage_burst_fwd(void *arg)
pipeline_fwd_event(&ev[i], RTE_SCHED_TYPE_ATOMIC);
}
- pipeline_event_enqueue_burst(dev, port, ev, nb_rx);
+ nb_tx = pipeline_event_enqueue_burst(dev, port, ev, nb_rx, t);
w->processed_pkts += nb_rx;
}
+ pipeline_worker_cleanup(dev, port, ev, nb_tx, nb_rx);
return 0;
}
@@ -132,26 +139,29 @@ static __rte_noinline int
pipeline_queue_worker_single_stage_tx_vector(void *arg)
{
PIPELINE_WORKER_SINGLE_STAGE_INIT;
+ uint8_t enq = 0, deq = 0;
uint16_t vector_sz;
while (!t->done) {
- uint16_t event = rte_event_dequeue_burst(dev, port, &ev, 1, 0);
+ deq = rte_event_dequeue_burst(dev, port, &ev, 1, 0);
- if (!event) {
+ if (!deq) {
rte_pause();
continue;
}
if (ev.sched_type == RTE_SCHED_TYPE_ATOMIC) {
vector_sz = ev.vec->nb_elem;
- pipeline_event_tx_vector(dev, port, &ev);
+ enq = pipeline_event_tx_vector(dev, port, &ev, t);
+ ev.op = RTE_EVENT_OP_RELEASE;
w->processed_pkts += vector_sz;
} else {
ev.queue_id++;
pipeline_fwd_event_vector(&ev, RTE_SCHED_TYPE_ATOMIC);
- pipeline_event_enqueue(dev, port, &ev);
+ enq = pipeline_event_enqueue(dev, port, &ev, t);
}
}
+ pipeline_worker_cleanup(dev, port, &ev, enq, deq);
return 0;
}
@@ -161,12 +171,13 @@ pipeline_queue_worker_single_stage_fwd_vector(void *arg)
{
PIPELINE_WORKER_SINGLE_STAGE_INIT;
const uint8_t *tx_queue = t->tx_evqueue_id;
+ uint8_t enq = 0, deq = 0;
uint16_t vector_sz;
while (!t->done) {
- uint16_t event = rte_event_dequeue_burst(dev, port, &ev, 1, 0);
+ deq = rte_event_dequeue_burst(dev, port, &ev, 1, 0);
- if (!event) {
+ if (!deq) {
rte_pause();
continue;
}
@@ -175,9 +186,10 @@ pipeline_queue_worker_single_stage_fwd_vector(void *arg)
ev.vec->queue = 0;
vector_sz = ev.vec->nb_elem;
pipeline_fwd_event_vector(&ev, RTE_SCHED_TYPE_ATOMIC);
- pipeline_event_enqueue(dev, port, &ev);
+ enq = pipeline_event_enqueue(dev, port, &ev, t);
w->processed_pkts += vector_sz;
}
+ pipeline_worker_cleanup(dev, port, &ev, enq, deq);
return 0;
}
@@ -186,11 +198,11 @@ static __rte_noinline int
pipeline_queue_worker_single_stage_burst_tx_vector(void *arg)
{
PIPELINE_WORKER_SINGLE_STAGE_BURST_INIT;
+ uint16_t nb_rx = 0, nb_tx = 0;
uint16_t vector_sz;
while (!t->done) {
- uint16_t nb_rx =
- rte_event_dequeue_burst(dev, port, ev, BURST_SIZE, 0);
+ nb_rx = rte_event_dequeue_burst(dev, port, ev, BURST_SIZE, 0);
if (!nb_rx) {
rte_pause();
@@ -200,7 +212,7 @@ pipeline_queue_worker_single_stage_burst_tx_vector(void *arg)
for (i = 0; i < nb_rx; i++) {
if (ev[i].sched_type == RTE_SCHED_TYPE_ATOMIC) {
vector_sz = ev[i].vec->nb_elem;
- pipeline_event_tx_vector(dev, port, &ev[i]);
+ pipeline_event_tx_vector(dev, port, &ev[i], t);
ev[i].op = RTE_EVENT_OP_RELEASE;
w->processed_pkts += vector_sz;
} else {
@@ -210,8 +222,9 @@ pipeline_queue_worker_single_stage_burst_tx_vector(void *arg)
}
}
- pipeline_event_enqueue_burst(dev, port, ev, nb_rx);
+ nb_tx = pipeline_event_enqueue_burst(dev, port, ev, nb_rx, t);
}
+ pipeline_worker_cleanup(dev, port, ev, nb_tx, nb_rx);
return 0;
}
@@ -221,11 +234,11 @@ pipeline_queue_worker_single_stage_burst_fwd_vector(void *arg)
{
PIPELINE_WORKER_SINGLE_STAGE_BURST_INIT;
const uint8_t *tx_queue = t->tx_evqueue_id;
+ uint16_t nb_rx = 0, nb_tx = 0;
uint16_t vector_sz;
while (!t->done) {
- uint16_t nb_rx =
- rte_event_dequeue_burst(dev, port, ev, BURST_SIZE, 0);
+ nb_rx = rte_event_dequeue_burst(dev, port, ev, BURST_SIZE, 0);
if (!nb_rx) {
rte_pause();
@@ -241,9 +254,10 @@ pipeline_queue_worker_single_stage_burst_fwd_vector(void *arg)
RTE_SCHED_TYPE_ATOMIC);
}
- pipeline_event_enqueue_burst(dev, port, ev, nb_rx);
+ nb_tx = pipeline_event_enqueue_burst(dev, port, ev, nb_rx, t);
w->processed_pkts += vector_sz;
}
+ pipeline_worker_cleanup(dev, port, ev, nb_tx, nb_rx);
return 0;
}
@@ -253,11 +267,12 @@ pipeline_queue_worker_multi_stage_tx(void *arg)
{
PIPELINE_WORKER_MULTI_STAGE_INIT;
const uint8_t *tx_queue = t->tx_evqueue_id;
+ uint8_t enq = 0, deq = 0;
while (t->done == false) {
- uint16_t event = rte_event_dequeue_burst(dev, port, &ev, 1, 0);
+ deq = rte_event_dequeue_burst(dev, port, &ev, 1, 0);
- if (!event) {
+ if (!deq) {
rte_pause();
continue;
}
@@ -265,7 +280,8 @@ pipeline_queue_worker_multi_stage_tx(void *arg)
cq_id = ev.queue_id % nb_stages;
if (ev.queue_id == tx_queue[ev.mbuf->port]) {
- pipeline_event_tx(dev, port, &ev);
+ enq = pipeline_event_tx(dev, port, &ev, t);
+ ev.op = RTE_EVENT_OP_RELEASE;
w->processed_pkts++;
continue;
}
@@ -274,8 +290,9 @@ pipeline_queue_worker_multi_stage_tx(void *arg)
pipeline_fwd_event(&ev, cq_id != last_queue ?
sched_type_list[cq_id] :
RTE_SCHED_TYPE_ATOMIC);
- pipeline_event_enqueue(dev, port, &ev);
+ enq = pipeline_event_enqueue(dev, port, &ev, t);
}
+ pipeline_worker_cleanup(dev, port, &ev, enq, deq);
return 0;
}
@@ -285,11 +302,12 @@ pipeline_queue_worker_multi_stage_fwd(void *arg)
{
PIPELINE_WORKER_MULTI_STAGE_INIT;
const uint8_t *tx_queue = t->tx_evqueue_id;
+ uint8_t enq = 0, deq = 0;
while (t->done == false) {
- uint16_t event = rte_event_dequeue_burst(dev, port, &ev, 1, 0);
+ deq = rte_event_dequeue_burst(dev, port, &ev, 1, 0);
- if (!event) {
+ if (!deq) {
rte_pause();
continue;
}
@@ -300,14 +318,15 @@ pipeline_queue_worker_multi_stage_fwd(void *arg)
ev.queue_id = tx_queue[ev.mbuf->port];
rte_event_eth_tx_adapter_txq_set(ev.mbuf, 0);
pipeline_fwd_event(&ev, RTE_SCHED_TYPE_ATOMIC);
- pipeline_event_enqueue(dev, port, &ev);
+ enq = pipeline_event_enqueue(dev, port, &ev, t);
w->processed_pkts++;
} else {
ev.queue_id++;
pipeline_fwd_event(&ev, sched_type_list[cq_id]);
- pipeline_event_enqueue(dev, port, &ev);
+ enq = pipeline_event_enqueue(dev, port, &ev, t);
}
}
+ pipeline_worker_cleanup(dev, port, &ev, enq, deq);
return 0;
}
@@ -317,10 +336,10 @@ pipeline_queue_worker_multi_stage_burst_tx(void *arg)
{
PIPELINE_WORKER_MULTI_STAGE_BURST_INIT;
const uint8_t *tx_queue = t->tx_evqueue_id;
+ uint16_t nb_rx = 0, nb_tx = 0;
while (t->done == false) {
- uint16_t nb_rx = rte_event_dequeue_burst(dev, port, ev,
- BURST_SIZE, 0);
+ nb_rx = rte_event_dequeue_burst(dev, port, ev, BURST_SIZE, 0);
if (!nb_rx) {
rte_pause();
@@ -332,7 +351,8 @@ pipeline_queue_worker_multi_stage_burst_tx(void *arg)
cq_id = ev[i].queue_id % nb_stages;
if (ev[i].queue_id == tx_queue[ev[i].mbuf->port]) {
- pipeline_event_tx(dev, port, &ev[i]);
+ pipeline_event_tx(dev, port, &ev[i], t);
+ ev[i].op = RTE_EVENT_OP_RELEASE;
w->processed_pkts++;
continue;
}
@@ -341,9 +361,10 @@ pipeline_queue_worker_multi_stage_burst_tx(void *arg)
pipeline_fwd_event(&ev[i], cq_id != last_queue ?
sched_type_list[cq_id] :
RTE_SCHED_TYPE_ATOMIC);
- pipeline_event_enqueue_burst(dev, port, ev, nb_rx);
}
+ nb_tx = pipeline_event_enqueue_burst(dev, port, ev, nb_rx, t);
}
+ pipeline_worker_cleanup(dev, port, ev, nb_tx, nb_rx);
return 0;
}
@@ -353,11 +374,11 @@ pipeline_queue_worker_multi_stage_burst_fwd(void *arg)
{
PIPELINE_WORKER_MULTI_STAGE_BURST_INIT;
const uint8_t *tx_queue = t->tx_evqueue_id;
+ uint16_t nb_rx = 0, nb_tx = 0;
while (t->done == false) {
uint16_t processed_pkts = 0;
- uint16_t nb_rx = rte_event_dequeue_burst(dev, port, ev,
- BURST_SIZE, 0);
+ nb_rx = rte_event_dequeue_burst(dev, port, ev, BURST_SIZE, 0);
if (!nb_rx) {
rte_pause();
@@ -381,9 +402,10 @@ pipeline_queue_worker_multi_stage_burst_fwd(void *arg)
}
}
- pipeline_event_enqueue_burst(dev, port, ev, nb_rx);
+ nb_tx = pipeline_event_enqueue_burst(dev, port, ev, nb_rx, t);
w->processed_pkts += processed_pkts;
}
+ pipeline_worker_cleanup(dev, port, ev, nb_tx, nb_rx);
return 0;
}
@@ -393,12 +415,13 @@ pipeline_queue_worker_multi_stage_tx_vector(void *arg)
{
PIPELINE_WORKER_MULTI_STAGE_INIT;
const uint8_t *tx_queue = t->tx_evqueue_id;
+ uint8_t enq = 0, deq = 0;
uint16_t vector_sz;
while (!t->done) {
- uint16_t event = rte_event_dequeue_burst(dev, port, &ev, 1, 0);
+ deq = rte_event_dequeue_burst(dev, port, &ev, 1, 0);
- if (!event) {
+ if (!deq) {
rte_pause();
continue;
}
@@ -407,8 +430,9 @@ pipeline_queue_worker_multi_stage_tx_vector(void *arg)
if (ev.queue_id == tx_queue[ev.vec->port]) {
vector_sz = ev.vec->nb_elem;
- pipeline_event_tx_vector(dev, port, &ev);
+ enq = pipeline_event_tx_vector(dev, port, &ev, t);
w->processed_pkts += vector_sz;
+ ev.op = RTE_EVENT_OP_RELEASE;
continue;
}
@@ -416,8 +440,9 @@ pipeline_queue_worker_multi_stage_tx_vector(void *arg)
pipeline_fwd_event_vector(&ev, cq_id != last_queue
? sched_type_list[cq_id]
: RTE_SCHED_TYPE_ATOMIC);
- pipeline_event_enqueue(dev, port, &ev);
+ enq = pipeline_event_enqueue(dev, port, &ev, t);
}
+ pipeline_worker_cleanup(dev, port, &ev, enq, deq);
return 0;
}
@@ -427,12 +452,13 @@ pipeline_queue_worker_multi_stage_fwd_vector(void *arg)
{
PIPELINE_WORKER_MULTI_STAGE_INIT;
const uint8_t *tx_queue = t->tx_evqueue_id;
+ uint8_t enq = 0, deq = 0;
uint16_t vector_sz;
while (!t->done) {
- uint16_t event = rte_event_dequeue_burst(dev, port, &ev, 1, 0);
+ deq = rte_event_dequeue_burst(dev, port, &ev, 1, 0);
- if (!event) {
+ if (!deq) {
rte_pause();
continue;
}
@@ -449,8 +475,9 @@ pipeline_queue_worker_multi_stage_fwd_vector(void *arg)
pipeline_fwd_event_vector(&ev, sched_type_list[cq_id]);
}
- pipeline_event_enqueue(dev, port, &ev);
+ enq = pipeline_event_enqueue(dev, port, &ev, t);
}
+ pipeline_worker_cleanup(dev, port, &ev, enq, deq);
return 0;
}
@@ -460,11 +487,11 @@ pipeline_queue_worker_multi_stage_burst_tx_vector(void *arg)
{
PIPELINE_WORKER_MULTI_STAGE_BURST_INIT;
const uint8_t *tx_queue = t->tx_evqueue_id;
+ uint16_t nb_rx = 0, nb_tx = 0;
uint16_t vector_sz;
while (!t->done) {
- uint16_t nb_rx =
- rte_event_dequeue_burst(dev, port, ev, BURST_SIZE, 0);
+ nb_rx = rte_event_dequeue_burst(dev, port, ev, BURST_SIZE, 0);
if (!nb_rx) {
rte_pause();
@@ -476,7 +503,7 @@ pipeline_queue_worker_multi_stage_burst_tx_vector(void *arg)
if (ev[i].queue_id == tx_queue[ev[i].vec->port]) {
vector_sz = ev[i].vec->nb_elem;
- pipeline_event_tx_vector(dev, port, &ev[i]);
+ pipeline_event_tx_vector(dev, port, &ev[i], t);
ev[i].op = RTE_EVENT_OP_RELEASE;
w->processed_pkts += vector_sz;
continue;
@@ -489,8 +516,9 @@ pipeline_queue_worker_multi_stage_burst_tx_vector(void *arg)
: RTE_SCHED_TYPE_ATOMIC);
}
- pipeline_event_enqueue_burst(dev, port, ev, nb_rx);
+ nb_tx = pipeline_event_enqueue_burst(dev, port, ev, nb_rx, t);
}
+ pipeline_worker_cleanup(dev, port, ev, nb_tx, nb_rx);
return 0;
}
@@ -500,11 +528,11 @@ pipeline_queue_worker_multi_stage_burst_fwd_vector(void *arg)
{
PIPELINE_WORKER_MULTI_STAGE_BURST_INIT;
const uint8_t *tx_queue = t->tx_evqueue_id;
+ uint16_t nb_rx = 0, nb_tx = 0;
uint16_t vector_sz;
while (!t->done) {
- uint16_t nb_rx =
- rte_event_dequeue_burst(dev, port, ev, BURST_SIZE, 0);
+ nb_rx = rte_event_dequeue_burst(dev, port, ev, BURST_SIZE, 0);
if (!nb_rx) {
rte_pause();
@@ -527,8 +555,9 @@ pipeline_queue_worker_multi_stage_burst_fwd_vector(void *arg)
}
}
- pipeline_event_enqueue_burst(dev, port, ev, nb_rx);
+ nb_tx = pipeline_event_enqueue_burst(dev, port, ev, nb_rx, t);
}
+ pipeline_worker_cleanup(dev, port, ev, nb_tx, nb_rx);
return 0;
}
--
2.25.1
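Taken together, the helper changes above follow one pattern: each
enqueue helper now reports how many events it actually submitted and
stops retrying once the test's done flag is set. A minimal sketch of
that shape (simplified, hypothetical names; `done` stands in for
t->done, and this is not the exact code applied):

    #include <stdbool.h>
    #include <stdint.h>
    #include <rte_eventdev.h>

    /* Retry the enqueue only while shutdown has not been requested and
     * report the count actually submitted, so the caller can hand the
     * leftover [enq, deq) window to the cleanup routine. */
    static inline uint16_t
    enqueue_until_done(uint8_t dev, uint8_t port, struct rte_event *ev,
                       uint16_t nb_rx, volatile const bool *done)
    {
            uint16_t enq = rte_event_enqueue_burst(dev, port, ev, nb_rx);

            while (enq < nb_rx && !*done)
                    enq += rte_event_enqueue_burst(dev, port, ev + enq,
                                                   nb_rx - enq);

            return enq; /* < nb_rx only if *done was set mid-retry */
    }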
* [PATCH v2 3/6] examples/eventdev: clean up worker state before exit
2022-05-13 16:07 ` [PATCH v2 " pbhagavatula
2022-05-13 16:07 ` [PATCH v2 2/6] app/eventdev: clean up worker state before exit pbhagavatula
@ 2022-05-13 16:07 ` pbhagavatula
2022-05-13 16:07 ` [PATCH v2 4/6] examples/l3fwd: " pbhagavatula
` (3 subsequent siblings)
5 siblings, 0 replies; 17+ messages in thread
From: pbhagavatula @ 2022-05-13 16:07 UTC (permalink / raw)
To: jerinj, Harry van Haaren; +Cc: dev, Pavan Nikhilesh
From: Pavan Nikhilesh <pbhagavatula@marvell.com>
Event ports are configured to implicitly release the scheduler contexts
currently held in the next call to rte_event_dequeue_burst().
A worker core might still hold a scheduling context during exit, as the
next call to rte_event_dequeue_burst() is never made.
This might lead to a deadlock depending on the worker exit timing,
especially when there are very few flows.
Add a cleanup function that releases any scheduling contexts still held
by the worker using RTE_EVENT_OP_RELEASE.
Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
---
examples/eventdev_pipeline/pipeline_common.h | 22 ++++++
.../pipeline_worker_generic.c | 23 +++---
.../eventdev_pipeline/pipeline_worker_tx.c | 79 ++++++++++++-------
3 files changed, 87 insertions(+), 37 deletions(-)
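Concretely, each worker converted below ends up with this shape (a
minimal sketch, assuming pipeline_common.h from this patch for
worker_cleanup() and BATCH_SIZE; names are simplified and `done` stands
in for fdata->done). The counters must start at zero so a worker that
exits before its first dequeue hands an empty window to the cleanup:

    #include <stdbool.h>
    #include <stdint.h>
    #include <rte_eventdev.h>
    #include "pipeline_common.h" /* worker_cleanup(), BATCH_SIZE */

    static int
    worker_sketch(uint8_t dev, uint8_t port, volatile bool *done)
    {
            struct rte_event ev[BATCH_SIZE];
            uint16_t nb_rx = 0, nb_tx = 0;

            while (!*done) {
                    nb_rx = rte_event_dequeue_burst(dev, port, ev,
                                                    BATCH_SIZE, 0);
                    if (!nb_rx)
                            continue;
                    /* ... stage-specific processing of ev[0..nb_rx) ... */
                    nb_tx = rte_event_enqueue_burst(dev, port, ev, nb_rx);
                    while (nb_tx < nb_rx && !*done)
                            nb_tx += rte_event_enqueue_burst(dev, port,
                                            ev + nb_tx, nb_rx - nb_tx);
            }

            /* ev[nb_tx..nb_rx) were dequeued but never re-enqueued and
             * may still pin scheduler contexts; release them here. */
            worker_cleanup(dev, port, ev, nb_tx, nb_rx);
            return 0;
    }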
diff --git a/examples/eventdev_pipeline/pipeline_common.h b/examples/eventdev_pipeline/pipeline_common.h
index b12eb281e1..9899b257b0 100644
--- a/examples/eventdev_pipeline/pipeline_common.h
+++ b/examples/eventdev_pipeline/pipeline_common.h
@@ -140,5 +140,27 @@ schedule_devices(unsigned int lcore_id)
}
}
+static inline void
+worker_cleanup(uint8_t dev_id, uint8_t port_id, struct rte_event events[],
+ uint16_t nb_enq, uint16_t nb_deq)
+{
+ int i;
+
+ if (!(nb_deq - nb_enq))
+ return;
+
+ if (nb_deq) {
+ for (i = nb_enq; i < nb_deq; i++) {
+ if (events[i].op == RTE_EVENT_OP_RELEASE)
+ continue;
+ rte_pktmbuf_free(events[i].mbuf);
+ }
+
+ for (i = 0; i < nb_deq; i++)
+ events[i].op = RTE_EVENT_OP_RELEASE;
+ rte_event_enqueue_burst(dev_id, port_id, events, nb_deq);
+ }
+}
+
void set_worker_generic_setup_data(struct setup_data *caps, bool burst);
void set_worker_tx_enq_setup_data(struct setup_data *caps, bool burst);
diff --git a/examples/eventdev_pipeline/pipeline_worker_generic.c b/examples/eventdev_pipeline/pipeline_worker_generic.c
index ce1e92d59e..c564c808e2 100644
--- a/examples/eventdev_pipeline/pipeline_worker_generic.c
+++ b/examples/eventdev_pipeline/pipeline_worker_generic.c
@@ -16,6 +16,7 @@ worker_generic(void *arg)
uint8_t port_id = data->port_id;
size_t sent = 0, received = 0;
unsigned int lcore_id = rte_lcore_id();
+ uint16_t nb_rx = 0, nb_tx = 0;
while (!fdata->done) {
@@ -27,8 +28,7 @@ worker_generic(void *arg)
continue;
}
- const uint16_t nb_rx = rte_event_dequeue_burst(dev_id, port_id,
- &ev, 1, 0);
+ nb_rx = rte_event_dequeue_burst(dev_id, port_id, &ev, 1, 0);
if (nb_rx == 0) {
rte_pause();
@@ -47,11 +47,14 @@ worker_generic(void *arg)
work();
- while (rte_event_enqueue_burst(dev_id, port_id, &ev, 1) != 1)
- rte_pause();
+ do {
+ nb_tx = rte_event_enqueue_burst(dev_id, port_id, &ev,
+ 1);
+ } while (!nb_tx && !fdata->done);
sent++;
}
+ worker_cleanup(dev_id, port_id, &ev, nb_tx, nb_rx);
if (!cdata.quiet)
printf(" worker %u thread done. RX=%zu TX=%zu\n",
rte_lcore_id(), received, sent);
@@ -69,10 +72,9 @@ worker_generic_burst(void *arg)
uint8_t port_id = data->port_id;
size_t sent = 0, received = 0;
unsigned int lcore_id = rte_lcore_id();
+ uint16_t i, nb_rx = 0, nb_tx = 0;
while (!fdata->done) {
- uint16_t i;
-
if (fdata->cap.scheduler)
fdata->cap.scheduler(lcore_id);
@@ -81,8 +83,8 @@ worker_generic_burst(void *arg)
continue;
}
- const uint16_t nb_rx = rte_event_dequeue_burst(dev_id, port_id,
- events, RTE_DIM(events), 0);
+ nb_rx = rte_event_dequeue_burst(dev_id, port_id, events,
+ RTE_DIM(events), 0);
if (nb_rx == 0) {
rte_pause();
@@ -103,8 +105,7 @@ worker_generic_burst(void *arg)
work();
}
- uint16_t nb_tx = rte_event_enqueue_burst(dev_id, port_id,
- events, nb_rx);
+ nb_tx = rte_event_enqueue_burst(dev_id, port_id, events, nb_rx);
while (nb_tx < nb_rx && !fdata->done)
nb_tx += rte_event_enqueue_burst(dev_id, port_id,
events + nb_tx,
@@ -112,6 +113,8 @@ worker_generic_burst(void *arg)
sent += nb_tx;
}
+ worker_cleanup(dev_id, port_id, events, nb_tx, nb_rx);
+
if (!cdata.quiet)
printf(" worker %u thread done. RX=%zu TX=%zu\n",
rte_lcore_id(), received, sent);
diff --git a/examples/eventdev_pipeline/pipeline_worker_tx.c b/examples/eventdev_pipeline/pipeline_worker_tx.c
index 473940f8c7..a82e064c1c 100644
--- a/examples/eventdev_pipeline/pipeline_worker_tx.c
+++ b/examples/eventdev_pipeline/pipeline_worker_tx.c
@@ -18,21 +18,22 @@ static __rte_always_inline void
worker_event_enqueue(const uint8_t dev, const uint8_t port,
struct rte_event *ev)
{
- while (rte_event_enqueue_burst(dev, port, ev, 1) != 1)
+ while (!rte_event_enqueue_burst(dev, port, ev, 1) && !fdata->done)
rte_pause();
}
-static __rte_always_inline void
+static __rte_always_inline uint16_t
worker_event_enqueue_burst(const uint8_t dev, const uint8_t port,
- struct rte_event *ev, const uint16_t nb_rx)
+ struct rte_event *ev, const uint16_t nb_rx)
{
uint16_t enq;
enq = rte_event_enqueue_burst(dev, port, ev, nb_rx);
- while (enq < nb_rx) {
+ while (enq < nb_rx && !fdata->done)
enq += rte_event_enqueue_burst(dev, port,
ev + enq, nb_rx - enq);
- }
+
+ return enq;
}
static __rte_always_inline void
@@ -40,7 +41,8 @@ worker_tx_pkt(const uint8_t dev, const uint8_t port, struct rte_event *ev)
{
exchange_mac(ev->mbuf);
rte_event_eth_tx_adapter_txq_set(ev->mbuf, 0);
- while (!rte_event_eth_tx_adapter_enqueue(dev, port, ev, 1, 0))
+ while (!rte_event_eth_tx_adapter_enqueue(dev, port, ev, 1, 0) &&
+ !fdata->done)
rte_pause();
}
@@ -76,6 +78,11 @@ worker_do_tx_single(void *arg)
}
}
+ if (ev.u64) {
+ ev.op = RTE_EVENT_OP_RELEASE;
+ rte_event_enqueue_burst(dev, port, &ev, 1);
+ }
+
if (!cdata.quiet)
printf(" worker %u thread done. RX=%zu FWD=%zu TX=%zu\n",
rte_lcore_id(), received, fwd, tx);
@@ -111,6 +118,11 @@ worker_do_tx_single_atq(void *arg)
}
}
+ if (ev.u64) {
+ ev.op = RTE_EVENT_OP_RELEASE;
+ rte_event_enqueue_burst(dev, port, &ev, 1);
+ }
+
if (!cdata.quiet)
printf(" worker %u thread done. RX=%zu FWD=%zu TX=%zu\n",
rte_lcore_id(), received, fwd, tx);
@@ -126,11 +138,10 @@ worker_do_tx_single_burst(void *arg)
const uint8_t dev = data->dev_id;
const uint8_t port = data->port_id;
size_t fwd = 0, received = 0, tx = 0;
+ uint16_t nb_tx = 0, nb_rx = 0, i;
while (!fdata->done) {
- uint16_t i;
- uint16_t nb_rx = rte_event_dequeue_burst(dev, port, ev,
- BATCH_SIZE, 0);
+ nb_rx = rte_event_dequeue_burst(dev, port, ev, BATCH_SIZE, 0);
if (!nb_rx) {
rte_pause();
@@ -153,10 +164,12 @@ worker_do_tx_single_burst(void *arg)
work();
}
- worker_event_enqueue_burst(dev, port, ev, nb_rx);
- fwd += nb_rx;
+ nb_tx = worker_event_enqueue_burst(dev, port, ev, nb_rx);
+ fwd += nb_tx;
}
+ worker_cleanup(dev, port, ev, nb_tx, nb_rx);
+
if (!cdata.quiet)
printf(" worker %u thread done. RX=%zu FWD=%zu TX=%zu\n",
rte_lcore_id(), received, fwd, tx);
@@ -172,11 +185,10 @@ worker_do_tx_single_burst_atq(void *arg)
const uint8_t dev = data->dev_id;
const uint8_t port = data->port_id;
size_t fwd = 0, received = 0, tx = 0;
+ uint16_t i, nb_rx = 0, nb_tx = 0;
while (!fdata->done) {
- uint16_t i;
- uint16_t nb_rx = rte_event_dequeue_burst(dev, port, ev,
- BATCH_SIZE, 0);
+ nb_rx = rte_event_dequeue_burst(dev, port, ev, BATCH_SIZE, 0);
if (!nb_rx) {
rte_pause();
@@ -197,10 +209,12 @@ worker_do_tx_single_burst_atq(void *arg)
work();
}
- worker_event_enqueue_burst(dev, port, ev, nb_rx);
- fwd += nb_rx;
+ nb_tx = worker_event_enqueue_burst(dev, port, ev, nb_rx);
+ fwd += nb_tx;
}
+ worker_cleanup(dev, port, ev, nb_tx, nb_rx);
+
if (!cdata.quiet)
printf(" worker %u thread done. RX=%zu FWD=%zu TX=%zu\n",
rte_lcore_id(), received, fwd, tx);
@@ -251,6 +265,11 @@ worker_do_tx(void *arg)
fwd++;
}
+ if (ev.u64) {
+ ev.op = RTE_EVENT_OP_RELEASE;
+ rte_event_enqueue_burst(dev, port, &ev, 1);
+ }
+
if (!cdata.quiet)
printf(" worker %u thread done. RX=%zu FWD=%zu TX=%zu\n",
rte_lcore_id(), received, fwd, tx);
@@ -297,6 +316,11 @@ worker_do_tx_atq(void *arg)
fwd++;
}
+ if (ev.u64) {
+ ev.op = RTE_EVENT_OP_RELEASE;
+ rte_event_enqueue_burst(dev, port, &ev, 1);
+ }
+
if (!cdata.quiet)
printf(" worker %u thread done. RX=%zu FWD=%zu TX=%zu\n",
rte_lcore_id(), received, fwd, tx);
@@ -314,11 +338,10 @@ worker_do_tx_burst(void *arg)
uint8_t port = data->port_id;
uint8_t lst_qid = cdata.num_stages - 1;
size_t fwd = 0, received = 0, tx = 0;
+ uint16_t i, nb_rx = 0, nb_tx = 0;
while (!fdata->done) {
- uint16_t i;
- const uint16_t nb_rx = rte_event_dequeue_burst(dev, port,
- ev, BATCH_SIZE, 0);
+ nb_rx = rte_event_dequeue_burst(dev, port, ev, BATCH_SIZE, 0);
if (nb_rx == 0) {
rte_pause();
@@ -347,11 +370,13 @@ worker_do_tx_burst(void *arg)
}
work();
}
- worker_event_enqueue_burst(dev, port, ev, nb_rx);
- fwd += nb_rx;
+ nb_tx = worker_event_enqueue_burst(dev, port, ev, nb_rx);
+ fwd += nb_tx;
}
+ worker_cleanup(dev, port, ev, nb_tx, nb_rx);
+
if (!cdata.quiet)
printf(" worker %u thread done. RX=%zu FWD=%zu TX=%zu\n",
rte_lcore_id(), received, fwd, tx);
@@ -369,12 +394,10 @@ worker_do_tx_burst_atq(void *arg)
uint8_t port = data->port_id;
uint8_t lst_qid = cdata.num_stages - 1;
size_t fwd = 0, received = 0, tx = 0;
+ uint16_t i, nb_rx = 0, nb_tx = 0;
while (!fdata->done) {
- uint16_t i;
-
- const uint16_t nb_rx = rte_event_dequeue_burst(dev, port,
- ev, BATCH_SIZE, 0);
+ nb_rx = rte_event_dequeue_burst(dev, port, ev, BATCH_SIZE, 0);
if (nb_rx == 0) {
rte_pause();
@@ -402,10 +425,12 @@ worker_do_tx_burst_atq(void *arg)
work();
}
- worker_event_enqueue_burst(dev, port, ev, nb_rx);
- fwd += nb_rx;
+ nb_tx = worker_event_enqueue_burst(dev, port, ev, nb_rx);
+ fwd += nb_tx;
}
+ worker_cleanup(dev, port, ev, nb_tx, nb_rx);
+
if (!cdata.quiet)
printf(" worker %u thread done. RX=%zu FWD=%zu TX=%zu\n",
rte_lcore_id(), received, fwd, tx);
--
2.25.1
* [PATCH v2 4/6] examples/l3fwd: clean up worker state before exit
2022-05-13 16:07 ` [PATCH v2 " pbhagavatula
2022-05-13 16:07 ` [PATCH v2 2/6] app/eventdev: clean up worker state before exit pbhagavatula
2022-05-13 16:07 ` [PATCH v2 3/6] examples/eventdev: " pbhagavatula
@ 2022-05-13 16:07 ` pbhagavatula
2022-05-13 16:07 ` [PATCH v2 5/6] examples/l2fwd-event: " pbhagavatula
` (2 subsequent siblings)
5 siblings, 0 replies; 17+ messages in thread
From: pbhagavatula @ 2022-05-13 16:07 UTC (permalink / raw)
To: jerinj; +Cc: dev, Pavan Nikhilesh
From: Pavan Nikhilesh <pbhagavatula@marvell.com>
Event ports are configured to implicitly release the scheduler contexts
currently held in the next call to rte_event_dequeue_burst().
A worker core might still hold a scheduling context during exit, as the
next call to rte_event_dequeue_burst() is never made.
This might lead to a deadlock depending on the worker exit timing,
especially when there are very few flows.
Add a cleanup function that releases any scheduling contexts still held
by the worker using RTE_EVENT_OP_RELEASE.
Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
---
examples/l3fwd/l3fwd_em.c | 32 ++++++++++++++++++++++----------
examples/l3fwd/l3fwd_event.c | 34 ++++++++++++++++++++++++++++++++++
examples/l3fwd/l3fwd_event.h | 5 +++++
examples/l3fwd/l3fwd_fib.c | 10 ++++++++--
examples/l3fwd/l3fwd_lpm.c | 32 ++++++++++++++++++++++----------
5 files changed, 91 insertions(+), 22 deletions(-)
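One l3fwd-specific detail: the leftover events may be vector events, so
the cleanup cannot simply call rte_pktmbuf_free(). The mbufs carried by
the vector are freed in bulk and the rte_event_vector object itself is
returned to its mempool, which rte_mempool_from_obj() recovers from the
object pointer. A sketch of that step, reduced to a single event (the
same logic as l3fwd_event_vector_array_free() in the diff below):

    #include <rte_eventdev.h>
    #include <rte_mbuf.h>
    #include <rte_mempool.h>

    static void
    drop_vector_event(struct rte_event *ev)
    {
            /* Free the packets the vector still holds... */
            rte_pktmbuf_free_bulk(ev->vec->mbufs, ev->vec->nb_elem);
            /* ...then return the vector object to its pool. */
            rte_mempool_put(rte_mempool_from_obj(ev->vec), ev->vec);
    }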
diff --git a/examples/l3fwd/l3fwd_em.c b/examples/l3fwd/l3fwd_em.c
index 24d0910fe0..6f8d94f120 100644
--- a/examples/l3fwd/l3fwd_em.c
+++ b/examples/l3fwd/l3fwd_em.c
@@ -653,6 +653,7 @@ em_event_loop_single(struct l3fwd_event_resources *evt_rsrc,
const uint8_t tx_q_id = evt_rsrc->evq.event_q_id[
evt_rsrc->evq.nb_queues - 1];
const uint8_t event_d_id = evt_rsrc->event_d_id;
+ uint8_t deq = 0, enq = 0;
struct lcore_conf *lconf;
unsigned int lcore_id;
struct rte_event ev;
@@ -665,7 +666,9 @@ em_event_loop_single(struct l3fwd_event_resources *evt_rsrc,
RTE_LOG(INFO, L3FWD, "entering %s on lcore %u\n", __func__, lcore_id);
while (!force_quit) {
- if (!rte_event_dequeue_burst(event_d_id, event_p_id, &ev, 1, 0))
+ deq = rte_event_dequeue_burst(event_d_id, event_p_id, &ev, 1,
+ 0);
+ if (!deq)
continue;
struct rte_mbuf *mbuf = ev.mbuf;
@@ -684,19 +687,22 @@ em_event_loop_single(struct l3fwd_event_resources *evt_rsrc,
if (flags & L3FWD_EVENT_TX_ENQ) {
ev.queue_id = tx_q_id;
ev.op = RTE_EVENT_OP_FORWARD;
- while (rte_event_enqueue_burst(event_d_id, event_p_id,
- &ev, 1) && !force_quit)
- ;
+ do {
+ enq = rte_event_enqueue_burst(
+ event_d_id, event_p_id, &ev, 1);
+ } while (!enq && !force_quit);
}
if (flags & L3FWD_EVENT_TX_DIRECT) {
rte_event_eth_tx_adapter_txq_set(mbuf, 0);
- while (!rte_event_eth_tx_adapter_enqueue(event_d_id,
- event_p_id, &ev, 1, 0) &&
- !force_quit)
- ;
+ do {
+ enq = rte_event_eth_tx_adapter_enqueue(
+ event_d_id, event_p_id, &ev, 1, 0);
+ } while (!enq && !force_quit);
}
}
+
+ l3fwd_event_worker_cleanup(event_d_id, event_p_id, &ev, enq, deq, 0);
}
static __rte_always_inline void
@@ -709,9 +715,9 @@ em_event_loop_burst(struct l3fwd_event_resources *evt_rsrc,
const uint8_t event_d_id = evt_rsrc->event_d_id;
const uint16_t deq_len = evt_rsrc->deq_depth;
struct rte_event events[MAX_PKT_BURST];
+ int i, nb_enq = 0, nb_deq = 0;
struct lcore_conf *lconf;
unsigned int lcore_id;
- int i, nb_enq, nb_deq;
if (event_p_id < 0)
return;
@@ -769,6 +775,9 @@ em_event_loop_burst(struct l3fwd_event_resources *evt_rsrc,
nb_deq - nb_enq, 0);
}
}
+
+ l3fwd_event_worker_cleanup(event_d_id, event_p_id, events, nb_enq,
+ nb_deq, 0);
}
static __rte_always_inline void
@@ -832,9 +841,9 @@ em_event_loop_vector(struct l3fwd_event_resources *evt_rsrc,
const uint8_t event_d_id = evt_rsrc->event_d_id;
const uint16_t deq_len = evt_rsrc->deq_depth;
struct rte_event events[MAX_PKT_BURST];
+ int i, nb_enq = 0, nb_deq = 0;
struct lcore_conf *lconf;
unsigned int lcore_id;
- int i, nb_enq, nb_deq;
if (event_p_id < 0)
return;
@@ -887,6 +896,9 @@ em_event_loop_vector(struct l3fwd_event_resources *evt_rsrc,
nb_deq - nb_enq, 0);
}
}
+
+ l3fwd_event_worker_cleanup(event_d_id, event_p_id, events, nb_enq,
+ nb_deq, 1);
}
int __rte_noinline
diff --git a/examples/l3fwd/l3fwd_event.c b/examples/l3fwd/l3fwd_event.c
index 7a401290f8..a14a21b414 100644
--- a/examples/l3fwd/l3fwd_event.c
+++ b/examples/l3fwd/l3fwd_event.c
@@ -287,3 +287,37 @@ l3fwd_event_resource_setup(struct rte_eth_conf *port_conf)
fib_event_loop[evt_rsrc->vector_enabled][evt_rsrc->tx_mode_q]
[evt_rsrc->has_burst];
}
+
+static void
+l3fwd_event_vector_array_free(struct rte_event events[], uint16_t num)
+{
+ uint16_t i;
+
+ for (i = 0; i < num; i++) {
+ rte_pktmbuf_free_bulk(events[i].vec->mbufs,
+ events[i].vec->nb_elem);
+ rte_mempool_put(rte_mempool_from_obj(events[i].vec),
+ events[i].vec);
+ }
+}
+
+void
+l3fwd_event_worker_cleanup(uint8_t event_d_id, uint8_t event_p_id,
+ struct rte_event events[], uint16_t nb_enq,
+ uint16_t nb_deq, uint8_t is_vector)
+{
+ int i;
+
+ if (nb_deq) {
+ if (is_vector)
+ l3fwd_event_vector_array_free(events + nb_enq,
+ nb_deq - nb_enq);
+ else
+ for (i = nb_enq; i < nb_deq; i++)
+ rte_pktmbuf_free(events[i].mbuf);
+
+ for (i = 0; i < nb_deq; i++)
+ events[i].op = RTE_EVENT_OP_RELEASE;
+ rte_event_enqueue_burst(event_d_id, event_p_id, events, nb_deq);
+ }
+}
diff --git a/examples/l3fwd/l3fwd_event.h b/examples/l3fwd/l3fwd_event.h
index f139632016..b93841a16f 100644
--- a/examples/l3fwd/l3fwd_event.h
+++ b/examples/l3fwd/l3fwd_event.h
@@ -103,10 +103,15 @@ event_vector_txq_set(struct rte_event_vector *vec, uint16_t txq)
}
}
+
+
struct l3fwd_event_resources *l3fwd_get_eventdev_rsrc(void);
void l3fwd_event_resource_setup(struct rte_eth_conf *port_conf);
int l3fwd_get_free_event_port(struct l3fwd_event_resources *eventdev_rsrc);
void l3fwd_event_set_generic_ops(struct l3fwd_event_setup_ops *ops);
void l3fwd_event_set_internal_port_ops(struct l3fwd_event_setup_ops *ops);
+void l3fwd_event_worker_cleanup(uint8_t event_d_id, uint8_t event_p_id,
+ struct rte_event events[], uint16_t nb_enq,
+ uint16_t nb_deq, uint8_t is_vector);
#endif /* __L3FWD_EVENTDEV_H__ */
diff --git a/examples/l3fwd/l3fwd_fib.c b/examples/l3fwd/l3fwd_fib.c
index 6e0054b4cb..26d0767ae2 100644
--- a/examples/l3fwd/l3fwd_fib.c
+++ b/examples/l3fwd/l3fwd_fib.c
@@ -252,9 +252,9 @@ fib_event_loop(struct l3fwd_event_resources *evt_rsrc,
const uint8_t event_d_id = evt_rsrc->event_d_id;
const uint16_t deq_len = evt_rsrc->deq_depth;
struct rte_event events[MAX_PKT_BURST];
+ int i, nb_enq = 0, nb_deq = 0;
struct lcore_conf *lconf;
unsigned int lcore_id;
- int nb_enq, nb_deq, i;
uint32_t ipv4_arr[MAX_PKT_BURST];
uint8_t ipv6_arr[MAX_PKT_BURST][RTE_FIB6_IPV6_ADDR_SIZE];
@@ -370,6 +370,9 @@ fib_event_loop(struct l3fwd_event_resources *evt_rsrc,
nb_deq - nb_enq, 0);
}
}
+
+ l3fwd_event_worker_cleanup(event_d_id, event_p_id, events, nb_enq,
+ nb_deq, 0);
}
int __rte_noinline
@@ -491,7 +494,7 @@ fib_event_loop_vector(struct l3fwd_event_resources *evt_rsrc,
const uint8_t event_d_id = evt_rsrc->event_d_id;
const uint16_t deq_len = evt_rsrc->deq_depth;
struct rte_event events[MAX_PKT_BURST];
- int nb_enq, nb_deq, i;
+ int nb_enq = 0, nb_deq = 0, i;
if (event_p_id < 0)
return;
@@ -538,6 +541,9 @@ fib_event_loop_vector(struct l3fwd_event_resources *evt_rsrc,
nb_deq - nb_enq, 0);
}
}
+
+ l3fwd_event_worker_cleanup(event_d_id, event_p_id, events, nb_enq,
+ nb_deq, 1);
}
int __rte_noinline
diff --git a/examples/l3fwd/l3fwd_lpm.c b/examples/l3fwd/l3fwd_lpm.c
index bec22c44cd..501fc5db5e 100644
--- a/examples/l3fwd/l3fwd_lpm.c
+++ b/examples/l3fwd/l3fwd_lpm.c
@@ -273,6 +273,7 @@ lpm_event_loop_single(struct l3fwd_event_resources *evt_rsrc,
const uint8_t tx_q_id = evt_rsrc->evq.event_q_id[
evt_rsrc->evq.nb_queues - 1];
const uint8_t event_d_id = evt_rsrc->event_d_id;
+ uint8_t enq = 0, deq = 0;
struct lcore_conf *lconf;
unsigned int lcore_id;
struct rte_event ev;
@@ -285,7 +286,9 @@ lpm_event_loop_single(struct l3fwd_event_resources *evt_rsrc,
RTE_LOG(INFO, L3FWD, "entering %s on lcore %u\n", __func__, lcore_id);
while (!force_quit) {
- if (!rte_event_dequeue_burst(event_d_id, event_p_id, &ev, 1, 0))
+ deq = rte_event_dequeue_burst(event_d_id, event_p_id, &ev, 1,
+ 0);
+ if (!deq)
continue;
if (lpm_process_event_pkt(lconf, ev.mbuf) == BAD_PORT) {
@@ -296,19 +299,22 @@ lpm_event_loop_single(struct l3fwd_event_resources *evt_rsrc,
if (flags & L3FWD_EVENT_TX_ENQ) {
ev.queue_id = tx_q_id;
ev.op = RTE_EVENT_OP_FORWARD;
- while (rte_event_enqueue_burst(event_d_id, event_p_id,
- &ev, 1) && !force_quit)
- ;
+ do {
+ enq = rte_event_enqueue_burst(
+ event_d_id, event_p_id, &ev, 1);
+ } while (!enq && !force_quit);
}
if (flags & L3FWD_EVENT_TX_DIRECT) {
rte_event_eth_tx_adapter_txq_set(ev.mbuf, 0);
- while (!rte_event_eth_tx_adapter_enqueue(event_d_id,
- event_p_id, &ev, 1, 0) &&
- !force_quit)
- ;
+ do {
+ enq = rte_event_eth_tx_adapter_enqueue(
+ event_d_id, event_p_id, &ev, 1, 0);
+ } while (!enq && !force_quit);
}
}
+
+ l3fwd_event_worker_cleanup(event_d_id, event_p_id, &ev, enq, deq, 0);
}
static __rte_always_inline void
@@ -321,9 +327,9 @@ lpm_event_loop_burst(struct l3fwd_event_resources *evt_rsrc,
const uint8_t event_d_id = evt_rsrc->event_d_id;
const uint16_t deq_len = evt_rsrc->deq_depth;
struct rte_event events[MAX_PKT_BURST];
+ int i, nb_enq = 0, nb_deq = 0;
struct lcore_conf *lconf;
unsigned int lcore_id;
- int i, nb_enq, nb_deq;
if (event_p_id < 0)
return;
@@ -375,6 +381,9 @@ lpm_event_loop_burst(struct l3fwd_event_resources *evt_rsrc,
nb_deq - nb_enq, 0);
}
}
+
+ l3fwd_event_worker_cleanup(event_d_id, event_p_id, events, nb_enq,
+ nb_deq, 0);
}
static __rte_always_inline void
@@ -459,9 +468,9 @@ lpm_event_loop_vector(struct l3fwd_event_resources *evt_rsrc,
const uint8_t event_d_id = evt_rsrc->event_d_id;
const uint16_t deq_len = evt_rsrc->deq_depth;
struct rte_event events[MAX_PKT_BURST];
+ int i, nb_enq = 0, nb_deq = 0;
struct lcore_conf *lconf;
unsigned int lcore_id;
- int i, nb_enq, nb_deq;
if (event_p_id < 0)
return;
@@ -510,6 +519,9 @@ lpm_event_loop_vector(struct l3fwd_event_resources *evt_rsrc,
nb_deq - nb_enq, 0);
}
}
+
+ l3fwd_event_worker_cleanup(event_d_id, event_p_id, events, nb_enq,
+ nb_deq, 1);
}
int __rte_noinline
--
2.25.1
* [PATCH v2 5/6] examples/l2fwd-event: clean up worker state before exit
2022-05-13 16:07 ` [PATCH v2 " pbhagavatula
` (2 preceding siblings ...)
2022-05-13 16:07 ` [PATCH v2 4/6] examples/l3fwd: " pbhagavatula
@ 2022-05-13 16:07 ` pbhagavatula
2022-05-13 16:07 ` [PATCH v2 6/6] examples/ipsec-secgw: cleanup " pbhagavatula
2022-05-16 16:46 ` [PATCH v2 1/6] app/eventdev: simplify signal handling and teardown Jerin Jacob
5 siblings, 0 replies; 17+ messages in thread
From: pbhagavatula @ 2022-05-13 16:07 UTC (permalink / raw)
To: jerinj, Sunil Kumar Kori, Pavan Nikhilesh; +Cc: dev
From: Pavan Nikhilesh <pbhagavatula@marvell.com>
Event ports are configured to implicitly release the scheduler contexts
currently held in the next call to rte_event_dequeue_burst().
A worker core might still hold a scheduling context during exit, as the
next call to rte_event_dequeue_burst() is never made.
This might lead to a deadlock depending on the worker exit timing,
especially when there are very few flows.
Add a cleanup function that releases any scheduling contexts still held
by the worker using RTE_EVENT_OP_RELEASE.
Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
---
examples/l2fwd-event/l2fwd_common.c | 34 +++++++++++++++++++++++++++++
examples/l2fwd-event/l2fwd_common.h | 3 +++
examples/l2fwd-event/l2fwd_event.c | 31 ++++++++++++++++----------
3 files changed, 56 insertions(+), 12 deletions(-)
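The conversion below is mechanical, but the reasoning matters: the old
unbounded `while (!enqueue) ;` spin can never terminate once shutdown
stops the TX adapter from accepting events. Bounding the retry on the
quit flag turns that hang into a zero return that the cleanup path can
handle. A minimal sketch (simplified names; `force_quit` stands in for
rsrc->force_quit):

    #include <stdbool.h>
    #include <stdint.h>
    #include <rte_event_eth_tx_adapter.h>

    /* Returns 1 on success, 0 if shutdown won the race; on 0 the event
     * is still held by the worker and must go through the cleanup path
     * rather than being silently leaked. */
    static inline uint16_t
    tx_enqueue_bounded(uint8_t dev_id, uint8_t port_id,
                       struct rte_event *ev, volatile bool *force_quit)
    {
            uint16_t enq;

            do {
                    enq = rte_event_eth_tx_adapter_enqueue(dev_id, port_id,
                                                           ev, 1, 0);
            } while (!enq && !*force_quit);

            return enq;
    }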
diff --git a/examples/l2fwd-event/l2fwd_common.c b/examples/l2fwd-event/l2fwd_common.c
index cf3d1b8aaf..15bfe790a0 100644
--- a/examples/l2fwd-event/l2fwd_common.c
+++ b/examples/l2fwd-event/l2fwd_common.c
@@ -114,3 +114,37 @@ l2fwd_event_init_ports(struct l2fwd_resources *rsrc)
return nb_ports_available;
}
+
+static void
+l2fwd_event_vector_array_free(struct rte_event events[], uint16_t num)
+{
+ uint16_t i;
+
+ for (i = 0; i < num; i++) {
+ rte_pktmbuf_free_bulk(events[i].vec->mbufs,
+ events[i].vec->nb_elem);
+ rte_mempool_put(rte_mempool_from_obj(events[i].vec),
+ events[i].vec);
+ }
+}
+
+void
+l2fwd_event_worker_cleanup(uint8_t event_d_id, uint8_t port_id,
+ struct rte_event events[], uint16_t nb_enq,
+ uint16_t nb_deq, uint8_t is_vector)
+{
+ int i;
+
+ if (nb_deq) {
+ if (is_vector)
+ l2fwd_event_vector_array_free(events + nb_enq,
+ nb_deq - nb_enq);
+ else
+ for (i = nb_enq; i < nb_deq; i++)
+ rte_pktmbuf_free(events[i].mbuf);
+
+ for (i = 0; i < nb_deq; i++)
+ events[i].op = RTE_EVENT_OP_RELEASE;
+ rte_event_enqueue_burst(event_d_id, port_id, events, nb_deq);
+ }
+}
diff --git a/examples/l2fwd-event/l2fwd_common.h b/examples/l2fwd-event/l2fwd_common.h
index 396e238c6a..bff3b65abf 100644
--- a/examples/l2fwd-event/l2fwd_common.h
+++ b/examples/l2fwd-event/l2fwd_common.h
@@ -140,5 +140,8 @@ l2fwd_get_rsrc(void)
}
int l2fwd_event_init_ports(struct l2fwd_resources *rsrc);
+void l2fwd_event_worker_cleanup(uint8_t event_d_id, uint8_t port_id,
+ struct rte_event events[], uint16_t nb_enq,
+ uint16_t nb_deq, uint8_t is_vector);
#endif /* __L2FWD_COMMON_H__ */
diff --git a/examples/l2fwd-event/l2fwd_event.c b/examples/l2fwd-event/l2fwd_event.c
index 6df3cdfeab..63450537fe 100644
--- a/examples/l2fwd-event/l2fwd_event.c
+++ b/examples/l2fwd-event/l2fwd_event.c
@@ -193,6 +193,7 @@ l2fwd_event_loop_single(struct l2fwd_resources *rsrc,
evt_rsrc->evq.nb_queues - 1];
const uint64_t timer_period = rsrc->timer_period;
const uint8_t event_d_id = evt_rsrc->event_d_id;
+ uint8_t enq = 0, deq = 0;
struct rte_event ev;
if (port_id < 0)
@@ -203,26 +204,28 @@ l2fwd_event_loop_single(struct l2fwd_resources *rsrc,
while (!rsrc->force_quit) {
/* Read packet from eventdev */
- if (!rte_event_dequeue_burst(event_d_id, port_id, &ev, 1, 0))
+ deq = rte_event_dequeue_burst(event_d_id, port_id, &ev, 1, 0);
+ if (!deq)
continue;
l2fwd_event_fwd(rsrc, &ev, tx_q_id, timer_period, flags);
if (flags & L2FWD_EVENT_TX_ENQ) {
- while (rte_event_enqueue_burst(event_d_id, port_id,
- &ev, 1) &&
- !rsrc->force_quit)
- ;
+ do {
+ enq = rte_event_enqueue_burst(event_d_id,
+ port_id, &ev, 1);
+ } while (!enq && !rsrc->force_quit);
}
if (flags & L2FWD_EVENT_TX_DIRECT) {
- while (!rte_event_eth_tx_adapter_enqueue(event_d_id,
- port_id,
- &ev, 1, 0) &&
- !rsrc->force_quit)
- ;
+ do {
+ enq = rte_event_eth_tx_adapter_enqueue(
+ event_d_id, port_id, &ev, 1, 0);
+ } while (!enq && !rsrc->force_quit);
}
}
+
+ l2fwd_event_worker_cleanup(event_d_id, port_id, &ev, enq, deq, 0);
}
static __rte_always_inline void
@@ -237,7 +240,7 @@ l2fwd_event_loop_burst(struct l2fwd_resources *rsrc,
const uint8_t event_d_id = evt_rsrc->event_d_id;
const uint8_t deq_len = evt_rsrc->deq_depth;
struct rte_event ev[MAX_PKT_BURST];
- uint16_t nb_rx, nb_tx;
+ uint16_t nb_rx = 0, nb_tx = 0;
uint8_t i;
if (port_id < 0)
@@ -280,6 +283,8 @@ l2fwd_event_loop_burst(struct l2fwd_resources *rsrc,
ev + nb_tx, nb_rx - nb_tx, 0);
}
}
+
+ l2fwd_event_worker_cleanup(event_d_id, port_id, ev, nb_rx, nb_tx, 0);
}
static __rte_always_inline void
@@ -419,7 +424,7 @@ l2fwd_event_loop_vector(struct l2fwd_resources *rsrc, const uint32_t flags)
const uint8_t event_d_id = evt_rsrc->event_d_id;
const uint8_t deq_len = evt_rsrc->deq_depth;
struct rte_event ev[MAX_PKT_BURST];
- uint16_t nb_rx, nb_tx;
+ uint16_t nb_rx = 0, nb_tx = 0;
uint8_t i;
if (port_id < 0)
@@ -462,6 +467,8 @@ l2fwd_event_loop_vector(struct l2fwd_resources *rsrc, const uint32_t flags)
nb_rx - nb_tx, 0);
}
}
+
+ l2fwd_event_worker_cleanup(event_d_id, port_id, ev, nb_rx, nb_tx, 1);
}
static void __rte_noinline
--
2.25.1
* [PATCH v2 6/6] examples/ipsec-secgw: cleanup worker state before exit
2022-05-13 16:07 ` [PATCH v2 " pbhagavatula
` (3 preceding siblings ...)
2022-05-13 16:07 ` [PATCH v2 5/6] examples/l2fwd-event: " pbhagavatula
@ 2022-05-13 16:07 ` pbhagavatula
2022-05-16 16:46 ` [PATCH v2 1/6] app/eventdev: simplify signal handling and teardown Jerin Jacob
5 siblings, 0 replies; 17+ messages in thread
From: pbhagavatula @ 2022-05-13 16:07 UTC (permalink / raw)
To: jerinj, Radu Nicolau, Akhil Goyal; +Cc: dev, Pavan Nikhilesh
From: Pavan Nikhilesh <pbhagavatula@marvell.com>
Event ports are configured to implicitly release the scheduler contexts
currently held in the next call to rte_event_dequeue_burst().
A worker core might still hold a scheduling context during exit as the
next call to rte_event_dequeue_burst() is never made.
This might lead to a deadlock depending on the worker exit timing,
especially when there are very few flows.
Add a cleanup function that releases any scheduling contexts still held
by the worker using RTE_EVENT_OP_RELEASE.
Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
---
examples/ipsec-secgw/ipsec_worker.c | 40 ++++++++++++++++++++---------
1 file changed, 28 insertions(+), 12 deletions(-)
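The ipsec-secgw workers dequeue a single event at a time, so no
[nb_enq, nb_deq) window is needed. Instead, ev.u64 (which overlays the
event payload, e.g. the mbuf pointer) is tested after the loop: a
nonzero value means the last dequeue produced a valid event whose
scheduling context may still be held. A sketch of the exit step the
diff below adds inline:

    #include <rte_eventdev.h>

    static void
    release_last_event(uint8_t dev_id, uint8_t port_id,
                       struct rte_event *ev)
    {
            if (ev->u64 == 0)
                    return; /* nothing left over from the loop */
            /* Turn the leftover event into an explicit release so the
             * port drops any scheduling context it still holds. */
            ev->op = RTE_EVENT_OP_RELEASE;
            rte_event_enqueue_burst(dev_id, port_id, ev, 1);
    }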
diff --git a/examples/ipsec-secgw/ipsec_worker.c b/examples/ipsec-secgw/ipsec_worker.c
index 8639426c5c..3df5acf384 100644
--- a/examples/ipsec-secgw/ipsec_worker.c
+++ b/examples/ipsec-secgw/ipsec_worker.c
@@ -749,7 +749,7 @@ ipsec_wrkr_non_burst_int_port_drv_mode(struct eh_event_link_info *links,
uint8_t nb_links)
{
struct port_drv_mode_data data[RTE_MAX_ETHPORTS];
- unsigned int nb_rx = 0;
+ unsigned int nb_rx = 0, nb_tx;
struct rte_mbuf *pkt;
struct rte_event ev;
uint32_t lcore_id;
@@ -847,11 +847,19 @@ ipsec_wrkr_non_burst_int_port_drv_mode(struct eh_event_link_info *links,
* directly enqueued to the adapter and it would be
* internally submitted to the eth device.
*/
- rte_event_eth_tx_adapter_enqueue(links[0].eventdev_id,
- links[0].event_port_id,
- &ev, /* events */
- 1, /* nb_events */
- 0 /* flags */);
+ nb_tx = rte_event_eth_tx_adapter_enqueue(links[0].eventdev_id,
+ links[0].event_port_id,
+ &ev, /* events */
+ 1, /* nb_events */
+ 0 /* flags */);
+ if (!nb_tx)
+ rte_pktmbuf_free(ev.mbuf);
+ }
+
+ if (ev.u64) {
+ ev.op = RTE_EVENT_OP_RELEASE;
+ rte_event_enqueue_burst(links[0].eventdev_id,
+ links[0].event_port_id, &ev, 1);
}
}
@@ -864,7 +872,7 @@ ipsec_wrkr_non_burst_int_port_app_mode(struct eh_event_link_info *links,
uint8_t nb_links)
{
struct lcore_conf_ev_tx_int_port_wrkr lconf;
- unsigned int nb_rx = 0;
+ unsigned int nb_rx = 0, nb_tx;
struct rte_event ev;
uint32_t lcore_id;
int32_t socket_id;
@@ -952,11 +960,19 @@ ipsec_wrkr_non_burst_int_port_app_mode(struct eh_event_link_info *links,
* directly enqueued to the adapter and it would be
* internally submitted to the eth device.
*/
- rte_event_eth_tx_adapter_enqueue(links[0].eventdev_id,
- links[0].event_port_id,
- &ev, /* events */
- 1, /* nb_events */
- 0 /* flags */);
+ nb_tx = rte_event_eth_tx_adapter_enqueue(links[0].eventdev_id,
+ links[0].event_port_id,
+ &ev, /* events */
+ 1, /* nb_events */
+ 0 /* flags */);
+ if (!nb_tx)
+ rte_pktmbuf_free(ev.mbuf);
+ }
+
+ if (ev.u64) {
+ ev.op = RTE_EVENT_OP_RELEASE;
+ rte_event_enqueue_burst(links[0].eventdev_id,
+ links[0].event_port_id, &ev, 1);
}
}
--
2.25.1
* Re: [PATCH v2 1/6] app/eventdev: simplify signal handling and teardown
2022-05-13 16:07 ` [PATCH v2 " pbhagavatula
` (4 preceding siblings ...)
2022-05-13 16:07 ` [PATCH v2 6/6] examples/ipsec-secgw: cleanup " pbhagavatula
@ 2022-05-16 16:46 ` Jerin Jacob
5 siblings, 0 replies; 17+ messages in thread
From: Jerin Jacob @ 2022-05-16 16:46 UTC (permalink / raw)
To: Pavan Nikhilesh; +Cc: Jerin Jacob, dpdk-dev
On Fri, May 13, 2022 at 9:37 PM <pbhagavatula@marvell.com> wrote:
>
> From: Pavan Nikhilesh <pbhagavatula@marvell.com>
>
> Remove rte_*_dev calls from signal handler callback as signal handlers
> are supposed to be lightweight.
>
> Split ethernet device teardown into Rx and Tx sections, wait for
> workers to finish processing after disabling Rx to allow workers
> to complete processing currently held packets.
>
> Verified SW event device on ARM64 using the following command:
>
> ./build/app/dpdk-test-eventdev -l 7-23 -s 0xf00 --vdev=event_sw0
> -a 0002:02:00.0 -- --prod_type_ethdev --nb_pkts=0 --verbose 2
> --test=pipeline_queue --stlist=o --wlcores 16-23
Series Acked-by: Jerin Jacob <jerinj@marvell.com>
Series applied to dpdk-next-net-eventdev/for-main. Thanks
>
> Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
> ---
> app/test-eventdev/evt_main.c | 58 +++++++++---------------
> app/test-eventdev/evt_test.h | 3 ++
> app/test-eventdev/test_perf_atq.c | 1 +
> app/test-eventdev/test_perf_common.c | 20 +++++++-
> app/test-eventdev/test_perf_common.h | 4 +-
> app/test-eventdev/test_perf_queue.c | 1 +
> app/test-eventdev/test_pipeline_atq.c | 1 +
> app/test-eventdev/test_pipeline_common.c | 19 +++++++-
> app/test-eventdev/test_pipeline_common.h | 5 +-
> app/test-eventdev/test_pipeline_queue.c | 1 +
> 10 files changed, 72 insertions(+), 41 deletions(-)
>
> diff --git a/app/test-eventdev/evt_main.c b/app/test-eventdev/evt_main.c
> index a7d6b0c1cf..c5d63061bf 100644
> --- a/app/test-eventdev/evt_main.c
> +++ b/app/test-eventdev/evt_main.c
> @@ -19,11 +19,7 @@ struct evt_test *test;
> static void
> signal_handler(int signum)
> {
> - int i;
> - static uint8_t once;
> -
> - if ((signum == SIGINT || signum == SIGTERM) && !once) {
> - once = true;
> + if (signum == SIGINT || signum == SIGTERM) {
> printf("\nSignal %d received, preparing to exit...\n",
> signum);
>
> @@ -31,36 +27,7 @@ signal_handler(int signum)
> /* request all lcores to exit from the main loop */
> *(int *)test->test_priv = true;
> rte_wmb();
> -
> - if (test->ops.ethdev_destroy)
> - test->ops.ethdev_destroy(test, &opt);
> -
> - if (test->ops.cryptodev_destroy)
> - test->ops.cryptodev_destroy(test, &opt);
> -
> - rte_eal_mp_wait_lcore();
> -
> - if (test->ops.test_result)
> - test->ops.test_result(test, &opt);
> -
> - if (opt.prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR) {
> - RTE_ETH_FOREACH_DEV(i)
> - rte_eth_dev_close(i);
> - }
> -
> - if (test->ops.eventdev_destroy)
> - test->ops.eventdev_destroy(test, &opt);
> -
> - if (test->ops.mempool_destroy)
> - test->ops.mempool_destroy(test, &opt);
> -
> - if (test->ops.test_destroy)
> - test->ops.test_destroy(test, &opt);
> }
> -
> - /* exit with the expected status */
> - signal(signum, SIG_DFL);
> - kill(getpid(), signum);
> }
> }
>
> @@ -189,10 +156,29 @@ main(int argc, char **argv)
> }
> }
>
> + if (test->ops.ethdev_rx_stop)
> + test->ops.ethdev_rx_stop(test, &opt);
> +
> + if (test->ops.cryptodev_destroy)
> + test->ops.cryptodev_destroy(test, &opt);
> +
> rte_eal_mp_wait_lcore();
>
> - /* Print the test result */
> - ret = test->ops.test_result(test, &opt);
> + if (test->ops.test_result)
> + test->ops.test_result(test, &opt);
> +
> + if (test->ops.ethdev_destroy)
> + test->ops.ethdev_destroy(test, &opt);
> +
> + if (test->ops.eventdev_destroy)
> + test->ops.eventdev_destroy(test, &opt);
> +
> + if (test->ops.mempool_destroy)
> + test->ops.mempool_destroy(test, &opt);
> +
> + if (test->ops.test_destroy)
> + test->ops.test_destroy(test, &opt);
> +
> nocap:
> if (ret == EVT_TEST_SUCCESS) {
> printf("Result: "CLGRN"%s"CLNRM"\n", "Success");
> diff --git a/app/test-eventdev/evt_test.h b/app/test-eventdev/evt_test.h
> index 50fa474ec2..1049f99ddc 100644
> --- a/app/test-eventdev/evt_test.h
> +++ b/app/test-eventdev/evt_test.h
> @@ -41,6 +41,8 @@ typedef void (*evt_test_eventdev_destroy_t)
> (struct evt_test *test, struct evt_options *opt);
> typedef void (*evt_test_ethdev_destroy_t)
> (struct evt_test *test, struct evt_options *opt);
> +typedef void (*evt_test_ethdev_rx_stop_t)(struct evt_test *test,
> + struct evt_options *opt);
> typedef void (*evt_test_cryptodev_destroy_t)
> (struct evt_test *test, struct evt_options *opt);
> typedef void (*evt_test_mempool_destroy_t)
> @@ -60,6 +62,7 @@ struct evt_test_ops {
> evt_test_launch_lcores_t launch_lcores;
> evt_test_result_t test_result;
> evt_test_eventdev_destroy_t eventdev_destroy;
> + evt_test_ethdev_rx_stop_t ethdev_rx_stop;
> evt_test_ethdev_destroy_t ethdev_destroy;
> evt_test_cryptodev_destroy_t cryptodev_destroy;
> evt_test_mempool_destroy_t mempool_destroy;
> diff --git a/app/test-eventdev/test_perf_atq.c b/app/test-eventdev/test_perf_atq.c
> index 67ff681666..bac3ea602f 100644
> --- a/app/test-eventdev/test_perf_atq.c
> +++ b/app/test-eventdev/test_perf_atq.c
> @@ -343,6 +343,7 @@ static const struct evt_test_ops perf_atq = {
> .test_setup = perf_test_setup,
> .ethdev_setup = perf_ethdev_setup,
> .cryptodev_setup = perf_cryptodev_setup,
> + .ethdev_rx_stop = perf_ethdev_rx_stop,
> .mempool_setup = perf_mempool_setup,
> .eventdev_setup = perf_atq_eventdev_setup,
> .launch_lcores = perf_atq_launch_lcores,
> diff --git a/app/test-eventdev/test_perf_common.c b/app/test-eventdev/test_perf_common.c
> index 9d1f4a4567..4cf16b4267 100644
> --- a/app/test-eventdev/test_perf_common.c
> +++ b/app/test-eventdev/test_perf_common.c
> @@ -1087,7 +1087,8 @@ perf_ethdev_setup(struct evt_test *test, struct evt_options *opt)
> return 0;
> }
>
> -void perf_ethdev_destroy(struct evt_test *test, struct evt_options *opt)
> +void
> +perf_ethdev_rx_stop(struct evt_test *test, struct evt_options *opt)
> {
> uint16_t i;
> RTE_SET_USED(test);
> @@ -1095,6 +1096,23 @@ void perf_ethdev_destroy(struct evt_test *test, struct evt_options *opt)
> if (opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR) {
> RTE_ETH_FOREACH_DEV(i) {
> rte_event_eth_rx_adapter_stop(i);
> + rte_event_eth_rx_adapter_queue_del(i, i, -1);
> + rte_eth_dev_rx_queue_stop(i, 0);
> + }
> + }
> +}
> +
> +void
> +perf_ethdev_destroy(struct evt_test *test, struct evt_options *opt)
> +{
> + uint16_t i;
> + RTE_SET_USED(test);
> +
> + if (opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR) {
> + RTE_ETH_FOREACH_DEV(i) {
> + rte_event_eth_tx_adapter_stop(i);
> + rte_event_eth_tx_adapter_queue_del(i, i, -1);
> + rte_eth_dev_tx_queue_stop(i, 0);
> rte_eth_dev_stop(i);
> }
> }
> diff --git a/app/test-eventdev/test_perf_common.h b/app/test-eventdev/test_perf_common.h
> index ea0907d61a..e504bb1df9 100644
> --- a/app/test-eventdev/test_perf_common.h
> +++ b/app/test-eventdev/test_perf_common.h
> @@ -12,10 +12,11 @@
> #include <rte_cryptodev.h>
> #include <rte_cycles.h>
> #include <rte_ethdev.h>
> -#include <rte_eventdev.h>
> #include <rte_event_crypto_adapter.h>
> #include <rte_event_eth_rx_adapter.h>
> +#include <rte_event_eth_tx_adapter.h>
> #include <rte_event_timer_adapter.h>
> +#include <rte_eventdev.h>
> #include <rte_lcore.h>
> #include <rte_malloc.h>
> #include <rte_mempool.h>
> @@ -181,6 +182,7 @@ void perf_test_destroy(struct evt_test *test, struct evt_options *opt);
> void perf_eventdev_destroy(struct evt_test *test, struct evt_options *opt);
> void perf_cryptodev_destroy(struct evt_test *test, struct evt_options *opt);
> void perf_ethdev_destroy(struct evt_test *test, struct evt_options *opt);
> +void perf_ethdev_rx_stop(struct evt_test *test, struct evt_options *opt);
> void perf_mempool_destroy(struct evt_test *test, struct evt_options *opt);
>
> #endif /* _TEST_PERF_COMMON_ */
> diff --git a/app/test-eventdev/test_perf_queue.c b/app/test-eventdev/test_perf_queue.c
> index dcf6d82947..108f1742a7 100644
> --- a/app/test-eventdev/test_perf_queue.c
> +++ b/app/test-eventdev/test_perf_queue.c
> @@ -360,6 +360,7 @@ static const struct evt_test_ops perf_queue = {
> .mempool_setup = perf_mempool_setup,
> .ethdev_setup = perf_ethdev_setup,
> .cryptodev_setup = perf_cryptodev_setup,
> + .ethdev_rx_stop = perf_ethdev_rx_stop,
> .eventdev_setup = perf_queue_eventdev_setup,
> .launch_lcores = perf_queue_launch_lcores,
> .eventdev_destroy = perf_eventdev_destroy,
> diff --git a/app/test-eventdev/test_pipeline_atq.c b/app/test-eventdev/test_pipeline_atq.c
> index 84dd4f44e3..79218502ba 100644
> --- a/app/test-eventdev/test_pipeline_atq.c
> +++ b/app/test-eventdev/test_pipeline_atq.c
> @@ -772,6 +772,7 @@ static const struct evt_test_ops pipeline_atq = {
> .ethdev_setup = pipeline_ethdev_setup,
> .eventdev_setup = pipeline_atq_eventdev_setup,
> .launch_lcores = pipeline_atq_launch_lcores,
> + .ethdev_rx_stop = pipeline_ethdev_rx_stop,
> .eventdev_destroy = pipeline_eventdev_destroy,
> .mempool_destroy = pipeline_mempool_destroy,
> .ethdev_destroy = pipeline_ethdev_destroy,
> diff --git a/app/test-eventdev/test_pipeline_common.c b/app/test-eventdev/test_pipeline_common.c
> index ddaa9f3fdb..29b64014d7 100644
> --- a/app/test-eventdev/test_pipeline_common.c
> +++ b/app/test-eventdev/test_pipeline_common.c
> @@ -505,6 +505,22 @@ pipeline_event_tx_adapter_setup(struct evt_options *opt,
> return ret;
> }
>
> +void
> +pipeline_ethdev_rx_stop(struct evt_test *test, struct evt_options *opt)
> +{
> + uint16_t i, j;
> + RTE_SET_USED(test);
> +
> + if (opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR) {
> + RTE_ETH_FOREACH_DEV(i) {
> + rte_event_eth_rx_adapter_stop(i);
> + rte_event_eth_rx_adapter_queue_del(i, i, -1);
> + for (j = 0; j < opt->eth_queues; j++)
> + rte_eth_dev_rx_queue_stop(i, j);
> + }
> + }
> +}
> +
> void
> pipeline_ethdev_destroy(struct evt_test *test, struct evt_options *opt)
> {
> @@ -513,8 +529,9 @@ pipeline_ethdev_destroy(struct evt_test *test, struct evt_options *opt)
> RTE_SET_USED(opt);
>
> RTE_ETH_FOREACH_DEV(i) {
> - rte_event_eth_rx_adapter_stop(i);
> rte_event_eth_tx_adapter_stop(i);
> + rte_event_eth_tx_adapter_queue_del(i, i, -1);
> + rte_eth_dev_tx_queue_stop(i, 0);
> rte_eth_dev_stop(i);
> }
> }
> diff --git a/app/test-eventdev/test_pipeline_common.h b/app/test-eventdev/test_pipeline_common.h
> index d69e2f8a3e..c979c33772 100644
> --- a/app/test-eventdev/test_pipeline_common.h
> +++ b/app/test-eventdev/test_pipeline_common.h
> @@ -12,16 +12,16 @@
>
> #include <rte_cycles.h>
> #include <rte_ethdev.h>
> -#include <rte_eventdev.h>
> #include <rte_event_eth_rx_adapter.h>
> #include <rte_event_eth_tx_adapter.h>
> +#include <rte_eventdev.h>
> #include <rte_lcore.h>
> #include <rte_malloc.h>
> #include <rte_mempool.h>
> #include <rte_prefetch.h>
> -#include <rte_spinlock.h>
> #include <rte_service.h>
> #include <rte_service_component.h>
> +#include <rte_spinlock.h>
>
> #include "evt_common.h"
> #include "evt_options.h"
> @@ -186,6 +186,7 @@ void pipeline_opt_dump(struct evt_options *opt, uint8_t nb_queues);
> void pipeline_test_destroy(struct evt_test *test, struct evt_options *opt);
> void pipeline_eventdev_destroy(struct evt_test *test, struct evt_options *opt);
> void pipeline_ethdev_destroy(struct evt_test *test, struct evt_options *opt);
> +void pipeline_ethdev_rx_stop(struct evt_test *test, struct evt_options *opt);
> void pipeline_mempool_destroy(struct evt_test *test, struct evt_options *opt);
>
> #endif /* _TEST_PIPELINE_COMMON_ */
> diff --git a/app/test-eventdev/test_pipeline_queue.c b/app/test-eventdev/test_pipeline_queue.c
> index f6cc3e358e..343f8f3b1d 100644
> --- a/app/test-eventdev/test_pipeline_queue.c
> +++ b/app/test-eventdev/test_pipeline_queue.c
> @@ -798,6 +798,7 @@ static const struct evt_test_ops pipeline_queue = {
> .ethdev_setup = pipeline_ethdev_setup,
> .eventdev_setup = pipeline_queue_eventdev_setup,
> .launch_lcores = pipeline_queue_launch_lcores,
> + .ethdev_rx_stop = pipeline_ethdev_rx_stop,
> .eventdev_destroy = pipeline_eventdev_destroy,
> .mempool_destroy = pipeline_mempool_destroy,
> .ethdev_destroy = pipeline_ethdev_destroy,
> --
> 2.25.1
>
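
Sketched in one place, the teardown order this series establishes. The
handler only sets a flag; all rte_* teardown runs on the main lcore.
rx_side_stop/tx_side_stop are hypothetical stand-ins for the
ethdev_rx_stop/ethdev_destroy ops added by the patch:

#include <signal.h>
#include <stdbool.h>
#include <rte_launch.h>

static volatile bool force_quit;

/* Handler stays async-signal-safe: it only flags the workers. */
static void
signal_handler(int signum)
{
	if (signum == SIGINT || signum == SIGTERM)
		force_quit = true;
}

/* Hypothetical stand-ins for the per-test ops. */
static void rx_side_stop(void) { /* rx adapter stop + rx queue stop */ }
static void tx_side_stop(void) { /* tx adapter stop, dev stop/close */ }

static void
teardown(void)
{
	rx_side_stop();		 /* 1. no new packets enter the pipeline */
	rte_eal_mp_wait_lcore(); /* 2. workers drain held packets and exit */
	tx_side_stop();		 /* 3. Tx and the devices can now go down */
}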