patches for DPDK stable branches
* [PATCH v2 2/3] app/testeventdev: fix asymmetric last stage handling
       [not found] ` <20221104122556.751286-1-vfialko@marvell.com>
@ 2022-11-04 12:25   ` Volodymyr Fialko
  2022-11-04 12:25   ` [PATCH v2 3/3] app/testeventdev: fix timestamp with crypto producer Volodymyr Fialko
  1 sibling, 0 replies; 2+ messages in thread
From: Volodymyr Fialko @ 2022-11-04 12:25 UTC
  To: dev, Jerin Jacob, Abhinandan Gujjar, Fan Zhang, Akhil Goyal,
	Anoob Joseph
  Cc: Volodymyr Fialko, stable

With the asymmetric crypto producer, the event type check in
`process_crypto_request` will not pass when there are multiple stages,
because the event type is overwritten when the event is forwarded. Use
the producer type to dispatch instead.

Fixes: 8f5b549502d1 ("app/eventdev: support asym ops for crypto adapter")
Cc: stable@dpdk.org

Signed-off-by: Volodymyr Fialko <vfialko@marvell.com>
---
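A minimal sketch of the failure mode (illustrative only, not part of
the patch; the helper names are made up). Forwarding an event to the
next stage rewrites its event type, so after the first forward the
last stage can no longer recognize crypto events via ev->event_type,
while prod_crypto_type is set once at configuration time and stays
valid:

#include <rte_cryptodev.h>
#include <rte_eventdev.h>
#include <rte_malloc.h>

static void
sketch_fwd_to_next_stage(struct rte_event *ev)
{
	/* The forward path overwrites the event type, erasing the
	 * RTE_EVENT_TYPE_CRYPTODEV marker set by the crypto adapter.
	 */
	ev->event_type = RTE_EVENT_TYPE_CPU;
	ev->op = RTE_EVENT_OP_FORWARD;
}

static void
sketch_last_stage(struct rte_event *ev, uint8_t prod_crypto_type)
{
	struct rte_crypto_op *op = ev->event_ptr;

	/* Broken once nb_stages > 1, since event_type was rewritten:
	 *   if (ev->event_type == RTE_EVENT_TYPE_CRYPTODEV && ...)
	 * Dispatching on the producer type instead is always correct.
	 */
	if (prod_crypto_type && op->type == RTE_CRYPTO_OP_TYPE_ASYMMETRIC) {
		rte_free(op->asym->modex.result.data);
		rte_crypto_op_free(op);
	}
}
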
 app/test-eventdev/test_perf_atq.c    | 10 +++++-----
 app/test-eventdev/test_perf_common.h | 11 +++++------
 app/test-eventdev/test_perf_queue.c  | 10 +++++-----
 3 files changed, 15 insertions(+), 16 deletions(-)

diff --git a/app/test-eventdev/test_perf_atq.c b/app/test-eventdev/test_perf_atq.c
index 8326f54045..2b71f30b66 100644
--- a/app/test-eventdev/test_perf_atq.c
+++ b/app/test-eventdev/test_perf_atq.c
@@ -74,10 +74,10 @@ perf_atq_worker(void *arg, const int enable_fwd_latency)
 		/* last stage in pipeline */
 		if (unlikely((ev.sub_event_type % nb_stages) == laststage)) {
 			if (enable_fwd_latency)
-				cnt = perf_process_last_stage_latency(pool,
+				cnt = perf_process_last_stage_latency(pool, prod_crypto_type,
 					&ev, w, bufs, sz, cnt);
 			else
-				cnt = perf_process_last_stage(pool, &ev, w,
+				cnt = perf_process_last_stage(pool, prod_crypto_type, &ev, w,
 					 bufs, sz, cnt);
 		} else {
 			atq_fwd_event(&ev, sched_type_list, nb_stages);
@@ -141,10 +141,10 @@ perf_atq_worker_burst(void *arg, const int enable_fwd_latency)
 			if (unlikely((ev[i].sub_event_type % nb_stages)
 						== laststage)) {
 				if (enable_fwd_latency)
-					cnt = perf_process_last_stage_latency(
-						pool, &ev[i], w, bufs, sz, cnt);
+					cnt = perf_process_last_stage_latency(pool,
+						prod_crypto_type, &ev[i], w, bufs, sz, cnt);
 				else
-					cnt = perf_process_last_stage(pool,
+					cnt = perf_process_last_stage(pool, prod_crypto_type,
 						&ev[i], w, bufs, sz, cnt);
 
 				ev[i].op = RTE_EVENT_OP_RELEASE;
diff --git a/app/test-eventdev/test_perf_common.h b/app/test-eventdev/test_perf_common.h
index d06d52cdf8..5b075bfbc4 100644
--- a/app/test-eventdev/test_perf_common.h
+++ b/app/test-eventdev/test_perf_common.h
@@ -108,7 +108,7 @@ struct perf_elt {
 				rte_lcore_id(), dev, port)
 
 static __rte_always_inline int
-perf_process_last_stage(struct rte_mempool *const pool,
+perf_process_last_stage(struct rte_mempool *const pool, uint8_t prod_crypto_type,
 		struct rte_event *const ev, struct worker_data *const w,
 		void *bufs[], int const buf_sz, uint8_t count)
 {
@@ -119,7 +119,7 @@ perf_process_last_stage(struct rte_mempool *const pool,
 	rte_atomic_thread_fence(__ATOMIC_RELEASE);
 	w->processed_pkts++;
 
-	if (ev->event_type == RTE_EVENT_TYPE_CRYPTODEV &&
+	if (prod_crypto_type &&
 			((struct rte_crypto_op *)ev->event_ptr)->type ==
 				RTE_CRYPTO_OP_TYPE_ASYMMETRIC) {
 		struct rte_crypto_op *op = ev->event_ptr;
@@ -137,7 +137,7 @@ perf_process_last_stage(struct rte_mempool *const pool,
 }
 
 static __rte_always_inline uint8_t
-perf_process_last_stage_latency(struct rte_mempool *const pool,
+perf_process_last_stage_latency(struct rte_mempool *const pool, uint8_t prod_crypto_type,
 		struct rte_event *const ev, struct worker_data *const w,
 		void *bufs[], int const buf_sz, uint8_t count)
 {
@@ -151,9 +151,8 @@ perf_process_last_stage_latency(struct rte_mempool *const pool,
 	rte_atomic_thread_fence(__ATOMIC_RELEASE);
 	w->processed_pkts++;
 
-	if (ev->event_type == RTE_EVENT_TYPE_CRYPTODEV &&
-			((struct rte_crypto_op *)m)->type ==
-				RTE_CRYPTO_OP_TYPE_ASYMMETRIC) {
+	if (prod_crypto_type &&
+			((struct rte_crypto_op *)m)->type == RTE_CRYPTO_OP_TYPE_ASYMMETRIC) {
 		rte_free(((struct rte_crypto_op *)m)->asym->modex.result.data);
 		rte_crypto_op_free((struct rte_crypto_op *)m);
 	} else {
diff --git a/app/test-eventdev/test_perf_queue.c b/app/test-eventdev/test_perf_queue.c
index 814ab9f9bd..38509eddbb 100644
--- a/app/test-eventdev/test_perf_queue.c
+++ b/app/test-eventdev/test_perf_queue.c
@@ -76,10 +76,10 @@ perf_queue_worker(void *arg, const int enable_fwd_latency)
 		/* last stage in pipeline */
 		if (unlikely((ev.queue_id % nb_stages) == laststage)) {
 			if (enable_fwd_latency)
-				cnt = perf_process_last_stage_latency(pool,
+				cnt = perf_process_last_stage_latency(pool, prod_crypto_type,
 					&ev, w, bufs, sz, cnt);
 			else
-				cnt = perf_process_last_stage(pool,
+				cnt = perf_process_last_stage(pool, prod_crypto_type,
 					&ev, w, bufs, sz, cnt);
 		} else {
 			fwd_event(&ev, sched_type_list, nb_stages);
@@ -143,10 +143,10 @@ perf_queue_worker_burst(void *arg, const int enable_fwd_latency)
 			if (unlikely((ev[i].queue_id % nb_stages) ==
 						 laststage)) {
 				if (enable_fwd_latency)
-					cnt = perf_process_last_stage_latency(
-						pool, &ev[i], w, bufs, sz, cnt);
+					cnt = perf_process_last_stage_latency(pool,
+						prod_crypto_type, &ev[i], w, bufs, sz, cnt);
 				else
-					cnt = perf_process_last_stage(pool,
+					cnt = perf_process_last_stage(pool, prod_crypto_type,
 						&ev[i], w, bufs, sz, cnt);
 
 				ev[i].op = RTE_EVENT_OP_RELEASE;
-- 
2.25.1


* [PATCH v2 3/3] app/testeventdev: fix timestamp with crypto producer
       [not found] ` <20221104122556.751286-1-vfialko@marvell.com>
  2022-11-04 12:25   ` [PATCH v2 2/3] app/testeventdev: fix asymmetric last stage handling Volodymyr Fialko
@ 2022-11-04 12:25   ` Volodymyr Fialko
  1 sibling, 0 replies; 2+ messages in thread
From: Volodymyr Fialko @ 2022-11-04 12:25 UTC
  To: dev, Jerin Jacob, Akhil Goyal, Abhinandan Gujjar, Shijith Thotton
  Cc: anoobj, Volodymyr Fialko, stable

With the symmetric crypto producer and `--fwd_latency` enabled, the
rte_mbuf is treated as a perf_elt, which corrupts the rte_mbuf header.
Store the timestamp in the rte_mbuf data area instead. For the
asymmetric producer, reserve space for the timestamp after the result
data.

Fixes: de2bc16e1bd1 ("app/eventdev: add crypto producer mode")
Cc: stable@dpdk.org

Signed-off-by: Volodymyr Fialko <vfialko@marvell.com>
---
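A minimal sketch of the new timestamp placement (illustrative only,
not part of the patch; struct perf_elt is reduced to its timestamp
field here). For symmetric ops the perf_elt occupies the head of the
mbuf data area and the cipher offset/length in the patch skip over it;
for asymmetric ops the result buffer is sized result_len plus
sizeof(struct perf_elt), so the timestamp lands after the modex
result:

#include <rte_common.h>
#include <rte_crypto.h>
#include <rte_cycles.h>
#include <rte_mbuf.h>

struct perf_elt {	/* simplified; see test_perf_common.h */
	uint64_t timestamp;
};

static void
sketch_mark_ts_sym(struct rte_crypto_op *op)
{
	struct rte_mbuf *m = op->sym->m_dst ? op->sym->m_dst : op->sym->m_src;
	struct perf_elt *pe;

	/* cipher.data.offset equals sizeof(struct perf_elt), so the
	 * device never touches these bytes and the header stays intact.
	 */
	pe = rte_pktmbuf_mtod(m, struct perf_elt *);
	pe->timestamp = rte_get_timer_cycles();
}

static void
sketch_mark_ts_asym(struct rte_crypto_op *op)
{
	/* The result buffer holds result_len + sizeof(struct perf_elt)
	 * bytes; the perf_elt sits just past the modex result data.
	 */
	struct perf_elt *pe = RTE_PTR_ADD(op->asym->modex.result.data,
					  op->asym->modex.result.length);

	pe->timestamp = rte_get_timer_cycles();
}
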
 app/test-eventdev/test_perf_atq.c    |  64 ++++-----------
 app/test-eventdev/test_perf_common.c |  47 +++++++++---
 app/test-eventdev/test_perf_common.h | 111 +++++++++++++++++++++------
 app/test-eventdev/test_perf_queue.c  |  71 +++++------------
 4 files changed, 160 insertions(+), 133 deletions(-)

diff --git a/app/test-eventdev/test_perf_atq.c b/app/test-eventdev/test_perf_atq.c
index 2b71f30b66..9d30081117 100644
--- a/app/test-eventdev/test_perf_atq.c
+++ b/app/test-eventdev/test_perf_atq.c
@@ -14,16 +14,6 @@ atq_nb_event_queues(struct evt_options *opt)
 		rte_eth_dev_count_avail() : evt_nr_active_lcores(opt->plcores);
 }
 
-static __rte_always_inline void
-atq_mark_fwd_latency(struct rte_event *const ev)
-{
-	if (unlikely(ev->sub_event_type == 0)) {
-		struct perf_elt *const m = ev->event_ptr;
-
-		m->timestamp = rte_get_timer_cycles();
-	}
-}
-
 static __rte_always_inline void
 atq_fwd_event(struct rte_event *const ev, uint8_t *const sched_type_list,
 		const uint8_t nb_stages)
@@ -37,9 +27,11 @@ atq_fwd_event(struct rte_event *const ev, uint8_t *const sched_type_list,
 static int
 perf_atq_worker(void *arg, const int enable_fwd_latency)
 {
+	struct perf_elt *pe = NULL;
 	uint16_t enq = 0, deq = 0;
 	struct rte_event ev;
 	PERF_WORKER_INIT;
+	uint8_t stage;
 
 	while (t->done == false) {
 		deq = rte_event_dequeue_burst(dev, port, &ev, 1, 0);
@@ -49,30 +41,18 @@ perf_atq_worker(void *arg, const int enable_fwd_latency)
 			continue;
 		}
 
-		if (prod_crypto_type &&
-		    (ev.event_type == RTE_EVENT_TYPE_CRYPTODEV)) {
-			struct rte_crypto_op *op = ev.event_ptr;
-
-			if (op->status == RTE_CRYPTO_OP_STATUS_SUCCESS) {
-				if (op->type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
-					if (op->sym->m_dst == NULL)
-						ev.event_ptr = op->sym->m_src;
-					else
-						ev.event_ptr = op->sym->m_dst;
-					rte_crypto_op_free(op);
-				}
-			} else {
-				rte_crypto_op_free(op);
+		if (prod_crypto_type && (ev.event_type == RTE_EVENT_TYPE_CRYPTODEV)) {
+			if (perf_handle_crypto_ev(&ev, &pe, enable_fwd_latency))
 				continue;
-			}
 		}
 
-		if (enable_fwd_latency && !prod_timer_type)
+		stage = ev.sub_event_type % nb_stages;
+		if (enable_fwd_latency && !prod_timer_type && stage == 0)
 		/* first stage in pipeline, mark ts to compute fwd latency */
-			atq_mark_fwd_latency(&ev);
+			perf_mark_fwd_latency(ev.event_ptr);
 
 		/* last stage in pipeline */
-		if (unlikely((ev.sub_event_type % nb_stages) == laststage)) {
+		if (unlikely(stage == laststage)) {
 			if (enable_fwd_latency)
 				cnt = perf_process_last_stage_latency(pool, prod_crypto_type,
 					&ev, w, bufs, sz, cnt);
@@ -99,7 +79,9 @@ perf_atq_worker_burst(void *arg, const int enable_fwd_latency)
 	/* +1 to avoid prefetch out of array check */
 	struct rte_event ev[BURST_SIZE + 1];
 	uint16_t enq = 0, nb_rx = 0;
+	struct perf_elt *pe = NULL;
 	PERF_WORKER_INIT;
+	uint8_t stage;
 	uint16_t i;
 
 	while (t->done == false) {
@@ -111,35 +93,21 @@ perf_atq_worker_burst(void *arg, const int enable_fwd_latency)
 		}
 
 		for (i = 0; i < nb_rx; i++) {
-			if (prod_crypto_type &&
-			    (ev[i].event_type == RTE_EVENT_TYPE_CRYPTODEV)) {
-				struct rte_crypto_op *op = ev[i].event_ptr;
-
-				if (op->status ==
-				    RTE_CRYPTO_OP_STATUS_SUCCESS) {
-					if (op->sym->m_dst == NULL)
-						ev[i].event_ptr =
-							op->sym->m_src;
-					else
-						ev[i].event_ptr =
-							op->sym->m_dst;
-					rte_crypto_op_free(op);
-				} else {
-					rte_crypto_op_free(op);
+			if (prod_crypto_type && (ev[i].event_type == RTE_EVENT_TYPE_CRYPTODEV)) {
+				if (perf_handle_crypto_ev(&ev[i], &pe, enable_fwd_latency))
 					continue;
-				}
 			}
 
-			if (enable_fwd_latency && !prod_timer_type) {
+			stage = ev[i].sub_event_type % nb_stages;
+			if (enable_fwd_latency && !prod_timer_type && stage == 0) {
 				rte_prefetch0(ev[i+1].event_ptr);
 				/* first stage in pipeline.
 				 * mark time stamp to compute fwd latency
 				 */
-				atq_mark_fwd_latency(&ev[i]);
+				perf_mark_fwd_latency(ev[i].event_ptr);
 			}
 			/* last stage in pipeline */
-			if (unlikely((ev[i].sub_event_type % nb_stages)
-						== laststage)) {
+			if (unlikely(stage == laststage)) {
 				if (enable_fwd_latency)
 					cnt = perf_process_last_stage_latency(pool,
 						prod_crypto_type, &ev[i], w, bufs, sz, cnt);
diff --git a/app/test-eventdev/test_perf_common.c b/app/test-eventdev/test_perf_common.c
index 6aae18fddb..140c0c2dc3 100644
--- a/app/test-eventdev/test_perf_common.c
+++ b/app/test-eventdev/test_perf_common.c
@@ -370,16 +370,17 @@ crypto_adapter_enq_op_new(struct prod_data *p)
 	uint64_t alloc_failures = 0;
 	uint32_t flow_counter = 0;
 	struct rte_crypto_op *op;
+	uint16_t len, offset;
 	struct rte_mbuf *m;
 	uint64_t count = 0;
-	uint16_t len;
 
 	if (opt->verbose_level > 1)
 		printf("%s(): lcore %d queue %d cdev_id %u cdev_qp_id %u\n",
 		       __func__, rte_lcore_id(), p->queue_id, p->ca.cdev_id,
 		       p->ca.cdev_qp_id);
 
-	len = opt->mbuf_sz ? opt->mbuf_sz : RTE_ETHER_MIN_LEN;
+	offset = sizeof(struct perf_elt);
+	len = RTE_MAX(RTE_ETHER_MIN_LEN + offset, opt->mbuf_sz);
 
 	while (count < nb_pkts && t->done == false) {
 		if (opt->crypto_op_type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
@@ -402,19 +403,24 @@ crypto_adapter_enq_op_new(struct prod_data *p)
 			rte_pktmbuf_append(m, len);
 			sym_op = op->sym;
 			sym_op->m_src = m;
-			sym_op->cipher.data.offset = 0;
-			sym_op->cipher.data.length = len;
+			sym_op->cipher.data.offset = offset;
+			sym_op->cipher.data.length = len - offset;
 			rte_crypto_op_attach_sym_session(
 				op, p->ca.crypto_sess[flow_counter++ % nb_flows]);
 		} else {
 			struct rte_crypto_asym_op *asym_op;
-			uint8_t *result = rte_zmalloc(NULL,
-					modex_test_case.result_len, 0);
+			uint8_t *result;
+
+			if (rte_mempool_get(pool, (void **)&result)) {
+				alloc_failures++;
+				continue;
+			}
 
 			op = rte_crypto_op_alloc(t->ca_op_pool,
 					 RTE_CRYPTO_OP_TYPE_ASYMMETRIC);
 			if (unlikely(op == NULL)) {
 				alloc_failures++;
+				rte_mempool_put(pool, result);
 				continue;
 			}
 
@@ -451,10 +457,10 @@ crypto_adapter_enq_op_fwd(struct prod_data *p)
 	uint64_t alloc_failures = 0;
 	uint32_t flow_counter = 0;
 	struct rte_crypto_op *op;
+	uint16_t len, offset;
 	struct rte_event ev;
 	struct rte_mbuf *m;
 	uint64_t count = 0;
-	uint16_t len;
 
 	if (opt->verbose_level > 1)
 		printf("%s(): lcore %d port %d queue %d cdev_id %u cdev_qp_id %u\n",
@@ -466,7 +472,9 @@ crypto_adapter_enq_op_fwd(struct prod_data *p)
 	ev.queue_id = p->queue_id;
 	ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
 	ev.event_type = RTE_EVENT_TYPE_CPU;
-	len = opt->mbuf_sz ? opt->mbuf_sz : RTE_ETHER_MIN_LEN;
+
+	offset = sizeof(struct perf_elt);
+	len = RTE_MAX(RTE_ETHER_MIN_LEN + offset, opt->mbuf_sz);
 
 	while (count < nb_pkts && t->done == false) {
 		if (opt->crypto_op_type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
@@ -489,19 +497,24 @@ crypto_adapter_enq_op_fwd(struct prod_data *p)
 			rte_pktmbuf_append(m, len);
 			sym_op = op->sym;
 			sym_op->m_src = m;
-			sym_op->cipher.data.offset = 0;
-			sym_op->cipher.data.length = len;
+			sym_op->cipher.data.offset = offset;
+			sym_op->cipher.data.length = len - offset;
 			rte_crypto_op_attach_sym_session(
 				op, p->ca.crypto_sess[flow_counter++ % nb_flows]);
 		} else {
 			struct rte_crypto_asym_op *asym_op;
-			uint8_t *result = rte_zmalloc(NULL,
-					modex_test_case.result_len, 0);
+			uint8_t *result;
+
+			if (rte_mempool_get(pool, (void **)&result)) {
+				alloc_failures++;
+				continue;
+			}
 
 			op = rte_crypto_op_alloc(t->ca_op_pool,
 					 RTE_CRYPTO_OP_TYPE_ASYMMETRIC);
 			if (unlikely(op == NULL)) {
 				alloc_failures++;
+				rte_mempool_put(pool, result);
 				continue;
 			}
 
@@ -1510,6 +1523,16 @@ perf_mempool_setup(struct evt_test *test, struct evt_options *opt)
 				0, NULL, NULL,
 				perf_elt_init, /* obj constructor */
 				NULL, opt->socket_id, 0); /* flags */
+	} else if (opt->prod_type == EVT_PROD_TYPE_EVENT_CRYPTO_ADPTR &&
+			opt->crypto_op_type == RTE_CRYPTO_OP_TYPE_ASYMMETRIC)  {
+		t->pool = rte_mempool_create(test->name, /* mempool name */
+				opt->pool_sz, /* number of elements*/
+				sizeof(struct perf_elt) + modex_test_case.result_len,
+				/* element size*/
+				512, /* cache size*/
+				0, NULL, NULL,
+				NULL, /* obj constructor */
+				NULL, opt->socket_id, 0); /* flags */
 	} else {
 		t->pool = rte_pktmbuf_pool_create(test->name, /* mempool name */
 				opt->pool_sz, /* number of elements*/
diff --git a/app/test-eventdev/test_perf_common.h b/app/test-eventdev/test_perf_common.h
index 5b075bfbc4..503b6aa1db 100644
--- a/app/test-eventdev/test_perf_common.h
+++ b/app/test-eventdev/test_perf_common.h
@@ -107,11 +107,50 @@ struct perf_elt {
 		printf("%s(): lcore %d dev_id %d port=%d\n", __func__,\
 				rte_lcore_id(), dev, port)
 
+static __rte_always_inline void
+perf_mark_fwd_latency(struct perf_elt *const pe)
+{
+	pe->timestamp = rte_get_timer_cycles();
+}
+
+static __rte_always_inline int
+perf_handle_crypto_ev(struct rte_event *ev, struct perf_elt **pe, int enable_fwd_latency)
+{
+	struct rte_crypto_op *op = ev->event_ptr;
+	struct rte_mbuf *m;
+
+
+	if (unlikely(op->status != RTE_CRYPTO_OP_STATUS_SUCCESS)) {
+		rte_crypto_op_free(op);
+		return op->status;
+	}
+
+	/* Forward latency not enabled - perf data will not be accessed */
+	if (!enable_fwd_latency)
+		return 0;
+
+	/* Get pointer to perf data */
+	if (op->type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
+		if (op->sym->m_dst == NULL)
+			m = op->sym->m_src;
+		else
+			m = op->sym->m_dst;
+		*pe = rte_pktmbuf_mtod(m, struct perf_elt *);
+	} else {
+		*pe = RTE_PTR_ADD(op->asym->modex.result.data, op->asym->modex.result.length);
+	}
+
+	return 0;
+}
+
+
 static __rte_always_inline int
 perf_process_last_stage(struct rte_mempool *const pool, uint8_t prod_crypto_type,
 		struct rte_event *const ev, struct worker_data *const w,
 		void *bufs[], int const buf_sz, uint8_t count)
 {
+	void *to_free_in_bulk;
+
 	/* release fence here ensures event_prt is
 	 * stored before updating the number of
 	 * processed packets for worker lcores
@@ -119,20 +158,31 @@ perf_process_last_stage(struct rte_mempool *const pool, uint8_t prod_crypto_type
 	rte_atomic_thread_fence(__ATOMIC_RELEASE);
 	w->processed_pkts++;
 
-	if (prod_crypto_type &&
-			((struct rte_crypto_op *)ev->event_ptr)->type ==
-				RTE_CRYPTO_OP_TYPE_ASYMMETRIC) {
+	if (prod_crypto_type) {
 		struct rte_crypto_op *op = ev->event_ptr;
+		struct rte_mbuf *m;
+
+		if (op->type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
+			if (op->sym->m_dst == NULL)
+				m = op->sym->m_src;
+			else
+				m = op->sym->m_dst;
 
-		rte_free(op->asym->modex.result.data);
+			to_free_in_bulk = m;
+		} else {
+			to_free_in_bulk = op->asym->modex.result.data;
+		}
 		rte_crypto_op_free(op);
 	} else {
-		bufs[count++] = ev->event_ptr;
-		if (unlikely(count == buf_sz)) {
-			count = 0;
-			rte_mempool_put_bulk(pool, bufs, buf_sz);
-		}
+		to_free_in_bulk = ev->event_ptr;
 	}
+
+	bufs[count++] = to_free_in_bulk;
+	if (unlikely(count == buf_sz)) {
+		count = 0;
+		rte_mempool_put_bulk(pool, bufs, buf_sz);
+	}
+
 	return count;
 }
 
@@ -142,7 +192,8 @@ perf_process_last_stage_latency(struct rte_mempool *const pool, uint8_t prod_cry
 		void *bufs[], int const buf_sz, uint8_t count)
 {
 	uint64_t latency;
-	struct perf_elt *const m = ev->event_ptr;
+	struct perf_elt *pe;
+	void *to_free_in_bulk;
 
 	/* release fence here ensures event_prt is
 	 * stored before updating the number of
@@ -151,22 +202,38 @@ perf_process_last_stage_latency(struct rte_mempool *const pool, uint8_t prod_cry
 	rte_atomic_thread_fence(__ATOMIC_RELEASE);
 	w->processed_pkts++;
 
-	if (prod_crypto_type &&
-			((struct rte_crypto_op *)m)->type == RTE_CRYPTO_OP_TYPE_ASYMMETRIC) {
-		rte_free(((struct rte_crypto_op *)m)->asym->modex.result.data);
-		rte_crypto_op_free((struct rte_crypto_op *)m);
-	} else {
-		bufs[count++] = ev->event_ptr;
-		if (unlikely(count == buf_sz)) {
-			count = 0;
-			latency = rte_get_timer_cycles() - m->timestamp;
-			rte_mempool_put_bulk(pool, bufs, buf_sz);
+	if (prod_crypto_type) {
+		struct rte_crypto_op *op = ev->event_ptr;
+		struct rte_mbuf *m;
+
+		if (op->type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
+			if (op->sym->m_dst == NULL)
+				m = op->sym->m_src;
+			else
+				m = op->sym->m_dst;
+
+			to_free_in_bulk = m;
+			pe = rte_pktmbuf_mtod(m, struct perf_elt *);
 		} else {
-			latency = rte_get_timer_cycles() - m->timestamp;
+			pe = RTE_PTR_ADD(op->asym->modex.result.data,
+					 op->asym->modex.result.length);
+			to_free_in_bulk = op->asym->modex.result.data;
 		}
+		rte_crypto_op_free(op);
+	} else {
+		pe = ev->event_ptr;
+		to_free_in_bulk = pe;
+	}
 
-		w->latency += latency;
+	latency = rte_get_timer_cycles() - pe->timestamp;
+	w->latency += latency;
+
+	bufs[count++] = to_free_in_bulk;
+	if (unlikely(count == buf_sz)) {
+		count = 0;
+		rte_mempool_put_bulk(pool, bufs, buf_sz);
 	}
+
 	return count;
 }
 
diff --git a/app/test-eventdev/test_perf_queue.c b/app/test-eventdev/test_perf_queue.c
index 38509eddbb..69ef0ebbac 100644
--- a/app/test-eventdev/test_perf_queue.c
+++ b/app/test-eventdev/test_perf_queue.c
@@ -15,17 +15,6 @@ perf_queue_nb_event_queues(struct evt_options *opt)
 	return nb_prod * opt->nb_stages;
 }
 
-static __rte_always_inline void
-mark_fwd_latency(struct rte_event *const ev,
-		const uint8_t nb_stages)
-{
-	if (unlikely((ev->queue_id % nb_stages) == 0)) {
-		struct perf_elt *const m = ev->event_ptr;
-
-		m->timestamp = rte_get_timer_cycles();
-	}
-}
-
 static __rte_always_inline void
 fwd_event(struct rte_event *const ev, uint8_t *const sched_type_list,
 		const uint8_t nb_stages)
@@ -39,9 +28,12 @@ fwd_event(struct rte_event *const ev, uint8_t *const sched_type_list,
 static int
 perf_queue_worker(void *arg, const int enable_fwd_latency)
 {
+	struct perf_elt *pe = NULL;
 	uint16_t enq = 0, deq = 0;
 	struct rte_event ev;
 	PERF_WORKER_INIT;
+	uint8_t stage;
+
 
 	while (t->done == false) {
 		deq = rte_event_dequeue_burst(dev, port, &ev, 1, 0);
@@ -51,30 +43,20 @@ perf_queue_worker(void *arg, const int enable_fwd_latency)
 			continue;
 		}
 
-		if (prod_crypto_type &&
-		    (ev.event_type == RTE_EVENT_TYPE_CRYPTODEV)) {
-			struct rte_crypto_op *op = ev.event_ptr;
-
-			if (op->status == RTE_CRYPTO_OP_STATUS_SUCCESS) {
-				if (op->type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
-					if (op->sym->m_dst == NULL)
-						ev.event_ptr = op->sym->m_src;
-					else
-						ev.event_ptr = op->sym->m_dst;
-					rte_crypto_op_free(op);
-				}
-			} else {
-				rte_crypto_op_free(op);
+		if (prod_crypto_type && (ev.event_type == RTE_EVENT_TYPE_CRYPTODEV)) {
+			if (perf_handle_crypto_ev(&ev, &pe, enable_fwd_latency))
 				continue;
-			}
+		} else {
+			pe = ev.event_ptr;
 		}
 
-		if (enable_fwd_latency && !prod_timer_type)
+		stage = ev.queue_id % nb_stages;
+		if (enable_fwd_latency && !prod_timer_type && stage == 0)
 		/* first q in pipeline, mark timestamp to compute fwd latency */
-			mark_fwd_latency(&ev, nb_stages);
+			perf_mark_fwd_latency(pe);
 
 		/* last stage in pipeline */
-		if (unlikely((ev.queue_id % nb_stages) == laststage)) {
+		if (unlikely(stage == laststage)) {
 			if (enable_fwd_latency)
 				cnt = perf_process_last_stage_latency(pool, prod_crypto_type,
 					&ev, w, bufs, sz, cnt);
@@ -84,8 +66,7 @@ perf_queue_worker(void *arg, const int enable_fwd_latency)
 		} else {
 			fwd_event(&ev, sched_type_list, nb_stages);
 			do {
-				enq = rte_event_enqueue_burst(dev, port, &ev,
-							      1);
+				enq = rte_event_enqueue_burst(dev, port, &ev, 1);
 			} while (!enq && !t->done);
 		}
 	}
@@ -101,7 +82,9 @@ perf_queue_worker_burst(void *arg, const int enable_fwd_latency)
 	/* +1 to avoid prefetch out of array check */
 	struct rte_event ev[BURST_SIZE + 1];
 	uint16_t enq = 0, nb_rx = 0;
+	struct perf_elt *pe = NULL;
 	PERF_WORKER_INIT;
+	uint8_t stage;
 	uint16_t i;
 
 	while (t->done == false) {
@@ -113,35 +96,21 @@ perf_queue_worker_burst(void *arg, const int enable_fwd_latency)
 		}
 
 		for (i = 0; i < nb_rx; i++) {
-			if (prod_crypto_type &&
-			    (ev[i].event_type == RTE_EVENT_TYPE_CRYPTODEV)) {
-				struct rte_crypto_op *op = ev[i].event_ptr;
-
-				if (op->status ==
-				    RTE_CRYPTO_OP_STATUS_SUCCESS) {
-					if (op->sym->m_dst == NULL)
-						ev[i].event_ptr =
-							op->sym->m_src;
-					else
-						ev[i].event_ptr =
-							op->sym->m_dst;
-					rte_crypto_op_free(op);
-				} else {
-					rte_crypto_op_free(op);
+			if (prod_crypto_type && (ev[i].event_type == RTE_EVENT_TYPE_CRYPTODEV)) {
+				if (perf_handle_crypto_ev(&ev[i], &pe, enable_fwd_latency))
 					continue;
-				}
 			}
 
-			if (enable_fwd_latency && !prod_timer_type) {
+			stage = ev[i].queue_id % nb_stages;
+			if (enable_fwd_latency && !prod_timer_type && stage == 0) {
 				rte_prefetch0(ev[i+1].event_ptr);
 				/* first queue in pipeline.
 				 * mark time stamp to compute fwd latency
 				 */
-				mark_fwd_latency(&ev[i], nb_stages);
+				perf_mark_fwd_latency(ev[i].event_ptr);
 			}
 			/* last stage in pipeline */
-			if (unlikely((ev[i].queue_id % nb_stages) ==
-						 laststage)) {
+			if (unlikely(stage == laststage)) {
 				if (enable_fwd_latency)
 					cnt = perf_process_last_stage_latency(pool,
 						prod_crypto_type, &ev[i], w, bufs, sz, cnt);
-- 
2.25.1

