DPDK patches and discussions
* [PATCH v2] app/test: add event inline security tests
@ 2022-06-22  1:38 Volodymyr Fialko
  2022-06-22 11:33 ` [PATCH v3] " Volodymyr Fialko
                   ` (2 more replies)
  0 siblings, 3 replies; 36+ messages in thread
From: Volodymyr Fialko @ 2022-06-22  1:38 UTC (permalink / raw)
  To: dev, Akhil Goyal; +Cc: jerinj, anoobj, Volodymyr Fialko

Enable the ability to run inline security tests using the event
API (rte_event_eth_tx_adapter_enqueue/rte_event_dequeue_burst).
The new test command, event_inline_ipsec_autotest, runs the same list of
test cases as inline_ipsec_autotest, but packets go through the eventdev.

Signed-off-by: Volodymyr Fialko <vfialko@marvell.com>
---
v2:
* Fixed compilation with mingw.
---
 app/test/test_security_inline_proto.c | 392 +++++++++++++++++++++++++-
 1 file changed, 377 insertions(+), 15 deletions(-)

diff --git a/app/test/test_security_inline_proto.c b/app/test/test_security_inline_proto.c
index 82d27550f4..aa3a0d139c 100644
--- a/app/test/test_security_inline_proto.c
+++ b/app/test/test_security_inline_proto.c
@@ -21,8 +21,19 @@ test_inline_ipsec(void)
 	return TEST_SKIPPED;
 }
 
+static int
+test_event_inline_ipsec(void)
+{
+	printf("Event inline IPsec not supported on Windows, skipping test\n");
+	return TEST_SKIPPED;
+}
+
 #else
 
+#include <rte_eventdev.h>
+#include <rte_event_eth_rx_adapter.h>
+#include <rte_event_eth_tx_adapter.h>
+
 #define NB_ETHPORTS_USED		1
 #define MEMPOOL_CACHE_SIZE		32
 #define MAX_PKT_BURST			32
@@ -93,7 +104,12 @@ static struct rte_eth_txconf tx_conf = {
 	.tx_rs_thresh = 32, /* Use PMD default values */
 };
 
-uint16_t port_id;
+static uint16_t port_id;
+static uint8_t eventdev_id;
+static uint8_t rx_adapter_id;
+static uint8_t tx_adapter_id;
+
+static bool event_mode_enabled;
 
 static uint64_t link_mbps;
 
@@ -885,6 +901,51 @@ test_ipsec_with_reassembly(struct reassembly_vector *vector,
 	return ret;
 }
 
+static int
+event_tx_burst(struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+{
+	struct rte_event ev = {0};
+	int i, nb_sent = 0;
+
+	/* Convert packets to events */
+	ev.sched_type = RTE_SCHED_TYPE_PARALLEL;
+	for (i = 0; i < nb_pkts; i++) {
+		ev.mbuf = tx_pkts[i];
+		nb_sent += rte_event_eth_tx_adapter_enqueue(
+				eventdev_id, port_id, &ev, 1, 0);
+	}
+
+	return nb_sent;
+}
+
+static int
+event_rx_burst(struct rte_mbuf **rx_pkts, uint16_t nb_pkts_to_rx)
+{
+	int nb_ev, nb_rx = 0, j = 0;
+	const int ms_per_pkt = 3;
+	struct rte_event ev;
+
+	do {
+		nb_ev = rte_event_dequeue_burst(eventdev_id, port_id,
+				&ev, 1, 0);
+
+		if (nb_ev == 0) {
+			rte_delay_ms(1);
+			continue;
+		}
+
+		/* Get packet from event */
+		if (ev.event_type != RTE_EVENT_TYPE_ETHDEV) {
+			printf("Unsupported event type: %i\n",
+				ev.event_type);
+			continue;
+		}
+		rx_pkts[nb_rx++] = ev.mbuf;
+	} while (j++ < (nb_pkts_to_rx * ms_per_pkt) && nb_rx < nb_pkts_to_rx);
+
+	return nb_rx;
+}
+
 static int
 test_ipsec_inline_proto_process(struct ipsec_test_data *td,
 		struct ipsec_test_data *res_d,
@@ -958,9 +1019,13 @@ test_ipsec_inline_proto_process(struct ipsec_test_data *td,
 		}
 	}
 	/* Send packet to ethdev for inline IPsec processing. */
-	nb_sent = rte_eth_tx_burst(port_id, 0, tx_pkts_burst, nb_pkts);
+	if (event_mode_enabled)
+		nb_sent = event_tx_burst(tx_pkts_burst, nb_pkts);
+	else
+		nb_sent = rte_eth_tx_burst(port_id, 0, tx_pkts_burst, nb_pkts);
+
 	if (nb_sent != nb_pkts) {
-		printf("\nUnable to TX %d packets", nb_pkts);
+		printf("\nUnable to TX %d packets, sent: %i", nb_pkts, nb_sent);
 		for ( ; nb_sent < nb_pkts; nb_sent++)
 			rte_pktmbuf_free(tx_pkts_burst[nb_sent]);
 		ret = TEST_FAILED;
@@ -970,17 +1035,22 @@ test_ipsec_inline_proto_process(struct ipsec_test_data *td,
 	rte_pause();
 
 	/* Receive back packet on loopback interface. */
-	do {
-		rte_delay_ms(1);
-		nb_rx += rte_eth_rx_burst(port_id, 0, &rx_pkts_burst[nb_rx],
-				nb_sent - nb_rx);
-		if (nb_rx >= nb_sent)
-			break;
-	} while (j++ < 5 || nb_rx == 0);
+	if (event_mode_enabled)
+		nb_rx = event_rx_burst(rx_pkts_burst, nb_sent);
+	else
+		do {
+			rte_delay_ms(1);
+			nb_rx += rte_eth_rx_burst(port_id, 0,
+					&rx_pkts_burst[nb_rx],
+					nb_sent - nb_rx);
+			if (nb_rx >= nb_sent)
+				break;
+		} while (j++ < 5 || nb_rx == 0);
 
 	if (nb_rx != nb_sent) {
-		printf("\nUnable to RX all %d packets", nb_sent);
-		while (--nb_rx)
+		printf("\nUnable to RX all %d packets, received(%i)",
+				nb_sent, nb_rx);
+		while (--nb_rx >= 0)
 			rte_pktmbuf_free(rx_pkts_burst[nb_rx]);
 		ret = TEST_FAILED;
 		goto out;
@@ -1380,6 +1450,289 @@ inline_ipsec_testsuite_teardown(void)
 			printf("rte_eth_dev_reset: err=%s, port=%u\n",
 			       rte_strerror(-ret), port_id);
 	}
+	rte_free(tx_pkts_burst);
+	rte_free(rx_pkts_burst);
+}
+
+static int
+event_inline_ipsec_testsuite_setup(void)
+{
+	struct rte_event_eth_rx_adapter_queue_conf queue_conf = {0};
+	struct rte_event_dev_info evdev_default_conf = {0};
+	struct rte_event_dev_config eventdev_conf = {0};
+	struct rte_event_queue_conf eventq_conf = {0};
+	struct rte_event_port_conf ev_port_conf = {0};
+	const uint16_t nb_txd = 1024, nb_rxd = 1024;
+	uint16_t nb_rx_queue = 1, nb_tx_queue = 1;
+	uint8_t ev_queue_id = 0, tx_queue_id = 0;
+	int nb_eventqueue = 1, nb_eventport = 1;
+	const int all_queues = -1;
+	uint32_t caps = 0;
+	uint16_t nb_ports;
+	int ret;
+
+	printf("Start event inline IPsec test.\n");
+
+	nb_ports = rte_eth_dev_count_avail();
+	if (nb_ports == 0) {
+		printf("Test requires: 1 port, available: 0\n");
+		return TEST_SKIPPED;
+	}
+
+	init_mempools(NB_MBUF);
+
+	if (tx_pkts_burst == NULL) {
+		tx_pkts_burst = (struct rte_mbuf **)rte_calloc("tx_buff",
+					  MAX_TRAFFIC_BURST,
+					  sizeof(void *),
+					  RTE_CACHE_LINE_SIZE);
+		if (!tx_pkts_burst)
+			return -1;
+
+		rx_pkts_burst = (struct rte_mbuf **)rte_calloc("rx_buff",
+					  MAX_TRAFFIC_BURST,
+					  sizeof(void *),
+					  RTE_CACHE_LINE_SIZE);
+		if (!rx_pkts_burst)
+			return -1;
+
+	}
+
+	printf("Generate %d packets\n", MAX_TRAFFIC_BURST);
+
+	/* configuring port 0 for the test is enough */
+	port_id = 0;
+	/* port configure */
+	ret = rte_eth_dev_configure(port_id, nb_rx_queue,
+				    nb_tx_queue, &port_conf);
+	if (ret < 0) {
+		printf("Cannot configure device: err=%d, port=%d\n",
+			 ret, port_id);
+		return ret;
+	}
+
+	/* Tx queue setup */
+	ret = rte_eth_tx_queue_setup(port_id, 0, nb_txd,
+				     SOCKET_ID_ANY, &tx_conf);
+	if (ret < 0) {
+		printf("rte_eth_tx_queue_setup: err=%d, port=%d\n",
+				ret, port_id);
+		return ret;
+	}
+
+	/* Rx queue setup */
+	ret = rte_eth_rx_queue_setup(port_id, 0, nb_rxd, SOCKET_ID_ANY,
+				     &rx_conf, mbufpool);
+	if (ret < 0) {
+		printf("rte_eth_rx_queue_setup: err=%d, port=%d\n",
+				ret, port_id);
+		return ret;
+	}
+
+	/* Setup eventdev */
+	eventdev_id = 0;
+	rx_adapter_id = 0;
+	tx_adapter_id = 0;
+
+	/* Get default conf of eventdev */
+	ret = rte_event_dev_info_get(eventdev_id, &evdev_default_conf);
+	if (ret < 0) {
+		printf("Error in getting event device info[devID:%d]\n",
+				eventdev_id);
+		return ret;
+	}
+
+	/* Get Tx adapter capabilities */
+	ret = rte_event_eth_tx_adapter_caps_get(eventdev_id, tx_adapter_id, &caps);
+	if (ret < 0) {
+		printf("Failed to get event device %d eth tx adapter"
+				" capabilities for port %d\n",
+				eventdev_id, port_id);
+		return ret;
+	}
+	if (!(caps & RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT))
+		tx_queue_id = nb_eventqueue++;
+
+	eventdev_conf.nb_events_limit =
+			evdev_default_conf.max_num_events;
+	eventdev_conf.nb_event_queue_flows =
+			evdev_default_conf.max_event_queue_flows;
+	eventdev_conf.nb_event_port_dequeue_depth =
+			evdev_default_conf.max_event_port_dequeue_depth;
+	eventdev_conf.nb_event_port_enqueue_depth =
+			evdev_default_conf.max_event_port_enqueue_depth;
+
+	eventdev_conf.nb_event_queues = nb_eventqueue;
+	eventdev_conf.nb_event_ports = nb_eventport;
+
+	/* Configure event device */
+
+	ret = rte_event_dev_configure(eventdev_id, &eventdev_conf);
+	if (ret < 0) {
+		printf("Error in configuring event device\n");
+		return ret;
+	}
+
+	/* Configure event queue */
+	eventq_conf.schedule_type = RTE_SCHED_TYPE_PARALLEL;
+	eventq_conf.nb_atomic_flows = 1024;
+	eventq_conf.nb_atomic_order_sequences = 1024;
+
+	/* Setup the queue */
+	ret = rte_event_queue_setup(eventdev_id, ev_queue_id, &eventq_conf);
+	if (ret < 0) {
+		printf("Failed to setup event queue %d\n", ret);
+		return ret;
+	}
+
+	/* Configure event port */
+	ret = rte_event_port_setup(eventdev_id, port_id, NULL);
+	if (ret < 0) {
+		printf("Failed to setup event port %d\n", ret);
+		return ret;
+	}
+
+	/* Make event queue - event port link */
+	ret = rte_event_port_link(eventdev_id, port_id, NULL, NULL, 1);
+	if (ret < 0) {
+		printf("Failed to link event port %d\n", ret);
+		return ret;
+	}
+
+	/* Setup port conf */
+	ev_port_conf.new_event_threshold = 1200;
+	ev_port_conf.dequeue_depth =
+			evdev_default_conf.max_event_port_dequeue_depth;
+	ev_port_conf.enqueue_depth =
+			evdev_default_conf.max_event_port_enqueue_depth;
+
+	/* Create Rx adapter */
+	ret = rte_event_eth_rx_adapter_create(rx_adapter_id, eventdev_id,
+			&ev_port_conf);
+	if (ret < 0) {
+		printf("Failed to create rx adapter %d\n", ret);
+		return ret;
+	}
+
+	/* Setup queue conf */
+	queue_conf.ev.queue_id = ev_queue_id;
+	queue_conf.ev.sched_type = RTE_SCHED_TYPE_PARALLEL;
+	queue_conf.ev.event_type = RTE_EVENT_TYPE_ETHDEV;
+
+	/* Add queue to the adapter */
+	ret = rte_event_eth_rx_adapter_queue_add(rx_adapter_id, port_id,
+			all_queues, &queue_conf);
+	if (ret < 0) {
+		printf("Failed to add eth queue to rx adapter %d\n", ret);
+		return ret;
+	}
+
+	/* Start rx adapter */
+	ret = rte_event_eth_rx_adapter_start(rx_adapter_id);
+	if (ret < 0) {
+		printf("Failed to start rx adapter %d\n", ret);
+		return ret;
+	}
+
+	/* Create tx adapter */
+	ret = rte_event_eth_tx_adapter_create(tx_adapter_id, eventdev_id,
+			&ev_port_conf);
+	if (ret < 0) {
+		printf("Failed to create tx adapter %d\n", ret);
+		return ret;
+	}
+
+	/* Add queue to the adapter */
+	ret = rte_event_eth_tx_adapter_queue_add(tx_adapter_id, port_id,
+			all_queues);
+	if (ret < 0) {
+		printf("Failed to add eth queue to tx adapter %d\n", ret);
+		return ret;
+	}
+	/* Setup Tx queue & port */
+	if (tx_queue_id) {
+		/* Setup the queue */
+		ret = rte_event_queue_setup(eventdev_id, tx_queue_id,
+				&eventq_conf);
+		if (ret < 0) {
+			printf("Failed to setup tx event queue %d\n", ret);
+			return ret;
+		}
+		/* Link Tx event queue to Tx port */
+		ret = rte_event_port_link(eventdev_id, port_id,
+				&tx_queue_id, NULL, 1);
+		if (ret != 1) {
+			printf("Failed to link event queue to port\n");
+			return ret;
+		}
+	}
+
+	/* Start tx adapter */
+	ret = rte_event_eth_tx_adapter_start(tx_adapter_id);
+	if (ret < 0) {
+		printf("Failed to start tx adapter %d\n", ret);
+		return ret;
+	}
+
+	/* Start eventdev */
+	ret = rte_event_dev_start(eventdev_id);
+	if (ret < 0) {
+		printf("Failed to start event device %d\n", ret);
+		return ret;
+	}
+
+	event_mode_enabled = true;
+	test_ipsec_alg_list_populate();
+
+	return 0;
+}
+
+static void
+event_inline_ipsec_testsuite_teardown(void)
+{
+	uint16_t portid;
+	int ret;
+
+	event_mode_enabled = false;
+
+	/* Stop and release rx adapter */
+	ret = rte_event_eth_rx_adapter_stop(rx_adapter_id);
+	if (ret < 0)
+		printf("Failed to stop rx adapter %d\n", ret);
+	ret = rte_event_eth_rx_adapter_queue_del(rx_adapter_id, port_id, -1);
+	if (ret < 0)
+		printf("Failed to remove rx adapter queues %d\n", ret);
+	ret = rte_event_eth_rx_adapter_free(rx_adapter_id);
+	if (ret < 0)
+		printf("Failed to free rx adapter %d\n", ret);
+
+	/* Stop and release tx adapter */
+	ret = rte_event_eth_tx_adapter_stop(tx_adapter_id);
+	if (ret < 0)
+		printf("Failed to stop tx adapter %d\n", ret);
+	ret = rte_event_eth_tx_adapter_queue_del(tx_adapter_id, port_id, -1);
+	if (ret < 0)
+		printf("Failed to remove tx adapter queues %d\n", ret);
+	ret = rte_event_eth_tx_adapter_free(tx_adapter_id);
+	if (ret < 0)
+		printf("Failed to free tx adapter %d\n", ret);
+
+	/* Stop and release event devices */
+	rte_event_dev_stop(eventdev_id);
+	ret = rte_event_dev_close(eventdev_id);
+	if (ret < 0)
+		printf("Failed to close event dev %d, %d\n", eventdev_id, ret);
+
+	/* port tear down */
+	RTE_ETH_FOREACH_DEV(portid) {
+		ret = rte_eth_dev_reset(portid);
+		if (ret != 0)
+			printf("rte_eth_dev_reset: err=%s, port=%u\n",
+			       rte_strerror(-ret), port_id);
+	}
+
+	rte_free(tx_pkts_burst);
+	rte_free(rx_pkts_burst);
 }
 
 static int
@@ -1920,7 +2273,7 @@ test_ipsec_inline_pkt_replay(const void *test_data, const uint64_t esn[],
 	flags.antireplay = true;
 
 	for (i = 0; i < nb_pkts; i++) {
-		memcpy(&td_outb[i], test_data, sizeof(td_outb));
+		memcpy(&td_outb[i], test_data, sizeof(td_outb[0]));
 		td_outb[i].ipsec_xform.options.iv_gen_disable = 1;
 		td_outb[i].ipsec_xform.replay_win_sz = winsz;
 		td_outb[i].ipsec_xform.options.esn = esn_en;
@@ -2054,8 +2407,6 @@ test_ipsec_inline_proto_pkt_esn_antireplay4096(const void *test_data)
 
 static struct unit_test_suite inline_ipsec_testsuite  = {
 	.suite_name = "Inline IPsec Ethernet Device Unit Test Suite",
-	.setup = inline_ipsec_testsuite_setup,
-	.teardown = inline_ipsec_testsuite_teardown,
 	.unit_test_cases = {
 		TEST_CASE_NAMED_WITH_DATA(
 			"Outbound known vector (ESP tunnel mode IPv4 AES-GCM 128)",
@@ -2374,9 +2725,20 @@ static struct unit_test_suite inline_ipsec_testsuite  = {
 static int
 test_inline_ipsec(void)
 {
+	inline_ipsec_testsuite.setup = inline_ipsec_testsuite_setup;
+	inline_ipsec_testsuite.teardown = inline_ipsec_testsuite_teardown;
+	return unit_test_suite_runner(&inline_ipsec_testsuite);
+}
+
+static int
+test_event_inline_ipsec(void)
+{
+	inline_ipsec_testsuite.setup = event_inline_ipsec_testsuite_setup;
+	inline_ipsec_testsuite.teardown = event_inline_ipsec_testsuite_teardown;
 	return unit_test_suite_runner(&inline_ipsec_testsuite);
 }
 
 #endif /* !RTE_EXEC_ENV_WINDOWS */
 
 REGISTER_TEST_COMMAND(inline_ipsec_autotest, test_inline_ipsec);
+REGISTER_TEST_COMMAND(event_inline_ipsec_autotest, test_event_inline_ipsec);
-- 
2.25.1



* [PATCH v3] app/test: add event inline security tests
  2022-06-22  1:38 [PATCH v2] app/test: add event inline security tests Volodymyr Fialko
@ 2022-06-22 11:33 ` Volodymyr Fialko
  2022-06-22 16:32   ` Anoob Joseph
  2022-06-28  8:29   ` Akhil Goyal
  2022-06-28 12:09 ` [PATCH] doc: announce change in crypto adapter queue add Volodymyr Fialko
  2022-08-04  9:59 ` [PATCH 0/3] Vector support for event crypto adapter Volodymyr Fialko
  2 siblings, 2 replies; 36+ messages in thread
From: Volodymyr Fialko @ 2022-06-22 11:33 UTC (permalink / raw)
  To: dev, Akhil Goyal; +Cc: jerinj, anoobj, Volodymyr Fialko

Enable the ability to run inline security tests using the event
API (rte_event_eth_tx_adapter_enqueue/rte_event_dequeue_burst).
The new test command, event_inline_ipsec_autotest, runs the same list of
test cases as inline_ipsec_autotest, but packets go through the eventdev.

Signed-off-by: Volodymyr Fialko <vfialko@marvell.com>
---
v2:
* Fixed compilation with mingw.
v3:
* Fixed struct zero initialization for gcc 4.*
---
 app/test/test_security_inline_proto.c | 393 +++++++++++++++++++++++++-
 1 file changed, 378 insertions(+), 15 deletions(-)

diff --git a/app/test/test_security_inline_proto.c b/app/test/test_security_inline_proto.c
index 82d27550f4..5f26a04b06 100644
--- a/app/test/test_security_inline_proto.c
+++ b/app/test/test_security_inline_proto.c
@@ -21,8 +21,19 @@ test_inline_ipsec(void)
 	return TEST_SKIPPED;
 }
 
+static int
+test_event_inline_ipsec(void)
+{
+	printf("Event inline IPsec not supported on Windows, skipping test\n");
+	return TEST_SKIPPED;
+}
+
 #else
 
+#include <rte_eventdev.h>
+#include <rte_event_eth_rx_adapter.h>
+#include <rte_event_eth_tx_adapter.h>
+
 #define NB_ETHPORTS_USED		1
 #define MEMPOOL_CACHE_SIZE		32
 #define MAX_PKT_BURST			32
@@ -93,7 +104,12 @@ static struct rte_eth_txconf tx_conf = {
 	.tx_rs_thresh = 32, /* Use PMD default values */
 };
 
-uint16_t port_id;
+static uint16_t port_id;
+static uint8_t eventdev_id;
+static uint8_t rx_adapter_id;
+static uint8_t tx_adapter_id;
+
+static bool event_mode_enabled;
 
 static uint64_t link_mbps;
 
@@ -885,6 +901,52 @@ test_ipsec_with_reassembly(struct reassembly_vector *vector,
 	return ret;
 }
 
+static int
+event_tx_burst(struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+{
+	struct rte_event ev;
+	int i, nb_sent = 0;
+
+	/* Convert packets to events */
+	memset(&ev, 0, sizeof(ev));
+	ev.sched_type = RTE_SCHED_TYPE_PARALLEL;
+	for (i = 0; i < nb_pkts; i++) {
+		ev.mbuf = tx_pkts[i];
+		nb_sent += rte_event_eth_tx_adapter_enqueue(
+				eventdev_id, port_id, &ev, 1, 0);
+	}
+
+	return nb_sent;
+}
+
+static int
+event_rx_burst(struct rte_mbuf **rx_pkts, uint16_t nb_pkts_to_rx)
+{
+	int nb_ev, nb_rx = 0, j = 0;
+	const int ms_per_pkt = 3;
+	struct rte_event ev;
+
+	do {
+		nb_ev = rte_event_dequeue_burst(eventdev_id, port_id,
+				&ev, 1, 0);
+
+		if (nb_ev == 0) {
+			rte_delay_ms(1);
+			continue;
+		}
+
+		/* Get packet from event */
+		if (ev.event_type != RTE_EVENT_TYPE_ETHDEV) {
+			printf("Unsupported event type: %i\n",
+				ev.event_type);
+			continue;
+		}
+		rx_pkts[nb_rx++] = ev.mbuf;
+	} while (j++ < (nb_pkts_to_rx * ms_per_pkt) && nb_rx < nb_pkts_to_rx);
+
+	return nb_rx;
+}
+
 static int
 test_ipsec_inline_proto_process(struct ipsec_test_data *td,
 		struct ipsec_test_data *res_d,
@@ -958,9 +1020,13 @@ test_ipsec_inline_proto_process(struct ipsec_test_data *td,
 		}
 	}
 	/* Send packet to ethdev for inline IPsec processing. */
-	nb_sent = rte_eth_tx_burst(port_id, 0, tx_pkts_burst, nb_pkts);
+	if (event_mode_enabled)
+		nb_sent = event_tx_burst(tx_pkts_burst, nb_pkts);
+	else
+		nb_sent = rte_eth_tx_burst(port_id, 0, tx_pkts_burst, nb_pkts);
+
 	if (nb_sent != nb_pkts) {
-		printf("\nUnable to TX %d packets", nb_pkts);
+		printf("\nUnable to TX %d packets, sent: %i", nb_pkts, nb_sent);
 		for ( ; nb_sent < nb_pkts; nb_sent++)
 			rte_pktmbuf_free(tx_pkts_burst[nb_sent]);
 		ret = TEST_FAILED;
@@ -970,17 +1036,22 @@ test_ipsec_inline_proto_process(struct ipsec_test_data *td,
 	rte_pause();
 
 	/* Receive back packet on loopback interface. */
-	do {
-		rte_delay_ms(1);
-		nb_rx += rte_eth_rx_burst(port_id, 0, &rx_pkts_burst[nb_rx],
-				nb_sent - nb_rx);
-		if (nb_rx >= nb_sent)
-			break;
-	} while (j++ < 5 || nb_rx == 0);
+	if (event_mode_enabled)
+		nb_rx = event_rx_burst(rx_pkts_burst, nb_sent);
+	else
+		do {
+			rte_delay_ms(1);
+			nb_rx += rte_eth_rx_burst(port_id, 0,
+					&rx_pkts_burst[nb_rx],
+					nb_sent - nb_rx);
+			if (nb_rx >= nb_sent)
+				break;
+		} while (j++ < 5 || nb_rx == 0);
 
 	if (nb_rx != nb_sent) {
-		printf("\nUnable to RX all %d packets", nb_sent);
-		while (--nb_rx)
+		printf("\nUnable to RX all %d packets, received(%i)",
+				nb_sent, nb_rx);
+		while (--nb_rx >= 0)
 			rte_pktmbuf_free(rx_pkts_burst[nb_rx]);
 		ret = TEST_FAILED;
 		goto out;
@@ -1380,6 +1451,289 @@ inline_ipsec_testsuite_teardown(void)
 			printf("rte_eth_dev_reset: err=%s, port=%u\n",
 			       rte_strerror(-ret), port_id);
 	}
+	rte_free(tx_pkts_burst);
+	rte_free(rx_pkts_burst);
+}
+
+static int
+event_inline_ipsec_testsuite_setup(void)
+{
+	struct rte_event_eth_rx_adapter_queue_conf queue_conf = {0};
+	struct rte_event_dev_info evdev_default_conf = {0};
+	struct rte_event_dev_config eventdev_conf = {0};
+	struct rte_event_queue_conf eventq_conf = {0};
+	struct rte_event_port_conf ev_port_conf = {0};
+	const uint16_t nb_txd = 1024, nb_rxd = 1024;
+	uint16_t nb_rx_queue = 1, nb_tx_queue = 1;
+	uint8_t ev_queue_id = 0, tx_queue_id = 0;
+	int nb_eventqueue = 1, nb_eventport = 1;
+	const int all_queues = -1;
+	uint32_t caps = 0;
+	uint16_t nb_ports;
+	int ret;
+
+	printf("Start event inline IPsec test.\n");
+
+	nb_ports = rte_eth_dev_count_avail();
+	if (nb_ports == 0) {
+		printf("Test requires: 1 port, available: 0\n");
+		return TEST_SKIPPED;
+	}
+
+	init_mempools(NB_MBUF);
+
+	if (tx_pkts_burst == NULL) {
+		tx_pkts_burst = (struct rte_mbuf **)rte_calloc("tx_buff",
+					  MAX_TRAFFIC_BURST,
+					  sizeof(void *),
+					  RTE_CACHE_LINE_SIZE);
+		if (!tx_pkts_burst)
+			return -1;
+
+		rx_pkts_burst = (struct rte_mbuf **)rte_calloc("rx_buff",
+					  MAX_TRAFFIC_BURST,
+					  sizeof(void *),
+					  RTE_CACHE_LINE_SIZE);
+		if (!rx_pkts_burst)
+			return -1;
+
+	}
+
+	printf("Generate %d packets\n", MAX_TRAFFIC_BURST);
+
+	/* configuring port 0 for the test is enough */
+	port_id = 0;
+	/* port configure */
+	ret = rte_eth_dev_configure(port_id, nb_rx_queue,
+				    nb_tx_queue, &port_conf);
+	if (ret < 0) {
+		printf("Cannot configure device: err=%d, port=%d\n",
+			 ret, port_id);
+		return ret;
+	}
+
+	/* Tx queue setup */
+	ret = rte_eth_tx_queue_setup(port_id, 0, nb_txd,
+				     SOCKET_ID_ANY, &tx_conf);
+	if (ret < 0) {
+		printf("rte_eth_tx_queue_setup: err=%d, port=%d\n",
+				ret, port_id);
+		return ret;
+	}
+
+	/* Rx queue setup */
+	ret = rte_eth_rx_queue_setup(port_id, 0, nb_rxd, SOCKET_ID_ANY,
+				     &rx_conf, mbufpool);
+	if (ret < 0) {
+		printf("rte_eth_rx_queue_setup: err=%d, port=%d\n",
+				ret, port_id);
+		return ret;
+	}
+
+	/* Setup eventdev */
+	eventdev_id = 0;
+	rx_adapter_id = 0;
+	tx_adapter_id = 0;
+
+	/* Get default conf of eventdev */
+	ret = rte_event_dev_info_get(eventdev_id, &evdev_default_conf);
+	if (ret < 0) {
+		printf("Error in getting event device info[devID:%d]\n",
+				eventdev_id);
+		return ret;
+	}
+
+	/* Get Tx adapter capabilities */
+	ret = rte_event_eth_tx_adapter_caps_get(eventdev_id, tx_adapter_id, &caps);
+	if (ret < 0) {
+		printf("Failed to get event device %d eth tx adapter"
+				" capabilities for port %d\n",
+				eventdev_id, port_id);
+		return ret;
+	}
+	if (!(caps & RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT))
+		tx_queue_id = nb_eventqueue++;
+
+	eventdev_conf.nb_events_limit =
+			evdev_default_conf.max_num_events;
+	eventdev_conf.nb_event_queue_flows =
+			evdev_default_conf.max_event_queue_flows;
+	eventdev_conf.nb_event_port_dequeue_depth =
+			evdev_default_conf.max_event_port_dequeue_depth;
+	eventdev_conf.nb_event_port_enqueue_depth =
+			evdev_default_conf.max_event_port_enqueue_depth;
+
+	eventdev_conf.nb_event_queues = nb_eventqueue;
+	eventdev_conf.nb_event_ports = nb_eventport;
+
+	/* Configure event device */
+
+	ret = rte_event_dev_configure(eventdev_id, &eventdev_conf);
+	if (ret < 0) {
+		printf("Error in configuring event device\n");
+		return ret;
+	}
+
+	/* Configure event queue */
+	eventq_conf.schedule_type = RTE_SCHED_TYPE_PARALLEL;
+	eventq_conf.nb_atomic_flows = 1024;
+	eventq_conf.nb_atomic_order_sequences = 1024;
+
+	/* Setup the queue */
+	ret = rte_event_queue_setup(eventdev_id, ev_queue_id, &eventq_conf);
+	if (ret < 0) {
+		printf("Failed to setup event queue %d\n", ret);
+		return ret;
+	}
+
+	/* Configure event port */
+	ret = rte_event_port_setup(eventdev_id, port_id, NULL);
+	if (ret < 0) {
+		printf("Failed to setup event port %d\n", ret);
+		return ret;
+	}
+
+	/* Make event queue - event port link */
+	ret = rte_event_port_link(eventdev_id, port_id, NULL, NULL, 1);
+	if (ret < 0) {
+		printf("Failed to link event port %d\n", ret);
+		return ret;
+	}
+
+	/* Setup port conf */
+	ev_port_conf.new_event_threshold = 1200;
+	ev_port_conf.dequeue_depth =
+			evdev_default_conf.max_event_port_dequeue_depth;
+	ev_port_conf.enqueue_depth =
+			evdev_default_conf.max_event_port_enqueue_depth;
+
+	/* Create Rx adapter */
+	ret = rte_event_eth_rx_adapter_create(rx_adapter_id, eventdev_id,
+			&ev_port_conf);
+	if (ret < 0) {
+		printf("Failed to create rx adapter %d\n", ret);
+		return ret;
+	}
+
+	/* Setup queue conf */
+	queue_conf.ev.queue_id = ev_queue_id;
+	queue_conf.ev.sched_type = RTE_SCHED_TYPE_PARALLEL;
+	queue_conf.ev.event_type = RTE_EVENT_TYPE_ETHDEV;
+
+	/* Add queue to the adapter */
+	ret = rte_event_eth_rx_adapter_queue_add(rx_adapter_id, port_id,
+			all_queues, &queue_conf);
+	if (ret < 0) {
+		printf("Failed to add eth queue to rx adapter %d\n", ret);
+		return ret;
+	}
+
+	/* Start rx adapter */
+	ret = rte_event_eth_rx_adapter_start(rx_adapter_id);
+	if (ret < 0) {
+		printf("Failed to start rx adapter %d\n", ret);
+		return ret;
+	}
+
+	/* Create tx adapter */
+	ret = rte_event_eth_tx_adapter_create(tx_adapter_id, eventdev_id,
+			&ev_port_conf);
+	if (ret < 0) {
+		printf("Failed to create tx adapter %d\n", ret);
+		return ret;
+	}
+
+	/* Add queue to the adapter */
+	ret = rte_event_eth_tx_adapter_queue_add(tx_adapter_id, port_id,
+			all_queues);
+	if (ret < 0) {
+		printf("Failed to add eth queue to tx adapter %d\n", ret);
+		return ret;
+	}
+	/* Setup Tx queue & port */
+	if (tx_queue_id) {
+		/* Setup the queue */
+		ret = rte_event_queue_setup(eventdev_id, tx_queue_id,
+				&eventq_conf);
+		if (ret < 0) {
+			printf("Failed to setup tx event queue %d\n", ret);
+			return ret;
+		}
+		/* Link Tx event queue to Tx port */
+		ret = rte_event_port_link(eventdev_id, port_id,
+				&tx_queue_id, NULL, 1);
+		if (ret != 1) {
+			printf("Failed to link event queue to port\n");
+			return ret;
+		}
+	}
+
+	/* Start tx adapter */
+	ret = rte_event_eth_tx_adapter_start(tx_adapter_id);
+	if (ret < 0) {
+		printf("Failed to start tx adapter %d\n", ret);
+		return ret;
+	}
+
+	/* Start eventdev */
+	ret = rte_event_dev_start(eventdev_id);
+	if (ret < 0) {
+		printf("Failed to start event device %d\n", ret);
+		return ret;
+	}
+
+	event_mode_enabled = true;
+	test_ipsec_alg_list_populate();
+
+	return 0;
+}
+
+static void
+event_inline_ipsec_testsuite_teardown(void)
+{
+	uint16_t portid;
+	int ret;
+
+	event_mode_enabled = false;
+
+	/* Stop and release rx adapter */
+	ret = rte_event_eth_rx_adapter_stop(rx_adapter_id);
+	if (ret < 0)
+		printf("Failed to stop rx adapter %d\n", ret);
+	ret = rte_event_eth_rx_adapter_queue_del(rx_adapter_id, port_id, -1);
+	if (ret < 0)
+		printf("Failed to remove rx adapter queues %d\n", ret);
+	ret = rte_event_eth_rx_adapter_free(rx_adapter_id);
+	if (ret < 0)
+		printf("Failed to free rx adapter %d\n", ret);
+
+	/* Stop and release tx adapter */
+	ret = rte_event_eth_tx_adapter_stop(tx_adapter_id);
+	if (ret < 0)
+		printf("Failed to stop tx adapter %d\n", ret);
+	ret = rte_event_eth_tx_adapter_queue_del(tx_adapter_id, port_id, -1);
+	if (ret < 0)
+		printf("Failed to remove tx adapter queues %d\n", ret);
+	ret = rte_event_eth_tx_adapter_free(tx_adapter_id);
+	if (ret < 0)
+		printf("Failed to free tx adapter %d\n", ret);
+
+	/* Stop and release event devices */
+	rte_event_dev_stop(eventdev_id);
+	ret = rte_event_dev_close(eventdev_id);
+	if (ret < 0)
+		printf("Failed to close event dev %d, %d\n", eventdev_id, ret);
+
+	/* port tear down */
+	RTE_ETH_FOREACH_DEV(portid) {
+		ret = rte_eth_dev_reset(portid);
+		if (ret != 0)
+			printf("rte_eth_dev_reset: err=%s, port=%u\n",
+			       rte_strerror(-ret), port_id);
+	}
+
+	rte_free(tx_pkts_burst);
+	rte_free(rx_pkts_burst);
 }
 
 static int
@@ -1920,7 +2274,7 @@ test_ipsec_inline_pkt_replay(const void *test_data, const uint64_t esn[],
 	flags.antireplay = true;
 
 	for (i = 0; i < nb_pkts; i++) {
-		memcpy(&td_outb[i], test_data, sizeof(td_outb));
+		memcpy(&td_outb[i], test_data, sizeof(td_outb[0]));
 		td_outb[i].ipsec_xform.options.iv_gen_disable = 1;
 		td_outb[i].ipsec_xform.replay_win_sz = winsz;
 		td_outb[i].ipsec_xform.options.esn = esn_en;
@@ -2054,8 +2408,6 @@ test_ipsec_inline_proto_pkt_esn_antireplay4096(const void *test_data)
 
 static struct unit_test_suite inline_ipsec_testsuite  = {
 	.suite_name = "Inline IPsec Ethernet Device Unit Test Suite",
-	.setup = inline_ipsec_testsuite_setup,
-	.teardown = inline_ipsec_testsuite_teardown,
 	.unit_test_cases = {
 		TEST_CASE_NAMED_WITH_DATA(
 			"Outbound known vector (ESP tunnel mode IPv4 AES-GCM 128)",
@@ -2374,9 +2726,20 @@ static struct unit_test_suite inline_ipsec_testsuite  = {
 static int
 test_inline_ipsec(void)
 {
+	inline_ipsec_testsuite.setup = inline_ipsec_testsuite_setup;
+	inline_ipsec_testsuite.teardown = inline_ipsec_testsuite_teardown;
+	return unit_test_suite_runner(&inline_ipsec_testsuite);
+}
+
+static int
+test_event_inline_ipsec(void)
+{
+	inline_ipsec_testsuite.setup = event_inline_ipsec_testsuite_setup;
+	inline_ipsec_testsuite.teardown = event_inline_ipsec_testsuite_teardown;
 	return unit_test_suite_runner(&inline_ipsec_testsuite);
 }
 
 #endif /* !RTE_EXEC_ENV_WINDOWS */
 
 REGISTER_TEST_COMMAND(inline_ipsec_autotest, test_inline_ipsec);
+REGISTER_TEST_COMMAND(event_inline_ipsec_autotest, test_event_inline_ipsec);
-- 
2.25.1



* RE: [PATCH v3] app/test: add event inline security tests
  2022-06-22 11:33 ` [PATCH v3] " Volodymyr Fialko
@ 2022-06-22 16:32   ` Anoob Joseph
  2022-06-28  8:29   ` Akhil Goyal
  1 sibling, 0 replies; 36+ messages in thread
From: Anoob Joseph @ 2022-06-22 16:32 UTC (permalink / raw)
  To: Volodymyr Fialko, dev, Akhil Goyal
  Cc: Jerin Jacob Kollanukkaran, Volodymyr Fialko

> 
> Enable the ability to run inline security tests using the event
> API (rte_event_eth_tx_adapter_enqueue/rte_event_dequeue_burst).
> The new test command, event_inline_ipsec_autotest, runs the same list of
> test cases as inline_ipsec_autotest, but packets go through the eventdev.
> 
> Signed-off-by: Volodymyr Fialko <vfialko@marvell.com>

Acked-by: Anoob Joseph <anoobj@marvell.com>


* RE: [PATCH v3] app/test: add event inline security tests
  2022-06-22 11:33 ` [PATCH v3] " Volodymyr Fialko
  2022-06-22 16:32   ` Anoob Joseph
@ 2022-06-28  8:29   ` Akhil Goyal
  1 sibling, 0 replies; 36+ messages in thread
From: Akhil Goyal @ 2022-06-28  8:29 UTC (permalink / raw)
  To: Volodymyr Fialko, dev
  Cc: Jerin Jacob Kollanukkaran, Anoob Joseph, Volodymyr Fialko

> Enable the ability to run inline security tests using the event
> API (rte_event_eth_tx_adapter_enqueue/rte_event_dequeue_burst).
> The new test command, event_inline_ipsec_autotest, runs the same list of
> test cases as inline_ipsec_autotest, but packets go through the eventdev.
> 
> Signed-off-by: Volodymyr Fialko <vfialko@marvell.com>
> ---
> v2:
> * Fixed compilation with mingw.
> v3:
> * Fixed struct zero initialization for gcc 4.*
Acked-by: Akhil Goyal <gakhil@marvell.com>

Applied to dpdk-next-crypto

Thanks.


* [PATCH] doc: announce change in crypto adapter queue add
  2022-06-22  1:38 [PATCH v2] app/test: add event inline security tests Volodymyr Fialko
  2022-06-22 11:33 ` [PATCH v3] " Volodymyr Fialko
@ 2022-06-28 12:09 ` Volodymyr Fialko
  2022-06-28 12:40   ` Akhil Goyal
                     ` (2 more replies)
  2022-08-04  9:59 ` [PATCH 0/3] Vector support for event crypto adapter Volodymyr Fialko
  2 siblings, 3 replies; 36+ messages in thread
From: Volodymyr Fialko @ 2022-06-28 12:09 UTC (permalink / raw)
  To: dev, Ray Kinsella; +Cc: jerinj, gakhil, anoobj, Volodymyr Fialko

The function `rte_event_crypto_adapter_queue_pair_add` will accept a
`rte_event_crypto_adapter_queue_conf` argument instead of `rte_event`.
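
For illustration, a minimal sketch of the call-site change (`id`,
`cdev_id`, `qp_id` and `qid` are placeholder names):

	/* Before: the event information is passed directly. */
	struct rte_event ev = {
		.sched_type = RTE_SCHED_TYPE_ATOMIC,
		.queue_id = qid,
	};
	rte_event_crypto_adapter_queue_pair_add(id, cdev_id, qp_id, &ev);

	/* After: the event becomes one field of the queue configuration,
	 * leaving room for the additional vector parameters.
	 */
	struct rte_event_crypto_adapter_queue_conf conf = {
		.ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
		.ev.queue_id = qid,
	};
	rte_event_crypto_adapter_queue_pair_add(id, cdev_id, qp_id, &conf);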

Signed-off-by: Volodymyr Fialko <vfialko@marvell.com>
---
 doc/guides/rel_notes/deprecation.rst | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/doc/guides/rel_notes/deprecation.rst b/doc/guides/rel_notes/deprecation.rst
index 4e5b23c53d..63a6459f17 100644
--- a/doc/guides/rel_notes/deprecation.rst
+++ b/doc/guides/rel_notes/deprecation.rst
@@ -125,3 +125,9 @@ Deprecation Notices
   applications should be updated to use the ``dmadev`` library instead,
   with the underlying HW-functionality being provided by the ``ioat`` or
   ``idxd`` dma drivers
+
+* eventdev: The function ``rte_event_crypto_adapter_queue_pair_add`` will
+  accept a configuration of type ``rte_event_crypto_adapter_queue_conf``
+  instead of ``rte_event``, similar to the signature of
+  ``rte_event_eth_rx_adapter_queue_add``. The event will be one of the
+  configuration fields, together with additional vector parameters.
-- 
2.25.1



* RE: [PATCH] doc: announce change in crypto adapter queue add
  2022-06-28 12:09 ` [PATCH] doc: announce change in crypto adapter queue add Volodymyr Fialko
@ 2022-06-28 12:40   ` Akhil Goyal
  2022-07-11 14:56     ` Jerin Jacob
  2022-07-14  9:04   ` Hemant Agrawal
  2022-07-17 11:32   ` Thomas Monjalon
  2 siblings, 1 reply; 36+ messages in thread
From: Akhil Goyal @ 2022-06-28 12:40 UTC (permalink / raw)
  To: Volodymyr Fialko, dev, Ray Kinsella
  Cc: Jerin Jacob Kollanukkaran, Anoob Joseph, Volodymyr Fialko

> Subject: [PATCH] doc: announce change in crypto adapter queue add
> 
> The function `rte_event_crypto_adapter_queue_pair_add` will accept
> a `rte_event_crypto_adapter_queue_conf` argument instead of `rte_event`.
> 
> Signed-off-by: Volodymyr Fialko <vfialko@marvell.com>
Acked-by: Akhil Goyal <gakhil@marvell.com>



* Re: [PATCH] doc: announce change in crypto adapter queue add
  2022-06-28 12:40   ` Akhil Goyal
@ 2022-07-11 14:56     ` Jerin Jacob
  2022-07-12  5:31       ` [EXT] " Akhil Goyal
  0 siblings, 1 reply; 36+ messages in thread
From: Jerin Jacob @ 2022-07-11 14:56 UTC (permalink / raw)
  To: Akhil Goyal
  Cc: Volodymyr Fialko, dev, Ray Kinsella, Jerin Jacob Kollanukkaran,
	Anoob Joseph

On Tue, Jun 28, 2022 at 6:12 PM Akhil Goyal <gakhil@marvell.com> wrote:
>
> > Subject: [PATCH] doc: announce change in crypto adapter queue add
> >
> > The function `rte_event_crypto_adapter_queue_pair_add` will accept
> > a `rte_event_crypto_adapter_queue_conf` argument instead of `rte_event`.
> >
> > Signed-off-by: Volodymyr Fialko <vfialko@marvell.com>
> Acked-by: Akhil Goyal <gakhil@marvell.com>

Acked-by: Jerin Jacob <jerinj@marvell.com>


>


* RE: [EXT] Re: [PATCH] doc: announce change in crypto adapter queue add
  2022-07-11 14:56     ` Jerin Jacob
@ 2022-07-12  5:31       ` Akhil Goyal
  2022-07-13  6:49         ` Gujjar, Abhinandan S
  0 siblings, 1 reply; 36+ messages in thread
From: Akhil Goyal @ 2022-07-12  5:31 UTC (permalink / raw)
  To: Jerin Jacob, Abhinandan Gujjar
  Cc: Volodymyr Fialko, dev, Ray Kinsella, Jerin Jacob Kollanukkaran,
	Anoob Joseph

> On Tue, Jun 28, 2022 at 6:12 PM Akhil Goyal <gakhil@marvell.com> wrote:
> >
> > > Subject: [PATCH] doc: announce change in crypto adapter queue add
> > >
> > > The function `rte_event_crypto_adapter_queue_pair_add` will accept
> > > a `rte_event_crypto_adapter_queue_conf` argument instead of `rte_event`.
> > >
> > > Signed-off-by: Volodymyr Fialko <vfialko@marvell.com>
> > Acked-by: Akhil Goyal <gakhil@marvell.com>
> 
> Acked-by: Jerin Jacob <jerinj@marvell.com>
Hi Abhinandan,

Could you please Ack this deprecation notice.

Regards,
Akhil


* RE: [EXT] Re: [PATCH] doc: announce change in crypto adapter queue add
  2022-07-12  5:31       ` [EXT] " Akhil Goyal
@ 2022-07-13  6:49         ` Gujjar, Abhinandan S
  0 siblings, 0 replies; 36+ messages in thread
From: Gujjar, Abhinandan S @ 2022-07-13  6:49 UTC (permalink / raw)
  To: Akhil Goyal, Jerin Jacob
  Cc: Volodymyr Fialko, dev, Ray Kinsella, Jerin Jacob Kollanukkaran,
	Anoob Joseph



> -----Original Message-----
> From: Akhil Goyal <gakhil@marvell.com>
> Sent: Tuesday, July 12, 2022 11:02 AM
> To: Jerin Jacob <jerinjacobk@gmail.com>; Gujjar, Abhinandan S
> <abhinandan.gujjar@intel.com>
> Cc: Volodymyr Fialko <vfialko@marvell.com>; dev@dpdk.org; Ray Kinsella
> <mdr@ashroe.eu>; Jerin Jacob Kollanukkaran <jerinj@marvell.com>; Anoob
> Joseph <anoobj@marvell.com>
> Subject: RE: [EXT] Re: [PATCH] doc: announce change in crypto adapter queue
> add
> 
> > On Tue, Jun 28, 2022 at 6:12 PM Akhil Goyal <gakhil@marvell.com> wrote:
> > >
> > > > Subject: [PATCH] doc: announce change in crypto adapter queue add
> > > >
> > > > The function `rte_event_crypto_adapter_queue_pair_add` will accept
> > > > a `rte_event_crypto_adapter_queue_conf` argument instead of `rte_event`.
> > > >
> > > > Signed-off-by: Volodymyr Fialko <vfialko@marvell.com>
> > > Acked-by: Akhil Goyal <gakhil@marvell.com>
> >
> > Acked-by: Jerin Jacob <jerinj@marvell.com>
Acked-by: Abhinandan Gujjar <abhinandan.gujjar@intel.com>
> Hi Abhinandan,
> 
> Could you please Ack this deprecation notice.
> 
> Regards,
> Akhil


* Re: [PATCH] doc: announce change in crypto adapter queue add
  2022-06-28 12:09 ` [PATCH] doc: announce change in crypto adapter queue add Volodymyr Fialko
  2022-06-28 12:40   ` Akhil Goyal
@ 2022-07-14  9:04   ` Hemant Agrawal
  2022-07-17 11:32   ` Thomas Monjalon
  2 siblings, 0 replies; 36+ messages in thread
From: Hemant Agrawal @ 2022-07-14  9:04 UTC (permalink / raw)
  To: Volodymyr Fialko, dev, Ray Kinsella; +Cc: jerinj, gakhil, anoobj


On 6/28/2022 5:39 PM, Volodymyr Fialko wrote:
> The function `rte_event_crypto_adapter_queue_pair_add` will accept
> a `rte_event_crypto_adapter_queue_conf` argument instead of `rte_event`.
>
> Signed-off-by: Volodymyr Fialko <vfialko@marvell.com>
Acked-by: Hemant Agrawal <hemant.agrawal@nxp.com>

> ---
>   doc/guides/rel_notes/deprecation.rst | 6 ++++++
>   1 file changed, 6 insertions(+)
>
> diff --git a/doc/guides/rel_notes/deprecation.rst b/doc/guides/rel_notes/deprecation.rst
> index 4e5b23c53d..63a6459f17 100644
> --- a/doc/guides/rel_notes/deprecation.rst
> +++ b/doc/guides/rel_notes/deprecation.rst
> @@ -125,3 +125,9 @@ Deprecation Notices
>     applications should be updated to use the ``dmadev`` library instead,
>     with the underlying HW-functionality being provided by the ``ioat`` or
>     ``idxd`` dma drivers
> +
> +* eventdev: The function ``rte_event_crypto_adapter_queue_pair_add`` will
> +  accept a configuration of type ``rte_event_crypto_adapter_queue_conf``
> +  instead of ``rte_event``, similar to the signature of
> +  ``rte_event_eth_rx_adapter_queue_add``. The event will be one of the
> +  configuration fields, together with additional vector parameters.


* Re: [PATCH] doc: announce change in crypto adapter queue add
  2022-06-28 12:09 ` [PATCH] doc: announce change in crypto adapter queue add Volodymyr Fialko
  2022-06-28 12:40   ` Akhil Goyal
  2022-07-14  9:04   ` Hemant Agrawal
@ 2022-07-17 11:32   ` Thomas Monjalon
  2 siblings, 0 replies; 36+ messages in thread
From: Thomas Monjalon @ 2022-07-17 11:32 UTC (permalink / raw)
  To: Volodymyr Fialko; +Cc: dev, Ray Kinsella, jerinj, gakhil, anoobj

28/06/2022 14:09, Volodymyr Fialko:
> The function `rte_event_crypto_adapter_queue_pair_add` will accept
> a `rte_event_crypto_adapter_queue_conf` argument instead of `rte_event`.
> 
> Signed-off-by: Volodymyr Fialko <vfialko@marvell.com>

    Acked-by: Akhil Goyal <gakhil@marvell.com>
    Acked-by: Jerin Jacob <jerinj@marvell.com>
    Acked-by: Abhinandan Gujjar <abhinandan.gujjar@intel.com>
    Acked-by: Hemant Agrawal <hemant.agrawal@nxp.com>

Applied, thanks.




* [PATCH 0/3] Vector support for event crypto adapter
  2022-06-22  1:38 [PATCH v2] app/test: add event inline security tests Volodymyr Fialko
  2022-06-22 11:33 ` [PATCH v3] " Volodymyr Fialko
  2022-06-28 12:09 ` [PATCH] doc: announce change in crypto adapter queue add Volodymyr Fialko
@ 2022-08-04  9:59 ` Volodymyr Fialko
  2022-08-04  9:59   ` [PATCH 1/3] eventdev: introduce event cryptodev vector type Volodymyr Fialko
                     ` (3 more replies)
  2 siblings, 4 replies; 36+ messages in thread
From: Volodymyr Fialko @ 2022-08-04  9:59 UTC (permalink / raw)
  To: dev; +Cc: jerinj, gakhil, anoobj, Volodymyr Fialko

Introduce the ability to aggregate crypto operations processed by the
event crypto adapter into a single event containing an rte_event_vector
whose event type is RTE_EVENT_TYPE_CRYPTODEV_VECTOR.
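
A rough sketch of the consumer side, assuming the generic `ptrs` array of
`rte_event_vector` carries the completed `rte_crypto_op` pointers
(`evdev_id`, `port_id` and `process_crypto_op()` are placeholders):

	struct rte_event ev;

	while (rte_event_dequeue_burst(evdev_id, port_id, &ev, 1, 0)) {
		if (ev.event_type == RTE_EVENT_TYPE_CRYPTODEV_VECTOR) {
			struct rte_event_vector *vec = ev.vec;
			uint16_t i;

			/* One event now carries a burst of completions. */
			for (i = 0; i < vec->nb_elem; i++)
				process_crypto_op(vec->ptrs[i]);
			/* Return the vector to its mempool when done. */
			rte_mempool_put(rte_mempool_from_obj(vec), vec);
		} else if (ev.event_type == RTE_EVENT_TYPE_CRYPTODEV) {
			/* Non-vector completions arrive as before. */
			process_crypto_op(ev.event_ptr);
		}
	}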

Volodymyr Fialko (3):
  eventdev: introduce event cryptodev vector type
  crypto/cnxk: add burst enqueue for event crypto
  crypto/cnxk: add vectorization for event crypto

 app/test-eventdev/test_perf_common.c          |  10 +-
 app/test/test_event_crypto_adapter.c          |  12 +-
 .../prog_guide/event_crypto_adapter.rst       |  23 +-
 drivers/crypto/cnxk/cn10k_cryptodev_ops.c     | 455 ++++++++++++++++--
 drivers/crypto/cnxk/cn10k_cryptodev_ops.h     |   9 +-
 drivers/crypto/cnxk/cnxk_cryptodev_ops.h      |   9 +-
 drivers/crypto/cnxk/meson.build               |   2 +-
 drivers/crypto/cnxk/version.map               |   1 +
 drivers/event/cnxk/cn10k_eventdev.c           |  47 +-
 drivers/event/cnxk/cn10k_worker.c             |  10 -
 drivers/event/cnxk/cn10k_worker.h             |   8 +-
 drivers/event/cnxk/cn9k_eventdev.c            |  14 +-
 drivers/event/cnxk/cnxk_eventdev.h            |   4 +-
 drivers/event/cnxk/cnxk_eventdev_adptr.c      |  17 +-
 drivers/event/dpaa/dpaa_eventdev.c            |   9 +-
 drivers/event/dpaa2/dpaa2_eventdev.c          |   9 +-
 drivers/event/octeontx/ssovf_evdev.c          |   4 +-
 lib/eventdev/eventdev_pmd.h                   |  35 +-
 lib/eventdev/eventdev_trace.h                 |   6 +-
 lib/eventdev/rte_event_crypto_adapter.c       |  90 +++-
 lib/eventdev/rte_event_crypto_adapter.h       | 101 +++-
 lib/eventdev/rte_event_eth_rx_adapter.h       |   3 +-
 lib/eventdev/rte_eventdev.h                   |   8 +
 23 files changed, 768 insertions(+), 118 deletions(-)

-- 
2.25.1



* [PATCH 1/3] eventdev: introduce event cryptodev vector type
  2022-08-04  9:59 ` [PATCH 0/3] Vector support for event crypto adapter Volodymyr Fialko
@ 2022-08-04  9:59   ` Volodymyr Fialko
  2022-09-21 18:32     ` Akhil Goyal
  2022-09-24  8:43     ` Gujjar, Abhinandan S
  2022-08-04  9:59   ` [PATCH 2/3] crypto/cnxk: add burst enqueue for event crypto Volodymyr Fialko
                     ` (2 subsequent siblings)
  3 siblings, 2 replies; 36+ messages in thread
From: Volodymyr Fialko @ 2022-08-04  9:59 UTC (permalink / raw)
  To: dev, Jerin Jacob, Abhinandan Gujjar, Pavan Nikhilesh,
	Shijith Thotton, Hemant Agrawal, Sachin Saxena,
	Jay Jayatheerthan
  Cc: gakhil, anoobj, Volodymyr Fialko

Introduce the ability to aggregate crypto operations processed by the
event crypto adapter into a single event containing an rte_event_vector
whose event type is RTE_EVENT_TYPE_CRYPTODEV_VECTOR.

To enable vectorization, the application should set
RTE_EVENT_CRYPTO_ADAPTER_EVENT_VECTOR in
rte_event_crypto_adapter_queue_conf::flags and provide a vector
configuration within the bounds of
rte_event_crypto_adapter_vector_limits, which can be obtained by calling
rte_event_crypto_adapter_vector_limits_get.

The event crypto adapter is responsible for vectorizing the crypto
operations based on the response information provided in
rte_event_crypto_metadata::response_info.

Drivers and tests are updated accordingly for the new API.
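
A minimal application-side sketch of the setup (`vector_mp`, `adapter_id`,
`evdev_id`, `cdev_id`, `qp_id` and `ev_queue_id` are placeholders; error
handling omitted):

	struct rte_event_crypto_adapter_vector_limits limits;
	struct rte_event_crypto_adapter_queue_conf conf;
	uint32_t caps;

	memset(&conf, 0, sizeof(conf));
	conf.ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
	conf.ev.queue_id = ev_queue_id;

	rte_event_crypto_adapter_caps_get(evdev_id, cdev_id, &caps);
	if (caps & RTE_EVENT_CRYPTO_ADAPTER_CAP_EVENT_VECTOR) {
		rte_event_crypto_adapter_vector_limits_get(evdev_id,
				cdev_id, &limits);
		conf.flags = RTE_EVENT_CRYPTO_ADAPTER_EVENT_VECTOR;
		/* Size and timeout must stay within the reported limits. */
		conf.vector_sz = limits.max_sz;
		conf.vector_timeout_ns = limits.min_timeout_ns;
		/* Elements must fit rte_event_vector + vector_sz pointers. */
		conf.vector_mp = vector_mp;
	}

	rte_event_crypto_adapter_queue_pair_add(adapter_id, cdev_id,
			qp_id, &conf);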

Signed-off-by: Volodymyr Fialko <vfialko@marvell.com>
---
 app/test-eventdev/test_perf_common.c          |  10 +-
 app/test/test_event_crypto_adapter.c          |  12 ++-
 .../prog_guide/event_crypto_adapter.rst       |  23 +++-
 drivers/event/cnxk/cn10k_eventdev.c           |   4 +-
 drivers/event/cnxk/cn9k_eventdev.c            |   5 +-
 drivers/event/dpaa/dpaa_eventdev.c            |   9 +-
 drivers/event/dpaa2/dpaa2_eventdev.c          |   9 +-
 drivers/event/octeontx/ssovf_evdev.c          |   4 +-
 lib/eventdev/eventdev_pmd.h                   |  35 +++++-
 lib/eventdev/eventdev_trace.h                 |   6 +-
 lib/eventdev/rte_event_crypto_adapter.c       |  90 ++++++++++++++--
 lib/eventdev/rte_event_crypto_adapter.h       | 101 +++++++++++++++++-
 lib/eventdev/rte_event_eth_rx_adapter.h       |   3 +-
 lib/eventdev/rte_eventdev.h                   |   8 ++
 14 files changed, 276 insertions(+), 43 deletions(-)

diff --git a/app/test-eventdev/test_perf_common.c b/app/test-eventdev/test_perf_common.c
index 81420be73a..c770bc93f6 100644
--- a/app/test-eventdev/test_perf_common.c
+++ b/app/test-eventdev/test_perf_common.c
@@ -837,14 +837,14 @@ perf_event_crypto_adapter_setup(struct test_perf *t, struct prod_data *p)
 	}
 
 	if (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND) {
-		struct rte_event response_info;
+		struct rte_event_crypto_adapter_queue_conf conf;
 
-		response_info.event = 0;
-		response_info.sched_type = RTE_SCHED_TYPE_ATOMIC;
-		response_info.queue_id = p->queue_id;
+		memset(&conf, 0, sizeof(conf));
+		conf.ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
+		conf.ev.queue_id = p->queue_id;
 		ret = rte_event_crypto_adapter_queue_pair_add(
 			TEST_PERF_CA_ID, p->ca.cdev_id, p->ca.cdev_qp_id,
-			&response_info);
+			&conf);
 	} else {
 		ret = rte_event_crypto_adapter_queue_pair_add(
 			TEST_PERF_CA_ID, p->ca.cdev_id, p->ca.cdev_qp_id, NULL);
diff --git a/app/test/test_event_crypto_adapter.c b/app/test/test_event_crypto_adapter.c
index 2ecc7e2cea..bb617c1042 100644
--- a/app/test/test_event_crypto_adapter.c
+++ b/app/test/test_event_crypto_adapter.c
@@ -1175,6 +1175,10 @@ test_crypto_adapter_create(void)
 static int
 test_crypto_adapter_qp_add_del(void)
 {
+	struct rte_event_crypto_adapter_queue_conf queue_conf = {
+		.ev = response_info,
+	};
+
 	uint32_t cap;
 	int ret;
 
@@ -1183,7 +1187,7 @@ test_crypto_adapter_qp_add_del(void)
 
 	if (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND) {
 		ret = rte_event_crypto_adapter_queue_pair_add(TEST_ADAPTER_ID,
-				TEST_CDEV_ID, TEST_CDEV_QP_ID, &response_info);
+				TEST_CDEV_ID, TEST_CDEV_QP_ID, &queue_conf);
 	} else
 		ret = rte_event_crypto_adapter_queue_pair_add(TEST_ADAPTER_ID,
 					TEST_CDEV_ID, TEST_CDEV_QP_ID, NULL);
@@ -1206,6 +1210,10 @@ configure_event_crypto_adapter(enum rte_event_crypto_adapter_mode mode)
 		.new_event_threshold = 1200,
 	};
 
+	struct rte_event_crypto_adapter_queue_conf queue_conf = {
+		.ev = response_info,
+	};
+
 	uint32_t cap;
 	int ret;
 
@@ -1238,7 +1246,7 @@ configure_event_crypto_adapter(enum rte_event_crypto_adapter_mode mode)
 
 	if (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND) {
 		ret = rte_event_crypto_adapter_queue_pair_add(TEST_ADAPTER_ID,
-				TEST_CDEV_ID, TEST_CDEV_QP_ID, &response_info);
+				TEST_CDEV_ID, TEST_CDEV_QP_ID, &queue_conf);
 	} else
 		ret = rte_event_crypto_adapter_queue_pair_add(TEST_ADAPTER_ID,
 				TEST_CDEV_ID, TEST_CDEV_QP_ID, NULL);
diff --git a/doc/guides/prog_guide/event_crypto_adapter.rst b/doc/guides/prog_guide/event_crypto_adapter.rst
index 4fb5c688e0..554df7e358 100644
--- a/doc/guides/prog_guide/event_crypto_adapter.rst
+++ b/doc/guides/prog_guide/event_crypto_adapter.rst
@@ -201,10 +201,10 @@ capability, event information must be passed to the add API.
 
         ret = rte_event_crypto_adapter_caps_get(id, evdev, &cap);
         if (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND) {
-                struct rte_event event;
+                struct rte_event_crypto_adapter_queue_conf conf;
 
-                // Fill in event information & pass it to add API
-                rte_event_crypto_adapter_queue_pair_add(id, cdev_id, qp_id, &event);
+                // Fill in conf.ev information & pass it to add API
+                rte_event_crypto_adapter_queue_pair_add(id, cdev_id, qp_id, &conf);
         } else
                 rte_event_crypto_adapter_queue_pair_add(id, cdev_id, qp_id, NULL);
 
@@ -291,6 +291,23 @@ the ``rte_crypto_op``.
                 rte_memcpy(op + len, &m_data, sizeof(m_data));
         }
 
+Enable event vectorization
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The event crypto adapter can aggregate outgoing crypto operations based
+on the response information provided in
+``rte_event_crypto_metadata::response_info`` and generate a ``rte_event``
+containing an ``rte_event_vector`` with event type
+``RTE_EVENT_TYPE_CRYPTODEV_VECTOR``. To enable vectorization, the
+application should set RTE_EVENT_CRYPTO_ADAPTER_EVENT_VECTOR in
+``rte_event_crypto_adapter_queue_conf::flags`` and provide the vector
+configuration (size, mempool, etc.) within the bounds of
+``rte_event_crypto_adapter_vector_limits``, which can be obtained by
+calling ``rte_event_crypto_adapter_vector_limits_get()``.
+
+The RTE_EVENT_CRYPTO_ADAPTER_CAP_EVENT_VECTOR capability indicates
+whether the PMD supports this feature.
+
 Start the adapter instance
 ~~~~~~~~~~~~~~~~~~~~~~~~~~
 
diff --git a/drivers/event/cnxk/cn10k_eventdev.c b/drivers/event/cnxk/cn10k_eventdev.c
index 5a0cab40a9..e74ec57382 100644
--- a/drivers/event/cnxk/cn10k_eventdev.c
+++ b/drivers/event/cnxk/cn10k_eventdev.c
@@ -889,11 +889,11 @@ static int
 cn10k_crypto_adapter_qp_add(const struct rte_eventdev *event_dev,
 			    const struct rte_cryptodev *cdev,
 			    int32_t queue_pair_id,
-			    const struct rte_event *event)
+			    const struct rte_event_crypto_adapter_queue_conf *conf)
 {
 	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
 
-	RTE_SET_USED(event);
+	RTE_SET_USED(conf);
 
 	CNXK_VALID_DEV_OR_ERR_RET(event_dev->dev, "event_cn10k");
 	CNXK_VALID_DEV_OR_ERR_RET(cdev->device, "crypto_cn10k");
diff --git a/drivers/event/cnxk/cn9k_eventdev.c b/drivers/event/cnxk/cn9k_eventdev.c
index 2e27030049..45ed547cb0 100644
--- a/drivers/event/cnxk/cn9k_eventdev.c
+++ b/drivers/event/cnxk/cn9k_eventdev.c
@@ -1120,11 +1120,12 @@ cn9k_crypto_adapter_caps_get(const struct rte_eventdev *event_dev,
 static int
 cn9k_crypto_adapter_qp_add(const struct rte_eventdev *event_dev,
 			   const struct rte_cryptodev *cdev,
-			   int32_t queue_pair_id, const struct rte_event *event)
+			   int32_t queue_pair_id,
+			   const struct rte_event_crypto_adapter_queue_conf *conf)
 {
 	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
 
-	RTE_SET_USED(event);
+	RTE_SET_USED(conf);
 
 	CNXK_VALID_DEV_OR_ERR_RET(event_dev->dev, "event_cn9k");
 	CNXK_VALID_DEV_OR_ERR_RET(cdev->device, "crypto_cn9k");
diff --git a/drivers/event/dpaa/dpaa_eventdev.c b/drivers/event/dpaa/dpaa_eventdev.c
index ff6cc0be18..2b9ecd9fbf 100644
--- a/drivers/event/dpaa/dpaa_eventdev.c
+++ b/drivers/event/dpaa/dpaa_eventdev.c
@@ -26,6 +26,7 @@
 #include <rte_eventdev.h>
 #include <eventdev_pmd_vdev.h>
 #include <rte_ethdev.h>
+#include <rte_event_crypto_adapter.h>
 #include <rte_event_eth_rx_adapter.h>
 #include <rte_event_eth_tx_adapter.h>
 #include <cryptodev_pmd.h>
@@ -775,10 +776,10 @@ static int
 dpaa_eventdev_crypto_queue_add(const struct rte_eventdev *dev,
 		const struct rte_cryptodev *cryptodev,
 		int32_t rx_queue_id,
-		const struct rte_event *ev)
+		const struct rte_event_crypto_adapter_queue_conf *conf)
 {
 	struct dpaa_eventdev *priv = dev->data->dev_private;
-	uint8_t ev_qid = ev->queue_id;
+	uint8_t ev_qid = conf->ev.queue_id;
 	u16 ch_id = priv->evq_info[ev_qid].ch_id;
 	int ret;
 
@@ -786,10 +787,10 @@ dpaa_eventdev_crypto_queue_add(const struct rte_eventdev *dev,
 
 	if (rx_queue_id == -1)
 		return dpaa_eventdev_crypto_queue_add_all(dev,
-				cryptodev, ev);
+				cryptodev, &conf->ev);
 
 	ret = dpaa_sec_eventq_attach(cryptodev, rx_queue_id,
-			ch_id, ev);
+			ch_id, &conf->ev);
 	if (ret) {
 		DPAA_EVENTDEV_ERR(
 			"dpaa_sec_eventq_attach failed: ret: %d\n", ret);
diff --git a/drivers/event/dpaa2/dpaa2_eventdev.c b/drivers/event/dpaa2/dpaa2_eventdev.c
index ffc7b8b073..0137736794 100644
--- a/drivers/event/dpaa2/dpaa2_eventdev.c
+++ b/drivers/event/dpaa2/dpaa2_eventdev.c
@@ -26,6 +26,7 @@
 #include <rte_bus_vdev.h>
 #include <ethdev_driver.h>
 #include <cryptodev_pmd.h>
+#include <rte_event_crypto_adapter.h>
 #include <rte_event_eth_rx_adapter.h>
 #include <rte_event_eth_tx_adapter.h>
 
@@ -865,10 +866,10 @@ static int
 dpaa2_eventdev_crypto_queue_add(const struct rte_eventdev *dev,
 		const struct rte_cryptodev *cryptodev,
 		int32_t rx_queue_id,
-		const struct rte_event *ev)
+		const struct rte_event_crypto_adapter_queue_conf *conf)
 {
 	struct dpaa2_eventdev *priv = dev->data->dev_private;
-	uint8_t ev_qid = ev->queue_id;
+	uint8_t ev_qid = conf->ev.queue_id;
 	struct dpaa2_dpcon_dev *dpcon = priv->evq_info[ev_qid].dpcon;
 	int ret;
 
@@ -876,10 +877,10 @@ dpaa2_eventdev_crypto_queue_add(const struct rte_eventdev *dev,
 
 	if (rx_queue_id == -1)
 		return dpaa2_eventdev_crypto_queue_add_all(dev,
-				cryptodev, ev);
+				cryptodev, &conf->ev);
 
 	ret = dpaa2_sec_eventq_attach(cryptodev, rx_queue_id,
-				      dpcon, ev);
+				      dpcon, &conf->ev);
 	if (ret) {
 		DPAA2_EVENTDEV_ERR(
 			"dpaa2_sec_eventq_attach failed: ret: %d\n", ret);
diff --git a/drivers/event/octeontx/ssovf_evdev.c b/drivers/event/octeontx/ssovf_evdev.c
index 9e14e35d10..17acd8ef64 100644
--- a/drivers/event/octeontx/ssovf_evdev.c
+++ b/drivers/event/octeontx/ssovf_evdev.c
@@ -745,12 +745,12 @@ static int
 ssovf_crypto_adapter_qp_add(const struct rte_eventdev *dev,
 			    const struct rte_cryptodev *cdev,
 			    int32_t queue_pair_id,
-			    const struct rte_event *event)
+			    const struct rte_event_crypto_adapter_queue_conf *conf)
 {
 	struct cpt_instance *qp;
 	uint8_t qp_id;
 
-	RTE_SET_USED(event);
+	RTE_SET_USED(conf);
 
 	if (queue_pair_id == -1) {
 		for (qp_id = 0; qp_id < cdev->data->nb_queue_pairs; qp_id++) {
diff --git a/lib/eventdev/eventdev_pmd.h b/lib/eventdev/eventdev_pmd.h
index 69402668d8..bcfc9cbcb2 100644
--- a/lib/eventdev/eventdev_pmd.h
+++ b/lib/eventdev/eventdev_pmd.h
@@ -907,6 +907,7 @@ rte_event_pmd_selftest_seqn(struct rte_mbuf *mbuf)
 }
 
 struct rte_cryptodev;
+struct rte_event_crypto_adapter_queue_conf;
 
 /**
  * This API may change without prior notice
@@ -961,11 +962,11 @@ typedef int (*eventdev_crypto_adapter_caps_get_t)
  *   - <0: Error code returned by the driver function.
  *
  */
-typedef int (*eventdev_crypto_adapter_queue_pair_add_t)
-			(const struct rte_eventdev *dev,
-			 const struct rte_cryptodev *cdev,
-			 int32_t queue_pair_id,
-			 const struct rte_event *event);
+typedef int (*eventdev_crypto_adapter_queue_pair_add_t)(
+		const struct rte_eventdev *dev,
+		const struct rte_cryptodev *cdev,
+		int32_t queue_pair_id,
+		const struct rte_event_crypto_adapter_queue_conf *queue_conf);
 
 
 /**
@@ -1074,6 +1075,27 @@ typedef int (*eventdev_crypto_adapter_stats_reset)
 			(const struct rte_eventdev *dev,
 			 const struct rte_cryptodev *cdev);
 
+struct rte_event_crypto_adapter_vector_limits;
+/**
+ * Get event vector limits for a given event, crypto device pair.
+ *
+ * @param dev
+ *   Event device pointer
+ *
+ * @param cdev
+ *   Crypto device pointer
+ *
+ * @param[out] limits
+ *   Pointer to the limits structure to be filled.
+ *
+ * @return
+ *   - 0: Success.
+ *   - <0: Error code returned by the driver function.
+ */
+typedef int (*eventdev_crypto_adapter_vector_limits_get_t)(
+	const struct rte_eventdev *dev, const struct rte_cryptodev *cdev,
+	struct rte_event_crypto_adapter_vector_limits *limits);
+
 /**
  * Retrieve the event device's eth Tx adapter capabilities.
  *
@@ -1339,6 +1361,9 @@ struct eventdev_ops {
 	/**< Get crypto stats */
 	eventdev_crypto_adapter_stats_reset crypto_adapter_stats_reset;
 	/**< Reset crypto stats */
+	eventdev_crypto_adapter_vector_limits_get_t
+		crypto_adapter_vector_limits_get;
+	/**< Get event vector limits for the crypto adapter */
 
 	eventdev_eth_rx_adapter_q_stats_get eth_rx_adapter_queue_stats_get;
 	/**< Get ethernet Rx queue stats */
diff --git a/lib/eventdev/eventdev_trace.h b/lib/eventdev/eventdev_trace.h
index 5ec43d80ee..d48cd58850 100644
--- a/lib/eventdev/eventdev_trace.h
+++ b/lib/eventdev/eventdev_trace.h
@@ -18,6 +18,7 @@ extern "C" {
 #include <rte_trace_point.h>
 
 #include "rte_eventdev.h"
+#include "rte_event_crypto_adapter.h"
 #include "rte_event_eth_rx_adapter.h"
 #include "rte_event_timer_adapter.h"
 
@@ -271,11 +272,12 @@ RTE_TRACE_POINT(
 RTE_TRACE_POINT(
 	rte_eventdev_trace_crypto_adapter_queue_pair_add,
 	RTE_TRACE_POINT_ARGS(uint8_t adptr_id, uint8_t cdev_id,
-		const void *event, int32_t queue_pair_id),
+		int32_t queue_pair_id,
+		const struct rte_event_crypto_adapter_queue_conf *conf),
 	rte_trace_point_emit_u8(adptr_id);
 	rte_trace_point_emit_u8(cdev_id);
 	rte_trace_point_emit_i32(queue_pair_id);
-	rte_trace_point_emit_ptr(event);
+	rte_trace_point_emit_ptr(conf);
 )
 
 RTE_TRACE_POINT(
diff --git a/lib/eventdev/rte_event_crypto_adapter.c b/lib/eventdev/rte_event_crypto_adapter.c
index 7c695176f4..73a4f231e2 100644
--- a/lib/eventdev/rte_event_crypto_adapter.c
+++ b/lib/eventdev/rte_event_crypto_adapter.c
@@ -921,11 +921,12 @@ int
 rte_event_crypto_adapter_queue_pair_add(uint8_t id,
 			uint8_t cdev_id,
 			int32_t queue_pair_id,
-			const struct rte_event *event)
+			const struct rte_event_crypto_adapter_queue_conf *conf)
 {
+	struct rte_event_crypto_adapter_vector_limits limits;
 	struct event_crypto_adapter *adapter;
-	struct rte_eventdev *dev;
 	struct crypto_device_info *dev_info;
+	struct rte_eventdev *dev;
 	uint32_t cap;
 	int ret;
 
@@ -951,11 +952,47 @@ rte_event_crypto_adapter_queue_pair_add(uint8_t id,
 	}
 
 	if ((cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND) &&
-	    (event == NULL)) {
+	    (conf == NULL)) {
 		RTE_EDEV_LOG_ERR("Conf value can not be NULL for dev_id=%u",
 				  cdev_id);
 		return -EINVAL;
 	}
+	if ((conf != NULL) &&
+	    (conf->flags & RTE_EVENT_CRYPTO_ADAPTER_EVENT_VECTOR)) {
+		if ((cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_EVENT_VECTOR) == 0) {
+			RTE_EDEV_LOG_ERR("Event vectorization is not supported, "
+					 "dev %" PRIu8 " cdev %" PRIu8, id,
+					 cdev_id);
+			return -ENOTSUP;
+		}
+
+		ret = rte_event_crypto_adapter_vector_limits_get(
+			adapter->eventdev_id, cdev_id, &limits);
+		if (ret < 0) {
+			RTE_EDEV_LOG_ERR("Failed to get event device vector "
+					 "limits, dev %" PRIu8 " cdev %" PRIu8,
+					 id, cdev_id);
+			return -EINVAL;
+		}
+		if (conf->vector_sz < limits.min_sz ||
+		    conf->vector_sz > limits.max_sz ||
+		    conf->vector_timeout_ns < limits.min_timeout_ns ||
+		    conf->vector_timeout_ns > limits.max_timeout_ns ||
+		    conf->vector_mp == NULL) {
+			RTE_EDEV_LOG_ERR("Invalid event vector configuration,"
+					 " dev %" PRIu8 " cdev %" PRIu8,
+					 id, cdev_id);
+			return -EINVAL;
+		}
+		if (conf->vector_mp->elt_size <
+		    (sizeof(struct rte_event_vector) +
+		     (sizeof(uintptr_t) * conf->vector_sz))) {
+			RTE_EDEV_LOG_ERR("Invalid event vector configuration,"
+					 " dev %" PRIu8 " cdev %" PRIu8,
+					 id, cdev_id);
+			return -EINVAL;
+		}
+	}
 
 	dev_info = &adapter->cdevs[cdev_id];
 
@@ -990,7 +1027,7 @@ rte_event_crypto_adapter_queue_pair_add(uint8_t id,
 		ret = (*dev->dev_ops->crypto_adapter_queue_pair_add)(dev,
 				dev_info->dev,
 				queue_pair_id,
-				event);
+				conf);
 		if (ret)
 			return ret;
 
@@ -1030,8 +1067,8 @@ rte_event_crypto_adapter_queue_pair_add(uint8_t id,
 		rte_service_component_runstate_set(adapter->service_id, 1);
 	}
 
-	rte_eventdev_trace_crypto_adapter_queue_pair_add(id, cdev_id, event,
-		queue_pair_id);
+	rte_eventdev_trace_crypto_adapter_queue_pair_add(id, cdev_id,
+		queue_pair_id, conf);
 	return 0;
 }
 
@@ -1290,3 +1327,44 @@ rte_event_crypto_adapter_event_port_get(uint8_t id, uint8_t *event_port_id)
 
 	return 0;
 }
+
+int
+rte_event_crypto_adapter_vector_limits_get(
+	uint8_t dev_id, uint16_t cdev_id,
+	struct rte_event_crypto_adapter_vector_limits *limits)
+{
+	struct rte_cryptodev *cdev;
+	struct rte_eventdev *dev;
+	uint32_t cap;
+	int ret;
+
+	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
+
+	if (!rte_cryptodev_is_valid_dev(cdev_id)) {
+		RTE_EDEV_LOG_ERR("Invalid cdev_id=%" PRIu16, cdev_id);
+		return -EINVAL;
+	}
+
+	if (limits == NULL)
+		return -EINVAL;
+
+	dev = &rte_eventdevs[dev_id];
+	cdev = rte_cryptodev_pmd_get_dev(cdev_id);
+
+	ret = rte_event_crypto_adapter_caps_get(dev_id, cdev_id, &cap);
+	if (ret) {
+		RTE_EDEV_LOG_ERR("Failed to get adapter caps edev %" PRIu8
+				 " cdev %" PRIu16, dev_id, cdev_id);
+		return ret;
+	}
+
+	if (!(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_EVENT_VECTOR))
+		return -ENOTSUP;
+
+	RTE_FUNC_PTR_OR_ERR_RET(
+		*dev->dev_ops->crypto_adapter_vector_limits_get,
+		-ENOTSUP);
+
+	return dev->dev_ops->crypto_adapter_vector_limits_get(
+		dev, cdev, limits);
+}
diff --git a/lib/eventdev/rte_event_crypto_adapter.h b/lib/eventdev/rte_event_crypto_adapter.h
index d90a19e72c..7dd6171b9b 100644
--- a/lib/eventdev/rte_event_crypto_adapter.h
+++ b/lib/eventdev/rte_event_crypto_adapter.h
@@ -253,6 +253,78 @@ struct rte_event_crypto_adapter_conf {
 	 */
 };
 
+#define RTE_EVENT_CRYPTO_ADAPTER_EVENT_VECTOR	0x1
+/**< This flag indicates that crypto operations processed on the crypto
+ * adapter need to be vectorized
+ * @see rte_event_crypto_adapter_queue_conf::flags
+ */
+
+/**
+ * Adapter queue configuration structure
+ */
+struct rte_event_crypto_adapter_queue_conf {
+	uint32_t flags;
+	/**< Flags for handling crypto operations
+	 * @see RTE_EVENT_CRYPTO_ADAPTER_EVENT_VECTOR
+	 */
+	struct rte_event ev;
+	/**< If HW supports cryptodev queue pair to event queue binding,
+	 * application is expected to fill in event information.
+	 * @see RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND
+	 */
+	uint16_t vector_sz;
+	/**< Indicates the maximum number of crypto operations to combine and
+	 * form a vector.
+	 * @see rte_event_crypto_adapter_vector_limits::min_sz
+	 * @see rte_event_crypto_adapter_vector_limits::max_sz
+	 * Valid when RTE_EVENT_CRYPTO_ADAPTER_EVENT_VECTOR flag is set in
+	 * @see rte_event_crypto_adapter_queue_conf::flags
+	 */
+	uint64_t vector_timeout_ns;
+	/**<
+	 * Indicates the maximum number of nanoseconds to wait for aggregating
+	 * crypto operations. Should be within vectorization limits of the
+	 * adapter
+	 * @see rte_event_crypto_adapter_vector_limits::min_timeout_ns
+	 * @see rte_event_crypto_adapter_vector_limits::max_timeout_ns
+	 * Valid when RTE_EVENT_CRYPTO_ADAPTER_EVENT_VECTOR flag is set in
+	 * @see rte_event_crypto_adapter_queue_conf::flags
+	 */
+	struct rte_mempool *vector_mp;
+	/**< Indicates the mempool that should be used for allocating
+	 * rte_event_vector container.
+	 * Should be created by using `rte_event_vector_pool_create`.
+	 * Valid when RTE_EVENT_CRYPTO_ADAPTER_EVENT_VECTOR flag is set in
+	 * @see rte_event_crypto_adapter_queue_conf::flags.
+	 */
+};
+
+/**
+ * A structure used to retrieve event crypto adapter vector limits.
+ */
+struct rte_event_crypto_adapter_vector_limits {
+	uint16_t min_sz;
+	/**< Minimum vector limit configurable.
+	 * @see rte_event_crypto_adapter_queue_conf::vector_sz
+	 */
+	uint16_t max_sz;
+	/**< Maximum vector limit configurable.
+	 * @see rte_event_crypto_adapter_queue_conf::vector_sz
+	 */
+	uint8_t log2_sz;
+	/**< True if the size configured should be in log2.
+	 * @see rte_event_crypto_adapter_queue_conf::vector_sz
+	 */
+	uint64_t min_timeout_ns;
+	/**< Minimum vector timeout configurable.
+	 * @see rte_event_crypto_adapter_queue_conf::vector_timeout_ns
+	 */
+	uint64_t max_timeout_ns;
+	/**< Maximum vector timeout configurable.
+	 * @see rte_event_crypto_adapter_queue_conf::vector_timeout_ns
+	 */
+};
+
 /**
  * Function type used for adapter configuration callback. The callback is
  * used to fill in members of the struct rte_event_crypto_adapter_conf, this
@@ -392,10 +464,9 @@ rte_event_crypto_adapter_free(uint8_t id);
  *  Cryptodev queue pair identifier. If queue_pair_id is set -1,
  *  adapter adds all the pre configured queue pairs to the instance.
  *
- * @param event
- *  if HW supports cryptodev queue pair to event queue binding, application is
- *  expected to fill in event information, else it will be NULL.
- *  @see RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND
+ * @param conf
+ *  Additional configuration structure of type
+ *  *rte_event_crypto_adapter_queue_conf*
  *
  * @return
  *  - 0: Success, queue pair added correctly.
@@ -405,7 +476,7 @@ int
 rte_event_crypto_adapter_queue_pair_add(uint8_t id,
 			uint8_t cdev_id,
 			int32_t queue_pair_id,
-			const struct rte_event *event);
+			const struct rte_event_crypto_adapter_queue_conf *conf);
 
 /**
  * Delete a queue pair from an event crypto adapter.
@@ -523,6 +594,26 @@ rte_event_crypto_adapter_service_id_get(uint8_t id, uint32_t *service_id);
 int
 rte_event_crypto_adapter_event_port_get(uint8_t id, uint8_t *event_port_id);
 
+/**
+ * Retrieve vector limits for a given event dev and crypto dev pair.
+ * @see rte_event_crypto_adapter_vector_limits
+ *
+ * @param dev_id
+ *  Event device identifier.
+ * @param cdev_id
+ *  Crypto device identifier.
+ * @param [out] limits
+ *  A pointer to the rte_event_crypto_adapter_vector_limits structure that
+ *  has to be filled.
+ *
+ * @return
+ *  - 0: Success.
+ *  - <0: Error code on failure.
+ */
+int rte_event_crypto_adapter_vector_limits_get(
+	uint8_t dev_id, uint16_t cdev_id,
+	struct rte_event_crypto_adapter_vector_limits *limits);
+
 /**
  * Enqueue a burst of crypto operations as event objects supplied in *rte_event*
  * structure on an event crypto adapter designated by its event *dev_id* through
diff --git a/lib/eventdev/rte_event_eth_rx_adapter.h b/lib/eventdev/rte_event_eth_rx_adapter.h
index 3608a7b2cf..c8f2936866 100644
--- a/lib/eventdev/rte_event_eth_rx_adapter.h
+++ b/lib/eventdev/rte_event_eth_rx_adapter.h
@@ -457,7 +457,8 @@ int rte_event_eth_rx_adapter_free(uint8_t id);
  * @see RTE_EVENT_ETH_RX_ADAPTER_CAP_MULTI_EVENTQ
  *
  * @param conf
- *  Additional configuration structure of type *rte_event_eth_rx_adapter_conf*
+ *  Additional configuration structure of type
+ *  *rte_event_eth_rx_adapter_queue_conf*
  *
  * @return
  *  - 0: Success, Receive queue added correctly.
diff --git a/lib/eventdev/rte_eventdev.h b/lib/eventdev/rte_eventdev.h
index 6a6f6ea4c1..1a737bf851 100644
--- a/lib/eventdev/rte_eventdev.h
+++ b/lib/eventdev/rte_eventdev.h
@@ -1203,6 +1203,9 @@ struct rte_event_vector {
 #define RTE_EVENT_TYPE_ETH_RX_ADAPTER_VECTOR                                   \
 	(RTE_EVENT_TYPE_VECTOR | RTE_EVENT_TYPE_ETH_RX_ADAPTER)
 /**< The event vector generated from eth Rx adapter. */
+#define RTE_EVENT_TYPE_CRYPTODEV_VECTOR                                        \
+	(RTE_EVENT_TYPE_VECTOR | RTE_EVENT_TYPE_CRYPTODEV)
+/**< The event vector generated from cryptodev adapter. */
 
 #define RTE_EVENT_TYPE_MAX              0x10
 /**< Maximum number of event types */
@@ -1420,6 +1423,11 @@ rte_event_timer_adapter_caps_get(uint8_t dev_id, uint32_t *caps);
  * the private data information along with the crypto session.
  */
 
+#define RTE_EVENT_CRYPTO_ADAPTER_CAP_EVENT_VECTOR   0x10
+/**< Flag indicates HW is capable of aggregating processed
+ * crypto operations into rte_event_vector.
+ */
+
 /**
  * Retrieve the event device's crypto adapter capabilities for the
  * specified cryptodev device
-- 
2.25.1


^ permalink raw reply	[flat|nested] 36+ messages in thread
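
To see how the pieces introduced above fit together, here is a minimal
application-side sketch of enabling vectorization with the new API. The
adapter/device identifiers, the event queue, the pool size of 1024 and the
vector size of 64 are assumptions made for the example, not values taken
from the patch.

#include <errno.h>
#include <string.h>
#include <rte_event_crypto_adapter.h>
#include <rte_eventdev.h>
#include <rte_lcore.h>

/* Query the vector limits and add a cryptodev queue pair with event
 * vectorization enabled. All identifiers come from application setup.
 */
static int
add_vectorized_qp(uint8_t adapter_id, uint8_t evdev_id, uint8_t cdev_id,
		  int32_t qp_id, uint8_t ev_queue_id)
{
	struct rte_event_crypto_adapter_vector_limits limits;
	struct rte_event_crypto_adapter_queue_conf conf;
	uint16_t vec_sz = 64; /* illustrative choice */
	int ret;

	ret = rte_event_crypto_adapter_vector_limits_get(evdev_id, cdev_id,
							 &limits);
	if (ret < 0)
		return ret; /* e.g. -ENOTSUP when vectors are unsupported */

	/* Clamp the requested size into the PMD's advertised range. */
	if (vec_sz > limits.max_sz)
		vec_sz = limits.max_sz;
	if (vec_sz < limits.min_sz)
		vec_sz = limits.min_sz;

	memset(&conf, 0, sizeof(conf));
	conf.flags = RTE_EVENT_CRYPTO_ADAPTER_EVENT_VECTOR;
	conf.ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
	conf.ev.queue_id = ev_queue_id;
	conf.vector_sz = vec_sz;
	conf.vector_timeout_ns = limits.min_timeout_ns;
	/* Each pool element must hold vec_sz pointers. */
	conf.vector_mp = rte_event_vector_pool_create("ca_vec_pool", 1024, 0,
						      vec_sz, rte_socket_id());
	if (conf.vector_mp == NULL)
		return -ENOMEM;

	return rte_event_crypto_adapter_queue_pair_add(adapter_id, cdev_id,
						       qp_id, &conf);
}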

* [PATCH 2/3] crypto/cnxk: add burst enqueue for event crypto
  2022-08-04  9:59 ` [PATCH 0/3] Vector support for event crypto adapter Volodymyr Fialko
  2022-08-04  9:59   ` [PATCH 1/3] eventdev: introduce event cryptodev vector type Volodymyr Fialko
@ 2022-08-04  9:59   ` Volodymyr Fialko
  2022-08-04  9:59   ` [PATCH 3/3] crypto/cnxk: add vectorization " Volodymyr Fialko
  2022-09-26 11:36   ` [PATCH v2 0/3] Vector support for event crypto adapter Volodymyr Fialko
  3 siblings, 0 replies; 36+ messages in thread
From: Volodymyr Fialko @ 2022-08-04  9:59 UTC (permalink / raw)
  To: dev, Ankur Dwivedi, Anoob Joseph, Tejasree Kondoj,
	Pavan Nikhilesh, Shijith Thotton
  Cc: jerinj, gakhil, Volodymyr Fialko

Added support for burst enqueue for the cn10k event crypto adapter.
Instructions will be grouped by queue pair and sent in a burst.

Signed-off-by: Volodymyr Fialko <vfialko@marvell.com>
---
 drivers/crypto/cnxk/cn10k_cryptodev_ops.c | 141 +++++++++++++++-------
 drivers/crypto/cnxk/cn10k_cryptodev_ops.h |   7 +-
 drivers/crypto/cnxk/meson.build           |   2 +-
 drivers/event/cnxk/cn10k_eventdev.c       |   2 +-
 drivers/event/cnxk/cn10k_worker.c         |  10 --
 drivers/event/cnxk/cn10k_worker.h         |   2 -
 6 files changed, 105 insertions(+), 59 deletions(-)

diff --git a/drivers/crypto/cnxk/cn10k_cryptodev_ops.c b/drivers/crypto/cnxk/cn10k_cryptodev_ops.c
index f761ba36e2..bfa6374005 100644
--- a/drivers/crypto/cnxk/cn10k_cryptodev_ops.c
+++ b/drivers/crypto/cnxk/cn10k_cryptodev_ops.c
@@ -9,11 +9,12 @@
 
 #include "cn10k_cryptodev.h"
 #include "cn10k_cryptodev_ops.h"
-#include "cn10k_ipsec_la_ops.h"
 #include "cn10k_ipsec.h"
+#include "cn10k_ipsec_la_ops.h"
 #include "cnxk_ae.h"
 #include "cnxk_cryptodev.h"
 #include "cnxk_cryptodev_ops.h"
+#include "cnxk_eventdev.h"
 #include "cnxk_se.h"
 
 #include "roc_api.h"
@@ -391,79 +392,135 @@ cn10k_ca_meta_info_extract(struct rte_crypto_op *op,
 	return 0;
 }
 
-uint16_t
-cn10k_cpt_crypto_adapter_enqueue(uintptr_t base, struct rte_crypto_op *op)
+static inline uint16_t
+ca_lmtst_burst_submit(struct cn10k_sso_hws *ws, uint64_t w2[], struct cnxk_cpt_qp *qp,
+		      struct rte_crypto_op *op[], uint16_t nb_ops)
 {
+	struct cpt_inflight_req *infl_reqs[PKTS_PER_LOOP];
+	uint64_t lmt_base, lmt_arg, io_addr;
+	struct cpt_inst_s *inst, *inst_base;
 	struct cpt_inflight_req *infl_req;
-	uint64_t lmt_base, lmt_arg, w2;
-	struct cpt_inst_s *inst;
 	union cpt_fc_write_s fc;
-	struct cnxk_cpt_qp *qp;
 	uint64_t *fc_addr;
 	uint16_t lmt_id;
-	int ret;
+	int ret, i;
 
-	ret = cn10k_ca_meta_info_extract(op, &qp, &w2);
-	if (unlikely(ret)) {
-		rte_errno = EINVAL;
-		return 0;
-	}
+	lmt_base = qp->lmtline.lmt_base;
+	io_addr = qp->lmtline.io_addr;
+	fc_addr = qp->lmtline.fc_addr;
+
+	const uint32_t fc_thresh = qp->lmtline.fc_thresh;
+
+	ROC_LMT_BASE_ID_GET(lmt_base, lmt_id);
+	inst_base = (struct cpt_inst_s *)lmt_base;
 
 	if (unlikely(!qp->ca.enabled)) {
 		rte_errno = EINVAL;
 		return 0;
 	}
 
-	if (unlikely(rte_mempool_get(qp->ca.req_mp, (void **)&infl_req))) {
+	if (unlikely(rte_mempool_get_bulk(qp->ca.req_mp, (void **)infl_reqs, nb_ops))) {
 		rte_errno = ENOMEM;
 		return 0;
 	}
-	infl_req->op_flags = 0;
-
-	lmt_base = qp->lmtline.lmt_base;
-	fc_addr = qp->lmtline.fc_addr;
 
-	const uint32_t fc_thresh = qp->lmtline.fc_thresh;
+	for (i = 0; i < nb_ops; i++) {
+		inst = &inst_base[2 * i];
+		infl_req = infl_reqs[i];
+		infl_req->op_flags = 0;
 
-	ROC_LMT_BASE_ID_GET(lmt_base, lmt_id);
-	inst = (struct cpt_inst_s *)lmt_base;
+		ret = cn10k_cpt_fill_inst(qp, &op[i], inst, infl_req);
+		if (unlikely(ret != 1)) {
+			plt_dp_err("Could not process op: %p", op[i]);
+			if (i != 0)
+				goto submit;
+			else
+				goto put;
+		}
 
-	ret = cn10k_cpt_fill_inst(qp, &op, inst, infl_req);
-	if (unlikely(ret != 1)) {
-		plt_dp_err("Could not process op: %p", op);
-		rte_mempool_put(qp->ca.req_mp, infl_req);
-		return 0;
+		infl_req->res.cn10k.compcode = CPT_COMP_NOT_DONE;
+		infl_req->qp = qp;
+		inst->w0.u64 = 0;
+		inst->res_addr = (uint64_t)&infl_req->res;
+		inst->w2.u64 = w2[i];
+		inst->w3.u64 = CNXK_CPT_INST_W3(1, infl_req);
 	}
 
-	infl_req->cop = op;
-	infl_req->res.cn10k.compcode = CPT_COMP_NOT_DONE;
-	infl_req->qp = qp;
-	inst->w0.u64 = 0;
-	inst->res_addr = (uint64_t)&infl_req->res;
-	inst->w2.u64 = w2;
-	inst->w3.u64 = CNXK_CPT_INST_W3(1, infl_req);
-
 	fc.u64[0] = __atomic_load_n(fc_addr, __ATOMIC_RELAXED);
 	if (unlikely(fc.s.qsize > fc_thresh)) {
-		rte_mempool_put(qp->ca.req_mp, infl_req);
 		rte_errno = EAGAIN;
-		return 0;
+		i = 0;
+		goto put;
 	}
 
-	if (inst->w2.s.tt == RTE_SCHED_TYPE_ORDERED)
-		roc_sso_hws_head_wait(base);
+submit:
+	if (CNXK_TT_FROM_TAG(ws->gw_rdata) == SSO_TT_ORDERED)
+		roc_sso_hws_head_wait(ws->base);
 
-	lmt_arg = ROC_CN10K_CPT_LMT_ARG | (uint64_t)lmt_id;
-	roc_lmt_submit_steorl(lmt_arg, qp->lmtline.io_addr);
+	if (i > PKTS_PER_STEORL) {
+		lmt_arg = ROC_CN10K_CPT_LMT_ARG | (PKTS_PER_STEORL - 1) << 12 | (uint64_t)lmt_id;
+		roc_lmt_submit_steorl(lmt_arg, io_addr);
+		lmt_arg = ROC_CN10K_CPT_LMT_ARG | (i - PKTS_PER_STEORL - 1) << 12 |
+			  (uint64_t)(lmt_id + PKTS_PER_STEORL);
+		roc_lmt_submit_steorl(lmt_arg, io_addr);
+	} else {
+		lmt_arg = ROC_CN10K_CPT_LMT_ARG | (i - 1) << 12 | (uint64_t)lmt_id;
+		roc_lmt_submit_steorl(lmt_arg, io_addr);
+	}
 
 	rte_io_wmb();
 
-	return 1;
+put:
+	if (unlikely(i != nb_ops))
+		rte_mempool_put_bulk(qp->ca.req_mp, (void *)&infl_reqs[i], nb_ops - i);
+
+	return i;
+}
+
+uint16_t __rte_hot
+cn10k_cpt_crypto_adapter_enqueue(void *ws, struct rte_event ev[], uint16_t nb_events)
+{
+	struct rte_crypto_op *ops[PKTS_PER_LOOP], *op;
+	struct cnxk_cpt_qp *qp, *curr_qp = NULL;
+	uint64_t w2s[PKTS_PER_LOOP], w2;
+	uint16_t submitted, count = 0;
+	int ret, i, ops_len = 0;
+
+	for (i = 0; i < nb_events; i++) {
+		op = ev[i].event_ptr;
+		ret = cn10k_ca_meta_info_extract(op, &qp, &w2);
+		if (unlikely(ret)) {
+			rte_errno = EINVAL;
+			return count;
+		}
+
+		if (qp != curr_qp) {
+			if (ops_len) {
+				submitted = ca_lmtst_burst_submit(ws, w2s, curr_qp, ops, ops_len);
+				count += submitted;
+				if (unlikely(submitted != ops_len))
+					return count;
+				ops_len = 0;
+			}
+			curr_qp = qp;
+		}
+		w2s[ops_len] = w2;
+		ops[ops_len] = op;
+		if (++ops_len == PKTS_PER_LOOP) {
+			submitted = ca_lmtst_burst_submit(ws, w2s, curr_qp, ops, ops_len);
+			count += submitted;
+			if (unlikely(submitted != ops_len))
+				return count;
+			ops_len = 0;
+		}
+	}
+	if (ops_len)
+		count += ca_lmtst_burst_submit(ws, w2s, curr_qp, ops, ops_len);
+	return count;
 }
 
 static inline void
-cn10k_cpt_sec_post_process(struct rte_crypto_op *cop,
-			   struct cpt_cn10k_res_s *res)
+cn10k_cpt_sec_post_process(struct rte_crypto_op *cop, struct cpt_cn10k_res_s *res)
 {
 	struct rte_mbuf *mbuf = cop->sym->m_src;
 	const uint16_t m_len = res->rlen;
diff --git a/drivers/crypto/cnxk/cn10k_cryptodev_ops.h b/drivers/crypto/cnxk/cn10k_cryptodev_ops.h
index 1ad4c16873..628d6a567c 100644
--- a/drivers/crypto/cnxk/cn10k_cryptodev_ops.h
+++ b/drivers/crypto/cnxk/cn10k_cryptodev_ops.h
@@ -5,16 +5,17 @@
 #ifndef _CN10K_CRYPTODEV_OPS_H_
 #define _CN10K_CRYPTODEV_OPS_H_
 
-#include <rte_cryptodev.h>
 #include <cryptodev_pmd.h>
+#include <rte_cryptodev.h>
+#include <rte_eventdev.h>
 
 extern struct rte_cryptodev_ops cn10k_cpt_ops;
 
 void cn10k_cpt_set_enqdeq_fns(struct rte_cryptodev *dev);
 
 __rte_internal
-uint16_t cn10k_cpt_crypto_adapter_enqueue(uintptr_t base,
-					  struct rte_crypto_op *op);
+uint16_t __rte_hot cn10k_cpt_crypto_adapter_enqueue(void *ws, struct rte_event ev[],
+		uint16_t nb_events);
 __rte_internal
 uintptr_t cn10k_cpt_crypto_adapter_dequeue(uintptr_t get_work1);
 
diff --git a/drivers/crypto/cnxk/meson.build b/drivers/crypto/cnxk/meson.build
index 23a1cc3aac..952554ac12 100644
--- a/drivers/crypto/cnxk/meson.build
+++ b/drivers/crypto/cnxk/meson.build
@@ -24,7 +24,7 @@ sources = files(
 
 deps += ['bus_pci', 'common_cnxk', 'security', 'eventdev']
 
-includes += include_directories('../../../lib/net')
+includes += include_directories('../../../lib/net', '../../event/cnxk')
 
 if get_option('buildtype').contains('debug')
     cflags += [ '-DLA_IPSEC_DEBUG' ]
diff --git a/drivers/event/cnxk/cn10k_eventdev.c b/drivers/event/cnxk/cn10k_eventdev.c
index e74ec57382..3651af9ca8 100644
--- a/drivers/event/cnxk/cn10k_eventdev.c
+++ b/drivers/event/cnxk/cn10k_eventdev.c
@@ -454,7 +454,7 @@ cn10k_sso_fp_fns_set(struct rte_eventdev *event_dev)
 					       sso_hws_deq_tmo_ca_burst);
 		}
 	}
-	event_dev->ca_enqueue = cn10k_sso_hws_ca_enq;
+	event_dev->ca_enqueue = cn10k_cpt_crypto_adapter_enqueue;
 
 	if (dev->tx_offloads & NIX_TX_MULTI_SEG_F)
 		CN10K_SET_EVDEV_ENQ_OP(dev, event_dev->txa_enqueue,
diff --git a/drivers/event/cnxk/cn10k_worker.c b/drivers/event/cnxk/cn10k_worker.c
index f953e19dd0..4581c41233 100644
--- a/drivers/event/cnxk/cn10k_worker.c
+++ b/drivers/event/cnxk/cn10k_worker.c
@@ -64,13 +64,3 @@ cn10k_sso_hws_enq_fwd_burst(void *port, const struct rte_event ev[],
 
 	return 1;
 }
-
-uint16_t __rte_hot
-cn10k_sso_hws_ca_enq(void *port, struct rte_event ev[], uint16_t nb_events)
-{
-	struct cn10k_sso_hws *ws = port;
-
-	RTE_SET_USED(nb_events);
-
-	return cn10k_cpt_crypto_adapter_enqueue(ws->base, ev->event_ptr);
-}
diff --git a/drivers/event/cnxk/cn10k_worker.h b/drivers/event/cnxk/cn10k_worker.h
index 0915f404e0..65bb08c0a1 100644
--- a/drivers/event/cnxk/cn10k_worker.h
+++ b/drivers/event/cnxk/cn10k_worker.h
@@ -353,8 +353,6 @@ uint16_t __rte_hot cn10k_sso_hws_enq_new_burst(void *port,
 uint16_t __rte_hot cn10k_sso_hws_enq_fwd_burst(void *port,
 					       const struct rte_event ev[],
 					       uint16_t nb_events);
-uint16_t __rte_hot cn10k_sso_hws_ca_enq(void *port, struct rte_event ev[],
-					uint16_t nb_events);
 
 #define R(name, flags)                                                         \
 	uint16_t __rte_hot cn10k_sso_hws_deq_##name(                           \
-- 
2.25.1


^ permalink raw reply	[flat|nested] 36+ messages in thread
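
For context, the per-event ca_enqueue callback replaced in this patch backs
the public rte_event_crypto_adapter_enqueue() fast path used in OP_FORWARD
mode with internal-port PMDs. A sketch of an application handing that path a
whole burst follows; the 32-entry batch and the pre-filled ops[] array are
assumptions of the example, not part of the patch.

#include <rte_crypto.h>
#include <rte_event_crypto_adapter.h>

/* Wrap a burst of pre-filled crypto ops into events and hand them to
 * the adapter in a single call; with this patch the PMD receives the
 * full burst and groups instructions per queue pair internally.
 */
static uint16_t
enqueue_crypto_burst(uint8_t evdev_id, uint8_t port_id,
		     struct rte_crypto_op *ops[], uint16_t nb_ops)
{
	struct rte_event evs[32];
	uint16_t i;

	if (nb_ops > RTE_DIM(evs))
		nb_ops = RTE_DIM(evs);

	for (i = 0; i < nb_ops; i++) {
		evs[i].event = 0;
		evs[i].op = RTE_EVENT_OP_FORWARD;
		evs[i].event_type = RTE_EVENT_TYPE_CRYPTODEV;
		evs[i].event_ptr = ops[i];
	}

	return rte_event_crypto_adapter_enqueue(evdev_id, port_id, evs,
						nb_ops);
}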

* [PATCH 3/3] crypto/cnxk: add vectorization for event crypto
  2022-08-04  9:59 ` [PATCH 0/3] Vector support for event crypto adapter Volodymyr Fialko
  2022-08-04  9:59   ` [PATCH 1/3] eventdev: introduce event cryptodev vector type Volodymyr Fialko
  2022-08-04  9:59   ` [PATCH 2/3] crypto/cnxk: add burst enqueue for event crypto Volodymyr Fialko
@ 2022-08-04  9:59   ` Volodymyr Fialko
  2022-09-26 11:36   ` [PATCH v2 0/3] Vector support for event crypto adapter Volodymyr Fialko
  3 siblings, 0 replies; 36+ messages in thread
From: Volodymyr Fialko @ 2022-08-04  9:59 UTC (permalink / raw)
  To: dev, Ankur Dwivedi, Anoob Joseph, Tejasree Kondoj, Ray Kinsella,
	Pavan Nikhilesh, Shijith Thotton
  Cc: jerinj, gakhil, Volodymyr Fialko

Add support for vector aggregation of crypto operations for cn10k.
Crypto operations will be grouped by the sub event type, flow id, scheduler
type and queue id fields of rte_event_crypto_metadata::response_info.

Signed-off-by: Volodymyr Fialko <vfialko@marvell.com>
---
Depends-on: Series-24099
Depends-on: Series-24066
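
In practice this grouping means that completions whose response_info fields
match are eligible to share one vector. A sketch of filling the adapter
metadata accordingly is shown below; the function name and the ATOMIC
schedule type are assumptions of the example, and where the metadata lives
(session private data or op private data) follows the adapter guide.

#include <string.h>
#include <rte_event_crypto_adapter.h>

/* Ops sharing sub_event_type, flow_id, sched_type and queue_id in their
 * response_info can end up in the same event vector.
 */
static void
fill_group_metadata(union rte_event_crypto_metadata *m_data,
		    uint8_t cdev_id, uint16_t qp_id,
		    uint8_t ev_queue_id, uint32_t flow_id)
{
	memset(m_data, 0, sizeof(*m_data));
	/* Completion routing; identical fields => same grouping key. */
	m_data->response_info.sched_type = RTE_SCHED_TYPE_ATOMIC;
	m_data->response_info.queue_id = ev_queue_id;
	m_data->response_info.flow_id = flow_id;
	/* Submission routing. */
	m_data->request_info.cdev_id = cdev_id;
	m_data->request_info.queue_pair_id = qp_id;
}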

 drivers/crypto/cnxk/cn10k_cryptodev_ops.c | 398 +++++++++++++++++++---
 drivers/crypto/cnxk/cn10k_cryptodev_ops.h |   2 +
 drivers/crypto/cnxk/cnxk_cryptodev_ops.h  |   9 +-
 drivers/crypto/cnxk/version.map           |   1 +
 drivers/event/cnxk/cn10k_eventdev.c       |  43 ++-
 drivers/event/cnxk/cn10k_worker.h         |   6 +-
 drivers/event/cnxk/cn9k_eventdev.c        |  11 +-
 drivers/event/cnxk/cnxk_eventdev.h        |   4 +-
 drivers/event/cnxk/cnxk_eventdev_adptr.c  |  17 +-
 9 files changed, 431 insertions(+), 60 deletions(-)

diff --git a/drivers/crypto/cnxk/cn10k_cryptodev_ops.c b/drivers/crypto/cnxk/cn10k_cryptodev_ops.c
index bfa6374005..c94a32f268 100644
--- a/drivers/crypto/cnxk/cn10k_cryptodev_ops.c
+++ b/drivers/crypto/cnxk/cn10k_cryptodev_ops.c
@@ -19,6 +19,25 @@
 
 #include "roc_api.h"
 
+#define PKTS_PER_LOOP	32
+#define PKTS_PER_STEORL 16
+
+/* Holds information required to send crypto operations in one burst */
+struct ops_burst {
+	struct rte_crypto_op *op[PKTS_PER_LOOP];
+	uint64_t w2[PKTS_PER_LOOP];
+	struct cn10k_sso_hws *ws;
+	struct cnxk_cpt_qp *qp;
+	uint16_t nb_ops;
+};
+
+/* Holds information required to send vector of operations */
+struct vec_request {
+	struct cpt_inflight_req *req;
+	struct rte_event_vector *vec;
+	uint64_t w2;
+};
+
 static inline struct cnxk_se_sess *
 cn10k_cpt_sym_temp_sess_create(struct cnxk_cpt_qp *qp, struct rte_crypto_op *op)
 {
@@ -183,9 +202,6 @@ cn10k_cpt_fill_inst(struct cnxk_cpt_qp *qp, struct rte_crypto_op *ops[],
 	return 1;
 }
 
-#define PKTS_PER_LOOP	32
-#define PKTS_PER_STEORL 16
-
 static uint16_t
 cn10k_cpt_enqueue_burst(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops)
 {
@@ -286,9 +302,9 @@ cn10k_cpt_crypto_adapter_ev_mdata_set(struct rte_cryptodev *dev __rte_unused,
 	union rte_event_crypto_metadata *ec_mdata = mdata;
 	struct rte_event *rsp_info;
 	struct cnxk_cpt_qp *qp;
+	uint64_t w2, tag_type;
 	uint8_t cdev_id;
 	int16_t qp_id;
-	uint64_t w2;
 
 	/* Get queue pair */
 	cdev_id = ec_mdata->request_info.cdev_id;
@@ -296,9 +312,9 @@ cn10k_cpt_crypto_adapter_ev_mdata_set(struct rte_cryptodev *dev __rte_unused,
 	qp = rte_cryptodevs[cdev_id].data->queue_pairs[qp_id];
 
 	/* Prepare w2 */
+	tag_type = qp->ca.vector_sz ? RTE_EVENT_TYPE_CRYPTODEV_VECTOR : RTE_EVENT_TYPE_CRYPTODEV;
 	rsp_info = &ec_mdata->response_info;
-	w2 = CNXK_CPT_INST_W2((RTE_EVENT_TYPE_CRYPTODEV << 28) |
-				      (rsp_info->sub_event_type << 20) |
+	w2 = CNXK_CPT_INST_W2((tag_type << 28) | (rsp_info->sub_event_type << 20) |
 				      rsp_info->flow_id,
 			      rsp_info->sched_type, rsp_info->queue_id, 0);
 
@@ -392,18 +408,236 @@ cn10k_ca_meta_info_extract(struct rte_crypto_op *op,
 	return 0;
 }
 
+static inline void
+cn10k_cpt_vec_inst_fill(struct vec_request *vec_req, struct cpt_inst_s *inst,
+			struct cnxk_cpt_qp *qp)
+{
+	const union cpt_res_s res = {.cn10k.compcode = CPT_COMP_NOT_DONE};
+	struct cpt_inflight_req *infl_req = vec_req->req;
+
+	const union cpt_inst_w4 w4 = {
+		.s.opcode_major = ROC_SE_MAJOR_OP_MISC,
+		.s.opcode_minor = ROC_SE_MISC_MINOR_OP_PASSTHROUGH,
+		.s.param1 = 1,
+		.s.param2 = 1,
+		.s.dlen = 0,
+	};
+
+	infl_req->vec = vec_req->vec;
+	infl_req->qp = qp;
+
+	inst->res_addr = (uint64_t)&infl_req->res;
+	__atomic_store_n(&infl_req->res.u64[0], res.u64[0], __ATOMIC_RELAXED);
+
+	inst->w0.u64 = 0;
+	inst->w2.u64 = vec_req->w2;
+	inst->w3.u64 = CNXK_CPT_INST_W3(1, infl_req);
+	inst->w4.u64 = w4.u64;
+	inst->w7.u64 = ROC_CPT_DFLT_ENG_GRP_SE << 61;
+}
+
+static void
+cn10k_cpt_vec_pkt_submission_timeout_handle(void)
+{
+	plt_dp_err("Vector packet submission timed out");
+	abort();
+}
+
+static inline void
+cn10k_cpt_vec_submit(struct vec_request vec_tbl[], uint16_t vec_tbl_len, struct cnxk_cpt_qp *qp)
+{
+	uint64_t lmt_base, lmt_arg, lmt_id, io_addr;
+	union cpt_fc_write_s fc;
+	struct cpt_inst_s *inst;
+	uint16_t burst_size;
+	uint64_t *fc_addr;
+	int i;
+
+	if (vec_tbl_len == 0)
+		return;
+
+	const uint32_t fc_thresh = qp->lmtline.fc_thresh;
+	/*
+	 * Use a 10 minute timeout for the poll. It is not possible to recover from a partial
+	 * submission of a vector packet. The actual packets for processing were submitted to CPT
+	 * before this routine runs; hence, any failure to submit the vector packet indicates an
+	 * unrecoverable error for the application.
+	 */
+	const uint64_t timeout = rte_get_timer_cycles() + 10 * 60 * rte_get_timer_hz();
+
+	lmt_base = qp->lmtline.lmt_base;
+	io_addr = qp->lmtline.io_addr;
+	fc_addr = qp->lmtline.fc_addr;
+	ROC_LMT_BASE_ID_GET(lmt_base, lmt_id);
+	inst = (struct cpt_inst_s *)lmt_base;
+
+again:
+	burst_size = RTE_MIN(PKTS_PER_STEORL, vec_tbl_len);
+	for (i = 0; i < burst_size; i++)
+		cn10k_cpt_vec_inst_fill(&vec_tbl[i], &inst[i * 2], qp);
+
+	do {
+		fc.u64[0] = __atomic_load_n(fc_addr, __ATOMIC_RELAXED);
+		if (likely(fc.s.qsize < fc_thresh))
+			break;
+		if (unlikely(rte_get_timer_cycles() > timeout))
+			cn10k_cpt_vec_pkt_submission_timeout_handle();
+	} while (true);
+
+	lmt_arg = ROC_CN10K_CPT_LMT_ARG | (i - 1) << 12 | lmt_id;
+	roc_lmt_submit_steorl(lmt_arg, io_addr);
+
+	rte_io_wmb();
+
+	vec_tbl_len -= i;
+
+	if (vec_tbl_len > 0) {
+		vec_tbl += i;
+		goto again;
+	}
+}
+
+static inline int
+ca_lmtst_vec_submit(struct ops_burst *burst, struct vec_request vec_tbl[], uint16_t *vec_tbl_len)
+{
+	struct cpt_inflight_req *infl_reqs[PKTS_PER_LOOP];
+	uint64_t lmt_base, lmt_arg, io_addr;
+	uint16_t lmt_id, len = *vec_tbl_len;
+	struct cpt_inst_s *inst, *inst_base;
+	struct cpt_inflight_req *infl_req;
+	struct rte_event_vector *vec;
+	union cpt_fc_write_s fc;
+	struct cnxk_cpt_qp *qp;
+	uint64_t *fc_addr;
+	int ret, i, vi;
+
+	qp = burst->qp;
+
+	lmt_base = qp->lmtline.lmt_base;
+	io_addr = qp->lmtline.io_addr;
+	fc_addr = qp->lmtline.fc_addr;
+
+	const uint32_t fc_thresh = qp->lmtline.fc_thresh;
+
+	ROC_LMT_BASE_ID_GET(lmt_base, lmt_id);
+	inst_base = (struct cpt_inst_s *)lmt_base;
+
+#ifdef CNXK_CRYPTODEV_DEBUG
+	if (unlikely(!qp->ca.enabled)) {
+		rte_errno = EINVAL;
+		return 0;
+	}
+#endif
+
+	/* Perform fc check before putting packets into vectors */
+	fc.u64[0] = __atomic_load_n(fc_addr, __ATOMIC_RELAXED);
+	if (unlikely(fc.s.qsize > fc_thresh)) {
+		rte_errno = EAGAIN;
+		return 0;
+	}
+
+	if (unlikely(rte_mempool_get_bulk(qp->ca.req_mp, (void **)infl_reqs, burst->nb_ops))) {
+		rte_errno = ENOMEM;
+		return 0;
+	}
+
+	for (i = 0; i < burst->nb_ops; i++) {
+		inst = &inst_base[2 * i];
+		infl_req = infl_reqs[i];
+		infl_req->op_flags = 0;
+
+		ret = cn10k_cpt_fill_inst(qp, &burst->op[i], inst, infl_req);
+		if (unlikely(ret != 1)) {
+			plt_cpt_dbg("Could not process op: %p", burst->op[i]);
+			if (i != 0)
+				goto submit;
+			else
+				goto put;
+		}
+
+		infl_req->res.cn10k.compcode = CPT_COMP_NOT_DONE;
+		infl_req->qp = qp;
+		inst->w3.u64 = 0x1;
+
+		/* Lookup for existing vector by w2 */
+		for (vi = len - 1; vi >= 0; vi--) {
+			if (vec_tbl[vi].w2 != burst->w2[i])
+				continue;
+			vec = vec_tbl[vi].vec;
+			if (unlikely(vec->nb_elem == qp->ca.vector_sz))
+				continue;
+			vec->ptrs[vec->nb_elem++] = infl_req;
+			goto next_op; /* continue outer loop */
+		}
+
+		/* No available vectors found, allocate a new one */
+		if (unlikely(rte_mempool_get(qp->ca.vector_mp, (void **)&vec_tbl[len].vec))) {
+			rte_errno = ENOMEM;
+			if (i != 0)
+				goto submit;
+			else
+				goto put;
+		}
+		/* Also preallocate an in-flight request that will be used to
+		 * submit the misc passthrough instruction
+		 */
+		if (unlikely(rte_mempool_get(qp->ca.req_mp, (void **)&vec_tbl[len].req))) {
+			rte_mempool_put(qp->ca.vector_mp, vec_tbl[len].vec);
+			rte_errno = ENOMEM;
+			if (i != 0)
+				goto submit;
+			else
+				goto put;
+		}
+		vec_tbl[len].w2 = burst->w2[i];
+		vec_tbl[len].vec->ptrs[0] = infl_req;
+		vec_tbl[len].vec->nb_elem = 1;
+		len++;
+
+next_op:;
+	}
+
+	/* Submit operations in burst */
+submit:
+	if (CNXK_TT_FROM_TAG(burst->ws->gw_rdata) == SSO_TT_ORDERED)
+		roc_sso_hws_head_wait(burst->ws->base);
+
+	if (i > PKTS_PER_STEORL) {
+		lmt_arg = ROC_CN10K_CPT_LMT_ARG | (PKTS_PER_STEORL - 1) << 12 | (uint64_t)lmt_id;
+		roc_lmt_submit_steorl(lmt_arg, io_addr);
+		lmt_arg = ROC_CN10K_CPT_LMT_ARG | (i - PKTS_PER_STEORL - 1) << 12 |
+			  (uint64_t)(lmt_id + PKTS_PER_STEORL);
+		roc_lmt_submit_steorl(lmt_arg, io_addr);
+	} else {
+		lmt_arg = ROC_CN10K_CPT_LMT_ARG | (i - 1) << 12 | (uint64_t)lmt_id;
+		roc_lmt_submit_steorl(lmt_arg, io_addr);
+	}
+
+	rte_io_wmb();
+
+put:
+	if (i != burst->nb_ops)
+		rte_mempool_put_bulk(qp->ca.req_mp, (void *)&infl_reqs[i], burst->nb_ops - i);
+
+	*vec_tbl_len = len;
+
+	return i;
+}
+
 static inline uint16_t
-ca_lmtst_burst_submit(struct cn10k_sso_hws *ws, uint64_t w2[], struct cnxk_cpt_qp *qp,
-		      struct rte_crypto_op *op[], uint16_t nb_ops)
+ca_lmtst_burst_submit(struct ops_burst *burst)
 {
 	struct cpt_inflight_req *infl_reqs[PKTS_PER_LOOP];
 	uint64_t lmt_base, lmt_arg, io_addr;
 	struct cpt_inst_s *inst, *inst_base;
 	struct cpt_inflight_req *infl_req;
 	union cpt_fc_write_s fc;
+	struct cnxk_cpt_qp *qp;
 	uint64_t *fc_addr;
 	uint16_t lmt_id;
-	int ret, i;
+	int ret, i, j;
+
+	qp = burst->qp;
 
 	lmt_base = qp->lmtline.lmt_base;
 	io_addr = qp->lmtline.io_addr;
@@ -414,24 +648,26 @@ ca_lmtst_burst_submit(struct cn10k_sso_hws *ws, uint64_t w2[], struct cnxk_cpt_q
 	ROC_LMT_BASE_ID_GET(lmt_base, lmt_id);
 	inst_base = (struct cpt_inst_s *)lmt_base;
 
+#ifdef CNXK_CRYPTODEV_DEBUG
 	if (unlikely(!qp->ca.enabled)) {
 		rte_errno = EINVAL;
 		return 0;
 	}
+#endif
 
-	if (unlikely(rte_mempool_get_bulk(qp->ca.req_mp, (void **)infl_reqs, nb_ops))) {
+	if (unlikely(rte_mempool_get_bulk(qp->ca.req_mp, (void **)infl_reqs, burst->nb_ops))) {
 		rte_errno = ENOMEM;
 		return 0;
 	}
 
-	for (i = 0; i < nb_ops; i++) {
+	for (i = 0; i < burst->nb_ops; i++) {
 		inst = &inst_base[2 * i];
 		infl_req = infl_reqs[i];
 		infl_req->op_flags = 0;
 
-		ret = cn10k_cpt_fill_inst(qp, &op[i], inst, infl_req);
+		ret = cn10k_cpt_fill_inst(qp, &burst->op[i], inst, infl_req);
 		if (unlikely(ret != 1)) {
-			plt_dp_err("Could not process op: %p", op[i]);
+			plt_dp_dbg("Could not process op: %p", burst->op[i]);
 			if (i != 0)
 				goto submit;
 			else
@@ -442,20 +678,25 @@ ca_lmtst_burst_submit(struct cn10k_sso_hws *ws, uint64_t w2[], struct cnxk_cpt_q
 		infl_req->qp = qp;
 		inst->w0.u64 = 0;
 		inst->res_addr = (uint64_t)&infl_req->res;
-		inst->w2.u64 = w2[i];
+		inst->w2.u64 = burst->w2[i];
 		inst->w3.u64 = CNXK_CPT_INST_W3(1, infl_req);
 	}
 
 	fc.u64[0] = __atomic_load_n(fc_addr, __ATOMIC_RELAXED);
 	if (unlikely(fc.s.qsize > fc_thresh)) {
 		rte_errno = EAGAIN;
+		for (j = 0; j < i; j++) {
+			infl_req = infl_reqs[j];
+			if (unlikely(infl_req->op_flags & CPT_OP_FLAGS_METABUF))
+				rte_mempool_put(qp->meta_info.pool, infl_req->mdata);
+		}
 		i = 0;
 		goto put;
 	}
 
 submit:
-	if (CNXK_TT_FROM_TAG(ws->gw_rdata) == SSO_TT_ORDERED)
-		roc_sso_hws_head_wait(ws->base);
+	if (CNXK_TT_FROM_TAG(burst->ws->gw_rdata) == SSO_TT_ORDERED)
+		roc_sso_hws_head_wait(burst->ws->base);
 
 	if (i > PKTS_PER_STEORL) {
 		lmt_arg = ROC_CN10K_CPT_LMT_ARG | (PKTS_PER_STEORL - 1) << 12 | (uint64_t)lmt_id;
@@ -471,8 +712,8 @@ ca_lmtst_burst_submit(struct cn10k_sso_hws *ws, uint64_t w2[], struct cnxk_cpt_q
 	rte_io_wmb();
 
 put:
-	if (unlikely(i != nb_ops))
-		rte_mempool_put_bulk(qp->ca.req_mp, (void *)&infl_reqs[i], nb_ops - i);
+	if (unlikely(i != burst->nb_ops))
+		rte_mempool_put_bulk(qp->ca.req_mp, (void *)&infl_reqs[i], burst->nb_ops - i);
 
 	return i;
 }
@@ -480,42 +721,76 @@ ca_lmtst_burst_submit(struct cn10k_sso_hws *ws, uint64_t w2[], struct cnxk_cpt_q
 uint16_t __rte_hot
 cn10k_cpt_crypto_adapter_enqueue(void *ws, struct rte_event ev[], uint16_t nb_events)
 {
-	struct rte_crypto_op *ops[PKTS_PER_LOOP], *op;
-	struct cnxk_cpt_qp *qp, *curr_qp = NULL;
-	uint64_t w2s[PKTS_PER_LOOP], w2;
-	uint16_t submitted, count = 0;
-	int ret, i, ops_len = 0;
+	uint16_t submitted, count = 0, vec_tbl_len = 0;
+	struct vec_request vec_tbl[nb_events];
+	struct rte_crypto_op *op;
+	struct ops_burst burst;
+	struct cnxk_cpt_qp *qp;
+	bool is_vector = false;
+	uint64_t w2;
+	int ret, i;
+
+	burst.ws = ws;
+	burst.qp = NULL;
+	burst.nb_ops = 0;
 
 	for (i = 0; i < nb_events; i++) {
 		op = ev[i].event_ptr;
 		ret = cn10k_ca_meta_info_extract(op, &qp, &w2);
 		if (unlikely(ret)) {
 			rte_errno = EINVAL;
-			return count;
+			goto vec_submit;
 		}
 
-		if (qp != curr_qp) {
-			if (ops_len) {
-				submitted = ca_lmtst_burst_submit(ws, w2s, curr_qp, ops, ops_len);
+		/* Queue pair change check */
+		if (qp != burst.qp) {
+			if (burst.nb_ops) {
+				if (is_vector) {
+					submitted =
+						ca_lmtst_vec_submit(&burst, vec_tbl, &vec_tbl_len);
+					/*
+					 * Vector submission is required on a qp change, but not
+					 * otherwise, since several vectors can be sent per LMTST
+					 * instruction only for the same qp.
+					 */
+					cn10k_cpt_vec_submit(vec_tbl, vec_tbl_len, burst.qp);
+					vec_tbl_len = 0;
+				} else {
+					submitted = ca_lmtst_burst_submit(&burst);
+				}
 				count += submitted;
-				if (unlikely(submitted != ops_len))
-					return count;
-				ops_len = 0;
+				if (unlikely(submitted != burst.nb_ops))
+					goto vec_submit;
+				burst.nb_ops = 0;
 			}
-			curr_qp = qp;
+			is_vector = qp->ca.vector_sz;
+			burst.qp = qp;
 		}
-		w2s[ops_len] = w2;
-		ops[ops_len] = op;
-		if (++ops_len == PKTS_PER_LOOP) {
-			submitted = ca_lmtst_burst_submit(ws, w2s, curr_qp, ops, ops_len);
+		burst.w2[burst.nb_ops] = w2;
+		burst.op[burst.nb_ops] = op;
+
+		/* Max nb_ops per burst check */
+		if (++burst.nb_ops == PKTS_PER_LOOP) {
+			if (is_vector)
+				submitted = ca_lmtst_vec_submit(&burst, vec_tbl, &vec_tbl_len);
+			else
+				submitted = ca_lmtst_burst_submit(&burst);
 			count += submitted;
-			if (unlikely(submitted != ops_len))
-				return count;
-			ops_len = 0;
+			if (unlikely(submitted != burst.nb_ops))
+				goto vec_submit;
+			burst.nb_ops = 0;
 		}
 	}
-	if (ops_len)
-		count += ca_lmtst_burst_submit(ws, w2s, curr_qp, ops, ops_len);
+	/* Submit the rest of crypto operations */
+	if (burst.nb_ops) {
+		if (is_vector)
+			count += ca_lmtst_vec_submit(&burst, vec_tbl, &vec_tbl_len);
+		else
+			count += ca_lmtst_burst_submit(&burst);
+	}
+
+vec_submit:
+	cn10k_cpt_vec_submit(vec_tbl, vec_tbl_len, burst.qp);
 	return count;
 }
 
@@ -673,6 +948,49 @@ cn10k_cpt_crypto_adapter_dequeue(uintptr_t get_work1)
 	return (uintptr_t)cop;
 }
 
+uintptr_t
+cn10k_cpt_crypto_adapter_vector_dequeue(uintptr_t get_work1)
+{
+	struct cpt_inflight_req *infl_req, *vec_infl_req;
+	struct rte_mempool *meta_mp, *req_mp;
+	struct rte_event_vector *vec;
+	struct rte_crypto_op *cop;
+	struct cnxk_cpt_qp *qp;
+	union cpt_res_s res;
+	int i;
+
+	vec_infl_req = (struct cpt_inflight_req *)(get_work1);
+
+	vec = vec_infl_req->vec;
+	qp = vec_infl_req->qp;
+	meta_mp = qp->meta_info.pool;
+	req_mp = qp->ca.req_mp;
+
+#ifdef CNXK_CRYPTODEV_DEBUG
+	res.u64[0] = __atomic_load_n(&vec_infl_req->res.u64[0], __ATOMIC_RELAXED);
+	PLT_ASSERT(res.cn10k.compcode == CPT_COMP_WARN);
+	PLT_ASSERT(res.cn10k.uc_compcode == 0);
+#endif
+
+	for (i = 0; i < vec->nb_elem; i++) {
+		infl_req = vec->ptrs[i];
+		cop = infl_req->cop;
+
+		res.u64[0] = __atomic_load_n(&infl_req->res.u64[0], __ATOMIC_RELAXED);
+		cn10k_cpt_dequeue_post_process(qp, cop, infl_req, &res.cn10k);
+
+		vec->ptrs[i] = cop;
+		if (unlikely(infl_req->op_flags & CPT_OP_FLAGS_METABUF))
+			rte_mempool_put(meta_mp, infl_req->mdata);
+
+		rte_mempool_put(req_mp, infl_req);
+	}
+
+	rte_mempool_put(req_mp, vec_infl_req);
+
+	return (uintptr_t)vec;
+}
+
 static uint16_t
 cn10k_cpt_dequeue_burst(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops)
 {
diff --git a/drivers/crypto/cnxk/cn10k_cryptodev_ops.h b/drivers/crypto/cnxk/cn10k_cryptodev_ops.h
index 628d6a567c..8104310c30 100644
--- a/drivers/crypto/cnxk/cn10k_cryptodev_ops.h
+++ b/drivers/crypto/cnxk/cn10k_cryptodev_ops.h
@@ -18,5 +18,7 @@ uint16_t __rte_hot cn10k_cpt_crypto_adapter_enqueue(void *ws, struct rte_event e
 		uint16_t nb_events);
 __rte_internal
 uintptr_t cn10k_cpt_crypto_adapter_dequeue(uintptr_t get_work1);
+__rte_internal
+uintptr_t cn10k_cpt_crypto_adapter_vector_dequeue(uintptr_t get_work1);
 
 #endif /* _CN10K_CRYPTODEV_OPS_H_ */
diff --git a/drivers/crypto/cnxk/cnxk_cryptodev_ops.h b/drivers/crypto/cnxk/cnxk_cryptodev_ops.h
index ffe4ae19aa..d9ed43b40b 100644
--- a/drivers/crypto/cnxk/cnxk_cryptodev_ops.h
+++ b/drivers/crypto/cnxk/cnxk_cryptodev_ops.h
@@ -37,7 +37,10 @@ struct cpt_qp_meta_info {
 
 struct cpt_inflight_req {
 	union cpt_res_s res;
-	struct rte_crypto_op *cop;
+	union {
+		struct rte_crypto_op *cop;
+		struct rte_event_vector *vec;
+	};
 	void *mdata;
 	uint8_t op_flags;
 	void *qp;
@@ -63,6 +66,10 @@ struct crypto_adpter_info {
 	/**< Set if queue pair is added to crypto adapter */
 	struct rte_mempool *req_mp;
 	/**< CPT inflight request mempool */
+	uint16_t vector_sz;
+	/**< Maximum number of cops to combine into a single vector */
+	struct rte_mempool *vector_mp;
+	/**< Pool for allocating rte_event_vector */
 };
 
 struct cnxk_cpt_qp {
diff --git a/drivers/crypto/cnxk/version.map b/drivers/crypto/cnxk/version.map
index 0178c416ec..4735e70550 100644
--- a/drivers/crypto/cnxk/version.map
+++ b/drivers/crypto/cnxk/version.map
@@ -5,6 +5,7 @@ INTERNAL {
 	cn9k_cpt_crypto_adapter_dequeue;
 	cn10k_cpt_crypto_adapter_enqueue;
 	cn10k_cpt_crypto_adapter_dequeue;
+	cn10k_cpt_crypto_adapter_vector_dequeue;
 
 	local: *;
 };
diff --git a/drivers/event/cnxk/cn10k_eventdev.c b/drivers/event/cnxk/cn10k_eventdev.c
index 3651af9ca8..067248fc77 100644
--- a/drivers/event/cnxk/cn10k_eventdev.c
+++ b/drivers/event/cnxk/cn10k_eventdev.c
@@ -880,7 +880,8 @@ cn10k_crypto_adapter_caps_get(const struct rte_eventdev *event_dev,
 	CNXK_VALID_DEV_OR_ERR_RET(cdev->device, "crypto_cn10k");
 
 	*caps = RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD |
-		RTE_EVENT_CRYPTO_ADAPTER_CAP_SESSION_PRIVATE_DATA;
+		RTE_EVENT_CRYPTO_ADAPTER_CAP_SESSION_PRIVATE_DATA |
+		RTE_EVENT_CRYPTO_ADAPTER_CAP_EVENT_VECTOR;
 
 	return 0;
 }
@@ -892,8 +893,7 @@ cn10k_crypto_adapter_qp_add(const struct rte_eventdev *event_dev,
 			    const struct rte_event_crypto_adapter_queue_conf *conf)
 {
 	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
-
-	RTE_SET_USED(conf);
+	int ret;
 
 	CNXK_VALID_DEV_OR_ERR_RET(event_dev->dev, "event_cn10k");
 	CNXK_VALID_DEV_OR_ERR_RET(cdev->device, "crypto_cn10k");
@@ -901,12 +901,14 @@ cn10k_crypto_adapter_qp_add(const struct rte_eventdev *event_dev,
 	dev->is_ca_internal_port = 1;
 	cn10k_sso_fp_fns_set((struct rte_eventdev *)(uintptr_t)event_dev);
 
-	return cnxk_crypto_adapter_qp_add(event_dev, cdev, queue_pair_id);
+	ret = cnxk_crypto_adapter_qp_add(event_dev, cdev, queue_pair_id, conf);
+	cn10k_sso_set_priv_mem(event_dev, NULL, 0);
+
+	return ret;
 }
 
 static int
-cn10k_crypto_adapter_qp_del(const struct rte_eventdev *event_dev,
-			    const struct rte_cryptodev *cdev,
+cn10k_crypto_adapter_qp_del(const struct rte_eventdev *event_dev, const struct rte_cryptodev *cdev,
 			    int32_t queue_pair_id)
 {
 	CNXK_VALID_DEV_OR_ERR_RET(event_dev->dev, "event_cn10k");
@@ -915,6 +917,34 @@ cn10k_crypto_adapter_qp_del(const struct rte_eventdev *event_dev,
 	return cnxk_crypto_adapter_qp_del(cdev, queue_pair_id);
 }
 
+static int
+cn10k_tim_caps_get(const struct rte_eventdev *evdev, uint64_t flags,
+		   uint32_t *caps, const struct event_timer_adapter_ops **ops)
+{
+	return cnxk_tim_caps_get(evdev, flags, caps, ops,
+				 cn10k_sso_set_priv_mem);
+}
+
+static int
+cn10k_crypto_adapter_vec_limits(const struct rte_eventdev *event_dev,
+				const struct rte_cryptodev *cdev,
+				struct rte_event_crypto_adapter_vector_limits *limits)
+{
+	CNXK_VALID_DEV_OR_ERR_RET(event_dev->dev, "event_cn10k");
+	CNXK_VALID_DEV_OR_ERR_RET(cdev->device, "crypto_cn10k");
+
+	limits->log2_sz = false;
+	limits->min_sz = 0;
+	limits->max_sz = UINT16_MAX;
+	/* Timeout is unused; in the software implementation we aggregate all
+	 * crypto operations passed to a single enqueue call
+	 */
+	limits->min_timeout_ns = 0;
+	limits->max_timeout_ns = 0;
+
+	return 0;
+}
+
 static struct eventdev_ops cn10k_sso_dev_ops = {
 	.dev_infos_get = cn10k_sso_info_get,
 	.dev_configure = cn10k_sso_dev_configure,
@@ -953,6 +983,7 @@ static struct eventdev_ops cn10k_sso_dev_ops = {
 	.crypto_adapter_caps_get = cn10k_crypto_adapter_caps_get,
 	.crypto_adapter_queue_pair_add = cn10k_crypto_adapter_qp_add,
 	.crypto_adapter_queue_pair_del = cn10k_crypto_adapter_qp_del,
+	.crypto_adapter_vector_limits_get = cn10k_crypto_adapter_vec_limits,
 
 	.dump = cnxk_sso_dump,
 	.dev_start = cn10k_sso_start,
diff --git a/drivers/event/cnxk/cn10k_worker.h b/drivers/event/cnxk/cn10k_worker.h
index 65bb08c0a1..ddd11b3336 100644
--- a/drivers/event/cnxk/cn10k_worker.h
+++ b/drivers/event/cnxk/cn10k_worker.h
@@ -207,6 +207,9 @@ cn10k_sso_hws_post_process(struct cn10k_sso_hws *ws, uint64_t *u64,
 	if ((flags & CPT_RX_WQE_F) &&
 	    (CNXK_EVENT_TYPE_FROM_TAG(u64[0]) == RTE_EVENT_TYPE_CRYPTODEV)) {
 		u64[1] = cn10k_cpt_crypto_adapter_dequeue(u64[1]);
+	} else if ((flags & CPT_RX_WQE_F) &&
+		   (CNXK_EVENT_TYPE_FROM_TAG(u64[0]) == RTE_EVENT_TYPE_CRYPTODEV_VECTOR)) {
+		u64[1] = cn10k_cpt_crypto_adapter_vector_dequeue(u64[1]);
 	} else if (CNXK_EVENT_TYPE_FROM_TAG(u64[0]) == RTE_EVENT_TYPE_ETHDEV) {
 		uint8_t port = CNXK_SUB_EVENT_FROM_TAG(u64[0]);
 		uint64_t mbuf;
@@ -253,8 +256,7 @@ cn10k_sso_hws_post_process(struct cn10k_sso_hws *ws, uint64_t *u64,
 					 flags & NIX_RX_OFFLOAD_TSTAMP_F,
 					 (uint64_t *)tstamp_ptr);
 		u64[1] = mbuf;
-	} else if (CNXK_EVENT_TYPE_FROM_TAG(u64[0]) ==
-		   RTE_EVENT_TYPE_ETHDEV_VECTOR) {
+	} else if (CNXK_EVENT_TYPE_FROM_TAG(u64[0]) == RTE_EVENT_TYPE_ETHDEV_VECTOR) {
 		uint8_t port = CNXK_SUB_EVENT_FROM_TAG(u64[0]);
 		__uint128_t vwqe_hdr = *(__uint128_t *)u64[1];
 
diff --git a/drivers/event/cnxk/cn9k_eventdev.c b/drivers/event/cnxk/cn9k_eventdev.c
index 45ed547cb0..bd130d40aa 100644
--- a/drivers/event/cnxk/cn9k_eventdev.c
+++ b/drivers/event/cnxk/cn9k_eventdev.c
@@ -1124,8 +1124,7 @@ cn9k_crypto_adapter_qp_add(const struct rte_eventdev *event_dev,
 			   const struct rte_event_crypto_adapter_queue_conf *conf)
 {
 	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
-
-	RTE_SET_USED(conf);
+	int ret;
 
 	CNXK_VALID_DEV_OR_ERR_RET(event_dev->dev, "event_cn9k");
 	CNXK_VALID_DEV_OR_ERR_RET(cdev->device, "crypto_cn9k");
@@ -1133,12 +1132,14 @@ cn9k_crypto_adapter_qp_add(const struct rte_eventdev *event_dev,
 	dev->is_ca_internal_port = 1;
 	cn9k_sso_fp_fns_set((struct rte_eventdev *)(uintptr_t)event_dev);
 
-	return cnxk_crypto_adapter_qp_add(event_dev, cdev, queue_pair_id);
+	ret = cnxk_crypto_adapter_qp_add(event_dev, cdev, queue_pair_id, conf);
+	cn9k_sso_set_priv_mem(event_dev, NULL, 0);
+
+	return ret;
 }
 
 static int
-cn9k_crypto_adapter_qp_del(const struct rte_eventdev *event_dev,
-			   const struct rte_cryptodev *cdev,
+cn9k_crypto_adapter_qp_del(const struct rte_eventdev *event_dev, const struct rte_cryptodev *cdev,
 			   int32_t queue_pair_id)
 {
 	CNXK_VALID_DEV_OR_ERR_RET(event_dev->dev, "event_cn9k");
diff --git a/drivers/event/cnxk/cnxk_eventdev.h b/drivers/event/cnxk/cnxk_eventdev.h
index c9a0686b4d..41d9c0b7e7 100644
--- a/drivers/event/cnxk/cnxk_eventdev.h
+++ b/drivers/event/cnxk/cnxk_eventdev.h
@@ -10,6 +10,7 @@
 #include <cryptodev_pmd.h>
 #include <rte_devargs.h>
 #include <rte_ethdev.h>
+#include <rte_event_crypto_adapter.h>
 #include <rte_event_eth_rx_adapter.h>
 #include <rte_event_eth_tx_adapter.h>
 #include <rte_kvargs.h>
@@ -312,7 +313,8 @@ int cnxk_sso_tx_adapter_start(uint8_t id, const struct rte_eventdev *event_dev);
 int cnxk_sso_tx_adapter_stop(uint8_t id, const struct rte_eventdev *event_dev);
 int cnxk_sso_tx_adapter_free(uint8_t id, const struct rte_eventdev *event_dev);
 int cnxk_crypto_adapter_qp_add(const struct rte_eventdev *event_dev,
-			       const struct rte_cryptodev *cdev, int32_t queue_pair_id);
+			       const struct rte_cryptodev *cdev, int32_t queue_pair_id,
+			       const struct rte_event_crypto_adapter_queue_conf *conf);
 int cnxk_crypto_adapter_qp_del(const struct rte_cryptodev *cdev, int32_t queue_pair_id);
 
 #endif /* __CNXK_EVENTDEV_H__ */
diff --git a/drivers/event/cnxk/cnxk_eventdev_adptr.c b/drivers/event/cnxk/cnxk_eventdev_adptr.c
index 3f46e79ba8..cd238fe074 100644
--- a/drivers/event/cnxk/cnxk_eventdev_adptr.c
+++ b/drivers/event/cnxk/cnxk_eventdev_adptr.c
@@ -631,7 +631,8 @@ cnxk_sso_tx_adapter_free(uint8_t id __rte_unused,
 }
 
 static int
-crypto_adapter_qp_setup(const struct rte_cryptodev *cdev, struct cnxk_cpt_qp *qp)
+crypto_adapter_qp_setup(const struct rte_cryptodev *cdev, struct cnxk_cpt_qp *qp,
+			const struct rte_event_crypto_adapter_queue_conf *conf)
 {
 	char name[RTE_MEMPOOL_NAMESIZE];
 	uint32_t cache_size, nb_req;
@@ -664,6 +665,10 @@ crypto_adapter_qp_setup(const struct rte_cryptodev *cdev, struct cnxk_cpt_qp *qp
 	if (qp->ca.req_mp == NULL)
 		return -ENOMEM;
 
+	if (conf != NULL) {
+		qp->ca.vector_sz = conf->vector_sz;
+		qp->ca.vector_mp = conf->vector_mp;
+	}
 	qp->ca.enabled = true;
 
 	return 0;
@@ -671,7 +676,8 @@ crypto_adapter_qp_setup(const struct rte_cryptodev *cdev, struct cnxk_cpt_qp *qp
 
 int
 cnxk_crypto_adapter_qp_add(const struct rte_eventdev *event_dev, const struct rte_cryptodev *cdev,
-			   int32_t queue_pair_id)
+			   int32_t queue_pair_id,
+			   const struct rte_event_crypto_adapter_queue_conf *conf)
 {
 	struct cnxk_sso_evdev *sso_evdev = cnxk_sso_pmd_priv(event_dev);
 	uint32_t adptr_xae_cnt = 0;
@@ -683,7 +689,7 @@ cnxk_crypto_adapter_qp_add(const struct rte_eventdev *event_dev, const struct rt
 
 		for (qp_id = 0; qp_id < cdev->data->nb_queue_pairs; qp_id++) {
 			qp = cdev->data->queue_pairs[qp_id];
-			ret = crypto_adapter_qp_setup(cdev, qp);
+			ret = crypto_adapter_qp_setup(cdev, qp, conf);
 			if (ret) {
 				cnxk_crypto_adapter_qp_del(cdev, -1);
 				return ret;
@@ -692,7 +698,7 @@ cnxk_crypto_adapter_qp_add(const struct rte_eventdev *event_dev, const struct rt
 		}
 	} else {
 		qp = cdev->data->queue_pairs[queue_pair_id];
-		ret = crypto_adapter_qp_setup(cdev, qp);
+		ret = crypto_adapter_qp_setup(cdev, qp, conf);
 		if (ret)
 			return ret;
 		adptr_xae_cnt = qp->ca.req_mp->size;
@@ -723,7 +729,8 @@ crypto_adapter_qp_free(struct cnxk_cpt_qp *qp)
 }
 
 int
-cnxk_crypto_adapter_qp_del(const struct rte_cryptodev *cdev, int32_t queue_pair_id)
+cnxk_crypto_adapter_qp_del(const struct rte_cryptodev *cdev,
+			   int32_t queue_pair_id)
 {
 	struct cnxk_cpt_qp *qp;
 
-- 
2.25.1


^ permalink raw reply	[flat|nested] 36+ messages in thread
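
On the consumer side, a worker dequeuing from its event port will now also
see events of type RTE_EVENT_TYPE_CRYPTODEV_VECTOR. A minimal sketch of
unpacking one such completion vector follows; process_op() is a placeholder
for application logic, not part of the patch.

#include <rte_crypto.h>
#include <rte_eventdev.h>
#include <rte_mempool.h>

static void
process_op(struct rte_crypto_op *op)
{
	(void)op; /* application-specific completion handling */
}

/* Unpack one crypto completion vector: every element is a finished
 * rte_crypto_op; the container goes back to the pool it came from.
 */
static void
handle_crypto_vector(const struct rte_event *ev)
{
	struct rte_event_vector *vec = ev->vec;
	uint16_t i;

	for (i = 0; i < vec->nb_elem; i++) {
		struct rte_crypto_op *op = vec->ptrs[i];

		process_op(op);
		rte_crypto_op_free(op);
	}

	rte_mempool_put(rte_mempool_from_obj(vec), vec);
}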

* RE: [PATCH 1/3] eventdev: introduce event cryptodev vector type
  2022-08-04  9:59   ` [PATCH 1/3] eventdev: introduce event cryptodev vector type Volodymyr Fialko
@ 2022-09-21 18:32     ` Akhil Goyal
  2022-09-22  4:53       ` Gujjar, Abhinandan S
  2022-09-24  8:43     ` Gujjar, Abhinandan S
  1 sibling, 1 reply; 36+ messages in thread
From: Akhil Goyal @ 2022-09-21 18:32 UTC (permalink / raw)
  To: Volodymyr Fialko, dev, Jerin Jacob Kollanukkaran,
	Abhinandan Gujjar, Pavan Nikhilesh Bhagavatula, Shijith Thotton,
	Hemant Agrawal, Sachin Saxena, Jay Jayatheerthan
  Cc: Anoob Joseph, Volodymyr Fialko

Hi Abhinandan/Hemant,

Can you review this patchset?
The library changes need to be merged in RC1.
If there are no comments, this will be merged in a couple of days. It has been over 6 weeks since this patch was posted.

Regards,
Akhil
> Subject: [PATCH 1/3] eventdev: introduce event cryptodev vector type
> 
> Introduce the ability to aggregate crypto operations processed by the event
> crypto adapter into a single event containing an rte_event_vector whose event
> type is RTE_EVENT_TYPE_CRYPTODEV_VECTOR.
> 
> The application should set RTE_EVENT_CRYPTO_ADAPTER_EVENT_VECTOR in
> rte_event_crypto_adapter_queue_conf::flags and provide a vector configuration
> with respect to rte_event_crypto_adapter_vector_limits, which can be
> obtained by calling rte_event_crypto_adapter_vector_limits_get, to enable
> vectorization.
> 
> The event crypto adapter would be responsible for vectorizing the crypto
> operations based on provided response information in
> rte_event_crypto_metadata::response_info.
> 
> Updated drivers and tests accordingly to new API.
> 
> Signed-off-by: Volodymyr Fialko <vfialko@marvell.com>


^ permalink raw reply	[flat|nested] 36+ messages in thread

* RE: [PATCH 1/3] eventdev: introduce event cryptodev vector type
  2022-09-21 18:32     ` Akhil Goyal
@ 2022-09-22  4:53       ` Gujjar, Abhinandan S
  0 siblings, 0 replies; 36+ messages in thread
From: Gujjar, Abhinandan S @ 2022-09-22  4:53 UTC (permalink / raw)
  To: Akhil Goyal, Volodymyr Fialko, dev, Jerin Jacob Kollanukkaran,
	Pavan Nikhilesh Bhagavatula, Shijith Thotton, Hemant Agrawal,
	Sachin Saxena, Jayatheerthan, Jay
  Cc: Anoob Joseph, Volodymyr Fialko

Hi Akhil,

I have started reviewing it.
You can expect some feedback over the weekend.

Regards
Abhinandan

> -----Original Message-----
> From: Akhil Goyal <gakhil@marvell.com>
> Sent: Thursday, September 22, 2022 12:02 AM
> To: Volodymyr Fialko <vfialko@marvell.com>; dev@dpdk.org; Jerin Jacob
> Kollanukkaran <jerinj@marvell.com>; Gujjar, Abhinandan S
> <abhinandan.gujjar@intel.com>; Pavan Nikhilesh Bhagavatula
> <pbhagavatula@marvell.com>; Shijith Thotton <sthotton@marvell.com>;
> Hemant Agrawal <hemant.agrawal@nxp.com>; Sachin Saxena
> <sachin.saxena@oss.nxp.com>; Jayatheerthan, Jay
> <jay.jayatheerthan@intel.com>
> Cc: Anoob Joseph <anoobj@marvell.com>; Volodymyr Fialko
> <vfialko@marvell.com>
> Subject: RE: [PATCH 1/3] eventdev: introduce event cryptodev vector type
> 
> Hi Abhinandan/Hemant,
> 
> Can you review this patchset?
> The library changes need to merged in RC1.
> If no comments, this will be merged in couple of days. It has been over 6 weeks
> for this patch.
> 
> Regards,
> Akhil
> > Subject: [PATCH 1/3] eventdev: introduce event cryptodev vector type
> >
> > Introduce the ability to aggregate crypto operations processed by the
> > event crypto adapter into a single event containing an rte_event_vector
> > whose event type is RTE_EVENT_TYPE_CRYPTODEV_VECTOR.
> >
> > The application should set RTE_EVENT_CRYPTO_ADAPTER_EVENT_VECTOR in
> > rte_event_crypto_adapter_queue_conf::flags and provide a vector
> > configuration with respect to rte_event_crypto_adapter_vector_limits,
> > which can be obtained by calling
> > rte_event_crypto_adapter_vector_limits_get, to enable vectorization.
> >
> > The event crypto adapter would be responsible for vectorizing the
> > crypto operations based on provided response information in
> > rte_event_crypto_metadata::response_info.
> >
> > Updated drivers and tests accordingly to new API.
> >
> > Signed-off-by: Volodymyr Fialko <vfialko@marvell.com>


^ permalink raw reply	[flat|nested] 36+ messages in thread

* RE: [PATCH 1/3] eventdev: introduce event cryptodev vector type
  2022-08-04  9:59   ` [PATCH 1/3] eventdev: introduce event cryptodev vector type Volodymyr Fialko
  2022-09-21 18:32     ` Akhil Goyal
@ 2022-09-24  8:43     ` Gujjar, Abhinandan S
  2022-09-26 11:02       ` Volodymyr Fialko
  1 sibling, 1 reply; 36+ messages in thread
From: Gujjar, Abhinandan S @ 2022-09-24  8:43 UTC (permalink / raw)
  To: Volodymyr Fialko, dev, Jerin Jacob, Pavan Nikhilesh,
	Shijith Thotton, Hemant Agrawal, Sachin Saxena, Jayatheerthan,
	Jay
  Cc: gakhil, anoobj



> -----Original Message-----
> From: Volodymyr Fialko <vfialko@marvell.com>
> Sent: Thursday, August 4, 2022 3:29 PM
> To: dev@dpdk.org; Jerin Jacob <jerinj@marvell.com>; Gujjar, Abhinandan S
> <abhinandan.gujjar@intel.com>; Pavan Nikhilesh
> <pbhagavatula@marvell.com>; Shijith Thotton <sthotton@marvell.com>;
> Hemant Agrawal <hemant.agrawal@nxp.com>; Sachin Saxena
> <sachin.saxena@oss.nxp.com>; Jayatheerthan, Jay
> <jay.jayatheerthan@intel.com>
> Cc: gakhil@marvell.com; anoobj@marvell.com; Volodymyr Fialko
> <vfialko@marvell.com>
> Subject: [PATCH 1/3] eventdev: introduce event cryptodev vector type
> 
> Introduce the ability to aggregate crypto operations processed by the event
> crypto adapter into a single event containing an rte_event_vector whose
> event type is RTE_EVENT_TYPE_CRYPTODEV_VECTOR.
> 
> The application should set RTE_EVENT_CRYPTO_ADAPTER_EVENT_VECTOR in
> rte_event_crypto_adapter_queue_conf::flags and provide a vector configuration
> with respect to rte_event_crypto_adapter_vector_limits, which can be
> obtained by calling rte_event_crypto_adapter_vector_limits_get, to enable
> vectorization.
> 
> The event crypto adapter would be responsible for vectorizing the crypto
> operations based on provided response information in
> rte_event_crypto_metadata::response_info.
> 
> Updated drivers and tests accordingly to new API.
> 
> Signed-off-by: Volodymyr Fialko <vfialko@marvell.com>
> ---
>  app/test-eventdev/test_perf_common.c          |  10 +-
>  app/test/test_event_crypto_adapter.c          |  12 ++-
>  .../prog_guide/event_crypto_adapter.rst       |  23 +++-
>  drivers/event/cnxk/cn10k_eventdev.c           |   4 +-
>  drivers/event/cnxk/cn9k_eventdev.c            |   5 +-
>  drivers/event/dpaa/dpaa_eventdev.c            |   9 +-
>  drivers/event/dpaa2/dpaa2_eventdev.c          |   9 +-
>  drivers/event/octeontx/ssovf_evdev.c          |   4 +-
>  lib/eventdev/eventdev_pmd.h                   |  35 +++++-
>  lib/eventdev/eventdev_trace.h                 |   6 +-
>  lib/eventdev/rte_event_crypto_adapter.c       |  90 ++++++++++++++--
>  lib/eventdev/rte_event_crypto_adapter.h       | 101 +++++++++++++++++-
>  lib/eventdev/rte_event_eth_rx_adapter.h       |   3 +-
>  lib/eventdev/rte_eventdev.h                   |   8 ++
>  14 files changed, 276 insertions(+), 43 deletions(-)
> 

I don't see the dataplane implementation of vectorization in the crypto adapter!
Is it missing from the patch?
Comments inline.

> diff --git a/app/test-eventdev/test_perf_common.c b/app/test-
> eventdev/test_perf_common.c
> index 81420be73a..c770bc93f6 100644
> --- a/app/test-eventdev/test_perf_common.c
> +++ b/app/test-eventdev/test_perf_common.c
> @@ -837,14 +837,14 @@ perf_event_crypto_adapter_setup(struct test_perf
> *t, struct prod_data *p)
>  	}
> 
>  	if (cap &
> RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND) {
> -		struct rte_event response_info;
> +		struct rte_event_crypto_adapter_queue_conf conf;
> 
> -		response_info.event = 0;
> -		response_info.sched_type = RTE_SCHED_TYPE_ATOMIC;
> -		response_info.queue_id = p->queue_id;
> +		memset(&conf, 0, sizeof(conf));
> +		conf.ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
> +		conf.ev.queue_id = p->queue_id;
>  		ret = rte_event_crypto_adapter_queue_pair_add(
>  			TEST_PERF_CA_ID, p->ca.cdev_id, p->ca.cdev_qp_id,
> -			&response_info);
> +			&conf);
>  	} else {
>  		ret = rte_event_crypto_adapter_queue_pair_add(
>  			TEST_PERF_CA_ID, p->ca.cdev_id, p->ca.cdev_qp_id,
> NULL); diff --git a/app/test/test_event_crypto_adapter.c
> b/app/test/test_event_crypto_adapter.c
> index 2ecc7e2cea..bb617c1042 100644
> --- a/app/test/test_event_crypto_adapter.c
> +++ b/app/test/test_event_crypto_adapter.c
> @@ -1175,6 +1175,10 @@ test_crypto_adapter_create(void)  static int
>  test_crypto_adapter_qp_add_del(void)
>  {
> +	struct rte_event_crypto_adapter_queue_conf queue_conf = {
> +		.ev = response_info,
> +	};
> +
>  	uint32_t cap;
>  	int ret;
> 
> @@ -1183,7 +1187,7 @@ test_crypto_adapter_qp_add_del(void)
> 
>  	if (cap &
> RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND) {
>  		ret =
> rte_event_crypto_adapter_queue_pair_add(TEST_ADAPTER_ID,
> -				TEST_CDEV_ID, TEST_CDEV_QP_ID,
> &response_info);
> +				TEST_CDEV_ID, TEST_CDEV_QP_ID,
> &queue_conf);
>  	} else
>  		ret =
> rte_event_crypto_adapter_queue_pair_add(TEST_ADAPTER_ID,
>  					TEST_CDEV_ID, TEST_CDEV_QP_ID,
> NULL); @@ -1206,6 +1210,10 @@ configure_event_crypto_adapter(enum
> rte_event_crypto_adapter_mode mode)
>  		.new_event_threshold = 1200,
>  	};
> 
> +	struct rte_event_crypto_adapter_queue_conf queue_conf = {
> +		.ev = response_info,
> +	};
> +
>  	uint32_t cap;
>  	int ret;
> 
> @@ -1238,7 +1246,7 @@ configure_event_crypto_adapter(enum
> rte_event_crypto_adapter_mode mode)
> 
>  	if (cap &
> RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND) {
>  		ret =
> rte_event_crypto_adapter_queue_pair_add(TEST_ADAPTER_ID,
> -				TEST_CDEV_ID, TEST_CDEV_QP_ID,
> &response_info);
> +				TEST_CDEV_ID, TEST_CDEV_QP_ID,
> &queue_conf);
>  	} else
>  		ret =
> rte_event_crypto_adapter_queue_pair_add(TEST_ADAPTER_ID,
>  				TEST_CDEV_ID, TEST_CDEV_QP_ID, NULL); diff
> --git a/doc/guides/prog_guide/event_crypto_adapter.rst
> b/doc/guides/prog_guide/event_crypto_adapter.rst
> index 4fb5c688e0..554df7e358 100644
> --- a/doc/guides/prog_guide/event_crypto_adapter.rst
> +++ b/doc/guides/prog_guide/event_crypto_adapter.rst
> @@ -201,10 +201,10 @@ capability, event information must be passed to the
> add API.
> 
>          ret = rte_event_crypto_adapter_caps_get(id, evdev, &cap);
>          if (cap &
> RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND) {
> -                struct rte_event event;
> +                struct rte_event_crypto_adapter_queue_conf conf;
> 
> -                // Fill in event information & pass it to add API
> -                rte_event_crypto_adapter_queue_pair_add(id, cdev_id, qp_id,
> &event);
> +                // Fill in conf.event information & pass it to add API
> +                rte_event_crypto_adapter_queue_pair_add(id, cdev_id,
> + qp_id, &conf);
>          } else
>                  rte_event_crypto_adapter_queue_pair_add(id, cdev_id, qp_id,
> NULL);
> 
> @@ -291,6 +291,23 @@ the ``rte_crypto_op``.
>                  rte_memcpy(op + len, &m_data, sizeof(m_data));
>          }
> 
> +Enable event vectorization
> +~~~~~~~~~~~~~~~~~~~~~~~~~~
> +
> +The event crypto adapter can aggregate outgoing crypto operations,
> +based on the response information provided in
> +``rte_event_crypto_metadata::response_info``,
> +and generate a ``rte_event`` containing an ``rte_event_vector`` whose
> +event type is ``RTE_EVENT_TYPE_CRYPTODEV_VECTOR``.
> +To enable vectorization, the application should set
> +RTE_EVENT_CRYPTO_ADAPTER_EVENT_VECTOR in
> +``rte_event_crypto_adapter_queue_conf::flags`` and provide vector
> +configuration (size, mempool, etc.) with respect to
> +``rte_event_crypto_adapter_vector_limits``, which can be obtained by
> +calling ``rte_event_crypto_adapter_vector_limits_get()``.
> +
> +The RTE_EVENT_CRYPTO_ADAPTER_CAP_EVENT_VECTOR capability indicates
> +whether PMD supports this feature.
> +
>  Start the adapter instance
>  ~~~~~~~~~~~~~~~~~~~~~~~~~~
> 
> diff --git a/drivers/event/cnxk/cn10k_eventdev.c
> b/drivers/event/cnxk/cn10k_eventdev.c
> index 5a0cab40a9..e74ec57382 100644
> --- a/drivers/event/cnxk/cn10k_eventdev.c
> +++ b/drivers/event/cnxk/cn10k_eventdev.c
> @@ -889,11 +889,11 @@ static int
>  cn10k_crypto_adapter_qp_add(const struct rte_eventdev *event_dev,
>  			    const struct rte_cryptodev *cdev,
>  			    int32_t queue_pair_id,
> -			    const struct rte_event *event)
> +			    const struct rte_event_crypto_adapter_queue_conf
> *conf)
>  {
>  	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
> 
> -	RTE_SET_USED(event);
> +	RTE_SET_USED(conf);
> 
>  	CNXK_VALID_DEV_OR_ERR_RET(event_dev->dev, "event_cn10k");
>  	CNXK_VALID_DEV_OR_ERR_RET(cdev->device, "crypto_cn10k"); diff --
> git a/drivers/event/cnxk/cn9k_eventdev.c
> b/drivers/event/cnxk/cn9k_eventdev.c
> index 2e27030049..45ed547cb0 100644
> --- a/drivers/event/cnxk/cn9k_eventdev.c
> +++ b/drivers/event/cnxk/cn9k_eventdev.c
> @@ -1120,11 +1120,12 @@ cn9k_crypto_adapter_caps_get(const struct
> rte_eventdev *event_dev,  static int  cn9k_crypto_adapter_qp_add(const struct
> rte_eventdev *event_dev,
>  			   const struct rte_cryptodev *cdev,
> -			   int32_t queue_pair_id, const struct rte_event *event)
> +			   int32_t queue_pair_id,
> +			   const struct rte_event_crypto_adapter_queue_conf
> *conf)
>  {
>  	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
> 
> -	RTE_SET_USED(event);
> +	RTE_SET_USED(conf);
> 
>  	CNXK_VALID_DEV_OR_ERR_RET(event_dev->dev, "event_cn9k");
>  	CNXK_VALID_DEV_OR_ERR_RET(cdev->device, "crypto_cn9k"); diff --git
> a/drivers/event/dpaa/dpaa_eventdev.c b/drivers/event/dpaa/dpaa_eventdev.c
> index ff6cc0be18..2b9ecd9fbf 100644
> --- a/drivers/event/dpaa/dpaa_eventdev.c
> +++ b/drivers/event/dpaa/dpaa_eventdev.c
> @@ -26,6 +26,7 @@
>  #include <rte_eventdev.h>
>  #include <eventdev_pmd_vdev.h>
>  #include <rte_ethdev.h>
> +#include <rte_event_crypto_adapter.h>
>  #include <rte_event_eth_rx_adapter.h>
>  #include <rte_event_eth_tx_adapter.h>
>  #include <cryptodev_pmd.h>
> @@ -775,10 +776,10 @@ static int
>  dpaa_eventdev_crypto_queue_add(const struct rte_eventdev *dev,
>  		const struct rte_cryptodev *cryptodev,
>  		int32_t rx_queue_id,
> -		const struct rte_event *ev)
> +		const struct rte_event_crypto_adapter_queue_conf *conf)
>  {
>  	struct dpaa_eventdev *priv = dev->data->dev_private;
> -	uint8_t ev_qid = ev->queue_id;
> +	uint8_t ev_qid = conf->ev.queue_id;
>  	u16 ch_id = priv->evq_info[ev_qid].ch_id;
>  	int ret;
> 
> @@ -786,10 +787,10 @@ dpaa_eventdev_crypto_queue_add(const struct
> rte_eventdev *dev,
> 
>  	if (rx_queue_id == -1)
>  		return dpaa_eventdev_crypto_queue_add_all(dev,
> -				cryptodev, ev);
> +				cryptodev, &conf->ev);
> 
>  	ret = dpaa_sec_eventq_attach(cryptodev, rx_queue_id,
> -			ch_id, ev);
> +			ch_id, &conf->ev);
>  	if (ret) {
>  		DPAA_EVENTDEV_ERR(
>  			"dpaa_sec_eventq_attach failed: ret: %d\n", ret); diff -
> -git a/drivers/event/dpaa2/dpaa2_eventdev.c
> b/drivers/event/dpaa2/dpaa2_eventdev.c
> index ffc7b8b073..0137736794 100644
> --- a/drivers/event/dpaa2/dpaa2_eventdev.c
> +++ b/drivers/event/dpaa2/dpaa2_eventdev.c
> @@ -26,6 +26,7 @@
>  #include <rte_bus_vdev.h>
>  #include <ethdev_driver.h>
>  #include <cryptodev_pmd.h>
> +#include <rte_event_crypto_adapter.h>
>  #include <rte_event_eth_rx_adapter.h>
>  #include <rte_event_eth_tx_adapter.h>
> 
> @@ -865,10 +866,10 @@ static int
>  dpaa2_eventdev_crypto_queue_add(const struct rte_eventdev *dev,
>  		const struct rte_cryptodev *cryptodev,
>  		int32_t rx_queue_id,
> -		const struct rte_event *ev)
> +		const struct rte_event_crypto_adapter_queue_conf *conf)
>  {
>  	struct dpaa2_eventdev *priv = dev->data->dev_private;
> -	uint8_t ev_qid = ev->queue_id;
> +	uint8_t ev_qid = conf->ev.queue_id;
>  	struct dpaa2_dpcon_dev *dpcon = priv->evq_info[ev_qid].dpcon;
>  	int ret;
> 
> @@ -876,10 +877,10 @@ dpaa2_eventdev_crypto_queue_add(const struct
> rte_eventdev *dev,
> 
>  	if (rx_queue_id == -1)
>  		return dpaa2_eventdev_crypto_queue_add_all(dev,
> -				cryptodev, ev);
> +				cryptodev, &conf->ev);
> 
>  	ret = dpaa2_sec_eventq_attach(cryptodev, rx_queue_id,
> -				      dpcon, ev);
> +				      dpcon, &conf->ev);
>  	if (ret) {
>  		DPAA2_EVENTDEV_ERR(
>  			"dpaa2_sec_eventq_attach failed: ret: %d\n", ret); diff
> --git a/drivers/event/octeontx/ssovf_evdev.c
> b/drivers/event/octeontx/ssovf_evdev.c
> index 9e14e35d10..17acd8ef64 100644
> --- a/drivers/event/octeontx/ssovf_evdev.c
> +++ b/drivers/event/octeontx/ssovf_evdev.c
> @@ -745,12 +745,12 @@ static int
>  ssovf_crypto_adapter_qp_add(const struct rte_eventdev *dev,
>  			    const struct rte_cryptodev *cdev,
>  			    int32_t queue_pair_id,
> -			    const struct rte_event *event)
> +			    const struct rte_event_crypto_adapter_queue_conf
> *conf)
>  {
>  	struct cpt_instance *qp;
>  	uint8_t qp_id;
> 
> -	RTE_SET_USED(event);
> +	RTE_SET_USED(conf);
> 
>  	if (queue_pair_id == -1) {
>  		for (qp_id = 0; qp_id < cdev->data->nb_queue_pairs; qp_id++) {
> diff --git a/lib/eventdev/eventdev_pmd.h b/lib/eventdev/eventdev_pmd.h
> index 69402668d8..bcfc9cbcb2 100644
> --- a/lib/eventdev/eventdev_pmd.h
> +++ b/lib/eventdev/eventdev_pmd.h
> @@ -907,6 +907,7 @@ rte_event_pmd_selftest_seqn(struct rte_mbuf *mbuf)
> }
> 
>  struct rte_cryptodev;
> +struct rte_event_crypto_adapter_queue_conf;
> 
>  /**
>   * This API may change without prior notice @@ -961,11 +962,11 @@ typedef
> int (*eventdev_crypto_adapter_caps_get_t)
>   *   - <0: Error code returned by the driver function.
>   *
>   */
> -typedef int (*eventdev_crypto_adapter_queue_pair_add_t)
> -			(const struct rte_eventdev *dev,
> -			 const struct rte_cryptodev *cdev,
> -			 int32_t queue_pair_id,
> -			 const struct rte_event *event);
> +typedef int (*eventdev_crypto_adapter_queue_pair_add_t)(
> +		const struct rte_eventdev *dev,
> +		const struct rte_cryptodev *cdev,
> +		int32_t queue_pair_id,
> +		const struct rte_event_crypto_adapter_queue_conf
> *queue_conf);
> 
> 
>  /**
> @@ -1074,6 +1075,27 @@ typedef int
> (*eventdev_crypto_adapter_stats_reset)
>  			(const struct rte_eventdev *dev,
>  			 const struct rte_cryptodev *cdev);
> 
> +struct rte_event_crypto_adapter_vector_limits;
> +/**
> + * Get event vector limits for a given event, crypto device pair.
> + *
> + * @param dev
> + *   Event device pointer
> + *
> + * @param cdev
> + *   Crypto device pointer
> + *
> + * @param[out] limits
> + *   Pointer to the limits structure to be filled.
> + *
> + * @return
> + *   - 0: Success.
> + *   - <0: Error code returned by the driver function.
> + */
> +typedef int (*eventdev_crypto_adapter_vector_limits_get_t)(
> +	const struct rte_eventdev *dev, const struct rte_cryptodev *cdev,
> +	struct rte_event_crypto_adapter_vector_limits *limits);
> +
>  /**
>   * Retrieve the event device's eth Tx adapter capabilities.
>   *
> @@ -1339,6 +1361,9 @@ struct eventdev_ops {
>  	/**< Get crypto stats */
>  	eventdev_crypto_adapter_stats_reset crypto_adapter_stats_reset;
>  	/**< Reset crypto stats */
> +	eventdev_crypto_adapter_vector_limits_get_t
> +		crypto_adapter_vector_limits_get;
> +	/**< Get event vector limits for the crypto adapter */
> 
>  	eventdev_eth_rx_adapter_q_stats_get
> eth_rx_adapter_queue_stats_get;
>  	/**< Get ethernet Rx queue stats */
> diff --git a/lib/eventdev/eventdev_trace.h b/lib/eventdev/eventdev_trace.h
> index 5ec43d80ee..d48cd58850 100644
> --- a/lib/eventdev/eventdev_trace.h
> +++ b/lib/eventdev/eventdev_trace.h
> @@ -18,6 +18,7 @@ extern "C" {
>  #include <rte_trace_point.h>
> 
>  #include "rte_eventdev.h"
> +#include "rte_event_crypto_adapter.h"
>  #include "rte_event_eth_rx_adapter.h"
>  #include "rte_event_timer_adapter.h"
> 
> @@ -271,11 +272,12 @@ RTE_TRACE_POINT(
>  RTE_TRACE_POINT(
>  	rte_eventdev_trace_crypto_adapter_queue_pair_add,
>  	RTE_TRACE_POINT_ARGS(uint8_t adptr_id, uint8_t cdev_id,
> -		const void *event, int32_t queue_pair_id),
> +		int32_t queue_pair_id,
> +		const struct rte_event_crypto_adapter_queue_conf *conf),
>  	rte_trace_point_emit_u8(adptr_id);
>  	rte_trace_point_emit_u8(cdev_id);
>  	rte_trace_point_emit_i32(queue_pair_id);
> -	rte_trace_point_emit_ptr(event);
> +	rte_trace_point_emit_ptr(conf);
>  )
> 
>  RTE_TRACE_POINT(
> diff --git a/lib/eventdev/rte_event_crypto_adapter.c
> b/lib/eventdev/rte_event_crypto_adapter.c
> index 7c695176f4..73a4f231e2 100644
> --- a/lib/eventdev/rte_event_crypto_adapter.c
> +++ b/lib/eventdev/rte_event_crypto_adapter.c
> @@ -921,11 +921,12 @@ int
>  rte_event_crypto_adapter_queue_pair_add(uint8_t id,
>  			uint8_t cdev_id,
>  			int32_t queue_pair_id,
> -			const struct rte_event *event)
> +			const struct rte_event_crypto_adapter_queue_conf
> *conf)
>  {
> +	struct rte_event_crypto_adapter_vector_limits limits;
>  	struct event_crypto_adapter *adapter;
> -	struct rte_eventdev *dev;
>  	struct crypto_device_info *dev_info;
> +	struct rte_eventdev *dev;
>  	uint32_t cap;
>  	int ret;
> 
> @@ -951,11 +952,47 @@ rte_event_crypto_adapter_queue_pair_add(uint8_t
> id,
>  	}
> 
>  	if ((cap &
> RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND) &&
> -	    (event == NULL)) {
> +	    (conf == NULL)) {
>  		RTE_EDEV_LOG_ERR("Conf value can not be NULL for
> dev_id=%u",
>  				  cdev_id);
>  		return -EINVAL;
>  	}
Newline?

> +	if ((conf != NULL) &&
Checking conf twice?
> +	    (conf->flags & RTE_EVENT_CRYPTO_ADAPTER_EVENT_VECTOR)) {
Else condition if the flags is not set?
> +		if ((cap &
> RTE_EVENT_CRYPTO_ADAPTER_CAP_EVENT_VECTOR) == 0) {
> +			RTE_EDEV_LOG_ERR("Event vectorization is not
> supported,"
> +					 "dev %" PRIu8 " cdev %" PRIu8, id,
> +					 cdev_id);
> +			return -ENOTSUP;
> +		}
> +
> +		ret = rte_event_crypto_adapter_vector_limits_get(
> +			adapter->eventdev_id, cdev_id, &limits);
> +		if (ret < 0) {
> +			RTE_EDEV_LOG_ERR("Failed to get event device vector
> "
> +					 "limits, dev %" PRIu8 " cdev %" PRIu8,
> +					 id, cdev_id);
> +			return -EINVAL;
> +		}
New line? Please check other cases.

> +		if (conf->vector_sz < limits.min_sz ||
> +		    conf->vector_sz > limits.max_sz ||
> +		    conf->vector_timeout_ns < limits.min_timeout_ns ||
> +		    conf->vector_timeout_ns > limits.max_timeout_ns ||
> +		    conf->vector_mp == NULL) {
> +			RTE_EDEV_LOG_ERR("Invalid event vector
> configuration,"
> +					 " dev %" PRIu8 " cdev %" PRIu8,
> +					 id, cdev_id);
> +			return -EINVAL;
> +		}
> +		if (conf->vector_mp->elt_size <
> +		    (sizeof(struct rte_event_vector) +
> +		     (sizeof(uintptr_t) * conf->vector_sz))) {
> +			RTE_EDEV_LOG_ERR("Invalid event vector
> configuration,"
> +					 " dev %" PRIu8 " cdev %" PRIu8,
> +					 id, cdev_id);
> +			return -EINVAL;
> +		}
> +	}
> 
>  	dev_info = &adapter->cdevs[cdev_id];
> 
> @@ -990,7 +1027,7 @@ rte_event_crypto_adapter_queue_pair_add(uint8_t
> id,
>  		ret = (*dev->dev_ops->crypto_adapter_queue_pair_add)(dev,
>  				dev_info->dev,
>  				queue_pair_id,
> -				event);
> +				conf);
>  		if (ret)
>  			return ret;
> 
> @@ -1030,8 +1067,8 @@ rte_event_crypto_adapter_queue_pair_add(uint8_t
> id,
>  		rte_service_component_runstate_set(adapter->service_id, 1);
>  	}
> 
> -	rte_eventdev_trace_crypto_adapter_queue_pair_add(id, cdev_id,
> event,
> -		queue_pair_id);
> +	rte_eventdev_trace_crypto_adapter_queue_pair_add(id, cdev_id,
> +		queue_pair_id, conf);
>  	return 0;
>  }
> 
> @@ -1290,3 +1327,44 @@ rte_event_crypto_adapter_event_port_get(uint8_t
> id, uint8_t *event_port_id)
> 
>  	return 0;
>  }
> +
> +int
> +rte_event_crypto_adapter_vector_limits_get(
> +	uint8_t dev_id, uint16_t cdev_id,
> +	struct rte_event_crypto_adapter_vector_limits *limits) {
> +	struct rte_cryptodev *cdev;
> +	struct rte_eventdev *dev;
> +	uint32_t cap;
> +	int ret;
> +
> +	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
> +
> +	if (!rte_cryptodev_is_valid_dev(cdev_id)) {
> +		RTE_EDEV_LOG_ERR("Invalid dev_id=%" PRIu8, cdev_id);
> +		return -EINVAL;
> +	}
> +
> +	if (limits == NULL)
> +		return -EINVAL;
Add appropriate error message like above?
> +
> +	dev = &rte_eventdevs[dev_id];
> +	cdev = rte_cryptodev_pmd_get_dev(cdev_id);
> +
> +	ret = rte_event_crypto_adapter_caps_get(dev_id, cdev_id, &cap);
> +	if (ret) {
> +		RTE_EDEV_LOG_ERR("Failed to get adapter caps edev %" PRIu8
> +				 "cdev %" PRIu16, dev_id, cdev_id);
> +		return ret;
> +	}
> +
> +	if (!(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_EVENT_VECTOR))
> +		return -ENOTSUP;
Same here.
> +
> +	RTE_FUNC_PTR_OR_ERR_RET(
> +		*dev->dev_ops->crypto_adapter_vector_limits_get,
> +		-ENOTSUP);
> +
> +	return dev->dev_ops->crypto_adapter_vector_limits_get(
> +		dev, cdev, limits);
> +}
> diff --git a/lib/eventdev/rte_event_crypto_adapter.h
> b/lib/eventdev/rte_event_crypto_adapter.h
> index d90a19e72c..7dd6171b9b 100644
> --- a/lib/eventdev/rte_event_crypto_adapter.h
> +++ b/lib/eventdev/rte_event_crypto_adapter.h
> @@ -253,6 +253,78 @@ struct rte_event_crypto_adapter_conf {
>  	 */
>  };
> 
> +#define RTE_EVENT_CRYPTO_ADAPTER_EVENT_VECTOR	0x1
> +/**< This flag indicates that crypto operations processed on the crypto
> + * adapter need to be vectorized
> + * @see rte_event_crypto_adapter_queue_conf::flags
> + */
> +
> +/**
> + * Adapter queue configuration structure  */ struct
> +rte_event_crypto_adapter_queue_conf {
> +	uint32_t flags;
> +	/**< Flags for handling crypto operations
> +	 * @see RTE_EVENT_CRYPTO_ADAPTER_EVENT_VECTOR
> +	 */
> +	struct rte_event ev;
> +	/**< If HW supports cryptodev queue pair to event queue binding,
> +	 * application is expected to fill in event information.
> +	 * @see
> RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND
> +	 */
> +	uint16_t vector_sz;
> +	/**< Indicates the maximum number for crypto operations to combine
> and
> +	 * form a vector.
> +	 * @see rte_event_crypto_adapter_vector_limits::min_sz
> +	 * @see rte_event_crypto_adapter_vector_limits::max_sz
> +	 * Valid when RTE_EVENT_CRYPTO_ADAPTER_EVENT_VECTOR flag is
> set in
> +	 * @see rte_event_crypto_adapter_queue_conf::rx_queue_flags
rx_queue_flags??
> +	 */
> +	uint64_t vector_timeout_ns;
> +	/**<
> +	 * Indicates the maximum number of nanoseconds to wait for
> aggregating
> +	 * crypto operations. Should be within vectorization limits of the
> +	 * adapter
> +	 * @see rte_event_crypto_adapter_vector_limits::min_timeout_ns
> +	 * @see rte_event_crypto_adapter_vector_limits::max_timeout_ns
> +	 * Valid when RTE_EVENT_CRYPTO_ADAPTER_EVENT_VECTOR flag is
> set in
> +	 * @see rte_event_crypto_adapter_queue_conf::flags
> +	 */
> +	struct rte_mempool *vector_mp;
> +	/**< Indicates the mempool that should be used for allocating
> +	 * rte_event_vector container.
> +	 * Should be created by using `rte_event_vector_pool_create`.
> +	 * Valid when RTE_EVENT_CRYPTO_ADAPTER_EVENT_VECTOR flag is
> set in
> +	 * @see rte_event_crypto_adapter_queue_conf::flags.
> +	 */
> +};
> +
> +/**
> + * A structure used to retrieve event crypto adapter vector limits.
> + */
> +struct rte_event_crypto_adapter_vector_limits {
> +	uint16_t min_sz;
> +	/**< Minimum vector limit configurable.
> +	 * @see rte_event_crypto_adapter_queue_conf::vector_sz
> +	 */
> +	uint16_t max_sz;
> +	/**< Maximum vector limit configurable.
> +	 * @see rte_event_crypto_adapter_queue_conf::vector_sz
> +	 */
> +	uint8_t log2_sz;
> +	/**< True if the size configured should be in log2.
> +	 * @see rte_event_crypto_adapter_queue_conf::vector_sz
> +	 */
> +	uint64_t min_timeout_ns;
> +	/**< Minimum vector timeout configurable.
> +	 * @see rte_event_crypto_adapter_queue_conf::vector_timeout_ns
> +	 */
> +	uint64_t max_timeout_ns;
> +	/**< Maximum vector timeout configurable.
> +	 * @see rte_event_crypto_adapter_queue_conf::vector_timeout_ns
> +	 */
> +};
> +
>  /**
>   * Function type used for adapter configuration callback. The callback is
>   * used to fill in members of the struct rte_event_crypto_adapter_conf, this
> @@ -392,10 +464,9 @@ rte_event_crypto_adapter_free(uint8_t id);
>   *  Cryptodev queue pair identifier. If queue_pair_id is set -1,
>   *  adapter adds all the pre configured queue pairs to the instance.
>   *
> - * @param event
> - *  if HW supports cryptodev queue pair to event queue binding, application is
> - *  expected to fill in event information, else it will be NULL.
> - *  @see RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND
> + * @param conf
> + *  Additional configuration structure of type
> + *  *rte_event_crypto_adapter_queue_conf*
>   *
>   * @return
>   *  - 0: Success, queue pair added correctly.
> @@ -405,7 +476,7 @@ int
>  rte_event_crypto_adapter_queue_pair_add(uint8_t id,
>  			uint8_t cdev_id,
>  			int32_t queue_pair_id,
> -			const struct rte_event *event);
> +			const struct rte_event_crypto_adapter_queue_conf
> *conf);
> 
>  /**
>   * Delete a queue pair from an event crypto adapter.
> @@ -523,6 +594,26 @@ rte_event_crypto_adapter_service_id_get(uint8_t id,
> uint32_t *service_id);  int  rte_event_crypto_adapter_event_port_get(uint8_t
> id, uint8_t *event_port_id);
> 
> +/**
> + * Retrieve vector limits for a given event dev and crypto dev pair.
> + * @see rte_event_crypto_adapter_vector_limits
> + *
> + * @param dev_id
> + *  Event device identifier.
> + * @param cdev_id
> + *  Crypto device identifier.
> + * @param [out] limits
> + *  A pointer to rte_event_crypto_adapter_vector_limits structure that
> +has to
> + * be filled.
Space missing before "be filled"
> + *
> + * @return
> + *  - 0: Success.
> + *  - <0: Error code on failure.
> + */
> +int rte_event_crypto_adapter_vector_limits_get(
> +	uint8_t dev_id, uint16_t cdev_id,
> +	struct rte_event_crypto_adapter_vector_limits *limits);
> +
>  /**
>   * Enqueue a burst of crypto operations as event objects supplied in
> *rte_event*
>   * structure on an event crypto adapter designated by its event *dev_id*
> through diff --git a/lib/eventdev/rte_event_eth_rx_adapter.h
> b/lib/eventdev/rte_event_eth_rx_adapter.h
> index 3608a7b2cf..c8f2936866 100644
> --- a/lib/eventdev/rte_event_eth_rx_adapter.h
> +++ b/lib/eventdev/rte_event_eth_rx_adapter.h
> @@ -457,7 +457,8 @@ int rte_event_eth_rx_adapter_free(uint8_t id);
>   * @see RTE_EVENT_ETH_RX_ADAPTER_CAP_MULTI_EVENTQ
>   *
>   * @param conf
> - *  Additional configuration structure of type
> *rte_event_eth_rx_adapter_conf*
> + *  Additional configuration structure of type
> + *  *rte_event_eth_rx_adapter_queue_conf*
These changes are not relevant. Please consider sending a separate patch.

>   *
>   * @return
>   *  - 0: Success, Receive queue added correctly.
> diff --git a/lib/eventdev/rte_eventdev.h b/lib/eventdev/rte_eventdev.h index
> 6a6f6ea4c1..1a737bf851 100644
> --- a/lib/eventdev/rte_eventdev.h
> +++ b/lib/eventdev/rte_eventdev.h
> @@ -1203,6 +1203,9 @@ struct rte_event_vector {
>  #define RTE_EVENT_TYPE_ETH_RX_ADAPTER_VECTOR                                   \
>  	(RTE_EVENT_TYPE_VECTOR | RTE_EVENT_TYPE_ETH_RX_ADAPTER)
> /**< The event vector generated from eth Rx adapter. */
> +#define RTE_EVENT_TYPE_CRYPTODEV_VECTOR                                        \
> +	(RTE_EVENT_TYPE_VECTOR | RTE_EVENT_TYPE_CRYPTODEV) /**< The
> event
> +vector generated from cryptodev adapter. */
> 
>  #define RTE_EVENT_TYPE_MAX              0x10
>  /**< Maximum number of event types */
> @@ -1420,6 +1423,11 @@ rte_event_timer_adapter_caps_get(uint8_t dev_id,
> uint32_t *caps);
>   * the private data information along with the crypto session.
>   */
> 
> +#define RTE_EVENT_CRYPTO_ADAPTER_CAP_EVENT_VECTOR   0x10
> +/**< Flag indicates HW is capable of aggregating processed
> + * crypto operations into rte_event_vector.
> + */
> +
>  /**
>   * Retrieve the event device's crypto adapter capabilities for the
>   * specified cryptodev device
> --
> 2.25.1


^ permalink raw reply	[flat|nested] 36+ messages in thread

* RE: [PATCH 1/3] eventdev: introduce event cryptodev vector type
  2022-09-24  8:43     ` Gujjar, Abhinandan S
@ 2022-09-26 11:02       ` Volodymyr Fialko
  2022-09-27  9:05         ` Gujjar, Abhinandan S
  0 siblings, 1 reply; 36+ messages in thread
From: Volodymyr Fialko @ 2022-09-26 11:02 UTC (permalink / raw)
  To: Gujjar, Abhinandan S, dev, Jerin Jacob Kollanukkaran,
	Pavan Nikhilesh Bhagavatula, Shijith Thotton, Hemant Agrawal,
	Sachin Saxena, Jayatheerthan, Jay
  Cc: Akhil Goyal, Anoob Joseph



> -----Original Message-----
> From: Gujjar, Abhinandan S <abhinandan.gujjar@intel.com>
> Sent: Saturday, September 24, 2022 10:44 AM
> To: Volodymyr Fialko <vfialko@marvell.com>; dev@dpdk.org; Jerin Jacob Kollanukkaran
> <jerinj@marvell.com>; Pavan Nikhilesh Bhagavatula <pbhagavatula@marvell.com>; Shijith Thotton
> <sthotton@marvell.com>; Hemant Agrawal <hemant.agrawal@nxp.com>; Sachin Saxena
> <sachin.saxena@oss.nxp.com>; Jayatheerthan, Jay <jay.jayatheerthan@intel.com>
> Cc: Akhil Goyal <gakhil@marvell.com>; Anoob Joseph <anoobj@marvell.com>
> Subject: [EXT] RE: [PATCH 1/3] eventdev: introduce event cryptodev vector type
> 
> 
> > -----Original Message-----
> > From: Volodymyr Fialko <vfialko@marvell.com>
> > Sent: Thursday, August 4, 2022 3:29 PM
> > To: dev@dpdk.org; Jerin Jacob <jerinj@marvell.com>; Gujjar, Abhinandan
> > S <abhinandan.gujjar@intel.com>; Pavan Nikhilesh
> > <pbhagavatula@marvell.com>; Shijith Thotton <sthotton@marvell.com>;
> > Hemant Agrawal <hemant.agrawal@nxp.com>; Sachin Saxena
> > <sachin.saxena@oss.nxp.com>; Jayatheerthan, Jay
> > <jay.jayatheerthan@intel.com>
> > Cc: gakhil@marvell.com; anoobj@marvell.com; Volodymyr Fialko
> > <vfialko@marvell.com>
> > Subject: [PATCH 1/3] eventdev: introduce event cryptodev vector type
> >
> > Introduce the ability to aggregate crypto operations processed by the
> > event crypto adapter into a single event containing an rte_event_vector
> > whose event type is RTE_EVENT_TYPE_CRYPTODEV_VECTOR.
> >
> > The application should set RTE_EVENT_CRYPTO_ADAPTER_EVENT_VECTOR in
> > rte_event_crypto_adapter_queue_conf::flags and provide a vector
> > configuration with respect to rte_event_crypto_adapter_vector_limits,
> > which can be obtained by calling
> > rte_event_crypto_adapter_vector_limits_get(), to enable vectorization.
> >
> > The event crypto adapter would be responsible for vectorizing the
> > crypto operations based on the response information provided in
> > rte_event_crypto_metadata::response_info.
> >
> > Updated drivers and tests according to the new API.
> >
> > Signed-off-by: Volodymyr Fialko <vfialko@marvell.com>
> > ---
> >  app/test-eventdev/test_perf_common.c          |  10 +-
> >  app/test/test_event_crypto_adapter.c          |  12 ++-
> >  .../prog_guide/event_crypto_adapter.rst       |  23 +++-
> >  drivers/event/cnxk/cn10k_eventdev.c           |   4 +-
> >  drivers/event/cnxk/cn9k_eventdev.c            |   5 +-
> >  drivers/event/dpaa/dpaa_eventdev.c            |   9 +-
> >  drivers/event/dpaa2/dpaa2_eventdev.c          |   9 +-
> >  drivers/event/octeontx/ssovf_evdev.c          |   4 +-
> >  lib/eventdev/eventdev_pmd.h                   |  35 +++++-
> >  lib/eventdev/eventdev_trace.h                 |   6 +-
> >  lib/eventdev/rte_event_crypto_adapter.c       |  90 ++++++++++++++--
> >  lib/eventdev/rte_event_crypto_adapter.h       | 101 +++++++++++++++++-
> >  lib/eventdev/rte_event_eth_rx_adapter.h       |   3 +-
> >  lib/eventdev/rte_eventdev.h                   |   8 ++
> >  14 files changed, 276 insertions(+), 43 deletions(-)
> >
> 
> I don't see a dataplane implementation of vectorization in the crypto adapter!
> Is it missing from the patch?
> Comments inline.
> 
Right now we are targeting the crypto_cn10k PMD and ipsec-secgw event mode to support
vectorization; the datapath side is covered by the crypto/cnxk patches later in this series.
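
For reference, the intended application-side setup would look roughly as below. This is a
sketch against the API introduced in this patch; identifiers such as adapter_id, evdev_id,
cdev_id, qp_id, ev_queue_id and the pool size are placeholders, and includes/error handling
are omitted:

	struct rte_event_crypto_adapter_vector_limits limits;
	struct rte_event_crypto_adapter_queue_conf conf;
	struct rte_mempool *vec_pool;
	uint32_t cap;

	rte_event_crypto_adapter_caps_get(evdev_id, cdev_id, &cap);
	if (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_EVENT_VECTOR) {
		rte_event_crypto_adapter_vector_limits_get(evdev_id, cdev_id,
							   &limits);

		/* Each element holds an rte_event_vector plus vector_sz
		 * pointers, matching the elt_size check in queue_pair_add().
		 */
		vec_pool = rte_event_vector_pool_create("ca_vec_pool", 16384, 0,
							limits.max_sz,
							rte_socket_id());

		memset(&conf, 0, sizeof(conf));
		conf.ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
		conf.ev.queue_id = ev_queue_id;
		conf.flags = RTE_EVENT_CRYPTO_ADAPTER_EVENT_VECTOR;
		conf.vector_sz = limits.max_sz;
		conf.vector_timeout_ns = limits.min_timeout_ns;
		conf.vector_mp = vec_pool;

		rte_event_crypto_adapter_queue_pair_add(adapter_id, cdev_id,
							qp_id, &conf);
	}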

> > diff --git a/app/test-eventdev/test_perf_common.c b/app/test-
> > eventdev/test_perf_common.c index 81420be73a..c770bc93f6 100644
> > --- a/app/test-eventdev/test_perf_common.c
> > +++ b/app/test-eventdev/test_perf_common.c
> > @@ -837,14 +837,14 @@ perf_event_crypto_adapter_setup(struct test_perf
> > *t, struct prod_data *p)
> >  	}
> >
> >  	if (cap &
> > RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND) {
> > -		struct rte_event response_info;
> > +		struct rte_event_crypto_adapter_queue_conf conf;
> >
> > -		response_info.event = 0;
> > -		response_info.sched_type = RTE_SCHED_TYPE_ATOMIC;
> > -		response_info.queue_id = p->queue_id;
> > +		memset(&conf, 0, sizeof(conf));
> > +		conf.ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
> > +		conf.ev.queue_id = p->queue_id;
> >  		ret = rte_event_crypto_adapter_queue_pair_add(
> >  			TEST_PERF_CA_ID, p->ca.cdev_id, p->ca.cdev_qp_id,
> > -			&response_info);
> > +			&conf);
> >  	} else {
> >  		ret = rte_event_crypto_adapter_queue_pair_add(
> >  			TEST_PERF_CA_ID, p->ca.cdev_id, p->ca.cdev_qp_id, NULL); diff
> > --git a/app/test/test_event_crypto_adapter.c
> > b/app/test/test_event_crypto_adapter.c
> > index 2ecc7e2cea..bb617c1042 100644
> > --- a/app/test/test_event_crypto_adapter.c
> > +++ b/app/test/test_event_crypto_adapter.c
> > @@ -1175,6 +1175,10 @@ test_crypto_adapter_create(void)  static int
> >  test_crypto_adapter_qp_add_del(void)
> >  {
> > +	struct rte_event_crypto_adapter_queue_conf queue_conf = {
> > +		.ev = response_info,
> > +	};
> > +
> >  	uint32_t cap;
> >  	int ret;
> >
> > @@ -1183,7 +1187,7 @@ test_crypto_adapter_qp_add_del(void)
> >
> >  	if (cap &
> > RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND) {
> >  		ret =
> > rte_event_crypto_adapter_queue_pair_add(TEST_ADAPTER_ID,
> > -				TEST_CDEV_ID, TEST_CDEV_QP_ID,
> > &response_info);
> > +				TEST_CDEV_ID, TEST_CDEV_QP_ID,
> > &queue_conf);
> >  	} else
> >  		ret =
> > rte_event_crypto_adapter_queue_pair_add(TEST_ADAPTER_ID,
> >  					TEST_CDEV_ID, TEST_CDEV_QP_ID,
> > NULL); @@ -1206,6 +1210,10 @@ configure_event_crypto_adapter(enum
> > rte_event_crypto_adapter_mode mode)
> >  		.new_event_threshold = 1200,
> >  	};
> >
> > +	struct rte_event_crypto_adapter_queue_conf queue_conf = {
> > +		.ev = response_info,
> > +	};
> > +
> >  	uint32_t cap;
> >  	int ret;
> >
> > @@ -1238,7 +1246,7 @@ configure_event_crypto_adapter(enum
> > rte_event_crypto_adapter_mode mode)
> >
> >  	if (cap &
> > RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND) {
> >  		ret =
> > rte_event_crypto_adapter_queue_pair_add(TEST_ADAPTER_ID,
> > -				TEST_CDEV_ID, TEST_CDEV_QP_ID,
> > &response_info);
> > +				TEST_CDEV_ID, TEST_CDEV_QP_ID,
> > &queue_conf);
> >  	} else
> >  		ret =
> > rte_event_crypto_adapter_queue_pair_add(TEST_ADAPTER_ID,
> >  				TEST_CDEV_ID, TEST_CDEV_QP_ID, NULL); diff --git
> > a/doc/guides/prog_guide/event_crypto_adapter.rst
> > b/doc/guides/prog_guide/event_crypto_adapter.rst
> > index 4fb5c688e0..554df7e358 100644
> > --- a/doc/guides/prog_guide/event_crypto_adapter.rst
> > +++ b/doc/guides/prog_guide/event_crypto_adapter.rst
> > @@ -201,10 +201,10 @@ capability, event information must be passed to
> > the add API.
> >
> >          ret = rte_event_crypto_adapter_caps_get(id, evdev, &cap);
> >          if (cap &
> > RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND) {
> > -                struct rte_event event;
> > +                struct rte_event_crypto_adapter_queue_conf conf;
> >
> > -                // Fill in event information & pass it to add API
> > -                rte_event_crypto_adapter_queue_pair_add(id, cdev_id, qp_id,
> > &event);
> > +                // Fill in conf.event information & pass it to add API
> > +                rte_event_crypto_adapter_queue_pair_add(id, cdev_id,
> > + qp_id, &conf);
> >          } else
> >                  rte_event_crypto_adapter_queue_pair_add(id, cdev_id,
> > qp_id, NULL);
> >
> > @@ -291,6 +291,23 @@ the ``rte_crypto_op``.
> >                  rte_memcpy(op + len, &m_data, sizeof(m_data));
> >          }
> >
> > +Enable event vectorization
> > +~~~~~~~~~~~~~~~~~~~~~~~~~~
> > +
> > +The event crypto adapter can aggregate outgoing crypto operations,
> > +based on the response information provided in
> > +``rte_event_crypto_metadata::response_info``,
> > +and generate a ``rte_event`` containing an ``rte_event_vector`` whose
> > +event type is ``RTE_EVENT_TYPE_CRYPTODEV_VECTOR``.
> > +To enable vectorization, the application should set
> > +RTE_EVENT_CRYPTO_ADAPTER_EVENT_VECTOR in
> > +``rte_event_crypto_adapter_queue_conf::flags`` and provide vector
> > +configuration (size, mempool, etc.) with respect to
> > +``rte_event_crypto_adapter_vector_limits``, which can be obtained
> > +by calling ``rte_event_crypto_adapter_vector_limits_get()``.
> > +
> > +The RTE_EVENT_CRYPTO_ADAPTER_CAP_EVENT_VECTOR capability indicates
> > +whether PMD supports this feature.
> > +
> >  Start the adapter instance
> >  ~~~~~~~~~~~~~~~~~~~~~~~~~~
> >
> > diff --git a/drivers/event/cnxk/cn10k_eventdev.c
> > b/drivers/event/cnxk/cn10k_eventdev.c
> > index 5a0cab40a9..e74ec57382 100644
> > --- a/drivers/event/cnxk/cn10k_eventdev.c
> > +++ b/drivers/event/cnxk/cn10k_eventdev.c
> > @@ -889,11 +889,11 @@ static int
> >  cn10k_crypto_adapter_qp_add(const struct rte_eventdev *event_dev,
> >  			    const struct rte_cryptodev *cdev,
> >  			    int32_t queue_pair_id,
> > -			    const struct rte_event *event)
> > +			    const struct rte_event_crypto_adapter_queue_conf
> > *conf)
> >  {
> >  	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
> >
> > -	RTE_SET_USED(event);
> > +	RTE_SET_USED(conf);
> >
> >  	CNXK_VALID_DEV_OR_ERR_RET(event_dev->dev, "event_cn10k");
> >  	CNXK_VALID_DEV_OR_ERR_RET(cdev->device, "crypto_cn10k"); diff -- git
> > a/drivers/event/cnxk/cn9k_eventdev.c
> > b/drivers/event/cnxk/cn9k_eventdev.c
> > index 2e27030049..45ed547cb0 100644
> > --- a/drivers/event/cnxk/cn9k_eventdev.c
> > +++ b/drivers/event/cnxk/cn9k_eventdev.c
> > @@ -1120,11 +1120,12 @@ cn9k_crypto_adapter_caps_get(const struct
> > rte_eventdev *event_dev,  static int  cn9k_crypto_adapter_qp_add(const
> > struct rte_eventdev *event_dev,
> >  			   const struct rte_cryptodev *cdev,
> > -			   int32_t queue_pair_id, const struct rte_event *event)
> > +			   int32_t queue_pair_id,
> > +			   const struct rte_event_crypto_adapter_queue_conf
> > *conf)
> >  {
> >  	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
> >
> > -	RTE_SET_USED(event);
> > +	RTE_SET_USED(conf);
> >
> >  	CNXK_VALID_DEV_OR_ERR_RET(event_dev->dev, "event_cn9k");
> >  	CNXK_VALID_DEV_OR_ERR_RET(cdev->device, "crypto_cn9k"); diff --git
> > a/drivers/event/dpaa/dpaa_eventdev.c
> > b/drivers/event/dpaa/dpaa_eventdev.c
> > index ff6cc0be18..2b9ecd9fbf 100644
> > --- a/drivers/event/dpaa/dpaa_eventdev.c
> > +++ b/drivers/event/dpaa/dpaa_eventdev.c
> > @@ -26,6 +26,7 @@
> >  #include <rte_eventdev.h>
> >  #include <eventdev_pmd_vdev.h>
> >  #include <rte_ethdev.h>
> > +#include <rte_event_crypto_adapter.h>
> >  #include <rte_event_eth_rx_adapter.h>  #include
> > <rte_event_eth_tx_adapter.h>  #include <cryptodev_pmd.h> @@ -775,10
> > +776,10 @@ static int  dpaa_eventdev_crypto_queue_add(const struct
> > rte_eventdev *dev,
> >  		const struct rte_cryptodev *cryptodev,
> >  		int32_t rx_queue_id,
> > -		const struct rte_event *ev)
> > +		const struct rte_event_crypto_adapter_queue_conf *conf)
> >  {
> >  	struct dpaa_eventdev *priv = dev->data->dev_private;
> > -	uint8_t ev_qid = ev->queue_id;
> > +	uint8_t ev_qid = conf->ev.queue_id;
> >  	u16 ch_id = priv->evq_info[ev_qid].ch_id;
> >  	int ret;
> >
> > @@ -786,10 +787,10 @@ dpaa_eventdev_crypto_queue_add(const struct
> > rte_eventdev *dev,
> >
> >  	if (rx_queue_id == -1)
> >  		return dpaa_eventdev_crypto_queue_add_all(dev,
> > -				cryptodev, ev);
> > +				cryptodev, &conf->ev);
> >
> >  	ret = dpaa_sec_eventq_attach(cryptodev, rx_queue_id,
> > -			ch_id, ev);
> > +			ch_id, &conf->ev);
> >  	if (ret) {
> >  		DPAA_EVENTDEV_ERR(
> >  			"dpaa_sec_eventq_attach failed: ret: %d\n", ret); diff - -git
> > a/drivers/event/dpaa2/dpaa2_eventdev.c
> > b/drivers/event/dpaa2/dpaa2_eventdev.c
> > index ffc7b8b073..0137736794 100644
> > --- a/drivers/event/dpaa2/dpaa2_eventdev.c
> > +++ b/drivers/event/dpaa2/dpaa2_eventdev.c
> > @@ -26,6 +26,7 @@
> >  #include <rte_bus_vdev.h>
> >  #include <ethdev_driver.h>
> >  #include <cryptodev_pmd.h>
> > +#include <rte_event_crypto_adapter.h>
> >  #include <rte_event_eth_rx_adapter.h>  #include
> > <rte_event_eth_tx_adapter.h>
> >
> > @@ -865,10 +866,10 @@ static int
> >  dpaa2_eventdev_crypto_queue_add(const struct rte_eventdev *dev,
> >  		const struct rte_cryptodev *cryptodev,
> >  		int32_t rx_queue_id,
> > -		const struct rte_event *ev)
> > +		const struct rte_event_crypto_adapter_queue_conf *conf)
> >  {
> >  	struct dpaa2_eventdev *priv = dev->data->dev_private;
> > -	uint8_t ev_qid = ev->queue_id;
> > +	uint8_t ev_qid = conf->ev.queue_id;
> >  	struct dpaa2_dpcon_dev *dpcon = priv->evq_info[ev_qid].dpcon;
> >  	int ret;
> >
> > @@ -876,10 +877,10 @@ dpaa2_eventdev_crypto_queue_add(const struct
> > rte_eventdev *dev,
> >
> >  	if (rx_queue_id == -1)
> >  		return dpaa2_eventdev_crypto_queue_add_all(dev,
> > -				cryptodev, ev);
> > +				cryptodev, &conf->ev);
> >
> >  	ret = dpaa2_sec_eventq_attach(cryptodev, rx_queue_id,
> > -				      dpcon, ev);
> > +				      dpcon, &conf->ev);
> >  	if (ret) {
> >  		DPAA2_EVENTDEV_ERR(
> >  			"dpaa2_sec_eventq_attach failed: ret: %d\n", ret); diff --git
> > a/drivers/event/octeontx/ssovf_evdev.c
> > b/drivers/event/octeontx/ssovf_evdev.c
> > index 9e14e35d10..17acd8ef64 100644
> > --- a/drivers/event/octeontx/ssovf_evdev.c
> > +++ b/drivers/event/octeontx/ssovf_evdev.c
> > @@ -745,12 +745,12 @@ static int
> >  ssovf_crypto_adapter_qp_add(const struct rte_eventdev *dev,
> >  			    const struct rte_cryptodev *cdev,
> >  			    int32_t queue_pair_id,
> > -			    const struct rte_event *event)
> > +			    const struct rte_event_crypto_adapter_queue_conf
> > *conf)
> >  {
> >  	struct cpt_instance *qp;
> >  	uint8_t qp_id;
> >
> > -	RTE_SET_USED(event);
> > +	RTE_SET_USED(conf);
> >
> >  	if (queue_pair_id == -1) {
> >  		for (qp_id = 0; qp_id < cdev->data->nb_queue_pairs; qp_id++) { diff
> > --git a/lib/eventdev/eventdev_pmd.h b/lib/eventdev/eventdev_pmd.h
> > index 69402668d8..bcfc9cbcb2 100644
> > --- a/lib/eventdev/eventdev_pmd.h
> > +++ b/lib/eventdev/eventdev_pmd.h
> > @@ -907,6 +907,7 @@ rte_event_pmd_selftest_seqn(struct rte_mbuf *mbuf)
> > }
> >
> >  struct rte_cryptodev;
> > +struct rte_event_crypto_adapter_queue_conf;
> >
> >  /**
> >   * This API may change without prior notice @@ -961,11 +962,11 @@
> > typedef int (*eventdev_crypto_adapter_caps_get_t)
> >   *   - <0: Error code returned by the driver function.
> >   *
> >   */
> > -typedef int (*eventdev_crypto_adapter_queue_pair_add_t)
> > -			(const struct rte_eventdev *dev,
> > -			 const struct rte_cryptodev *cdev,
> > -			 int32_t queue_pair_id,
> > -			 const struct rte_event *event);
> > +typedef int (*eventdev_crypto_adapter_queue_pair_add_t)(
> > +		const struct rte_eventdev *dev,
> > +		const struct rte_cryptodev *cdev,
> > +		int32_t queue_pair_id,
> > +		const struct rte_event_crypto_adapter_queue_conf
> > *queue_conf);
> >
> >
> >  /**
> > @@ -1074,6 +1075,27 @@ typedef int
> > (*eventdev_crypto_adapter_stats_reset)
> >  			(const struct rte_eventdev *dev,
> >  			 const struct rte_cryptodev *cdev);
> >
> > +struct rte_event_crypto_adapter_vector_limits;
> > +/**
> > + * Get event vector limits for a given event, crypto device pair.
> > + *
> > + * @param dev
> > + *   Event device pointer
> > + *
> > + * @param cdev
> > + *   Crypto device pointer
> > + *
> > + * @param[out] limits
> > + *   Pointer to the limits structure to be filled.
> > + *
> > + * @return
> > + *   - 0: Success.
> > + *   - <0: Error code returned by the driver function.
> > + */
> > +typedef int (*eventdev_crypto_adapter_vector_limits_get_t)(
> > +	const struct rte_eventdev *dev, const struct rte_cryptodev *cdev,
> > +	struct rte_event_crypto_adapter_vector_limits *limits);
> > +
> >  /**
> >   * Retrieve the event device's eth Tx adapter capabilities.
> >   *
> > @@ -1339,6 +1361,9 @@ struct eventdev_ops {
> >  	/**< Get crypto stats */
> >  	eventdev_crypto_adapter_stats_reset crypto_adapter_stats_reset;
> >  	/**< Reset crypto stats */
> > +	eventdev_crypto_adapter_vector_limits_get_t
> > +		crypto_adapter_vector_limits_get;
> > +	/**< Get event vector limits for the crypto adapter */
> >
> >  	eventdev_eth_rx_adapter_q_stats_get
> > eth_rx_adapter_queue_stats_get;
> >  	/**< Get ethernet Rx queue stats */
> > diff --git a/lib/eventdev/eventdev_trace.h
> > b/lib/eventdev/eventdev_trace.h index 5ec43d80ee..d48cd58850 100644
> > --- a/lib/eventdev/eventdev_trace.h
> > +++ b/lib/eventdev/eventdev_trace.h
> > @@ -18,6 +18,7 @@ extern "C" {
> >  #include <rte_trace_point.h>
> >
> >  #include "rte_eventdev.h"
> > +#include "rte_event_crypto_adapter.h"
> >  #include "rte_event_eth_rx_adapter.h"
> >  #include "rte_event_timer_adapter.h"
> >
> > @@ -271,11 +272,12 @@ RTE_TRACE_POINT(  RTE_TRACE_POINT(
> >  	rte_eventdev_trace_crypto_adapter_queue_pair_add,
> >  	RTE_TRACE_POINT_ARGS(uint8_t adptr_id, uint8_t cdev_id,
> > -		const void *event, int32_t queue_pair_id),
> > +		int32_t queue_pair_id,
> > +		const struct rte_event_crypto_adapter_queue_conf *conf),
> >  	rte_trace_point_emit_u8(adptr_id);
> >  	rte_trace_point_emit_u8(cdev_id);
> >  	rte_trace_point_emit_i32(queue_pair_id);
> > -	rte_trace_point_emit_ptr(event);
> > +	rte_trace_point_emit_ptr(conf);
> >  )
> >
> >  RTE_TRACE_POINT(
> > diff --git a/lib/eventdev/rte_event_crypto_adapter.c
> > b/lib/eventdev/rte_event_crypto_adapter.c
> > index 7c695176f4..73a4f231e2 100644
> > --- a/lib/eventdev/rte_event_crypto_adapter.c
> > +++ b/lib/eventdev/rte_event_crypto_adapter.c
> > @@ -921,11 +921,12 @@ int
> >  rte_event_crypto_adapter_queue_pair_add(uint8_t id,
> >  			uint8_t cdev_id,
> >  			int32_t queue_pair_id,
> > -			const struct rte_event *event)
> > +			const struct rte_event_crypto_adapter_queue_conf
> > *conf)
> >  {
> > +	struct rte_event_crypto_adapter_vector_limits limits;
> >  	struct event_crypto_adapter *adapter;
> > -	struct rte_eventdev *dev;
> >  	struct crypto_device_info *dev_info;
> > +	struct rte_eventdev *dev;
> >  	uint32_t cap;
> >  	int ret;
> >
> > @@ -951,11 +952,47 @@ rte_event_crypto_adapter_queue_pair_add(uint8_t
> > id,
> >  	}
> >
> >  	if ((cap &
> > RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND) &&
> > -	    (event == NULL)) {
> > +	    (conf == NULL)) {
> >  		RTE_EDEV_LOG_ERR("Conf value can not be NULL for dev_id=%u",
> >  				  cdev_id);
> >  		return -EINVAL;
> >  	}
> Newline?
> 
Ack

> > +	if ((conf != NULL) &&
> Checking conf twice?
Will rewrite as an if (conf == NULL)/else, to avoid checking conf twice.
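
A sketch of the planned restructure (exact formatting may differ in v2):

	if (conf == NULL) {
		if (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND) {
			RTE_EDEV_LOG_ERR("Conf value can not be NULL for dev_id=%u",
					 cdev_id);
			return -EINVAL;
		}
	} else if (conf->flags & RTE_EVENT_CRYPTO_ADAPTER_EVENT_VECTOR) {
		/* capability, limits and mempool validation from this patch */
	}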

> > +	    (conf->flags & RTE_EVENT_CRYPTO_ADAPTER_EVENT_VECTOR)) {
> Else condition if the flags is not set?
There's no additional handling for the case when the flag is not set.

> > +		if ((cap &
> > RTE_EVENT_CRYPTO_ADAPTER_CAP_EVENT_VECTOR) == 0) {
> > +			RTE_EDEV_LOG_ERR("Event vectorization is not
> > supported,"
> > +					 "dev %" PRIu8 " cdev %" PRIu8, id,
> > +					 cdev_id);
> > +			return -ENOTSUP;
> > +		}
> > +
> > +		ret = rte_event_crypto_adapter_vector_limits_get(
> > +			adapter->eventdev_id, cdev_id, &limits);
> > +		if (ret < 0) {
> > +			RTE_EDEV_LOG_ERR("Failed to get event device vector
> > "
> > +					 "limits, dev %" PRIu8 " cdev %" PRIu8,
> > +					 id, cdev_id);
> > +			return -EINVAL;
> > +		}
> New line? Please check other cases.
> 
Ack

> > +		if (conf->vector_sz < limits.min_sz ||
> > +		    conf->vector_sz > limits.max_sz ||
> > +		    conf->vector_timeout_ns < limits.min_timeout_ns ||
> > +		    conf->vector_timeout_ns > limits.max_timeout_ns ||
> > +		    conf->vector_mp == NULL) {
> > +			RTE_EDEV_LOG_ERR("Invalid event vector
> > configuration,"
> > +					 " dev %" PRIu8 " cdev %" PRIu8,
> > +					 id, cdev_id);
> > +			return -EINVAL;
> > +		}
> > +		if (conf->vector_mp->elt_size <
> > +		    (sizeof(struct rte_event_vector) +
> > +		     (sizeof(uintptr_t) * conf->vector_sz))) {
> > +			RTE_EDEV_LOG_ERR("Invalid event vector
> > configuration,"
> > +					 " dev %" PRIu8 " cdev %" PRIu8,
> > +					 id, cdev_id);
> > +			return -EINVAL;
> > +		}
> > +	}
> >
> >  	dev_info = &adapter->cdevs[cdev_id];
> >
> > @@ -990,7 +1027,7 @@ rte_event_crypto_adapter_queue_pair_add(uint8_t
> > id,
> >  		ret = (*dev->dev_ops->crypto_adapter_queue_pair_add)(dev,
> >  				dev_info->dev,
> >  				queue_pair_id,
> > -				event);
> > +				conf);
> >  		if (ret)
> >  			return ret;
> >
> > @@ -1030,8 +1067,8 @@ rte_event_crypto_adapter_queue_pair_add(uint8_t
> > id,
> >  		rte_service_component_runstate_set(adapter->service_id, 1);
> >  	}
> >
> > -	rte_eventdev_trace_crypto_adapter_queue_pair_add(id, cdev_id,
> > event,
> > -		queue_pair_id);
> > +	rte_eventdev_trace_crypto_adapter_queue_pair_add(id, cdev_id,
> > +		queue_pair_id, conf);
> >  	return 0;
> >  }
> >
> > @@ -1290,3 +1327,44 @@ rte_event_crypto_adapter_event_port_get(uint8_t
> > id, uint8_t *event_port_id)
> >
> >  	return 0;
> >  }
> > +
> > +int
> > +rte_event_crypto_adapter_vector_limits_get(
> > +	uint8_t dev_id, uint16_t cdev_id,
> > +	struct rte_event_crypto_adapter_vector_limits *limits) {
> > +	struct rte_cryptodev *cdev;
> > +	struct rte_eventdev *dev;
> > +	uint32_t cap;
> > +	int ret;
> > +
> > +	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
> > +
> > +	if (!rte_cryptodev_is_valid_dev(cdev_id)) {
> > +		RTE_EDEV_LOG_ERR("Invalid dev_id=%" PRIu8, cdev_id);
> > +		return -EINVAL;
> > +	}
> > +
> > +	if (limits == NULL)
> > +		return -EINVAL;
> Add appropriate error message like above?
Ack, will add.
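
For example (exact message wording may differ in v2):

	if (limits == NULL) {
		RTE_EDEV_LOG_ERR("Invalid limits parameter, cdev %" PRIu16,
				 cdev_id);
		return -EINVAL;
	}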

> > +
> > +	dev = &rte_eventdevs[dev_id];
> > +	cdev = rte_cryptodev_pmd_get_dev(cdev_id);
> > +
> > +	ret = rte_event_crypto_adapter_caps_get(dev_id, cdev_id, &cap);
> > +	if (ret) {
> > +		RTE_EDEV_LOG_ERR("Failed to get adapter caps edev %" PRIu8
> > +				 "cdev %" PRIu16, dev_id, cdev_id);
> > +		return ret;
> > +	}
> > +
> > +	if (!(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_EVENT_VECTOR))
> > +		return -ENOTSUP;
> Same here.
Ack, will add.

> > +
> > +	RTE_FUNC_PTR_OR_ERR_RET(
> > +		*dev->dev_ops->crypto_adapter_vector_limits_get,
> > +		-ENOTSUP);
> > +
> > +	return dev->dev_ops->crypto_adapter_vector_limits_get(
> > +		dev, cdev, limits);
> > +}
> > diff --git a/lib/eventdev/rte_event_crypto_adapter.h
> > b/lib/eventdev/rte_event_crypto_adapter.h
> > index d90a19e72c..7dd6171b9b 100644
> > --- a/lib/eventdev/rte_event_crypto_adapter.h
> > +++ b/lib/eventdev/rte_event_crypto_adapter.h
> > @@ -253,6 +253,78 @@ struct rte_event_crypto_adapter_conf {
> >  	 */
> >  };
> >
> > +#define RTE_EVENT_CRYPTO_ADAPTER_EVENT_VECTOR	0x1
> > +/**< This flag indicates that crypto operations processed on the
> > +crypto
> > + * adapter need to be vectorized
> > + * @see rte_event_crypto_adapter_queue_conf::flags
> > + */
> > +
> > +/**
> > + * Adapter queue configuration structure  */ struct
> > +rte_event_crypto_adapter_queue_conf {
> > +	uint32_t flags;
> > +	/**< Flags for handling crypto operations
> > +	 * @see RTE_EVENT_CRYPTO_ADAPTER_EVENT_VECTOR
> > +	 */
> > +	struct rte_event ev;
> > +	/**< If HW supports cryptodev queue pair to event queue binding,
> > +	 * application is expected to fill in event information.
> > +	 * @see
> > RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND
> > +	 */
> > +	uint16_t vector_sz;
> > +	/**< Indicates the maximum number for crypto operations to combine
> > and
> > +	 * form a vector.
> > +	 * @see rte_event_crypto_adapter_vector_limits::min_sz
> > +	 * @see rte_event_crypto_adapter_vector_limits::max_sz
> > +	 * Valid when RTE_EVENT_CRYPTO_ADAPTER_EVENT_VECTOR flag is
> > set in
> > +	 * @see rte_event_crypto_adapter_queue_conf::rx_queue_flags
> rx_queue_flags??
Typo, should be conf::flags.

> > +	 */
> > +	uint64_t vector_timeout_ns;
> > +	/**<
> > +	 * Indicates the maximum number of nanoseconds to wait for
> > aggregating
> > +	 * crypto operations. Should be within vectorization limits of the
> > +	 * adapter
> > +	 * @see rte_event_crypto_adapter_vector_limits::min_timeout_ns
> > +	 * @see rte_event_crypto_adapter_vector_limits::max_timeout_ns
> > +	 * Valid when RTE_EVENT_CRYPTO_ADAPTER_EVENT_VECTOR flag is
> > set in
> > +	 * @see rte_event_crypto_adapter_queue_conf::flags
> > +	 */
> > +	struct rte_mempool *vector_mp;
> > +	/**< Indicates the mempool that should be used for allocating
> > +	 * rte_event_vector container.
> > +	 * Should be created by using `rte_event_vector_pool_create`.
> > +	 * Valid when RTE_EVENT_CRYPTO_ADAPTER_EVENT_VECTOR flag is
> > set in
> > +	 * @see rte_event_crypto_adapter_queue_conf::flags.
> > +	 */
> > +};
> > +
> > +/**
> > + * A structure used to retrieve event crypto adapter vector limits.
> > + */
> > +struct rte_event_crypto_adapter_vector_limits {
> > +	uint16_t min_sz;
> > +	/**< Minimum vector limit configurable.
> > +	 * @see rte_event_crypto_adapter_queue_conf::vector_sz
> > +	 */
> > +	uint16_t max_sz;
> > +	/**< Maximum vector limit configurable.
> > +	 * @see rte_event_crypto_adapter_queue_conf::vector_sz
> > +	 */
> > +	uint8_t log2_sz;
> > +	/**< True if the size configured should be in log2.
> > +	 * @see rte_event_crypto_adapter_queue_conf::vector_sz
> > +	 */
> > +	uint64_t min_timeout_ns;
> > +	/**< Minimum vector timeout configurable.
> > +	 * @see rte_event_crypto_adapter_queue_conf::vector_timeout_ns
> > +	 */
> > +	uint64_t max_timeout_ns;
> > +	/**< Maximum vector timeout configurable.
> > +	 * @see rte_event_crypto_adapter_queue_conf::vector_timeout_ns
> > +	 */
> > +};
> > +
> >  /**
> >   * Function type used for adapter configuration callback. The callback is
> >   * used to fill in members of the struct
> > rte_event_crypto_adapter_conf, this @@ -392,10 +464,9 @@
> rte_event_crypto_adapter_free(uint8_t id);
> >   *  Cryptodev queue pair identifier. If queue_pair_id is set -1,
> >   *  adapter adds all the pre configured queue pairs to the instance.
> >   *
> > - * @param event
> > - *  if HW supports cryptodev queue pair to event queue binding,
> > application is
> > - *  expected to fill in event information, else it will be NULL.
> > - *  @see RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND
> > + * @param conf
> > + *  Additional configuration structure of type
> > + *  *rte_event_crypto_adapter_queue_conf*
> >   *
> >   * @return
> >   *  - 0: Success, queue pair added correctly.
> > @@ -405,7 +476,7 @@ int
> >  rte_event_crypto_adapter_queue_pair_add(uint8_t id,
> >  			uint8_t cdev_id,
> >  			int32_t queue_pair_id,
> > -			const struct rte_event *event);
> > +			const struct rte_event_crypto_adapter_queue_conf
> > *conf);
> >
> >  /**
> >   * Delete a queue pair from an event crypto adapter.
> > @@ -523,6 +594,26 @@ rte_event_crypto_adapter_service_id_get(uint8_t
> > id, uint32_t *service_id);  int
> > rte_event_crypto_adapter_event_port_get(uint8_t
> > id, uint8_t *event_port_id);
> >
> > +/**
> > + * Retrieve vector limits for a given event dev and crypto dev pair.
> > + * @see rte_event_crypto_adapter_vector_limits
> > + *
> > + * @param dev_id
> > + *  Event device identifier.
> > + * @param cdev_id
> > + *  Crypto device identifier.
> > + * @param [out] limits
> > + *  A pointer to rte_event_crypto_adapter_vector_limits structure
> > +that has to
> > + * be filled.
> Space missing before "be filled"
Ack

> > + *
> > + * @return
> > + *  - 0: Success.
> > + *  - <0: Error code on failure.
> > + */
> > +int rte_event_crypto_adapter_vector_limits_get(
> > +	uint8_t dev_id, uint16_t cdev_id,
> > +	struct rte_event_crypto_adapter_vector_limits *limits);
> > +
> >  /**
> >   * Enqueue a burst of crypto operations as event objects supplied in
> > *rte_event*
> >   * structure on an event crypto adapter designated by its event
> > *dev_id* through diff --git a/lib/eventdev/rte_event_eth_rx_adapter.h
> > b/lib/eventdev/rte_event_eth_rx_adapter.h
> > index 3608a7b2cf..c8f2936866 100644
> > --- a/lib/eventdev/rte_event_eth_rx_adapter.h
> > +++ b/lib/eventdev/rte_event_eth_rx_adapter.h
> > @@ -457,7 +457,8 @@ int rte_event_eth_rx_adapter_free(uint8_t id);
> >   * @see RTE_EVENT_ETH_RX_ADAPTER_CAP_MULTI_EVENTQ
> >   *
> >   * @param conf
> > - *  Additional configuration structure of type
> > *rte_event_eth_rx_adapter_conf*
> > + *  Additional configuration structure of type
> > + *  *rte_event_eth_rx_adapter_queue_conf*
> These changes are not relevant. Please consider sending a separate patch.
> 
Ack, will send this change as a separate patch.

> >   *
> >   * @return
> >   *  - 0: Success, Receive queue added correctly.
> > diff --git a/lib/eventdev/rte_eventdev.h b/lib/eventdev/rte_eventdev.h
> > index
> > 6a6f6ea4c1..1a737bf851 100644
> > --- a/lib/eventdev/rte_eventdev.h
> > +++ b/lib/eventdev/rte_eventdev.h
> > @@ -1203,6 +1203,9 @@ struct rte_event_vector {
> >  #define RTE_EVENT_TYPE_ETH_RX_ADAPTER_VECTOR                                   \
> >  	(RTE_EVENT_TYPE_VECTOR | RTE_EVENT_TYPE_ETH_RX_ADAPTER) /**< The
> > event vector generated from eth Rx adapter. */
> > +#define RTE_EVENT_TYPE_CRYPTODEV_VECTOR                                        \
> > +	(RTE_EVENT_TYPE_VECTOR | RTE_EVENT_TYPE_CRYPTODEV) /**< The
> > event
> > +vector generated from cryptodev adapter. */
> >
> >  #define RTE_EVENT_TYPE_MAX              0x10
> >  /**< Maximum number of event types */ @@ -1420,6 +1423,11 @@
> > rte_event_timer_adapter_caps_get(uint8_t dev_id, uint32_t *caps);
> >   * the private data information along with the crypto session.
> >   */
> >
> > +#define RTE_EVENT_CRYPTO_ADAPTER_CAP_EVENT_VECTOR   0x10
> > +/**< Flag indicates HW is capable of aggregating processed
> > + * crypto operations into rte_event_vector.
> > + */
> > +
> >  /**
> >   * Retrieve the event device's crypto adapter capabilities for the
> >   * specified cryptodev device
> > --
> > 2.25.1


^ permalink raw reply	[flat|nested] 36+ messages in thread

* [PATCH v2 0/3] Vector support for event crypto adapter
  2022-08-04  9:59 ` [PATCH 0/3] Vector support for event crypto adapter Volodymyr Fialko
                     ` (2 preceding siblings ...)
  2022-08-04  9:59   ` [PATCH 3/3] crypto/cnxk: add vectorization " Volodymyr Fialko
@ 2022-09-26 11:36   ` Volodymyr Fialko
  2022-09-26 11:36     ` [PATCH v2 1/3] eventdev: introduce event cryptodev vector type Volodymyr Fialko
                       ` (3 more replies)
  3 siblings, 4 replies; 36+ messages in thread
From: Volodymyr Fialko @ 2022-09-26 11:36 UTC (permalink / raw)
  To: dev; +Cc: jerinj, gakhil, anoobj, abhinandan.gujjar, Volodymyr Fialko

Introduce the ability to aggregate crypto operations processed by the event
crypto adapter into a single event containing an rte_event_vector whose event
type is RTE_EVENT_TYPE_CRYPTODEV_VECTOR.

* Changes since v1
- Fix newline/space issues
- Add missing error messages
- Remove double check of conf input parameter

Volodymyr Fialko (3):
  eventdev: introduce event cryptodev vector type
  crypto/cnxk: add burst enqueue for event crypto
  crypto/cnxk: add vectorization for event crypto

 app/test-eventdev/test_perf_common.c          |  11 +-
 app/test/test_event_crypto_adapter.c          |  12 +-
 .../prog_guide/event_crypto_adapter.rst       |  23 +-
 drivers/crypto/cnxk/cn10k_cryptodev_ops.c     | 456 ++++++++++++++++--
 drivers/crypto/cnxk/cn10k_cryptodev_ops.h     |   9 +-
 drivers/crypto/cnxk/cnxk_cryptodev_ops.h      |   9 +-
 drivers/crypto/cnxk/meson.build               |   2 +-
 drivers/crypto/cnxk/version.map               |   1 +
 drivers/event/cnxk/cn10k_eventdev.c           |  50 +-
 drivers/event/cnxk/cn10k_worker.c             |  10 -
 drivers/event/cnxk/cn10k_worker.h             |   8 +-
 drivers/event/cnxk/cn9k_eventdev.c            |  13 +-
 drivers/event/cnxk/cnxk_eventdev.h            |   4 +-
 drivers/event/cnxk/cnxk_eventdev_adptr.c      |  17 +-
 drivers/event/dpaa/dpaa_eventdev.c            |   9 +-
 drivers/event/dpaa2/dpaa2_eventdev.c          |   9 +-
 drivers/event/octeontx/ssovf_evdev.c          |   4 +-
 lib/eventdev/eventdev_pmd.h                   |  35 +-
 lib/eventdev/eventdev_trace.h                 |   6 +-
 lib/eventdev/rte_event_crypto_adapter.c       | 105 +++-
 lib/eventdev/rte_event_crypto_adapter.h       | 101 +++-
 lib/eventdev/rte_eventdev.h                   |   8 +
 22 files changed, 782 insertions(+), 120 deletions(-)

-- 
2.25.1


^ permalink raw reply	[flat|nested] 36+ messages in thread

* [PATCH v2 1/3] eventdev: introduce event cryptodev vector type
  2022-09-26 11:36   ` [PATCH v2 0/3] Vector support for event crypto adapter Volodymyr Fialko
@ 2022-09-26 11:36     ` Volodymyr Fialko
  2022-09-26 11:36     ` [PATCH v2 2/3] crypto/cnxk: add burst enqueue for event crypto Volodymyr Fialko
                       ` (2 subsequent siblings)
  3 siblings, 0 replies; 36+ messages in thread
From: Volodymyr Fialko @ 2022-09-26 11:36 UTC (permalink / raw)
  To: dev, Jerin Jacob, Abhinandan Gujjar, Pavan Nikhilesh,
	Shijith Thotton, Hemant Agrawal, Sachin Saxena
  Cc: gakhil, anoobj, Volodymyr Fialko

Introduce the ability to aggregate crypto operations processed by the event
crypto adapter into a single event containing an rte_event_vector whose event
type is RTE_EVENT_TYPE_CRYPTODEV_VECTOR.

The application should set RTE_EVENT_CRYPTO_ADAPTER_EVENT_VECTOR in
rte_event_crypto_adapter_queue_conf::flags and provide a vector configuration
with respect to rte_event_crypto_adapter_vector_limits, which can be
obtained by calling rte_event_crypto_adapter_vector_limits_get(), to enable
vectorization.

The event crypto adapter would be responsible for vectorizing the crypto
operations based on the response information provided in
rte_event_crypto_metadata::response_info.

Updated drivers and tests according to the new API.

Signed-off-by: Volodymyr Fialko <vfialko@marvell.com>
---
 app/test-eventdev/test_perf_common.c          |  11 +-
 app/test/test_event_crypto_adapter.c          |  12 +-
 .../prog_guide/event_crypto_adapter.rst       |  23 +++-
 drivers/event/cnxk/cn10k_eventdev.c           |   4 +-
 drivers/event/cnxk/cn9k_eventdev.c            |   5 +-
 drivers/event/dpaa/dpaa_eventdev.c            |   9 +-
 drivers/event/dpaa2/dpaa2_eventdev.c          |   9 +-
 drivers/event/octeontx/ssovf_evdev.c          |   4 +-
 lib/eventdev/eventdev_pmd.h                   |  35 +++++-
 lib/eventdev/eventdev_trace.h                 |   6 +-
 lib/eventdev/rte_event_crypto_adapter.c       | 105 ++++++++++++++++--
 lib/eventdev/rte_event_crypto_adapter.h       | 101 ++++++++++++++++-
 lib/eventdev/rte_eventdev.h                   |   8 ++
 13 files changed, 285 insertions(+), 47 deletions(-)

diff --git a/app/test-eventdev/test_perf_common.c b/app/test-eventdev/test_perf_common.c
index 81420be73a..8472a87b99 100644
--- a/app/test-eventdev/test_perf_common.c
+++ b/app/test-eventdev/test_perf_common.c
@@ -837,14 +837,13 @@ perf_event_crypto_adapter_setup(struct test_perf *t, struct prod_data *p)
 	}
 
 	if (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND) {
-		struct rte_event response_info;
+		struct rte_event_crypto_adapter_queue_conf conf;
 
-		response_info.event = 0;
-		response_info.sched_type = RTE_SCHED_TYPE_ATOMIC;
-		response_info.queue_id = p->queue_id;
+		memset(&conf, 0, sizeof(conf));
+		conf.ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
+		conf.ev.queue_id = p->queue_id;
 		ret = rte_event_crypto_adapter_queue_pair_add(
-			TEST_PERF_CA_ID, p->ca.cdev_id, p->ca.cdev_qp_id,
-			&response_info);
+			TEST_PERF_CA_ID, p->ca.cdev_id, p->ca.cdev_qp_id, &conf);
 	} else {
 		ret = rte_event_crypto_adapter_queue_pair_add(
 			TEST_PERF_CA_ID, p->ca.cdev_id, p->ca.cdev_qp_id, NULL);
diff --git a/app/test/test_event_crypto_adapter.c b/app/test/test_event_crypto_adapter.c
index 2ecc7e2cea..bb617c1042 100644
--- a/app/test/test_event_crypto_adapter.c
+++ b/app/test/test_event_crypto_adapter.c
@@ -1175,6 +1175,10 @@ test_crypto_adapter_create(void)
 static int
 test_crypto_adapter_qp_add_del(void)
 {
+	struct rte_event_crypto_adapter_queue_conf queue_conf = {
+		.ev = response_info,
+	};
+
 	uint32_t cap;
 	int ret;
 
@@ -1183,7 +1187,7 @@ test_crypto_adapter_qp_add_del(void)
 
 	if (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND) {
 		ret = rte_event_crypto_adapter_queue_pair_add(TEST_ADAPTER_ID,
-				TEST_CDEV_ID, TEST_CDEV_QP_ID, &response_info);
+				TEST_CDEV_ID, TEST_CDEV_QP_ID, &queue_conf);
 	} else
 		ret = rte_event_crypto_adapter_queue_pair_add(TEST_ADAPTER_ID,
 					TEST_CDEV_ID, TEST_CDEV_QP_ID, NULL);
@@ -1206,6 +1210,10 @@ configure_event_crypto_adapter(enum rte_event_crypto_adapter_mode mode)
 		.new_event_threshold = 1200,
 	};
 
+	struct rte_event_crypto_adapter_queue_conf queue_conf = {
+		.ev = response_info,
+	};
+
 	uint32_t cap;
 	int ret;
 
@@ -1238,7 +1246,7 @@ configure_event_crypto_adapter(enum rte_event_crypto_adapter_mode mode)
 
 	if (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND) {
 		ret = rte_event_crypto_adapter_queue_pair_add(TEST_ADAPTER_ID,
-				TEST_CDEV_ID, TEST_CDEV_QP_ID, &response_info);
+				TEST_CDEV_ID, TEST_CDEV_QP_ID, &queue_conf);
 	} else
 		ret = rte_event_crypto_adapter_queue_pair_add(TEST_ADAPTER_ID,
 				TEST_CDEV_ID, TEST_CDEV_QP_ID, NULL);
diff --git a/doc/guides/prog_guide/event_crypto_adapter.rst b/doc/guides/prog_guide/event_crypto_adapter.rst
index 4fb5c688e0..554df7e358 100644
--- a/doc/guides/prog_guide/event_crypto_adapter.rst
+++ b/doc/guides/prog_guide/event_crypto_adapter.rst
@@ -201,10 +201,10 @@ capability, event information must be passed to the add API.
 
         ret = rte_event_crypto_adapter_caps_get(id, evdev, &cap);
         if (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND) {
-                struct rte_event event;
+                struct rte_event_crypto_adapter_queue_conf conf;
 
-                // Fill in event information & pass it to add API
-                rte_event_crypto_adapter_queue_pair_add(id, cdev_id, qp_id, &event);
+                // Fill in conf.ev information & pass it to add API
+                rte_event_crypto_adapter_queue_pair_add(id, cdev_id, qp_id, &conf);
         } else
                 rte_event_crypto_adapter_queue_pair_add(id, cdev_id, qp_id, NULL);
 
@@ -291,6 +291,23 @@ the ``rte_crypto_op``.
                 rte_memcpy(op + len, &m_data, sizeof(m_data));
         }
 
+Enable event vectorization
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The event crypto adapter can aggregate outgoing crypto operations, based on
+the response information provided in
+``rte_event_crypto_metadata::response_info``, into a ``rte_event``
+containing a ``rte_event_vector`` whose event type is
+``RTE_EVENT_TYPE_CRYPTODEV_VECTOR``.
+To enable vectorization, the application should set
+``RTE_EVENT_CRYPTO_ADAPTER_EVENT_VECTOR`` in
+``rte_event_crypto_adapter_queue_conf::flags`` and provide the vector
+configuration (size, mempool, etc.) within the limits described by
+``rte_event_crypto_adapter_vector_limits``, which can be obtained by
+calling ``rte_event_crypto_adapter_vector_limits_get()``.
+
+The ``RTE_EVENT_CRYPTO_ADAPTER_CAP_EVENT_VECTOR`` capability indicates
+whether the PMD supports this feature.
+
 Start the adapter instance
 ~~~~~~~~~~~~~~~~~~~~~~~~~~
 
diff --git a/drivers/event/cnxk/cn10k_eventdev.c b/drivers/event/cnxk/cn10k_eventdev.c
index 87ed18b63a..7adf80236f 100644
--- a/drivers/event/cnxk/cn10k_eventdev.c
+++ b/drivers/event/cnxk/cn10k_eventdev.c
@@ -1029,11 +1029,11 @@ static int
 cn10k_crypto_adapter_qp_add(const struct rte_eventdev *event_dev,
 			    const struct rte_cryptodev *cdev,
 			    int32_t queue_pair_id,
-			    const struct rte_event *event)
+			    const struct rte_event_crypto_adapter_queue_conf *conf)
 {
 	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
 
-	RTE_SET_USED(event);
+	RTE_SET_USED(conf);
 
 	CNXK_VALID_DEV_OR_ERR_RET(event_dev->dev, "event_cn10k");
 	CNXK_VALID_DEV_OR_ERR_RET(cdev->device, "crypto_cn10k");
diff --git a/drivers/event/cnxk/cn9k_eventdev.c b/drivers/event/cnxk/cn9k_eventdev.c
index 58c72a580a..bac48ebe63 100644
--- a/drivers/event/cnxk/cn9k_eventdev.c
+++ b/drivers/event/cnxk/cn9k_eventdev.c
@@ -1120,11 +1120,12 @@ cn9k_crypto_adapter_caps_get(const struct rte_eventdev *event_dev,
 static int
 cn9k_crypto_adapter_qp_add(const struct rte_eventdev *event_dev,
 			   const struct rte_cryptodev *cdev,
-			   int32_t queue_pair_id, const struct rte_event *event)
+			   int32_t queue_pair_id,
+			   const struct rte_event_crypto_adapter_queue_conf *conf)
 {
 	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
 
-	RTE_SET_USED(event);
+	RTE_SET_USED(conf);
 
 	CNXK_VALID_DEV_OR_ERR_RET(event_dev->dev, "event_cn9k");
 	CNXK_VALID_DEV_OR_ERR_RET(cdev->device, "crypto_cn9k");
diff --git a/drivers/event/dpaa/dpaa_eventdev.c b/drivers/event/dpaa/dpaa_eventdev.c
index ff6cc0be18..2b9ecd9fbf 100644
--- a/drivers/event/dpaa/dpaa_eventdev.c
+++ b/drivers/event/dpaa/dpaa_eventdev.c
@@ -26,6 +26,7 @@
 #include <rte_eventdev.h>
 #include <eventdev_pmd_vdev.h>
 #include <rte_ethdev.h>
+#include <rte_event_crypto_adapter.h>
 #include <rte_event_eth_rx_adapter.h>
 #include <rte_event_eth_tx_adapter.h>
 #include <cryptodev_pmd.h>
@@ -775,10 +776,10 @@ static int
 dpaa_eventdev_crypto_queue_add(const struct rte_eventdev *dev,
 		const struct rte_cryptodev *cryptodev,
 		int32_t rx_queue_id,
-		const struct rte_event *ev)
+		const struct rte_event_crypto_adapter_queue_conf *conf)
 {
 	struct dpaa_eventdev *priv = dev->data->dev_private;
-	uint8_t ev_qid = ev->queue_id;
+	uint8_t ev_qid = conf->ev.queue_id;
 	u16 ch_id = priv->evq_info[ev_qid].ch_id;
 	int ret;
 
@@ -786,10 +787,10 @@ dpaa_eventdev_crypto_queue_add(const struct rte_eventdev *dev,
 
 	if (rx_queue_id == -1)
 		return dpaa_eventdev_crypto_queue_add_all(dev,
-				cryptodev, ev);
+				cryptodev, &conf->ev);
 
 	ret = dpaa_sec_eventq_attach(cryptodev, rx_queue_id,
-			ch_id, ev);
+			ch_id, &conf->ev);
 	if (ret) {
 		DPAA_EVENTDEV_ERR(
 			"dpaa_sec_eventq_attach failed: ret: %d\n", ret);
diff --git a/drivers/event/dpaa2/dpaa2_eventdev.c b/drivers/event/dpaa2/dpaa2_eventdev.c
index ffc7b8b073..0137736794 100644
--- a/drivers/event/dpaa2/dpaa2_eventdev.c
+++ b/drivers/event/dpaa2/dpaa2_eventdev.c
@@ -26,6 +26,7 @@
 #include <rte_bus_vdev.h>
 #include <ethdev_driver.h>
 #include <cryptodev_pmd.h>
+#include <rte_event_crypto_adapter.h>
 #include <rte_event_eth_rx_adapter.h>
 #include <rte_event_eth_tx_adapter.h>
 
@@ -865,10 +866,10 @@ static int
 dpaa2_eventdev_crypto_queue_add(const struct rte_eventdev *dev,
 		const struct rte_cryptodev *cryptodev,
 		int32_t rx_queue_id,
-		const struct rte_event *ev)
+		const struct rte_event_crypto_adapter_queue_conf *conf)
 {
 	struct dpaa2_eventdev *priv = dev->data->dev_private;
-	uint8_t ev_qid = ev->queue_id;
+	uint8_t ev_qid = conf->ev.queue_id;
 	struct dpaa2_dpcon_dev *dpcon = priv->evq_info[ev_qid].dpcon;
 	int ret;
 
@@ -876,10 +877,10 @@ dpaa2_eventdev_crypto_queue_add(const struct rte_eventdev *dev,
 
 	if (rx_queue_id == -1)
 		return dpaa2_eventdev_crypto_queue_add_all(dev,
-				cryptodev, ev);
+				cryptodev, &conf->ev);
 
 	ret = dpaa2_sec_eventq_attach(cryptodev, rx_queue_id,
-				      dpcon, ev);
+				      dpcon, &conf->ev);
 	if (ret) {
 		DPAA2_EVENTDEV_ERR(
 			"dpaa2_sec_eventq_attach failed: ret: %d\n", ret);
diff --git a/drivers/event/octeontx/ssovf_evdev.c b/drivers/event/octeontx/ssovf_evdev.c
index 9e14e35d10..17acd8ef64 100644
--- a/drivers/event/octeontx/ssovf_evdev.c
+++ b/drivers/event/octeontx/ssovf_evdev.c
@@ -745,12 +745,12 @@ static int
 ssovf_crypto_adapter_qp_add(const struct rte_eventdev *dev,
 			    const struct rte_cryptodev *cdev,
 			    int32_t queue_pair_id,
-			    const struct rte_event *event)
+			    const struct rte_event_crypto_adapter_queue_conf *conf)
 {
 	struct cpt_instance *qp;
 	uint8_t qp_id;
 
-	RTE_SET_USED(event);
+	RTE_SET_USED(conf);
 
 	if (queue_pair_id == -1) {
 		for (qp_id = 0; qp_id < cdev->data->nb_queue_pairs; qp_id++) {
diff --git a/lib/eventdev/eventdev_pmd.h b/lib/eventdev/eventdev_pmd.h
index 1e65d096f1..2a861db9af 100644
--- a/lib/eventdev/eventdev_pmd.h
+++ b/lib/eventdev/eventdev_pmd.h
@@ -910,6 +910,7 @@ rte_event_pmd_selftest_seqn(struct rte_mbuf *mbuf)
 }
 
 struct rte_cryptodev;
+struct rte_event_crypto_adapter_queue_conf;
 
 /**
  * This API may change without prior notice
@@ -964,11 +965,11 @@ typedef int (*eventdev_crypto_adapter_caps_get_t)
  *   - <0: Error code returned by the driver function.
  *
  */
-typedef int (*eventdev_crypto_adapter_queue_pair_add_t)
-			(const struct rte_eventdev *dev,
-			 const struct rte_cryptodev *cdev,
-			 int32_t queue_pair_id,
-			 const struct rte_event *event);
+typedef int (*eventdev_crypto_adapter_queue_pair_add_t)(
+		const struct rte_eventdev *dev,
+		const struct rte_cryptodev *cdev,
+		int32_t queue_pair_id,
+		const struct rte_event_crypto_adapter_queue_conf *queue_conf);
 
 
 /**
@@ -1077,6 +1078,27 @@ typedef int (*eventdev_crypto_adapter_stats_reset)
 			(const struct rte_eventdev *dev,
 			 const struct rte_cryptodev *cdev);
 
+struct rte_event_crypto_adapter_vector_limits;
+/**
+ * Get event vector limits for a given event, crypto device pair.
+ *
+ * @param dev
+ *   Event device pointer
+ *
+ * @param cdev
+ *   Crypto device pointer
+ *
+ * @param[out] limits
+ *   Pointer to the limits structure to be filled.
+ *
+ * @return
+ *   - 0: Success.
+ *   - <0: Error code returned by the driver function.
+ */
+typedef int (*eventdev_crypto_adapter_vector_limits_get_t)(
+	const struct rte_eventdev *dev, const struct rte_cryptodev *cdev,
+	struct rte_event_crypto_adapter_vector_limits *limits);
+
 /**
  * Retrieve the event device's eth Tx adapter capabilities.
  *
@@ -1363,6 +1385,9 @@ struct eventdev_ops {
 	/**< Get crypto stats */
 	eventdev_crypto_adapter_stats_reset crypto_adapter_stats_reset;
 	/**< Reset crypto stats */
+	eventdev_crypto_adapter_vector_limits_get_t
+		crypto_adapter_vector_limits_get;
+	/**< Get event vector limits for the crypto adapter */
 
 	eventdev_eth_rx_adapter_q_stats_get eth_rx_adapter_queue_stats_get;
 	/**< Get ethernet Rx queue stats */
diff --git a/lib/eventdev/eventdev_trace.h b/lib/eventdev/eventdev_trace.h
index 5ec43d80ee..d48cd58850 100644
--- a/lib/eventdev/eventdev_trace.h
+++ b/lib/eventdev/eventdev_trace.h
@@ -18,6 +18,7 @@ extern "C" {
 #include <rte_trace_point.h>
 
 #include "rte_eventdev.h"
+#include "rte_event_crypto_adapter.h"
 #include "rte_event_eth_rx_adapter.h"
 #include "rte_event_timer_adapter.h"
 
@@ -271,11 +272,12 @@ RTE_TRACE_POINT(
 RTE_TRACE_POINT(
 	rte_eventdev_trace_crypto_adapter_queue_pair_add,
 	RTE_TRACE_POINT_ARGS(uint8_t adptr_id, uint8_t cdev_id,
-		const void *event, int32_t queue_pair_id),
+		int32_t queue_pair_id,
+		const struct rte_event_crypto_adapter_queue_conf *conf),
 	rte_trace_point_emit_u8(adptr_id);
 	rte_trace_point_emit_u8(cdev_id);
 	rte_trace_point_emit_i32(queue_pair_id);
-	rte_trace_point_emit_ptr(event);
+	rte_trace_point_emit_ptr(conf);
 )
 
 RTE_TRACE_POINT(
diff --git a/lib/eventdev/rte_event_crypto_adapter.c b/lib/eventdev/rte_event_crypto_adapter.c
index 7c695176f4..0288629fd6 100644
--- a/lib/eventdev/rte_event_crypto_adapter.c
+++ b/lib/eventdev/rte_event_crypto_adapter.c
@@ -921,11 +921,12 @@ int
 rte_event_crypto_adapter_queue_pair_add(uint8_t id,
 			uint8_t cdev_id,
 			int32_t queue_pair_id,
-			const struct rte_event *event)
+			const struct rte_event_crypto_adapter_queue_conf *conf)
 {
+	struct rte_event_crypto_adapter_vector_limits limits;
 	struct event_crypto_adapter *adapter;
-	struct rte_eventdev *dev;
 	struct crypto_device_info *dev_info;
+	struct rte_eventdev *dev;
 	uint32_t cap;
 	int ret;
 
@@ -950,11 +951,49 @@ rte_event_crypto_adapter_queue_pair_add(uint8_t id,
 		return ret;
 	}
 
-	if ((cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND) &&
-	    (event == NULL)) {
-		RTE_EDEV_LOG_ERR("Conf value can not be NULL for dev_id=%u",
-				  cdev_id);
-		return -EINVAL;
+	if (conf == NULL) {
+		if (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND) {
+			RTE_EDEV_LOG_ERR("Conf value can not be NULL for dev_id=%u",
+					 cdev_id);
+			return -EINVAL;
+		}
+	} else {
+		if (conf->flags & RTE_EVENT_CRYPTO_ADAPTER_EVENT_VECTOR) {
+			if ((cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_EVENT_VECTOR) == 0) {
+				RTE_EDEV_LOG_ERR("Event vectorization is not supported,"
+						 "dev %" PRIu8 " cdev %" PRIu8, id,
+						 cdev_id);
+				return -ENOTSUP;
+			}
+
+			ret = rte_event_crypto_adapter_vector_limits_get(
+				adapter->eventdev_id, cdev_id, &limits);
+			if (ret < 0) {
+				RTE_EDEV_LOG_ERR("Failed to get event device vector "
+						 "limits, dev %" PRIu8 " cdev %" PRIu8,
+						 id, cdev_id);
+				return -EINVAL;
+			}
+
+			if (conf->vector_sz < limits.min_sz ||
+			    conf->vector_sz > limits.max_sz ||
+			    conf->vector_timeout_ns < limits.min_timeout_ns ||
+			    conf->vector_timeout_ns > limits.max_timeout_ns ||
+			    conf->vector_mp == NULL) {
+				RTE_EDEV_LOG_ERR("Invalid event vector configuration,"
+						" dev %" PRIu8 " cdev %" PRIu8,
+						id, cdev_id);
+				return -EINVAL;
+			}
+
+			if (conf->vector_mp->elt_size < (sizeof(struct rte_event_vector) +
+			    (sizeof(uintptr_t) * conf->vector_sz))) {
+				RTE_EDEV_LOG_ERR("Invalid event vector configuration,"
+						" dev %" PRIu8 " cdev %" PRIu8,
+						id, cdev_id);
+				return -EINVAL;
+			}
+		}
 	}
 
 	dev_info = &adapter->cdevs[cdev_id];
@@ -990,7 +1029,7 @@ rte_event_crypto_adapter_queue_pair_add(uint8_t id,
 		ret = (*dev->dev_ops->crypto_adapter_queue_pair_add)(dev,
 				dev_info->dev,
 				queue_pair_id,
-				event);
+				conf);
 		if (ret)
 			return ret;
 
@@ -1030,8 +1069,8 @@ rte_event_crypto_adapter_queue_pair_add(uint8_t id,
 		rte_service_component_runstate_set(adapter->service_id, 1);
 	}
 
-	rte_eventdev_trace_crypto_adapter_queue_pair_add(id, cdev_id, event,
-		queue_pair_id);
+	rte_eventdev_trace_crypto_adapter_queue_pair_add(id, cdev_id,
+		queue_pair_id, conf);
 	return 0;
 }
 
@@ -1290,3 +1329,49 @@ rte_event_crypto_adapter_event_port_get(uint8_t id, uint8_t *event_port_id)
 
 	return 0;
 }
+
+int
+rte_event_crypto_adapter_vector_limits_get(
+	uint8_t dev_id, uint16_t cdev_id,
+	struct rte_event_crypto_adapter_vector_limits *limits)
+{
+	struct rte_cryptodev *cdev;
+	struct rte_eventdev *dev;
+	uint32_t cap;
+	int ret;
+
+	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
+
+	if (!rte_cryptodev_is_valid_dev(cdev_id)) {
+		RTE_EDEV_LOG_ERR("Invalid dev_id=%" PRIu8, cdev_id);
+		return -EINVAL;
+	}
+
+	if (limits == NULL) {
+		RTE_EDEV_LOG_ERR("Invalid limits storage provided");
+		return -EINVAL;
+	}
+
+	dev = &rte_eventdevs[dev_id];
+	cdev = rte_cryptodev_pmd_get_dev(cdev_id);
+
+	ret = rte_event_crypto_adapter_caps_get(dev_id, cdev_id, &cap);
+	if (ret) {
+		RTE_EDEV_LOG_ERR("Failed to get adapter caps edev %" PRIu8
+				 "cdev %" PRIu16, dev_id, cdev_id);
+		return ret;
+	}
+
+	if (!(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_EVENT_VECTOR)) {
+		RTE_EDEV_LOG_ERR("Event vectorization is not supported,"
+				 "dev %" PRIu8 " cdev %" PRIu8, dev_id, cdev_id);
+		return -ENOTSUP;
+	}
+
+	RTE_FUNC_PTR_OR_ERR_RET(
+		*dev->dev_ops->crypto_adapter_vector_limits_get,
+		-ENOTSUP);
+
+	return dev->dev_ops->crypto_adapter_vector_limits_get(
+		dev, cdev, limits);
+}
diff --git a/lib/eventdev/rte_event_crypto_adapter.h b/lib/eventdev/rte_event_crypto_adapter.h
index d90a19e72c..83d154a6ce 100644
--- a/lib/eventdev/rte_event_crypto_adapter.h
+++ b/lib/eventdev/rte_event_crypto_adapter.h
@@ -253,6 +253,78 @@ struct rte_event_crypto_adapter_conf {
 	 */
 };
 
+#define RTE_EVENT_CRYPTO_ADAPTER_EVENT_VECTOR	0x1
+/**< This flag indicates that crypto operations processed on the crypto
+ * adapter need to be vectorized
+ * @see rte_event_crypto_adapter_queue_conf::flags
+ */
+
+/**
+ * Adapter queue configuration structure
+ */
+struct rte_event_crypto_adapter_queue_conf {
+	uint32_t flags;
+	/**< Flags for handling crypto operations
+	 * @see RTE_EVENT_CRYPTO_ADAPTER_EVENT_VECTOR
+	 */
+	struct rte_event ev;
+	/**< If HW supports cryptodev queue pair to event queue binding,
+	 * application is expected to fill in event information.
+	 * @see RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND
+	 */
+	uint16_t vector_sz;
+	/**< Indicates the maximum number of crypto operations to combine and
+	 * form a vector.
+	 * @see rte_event_crypto_adapter_vector_limits::min_sz
+	 * @see rte_event_crypto_adapter_vector_limits::max_sz
+	 * Valid when RTE_EVENT_CRYPTO_ADAPTER_EVENT_VECTOR flag is set in
+	 * @see rte_event_crypto_adapter_queue_conf::flags
+	 */
+	uint64_t vector_timeout_ns;
+	/**<
+	 * Indicates the maximum number of nanoseconds to wait for aggregating
+	 * crypto operations. Should be within vectorization limits of the
+	 * adapter
+	 * @see rte_event_crypto_adapter_vector_limits::min_timeout_ns
+	 * @see rte_event_crypto_adapter_vector_limits::max_timeout_ns
+	 * Valid when RTE_EVENT_CRYPTO_ADAPTER_EVENT_VECTOR flag is set in
+	 * @see rte_event_crypto_adapter_queue_conf::flags
+	 */
+	struct rte_mempool *vector_mp;
+	/**< Indicates the mempool that should be used for allocating
+	 * rte_event_vector container.
+	 * Should be created by using `rte_event_vector_pool_create`.
+	 * Valid when RTE_EVENT_CRYPTO_ADAPTER_EVENT_VECTOR flag is set in
+	 * @see rte_event_crypto_adapter_queue_conf::flags.
+	 */
+};
+
+/**
+ * A structure used to retrieve event crypto adapter vector limits.
+ */
+struct rte_event_crypto_adapter_vector_limits {
+	uint16_t min_sz;
+	/**< Minimum vector limit configurable.
+	 * @see rte_event_crypto_adapter_queue_conf::vector_sz
+	 */
+	uint16_t max_sz;
+	/**< Maximum vector limit configurable.
+	 * @see rte_event_crypto_adapter_queue_conf::vector_sz
+	 */
+	uint8_t log2_sz;
+	/**< True if the size configured should be in log2.
+	 * @see rte_event_crypto_adapter_queue_conf::vector_sz
+	 */
+	uint64_t min_timeout_ns;
+	/**< Minimum vector timeout configurable.
+	 * @see rte_event_crypto_adapter_queue_conf::vector_timeout_ns
+	 */
+	uint64_t max_timeout_ns;
+	/**< Maximum vector timeout configurable.
+	 * @see rte_event_crypto_adapter_queue_conf::vector_timeout_ns
+	 */
+};
+
 /**
  * Function type used for adapter configuration callback. The callback is
  * used to fill in members of the struct rte_event_crypto_adapter_conf, this
@@ -392,10 +464,9 @@ rte_event_crypto_adapter_free(uint8_t id);
  *  Cryptodev queue pair identifier. If queue_pair_id is set -1,
  *  adapter adds all the pre configured queue pairs to the instance.
  *
- * @param event
- *  if HW supports cryptodev queue pair to event queue binding, application is
- *  expected to fill in event information, else it will be NULL.
- *  @see RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND
+ * @param conf
+ *  Additional configuration structure of type
+ *  *rte_event_crypto_adapter_queue_conf*
  *
  * @return
  *  - 0: Success, queue pair added correctly.
@@ -405,7 +476,7 @@ int
 rte_event_crypto_adapter_queue_pair_add(uint8_t id,
 			uint8_t cdev_id,
 			int32_t queue_pair_id,
-			const struct rte_event *event);
+			const struct rte_event_crypto_adapter_queue_conf *conf);
 
 /**
  * Delete a queue pair from an event crypto adapter.
@@ -523,6 +594,26 @@ rte_event_crypto_adapter_service_id_get(uint8_t id, uint32_t *service_id);
 int
 rte_event_crypto_adapter_event_port_get(uint8_t id, uint8_t *event_port_id);
 
+/**
+ * Retrieve vector limits for a given event dev and crypto dev pair.
+ * @see rte_event_crypto_adapter_vector_limits
+ *
+ * @param dev_id
+ *  Event device identifier.
+ * @param cdev_id
+ *  Crypto device identifier.
+ * @param [out] limits
+ *  A pointer to rte_event_crypto_adapter_vector_limits structure that has to
+ *  be filled.
+ *
+ * @return
+ *  - 0: Success.
+ *  - <0: Error code on failure.
+ */
+int rte_event_crypto_adapter_vector_limits_get(
+	uint8_t dev_id, uint16_t cdev_id,
+	struct rte_event_crypto_adapter_vector_limits *limits);
+
 /**
  * Enqueue a burst of crypto operations as event objects supplied in *rte_event*
  * structure on an event crypto adapter designated by its event *dev_id* through
diff --git a/lib/eventdev/rte_eventdev.h b/lib/eventdev/rte_eventdev.h
index f1908b82b2..55bd004373 100644
--- a/lib/eventdev/rte_eventdev.h
+++ b/lib/eventdev/rte_eventdev.h
@@ -1219,6 +1219,9 @@ struct rte_event_vector {
 #define RTE_EVENT_TYPE_ETH_RX_ADAPTER_VECTOR                                   \
 	(RTE_EVENT_TYPE_VECTOR | RTE_EVENT_TYPE_ETH_RX_ADAPTER)
 /**< The event vector generated from eth Rx adapter. */
+#define RTE_EVENT_TYPE_CRYPTODEV_VECTOR                                        \
+	(RTE_EVENT_TYPE_VECTOR | RTE_EVENT_TYPE_CRYPTODEV)
+/**< The event vector generated from cryptodev adapter. */
 
 #define RTE_EVENT_TYPE_MAX              0x10
 /**< Maximum number of event types */
@@ -1436,6 +1439,11 @@ rte_event_timer_adapter_caps_get(uint8_t dev_id, uint32_t *caps);
  * the private data information along with the crypto session.
  */
 
+#define RTE_EVENT_CRYPTO_ADAPTER_CAP_EVENT_VECTOR   0x10
+/**< Flag indicates HW is capable of aggregating processed
+ * crypto operations into rte_event_vector.
+ */
+
 /**
  * Retrieve the event device's crypto adapter capabilities for the
  * specified cryptodev device
-- 
2.25.1


^ permalink raw reply	[flat|nested] 36+ messages in thread

* [PATCH v2 2/3] crypto/cnxk: add burst enqueue for event crypto
  2022-09-26 11:36   ` [PATCH v2 0/3] Vector support for event crypto adapter Volodymyr Fialko
  2022-09-26 11:36     ` [PATCH v2 1/3] eventdev: introduce event cryptodev vector type Volodymyr Fialko
@ 2022-09-26 11:36     ` Volodymyr Fialko
  2022-09-26 11:36     ` [PATCH v2 3/3] crypto/cnxk: add vectorization " Volodymyr Fialko
  2022-10-01  0:42     ` [PATCH v3 0/2] Vector support for event crypto adapter Volodymyr Fialko
  3 siblings, 0 replies; 36+ messages in thread
From: Volodymyr Fialko @ 2022-09-26 11:36 UTC (permalink / raw)
  To: dev, Ankur Dwivedi, Anoob Joseph, Tejasree Kondoj,
	Pavan Nikhilesh, Shijith Thotton
  Cc: jerinj, gakhil, abhinandan.gujjar, Volodymyr Fialko

Added support for burst enqueue for the cn10k event crypto adapter.
Instructions will be grouped based on the queue pair and sent in a burst.
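
A simplified sketch of the grouping pattern (not the driver code;
submit_burst() is a hypothetical stand-in for ca_lmtst_burst_submit(), and
error handling is omitted):

    struct rte_crypto_op *ops[PKTS_PER_LOOP];
    uint64_t w2s[PKTS_PER_LOOP], w2;
    struct cnxk_cpt_qp *qp, *cur = NULL;
    uint16_t count = 0, n = 0;
    int i;

    for (i = 0; i < nb_events; i++) {
        cn10k_ca_meta_info_extract(ev[i].event_ptr, &qp, &w2);
        /* Flush when the queue pair changes or the burst is full */
        if (n && (qp != cur || n == PKTS_PER_LOOP)) {
            count += submit_burst(cur, ops, w2s, n);
            n = 0;
        }
        cur = qp;
        ops[n] = ev[i].event_ptr;
        w2s[n] = w2;
        n++;
    }
    if (n)
        count += submit_burst(cur, ops, w2s, n);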

Signed-off-by: Volodymyr Fialko <vfialko@marvell.com>
---
 drivers/crypto/cnxk/cn10k_cryptodev_ops.c | 147 ++++++++++++++++------
 drivers/crypto/cnxk/cn10k_cryptodev_ops.h |   7 +-
 drivers/crypto/cnxk/meson.build           |   2 +-
 drivers/event/cnxk/cn10k_eventdev.c       |   2 +-
 drivers/event/cnxk/cn10k_worker.c         |  10 --
 drivers/event/cnxk/cn10k_worker.h         |   2 -
 6 files changed, 113 insertions(+), 57 deletions(-)

diff --git a/drivers/crypto/cnxk/cn10k_cryptodev_ops.c b/drivers/crypto/cnxk/cn10k_cryptodev_ops.c
index f761ba36e2..2fbf374da3 100644
--- a/drivers/crypto/cnxk/cn10k_cryptodev_ops.c
+++ b/drivers/crypto/cnxk/cn10k_cryptodev_ops.c
@@ -9,11 +9,12 @@
 
 #include "cn10k_cryptodev.h"
 #include "cn10k_cryptodev_ops.h"
-#include "cn10k_ipsec_la_ops.h"
 #include "cn10k_ipsec.h"
+#include "cn10k_ipsec_la_ops.h"
 #include "cnxk_ae.h"
 #include "cnxk_cryptodev.h"
 #include "cnxk_cryptodev_ops.h"
+#include "cnxk_eventdev.h"
 #include "cnxk_se.h"
 
 #include "roc_api.h"
@@ -391,74 +392,140 @@ cn10k_ca_meta_info_extract(struct rte_crypto_op *op,
 	return 0;
 }
 
-uint16_t
-cn10k_cpt_crypto_adapter_enqueue(uintptr_t base, struct rte_crypto_op *op)
+static inline uint16_t
+ca_lmtst_burst_submit(struct cn10k_sso_hws *ws, uint64_t w2[],
+		      struct cnxk_cpt_qp *qp, struct rte_crypto_op *op[],
+		      uint16_t nb_ops)
 {
+	struct cpt_inflight_req *infl_reqs[PKTS_PER_LOOP];
+	uint64_t lmt_base, lmt_arg, io_addr;
+	struct cpt_inst_s *inst, *inst_base;
 	struct cpt_inflight_req *infl_req;
-	uint64_t lmt_base, lmt_arg, w2;
-	struct cpt_inst_s *inst;
 	union cpt_fc_write_s fc;
-	struct cnxk_cpt_qp *qp;
 	uint64_t *fc_addr;
 	uint16_t lmt_id;
-	int ret;
+	int ret, i;
 
-	ret = cn10k_ca_meta_info_extract(op, &qp, &w2);
-	if (unlikely(ret)) {
-		rte_errno = EINVAL;
-		return 0;
-	}
+	lmt_base = qp->lmtline.lmt_base;
+	io_addr = qp->lmtline.io_addr;
+	fc_addr = qp->lmtline.fc_addr;
+
+	const uint32_t fc_thresh = qp->lmtline.fc_thresh;
+
+	ROC_LMT_BASE_ID_GET(lmt_base, lmt_id);
+	inst_base = (struct cpt_inst_s *)lmt_base;
 
 	if (unlikely(!qp->ca.enabled)) {
 		rte_errno = EINVAL;
 		return 0;
 	}
 
-	if (unlikely(rte_mempool_get(qp->ca.req_mp, (void **)&infl_req))) {
+	if (unlikely(rte_mempool_get_bulk(qp->ca.req_mp, (void **)infl_reqs,
+					  nb_ops))) {
 		rte_errno = ENOMEM;
 		return 0;
 	}
-	infl_req->op_flags = 0;
 
-	lmt_base = qp->lmtline.lmt_base;
-	fc_addr = qp->lmtline.fc_addr;
-
-	const uint32_t fc_thresh = qp->lmtline.fc_thresh;
+	for (i = 0; i < nb_ops; i++) {
+		inst = &inst_base[2 * i];
+		infl_req = infl_reqs[i];
+		infl_req->op_flags = 0;
 
-	ROC_LMT_BASE_ID_GET(lmt_base, lmt_id);
-	inst = (struct cpt_inst_s *)lmt_base;
+		ret = cn10k_cpt_fill_inst(qp, &op[i], inst, infl_req);
+		if (unlikely(ret != 1)) {
+			plt_dp_err("Could not process op: %p", op[i]);
+			if (i != 0)
+				goto submit;
+			else
+				goto put;
+		}
 
-	ret = cn10k_cpt_fill_inst(qp, &op, inst, infl_req);
-	if (unlikely(ret != 1)) {
-		plt_dp_err("Could not process op: %p", op);
-		rte_mempool_put(qp->ca.req_mp, infl_req);
-		return 0;
+		infl_req->res.cn10k.compcode = CPT_COMP_NOT_DONE;
+		infl_req->qp = qp;
+		inst->w0.u64 = 0;
+		inst->res_addr = (uint64_t)&infl_req->res;
+		inst->w2.u64 = w2[i];
+		inst->w3.u64 = CNXK_CPT_INST_W3(1, infl_req);
 	}
 
-	infl_req->cop = op;
-	infl_req->res.cn10k.compcode = CPT_COMP_NOT_DONE;
-	infl_req->qp = qp;
-	inst->w0.u64 = 0;
-	inst->res_addr = (uint64_t)&infl_req->res;
-	inst->w2.u64 = w2;
-	inst->w3.u64 = CNXK_CPT_INST_W3(1, infl_req);
-
 	fc.u64[0] = __atomic_load_n(fc_addr, __ATOMIC_RELAXED);
 	if (unlikely(fc.s.qsize > fc_thresh)) {
-		rte_mempool_put(qp->ca.req_mp, infl_req);
 		rte_errno = EAGAIN;
-		return 0;
+		i = 0;
+		goto put;
 	}
 
-	if (inst->w2.s.tt == RTE_SCHED_TYPE_ORDERED)
-		roc_sso_hws_head_wait(base);
+submit:
+	if (CNXK_TT_FROM_TAG(ws->gw_rdata) == SSO_TT_ORDERED)
+		roc_sso_hws_head_wait(ws->base);
 
-	lmt_arg = ROC_CN10K_CPT_LMT_ARG | (uint64_t)lmt_id;
-	roc_lmt_submit_steorl(lmt_arg, qp->lmtline.io_addr);
+	if (i > PKTS_PER_STEORL) {
+		lmt_arg = ROC_CN10K_CPT_LMT_ARG | (PKTS_PER_STEORL - 1) << 12 |
+			  (uint64_t)lmt_id;
+		roc_lmt_submit_steorl(lmt_arg, io_addr);
+		lmt_arg = ROC_CN10K_CPT_LMT_ARG |
+			  (i - PKTS_PER_STEORL - 1) << 12 |
+			  (uint64_t)(lmt_id + PKTS_PER_STEORL);
+		roc_lmt_submit_steorl(lmt_arg, io_addr);
+	} else {
+		lmt_arg = ROC_CN10K_CPT_LMT_ARG | (i - 1) << 12 |
+			  (uint64_t)lmt_id;
+		roc_lmt_submit_steorl(lmt_arg, io_addr);
+	}
 
 	rte_io_wmb();
 
-	return 1;
+put:
+	if (unlikely(i != nb_ops))
+		rte_mempool_put_bulk(qp->ca.req_mp, (void *)&infl_reqs[i],
+				     nb_ops - i);
+
+	return i;
+}
+
+uint16_t __rte_hot
+cn10k_cpt_crypto_adapter_enqueue(void *ws, struct rte_event ev[],
+				 uint16_t nb_events)
+{
+	struct rte_crypto_op *ops[PKTS_PER_LOOP], *op;
+	struct cnxk_cpt_qp *qp, *curr_qp = NULL;
+	uint64_t w2s[PKTS_PER_LOOP], w2;
+	uint16_t submitted, count = 0;
+	int ret, i, ops_len = 0;
+
+	for (i = 0; i < nb_events; i++) {
+		op = ev[i].event_ptr;
+		ret = cn10k_ca_meta_info_extract(op, &qp, &w2);
+		if (unlikely(ret)) {
+			rte_errno = EINVAL;
+			return count;
+		}
+
+		if (qp != curr_qp) {
+			if (ops_len) {
+				submitted = ca_lmtst_burst_submit(
+					ws, w2s, curr_qp, ops, ops_len);
+				count += submitted;
+				if (unlikely(submitted != ops_len))
+					return count;
+				ops_len = 0;
+			}
+			curr_qp = qp;
+		}
+		w2s[ops_len] = w2;
+		ops[ops_len] = op;
+		if (++ops_len == PKTS_PER_LOOP) {
+			submitted = ca_lmtst_burst_submit(ws, w2s, curr_qp, ops,
+							  ops_len);
+			count += submitted;
+			if (unlikely(submitted != ops_len))
+				return count;
+			ops_len = 0;
+		}
+	}
+	if (ops_len)
+		count += ca_lmtst_burst_submit(ws, w2s, curr_qp, ops, ops_len);
+	return count;
 }
 
 static inline void
diff --git a/drivers/crypto/cnxk/cn10k_cryptodev_ops.h b/drivers/crypto/cnxk/cn10k_cryptodev_ops.h
index 1ad4c16873..628d6a567c 100644
--- a/drivers/crypto/cnxk/cn10k_cryptodev_ops.h
+++ b/drivers/crypto/cnxk/cn10k_cryptodev_ops.h
@@ -5,16 +5,17 @@
 #ifndef _CN10K_CRYPTODEV_OPS_H_
 #define _CN10K_CRYPTODEV_OPS_H_
 
-#include <rte_cryptodev.h>
 #include <cryptodev_pmd.h>
+#include <rte_cryptodev.h>
+#include <rte_eventdev.h>
 
 extern struct rte_cryptodev_ops cn10k_cpt_ops;
 
 void cn10k_cpt_set_enqdeq_fns(struct rte_cryptodev *dev);
 
 __rte_internal
-uint16_t cn10k_cpt_crypto_adapter_enqueue(uintptr_t base,
-					  struct rte_crypto_op *op);
+uint16_t __rte_hot cn10k_cpt_crypto_adapter_enqueue(void *ws, struct rte_event ev[],
+		uint16_t nb_events);
 __rte_internal
 uintptr_t cn10k_cpt_crypto_adapter_dequeue(uintptr_t get_work1);
 
diff --git a/drivers/crypto/cnxk/meson.build b/drivers/crypto/cnxk/meson.build
index 23a1cc3aac..952554ac12 100644
--- a/drivers/crypto/cnxk/meson.build
+++ b/drivers/crypto/cnxk/meson.build
@@ -24,7 +24,7 @@ sources = files(
 
 deps += ['bus_pci', 'common_cnxk', 'security', 'eventdev']
 
-includes += include_directories('../../../lib/net')
+includes += include_directories('../../../lib/net', '../../event/cnxk')
 
 if get_option('buildtype').contains('debug')
     cflags += [ '-DLA_IPSEC_DEBUG' ]
diff --git a/drivers/event/cnxk/cn10k_eventdev.c b/drivers/event/cnxk/cn10k_eventdev.c
index 7adf80236f..b0982d6c3b 100644
--- a/drivers/event/cnxk/cn10k_eventdev.c
+++ b/drivers/event/cnxk/cn10k_eventdev.c
@@ -594,7 +594,7 @@ cn10k_sso_fp_fns_set(struct rte_eventdev *event_dev)
 			}
 		}
 	}
-	event_dev->ca_enqueue = cn10k_sso_hws_ca_enq;
+	event_dev->ca_enqueue = cn10k_cpt_crypto_adapter_enqueue;
 
 	if (dev->tx_offloads & NIX_TX_MULTI_SEG_F)
 		CN10K_SET_EVDEV_ENQ_OP(dev, event_dev->txa_enqueue,
diff --git a/drivers/event/cnxk/cn10k_worker.c b/drivers/event/cnxk/cn10k_worker.c
index f953e19dd0..4581c41233 100644
--- a/drivers/event/cnxk/cn10k_worker.c
+++ b/drivers/event/cnxk/cn10k_worker.c
@@ -64,13 +64,3 @@ cn10k_sso_hws_enq_fwd_burst(void *port, const struct rte_event ev[],
 
 	return 1;
 }
-
-uint16_t __rte_hot
-cn10k_sso_hws_ca_enq(void *port, struct rte_event ev[], uint16_t nb_events)
-{
-	struct cn10k_sso_hws *ws = port;
-
-	RTE_SET_USED(nb_events);
-
-	return cn10k_cpt_crypto_adapter_enqueue(ws->base, ev->event_ptr);
-}
diff --git a/drivers/event/cnxk/cn10k_worker.h b/drivers/event/cnxk/cn10k_worker.h
index a71e076ff8..56bf4cec50 100644
--- a/drivers/event/cnxk/cn10k_worker.h
+++ b/drivers/event/cnxk/cn10k_worker.h
@@ -353,8 +353,6 @@ uint16_t __rte_hot cn10k_sso_hws_enq_new_burst(void *port,
 uint16_t __rte_hot cn10k_sso_hws_enq_fwd_burst(void *port,
 					       const struct rte_event ev[],
 					       uint16_t nb_events);
-uint16_t __rte_hot cn10k_sso_hws_ca_enq(void *port, struct rte_event ev[],
-					uint16_t nb_events);
 
 #define R(name, flags)                                                         \
 	uint16_t __rte_hot cn10k_sso_hws_deq_##name(                           \
-- 
2.25.1


^ permalink raw reply	[flat|nested] 36+ messages in thread

* [PATCH v2 3/3] crypto/cnxk: add vectorization for event crypto
  2022-09-26 11:36   ` [PATCH v2 0/3] Vector support for event crypto adapter Volodymyr Fialko
  2022-09-26 11:36     ` [PATCH v2 1/3] eventdev: introduce event cryptodev vector type Volodymyr Fialko
  2022-09-26 11:36     ` [PATCH v2 2/3] crypto/cnxk: add burst enqueue for event crypto Volodymyr Fialko
@ 2022-09-26 11:36     ` Volodymyr Fialko
  2022-10-01  0:42     ` [PATCH v3 0/2] Vector support for event crypto adapter Volodymyr Fialko
  3 siblings, 0 replies; 36+ messages in thread
From: Volodymyr Fialko @ 2022-09-26 11:36 UTC (permalink / raw)
  To: dev, Ankur Dwivedi, Anoob Joseph, Tejasree Kondoj, Ray Kinsella,
	Pavan Nikhilesh, Shijith Thotton
  Cc: jerinj, gakhil, abhinandan.gujjar, Volodymyr Fialko

Add support for vector aggregation of crypto operations for cn10k.
Crypto operations will be grouped by the sub event type, flow id, scheduler
type and queue id fields from rte_event_crypto_metadata::response_info.
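
On the application side, a completed vector then arrives as a single event;
a hedged sketch of consuming it (returning the container via
rte_mempool_from_obj() is one common pattern; error handling omitted):

    struct rte_event ev;
    uint16_t i;

    if (rte_event_dequeue_burst(evdev_id, port_id, &ev, 1, 0) &&
        ev.event_type == RTE_EVENT_TYPE_CRYPTODEV_VECTOR) {
        struct rte_event_vector *vec = ev.vec;

        for (i = 0; i < vec->nb_elem; i++) {
            struct rte_crypto_op *op = vec->ptrs[i];
            /* post-process the completed crypto operation */
            (void)op;
        }
        /* Return the vector container to its mempool */
        rte_mempool_put(rte_mempool_from_obj(vec), vec);
    }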

Signed-off-by: Volodymyr Fialko <vfialko@marvell.com>
---
 drivers/crypto/cnxk/cn10k_cryptodev_ops.c | 403 +++++++++++++++++++---
 drivers/crypto/cnxk/cn10k_cryptodev_ops.h |   2 +
 drivers/crypto/cnxk/cnxk_cryptodev_ops.h  |   9 +-
 drivers/crypto/cnxk/version.map           |   1 +
 drivers/event/cnxk/cn10k_eventdev.c       |  46 ++-
 drivers/event/cnxk/cn10k_worker.h         |   6 +-
 drivers/event/cnxk/cn9k_eventdev.c        |  10 +-
 drivers/event/cnxk/cnxk_eventdev.h        |   4 +-
 drivers/event/cnxk/cnxk_eventdev_adptr.c  |  17 +-
 9 files changed, 433 insertions(+), 65 deletions(-)

diff --git a/drivers/crypto/cnxk/cn10k_cryptodev_ops.c b/drivers/crypto/cnxk/cn10k_cryptodev_ops.c
index 2fbf374da3..14fe01cd4f 100644
--- a/drivers/crypto/cnxk/cn10k_cryptodev_ops.c
+++ b/drivers/crypto/cnxk/cn10k_cryptodev_ops.c
@@ -19,6 +19,25 @@
 
 #include "roc_api.h"
 
+#define PKTS_PER_LOOP	32
+#define PKTS_PER_STEORL 16
+
+/* Holds information required to send crypto operations in one burst */
+struct ops_burst {
+	struct rte_crypto_op *op[PKTS_PER_LOOP];
+	uint64_t w2[PKTS_PER_LOOP];
+	struct cn10k_sso_hws *ws;
+	struct cnxk_cpt_qp *qp;
+	uint16_t nb_ops;
+};
+
+/* Holds information required to send vector of operations */
+struct vec_request {
+	struct cpt_inflight_req *req;
+	struct rte_event_vector *vec;
+	uint64_t w2;
+};
+
 static inline struct cnxk_se_sess *
 cn10k_cpt_sym_temp_sess_create(struct cnxk_cpt_qp *qp, struct rte_crypto_op *op)
 {
@@ -183,9 +202,6 @@ cn10k_cpt_fill_inst(struct cnxk_cpt_qp *qp, struct rte_crypto_op *ops[],
 	return 1;
 }
 
-#define PKTS_PER_LOOP	32
-#define PKTS_PER_STEORL 16
-
 static uint16_t
 cn10k_cpt_enqueue_burst(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops)
 {
@@ -286,9 +302,9 @@ cn10k_cpt_crypto_adapter_ev_mdata_set(struct rte_cryptodev *dev __rte_unused,
 	union rte_event_crypto_metadata *ec_mdata = mdata;
 	struct rte_event *rsp_info;
 	struct cnxk_cpt_qp *qp;
+	uint64_t w2, tag_type;
 	uint8_t cdev_id;
 	int16_t qp_id;
-	uint64_t w2;
 
 	/* Get queue pair */
 	cdev_id = ec_mdata->request_info.cdev_id;
@@ -296,9 +312,9 @@ cn10k_cpt_crypto_adapter_ev_mdata_set(struct rte_cryptodev *dev __rte_unused,
 	qp = rte_cryptodevs[cdev_id].data->queue_pairs[qp_id];
 
 	/* Prepare w2 */
+	tag_type = qp->ca.vector_sz ? RTE_EVENT_TYPE_CRYPTODEV_VECTOR : RTE_EVENT_TYPE_CRYPTODEV;
 	rsp_info = &ec_mdata->response_info;
-	w2 = CNXK_CPT_INST_W2((RTE_EVENT_TYPE_CRYPTODEV << 28) |
-				      (rsp_info->sub_event_type << 20) |
+	w2 = CNXK_CPT_INST_W2((tag_type << 28) | (rsp_info->sub_event_type << 20) |
 				      rsp_info->flow_id,
 			      rsp_info->sched_type, rsp_info->queue_id, 0);
 
@@ -392,19 +408,236 @@ cn10k_ca_meta_info_extract(struct rte_crypto_op *op,
 	return 0;
 }
 
+static inline void
+cn10k_cpt_vec_inst_fill(struct vec_request *vec_req, struct cpt_inst_s *inst,
+			struct cnxk_cpt_qp *qp)
+{
+	const union cpt_res_s res = {.cn10k.compcode = CPT_COMP_NOT_DONE};
+	struct cpt_inflight_req *infl_req = vec_req->req;
+
+	const union cpt_inst_w4 w4 = {
+		.s.opcode_major = ROC_SE_MAJOR_OP_MISC,
+		.s.opcode_minor = ROC_SE_MISC_MINOR_OP_PASSTHROUGH,
+		.s.param1 = 1,
+		.s.param2 = 1,
+		.s.dlen = 0,
+	};
+
+	infl_req->vec = vec_req->vec;
+	infl_req->qp = qp;
+
+	inst->res_addr = (uint64_t)&infl_req->res;
+	__atomic_store_n(&infl_req->res.u64[0], res.u64[0], __ATOMIC_RELAXED);
+
+	inst->w0.u64 = 0;
+	inst->w2.u64 = vec_req->w2;
+	inst->w3.u64 = CNXK_CPT_INST_W3(1, infl_req);
+	inst->w4.u64 = w4.u64;
+	inst->w7.u64 = ROC_CPT_DFLT_ENG_GRP_SE << 61;
+}
+
+static void
+cn10k_cpt_vec_pkt_submission_timeout_handle(void)
+{
+	plt_dp_err("Vector packet submission timedout");
+	abort();
+}
+
+static inline void
+cn10k_cpt_vec_submit(struct vec_request vec_tbl[], uint16_t vec_tbl_len, struct cnxk_cpt_qp *qp)
+{
+	uint64_t lmt_base, lmt_arg, lmt_id, io_addr;
+	union cpt_fc_write_s fc;
+	struct cpt_inst_s *inst;
+	uint16_t burst_size;
+	uint64_t *fc_addr;
+	int i;
+
+	if (vec_tbl_len == 0)
+		return;
+
+	const uint32_t fc_thresh = qp->lmtline.fc_thresh;
+	/*
+	 * Use 10 mins timeout for the poll. It is not possible to recover from partial submission
+	 * of vector packet. Actual packets for processing are submitted to CPT prior to this
+	 * routine. Hence, any failure for submission of vector packet would indicate an
+	 * unrecoverable error for the application.
+	 */
+	const uint64_t timeout = rte_get_timer_cycles() + 10 * 60 * rte_get_timer_hz();
+
+	lmt_base = qp->lmtline.lmt_base;
+	io_addr = qp->lmtline.io_addr;
+	fc_addr = qp->lmtline.fc_addr;
+	ROC_LMT_BASE_ID_GET(lmt_base, lmt_id);
+	inst = (struct cpt_inst_s *)lmt_base;
+
+again:
+	burst_size = RTE_MIN(PKTS_PER_STEORL, vec_tbl_len);
+	for (i = 0; i < burst_size; i++)
+		cn10k_cpt_vec_inst_fill(&vec_tbl[i], &inst[i * 2], qp);
+
+	do {
+		fc.u64[0] = __atomic_load_n(fc_addr, __ATOMIC_RELAXED);
+		if (likely(fc.s.qsize < fc_thresh))
+			break;
+		if (unlikely(rte_get_timer_cycles() > timeout))
+			cn10k_cpt_vec_pkt_submission_timeout_handle();
+	} while (true);
+
+	lmt_arg = ROC_CN10K_CPT_LMT_ARG | (i - 1) << 12 | lmt_id;
+	roc_lmt_submit_steorl(lmt_arg, io_addr);
+
+	rte_io_wmb();
+
+	vec_tbl_len -= i;
+
+	if (vec_tbl_len > 0) {
+		vec_tbl += i;
+		goto again;
+	}
+}
+
+static inline int
+ca_lmtst_vec_submit(struct ops_burst *burst, struct vec_request vec_tbl[], uint16_t *vec_tbl_len)
+{
+	struct cpt_inflight_req *infl_reqs[PKTS_PER_LOOP];
+	uint64_t lmt_base, lmt_arg, io_addr;
+	uint16_t lmt_id, len = *vec_tbl_len;
+	struct cpt_inst_s *inst, *inst_base;
+	struct cpt_inflight_req *infl_req;
+	struct rte_event_vector *vec;
+	union cpt_fc_write_s fc;
+	struct cnxk_cpt_qp *qp;
+	uint64_t *fc_addr;
+	int ret, i, vi;
+
+	qp = burst->qp;
+
+	lmt_base = qp->lmtline.lmt_base;
+	io_addr = qp->lmtline.io_addr;
+	fc_addr = qp->lmtline.fc_addr;
+
+	const uint32_t fc_thresh = qp->lmtline.fc_thresh;
+
+	ROC_LMT_BASE_ID_GET(lmt_base, lmt_id);
+	inst_base = (struct cpt_inst_s *)lmt_base;
+
+#ifdef CNXK_CRYPTODEV_DEBUG
+	if (unlikely(!qp->ca.enabled)) {
+		rte_errno = EINVAL;
+		return 0;
+	}
+#endif
+
+	/* Perform fc check before putting packets into vectors */
+	fc.u64[0] = __atomic_load_n(fc_addr, __ATOMIC_RELAXED);
+	if (unlikely(fc.s.qsize > fc_thresh)) {
+		rte_errno = EAGAIN;
+		return 0;
+	}
+
+	if (unlikely(rte_mempool_get_bulk(qp->ca.req_mp, (void **)infl_reqs, burst->nb_ops))) {
+		rte_errno = ENOMEM;
+		return 0;
+	}
+
+	for (i = 0; i < burst->nb_ops; i++) {
+		inst = &inst_base[2 * i];
+		infl_req = infl_reqs[i];
+		infl_req->op_flags = 0;
+
+		ret = cn10k_cpt_fill_inst(qp, &burst->op[i], inst, infl_req);
+		if (unlikely(ret != 1)) {
+			plt_cpt_dbg("Could not process op: %p", burst->op[i]);
+			if (i != 0)
+				goto submit;
+			else
+				goto put;
+		}
+
+		infl_req->res.cn10k.compcode = CPT_COMP_NOT_DONE;
+		infl_req->qp = qp;
+		inst->w3.u64 = 0x1;
+
+		/* Lookup for existing vector by w2 */
+		for (vi = len - 1; vi >= 0; vi--) {
+			if (vec_tbl[vi].w2 != burst->w2[i])
+				continue;
+			vec = vec_tbl[vi].vec;
+			if (unlikely(vec->nb_elem == qp->ca.vector_sz))
+				continue;
+			vec->ptrs[vec->nb_elem++] = infl_req;
+			goto next_op; /* continue outer loop */
+		}
+
+		/* No available vectors found, allocate a new one */
+		if (unlikely(rte_mempool_get(qp->ca.vector_mp, (void **)&vec_tbl[len].vec))) {
+			rte_errno = ENOMEM;
+			if (i != 0)
+				goto submit;
+			else
+				goto put;
+		}
+		/* Also preallocate in-flight request, that will be used to
+		 * submit misc passthrough instruction
+		 */
+		if (unlikely(rte_mempool_get(qp->ca.req_mp, (void **)&vec_tbl[len].req))) {
+			rte_mempool_put(qp->ca.vector_mp, vec_tbl[len].vec);
+			rte_errno = ENOMEM;
+			if (i != 0)
+				goto submit;
+			else
+				goto put;
+		}
+		vec_tbl[len].w2 = burst->w2[i];
+		vec_tbl[len].vec->ptrs[0] = infl_req;
+		vec_tbl[len].vec->nb_elem = 1;
+		len++;
+
+next_op:;
+	}
+
+	/* Submit operations in burst */
+submit:
+	if (CNXK_TT_FROM_TAG(burst->ws->gw_rdata) == SSO_TT_ORDERED)
+		roc_sso_hws_head_wait(burst->ws->base);
+
+	if (i > PKTS_PER_STEORL) {
+		lmt_arg = ROC_CN10K_CPT_LMT_ARG | (PKTS_PER_STEORL - 1) << 12 | (uint64_t)lmt_id;
+		roc_lmt_submit_steorl(lmt_arg, io_addr);
+		lmt_arg = ROC_CN10K_CPT_LMT_ARG | (i - PKTS_PER_STEORL - 1) << 12 |
+			  (uint64_t)(lmt_id + PKTS_PER_STEORL);
+		roc_lmt_submit_steorl(lmt_arg, io_addr);
+	} else {
+		lmt_arg = ROC_CN10K_CPT_LMT_ARG | (i - 1) << 12 | (uint64_t)lmt_id;
+		roc_lmt_submit_steorl(lmt_arg, io_addr);
+	}
+
+	rte_io_wmb();
+
+put:
+	if (i != burst->nb_ops)
+		rte_mempool_put_bulk(qp->ca.req_mp, (void *)&infl_reqs[i], burst->nb_ops - i);
+
+	*vec_tbl_len = len;
+
+	return i;
+}
+
 static inline uint16_t
-ca_lmtst_burst_submit(struct cn10k_sso_hws *ws, uint64_t w2[],
-		      struct cnxk_cpt_qp *qp, struct rte_crypto_op *op[],
-		      uint16_t nb_ops)
+ca_lmtst_burst_submit(struct ops_burst *burst)
 {
 	struct cpt_inflight_req *infl_reqs[PKTS_PER_LOOP];
 	uint64_t lmt_base, lmt_arg, io_addr;
 	struct cpt_inst_s *inst, *inst_base;
 	struct cpt_inflight_req *infl_req;
 	union cpt_fc_write_s fc;
+	struct cnxk_cpt_qp *qp;
 	uint64_t *fc_addr;
 	uint16_t lmt_id;
-	int ret, i;
+	int ret, i, j;
+
+	qp = burst->qp;
 
 	lmt_base = qp->lmtline.lmt_base;
 	io_addr = qp->lmtline.io_addr;
@@ -415,25 +648,26 @@ ca_lmtst_burst_submit(struct cn10k_sso_hws *ws, uint64_t w2[],
 	ROC_LMT_BASE_ID_GET(lmt_base, lmt_id);
 	inst_base = (struct cpt_inst_s *)lmt_base;
 
+#ifdef CNXK_CRYPTODEV_DEBUG
 	if (unlikely(!qp->ca.enabled)) {
 		rte_errno = EINVAL;
 		return 0;
 	}
+#endif
 
-	if (unlikely(rte_mempool_get_bulk(qp->ca.req_mp, (void **)infl_reqs,
-					  nb_ops))) {
+	if (unlikely(rte_mempool_get_bulk(qp->ca.req_mp, (void **)infl_reqs, burst->nb_ops))) {
 		rte_errno = ENOMEM;
 		return 0;
 	}
 
-	for (i = 0; i < nb_ops; i++) {
+	for (i = 0; i < burst->nb_ops; i++) {
 		inst = &inst_base[2 * i];
 		infl_req = infl_reqs[i];
 		infl_req->op_flags = 0;
 
-		ret = cn10k_cpt_fill_inst(qp, &op[i], inst, infl_req);
+		ret = cn10k_cpt_fill_inst(qp, &burst->op[i], inst, infl_req);
 		if (unlikely(ret != 1)) {
-			plt_dp_err("Could not process op: %p", op[i]);
+			plt_dp_dbg("Could not process op: %p", burst->op[i]);
 			if (i != 0)
 				goto submit;
 			else
@@ -444,20 +678,25 @@ ca_lmtst_burst_submit(struct cn10k_sso_hws *ws, uint64_t w2[],
 		infl_req->qp = qp;
 		inst->w0.u64 = 0;
 		inst->res_addr = (uint64_t)&infl_req->res;
-		inst->w2.u64 = w2[i];
+		inst->w2.u64 = burst->w2[i];
 		inst->w3.u64 = CNXK_CPT_INST_W3(1, infl_req);
 	}
 
 	fc.u64[0] = __atomic_load_n(fc_addr, __ATOMIC_RELAXED);
 	if (unlikely(fc.s.qsize > fc_thresh)) {
 		rte_errno = EAGAIN;
+		for (j = 0; j < i; j++) {
+			infl_req = infl_reqs[j];
+			if (unlikely(infl_req->op_flags & CPT_OP_FLAGS_METABUF))
+				rte_mempool_put(qp->meta_info.pool, infl_req->mdata);
+		}
 		i = 0;
 		goto put;
 	}
 
 submit:
-	if (CNXK_TT_FROM_TAG(ws->gw_rdata) == SSO_TT_ORDERED)
-		roc_sso_hws_head_wait(ws->base);
+	if (CNXK_TT_FROM_TAG(burst->ws->gw_rdata) == SSO_TT_ORDERED)
+		roc_sso_hws_head_wait(burst->ws->base);
 
 	if (i > PKTS_PER_STEORL) {
 		lmt_arg = ROC_CN10K_CPT_LMT_ARG | (PKTS_PER_STEORL - 1) << 12 |
@@ -476,9 +715,8 @@ ca_lmtst_burst_submit(struct cn10k_sso_hws *ws, uint64_t w2[],
 	rte_io_wmb();
 
 put:
-	if (unlikely(i != nb_ops))
-		rte_mempool_put_bulk(qp->ca.req_mp, (void *)&infl_reqs[i],
-				     nb_ops - i);
+	if (unlikely(i != burst->nb_ops))
+		rte_mempool_put_bulk(qp->ca.req_mp, (void *)&infl_reqs[i], burst->nb_ops - i);
 
 	return i;
 }
@@ -487,44 +725,76 @@ uint16_t __rte_hot
 cn10k_cpt_crypto_adapter_enqueue(void *ws, struct rte_event ev[],
 				 uint16_t nb_events)
 {
-	struct rte_crypto_op *ops[PKTS_PER_LOOP], *op;
-	struct cnxk_cpt_qp *qp, *curr_qp = NULL;
-	uint64_t w2s[PKTS_PER_LOOP], w2;
-	uint16_t submitted, count = 0;
-	int ret, i, ops_len = 0;
+	uint16_t submitted, count = 0, vec_tbl_len = 0;
+	struct vec_request vec_tbl[nb_events];
+	struct rte_crypto_op *op;
+	struct ops_burst burst;
+	struct cnxk_cpt_qp *qp;
+	bool is_vector = false;
+	uint64_t w2;
+	int ret, i;
+
+	burst.ws = ws;
+	burst.qp = NULL;
+	burst.nb_ops = 0;
 
 	for (i = 0; i < nb_events; i++) {
 		op = ev[i].event_ptr;
 		ret = cn10k_ca_meta_info_extract(op, &qp, &w2);
 		if (unlikely(ret)) {
 			rte_errno = EINVAL;
-			return count;
+			goto vec_submit;
 		}
 
-		if (qp != curr_qp) {
-			if (ops_len) {
-				submitted = ca_lmtst_burst_submit(
-					ws, w2s, curr_qp, ops, ops_len);
+		/* Queue pair change check */
+		if (qp != burst.qp) {
+			if (burst.nb_ops) {
+				if (is_vector) {
+					submitted =
+						ca_lmtst_vec_submit(&burst, vec_tbl, &vec_tbl_len);
+					/*
+					 * Vector submission is required on qp change, but not in
+					 * other cases, since we could send several vectors per
+					 * lmtst instruction only for same qp
+					 */
+					cn10k_cpt_vec_submit(vec_tbl, vec_tbl_len, burst.qp);
+					vec_tbl_len = 0;
+				} else {
+					submitted = ca_lmtst_burst_submit(&burst);
+				}
 				count += submitted;
-				if (unlikely(submitted != ops_len))
-					return count;
-				ops_len = 0;
+				if (unlikely(submitted != burst.nb_ops))
+					goto vec_submit;
+				burst.nb_ops = 0;
 			}
-			curr_qp = qp;
+			is_vector = qp->ca.vector_sz;
+			burst.qp = qp;
 		}
-		w2s[ops_len] = w2;
-		ops[ops_len] = op;
-		if (++ops_len == PKTS_PER_LOOP) {
-			submitted = ca_lmtst_burst_submit(ws, w2s, curr_qp, ops,
-							  ops_len);
+		burst.w2[burst.nb_ops] = w2;
+		burst.op[burst.nb_ops] = op;
+
+		/* Max nb_ops per burst check */
+		if (++burst.nb_ops == PKTS_PER_LOOP) {
+			if (is_vector)
+				submitted = ca_lmtst_vec_submit(&burst, vec_tbl, &vec_tbl_len);
+			else
+				submitted = ca_lmtst_burst_submit(&burst);
 			count += submitted;
-			if (unlikely(submitted != ops_len))
-				return count;
-			ops_len = 0;
+			if (unlikely(submitted != burst.nb_ops))
+				goto vec_submit;
+			burst.nb_ops = 0;
 		}
 	}
-	if (ops_len)
-		count += ca_lmtst_burst_submit(ws, w2s, curr_qp, ops, ops_len);
+	/* Submit the rest of crypto operations */
+	if (burst.nb_ops) {
+		if (is_vector)
+			count += ca_lmtst_vec_submit(&burst, vec_tbl, &vec_tbl_len);
+		else
+			count += ca_lmtst_burst_submit(&burst);
+	}
+
+vec_submit:
+	cn10k_cpt_vec_submit(vec_tbl, vec_tbl_len, burst.qp);
 	return count;
 }
 
@@ -683,6 +953,49 @@ cn10k_cpt_crypto_adapter_dequeue(uintptr_t get_work1)
 	return (uintptr_t)cop;
 }
 
+uintptr_t
+cn10k_cpt_crypto_adapter_vector_dequeue(uintptr_t get_work1)
+{
+	struct cpt_inflight_req *infl_req, *vec_infl_req;
+	struct rte_mempool *meta_mp, *req_mp;
+	struct rte_event_vector *vec;
+	struct rte_crypto_op *cop;
+	struct cnxk_cpt_qp *qp;
+	union cpt_res_s res;
+	int i;
+
+	vec_infl_req = (struct cpt_inflight_req *)(get_work1);
+
+	vec = vec_infl_req->vec;
+	qp = vec_infl_req->qp;
+	meta_mp = qp->meta_info.pool;
+	req_mp = qp->ca.req_mp;
+
+#ifdef CNXK_CRYPTODEV_DEBUG
+	res.u64[0] = __atomic_load_n(&vec_infl_req->res.u64[0], __ATOMIC_RELAXED);
+	PLT_ASSERT(res.cn10k.compcode == CPT_COMP_WARN);
+	PLT_ASSERT(res.cn10k.uc_compcode == 0);
+#endif
+
+	for (i = 0; i < vec->nb_elem; i++) {
+		infl_req = vec->ptrs[i];
+		cop = infl_req->cop;
+
+		res.u64[0] = __atomic_load_n(&infl_req->res.u64[0], __ATOMIC_RELAXED);
+		cn10k_cpt_dequeue_post_process(qp, cop, infl_req, &res.cn10k);
+
+		vec->ptrs[i] = cop;
+		if (unlikely(infl_req->op_flags & CPT_OP_FLAGS_METABUF))
+			rte_mempool_put(meta_mp, infl_req->mdata);
+
+		rte_mempool_put(req_mp, infl_req);
+	}
+
+	rte_mempool_put(req_mp, vec_infl_req);
+
+	return (uintptr_t)vec;
+}
+
 static uint16_t
 cn10k_cpt_dequeue_burst(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops)
 {
diff --git a/drivers/crypto/cnxk/cn10k_cryptodev_ops.h b/drivers/crypto/cnxk/cn10k_cryptodev_ops.h
index 628d6a567c..8104310c30 100644
--- a/drivers/crypto/cnxk/cn10k_cryptodev_ops.h
+++ b/drivers/crypto/cnxk/cn10k_cryptodev_ops.h
@@ -18,5 +18,7 @@ uint16_t __rte_hot cn10k_cpt_crypto_adapter_enqueue(void *ws, struct rte_event e
 		uint16_t nb_events);
 __rte_internal
 uintptr_t cn10k_cpt_crypto_adapter_dequeue(uintptr_t get_work1);
+__rte_internal
+uintptr_t cn10k_cpt_crypto_adapter_vector_dequeue(uintptr_t get_work1);
 
 #endif /* _CN10K_CRYPTODEV_OPS_H_ */
diff --git a/drivers/crypto/cnxk/cnxk_cryptodev_ops.h b/drivers/crypto/cnxk/cnxk_cryptodev_ops.h
index ffe4ae19aa..d9ed43b40b 100644
--- a/drivers/crypto/cnxk/cnxk_cryptodev_ops.h
+++ b/drivers/crypto/cnxk/cnxk_cryptodev_ops.h
@@ -37,7 +37,10 @@ struct cpt_qp_meta_info {
 
 struct cpt_inflight_req {
 	union cpt_res_s res;
-	struct rte_crypto_op *cop;
+	union {
+		struct rte_crypto_op *cop;
+		struct rte_event_vector *vec;
+	};
 	void *mdata;
 	uint8_t op_flags;
 	void *qp;
@@ -63,6 +66,10 @@ struct crypto_adpter_info {
 	/**< Set if queue pair is added to crypto adapter */
 	struct rte_mempool *req_mp;
 	/**< CPT inflight request mempool */
+	uint16_t vector_sz;
+	/**< Maximum number of crypto ops to combine into a single vector */
+	struct rte_mempool *vector_mp;
+	/**< Pool for allocating rte_event_vector */
 };
 
 struct cnxk_cpt_qp {
diff --git a/drivers/crypto/cnxk/version.map b/drivers/crypto/cnxk/version.map
index 0178c416ec..4735e70550 100644
--- a/drivers/crypto/cnxk/version.map
+++ b/drivers/crypto/cnxk/version.map
@@ -5,6 +5,7 @@ INTERNAL {
 	cn9k_cpt_crypto_adapter_dequeue;
 	cn10k_cpt_crypto_adapter_enqueue;
 	cn10k_cpt_crypto_adapter_dequeue;
+	cn10k_cpt_crypto_adapter_vector_dequeue;
 
 	local: *;
 };
diff --git a/drivers/event/cnxk/cn10k_eventdev.c b/drivers/event/cnxk/cn10k_eventdev.c
index b0982d6c3b..1b116e5b18 100644
--- a/drivers/event/cnxk/cn10k_eventdev.c
+++ b/drivers/event/cnxk/cn10k_eventdev.c
@@ -1020,7 +1020,8 @@ cn10k_crypto_adapter_caps_get(const struct rte_eventdev *event_dev,
 	CNXK_VALID_DEV_OR_ERR_RET(cdev->device, "crypto_cn10k");
 
 	*caps = RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD |
-		RTE_EVENT_CRYPTO_ADAPTER_CAP_SESSION_PRIVATE_DATA;
+		RTE_EVENT_CRYPTO_ADAPTER_CAP_SESSION_PRIVATE_DATA |
+		RTE_EVENT_CRYPTO_ADAPTER_CAP_EVENT_VECTOR;
 
 	return 0;
 }
@@ -1033,20 +1034,20 @@ cn10k_crypto_adapter_qp_add(const struct rte_eventdev *event_dev,
 {
 	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
+	int ret;
 
-	RTE_SET_USED(conf);
-
 	CNXK_VALID_DEV_OR_ERR_RET(event_dev->dev, "event_cn10k");
 	CNXK_VALID_DEV_OR_ERR_RET(cdev->device, "crypto_cn10k");
 
 	dev->is_ca_internal_port = 1;
 	cn10k_sso_fp_fns_set((struct rte_eventdev *)(uintptr_t)event_dev);
 
-	return cnxk_crypto_adapter_qp_add(event_dev, cdev, queue_pair_id);
+	ret = cnxk_crypto_adapter_qp_add(event_dev, cdev, queue_pair_id, conf);
+	cn10k_sso_set_priv_mem(event_dev, NULL, 0);
+
+	return ret;
 }
 
 static int
-cn10k_crypto_adapter_qp_del(const struct rte_eventdev *event_dev,
-			    const struct rte_cryptodev *cdev,
+cn10k_crypto_adapter_qp_del(const struct rte_eventdev *event_dev, const struct rte_cryptodev *cdev,
 			    int32_t queue_pair_id)
 {
 	CNXK_VALID_DEV_OR_ERR_RET(event_dev->dev, "event_cn10k");
@@ -1055,6 +1056,34 @@ cn10k_crypto_adapter_qp_del(const struct rte_eventdev *event_dev,
 	return cnxk_crypto_adapter_qp_del(cdev, queue_pair_id);
 }
 
+static int
+cn10k_tim_caps_get(const struct rte_eventdev *evdev, uint64_t flags,
+		   uint32_t *caps, const struct event_timer_adapter_ops **ops)
+{
+	return cnxk_tim_caps_get(evdev, flags, caps, ops,
+				 cn10k_sso_set_priv_mem);
+}
+
+static int
+cn10k_crypto_adapter_vec_limits(const struct rte_eventdev *event_dev,
+				const struct rte_cryptodev *cdev,
+				struct rte_event_crypto_adapter_vector_limits *limits)
+{
+	CNXK_VALID_DEV_OR_ERR_RET(event_dev->dev, "event_cn10k");
+	CNXK_VALID_DEV_OR_ERR_RET(cdev->device, "crypto_cn10k");
+
+	limits->log2_sz = false;
+	limits->min_sz = 0;
+	limits->max_sz = UINT16_MAX;
+	/* Timeout is unused; the software implementation aggregates all
+	 * crypto operations passed to a single enqueue call.
+	 */
+	limits->min_timeout_ns = 0;
+	limits->max_timeout_ns = 0;
+
+	return 0;
+}
+
 static struct eventdev_ops cn10k_sso_dev_ops = {
 	.dev_infos_get = cn10k_sso_info_get,
 	.dev_configure = cn10k_sso_dev_configure,
@@ -1092,6 +1121,11 @@ static struct eventdev_ops cn10k_sso_dev_ops = {
 	.crypto_adapter_caps_get = cn10k_crypto_adapter_caps_get,
 	.crypto_adapter_queue_pair_add = cn10k_crypto_adapter_qp_add,
 	.crypto_adapter_queue_pair_del = cn10k_crypto_adapter_qp_del,
+	.crypto_adapter_vector_limits_get = cn10k_crypto_adapter_vec_limits,
+
+	.xstats_get = cnxk_sso_xstats_get,
+	.xstats_reset = cnxk_sso_xstats_reset,
+	.xstats_get_names = cnxk_sso_xstats_get_names,
 
 	.dump = cnxk_sso_dump,
 	.dev_start = cn10k_sso_start,
diff --git a/drivers/event/cnxk/cn10k_worker.h b/drivers/event/cnxk/cn10k_worker.h
index 56bf4cec50..0c266ea225 100644
--- a/drivers/event/cnxk/cn10k_worker.h
+++ b/drivers/event/cnxk/cn10k_worker.h
@@ -207,6 +207,9 @@ cn10k_sso_hws_post_process(struct cn10k_sso_hws *ws, uint64_t *u64,
 	if ((flags & CPT_RX_WQE_F) &&
 	    (CNXK_EVENT_TYPE_FROM_TAG(u64[0]) == RTE_EVENT_TYPE_CRYPTODEV)) {
 		u64[1] = cn10k_cpt_crypto_adapter_dequeue(u64[1]);
+	} else if ((flags & CPT_RX_WQE_F) &&
+		   (CNXK_EVENT_TYPE_FROM_TAG(u64[0]) == RTE_EVENT_TYPE_CRYPTODEV_VECTOR)) {
+		u64[1] = cn10k_cpt_crypto_adapter_vector_dequeue(u64[1]);
 	} else if (CNXK_EVENT_TYPE_FROM_TAG(u64[0]) == RTE_EVENT_TYPE_ETHDEV) {
 		uint8_t port = CNXK_SUB_EVENT_FROM_TAG(u64[0]);
 		uint64_t mbuf;
@@ -253,8 +256,7 @@ cn10k_sso_hws_post_process(struct cn10k_sso_hws *ws, uint64_t *u64,
 					 flags & NIX_RX_OFFLOAD_TSTAMP_F,
 					 (uint64_t *)tstamp_ptr);
 		u64[1] = mbuf;
-	} else if (CNXK_EVENT_TYPE_FROM_TAG(u64[0]) ==
-		   RTE_EVENT_TYPE_ETHDEV_VECTOR) {
+	} else if (CNXK_EVENT_TYPE_FROM_TAG(u64[0]) == RTE_EVENT_TYPE_ETHDEV_VECTOR) {
 		uint8_t port = CNXK_SUB_EVENT_FROM_TAG(u64[0]);
 		__uint128_t vwqe_hdr = *(__uint128_t *)u64[1];
 
diff --git a/drivers/event/cnxk/cn9k_eventdev.c b/drivers/event/cnxk/cn9k_eventdev.c
index bac48ebe63..30304c7c5d 100644
--- a/drivers/event/cnxk/cn9k_eventdev.c
+++ b/drivers/event/cnxk/cn9k_eventdev.c
@@ -1125,20 +1125,20 @@ cn9k_crypto_adapter_qp_add(const struct rte_eventdev *event_dev,
 {
 	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
+	int ret;
 
-	RTE_SET_USED(conf);
-
 	CNXK_VALID_DEV_OR_ERR_RET(event_dev->dev, "event_cn9k");
 	CNXK_VALID_DEV_OR_ERR_RET(cdev->device, "crypto_cn9k");
 
 	dev->is_ca_internal_port = 1;
 	cn9k_sso_fp_fns_set((struct rte_eventdev *)(uintptr_t)event_dev);
 
-	return cnxk_crypto_adapter_qp_add(event_dev, cdev, queue_pair_id);
+	ret = cnxk_crypto_adapter_qp_add(event_dev, cdev, queue_pair_id, conf);
+	cn9k_sso_set_priv_mem(event_dev, NULL, 0);
+
+	return ret;
 }
 
 static int
-cn9k_crypto_adapter_qp_del(const struct rte_eventdev *event_dev,
-			   const struct rte_cryptodev *cdev,
+cn9k_crypto_adapter_qp_del(const struct rte_eventdev *event_dev, const struct rte_cryptodev *cdev,
 			   int32_t queue_pair_id)
 {
 	CNXK_VALID_DEV_OR_ERR_RET(event_dev->dev, "event_cn9k");
diff --git a/drivers/event/cnxk/cnxk_eventdev.h b/drivers/event/cnxk/cnxk_eventdev.h
index e8129bf774..64f0ac270f 100644
--- a/drivers/event/cnxk/cnxk_eventdev.h
+++ b/drivers/event/cnxk/cnxk_eventdev.h
@@ -10,6 +10,7 @@
 #include <cryptodev_pmd.h>
 #include <rte_devargs.h>
 #include <rte_ethdev.h>
+#include <rte_event_crypto_adapter.h>
 #include <rte_event_eth_rx_adapter.h>
 #include <rte_event_eth_tx_adapter.h>
 #include <rte_kvargs.h>
@@ -303,7 +304,8 @@ int cnxk_sso_tx_adapter_start(uint8_t id, const struct rte_eventdev *event_dev);
 int cnxk_sso_tx_adapter_stop(uint8_t id, const struct rte_eventdev *event_dev);
 int cnxk_sso_tx_adapter_free(uint8_t id, const struct rte_eventdev *event_dev);
 int cnxk_crypto_adapter_qp_add(const struct rte_eventdev *event_dev,
-			       const struct rte_cryptodev *cdev, int32_t queue_pair_id);
+			       const struct rte_cryptodev *cdev, int32_t queue_pair_id,
+			       const struct rte_event_crypto_adapter_queue_conf *conf);
 int cnxk_crypto_adapter_qp_del(const struct rte_cryptodev *cdev, int32_t queue_pair_id);
 
 #endif /* __CNXK_EVENTDEV_H__ */
diff --git a/drivers/event/cnxk/cnxk_eventdev_adptr.c b/drivers/event/cnxk/cnxk_eventdev_adptr.c
index 3f46e79ba8..cd238fe074 100644
--- a/drivers/event/cnxk/cnxk_eventdev_adptr.c
+++ b/drivers/event/cnxk/cnxk_eventdev_adptr.c
@@ -631,7 +631,8 @@ cnxk_sso_tx_adapter_free(uint8_t id __rte_unused,
 }
 
 static int
-crypto_adapter_qp_setup(const struct rte_cryptodev *cdev, struct cnxk_cpt_qp *qp)
+crypto_adapter_qp_setup(const struct rte_cryptodev *cdev, struct cnxk_cpt_qp *qp,
+			const struct rte_event_crypto_adapter_queue_conf *conf)
 {
 	char name[RTE_MEMPOOL_NAMESIZE];
 	uint32_t cache_size, nb_req;
@@ -664,6 +665,10 @@ crypto_adapter_qp_setup(const struct rte_cryptodev *cdev, struct cnxk_cpt_qp *qp
 	if (qp->ca.req_mp == NULL)
 		return -ENOMEM;
 
+	if (conf != NULL) {
+		qp->ca.vector_sz = conf->vector_sz;
+		qp->ca.vector_mp = conf->vector_mp;
+	}
 	qp->ca.enabled = true;
 
 	return 0;
@@ -671,7 +676,8 @@ crypto_adapter_qp_setup(const struct rte_cryptodev *cdev, struct cnxk_cpt_qp *qp
 
 int
 cnxk_crypto_adapter_qp_add(const struct rte_eventdev *event_dev, const struct rte_cryptodev *cdev,
-			   int32_t queue_pair_id)
+			   int32_t queue_pair_id,
+			   const struct rte_event_crypto_adapter_queue_conf *conf)
 {
 	struct cnxk_sso_evdev *sso_evdev = cnxk_sso_pmd_priv(event_dev);
 	uint32_t adptr_xae_cnt = 0;
@@ -683,7 +689,7 @@ cnxk_crypto_adapter_qp_add(const struct rte_eventdev *event_dev, const struct rt
 
 		for (qp_id = 0; qp_id < cdev->data->nb_queue_pairs; qp_id++) {
 			qp = cdev->data->queue_pairs[qp_id];
-			ret = crypto_adapter_qp_setup(cdev, qp);
+			ret = crypto_adapter_qp_setup(cdev, qp, conf);
 			if (ret) {
 				cnxk_crypto_adapter_qp_del(cdev, -1);
 				return ret;
@@ -692,7 +698,7 @@ cnxk_crypto_adapter_qp_add(const struct rte_eventdev *event_dev, const struct rt
 		}
 	} else {
 		qp = cdev->data->queue_pairs[queue_pair_id];
-		ret = crypto_adapter_qp_setup(cdev, qp);
+		ret = crypto_adapter_qp_setup(cdev, qp, conf);
 		if (ret)
 			return ret;
 		adptr_xae_cnt = qp->ca.req_mp->size;
@@ -723,7 +729,8 @@ crypto_adapter_qp_free(struct cnxk_cpt_qp *qp)
 }
 
 int
-cnxk_crypto_adapter_qp_del(const struct rte_cryptodev *cdev, int32_t queue_pair_id)
+cnxk_crypto_adapter_qp_del(const struct rte_cryptodev *cdev,
+			   int32_t queue_pair_id)
 {
 	struct cnxk_cpt_qp *qp;
 
-- 
2.25.1


^ permalink raw reply	[flat|nested] 36+ messages in thread

* RE: [PATCH 1/3] eventdev: introduce event cryptodev vector type
  2022-09-26 11:02       ` Volodymyr Fialko
@ 2022-09-27  9:05         ` Gujjar, Abhinandan S
  2022-09-27  9:24           ` Volodymyr Fialko
  0 siblings, 1 reply; 36+ messages in thread
From: Gujjar, Abhinandan S @ 2022-09-27  9:05 UTC (permalink / raw)
  To: Volodymyr Fialko, dev, Jerin Jacob Kollanukkaran,
	Pavan Nikhilesh Bhagavatula, Shijith Thotton, Hemant Agrawal,
	Sachin Saxena, Jayatheerthan, Jay
  Cc: Akhil Goyal, Anoob Joseph



> -----Original Message-----
> From: Volodymyr Fialko <vfialko@marvell.com>
> Sent: Monday, September 26, 2022 4:32 PM
> To: Gujjar, Abhinandan S <abhinandan.gujjar@intel.com>; dev@dpdk.org; Jerin
> Jacob Kollanukkaran <jerinj@marvell.com>; Pavan Nikhilesh Bhagavatula
> <pbhagavatula@marvell.com>; Shijith Thotton <sthotton@marvell.com>;
> Hemant Agrawal <hemant.agrawal@nxp.com>; Sachin Saxena
> <sachin.saxena@oss.nxp.com>; Jayatheerthan, Jay
> <jay.jayatheerthan@intel.com>
> Cc: Akhil Goyal <gakhil@marvell.com>; Anoob Joseph <anoobj@marvell.com>
> Subject: RE: [PATCH 1/3] eventdev: introduce event cryptodev vector type
> 
> 
> 
> > -----Original Message-----
> > From: Gujjar, Abhinandan S <abhinandan.gujjar@intel.com>
> > Sent: Saturday, September 24, 2022 10:44 AM
> > To: Volodymyr Fialko <vfialko@marvell.com>; dev@dpdk.org; Jerin Jacob
> > Kollanukkaran <jerinj@marvell.com>; Pavan Nikhilesh Bhagavatula
> > <pbhagavatula@marvell.com>; Shijith Thotton <sthotton@marvell.com>;
> > Hemant Agrawal <hemant.agrawal@nxp.com>; Sachin Saxena
> > <sachin.saxena@oss.nxp.com>; Jayatheerthan, Jay
> > <jay.jayatheerthan@intel.com>
> > Cc: Akhil Goyal <gakhil@marvell.com>; Anoob Joseph
> > <anoobj@marvell.com>
> > Subject: [EXT] RE: [PATCH 1/3] eventdev: introduce event cryptodev
> > vector type
> >
> > External Email
> >
> > ----------------------------------------------------------------------
> >
> >
> > > -----Original Message-----
> > > From: Volodymyr Fialko <vfialko@marvell.com>
> > > Sent: Thursday, August 4, 2022 3:29 PM
> > > To: dev@dpdk.org; Jerin Jacob <jerinj@marvell.com>; Gujjar,
> > > Abhinandan S <abhinandan.gujjar@intel.com>; Pavan Nikhilesh
> > > <pbhagavatula@marvell.com>; Shijith Thotton <sthotton@marvell.com>;
> > > Hemant Agrawal <hemant.agrawal@nxp.com>; Sachin Saxena
> > > <sachin.saxena@oss.nxp.com>; Jayatheerthan, Jay
> > > <jay.jayatheerthan@intel.com>
> > > Cc: gakhil@marvell.com; anoobj@marvell.com; Volodymyr Fialko
> > > <vfialko@marvell.com>
> > > Subject: [PATCH 1/3] eventdev: introduce event cryptodev vector type
> > >
> > > Introduce the ability to aggregate crypto operations processed by the
> > > event crypto adapter into a single event containing an rte_event_vector
> > > whose event type is RTE_EVENT_TYPE_CRYPTODEV_VECTOR.
> > >
> > > The application should set RTE_EVENT_CRYPTO_ADAPTER_EVENT_VECTOR in
> > > rte_event_crypto_adapter_queue_conf::flags and provide the vector
> > > configuration with respect to
> > > rte_event_crypto_adapter_vector_limits,
> > > which can be obtained by calling
> > > rte_event_crypto_adapter_vector_limits_get, to enable vectorization.
> > >
> > > The event crypto adapter would be responsible for vectorizing the
> > > crypto operations based on the response information provided in
> > > rte_event_crypto_metadata::response_info.
> > >
> > > Updated drivers and tests according to the new API.
> > >
> > > Signed-off-by: Volodymyr Fialko <vfialko@marvell.com>
> > > ---
> > >  app/test-eventdev/test_perf_common.c          |  10 +-
> > >  app/test/test_event_crypto_adapter.c          |  12 ++-
> > >  .../prog_guide/event_crypto_adapter.rst       |  23 +++-
> > >  drivers/event/cnxk/cn10k_eventdev.c           |   4 +-
> > >  drivers/event/cnxk/cn9k_eventdev.c            |   5 +-
> > >  drivers/event/dpaa/dpaa_eventdev.c            |   9 +-
> > >  drivers/event/dpaa2/dpaa2_eventdev.c          |   9 +-
> > >  drivers/event/octeontx/ssovf_evdev.c          |   4 +-
> > >  lib/eventdev/eventdev_pmd.h                   |  35 +++++-
> > >  lib/eventdev/eventdev_trace.h                 |   6 +-
> > >  lib/eventdev/rte_event_crypto_adapter.c       |  90 ++++++++++++++--
> > >  lib/eventdev/rte_event_crypto_adapter.h       | 101 +++++++++++++++++-
> > >  lib/eventdev/rte_event_eth_rx_adapter.h       |   3 +-
> > >  lib/eventdev/rte_eventdev.h                   |   8 ++
> > >  14 files changed, 276 insertions(+), 43 deletions(-)
> > >
> >
> > I don't see dataplane implementation of vectorization in the crypto adapter!
> > Is it missed out in the patch?
> > comments inline.
> >
> Right now we are targeting crypto_cn10k PMD and ipsec-secgw event mode to
> support vectorization.
Is there a way to test this? When can the dataplane changes be expected?

> 
> > > diff --git a/app/test-eventdev/test_perf_common.c b/app/test-
> > > eventdev/test_perf_common.c index 81420be73a..c770bc93f6 100644
> > > --- a/app/test-eventdev/test_perf_common.c
> > > +++ b/app/test-eventdev/test_perf_common.c
> > > @@ -837,14 +837,14 @@ perf_event_crypto_adapter_setup(struct
> > > test_perf *t, struct prod_data *p)
> > >  	}
> > >
> > >  	if (cap &
> > > RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND) {
> > > -		struct rte_event response_info;
> > > +		struct rte_event_crypto_adapter_queue_conf conf;
> > >
> > > -		response_info.event = 0;
> > > -		response_info.sched_type = RTE_SCHED_TYPE_ATOMIC;
> > > -		response_info.queue_id = p->queue_id;
> > > +		memset(&conf, 0, sizeof(conf));
> > > +		conf.ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
> > > +		conf.ev.queue_id = p->queue_id;
> > >  		ret = rte_event_crypto_adapter_queue_pair_add(
> > >  			TEST_PERF_CA_ID, p->ca.cdev_id, p->ca.cdev_qp_id,
> > > -			&response_info);
> > > +			&conf);
> > >  	} else {
> > >  		ret = rte_event_crypto_adapter_queue_pair_add(
> > >  			TEST_PERF_CA_ID, p->ca.cdev_id, p->ca.cdev_qp_id,
> NULL); diff
> > > --git a/app/test/test_event_crypto_adapter.c
> > > b/app/test/test_event_crypto_adapter.c
> > > index 2ecc7e2cea..bb617c1042 100644
> > > --- a/app/test/test_event_crypto_adapter.c
> > > +++ b/app/test/test_event_crypto_adapter.c
> > > @@ -1175,6 +1175,10 @@ test_crypto_adapter_create(void)  static int
> > >  test_crypto_adapter_qp_add_del(void)
> > >  {
> > > +	struct rte_event_crypto_adapter_queue_conf queue_conf = {
> > > +		.ev = response_info,
> > > +	};
> > > +
> > >  	uint32_t cap;
> > >  	int ret;
> > >
> > > @@ -1183,7 +1187,7 @@ test_crypto_adapter_qp_add_del(void)
> > >
> > >  	if (cap &
> > > RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND) {
> > >  		ret =
> > > rte_event_crypto_adapter_queue_pair_add(TEST_ADAPTER_ID,
> > > -				TEST_CDEV_ID, TEST_CDEV_QP_ID,
> > > &response_info);
> > > +				TEST_CDEV_ID, TEST_CDEV_QP_ID,
> > > &queue_conf);
> > >  	} else
> > >  		ret =
> > > rte_event_crypto_adapter_queue_pair_add(TEST_ADAPTER_ID,
> > >  					TEST_CDEV_ID, TEST_CDEV_QP_ID,
> NULL); @@ -1206,6 +1210,10 @@
> > > configure_event_crypto_adapter(enum
> > > rte_event_crypto_adapter_mode mode)
> > >  		.new_event_threshold = 1200,
> > >  	};
> > >
> > > +	struct rte_event_crypto_adapter_queue_conf queue_conf = {
> > > +		.ev = response_info,
> > > +	};
> > > +
> > >  	uint32_t cap;
> > >  	int ret;
> > >
> > > @@ -1238,7 +1246,7 @@ configure_event_crypto_adapter(enum
> > > rte_event_crypto_adapter_mode mode)
> > >
> > >  	if (cap &
> > > RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND) {
> > >  		ret =
> > > rte_event_crypto_adapter_queue_pair_add(TEST_ADAPTER_ID,
> > > -				TEST_CDEV_ID, TEST_CDEV_QP_ID,
> > > &response_info);
> > > +				TEST_CDEV_ID, TEST_CDEV_QP_ID,
> > > &queue_conf);
> > >  	} else
> > >  		ret =
> > > rte_event_crypto_adapter_queue_pair_add(TEST_ADAPTER_ID,
> > >  				TEST_CDEV_ID, TEST_CDEV_QP_ID, NULL); diff
> --git
> > > a/doc/guides/prog_guide/event_crypto_adapter.rst
> > > b/doc/guides/prog_guide/event_crypto_adapter.rst
> > > index 4fb5c688e0..554df7e358 100644
> > > --- a/doc/guides/prog_guide/event_crypto_adapter.rst
> > > +++ b/doc/guides/prog_guide/event_crypto_adapter.rst
> > > @@ -201,10 +201,10 @@ capability, event information must be passed
> > > to the add API.
> > >
> > >          ret = rte_event_crypto_adapter_caps_get(id, evdev, &cap);
> > >          if (cap &
> > > RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND) {
> > > -                struct rte_event event;
> > > +                struct rte_event_crypto_adapter_queue_conf conf;
> > >
> > > -                // Fill in event information & pass it to add API
> > > -                rte_event_crypto_adapter_queue_pair_add(id, cdev_id, qp_id,
> > > &event);
> > > +                // Fill in conf.event information & pass it to add API
> > > +                rte_event_crypto_adapter_queue_pair_add(id,
> > > + cdev_id, qp_id, &conf);
> > >          } else
> > >                  rte_event_crypto_adapter_queue_pair_add(id,
> > > cdev_id, qp_id, NULL);
> > >
> > > @@ -291,6 +291,23 @@ the ``rte_crypto_op``.
> > >                  rte_memcpy(op + len, &m_data, sizeof(m_data));
> > >          }
> > >
> > > +Enable event vectorization
> > > +~~~~~~~~~~~~~~~~~~~~~~~~~~
> > > +
> > > +The event crypto adapter can aggregate outgoing crypto operations
> > > +based on the response information provided in
> > > +``rte_event_crypto_metadata::response_info``
> > > +and generate a ``rte_event`` containing an ``rte_event_vector`` whose
> > > +event type is ``RTE_EVENT_TYPE_CRYPTODEV_VECTOR``.
> > > +To enable vectorization, the application should set
> > > +RTE_EVENT_CRYPTO_ADAPTER_EVENT_VECTOR in
> > > +``rte_event_crypto_adapter_queue_conf::flags`` and provide the vector
> > > +configuration (size, mempool, etc.) with respect to
> > > +``rte_event_crypto_adapter_vector_limits``, which can be obtained
> > > +by calling ``rte_event_crypto_adapter_vector_limits_get()``.
> > > +
> > > +The RTE_EVENT_CRYPTO_ADAPTER_CAP_EVENT_VECTOR capability
> indicates
> > > +whether PMD supports this feature.
> > > +
> > >  Start the adapter instance
> > >  ~~~~~~~~~~~~~~~~~~~~~~~~~~
> > >
> > > diff --git a/drivers/event/cnxk/cn10k_eventdev.c
> > > b/drivers/event/cnxk/cn10k_eventdev.c
> > > index 5a0cab40a9..e74ec57382 100644
> > > --- a/drivers/event/cnxk/cn10k_eventdev.c
> > > +++ b/drivers/event/cnxk/cn10k_eventdev.c
> > > @@ -889,11 +889,11 @@ static int
> > >  cn10k_crypto_adapter_qp_add(const struct rte_eventdev *event_dev,
> > >  			    const struct rte_cryptodev *cdev,
> > >  			    int32_t queue_pair_id,
> > > -			    const struct rte_event *event)
> > > +			    const struct rte_event_crypto_adapter_queue_conf
> > > *conf)
> > >  {
> > >  	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
> > >
> > > -	RTE_SET_USED(event);
> > > +	RTE_SET_USED(conf);
> > >
> > >  	CNXK_VALID_DEV_OR_ERR_RET(event_dev->dev, "event_cn10k");
> > >  	CNXK_VALID_DEV_OR_ERR_RET(cdev->device, "crypto_cn10k"); diff --
> > > git a/drivers/event/cnxk/cn9k_eventdev.c
> > > b/drivers/event/cnxk/cn9k_eventdev.c
> > > index 2e27030049..45ed547cb0 100644
> > > --- a/drivers/event/cnxk/cn9k_eventdev.c
> > > +++ b/drivers/event/cnxk/cn9k_eventdev.c
> > > @@ -1120,11 +1120,12 @@ cn9k_crypto_adapter_caps_get(const struct
> > > rte_eventdev *event_dev,  static int
> > > cn9k_crypto_adapter_qp_add(const struct rte_eventdev *event_dev,
> > >  			   const struct rte_cryptodev *cdev,
> > > -			   int32_t queue_pair_id, const struct rte_event *event)
> > > +			   int32_t queue_pair_id,
> > > +			   const struct rte_event_crypto_adapter_queue_conf
> > > *conf)
> > >  {
> > >  	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
> > >
> > > -	RTE_SET_USED(event);
> > > +	RTE_SET_USED(conf);
> > >
> > >  	CNXK_VALID_DEV_OR_ERR_RET(event_dev->dev, "event_cn9k");
> > >  	CNXK_VALID_DEV_OR_ERR_RET(cdev->device, "crypto_cn9k"); diff --git
> > > a/drivers/event/dpaa/dpaa_eventdev.c
> > > b/drivers/event/dpaa/dpaa_eventdev.c
> > > index ff6cc0be18..2b9ecd9fbf 100644
> > > --- a/drivers/event/dpaa/dpaa_eventdev.c
> > > +++ b/drivers/event/dpaa/dpaa_eventdev.c
> > > @@ -26,6 +26,7 @@
> > >  #include <rte_eventdev.h>
> > >  #include <eventdev_pmd_vdev.h>
> > >  #include <rte_ethdev.h>
> > > +#include <rte_event_crypto_adapter.h>
> > >  #include <rte_event_eth_rx_adapter.h>  #include
> > > <rte_event_eth_tx_adapter.h>  #include <cryptodev_pmd.h> @@ -775,10
> > > +776,10 @@ static int  dpaa_eventdev_crypto_queue_add(const struct
> > > rte_eventdev *dev,
> > >  		const struct rte_cryptodev *cryptodev,
> > >  		int32_t rx_queue_id,
> > > -		const struct rte_event *ev)
> > > +		const struct rte_event_crypto_adapter_queue_conf *conf)
> > >  {
> > >  	struct dpaa_eventdev *priv = dev->data->dev_private;
> > > -	uint8_t ev_qid = ev->queue_id;
> > > +	uint8_t ev_qid = conf->ev.queue_id;
> > >  	u16 ch_id = priv->evq_info[ev_qid].ch_id;
> > >  	int ret;
> > >
> > > @@ -786,10 +787,10 @@ dpaa_eventdev_crypto_queue_add(const struct
> > > rte_eventdev *dev,
> > >
> > >  	if (rx_queue_id == -1)
> > >  		return dpaa_eventdev_crypto_queue_add_all(dev,
> > > -				cryptodev, ev);
> > > +				cryptodev, &conf->ev);
> > >
> > >  	ret = dpaa_sec_eventq_attach(cryptodev, rx_queue_id,
> > > -			ch_id, ev);
> > > +			ch_id, &conf->ev);
> > >  	if (ret) {
> > >  		DPAA_EVENTDEV_ERR(
> > >  			"dpaa_sec_eventq_attach failed: ret: %d\n", ret); diff -
> -git
> > > a/drivers/event/dpaa2/dpaa2_eventdev.c
> > > b/drivers/event/dpaa2/dpaa2_eventdev.c
> > > index ffc7b8b073..0137736794 100644
> > > --- a/drivers/event/dpaa2/dpaa2_eventdev.c
> > > +++ b/drivers/event/dpaa2/dpaa2_eventdev.c
> > > @@ -26,6 +26,7 @@
> > >  #include <rte_bus_vdev.h>
> > >  #include <ethdev_driver.h>
> > >  #include <cryptodev_pmd.h>
> > > +#include <rte_event_crypto_adapter.h>
> > >  #include <rte_event_eth_rx_adapter.h>  #include
> > > <rte_event_eth_tx_adapter.h>
> > >
> > > @@ -865,10 +866,10 @@ static int
> > >  dpaa2_eventdev_crypto_queue_add(const struct rte_eventdev *dev,
> > >  		const struct rte_cryptodev *cryptodev,
> > >  		int32_t rx_queue_id,
> > > -		const struct rte_event *ev)
> > > +		const struct rte_event_crypto_adapter_queue_conf *conf)
> > >  {
> > >  	struct dpaa2_eventdev *priv = dev->data->dev_private;
> > > -	uint8_t ev_qid = ev->queue_id;
> > > +	uint8_t ev_qid = conf->ev.queue_id;
> > >  	struct dpaa2_dpcon_dev *dpcon = priv->evq_info[ev_qid].dpcon;
> > >  	int ret;
> > >
> > > @@ -876,10 +877,10 @@ dpaa2_eventdev_crypto_queue_add(const struct
> > > rte_eventdev *dev,
> > >
> > >  	if (rx_queue_id == -1)
> > >  		return dpaa2_eventdev_crypto_queue_add_all(dev,
> > > -				cryptodev, ev);
> > > +				cryptodev, &conf->ev);
> > >
> > >  	ret = dpaa2_sec_eventq_attach(cryptodev, rx_queue_id,
> > > -				      dpcon, ev);
> > > +				      dpcon, &conf->ev);
> > >  	if (ret) {
> > >  		DPAA2_EVENTDEV_ERR(
> > >  			"dpaa2_sec_eventq_attach failed: ret: %d\n", ret); diff
> --git
> > > a/drivers/event/octeontx/ssovf_evdev.c
> > > b/drivers/event/octeontx/ssovf_evdev.c
> > > index 9e14e35d10..17acd8ef64 100644
> > > --- a/drivers/event/octeontx/ssovf_evdev.c
> > > +++ b/drivers/event/octeontx/ssovf_evdev.c
> > > @@ -745,12 +745,12 @@ static int
> > >  ssovf_crypto_adapter_qp_add(const struct rte_eventdev *dev,
> > >  			    const struct rte_cryptodev *cdev,
> > >  			    int32_t queue_pair_id,
> > > -			    const struct rte_event *event)
> > > +			    const struct rte_event_crypto_adapter_queue_conf
> > > *conf)
> > >  {
> > >  	struct cpt_instance *qp;
> > >  	uint8_t qp_id;
> > >
> > > -	RTE_SET_USED(event);
> > > +	RTE_SET_USED(conf);
> > >
> > >  	if (queue_pair_id == -1) {
> > >  		for (qp_id = 0; qp_id < cdev->data->nb_queue_pairs; qp_id++) {
> > > diff --git a/lib/eventdev/eventdev_pmd.h
> > > b/lib/eventdev/eventdev_pmd.h index 69402668d8..bcfc9cbcb2 100644
> > > --- a/lib/eventdev/eventdev_pmd.h
> > > +++ b/lib/eventdev/eventdev_pmd.h
> > > @@ -907,6 +907,7 @@ rte_event_pmd_selftest_seqn(struct rte_mbuf
> > > *mbuf) }
> > >
> > >  struct rte_cryptodev;
> > > +struct rte_event_crypto_adapter_queue_conf;
> > >
> > >  /**
> > >   * This API may change without prior notice @@ -961,11 +962,11 @@
> > > typedef int (*eventdev_crypto_adapter_caps_get_t)
> > >   *   - <0: Error code returned by the driver function.
> > >   *
> > >   */
> > > -typedef int (*eventdev_crypto_adapter_queue_pair_add_t)
> > > -			(const struct rte_eventdev *dev,
> > > -			 const struct rte_cryptodev *cdev,
> > > -			 int32_t queue_pair_id,
> > > -			 const struct rte_event *event);
> > > +typedef int (*eventdev_crypto_adapter_queue_pair_add_t)(
> > > +		const struct rte_eventdev *dev,
> > > +		const struct rte_cryptodev *cdev,
> > > +		int32_t queue_pair_id,
> > > +		const struct rte_event_crypto_adapter_queue_conf
> > > *queue_conf);
> > >
> > >
> > >  /**
> > > @@ -1074,6 +1075,27 @@ typedef int
> > > (*eventdev_crypto_adapter_stats_reset)
> > >  			(const struct rte_eventdev *dev,
> > >  			 const struct rte_cryptodev *cdev);
> > >
> > > +struct rte_event_crypto_adapter_vector_limits;
> > > +/**
> > > + * Get event vector limits for a given event, crypto device pair.
> > > + *
> > > + * @param dev
> > > + *   Event device pointer
> > > + *
> > > + * @param cdev
> > > + *   Crypto device pointer
> > > + *
> > > + * @param[out] limits
> > > + *   Pointer to the limits structure to be filled.
> > > + *
> > > + * @return
> > > + *   - 0: Success.
> > > + *   - <0: Error code returned by the driver function.
> > > + */
> > > +typedef int (*eventdev_crypto_adapter_vector_limits_get_t)(
> > > +	const struct rte_eventdev *dev, const struct rte_cryptodev *cdev,
> > > +	struct rte_event_crypto_adapter_vector_limits *limits);
> > > +
> > >  /**
> > >   * Retrieve the event device's eth Tx adapter capabilities.
> > >   *
> > > @@ -1339,6 +1361,9 @@ struct eventdev_ops {
> > >  	/**< Get crypto stats */
> > >  	eventdev_crypto_adapter_stats_reset crypto_adapter_stats_reset;
> > >  	/**< Reset crypto stats */
> > > +	eventdev_crypto_adapter_vector_limits_get_t
> > > +		crypto_adapter_vector_limits_get;
> > > +	/**< Get event vector limits for the crypto adapter */
> > >
> > >  	eventdev_eth_rx_adapter_q_stats_get
> > > eth_rx_adapter_queue_stats_get;
> > >  	/**< Get ethernet Rx queue stats */ diff --git
> > > a/lib/eventdev/eventdev_trace.h b/lib/eventdev/eventdev_trace.h
> > > index 5ec43d80ee..d48cd58850 100644
> > > --- a/lib/eventdev/eventdev_trace.h
> > > +++ b/lib/eventdev/eventdev_trace.h
> > > @@ -18,6 +18,7 @@ extern "C" {
> > >  #include <rte_trace_point.h>
> > >
> > >  #include "rte_eventdev.h"
> > > +#include "rte_event_crypto_adapter.h"
> > >  #include "rte_event_eth_rx_adapter.h"
> > >  #include "rte_event_timer_adapter.h"
> > >
> > > @@ -271,11 +272,12 @@ RTE_TRACE_POINT(  RTE_TRACE_POINT(
> > >  	rte_eventdev_trace_crypto_adapter_queue_pair_add,
> > >  	RTE_TRACE_POINT_ARGS(uint8_t adptr_id, uint8_t cdev_id,
> > > -		const void *event, int32_t queue_pair_id),
> > > +		int32_t queue_pair_id,
> > > +		const struct rte_event_crypto_adapter_queue_conf *conf),
> > >  	rte_trace_point_emit_u8(adptr_id);
> > >  	rte_trace_point_emit_u8(cdev_id);
> > >  	rte_trace_point_emit_i32(queue_pair_id);
> > > -	rte_trace_point_emit_ptr(event);
> > > +	rte_trace_point_emit_ptr(conf);
> > >  )
> > >
> > >  RTE_TRACE_POINT(
> > > diff --git a/lib/eventdev/rte_event_crypto_adapter.c
> > > b/lib/eventdev/rte_event_crypto_adapter.c
> > > index 7c695176f4..73a4f231e2 100644
> > > --- a/lib/eventdev/rte_event_crypto_adapter.c
> > > +++ b/lib/eventdev/rte_event_crypto_adapter.c
> > > @@ -921,11 +921,12 @@ int
> > >  rte_event_crypto_adapter_queue_pair_add(uint8_t id,
> > >  			uint8_t cdev_id,
> > >  			int32_t queue_pair_id,
> > > -			const struct rte_event *event)
> > > +			const struct rte_event_crypto_adapter_queue_conf
> > > *conf)
> > >  {
> > > +	struct rte_event_crypto_adapter_vector_limits limits;
> > >  	struct event_crypto_adapter *adapter;
> > > -	struct rte_eventdev *dev;
> > >  	struct crypto_device_info *dev_info;
> > > +	struct rte_eventdev *dev;
> > >  	uint32_t cap;
> > >  	int ret;
> > >
> > > @@ -951,11 +952,47 @@
> > > rte_event_crypto_adapter_queue_pair_add(uint8_t
> > > id,
> > >  	}
> > >
> > >  	if ((cap &
> > > RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND) &&
> > > -	    (event == NULL)) {
> > > +	    (conf == NULL)) {
> > >  		RTE_EDEV_LOG_ERR("Conf value can not be NULL for
> dev_id=%u",
> > >  				  cdev_id);
> > >  		return -EINVAL;
> > >  	}
> > Newline?
> >
> Ack
> 
> > > +	if ((conf != NULL) &&
> > Checking conf twice?
> Will rewrite as an if (conf == NULL)/else, to avoid checking conf twice.
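> Something like this hypothetical sketch of the rework (not the final code):
>
> 	if (conf == NULL) {
> 		if (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND) {
> 			RTE_EDEV_LOG_ERR("Conf value can not be NULL for dev_id=%u",
> 					 cdev_id);
> 			return -EINVAL;
> 		}
> 	} else if (conf->flags & RTE_EVENT_CRYPTO_ADAPTER_EVENT_VECTOR) {
> 		/* Capability check and vector limits validation go here. */
> 	}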
> 
> > > +	    (conf->flags & RTE_EVENT_CRYPTO_ADAPTER_EVENT_VECTOR)) {
> > Else condition if the flag is not set?
> There's no additional handling for the case when the flag is not set.
> 
> > > +		if ((cap &
> > > RTE_EVENT_CRYPTO_ADAPTER_CAP_EVENT_VECTOR) == 0) {
> > > +			RTE_EDEV_LOG_ERR("Event vectorization is not
> > > supported,"
> > > +					 "dev %" PRIu8 " cdev %" PRIu8, id,
> > > +					 cdev_id);
> > > +			return -ENOTSUP;
> > > +		}
> > > +
> > > +		ret = rte_event_crypto_adapter_vector_limits_get(
> > > +			adapter->eventdev_id, cdev_id, &limits);
> > > +		if (ret < 0) {
> > > +			RTE_EDEV_LOG_ERR("Failed to get event device vector
> > > "
> > > +					 "limits, dev %" PRIu8 " cdev %" PRIu8,
> > > +					 id, cdev_id);
> > > +			return -EINVAL;
> > > +		}
> > New line? Please check other cases.
> >
> Ack
> 
> > > +		if (conf->vector_sz < limits.min_sz ||
> > > +		    conf->vector_sz > limits.max_sz ||
> > > +		    conf->vector_timeout_ns < limits.min_timeout_ns ||
> > > +		    conf->vector_timeout_ns > limits.max_timeout_ns ||
> > > +		    conf->vector_mp == NULL) {
> > > +			RTE_EDEV_LOG_ERR("Invalid event vector
> > > configuration,"
> > > +					 " dev %" PRIu8 " cdev %" PRIu8,
> > > +					 id, cdev_id);
> > > +			return -EINVAL;
> > > +		}
> > > +		if (conf->vector_mp->elt_size <
> > > +		    (sizeof(struct rte_event_vector) +
> > > +		     (sizeof(uintptr_t) * conf->vector_sz))) {
> > > +			RTE_EDEV_LOG_ERR("Invalid event vector
> > > configuration,"
> > > +					 " dev %" PRIu8 " cdev %" PRIu8,
> > > +					 id, cdev_id);
> > > +			return -EINVAL;
> > > +		}
> > > +	}
> > >
> > >  	dev_info = &adapter->cdevs[cdev_id];
> > >
> > > @@ -990,7 +1027,7 @@
> rte_event_crypto_adapter_queue_pair_add(uint8_t
> > > id,
> > >  		ret = (*dev->dev_ops->crypto_adapter_queue_pair_add)(dev,
> > >  				dev_info->dev,
> > >  				queue_pair_id,
> > > -				event);
> > > +				conf);
> > >  		if (ret)
> > >  			return ret;
> > >
> > > @@ -1030,8 +1067,8 @@
> > > rte_event_crypto_adapter_queue_pair_add(uint8_t
> > > id,
> > >  		rte_service_component_runstate_set(adapter->service_id, 1);
> > >  	}
> > >
> > > -	rte_eventdev_trace_crypto_adapter_queue_pair_add(id, cdev_id,
> > > event,
> > > -		queue_pair_id);
> > > +	rte_eventdev_trace_crypto_adapter_queue_pair_add(id, cdev_id,
> > > +		queue_pair_id, conf);
> > >  	return 0;
> > >  }
> > >
> > > @@ -1290,3 +1327,44 @@
> > > rte_event_crypto_adapter_event_port_get(uint8_t
> > > id, uint8_t *event_port_id)
> > >
> > >  	return 0;
> > >  }
> > > +
> > > +int
> > > +rte_event_crypto_adapter_vector_limits_get(
> > > +	uint8_t dev_id, uint16_t cdev_id,
> > > +	struct rte_event_crypto_adapter_vector_limits *limits) {
> > > +	struct rte_cryptodev *cdev;
> > > +	struct rte_eventdev *dev;
> > > +	uint32_t cap;
> > > +	int ret;
> > > +
> > > +	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
> > > +
> > > +	if (!rte_cryptodev_is_valid_dev(cdev_id)) {
> > > +		RTE_EDEV_LOG_ERR("Invalid dev_id=%" PRIu8, cdev_id);
> > > +		return -EINVAL;
> > > +	}
> > > +
> > > +	if (limits == NULL)
> > > +		return -EINVAL;
> > Add appropriate error message like above?
> Ack, will add.
> 
> > > +
> > > +	dev = &rte_eventdevs[dev_id];
> > > +	cdev = rte_cryptodev_pmd_get_dev(cdev_id);
> > > +
> > > +	ret = rte_event_crypto_adapter_caps_get(dev_id, cdev_id, &cap);
> > > +	if (ret) {
> > > +		RTE_EDEV_LOG_ERR("Failed to get adapter caps edev %" PRIu8
> > > +				 "cdev %" PRIu16, dev_id, cdev_id);
> > > +		return ret;
> > > +	}
> > > +
> > > +	if (!(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_EVENT_VECTOR))
> > > +		return -ENOTSUP;
> > Same here.
> Ack, will add.
> 
> > > +
> > > +	RTE_FUNC_PTR_OR_ERR_RET(
> > > +		*dev->dev_ops->crypto_adapter_vector_limits_get,
> > > +		-ENOTSUP);
> > > +
> > > +	return dev->dev_ops->crypto_adapter_vector_limits_get(
> > > +		dev, cdev, limits);
> > > +}
> > > diff --git a/lib/eventdev/rte_event_crypto_adapter.h
> > > b/lib/eventdev/rte_event_crypto_adapter.h
> > > index d90a19e72c..7dd6171b9b 100644
> > > --- a/lib/eventdev/rte_event_crypto_adapter.h
> > > +++ b/lib/eventdev/rte_event_crypto_adapter.h
> > > @@ -253,6 +253,78 @@ struct rte_event_crypto_adapter_conf {
> > >  	 */
> > >  };
> > >
> > > +#define RTE_EVENT_CRYPTO_ADAPTER_EVENT_VECTOR	0x1
> > > +/**< This flag indicates that crypto operations processed on the
> > > +crypto
> > > + * adapter need to be vectorized
> > > + * @see rte_event_crypto_adapter_queue_conf::flags
> > > + */
> > > +
> > > +/**
> > > + * Adapter queue configuration structure  */ struct
> > > +rte_event_crypto_adapter_queue_conf {
> > > +	uint32_t flags;
> > > +	/**< Flags for handling crypto operations
> > > +	 * @see RTE_EVENT_CRYPTO_ADAPTER_EVENT_VECTOR
> > > +	 */
> > > +	struct rte_event ev;
> > > +	/**< If HW supports cryptodev queue pair to event queue binding,
> > > +	 * application is expected to fill in event information.
> > > +	 * @see
> > > RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND
> > > +	 */
> > > +	uint16_t vector_sz;
> > > +	/**< Indicates the maximum number of crypto operations to combine
> > > and
> > > +	 * form a vector.
> > > +	 * @see rte_event_crypto_adapter_vector_limits::min_sz
> > > +	 * @see rte_event_crypto_adapter_vector_limits::max_sz
> > > +	 * Valid when RTE_EVENT_CRYPTO_ADAPTER_EVENT_VECTOR flag is
> > > set in
> > > +	 * @see rte_event_crypto_adapter_queue_conf::rx_queue_flags
> > rx_queue_flags??
> Typo, should be conf::flags.
> 
> > > +	 */
> > > +	uint64_t vector_timeout_ns;
> > > +	/**<
> > > +	 * Indicates the maximum number of nanoseconds to wait for
> > > aggregating
> > > +	 * crypto operations. Should be within vectorization limits of the
> > > +	 * adapter
> > > +	 * @see rte_event_crypto_adapter_vector_limits::min_timeout_ns
> > > +	 * @see rte_event_crypto_adapter_vector_limits::max_timeout_ns
> > > +	 * Valid when RTE_EVENT_CRYPTO_ADAPTER_EVENT_VECTOR flag is
> > > set in
> > > +	 * @see rte_event_crypto_adapter_queue_conf::flags
> > > +	 */
> > > +	struct rte_mempool *vector_mp;
> > > +	/**< Indicates the mempool that should be used for allocating
> > > +	 * rte_event_vector container.
> > > +	 * Should be created by using `rte_event_vector_pool_create`.
> > > +	 * Valid when RTE_EVENT_CRYPTO_ADAPTER_EVENT_VECTOR flag is
> > > set in
> > > +	 * @see rte_event_crypto_adapter_queue_conf::flags.
> > > +	 */
> > > +};
> > > +
> > > +/**
> > > + * A structure used to retrieve event crypto adapter vector limits.
> > > + */
> > > +struct rte_event_crypto_adapter_vector_limits {
> > > +	uint16_t min_sz;
> > > +	/**< Minimum vector limit configurable.
> > > +	 * @see rte_event_crypto_adapter_queue_conf::vector_sz
> > > +	 */
> > > +	uint16_t max_sz;
> > > +	/**< Maximum vector limit configurable.
> > > +	 * @see rte_event_crypto_adapter_queue_conf::vector_sz
> > > +	 */
> > > +	uint8_t log2_sz;
> > > +	/**< True if the size configured should be in log2.
> > > +	 * @see rte_event_crypto_adapter_queue_conf::vector_sz
> > > +	 */
> > > +	uint64_t min_timeout_ns;
> > > +	/**< Minimum vector timeout configurable.
> > > +	 * @see rte_event_crypto_adapter_queue_conf::vector_timeout_ns
> > > +	 */
> > > +	uint64_t max_timeout_ns;
> > > +	/**< Maximum vector timeout configurable.
> > > +	 * @see rte_event_crypto_adapter_queue_conf::vector_timeout_ns
> > > +	 */
> > > +};
> > > +
> > >  /**
> > >   * Function type used for adapter configuration callback. The callback is
> > >   * used to fill in members of the struct
> > > rte_event_crypto_adapter_conf, this @@ -392,10 +464,9 @@
> > rte_event_crypto_adapter_free(uint8_t id);
> > >   *  Cryptodev queue pair identifier. If queue_pair_id is set -1,
> > >   *  adapter adds all the pre configured queue pairs to the instance.
> > >   *
> > > - * @param event
> > > - *  if HW supports cryptodev queue pair to event queue binding,
> > > application is
> > > - *  expected to fill in event information, else it will be NULL.
> > > - *  @see
> RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND
> > > + * @param conf
> > > + *  Additional configuration structure of type
> > > + *  *rte_event_crypto_adapter_queue_conf*
> > >   *
> > >   * @return
> > >   *  - 0: Success, queue pair added correctly.
> > > @@ -405,7 +476,7 @@ int
> > >  rte_event_crypto_adapter_queue_pair_add(uint8_t id,
> > >  			uint8_t cdev_id,
> > >  			int32_t queue_pair_id,
> > > -			const struct rte_event *event);
> > > +			const struct rte_event_crypto_adapter_queue_conf
> > > *conf);
> > >
> > >  /**
> > >   * Delete a queue pair from an event crypto adapter.
> > > @@ -523,6 +594,26 @@ rte_event_crypto_adapter_service_id_get(uint8_t
> > > id, uint32_t *service_id);  int
> > > rte_event_crypto_adapter_event_port_get(uint8_t
> > > id, uint8_t *event_port_id);
> > >
> > > +/**
> > > + * Retrieve vector limits for a given event dev and crypto dev pair.
> > > + * @see rte_event_crypto_adapter_vector_limits
> > > + *
> > > + * @param dev_id
> > > + *  Event device identifier.
> > > + * @param cdev_id
> > > + *  Crypto device identifier.
> > > + * @param [out] limits
> > > + *  A pointer to rte_event_crypto_adapter_vector_limits structure
> > > +that has to
> > > + * be filled.
> > Space missing before "be filled"
> Ack
> 
> > > + *
> > > + * @return
> > > + *  - 0: Success.
> > > + *  - <0: Error code on failure.
> > > + */
> > > +int rte_event_crypto_adapter_vector_limits_get(
> > > +	uint8_t dev_id, uint16_t cdev_id,
> > > +	struct rte_event_crypto_adapter_vector_limits *limits);
> > > +
> > >  /**
> > >   * Enqueue a burst of crypto operations as event objects supplied
> > > in
> > > *rte_event*
> > >   * structure on an event crypto adapter designated by its event
> > > *dev_id* through diff --git
> > > a/lib/eventdev/rte_event_eth_rx_adapter.h
> > > b/lib/eventdev/rte_event_eth_rx_adapter.h
> > > index 3608a7b2cf..c8f2936866 100644
> > > --- a/lib/eventdev/rte_event_eth_rx_adapter.h
> > > +++ b/lib/eventdev/rte_event_eth_rx_adapter.h
> > > @@ -457,7 +457,8 @@ int rte_event_eth_rx_adapter_free(uint8_t id);
> > >   * @see RTE_EVENT_ETH_RX_ADAPTER_CAP_MULTI_EVENTQ
> > >   *
> > >   * @param conf
> > > - *  Additional configuration structure of type
> > > *rte_event_eth_rx_adapter_conf*
> > > + *  Additional configuration structure of type
> > > + *  *rte_event_eth_rx_adapter_queue_conf*
> > These changes are not relevant. Please consider sending separate patch.
> >
> Ack, Will send this change as a separate patch.
> 
> > >   *
> > >   * @return
> > >   *  - 0: Success, Receive queue added correctly.
> > > diff --git a/lib/eventdev/rte_eventdev.h
> > > b/lib/eventdev/rte_eventdev.h index
> > > 6a6f6ea4c1..1a737bf851 100644
> > > --- a/lib/eventdev/rte_eventdev.h
> > > +++ b/lib/eventdev/rte_eventdev.h
> > > @@ -1203,6 +1203,9 @@ struct rte_event_vector {
> > >  #define RTE_EVENT_TYPE_ETH_RX_ADAPTER_VECTOR
> \
> > >  	(RTE_EVENT_TYPE_VECTOR | RTE_EVENT_TYPE_ETH_RX_ADAPTER)
> /**< The
> > > event vector generated from eth Rx adapter. */
> > > +#define RTE_EVENT_TYPE_CRYPTODEV_VECTOR                                        \
> > > +	(RTE_EVENT_TYPE_VECTOR | RTE_EVENT_TYPE_CRYPTODEV) /**< The
> > > event
> > > +vector generated from cryptodev adapter. */
> > >
> > >  #define RTE_EVENT_TYPE_MAX              0x10
> > >  /**< Maximum number of event types */ @@ -1420,6 +1423,11 @@
> > > rte_event_timer_adapter_caps_get(uint8_t dev_id, uint32_t *caps);
> > >   * the private data information along with the crypto session.
> > >   */
> > >
> > > +#define RTE_EVENT_CRYPTO_ADAPTER_CAP_EVENT_VECTOR   0x10
> > > +/**< Flag indicates HW is capable of aggregating processed
> > > + * crypto operations into rte_event_vector.
> > > + */
> > > +
> > >  /**
> > >   * Retrieve the event device's crypto adapter capabilities for the
> > >   * specified cryptodev device
> > > --
> > > 2.25.1


^ permalink raw reply	[flat|nested] 36+ messages in thread

* RE: [PATCH 1/3] eventdev: introduce event cryptodev vector type
  2022-09-27  9:05         ` Gujjar, Abhinandan S
@ 2022-09-27  9:24           ` Volodymyr Fialko
  2022-09-27  9:38             ` Gujjar, Abhinandan S
  0 siblings, 1 reply; 36+ messages in thread
From: Volodymyr Fialko @ 2022-09-27  9:24 UTC (permalink / raw)
  To: Gujjar, Abhinandan S, dev, Jerin Jacob Kollanukkaran,
	Pavan Nikhilesh Bhagavatula, Shijith Thotton, Hemant Agrawal,
	Sachin Saxena, Jayatheerthan, Jay
  Cc: Akhil Goyal, Anoob Joseph



> -----Original Message-----
> From: Gujjar, Abhinandan S <abhinandan.gujjar@intel.com>
> Sent: Tuesday, September 27, 2022 11:05 AM
> To: Volodymyr Fialko <vfialko@marvell.com>; dev@dpdk.org; Jerin Jacob Kollanukkaran
> <jerinj@marvell.com>; Pavan Nikhilesh Bhagavatula <pbhagavatula@marvell.com>; Shijith Thotton
> <sthotton@marvell.com>; Hemant Agrawal <hemant.agrawal@nxp.com>; Sachin Saxena
> <sachin.saxena@oss.nxp.com>; Jayatheerthan, Jay <jay.jayatheerthan@intel.com>
> Cc: Akhil Goyal <gakhil@marvell.com>; Anoob Joseph <anoobj@marvell.com>
> Subject: [EXT] RE: [PATCH 1/3] eventdev: introduce event cryptodev vector type
> 
> External Email
> 
> ----------------------------------------------------------------------
> 
> 
> > -----Original Message-----
> > From: Volodymyr Fialko <vfialko@marvell.com>
> > Sent: Monday, September 26, 2022 4:32 PM
> > To: Gujjar, Abhinandan S <abhinandan.gujjar@intel.com>; dev@dpdk.org;
> > Jerin Jacob Kollanukkaran <jerinj@marvell.com>; Pavan Nikhilesh
> > Bhagavatula <pbhagavatula@marvell.com>; Shijith Thotton
> > <sthotton@marvell.com>; Hemant Agrawal <hemant.agrawal@nxp.com>;
> > Sachin Saxena <sachin.saxena@oss.nxp.com>; Jayatheerthan, Jay
> > <jay.jayatheerthan@intel.com>
> > Cc: Akhil Goyal <gakhil@marvell.com>; Anoob Joseph
> > <anoobj@marvell.com>
> > Subject: RE: [PATCH 1/3] eventdev: introduce event cryptodev vector
> > type
> >
> >
> >
> > > -----Original Message-----
> > > From: Gujjar, Abhinandan S <abhinandan.gujjar@intel.com>
> > > Sent: Saturday, September 24, 2022 10:44 AM
> > > To: Volodymyr Fialko <vfialko@marvell.com>; dev@dpdk.org; Jerin
> > > Jacob Kollanukkaran <jerinj@marvell.com>; Pavan Nikhilesh
> > > Bhagavatula <pbhagavatula@marvell.com>; Shijith Thotton
> > > <sthotton@marvell.com>; Hemant Agrawal <hemant.agrawal@nxp.com>;
> > > Sachin Saxena <sachin.saxena@oss.nxp.com>; Jayatheerthan, Jay
> > > <jay.jayatheerthan@intel.com>
> > > Cc: Akhil Goyal <gakhil@marvell.com>; Anoob Joseph
> > > <anoobj@marvell.com>
> > > Subject: [EXT] RE: [PATCH 1/3] eventdev: introduce event cryptodev
> > > vector type
> > >
> > > External Email
> > >
> > > --------------------------------------------------------------------
> > > --
> > >
> > >
> > > > -----Original Message-----
> > > > From: Volodymyr Fialko <vfialko@marvell.com>
> > > > Sent: Thursday, August 4, 2022 3:29 PM
> > > > To: dev@dpdk.org; Jerin Jacob <jerinj@marvell.com>; Gujjar,
> > > > Abhinandan S <abhinandan.gujjar@intel.com>; Pavan Nikhilesh
> > > > <pbhagavatula@marvell.com>; Shijith Thotton
> > > > <sthotton@marvell.com>; Hemant Agrawal <hemant.agrawal@nxp.com>;
> > > > Sachin Saxena <sachin.saxena@oss.nxp.com>; Jayatheerthan, Jay
> > > > <jay.jayatheerthan@intel.com>
> > > > Cc: gakhil@marvell.com; anoobj@marvell.com; Volodymyr Fialko
> > > > <vfialko@marvell.com>
> > > > Subject: [PATCH 1/3] eventdev: introduce event cryptodev vector
> > > > type
> > > >
> > > > Introduce the ability to aggregate crypto operations processed by
> > > > the event crypto adapter into a single event containing an
> > > > rte_event_vector whose event type is RTE_EVENT_TYPE_CRYPTODEV_VECTOR.
> > > >
> > > > The application should set RTE_EVENT_CRYPTO_ADAPTER_EVENT_VECTOR in
> > > > rte_event_crypto_adapter_queue_conf::flags and provide the vector
> > > > configuration with respect to
> > > > rte_event_crypto_adapter_vector_limits,
> > > > which can be obtained by calling
> > > > rte_event_crypto_adapter_vector_limits_get, to enable vectorization.
> > > >
> > > > The event crypto adapter would be responsible for vectorizing the
> > > > crypto operations based on the response information provided in
> > > > rte_event_crypto_metadata::response_info.
> > > >
> > > > Updated drivers and tests according to the new API.
> > > >
> > > > Signed-off-by: Volodymyr Fialko <vfialko@marvell.com>
> > > > ---
> > > >  app/test-eventdev/test_perf_common.c          |  10 +-
> > > >  app/test/test_event_crypto_adapter.c          |  12 ++-
> > > >  .../prog_guide/event_crypto_adapter.rst       |  23 +++-
> > > >  drivers/event/cnxk/cn10k_eventdev.c           |   4 +-
> > > >  drivers/event/cnxk/cn9k_eventdev.c            |   5 +-
> > > >  drivers/event/dpaa/dpaa_eventdev.c            |   9 +-
> > > >  drivers/event/dpaa2/dpaa2_eventdev.c          |   9 +-
> > > >  drivers/event/octeontx/ssovf_evdev.c          |   4 +-
> > > >  lib/eventdev/eventdev_pmd.h                   |  35 +++++-
> > > >  lib/eventdev/eventdev_trace.h                 |   6 +-
> > > >  lib/eventdev/rte_event_crypto_adapter.c       |  90 ++++++++++++++--
> > > >  lib/eventdev/rte_event_crypto_adapter.h       | 101 +++++++++++++++++-
> > > >  lib/eventdev/rte_event_eth_rx_adapter.h       |   3 +-
> > > >  lib/eventdev/rte_eventdev.h                   |   8 ++
> > > >  14 files changed, 276 insertions(+), 43 deletions(-)
> > > >
> > >
> > > I don't see dataplane implementation of vectorization in the crypto adapter!
> > > Is it missed out in the patch?
> > > comments inline.
> > >
> > Right now we are targeting crypto_cn10k PMD and ipsec-secgw event mode
> > to support vectorization.
> Is there a way to test this? When can the dataplane changes be expected?
> 
If the spec looks okay, support in the s/w crypto adapter and other h/w PMDs can be added by the respective maintainers. Currently, we are adding the library change, support for one PMD and an application to test the feature. The feature is exposed through a capability flag so as not to break existing functionality.
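
As an illustration (not part of this patch set), the application-side flow to enable vectorization would look roughly like the sketch below; the helper name, device IDs, pool name and pool sizing are made up for the example:

	#include <errno.h>
	#include <string.h>
	#include <rte_cryptodev.h>
	#include <rte_eventdev.h>
	#include <rte_event_crypto_adapter.h>
	#include <rte_lcore.h>

	/* Hypothetical helper: enable op aggregation on one queue pair. */
	static int
	enable_crypto_vectorization(uint8_t adptr_id, uint8_t evdev_id,
				    uint8_t cdev_id, int32_t qp_id,
				    uint8_t ev_queue_id)
	{
		struct rte_event_crypto_adapter_vector_limits limits;
		struct rte_event_crypto_adapter_queue_conf conf;
		uint32_t cap;
		int ret;

		ret = rte_event_crypto_adapter_caps_get(evdev_id, cdev_id, &cap);
		if (ret < 0)
			return ret;
		if (!(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_EVENT_VECTOR))
			return -ENOTSUP;

		ret = rte_event_crypto_adapter_vector_limits_get(evdev_id,
				cdev_id, &limits);
		if (ret < 0)
			return ret;

		memset(&conf, 0, sizeof(conf));
		conf.flags = RTE_EVENT_CRYPTO_ADAPTER_EVENT_VECTOR;
		conf.ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
		conf.ev.queue_id = ev_queue_id;
		conf.vector_sz = limits.max_sz;
		conf.vector_timeout_ns = limits.min_timeout_ns;
		/* Elements must fit rte_event_vector plus vector_sz pointers;
		 * rte_event_vector_pool_create() sizes them accordingly.
		 */
		conf.vector_mp = rte_event_vector_pool_create("ca_vec_pool",
				1024, 0, conf.vector_sz, rte_socket_id());
		if (conf.vector_mp == NULL)
			return -ENOMEM;

		return rte_event_crypto_adapter_queue_pair_add(adptr_id,
				cdev_id, qp_id, &conf);
	}

On the dequeue side the completed operations then arrive aggregated; process_crypto_op() below is a placeholder for application logic:

	if (ev.event_type == RTE_EVENT_TYPE_CRYPTODEV_VECTOR) {
		struct rte_event_vector *vec = ev.vec;
		uint16_t i;

		for (i = 0; i < vec->nb_elem; i++)
			process_crypto_op((struct rte_crypto_op *)vec->ptrs[i]);
		rte_mempool_put(rte_mempool_from_obj(vec), vec);
	}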

> >
> > > > diff --git a/app/test-eventdev/test_perf_common.c b/app/test-
> > > > eventdev/test_perf_common.c index 81420be73a..c770bc93f6 100644
> > > > --- a/app/test-eventdev/test_perf_common.c
> > > > +++ b/app/test-eventdev/test_perf_common.c
> > > > @@ -837,14 +837,14 @@ perf_event_crypto_adapter_setup(struct
> > > > test_perf *t, struct prod_data *p)
> > > >  	}
> > > >
> > > >  	if (cap &
> > > > RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND) {
> > > > -		struct rte_event response_info;
> > > > +		struct rte_event_crypto_adapter_queue_conf conf;
> > > >
> > > > -		response_info.event = 0;
> > > > -		response_info.sched_type = RTE_SCHED_TYPE_ATOMIC;
> > > > -		response_info.queue_id = p->queue_id;
> > > > +		memset(&conf, 0, sizeof(conf));
> > > > +		conf.ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
> > > > +		conf.ev.queue_id = p->queue_id;
> > > >  		ret = rte_event_crypto_adapter_queue_pair_add(
> > > >  			TEST_PERF_CA_ID, p->ca.cdev_id, p->ca.cdev_qp_id,
> > > > -			&response_info);
> > > > +			&conf);
> > > >  	} else {
> > > >  		ret = rte_event_crypto_adapter_queue_pair_add(
> > > >  			TEST_PERF_CA_ID, p->ca.cdev_id, p->ca.cdev_qp_id,
> > NULL); diff
> > > > --git a/app/test/test_event_crypto_adapter.c
> > > > b/app/test/test_event_crypto_adapter.c
> > > > index 2ecc7e2cea..bb617c1042 100644
> > > > --- a/app/test/test_event_crypto_adapter.c
> > > > +++ b/app/test/test_event_crypto_adapter.c
> > > > @@ -1175,6 +1175,10 @@ test_crypto_adapter_create(void)  static
> > > > int
> > > >  test_crypto_adapter_qp_add_del(void)
> > > >  {
> > > > +	struct rte_event_crypto_adapter_queue_conf queue_conf = {
> > > > +		.ev = response_info,
> > > > +	};
> > > > +
> > > >  	uint32_t cap;
> > > >  	int ret;
> > > >
> > > > @@ -1183,7 +1187,7 @@ test_crypto_adapter_qp_add_del(void)
> > > >
> > > >  	if (cap &
> > > > RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND) {
> > > >  		ret =
> > > > rte_event_crypto_adapter_queue_pair_add(TEST_ADAPTER_ID,
> > > > -				TEST_CDEV_ID, TEST_CDEV_QP_ID,
> > > > &response_info);
> > > > +				TEST_CDEV_ID, TEST_CDEV_QP_ID,
> > > > &queue_conf);
> > > >  	} else
> > > >  		ret =
> > > > rte_event_crypto_adapter_queue_pair_add(TEST_ADAPTER_ID,
> > > >  					TEST_CDEV_ID, TEST_CDEV_QP_ID,
> > NULL); @@ -1206,6 +1210,10 @@
> > > > configure_event_crypto_adapter(enum
> > > > rte_event_crypto_adapter_mode mode)
> > > >  		.new_event_threshold = 1200,
> > > >  	};
> > > >
> > > > +	struct rte_event_crypto_adapter_queue_conf queue_conf = {
> > > > +		.ev = response_info,
> > > > +	};
> > > > +
> > > >  	uint32_t cap;
> > > >  	int ret;
> > > >
> > > > @@ -1238,7 +1246,7 @@ configure_event_crypto_adapter(enum
> > > > rte_event_crypto_adapter_mode mode)
> > > >
> > > >  	if (cap &
> > > > RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND) {
> > > >  		ret =
> > > > rte_event_crypto_adapter_queue_pair_add(TEST_ADAPTER_ID,
> > > > -				TEST_CDEV_ID, TEST_CDEV_QP_ID,
> > > > &response_info);
> > > > +				TEST_CDEV_ID, TEST_CDEV_QP_ID,
> > > > &queue_conf);
> > > >  	} else
> > > >  		ret =
> > > > rte_event_crypto_adapter_queue_pair_add(TEST_ADAPTER_ID,
> > > >  				TEST_CDEV_ID, TEST_CDEV_QP_ID, NULL); diff
> > --git
> > > > a/doc/guides/prog_guide/event_crypto_adapter.rst
> > > > b/doc/guides/prog_guide/event_crypto_adapter.rst
> > > > index 4fb5c688e0..554df7e358 100644
> > > > --- a/doc/guides/prog_guide/event_crypto_adapter.rst
> > > > +++ b/doc/guides/prog_guide/event_crypto_adapter.rst
> > > > @@ -201,10 +201,10 @@ capability, event information must be passed
> > > > to the add API.
> > > >
> > > >          ret = rte_event_crypto_adapter_caps_get(id, evdev, &cap);
> > > >          if (cap &
> > > > RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND) {
> > > > -                struct rte_event event;
> > > > +                struct rte_event_crypto_adapter_queue_conf conf;
> > > >
> > > > -                // Fill in event information & pass it to add API
> > > > -                rte_event_crypto_adapter_queue_pair_add(id, cdev_id, qp_id,
> > > > &event);
> > > > +                // Fill in conf.event information & pass it to add API
> > > > +                rte_event_crypto_adapter_queue_pair_add(id,
> > > > + cdev_id, qp_id, &conf);
> > > >          } else
> > > >                  rte_event_crypto_adapter_queue_pair_add(id,
> > > > cdev_id, qp_id, NULL);
> > > >
> > > > @@ -291,6 +291,23 @@ the ``rte_crypto_op``.
> > > >                  rte_memcpy(op + len, &m_data, sizeof(m_data));
> > > >          }
> > > >
> > > > +Enable event vectorization
> > > > +~~~~~~~~~~~~~~~~~~~~~~~~~~
> > > > +
> > > > +The event crypto adapter can aggregate outgoing crypto
> > > > +operations based on the response information provided in
> > > > +``rte_event_crypto_metadata::response_info``
> > > > +and generate a ``rte_event`` containing an ``rte_event_vector``
> > > > +whose event type is ``RTE_EVENT_TYPE_CRYPTODEV_VECTOR``.
> > > > +To enable vectorization, the application should set
> > > > +RTE_EVENT_CRYPTO_ADAPTER_EVENT_VECTOR in
> > > > +``rte_event_crypto_adapter_queue_conf::flags`` and provide the vector
> > > > +configuration (size, mempool, etc.) with respect to
> > > > +``rte_event_crypto_adapter_vector_limits``, which can be
> > > > +obtained by calling ``rte_event_crypto_adapter_vector_limits_get()``.
> > > > +
> > > > +The RTE_EVENT_CRYPTO_ADAPTER_CAP_EVENT_VECTOR capability
> > indicates
> > > > +whether PMD supports this feature.
> > > > +
> > > >  Start the adapter instance
> > > >  ~~~~~~~~~~~~~~~~~~~~~~~~~~
> > > >
> > > > diff --git a/drivers/event/cnxk/cn10k_eventdev.c
> > > > b/drivers/event/cnxk/cn10k_eventdev.c
> > > > index 5a0cab40a9..e74ec57382 100644
> > > > --- a/drivers/event/cnxk/cn10k_eventdev.c
> > > > +++ b/drivers/event/cnxk/cn10k_eventdev.c
> > > > @@ -889,11 +889,11 @@ static int
> > > >  cn10k_crypto_adapter_qp_add(const struct rte_eventdev *event_dev,
> > > >  			    const struct rte_cryptodev *cdev,
> > > >  			    int32_t queue_pair_id,
> > > > -			    const struct rte_event *event)
> > > > +			    const struct rte_event_crypto_adapter_queue_conf
> > > > *conf)
> > > >  {
> > > >  	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
> > > >
> > > > -	RTE_SET_USED(event);
> > > > +	RTE_SET_USED(conf);
> > > >
> > > >  	CNXK_VALID_DEV_OR_ERR_RET(event_dev->dev, "event_cn10k");
> > > >  	CNXK_VALID_DEV_OR_ERR_RET(cdev->device, "crypto_cn10k"); diff --
> > > > git a/drivers/event/cnxk/cn9k_eventdev.c
> > > > b/drivers/event/cnxk/cn9k_eventdev.c
> > > > index 2e27030049..45ed547cb0 100644
> > > > --- a/drivers/event/cnxk/cn9k_eventdev.c
> > > > +++ b/drivers/event/cnxk/cn9k_eventdev.c
> > > > @@ -1120,11 +1120,12 @@ cn9k_crypto_adapter_caps_get(const struct
> > > > rte_eventdev *event_dev,  static int
> > > > cn9k_crypto_adapter_qp_add(const struct rte_eventdev *event_dev,
> > > >  			   const struct rte_cryptodev *cdev,
> > > > -			   int32_t queue_pair_id, const struct rte_event *event)
> > > > +			   int32_t queue_pair_id,
> > > > +			   const struct rte_event_crypto_adapter_queue_conf
> > > > *conf)
> > > >  {
> > > >  	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
> > > >
> > > > -	RTE_SET_USED(event);
> > > > +	RTE_SET_USED(conf);
> > > >
> > > >  	CNXK_VALID_DEV_OR_ERR_RET(event_dev->dev, "event_cn9k");
> > > >  	CNXK_VALID_DEV_OR_ERR_RET(cdev->device, "crypto_cn9k"); diff
> > > > --git a/drivers/event/dpaa/dpaa_eventdev.c
> > > > b/drivers/event/dpaa/dpaa_eventdev.c
> > > > index ff6cc0be18..2b9ecd9fbf 100644
> > > > --- a/drivers/event/dpaa/dpaa_eventdev.c
> > > > +++ b/drivers/event/dpaa/dpaa_eventdev.c
> > > > @@ -26,6 +26,7 @@
> > > >  #include <rte_eventdev.h>
> > > >  #include <eventdev_pmd_vdev.h>
> > > >  #include <rte_ethdev.h>
> > > > +#include <rte_event_crypto_adapter.h>
> > > >  #include <rte_event_eth_rx_adapter.h>  #include
> > > > <rte_event_eth_tx_adapter.h>  #include <cryptodev_pmd.h> @@
> > > > -775,10
> > > > +776,10 @@ static int  dpaa_eventdev_crypto_queue_add(const struct
> > > > rte_eventdev *dev,
> > > >  		const struct rte_cryptodev *cryptodev,
> > > >  		int32_t rx_queue_id,
> > > > -		const struct rte_event *ev)
> > > > +		const struct rte_event_crypto_adapter_queue_conf *conf)
> > > >  {
> > > >  	struct dpaa_eventdev *priv = dev->data->dev_private;
> > > > -	uint8_t ev_qid = ev->queue_id;
> > > > +	uint8_t ev_qid = conf->ev.queue_id;
> > > >  	u16 ch_id = priv->evq_info[ev_qid].ch_id;
> > > >  	int ret;
> > > >
> > > > @@ -786,10 +787,10 @@ dpaa_eventdev_crypto_queue_add(const struct
> > > > rte_eventdev *dev,
> > > >
> > > >  	if (rx_queue_id == -1)
> > > >  		return dpaa_eventdev_crypto_queue_add_all(dev,
> > > > -				cryptodev, ev);
> > > > +				cryptodev, &conf->ev);
> > > >
> > > >  	ret = dpaa_sec_eventq_attach(cryptodev, rx_queue_id,
> > > > -			ch_id, ev);
> > > > +			ch_id, &conf->ev);
> > > >  	if (ret) {
> > > >  		DPAA_EVENTDEV_ERR(
> > > >  			"dpaa_sec_eventq_attach failed: ret: %d\n", ret); diff -
> > -git
> > > > a/drivers/event/dpaa2/dpaa2_eventdev.c
> > > > b/drivers/event/dpaa2/dpaa2_eventdev.c
> > > > index ffc7b8b073..0137736794 100644
> > > > --- a/drivers/event/dpaa2/dpaa2_eventdev.c
> > > > +++ b/drivers/event/dpaa2/dpaa2_eventdev.c
> > > > @@ -26,6 +26,7 @@
> > > >  #include <rte_bus_vdev.h>
> > > >  #include <ethdev_driver.h>
> > > >  #include <cryptodev_pmd.h>
> > > > +#include <rte_event_crypto_adapter.h>
> > > >  #include <rte_event_eth_rx_adapter.h>  #include
> > > > <rte_event_eth_tx_adapter.h>
> > > >
> > > > @@ -865,10 +866,10 @@ static int
> > > >  dpaa2_eventdev_crypto_queue_add(const struct rte_eventdev *dev,
> > > >  		const struct rte_cryptodev *cryptodev,
> > > >  		int32_t rx_queue_id,
> > > > -		const struct rte_event *ev)
> > > > +		const struct rte_event_crypto_adapter_queue_conf *conf)
> > > >  {
> > > >  	struct dpaa2_eventdev *priv = dev->data->dev_private;
> > > > -	uint8_t ev_qid = ev->queue_id;
> > > > +	uint8_t ev_qid = conf->ev.queue_id;
> > > >  	struct dpaa2_dpcon_dev *dpcon = priv->evq_info[ev_qid].dpcon;
> > > >  	int ret;
> > > >
> > > > @@ -876,10 +877,10 @@ dpaa2_eventdev_crypto_queue_add(const struct
> > > > rte_eventdev *dev,
> > > >
> > > >  	if (rx_queue_id == -1)
> > > >  		return dpaa2_eventdev_crypto_queue_add_all(dev,
> > > > -				cryptodev, ev);
> > > > +				cryptodev, &conf->ev);
> > > >
> > > >  	ret = dpaa2_sec_eventq_attach(cryptodev, rx_queue_id,
> > > > -				      dpcon, ev);
> > > > +				      dpcon, &conf->ev);
> > > >  	if (ret) {
> > > >  		DPAA2_EVENTDEV_ERR(
> > > >  			"dpaa2_sec_eventq_attach failed: ret: %d\n", ret); diff
> > --git
> > > > a/drivers/event/octeontx/ssovf_evdev.c
> > > > b/drivers/event/octeontx/ssovf_evdev.c
> > > > index 9e14e35d10..17acd8ef64 100644
> > > > --- a/drivers/event/octeontx/ssovf_evdev.c
> > > > +++ b/drivers/event/octeontx/ssovf_evdev.c
> > > > @@ -745,12 +745,12 @@ static int
> > > >  ssovf_crypto_adapter_qp_add(const struct rte_eventdev *dev,
> > > >  			    const struct rte_cryptodev *cdev,
> > > >  			    int32_t queue_pair_id,
> > > > -			    const struct rte_event *event)
> > > > +			    const struct rte_event_crypto_adapter_queue_conf
> > > > *conf)
> > > >  {
> > > >  	struct cpt_instance *qp;
> > > >  	uint8_t qp_id;
> > > >
> > > > -	RTE_SET_USED(event);
> > > > +	RTE_SET_USED(conf);
> > > >
> > > >  	if (queue_pair_id == -1) {
> > > >  		for (qp_id = 0; qp_id < cdev->data->nb_queue_pairs; qp_id++) {
> > > > diff --git a/lib/eventdev/eventdev_pmd.h
> > > > b/lib/eventdev/eventdev_pmd.h index 69402668d8..bcfc9cbcb2 100644
> > > > --- a/lib/eventdev/eventdev_pmd.h
> > > > +++ b/lib/eventdev/eventdev_pmd.h
> > > > @@ -907,6 +907,7 @@ rte_event_pmd_selftest_seqn(struct rte_mbuf
> > > > *mbuf) }
> > > >
> > > >  struct rte_cryptodev;
> > > > +struct rte_event_crypto_adapter_queue_conf;
> > > >
> > > >  /**
> > > >   * This API may change without prior notice @@ -961,11 +962,11 @@
> > > > typedef int (*eventdev_crypto_adapter_caps_get_t)
> > > >   *   - <0: Error code returned by the driver function.
> > > >   *
> > > >   */
> > > > -typedef int (*eventdev_crypto_adapter_queue_pair_add_t)
> > > > -			(const struct rte_eventdev *dev,
> > > > -			 const struct rte_cryptodev *cdev,
> > > > -			 int32_t queue_pair_id,
> > > > -			 const struct rte_event *event);
> > > > +typedef int (*eventdev_crypto_adapter_queue_pair_add_t)(
> > > > +		const struct rte_eventdev *dev,
> > > > +		const struct rte_cryptodev *cdev,
> > > > +		int32_t queue_pair_id,
> > > > +		const struct rte_event_crypto_adapter_queue_conf
> > > > *queue_conf);
> > > >
> > > >
> > > >  /**
> > > > @@ -1074,6 +1075,27 @@ typedef int
> > > > (*eventdev_crypto_adapter_stats_reset)
> > > >  			(const struct rte_eventdev *dev,
> > > >  			 const struct rte_cryptodev *cdev);
> > > >
> > > > +struct rte_event_crypto_adapter_vector_limits;
> > > > +/**
> > > > + * Get event vector limits for a given event, crypto device pair.
> > > > + *
> > > > + * @param dev
> > > > + *   Event device pointer
> > > > + *
> > > > + * @param cdev
> > > > + *   Crypto device pointer
> > > > + *
> > > > + * @param[out] limits
> > > > + *   Pointer to the limits structure to be filled.
> > > > + *
> > > > + * @return
> > > > + *   - 0: Success.
> > > > + *   - <0: Error code returned by the driver function.
> > > > + */
> > > > +typedef int (*eventdev_crypto_adapter_vector_limits_get_t)(
> > > > +	const struct rte_eventdev *dev, const struct rte_cryptodev *cdev,
> > > > +	struct rte_event_crypto_adapter_vector_limits *limits);
> > > > +
> > > >  /**
> > > >   * Retrieve the event device's eth Tx adapter capabilities.
> > > >   *
> > > > @@ -1339,6 +1361,9 @@ struct eventdev_ops {
> > > >  	/**< Get crypto stats */
> > > >  	eventdev_crypto_adapter_stats_reset crypto_adapter_stats_reset;
> > > >  	/**< Reset crypto stats */
> > > > +	eventdev_crypto_adapter_vector_limits_get_t
> > > > +		crypto_adapter_vector_limits_get;
> > > > +	/**< Get event vector limits for the crypto adapter */
> > > >
> > > >  	eventdev_eth_rx_adapter_q_stats_get
> > > > eth_rx_adapter_queue_stats_get;
> > > >  	/**< Get ethernet Rx queue stats */ diff --git
> > > > a/lib/eventdev/eventdev_trace.h b/lib/eventdev/eventdev_trace.h
> > > > index 5ec43d80ee..d48cd58850 100644
> > > > --- a/lib/eventdev/eventdev_trace.h
> > > > +++ b/lib/eventdev/eventdev_trace.h
> > > > @@ -18,6 +18,7 @@ extern "C" {
> > > >  #include <rte_trace_point.h>
> > > >
> > > >  #include "rte_eventdev.h"
> > > > +#include "rte_event_crypto_adapter.h"
> > > >  #include "rte_event_eth_rx_adapter.h"
> > > >  #include "rte_event_timer_adapter.h"
> > > >
> > > > @@ -271,11 +272,12 @@ RTE_TRACE_POINT(  RTE_TRACE_POINT(
> > > >  	rte_eventdev_trace_crypto_adapter_queue_pair_add,
> > > >  	RTE_TRACE_POINT_ARGS(uint8_t adptr_id, uint8_t cdev_id,
> > > > -		const void *event, int32_t queue_pair_id),
> > > > +		int32_t queue_pair_id,
> > > > +		const struct rte_event_crypto_adapter_queue_conf *conf),
> > > >  	rte_trace_point_emit_u8(adptr_id);
> > > >  	rte_trace_point_emit_u8(cdev_id);
> > > >  	rte_trace_point_emit_i32(queue_pair_id);
> > > > -	rte_trace_point_emit_ptr(event);
> > > > +	rte_trace_point_emit_ptr(conf);
> > > >  )
> > > >
> > > >  RTE_TRACE_POINT(
> > > > diff --git a/lib/eventdev/rte_event_crypto_adapter.c b/lib/eventdev/rte_event_crypto_adapter.c
> > > > index 7c695176f4..73a4f231e2 100644
> > > > --- a/lib/eventdev/rte_event_crypto_adapter.c
> > > > +++ b/lib/eventdev/rte_event_crypto_adapter.c
> > > > @@ -921,11 +921,12 @@ int
> > > >  rte_event_crypto_adapter_queue_pair_add(uint8_t id,
> > > >  			uint8_t cdev_id,
> > > >  			int32_t queue_pair_id,
> > > > -			const struct rte_event *event)
> > > > +			const struct rte_event_crypto_adapter_queue_conf *conf)
> > > >  {
> > > > +	struct rte_event_crypto_adapter_vector_limits limits;
> > > >  	struct event_crypto_adapter *adapter;
> > > > -	struct rte_eventdev *dev;
> > > >  	struct crypto_device_info *dev_info;
> > > > +	struct rte_eventdev *dev;
> > > >  	uint32_t cap;
> > > >  	int ret;
> > > >
> > > > @@ -951,11 +952,47 @@ rte_event_crypto_adapter_queue_pair_add(uint8_t id,
> > > >  	}
> > > >
> > > >  	if ((cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND) &&
> > > > -	    (event == NULL)) {
> > > > +	    (conf == NULL)) {
> > > >  		RTE_EDEV_LOG_ERR("Conf value can not be NULL for dev_id=%u",
> > > >  				  cdev_id);
> > > >  		return -EINVAL;
> > > >  	}
> > > Newline?
> > >
> > Ack
> >
> > > > +	if ((conf != NULL) &&
> > > Checking conf twice?
> > Will rewrite as if conf == NULL/else, to avoid double checking.
> >
> > > > +	    (conf->flags & RTE_EVENT_CRYPTO_ADAPTER_EVENT_VECTOR)) {
> > > Else condition if the flag is not set?
> > There's no additional handling for the case when the flag is not set.
> >
> > > > +		if ((cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_EVENT_VECTOR) == 0) {
> > > > +			RTE_EDEV_LOG_ERR("Event vectorization is not supported,"
> > > > +					 "dev %" PRIu8 " cdev %" PRIu8, id,
> > > > +					 cdev_id);
> > > > +			return -ENOTSUP;
> > > > +		}
> > > > +
> > > > +		ret = rte_event_crypto_adapter_vector_limits_get(
> > > > +			adapter->eventdev_id, cdev_id, &limits);
> > > > +		if (ret < 0) {
> > > > +			RTE_EDEV_LOG_ERR("Failed to get event device vector "
> > > > +					 "limits, dev %" PRIu8 " cdev %" PRIu8,
> > > > +					 id, cdev_id);
> > > > +			return -EINVAL;
> > > > +		}
> > > New line? Please check other cases.
> > >
> > Ack
> >
> > > > +		if (conf->vector_sz < limits.min_sz ||
> > > > +		    conf->vector_sz > limits.max_sz ||
> > > > +		    conf->vector_timeout_ns < limits.min_timeout_ns ||
> > > > +		    conf->vector_timeout_ns > limits.max_timeout_ns ||
> > > > +		    conf->vector_mp == NULL) {
> > > > +			RTE_EDEV_LOG_ERR("Invalid event vector configuration,"
> > > > +					 " dev %" PRIu8 " cdev %" PRIu8,
> > > > +					 id, cdev_id);
> > > > +			return -EINVAL;
> > > > +		}
> > > > +		if (conf->vector_mp->elt_size <
> > > > +		    (sizeof(struct rte_event_vector) +
> > > > +		     (sizeof(uintptr_t) * conf->vector_sz))) {
> > > > +			RTE_EDEV_LOG_ERR("Invalid event vector configuration,"
> > > > +					 " dev %" PRIu8 " cdev %" PRIu8,
> > > > +					 id, cdev_id);
> > > > +			return -EINVAL;
> > > > +		}
> > > > +	}
> > > >
> > > >  	dev_info = &adapter->cdevs[cdev_id];
> > > >
> > > > @@ -990,7 +1027,7 @@ rte_event_crypto_adapter_queue_pair_add(uint8_t id,
> > > >  		ret = (*dev->dev_ops->crypto_adapter_queue_pair_add)(dev,
> > > >  				dev_info->dev,
> > > >  				queue_pair_id,
> > > > -				event);
> > > > +				conf);
> > > >  		if (ret)
> > > >  			return ret;
> > > >
> > > > @@ -1030,8 +1067,8 @@ rte_event_crypto_adapter_queue_pair_add(uint8_t id,
> > > >  		rte_service_component_runstate_set(adapter->service_id, 1);
> > > >  	}
> > > >
> > > > -	rte_eventdev_trace_crypto_adapter_queue_pair_add(id, cdev_id, event,
> > > > -		queue_pair_id);
> > > > +	rte_eventdev_trace_crypto_adapter_queue_pair_add(id, cdev_id,
> > > > +		queue_pair_id, conf);
> > > >  	return 0;
> > > >  }
> > > >
> > > > @@ -1290,3 +1327,44 @@ rte_event_crypto_adapter_event_port_get(uint8_t id, uint8_t *event_port_id)
> > > >
> > > >  	return 0;
> > > >  }
> > > > +
> > > > +int
> > > > +rte_event_crypto_adapter_vector_limits_get(
> > > > +	uint8_t dev_id, uint16_t cdev_id,
> > > > +	struct rte_event_crypto_adapter_vector_limits *limits)
> > > > +{
> > > > +	struct rte_cryptodev *cdev;
> > > > +	struct rte_eventdev *dev;
> > > > +	uint32_t cap;
> > > > +	int ret;
> > > > +
> > > > +	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
> > > > +
> > > > +	if (!rte_cryptodev_is_valid_dev(cdev_id)) {
> > > > +		RTE_EDEV_LOG_ERR("Invalid dev_id=%" PRIu8, cdev_id);
> > > > +		return -EINVAL;
> > > > +	}
> > > > +
> > > > +	if (limits == NULL)
> > > > +		return -EINVAL;
> > > Add appropriate error message like above?
> > Ack, will add.
> >
> > > > +
> > > > +	dev = &rte_eventdevs[dev_id];
> > > > +	cdev = rte_cryptodev_pmd_get_dev(cdev_id);
> > > > +
> > > > +	ret = rte_event_crypto_adapter_caps_get(dev_id, cdev_id, &cap);
> > > > +	if (ret) {
> > > > +		RTE_EDEV_LOG_ERR("Failed to get adapter caps edev %" PRIu8
> > > > +				 "cdev %" PRIu16, dev_id, cdev_id);
> > > > +		return ret;
> > > > +	}
> > > > +
> > > > +	if (!(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_EVENT_VECTOR))
> > > > +		return -ENOTSUP;
> > > Same here.
> > Ack, will add.
> >
> > > > +
> > > > +	RTE_FUNC_PTR_OR_ERR_RET(
> > > > +		*dev->dev_ops->crypto_adapter_vector_limits_get,
> > > > +		-ENOTSUP);
> > > > +
> > > > +	return dev->dev_ops->crypto_adapter_vector_limits_get(
> > > > +		dev, cdev, limits);
> > > > +}
> > > > diff --git a/lib/eventdev/rte_event_crypto_adapter.h b/lib/eventdev/rte_event_crypto_adapter.h
> > > > index d90a19e72c..7dd6171b9b 100644
> > > > --- a/lib/eventdev/rte_event_crypto_adapter.h
> > > > +++ b/lib/eventdev/rte_event_crypto_adapter.h
> > > > @@ -253,6 +253,78 @@ struct rte_event_crypto_adapter_conf {
> > > >  	 */
> > > >  };
> > > >
> > > > +#define RTE_EVENT_CRYPTO_ADAPTER_EVENT_VECTOR	0x1
> > > > +/**< This flag indicates that crypto operations processed on the crypto
> > > > + * adapter need to be vectorized
> > > > + * @see rte_event_crypto_adapter_queue_conf::flags
> > > > + */
> > > > +
> > > > +/**
> > > > + * Adapter queue configuration structure
> > > > + */
> > > > +struct rte_event_crypto_adapter_queue_conf {
> > > > +	uint32_t flags;
> > > > +	/**< Flags for handling crypto operations
> > > > +	 * @see RTE_EVENT_CRYPTO_ADAPTER_EVENT_VECTOR
> > > > +	 */
> > > > +	struct rte_event ev;
> > > > +	/**< If HW supports cryptodev queue pair to event queue binding,
> > > > +	 * application is expected to fill in event information.
> > > > +	 * @see RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND
> > > > +	 */
> > > > +	uint16_t vector_sz;
> > > > +	/**< Indicates the maximum number for crypto operations to combine and
> > > > +	 * form a vector.
> > > > +	 * @see rte_event_crypto_adapter_vector_limits::min_sz
> > > > +	 * @see rte_event_crypto_adapter_vector_limits::max_sz
> > > > +	 * Valid when RTE_EVENT_CRYPTO_ADAPTER_EVENT_VECTOR flag is set in
> > > > +	 * @see rte_event_crypto_adapter_queue_conf::rx_queue_flags
> > > rx_queue_flags??
> > Typo, should be conf::flags.
> >
> > > > +	 */
> > > > +	uint64_t vector_timeout_ns;
> > > > +	/**<
> > > > +	 * Indicates the maximum number of nanoseconds to wait for aggregating
> > > > +	 * crypto operations. Should be within vectorization limits of the
> > > > +	 * adapter
> > > > +	 * @see rte_event_crypto_adapter_vector_limits::min_timeout_ns
> > > > +	 * @see rte_event_crypto_adapter_vector_limits::max_timeout_ns
> > > > +	 * Valid when RTE_EVENT_CRYPTO_ADAPTER_EVENT_VECTOR flag is set in
> > > > +	 * @see rte_event_crypto_adapter_queue_conf::flags
> > > > +	 */
> > > > +	struct rte_mempool *vector_mp;
> > > > +	/**< Indicates the mempool that should be used for allocating
> > > > +	 * rte_event_vector container.
> > > > +	 * Should be created by using `rte_event_vector_pool_create`.
> > > > +	 * Valid when RTE_EVENT_CRYPTO_ADAPTER_EVENT_VECTOR flag is set in
> > > > +	 * @see rte_event_crypto_adapter_queue_conf::flags.
> > > > +	 */
> > > > +};
> > > > +
> > > > +/**
> > > > + * A structure used to retrieve event crypto adapter vector limits.
> > > > + */
> > > > +struct rte_event_crypto_adapter_vector_limits {
> > > > +	uint16_t min_sz;
> > > > +	/**< Minimum vector limit configurable.
> > > > +	 * @see rte_event_crypto_adapter_queue_conf::vector_sz
> > > > +	 */
> > > > +	uint16_t max_sz;
> > > > +	/**< Maximum vector limit configurable.
> > > > +	 * @see rte_event_crypto_adapter_queue_conf::vector_sz
> > > > +	 */
> > > > +	uint8_t log2_sz;
> > > > +	/**< True if the size configured should be in log2.
> > > > +	 * @see rte_event_crypto_adapter_queue_conf::vector_sz
> > > > +	 */
> > > > +	uint64_t min_timeout_ns;
> > > > +	/**< Minimum vector timeout configurable.
> > > > +	 * @see rte_event_crypto_adapter_queue_conf::vector_timeout_ns
> > > > +	 */
> > > > +	uint64_t max_timeout_ns;
> > > > +	/**< Maximum vector timeout configurable.
> > > > +	 * @see rte_event_crypto_adapter_queue_conf::vector_timeout_ns
> > > > +	 */
> > > > +};
> > > > +
> > > >  /**
> > > >   * Function type used for adapter configuration callback. The callback is
> > > >   * used to fill in members of the struct rte_event_crypto_adapter_conf, this
> > > > @@ -392,10 +464,9 @@ rte_event_crypto_adapter_free(uint8_t id);
> > > >   *  Cryptodev queue pair identifier. If queue_pair_id is set -1,
> > > >   *  adapter adds all the pre configured queue pairs to the instance.
> > > >   *
> > > > - * @param event
> > > > - *  if HW supports cryptodev queue pair to event queue binding, application is
> > > > - *  expected to fill in event information, else it will be NULL.
> > > > - *  @see RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND
> > > > + * @param conf
> > > > + *  Additional configuration structure of type
> > > > + *  *rte_event_crypto_adapter_queue_conf*
> > > >   *
> > > >   * @return
> > > >   *  - 0: Success, queue pair added correctly.
> > > > @@ -405,7 +476,7 @@ int
> > > >  rte_event_crypto_adapter_queue_pair_add(uint8_t id,
> > > >  			uint8_t cdev_id,
> > > >  			int32_t queue_pair_id,
> > > > -			const struct rte_event *event);
> > > > +			const struct rte_event_crypto_adapter_queue_conf *conf);
> > > >
> > > >  /**
> > > >   * Delete a queue pair from an event crypto adapter.
> > > > @@ -523,6 +594,26 @@ rte_event_crypto_adapter_service_id_get(uint8_t id, uint32_t *service_id);
> > > >  int rte_event_crypto_adapter_event_port_get(uint8_t id, uint8_t *event_port_id);
> > > >
> > > > +/**
> > > > + * Retrieve vector limits for a given event dev and crypto dev pair.
> > > > + * @see rte_event_crypto_adapter_vector_limits
> > > > + *
> > > > + * @param dev_id
> > > > + *  Event device identifier.
> > > > + * @param cdev_id
> > > > + *  Crypto device identifier.
> > > > + * @param [out] limits
> > > > + *  A pointer to rte_event_crypto_adapter_vector_limits structure that has to
> > > > + * be filled.
> > > Space missing before "be filled"
> > Ack
> >
> > > > + *
> > > > + * @return
> > > > + *  - 0: Success.
> > > > + *  - <0: Error code on failure.
> > > > + */
> > > > +int rte_event_crypto_adapter_vector_limits_get(
> > > > +	uint8_t dev_id, uint16_t cdev_id,
> > > > +	struct rte_event_crypto_adapter_vector_limits *limits);
> > > > +
> > > >  /**
> > > >   * Enqueue a burst of crypto operations as event objects supplied in *rte_event*
> > > >   * structure on an event crypto adapter designated by its event *dev_id* through
> > > > diff --git a/lib/eventdev/rte_event_eth_rx_adapter.h b/lib/eventdev/rte_event_eth_rx_adapter.h
> > > > index 3608a7b2cf..c8f2936866 100644
> > > > --- a/lib/eventdev/rte_event_eth_rx_adapter.h
> > > > +++ b/lib/eventdev/rte_event_eth_rx_adapter.h
> > > > @@ -457,7 +457,8 @@ int rte_event_eth_rx_adapter_free(uint8_t id);
> > > >   * @see RTE_EVENT_ETH_RX_ADAPTER_CAP_MULTI_EVENTQ
> > > >   *
> > > >   * @param conf
> > > > - *  Additional configuration structure of type *rte_event_eth_rx_adapter_conf*
> > > > + *  Additional configuration structure of type
> > > > + *  *rte_event_eth_rx_adapter_queue_conf*
> > > These changes are not relevant. Please consider sending separate patch.
> > >
> > Ack, Will send this change as a separate patch.
> >
> > > >   *
> > > >   * @return
> > > >   *  - 0: Success, Receive queue added correctly.
> > > > diff --git a/lib/eventdev/rte_eventdev.h b/lib/eventdev/rte_eventdev.h
> > > > index 6a6f6ea4c1..1a737bf851 100644
> > > > --- a/lib/eventdev/rte_eventdev.h
> > > > +++ b/lib/eventdev/rte_eventdev.h
> > > > @@ -1203,6 +1203,9 @@ struct rte_event_vector {
> > > >  #define RTE_EVENT_TYPE_ETH_RX_ADAPTER_VECTOR                                  \
> > > >  	(RTE_EVENT_TYPE_VECTOR | RTE_EVENT_TYPE_ETH_RX_ADAPTER)
> > > >  /**< The event vector generated from eth Rx adapter. */
> > > > +#define RTE_EVENT_TYPE_CRYPTODEV_VECTOR                                        \
> > > > +	(RTE_EVENT_TYPE_VECTOR | RTE_EVENT_TYPE_CRYPTODEV)
> > > > +/**< The event vector generated from cryptodev adapter. */
> > > >
> > > >  #define RTE_EVENT_TYPE_MAX              0x10
> > > >  /**< Maximum number of event types */
> > > > @@ -1420,6 +1423,11 @@ rte_event_timer_adapter_caps_get(uint8_t dev_id, uint32_t *caps);
> > > >   * the private data information along with the crypto session.
> > > >   */
> > > >
> > > > +#define RTE_EVENT_CRYPTO_ADAPTER_CAP_EVENT_VECTOR   0x10
> > > > +/**< Flag indicates HW is capable of aggregating processed
> > > > + * crypto operations into rte_event_vector.
> > > > + */
> > > > +
> > > >  /**
> > > >   * Retrieve the event device's crypto adapter capabilities for the
> > > >   * specified cryptodev device
> > > > --
> > > > 2.25.1
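
For reference, here is a minimal usage sketch of the API proposed above, assembled from the declarations quoted in this patch. The device IDs, adapter ID, mempool name and sizing are illustrative assumptions, not part of the patch:

    struct rte_event_crypto_adapter_vector_limits limits;
    struct rte_event_crypto_adapter_queue_conf conf = {0};
    int ret;

    /* Query vectorization limits for the eventdev/cryptodev pair. */
    ret = rte_event_crypto_adapter_vector_limits_get(evdev_id, cdev_id, &limits);
    if (ret == 0) {
        conf.flags = RTE_EVENT_CRYPTO_ADAPTER_EVENT_VECTOR;
        conf.vector_sz = limits.max_sz;            /* within [min_sz, max_sz] */
        conf.vector_timeout_ns = limits.min_timeout_ns;
        /* rte_event_vector_pool_create() sizes each element to hold a
         * rte_event_vector plus vector_sz pointers, which satisfies the
         * elt_size check performed in queue_pair_add() above. */
        conf.vector_mp = rte_event_vector_pool_create("ca_vec_pool", 1024, 0,
                conf.vector_sz, rte_socket_id());
        /* When the INTERNAL_PORT_QP_EV_BIND capability is set, conf.ev
         * must also be filled in. */
    }
    ret = rte_event_crypto_adapter_queue_pair_add(adapter_id, cdev_id, -1, &conf);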


^ permalink raw reply	[flat|nested] 36+ messages in thread

* RE: [PATCH 1/3] eventdev: introduce event cryptodev vector type
  2022-09-27  9:24           ` Volodymyr Fialko
@ 2022-09-27  9:38             ` Gujjar, Abhinandan S
  2022-09-27 13:26               ` Jerin Jacob
  0 siblings, 1 reply; 36+ messages in thread
From: Gujjar, Abhinandan S @ 2022-09-27  9:38 UTC (permalink / raw)
  To: Volodymyr Fialko, dev, Jerin Jacob Kollanukkaran,
	Pavan Nikhilesh Bhagavatula, Shijith Thotton, Hemant Agrawal,
	Sachin Saxena, Jayatheerthan, Jay
  Cc: Akhil Goyal, Anoob Joseph



> -----Original Message-----
> From: Volodymyr Fialko <vfialko@marvell.com>
> Sent: Tuesday, September 27, 2022 2:55 PM
> To: Gujjar, Abhinandan S <abhinandan.gujjar@intel.com>; dev@dpdk.org; Jerin
> Jacob Kollanukkaran <jerinj@marvell.com>; Pavan Nikhilesh Bhagavatula
> <pbhagavatula@marvell.com>; Shijith Thotton <sthotton@marvell.com>;
> Hemant Agrawal <hemant.agrawal@nxp.com>; Sachin Saxena
> <sachin.saxena@oss.nxp.com>; Jayatheerthan, Jay
> <jay.jayatheerthan@intel.com>
> Cc: Akhil Goyal <gakhil@marvell.com>; Anoob Joseph <anoobj@marvell.com>
> Subject: RE: [PATCH 1/3] eventdev: introduce event cryptodev vector type
> 
> 
> 
> > -----Original Message-----
> > From: Gujjar, Abhinandan S <abhinandan.gujjar@intel.com>
> > Sent: Tuesday, September 27, 2022 11:05 AM
> > To: Volodymyr Fialko <vfialko@marvell.com>; dev@dpdk.org; Jerin Jacob
> > Kollanukkaran <jerinj@marvell.com>; Pavan Nikhilesh Bhagavatula
> > <pbhagavatula@marvell.com>; Shijith Thotton <sthotton@marvell.com>;
> > Hemant Agrawal <hemant.agrawal@nxp.com>; Sachin Saxena
> > <sachin.saxena@oss.nxp.com>; Jayatheerthan, Jay
> > <jay.jayatheerthan@intel.com>
> > Cc: Akhil Goyal <gakhil@marvell.com>; Anoob Joseph
> > <anoobj@marvell.com>
> > Subject: [EXT] RE: [PATCH 1/3] eventdev: introduce event cryptodev
> > vector type
> >
> > External Email
> >
> > ----------------------------------------------------------------------
> >
> >
> > > -----Original Message-----
> > > From: Volodymyr Fialko <vfialko@marvell.com>
> > > Sent: Monday, September 26, 2022 4:32 PM
> > > To: Gujjar, Abhinandan S <abhinandan.gujjar@intel.com>;
> > > dev@dpdk.org; Jerin Jacob Kollanukkaran <jerinj@marvell.com>; Pavan
> > > Nikhilesh Bhagavatula <pbhagavatula@marvell.com>; Shijith Thotton
> > > <sthotton@marvell.com>; Hemant Agrawal <hemant.agrawal@nxp.com>;
> > > Sachin Saxena <sachin.saxena@oss.nxp.com>; Jayatheerthan, Jay
> > > <jay.jayatheerthan@intel.com>
> > > Cc: Akhil Goyal <gakhil@marvell.com>; Anoob Joseph
> > > <anoobj@marvell.com>
> > > Subject: RE: [PATCH 1/3] eventdev: introduce event cryptodev vector
> > > type
> > >
> > >
> > >
> > > > -----Original Message-----
> > > > From: Gujjar, Abhinandan S <abhinandan.gujjar@intel.com>
> > > > Sent: Saturday, September 24, 2022 10:44 AM
> > > > To: Volodymyr Fialko <vfialko@marvell.com>; dev@dpdk.org; Jerin
> > > > Jacob Kollanukkaran <jerinj@marvell.com>; Pavan Nikhilesh
> > > > Bhagavatula <pbhagavatula@marvell.com>; Shijith Thotton
> > > > <sthotton@marvell.com>; Hemant Agrawal <hemant.agrawal@nxp.com>;
> > > > Sachin Saxena <sachin.saxena@oss.nxp.com>; Jayatheerthan, Jay
> > > > <jay.jayatheerthan@intel.com>
> > > > Cc: Akhil Goyal <gakhil@marvell.com>; Anoob Joseph
> > > > <anoobj@marvell.com>
> > > > Subject: [EXT] RE: [PATCH 1/3] eventdev: introduce event cryptodev
> > > > vector type
> > > >
> > > > External Email
> > > >
> > > > ------------------------------------------------------------------
> > > > --
> > > > --
> > > >
> > > >
> > > > > -----Original Message-----
> > > > > From: Volodymyr Fialko <vfialko@marvell.com>
> > > > > Sent: Thursday, August 4, 2022 3:29 PM
> > > > > To: dev@dpdk.org; Jerin Jacob <jerinj@marvell.com>; Gujjar,
> > > > > Abhinandan S <abhinandan.gujjar@intel.com>; Pavan Nikhilesh
> > > > > <pbhagavatula@marvell.com>; Shijith Thotton
> > > > > <sthotton@marvell.com>; Hemant Agrawal
> <hemant.agrawal@nxp.com>;
> > > > > Sachin Saxena <sachin.saxena@oss.nxp.com>; Jayatheerthan, Jay
> > > > > <jay.jayatheerthan@intel.com>
> > > > > Cc: gakhil@marvell.com; anoobj@marvell.com; Volodymyr Fialko
> > > > > <vfialko@marvell.com>
> > > > > Subject: [PATCH 1/3] eventdev: introduce event cryptodev vector
> > > > > type
> > > > >
> > > > > Introduce ability to aggregate crypto operations processed by
> > > > > event crypto adapter into single event containing
> > > > > rte_event_vector whose event type is
> RTE_EVENT_TYPE_CRYPTODEV_VECTOR.
> > > > >
> > > > > Application should set RTE_EVENT_CRYPTO_ADAPTER_EVENT_VECTOR
> in
> > > > > rte_event_crypto_adapter_queue_conf::flag and provide vector
> > > > > configuration with respect of
> > > > > rte_event_crypto_adapter_vector_limits,
> > > > > which could be obtained by calling
> > > > > rte_event_crypto_adapter_vector_limits_get, to enable vectorization.
> > > > >
> > > > > The event crypto adapter would be responsible for vectorizing
> > > > > the crypto operations based on provided response information in
> > > > > rte_event_crypto_metadata::response_info.
> > > > >
> > > > > Updated drivers and tests accordingly to new API.
> > > > >
> > > > > Signed-off-by: Volodymyr Fialko <vfialko@marvell.com>
> > > > > ---
> > > > >  app/test-eventdev/test_perf_common.c          |  10 +-
> > > > >  app/test/test_event_crypto_adapter.c          |  12 ++-
> > > > >  .../prog_guide/event_crypto_adapter.rst       |  23 +++-
> > > > >  drivers/event/cnxk/cn10k_eventdev.c           |   4 +-
> > > > >  drivers/event/cnxk/cn9k_eventdev.c            |   5 +-
> > > > >  drivers/event/dpaa/dpaa_eventdev.c            |   9 +-
> > > > >  drivers/event/dpaa2/dpaa2_eventdev.c          |   9 +-
> > > > >  drivers/event/octeontx/ssovf_evdev.c          |   4 +-
> > > > >  lib/eventdev/eventdev_pmd.h                   |  35 +++++-
> > > > >  lib/eventdev/eventdev_trace.h                 |   6 +-
> > > > >  lib/eventdev/rte_event_crypto_adapter.c       |  90 ++++++++++++++--
> > > > >  lib/eventdev/rte_event_crypto_adapter.h       | 101
> +++++++++++++++++-
> > > > >  lib/eventdev/rte_event_eth_rx_adapter.h       |   3 +-
> > > > >  lib/eventdev/rte_eventdev.h                   |   8 ++
> > > > >  14 files changed, 276 insertions(+), 43 deletions(-)
> > > > >
> > > >
> > > > I don't see dataplane implementation of vectorization in the crypto
> adapter!
> > > > Is it missed out in the patch?
> > > > comments inline.
> > > >
> > > Right now we are targeting crypto_cn10k PMD and ipsec-secgw event
> > > mode to support vectorization.
> > Is there a way to test this? When can be dataplane changes expected?
> >
> If the spec looks okay, support in s/w crypto adapter and other h/w PMDs can
> be added by respective maintainers. Currently, we are adding library change,
> support for one PMD and an application to test the feature. Feature is exposed
> with capability flag to not break existing functionality.
Got it. How do I test this feature without data plane changes?
This design is right now tested only for cn10k; I am not sure whether it works for the SW adapter.
If there is a way to test this, please let me know.
I need to have perf data with and without vectorization support to approve.
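
For context, the capability flag referred to above is the RTE_EVENT_CRYPTO_ADAPTER_CAP_EVENT_VECTOR bit added by this patch. A rough sketch of how an application would gate vectorization on it (evdev_id, cdev_id and queue_conf are placeholders):

    uint32_t caps = 0;

    if (rte_event_crypto_adapter_caps_get(evdev_id, cdev_id, &caps) == 0 &&
        (caps & RTE_EVENT_CRYPTO_ADAPTER_CAP_EVENT_VECTOR))
        queue_conf.flags |= RTE_EVENT_CRYPTO_ADAPTER_EVENT_VECTOR;
    /* Otherwise keep one event per crypto op; existing behaviour is
     * unchanged, which is how the series avoids breaking older PMDs. */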

> 
> > >
> > > > > diff --git a/app/test-eventdev/test_perf_common.c b/app/test-eventdev/test_perf_common.c
> > > > > index 81420be73a..c770bc93f6 100644
> > > > > --- a/app/test-eventdev/test_perf_common.c
> > > > > +++ b/app/test-eventdev/test_perf_common.c
> > > > > @@ -837,14 +837,14 @@ perf_event_crypto_adapter_setup(struct test_perf *t, struct prod_data *p)
> > > > >  	}
> > > > >
> > > > >  	if (cap &
> > > > > RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND) {
> > > > > -		struct rte_event response_info;
> > > > > +		struct rte_event_crypto_adapter_queue_conf conf;
> > > > >
> > > > > -		response_info.event = 0;
> > > > > -		response_info.sched_type = RTE_SCHED_TYPE_ATOMIC;
> > > > > -		response_info.queue_id = p->queue_id;
> > > > > +		memset(&conf, 0, sizeof(conf));
> > > > > +		conf.ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
> > > > > +		conf.ev.queue_id = p->queue_id;
> > > > >  		ret = rte_event_crypto_adapter_queue_pair_add(
> > > > >  			TEST_PERF_CA_ID, p->ca.cdev_id, p->ca.cdev_qp_id,
> > > > > -			&response_info);
> > > > > +			&conf);
> > > > >  	} else {
> > > > >  		ret = rte_event_crypto_adapter_queue_pair_add(
> > > > >  			TEST_PERF_CA_ID, p->ca.cdev_id, p->ca.cdev_qp_id, NULL);
> > > > > diff --git a/app/test/test_event_crypto_adapter.c b/app/test/test_event_crypto_adapter.c
> > > > > index 2ecc7e2cea..bb617c1042 100644
> > > > > --- a/app/test/test_event_crypto_adapter.c
> > > > > +++ b/app/test/test_event_crypto_adapter.c
> > > > > @@ -1175,6 +1175,10 @@ test_crypto_adapter_create(void)
> > > > >  static int
> > > > >  test_crypto_adapter_qp_add_del(void)
> > > > >  {
> > > > > +	struct rte_event_crypto_adapter_queue_conf queue_conf = {
> > > > > +		.ev = response_info,
> > > > > +	};
> > > > > +
> > > > >  	uint32_t cap;
> > > > >  	int ret;
> > > > >
> > > > > @@ -1183,7 +1187,7 @@ test_crypto_adapter_qp_add_del(void)
> > > > >
> > > > >  	if (cap &
> > > > > RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND) {
> > > > >  		ret = rte_event_crypto_adapter_queue_pair_add(TEST_ADAPTER_ID,
> > > > > -				TEST_CDEV_ID, TEST_CDEV_QP_ID, &response_info);
> > > > > +				TEST_CDEV_ID, TEST_CDEV_QP_ID, &queue_conf);
> > > > >  	} else
> > > > >  		ret = rte_event_crypto_adapter_queue_pair_add(TEST_ADAPTER_ID,
> > > > >  					TEST_CDEV_ID, TEST_CDEV_QP_ID, NULL);
> > > > > @@ -1206,6 +1210,10 @@ configure_event_crypto_adapter(enum rte_event_crypto_adapter_mode mode)
> > > > >  		.new_event_threshold = 1200,
> > > > >  	};
> > > > >
> > > > > +	struct rte_event_crypto_adapter_queue_conf queue_conf = {
> > > > > +		.ev = response_info,
> > > > > +	};
> > > > > +
> > > > >  	uint32_t cap;
> > > > >  	int ret;
> > > > >
> > > > > @@ -1238,7 +1246,7 @@ configure_event_crypto_adapter(enum
> > > > > rte_event_crypto_adapter_mode mode)
> > > > >
> > > > >  	if (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND) {
> > > > >  		ret = rte_event_crypto_adapter_queue_pair_add(TEST_ADAPTER_ID,
> > > > > -				TEST_CDEV_ID, TEST_CDEV_QP_ID, &response_info);
> > > > > +				TEST_CDEV_ID, TEST_CDEV_QP_ID, &queue_conf);
> > > > >  	} else
> > > > >  		ret = rte_event_crypto_adapter_queue_pair_add(TEST_ADAPTER_ID,
> > > > >  				TEST_CDEV_ID, TEST_CDEV_QP_ID, NULL);
> > > > > diff --git a/doc/guides/prog_guide/event_crypto_adapter.rst b/doc/guides/prog_guide/event_crypto_adapter.rst
> > > > > index 4fb5c688e0..554df7e358 100644
> > > > > --- a/doc/guides/prog_guide/event_crypto_adapter.rst
> > > > > +++ b/doc/guides/prog_guide/event_crypto_adapter.rst
> > > > > @@ -201,10 +201,10 @@ capability, event information must be passed to the add API.
> > > > >
> > > > >          ret = rte_event_crypto_adapter_caps_get(id, evdev, &cap);
> > > > >          if (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND) {
> > > > > -                struct rte_event event;
> > > > > +                struct rte_event_crypto_adapter_queue_conf
> > > > > +                struct rte_event_crypto_adapter_queue_conf conf;
> > > > > -                // Fill in event information & pass it to add API
> > > > > -                // Fill in event information & pass it to add API
> > > > > -                rte_event_crypto_adapter_queue_pair_add(id, cdev_id, qp_id, &event);
> > > > > +                // Fill in conf.event information & pass it to add API
> > > > > +                rte_event_crypto_adapter_queue_pair_add(id, cdev_id, qp_id, &conf);
> > > > >          } else
> > > > >                  rte_event_crypto_adapter_queue_pair_add(id, cdev_id, qp_id, NULL);
> > > > >
> > > > > @@ -291,6 +291,23 @@ the ``rte_crypto_op``.
> > > > >                  rte_memcpy(op + len, &m_data, sizeof(m_data));
> > > > >          }
> > > > >
> > > > > +Enable event vectorization
> > > > > +~~~~~~~~~~~~~~~~~~~~~~~~~~
> > > > > +
> > > > > +The event crypto adapter can aggregate outcoming crypto
> > > > > +operations based on provided response information of
> > > > > +``rte_event_crypto_metadata::response_info``
> > > > > +and generate a ``rte_event`` containing ``rte_event_vector``
> > > > > +whose event type is ``RTE_EVENT_TYPE_CRYPTODEV_VECTOR``.
> > > > > +To enable vectorization application should set
> > > > > +RTE_EVENT_CRYPTO_ADAPTER_EVENT_VECTOR in
> > > > > +``rte_event_crypto_adapter_queue_conf::flag`` and provide
> > > > > +vector configuration(size, mempool, etc.) with respect of
> > > > > +``rte_event_crypto_adapter_vector_limits``, which could be
> > > > > +obtained by calling ``rte_event_crypto_adapter_vector_limits_get()``.
> > > > > +
> > > > > +The RTE_EVENT_CRYPTO_ADAPTER_CAP_EVENT_VECTOR capability
> > > indicates
> > > > > +whether PMD supports this feature.
> > > > > +
> > > > >  Start the adapter instance
> > > > >  ~~~~~~~~~~~~~~~~~~~~~~~~~~
> > > > >
> > > > > diff --git a/drivers/event/cnxk/cn10k_eventdev.c b/drivers/event/cnxk/cn10k_eventdev.c
> > > > > index 5a0cab40a9..e74ec57382 100644
> > > > > --- a/drivers/event/cnxk/cn10k_eventdev.c
> > > > > +++ b/drivers/event/cnxk/cn10k_eventdev.c
> > > > > @@ -889,11 +889,11 @@ static int
> > > > > cn10k_crypto_adapter_qp_add(const struct rte_eventdev *event_dev,
> > > > >  			    const struct rte_cryptodev *cdev,
> > > > >  			    int32_t queue_pair_id,
> > > > > -			    const struct rte_event *event)
> > > > > +			    const struct rte_event_crypto_adapter_queue_conf *conf)
> > > > >  {
> > > > >  	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
> > > > >
> > > > > -	RTE_SET_USED(event);
> > > > > +	RTE_SET_USED(conf);
> > > > >
> > > > >  	CNXK_VALID_DEV_OR_ERR_RET(event_dev->dev, "event_cn10k");
> > > > >  	CNXK_VALID_DEV_OR_ERR_RET(cdev->device, "crypto_cn10k");
> > > > > diff --git a/drivers/event/cnxk/cn9k_eventdev.c b/drivers/event/cnxk/cn9k_eventdev.c
> > > > > index 2e27030049..45ed547cb0 100644
> > > > > --- a/drivers/event/cnxk/cn9k_eventdev.c
> > > > > +++ b/drivers/event/cnxk/cn9k_eventdev.c
> > > > > @@ -1120,11 +1120,12 @@ cn9k_crypto_adapter_caps_get(const struct rte_eventdev *event_dev,
> > > > >  static int
> > > > >  cn9k_crypto_adapter_qp_add(const struct rte_eventdev *event_dev,
> > > > >  			   const struct rte_cryptodev *cdev,
> > > > > -			   int32_t queue_pair_id, const struct rte_event *event)
> > > > > +			   int32_t queue_pair_id,
> > > > > +			   const struct rte_event_crypto_adapter_queue_conf *conf)
> > > > >  {
> > > > >  	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
> > > > >
> > > > > -	RTE_SET_USED(event);
> > > > > +	RTE_SET_USED(conf);
> > > > >
> > > > >  	CNXK_VALID_DEV_OR_ERR_RET(event_dev->dev, "event_cn9k");
> > > > >  	CNXK_VALID_DEV_OR_ERR_RET(cdev->device, "crypto_cn9k");
> > > > > diff --git a/drivers/event/dpaa/dpaa_eventdev.c b/drivers/event/dpaa/dpaa_eventdev.c
> > > > > index ff6cc0be18..2b9ecd9fbf 100644
> > > > > --- a/drivers/event/dpaa/dpaa_eventdev.c
> > > > > +++ b/drivers/event/dpaa/dpaa_eventdev.c
> > > > > @@ -26,6 +26,7 @@
> > > > >  #include <rte_eventdev.h>
> > > > >  #include <eventdev_pmd_vdev.h>
> > > > >  #include <rte_ethdev.h>
> > > > > +#include <rte_event_crypto_adapter.h>
> > > > >  #include <rte_event_eth_rx_adapter.h>
> > > > >  #include <rte_event_eth_tx_adapter.h>
> > > > >  #include <cryptodev_pmd.h>
> > > > > @@ -775,10 +776,10 @@ static int
> > > > >  dpaa_eventdev_crypto_queue_add(const struct rte_eventdev *dev,
> > > > >  		const struct rte_cryptodev *cryptodev,
> > > > >  		int32_t rx_queue_id,
> > > > > -		const struct rte_event *ev)
> > > > > +		const struct rte_event_crypto_adapter_queue_conf *conf)
> > > > >  {
> > > > >  	struct dpaa_eventdev *priv = dev->data->dev_private;
> > > > > -	uint8_t ev_qid = ev->queue_id;
> > > > > +	uint8_t ev_qid = conf->ev.queue_id;
> > > > >  	u16 ch_id = priv->evq_info[ev_qid].ch_id;
> > > > >  	int ret;
> > > > >
> > > > > @@ -786,10 +787,10 @@ dpaa_eventdev_crypto_queue_add(const struct rte_eventdev *dev,
> > > > >
> > > > >  	if (rx_queue_id == -1)
> > > > >  		return dpaa_eventdev_crypto_queue_add_all(dev,
> > > > > -				cryptodev, ev);
> > > > > +				cryptodev, &conf->ev);
> > > > >
> > > > >  	ret = dpaa_sec_eventq_attach(cryptodev, rx_queue_id,
> > > > > -			ch_id, ev);
> > > > > +			ch_id, &conf->ev);
> > > > >  	if (ret) {
> > > > >  		DPAA_EVENTDEV_ERR(
> > > > >  			"dpaa_sec_eventq_attach failed: ret: %d\n", ret);
> > > > > diff --git a/drivers/event/dpaa2/dpaa2_eventdev.c b/drivers/event/dpaa2/dpaa2_eventdev.c
> > > > > index ffc7b8b073..0137736794 100644
> > > > > --- a/drivers/event/dpaa2/dpaa2_eventdev.c
> > > > > +++ b/drivers/event/dpaa2/dpaa2_eventdev.c
> > > > > @@ -26,6 +26,7 @@
> > > > >  #include <rte_bus_vdev.h>
> > > > >  #include <ethdev_driver.h>
> > > > >  #include <cryptodev_pmd.h>
> > > > > +#include <rte_event_crypto_adapter.h>
> > > > >  #include <rte_event_eth_rx_adapter.h>
> > > > >  #include <rte_event_eth_tx_adapter.h>
> > > > >
> > > > > @@ -865,10 +866,10 @@ static int
> > > > > dpaa2_eventdev_crypto_queue_add(const struct rte_eventdev *dev,
> > > > >  		const struct rte_cryptodev *cryptodev,
> > > > >  		int32_t rx_queue_id,
> > > > > -		const struct rte_event *ev)
> > > > > +		const struct rte_event_crypto_adapter_queue_conf *conf)
> > > > >  {
> > > > >  	struct dpaa2_eventdev *priv = dev->data->dev_private;
> > > > > -	uint8_t ev_qid = ev->queue_id;
> > > > > +	uint8_t ev_qid = conf->ev.queue_id;
> > > > >  	struct dpaa2_dpcon_dev *dpcon = priv->evq_info[ev_qid].dpcon;
> > > > >  	int ret;
> > > > >
> > > > > @@ -876,10 +877,10 @@ dpaa2_eventdev_crypto_queue_add(const struct rte_eventdev *dev,
> > > > >
> > > > >  	if (rx_queue_id == -1)
> > > > >  		return dpaa2_eventdev_crypto_queue_add_all(dev,
> > > > > -				cryptodev, ev);
> > > > > +				cryptodev, &conf->ev);
> > > > >
> > > > >  	ret = dpaa2_sec_eventq_attach(cryptodev, rx_queue_id,
> > > > > -				      dpcon, ev);
> > > > > +				      dpcon, &conf->ev);
> > > > >  	if (ret) {
> > > > >  		DPAA2_EVENTDEV_ERR(
> > > > >  			"dpaa2_sec_eventq_attach failed: ret: %d\n", ret);
> > > > > diff --git a/drivers/event/octeontx/ssovf_evdev.c b/drivers/event/octeontx/ssovf_evdev.c
> > > > > index 9e14e35d10..17acd8ef64 100644
> > > > > --- a/drivers/event/octeontx/ssovf_evdev.c
> > > > > +++ b/drivers/event/octeontx/ssovf_evdev.c
> > > > > @@ -745,12 +745,12 @@ static int
> > > > > ssovf_crypto_adapter_qp_add(const struct rte_eventdev *dev,
> > > > >  			    const struct rte_cryptodev *cdev,
> > > > >  			    int32_t queue_pair_id,
> > > > > -			    const struct rte_event *event)
> > > > > +			    const struct rte_event_crypto_adapter_queue_conf *conf)
> > > > >  {
> > > > >  	struct cpt_instance *qp;
> > > > >  	uint8_t qp_id;
> > > > >
> > > > > -	RTE_SET_USED(event);
> > > > > +	RTE_SET_USED(conf);
> > > > >
> > > > >  	if (queue_pair_id == -1) {
> > > > >  		for (qp_id = 0; qp_id < cdev->data->nb_queue_pairs; qp_id++) {
> > > > > diff --git a/lib/eventdev/eventdev_pmd.h b/lib/eventdev/eventdev_pmd.h
> > > > > index 69402668d8..bcfc9cbcb2 100644
> > > > > --- a/lib/eventdev/eventdev_pmd.h
> > > > > +++ b/lib/eventdev/eventdev_pmd.h
> > > > > @@ -907,6 +907,7 @@ rte_event_pmd_selftest_seqn(struct rte_mbuf *mbuf)
> > > > >  }
> > > > >
> > > > >  struct rte_cryptodev;
> > > > > +struct rte_event_crypto_adapter_queue_conf;
> > > > >
> > > > >  /**
> > > > >   * This API may change without prior notice
> > > > > @@ -961,11 +962,11 @@ typedef int (*eventdev_crypto_adapter_caps_get_t)
> > > > >   *   - <0: Error code returned by the driver function.
> > > > >   *
> > > > >   */
> > > > > -typedef int (*eventdev_crypto_adapter_queue_pair_add_t)
> > > > > -			(const struct rte_eventdev *dev,
> > > > > -			 const struct rte_cryptodev *cdev,
> > > > > -			 int32_t queue_pair_id,
> > > > > -			 const struct rte_event *event);
> > > > > +typedef int (*eventdev_crypto_adapter_queue_pair_add_t)(
> > > > > +		const struct rte_eventdev *dev,
> > > > > +		const struct rte_cryptodev *cdev,
> > > > > +		int32_t queue_pair_id,
> > > > > +		const struct rte_event_crypto_adapter_queue_conf *queue_conf);
> > > > >
> > > > >
> > > > >  /**
> > > > > @@ -1074,6 +1075,27 @@ typedef int (*eventdev_crypto_adapter_stats_reset)
> > > > >  			(const struct rte_eventdev *dev,
> > > > >  			 const struct rte_cryptodev *cdev);
> > > > >
> > > > > +struct rte_event_crypto_adapter_vector_limits;
> > > > > +/**
> > > > > + * Get event vector limits for a given event, crypto device pair.
> > > > > + *
> > > > > + * @param dev
> > > > > + *   Event device pointer
> > > > > + *
> > > > > + * @param cdev
> > > > > + *   Crypto device pointer
> > > > > + *
> > > > > + * @param[out] limits
> > > > > + *   Pointer to the limits structure to be filled.
> > > > > + *
> > > > > + * @return
> > > > > + *   - 0: Success.
> > > > > + *   - <0: Error code returned by the driver function.
> > > > > + */
> > > > > +typedef int (*eventdev_crypto_adapter_vector_limits_get_t)(
> > > > > +	const struct rte_eventdev *dev, const struct rte_cryptodev *cdev,
> > > > > +	struct rte_event_crypto_adapter_vector_limits *limits);
> > > > > +
> > > > >  /**
> > > > >   * Retrieve the event device's eth Tx adapter capabilities.
> > > > >   *
> > > > > @@ -1339,6 +1361,9 @@ struct eventdev_ops {
> > > > >  	/**< Get crypto stats */
> > > > >  	eventdev_crypto_adapter_stats_reset crypto_adapter_stats_reset;
> > > > >  	/**< Reset crypto stats */
> > > > > +	eventdev_crypto_adapter_vector_limits_get_t
> > > > > +		crypto_adapter_vector_limits_get;
> > > > > +	/**< Get event vector limits for the crypto adapter */
> > > > >
> > > > >  	eventdev_eth_rx_adapter_q_stats_get eth_rx_adapter_queue_stats_get;
> > > > >  	/**< Get ethernet Rx queue stats */
> > > > > diff --git a/lib/eventdev/eventdev_trace.h b/lib/eventdev/eventdev_trace.h
> > > > > index 5ec43d80ee..d48cd58850 100644
> > > > > --- a/lib/eventdev/eventdev_trace.h
> > > > > +++ b/lib/eventdev/eventdev_trace.h
> > > > > @@ -18,6 +18,7 @@ extern "C" {
> > > > >  #include <rte_trace_point.h>
> > > > >
> > > > >  #include "rte_eventdev.h"
> > > > > +#include "rte_event_crypto_adapter.h"
> > > > >  #include "rte_event_eth_rx_adapter.h"
> > > > >  #include "rte_event_timer_adapter.h"
> > > > >
> > > > > @@ -271,11 +272,12 @@ RTE_TRACE_POINT(
> > > > >  RTE_TRACE_POINT(
> > > > >  	rte_eventdev_trace_crypto_adapter_queue_pair_add,
> > > > >  	RTE_TRACE_POINT_ARGS(uint8_t adptr_id, uint8_t cdev_id,
> > > > > -		const void *event, int32_t queue_pair_id),
> > > > > +		int32_t queue_pair_id,
> > > > > +		const struct rte_event_crypto_adapter_queue_conf *conf),
> > > > >  	rte_trace_point_emit_u8(adptr_id);
> > > > >  	rte_trace_point_emit_u8(cdev_id);
> > > > >  	rte_trace_point_emit_i32(queue_pair_id);
> > > > > -	rte_trace_point_emit_ptr(event);
> > > > > +	rte_trace_point_emit_ptr(conf);
> > > > >  )
> > > > >
> > > > >  RTE_TRACE_POINT(
> > > > > diff --git a/lib/eventdev/rte_event_crypto_adapter.c b/lib/eventdev/rte_event_crypto_adapter.c
> > > > > index 7c695176f4..73a4f231e2 100644
> > > > > --- a/lib/eventdev/rte_event_crypto_adapter.c
> > > > > +++ b/lib/eventdev/rte_event_crypto_adapter.c
> > > > > @@ -921,11 +921,12 @@ int
> > > > >  rte_event_crypto_adapter_queue_pair_add(uint8_t id,
> > > > >  			uint8_t cdev_id,
> > > > >  			int32_t queue_pair_id,
> > > > > -			const struct rte_event *event)
> > > > > +			const struct rte_event_crypto_adapter_queue_conf *conf)
> > > > >  {
> > > > > +	struct rte_event_crypto_adapter_vector_limits limits;
> > > > >  	struct event_crypto_adapter *adapter;
> > > > > -	struct rte_eventdev *dev;
> > > > >  	struct crypto_device_info *dev_info;
> > > > > +	struct rte_eventdev *dev;
> > > > >  	uint32_t cap;
> > > > >  	int ret;
> > > > >
> > > > > @@ -951,11 +952,47 @@ rte_event_crypto_adapter_queue_pair_add(uint8_t id,
> > > > >  	}
> > > > >
> > > > >  	if ((cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND) &&
> > > > > -	    (event == NULL)) {
> > > > > +	    (conf == NULL)) {
> > > > >  		RTE_EDEV_LOG_ERR("Conf value can not be NULL for dev_id=%u",
> > > > >  				  cdev_id);
> > > > >  		return -EINVAL;
> > > > >  	}
> > > > Newline?
> > > >
> > > Ack
> > >
> > > > > +	if ((conf != NULL) &&
> > > > Checking conf twice?
> > > Will rewrite as if conf == NULL/else, to avoid double checking.
> > >
> > > > > +	    (conf->flags & RTE_EVENT_CRYPTO_ADAPTER_EVENT_VECTOR)) {
> > > > Else condition if the flag is not set?
> > > There's no additional handling for the case when the flag is not set.
> > >
> > > > > +		if ((cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_EVENT_VECTOR) == 0) {
> > > > > +			RTE_EDEV_LOG_ERR("Event vectorization is not supported,"
> > > > > +					 "dev %" PRIu8 " cdev %" PRIu8, id,
> > > > > +					 cdev_id);
> > > > > +			return -ENOTSUP;
> > > > > +		}
> > > > > +
> > > > > +		ret = rte_event_crypto_adapter_vector_limits_get(
> > > > > +			adapter->eventdev_id, cdev_id, &limits);
> > > > > +		if (ret < 0) {
> > > > > +			RTE_EDEV_LOG_ERR("Failed to get event device vector "
> > > > > +					 "limits, dev %" PRIu8 " cdev %" PRIu8,
> > > > > +					 id, cdev_id);
> > > > > +			return -EINVAL;
> > > > > +		}
> > > > New line? Please check other cases.
> > > >
> > > Ack
> > >
> > > > > +		if (conf->vector_sz < limits.min_sz ||
> > > > > +		    conf->vector_sz > limits.max_sz ||
> > > > > +		    conf->vector_timeout_ns < limits.min_timeout_ns ||
> > > > > +		    conf->vector_timeout_ns > limits.max_timeout_ns ||
> > > > > +		    conf->vector_mp == NULL) {
> > > > > +			RTE_EDEV_LOG_ERR("Invalid event vector configuration,"
> > > > > +					 " dev %" PRIu8 " cdev %" PRIu8,
> > > > > +					 id, cdev_id);
> > > > > +			return -EINVAL;
> > > > > +		}
> > > > > +		if (conf->vector_mp->elt_size <
> > > > > +		    (sizeof(struct rte_event_vector) +
> > > > > +		     (sizeof(uintptr_t) * conf->vector_sz))) {
> > > > > +			RTE_EDEV_LOG_ERR("Invalid event vector configuration,"
> > > > > +					 " dev %" PRIu8 " cdev %" PRIu8,
> > > > > +					 id, cdev_id);
> > > > > +			return -EINVAL;
> > > > > +		}
> > > > > +	}
> > > > >
> > > > >  	dev_info = &adapter->cdevs[cdev_id];
> > > > >
> > > > > @@ -990,7 +1027,7 @@ rte_event_crypto_adapter_queue_pair_add(uint8_t id,
> > > > >  		ret = (*dev->dev_ops->crypto_adapter_queue_pair_add)(dev,
> > > > >  				dev_info->dev,
> > > > >  				queue_pair_id,
> > > > > -				event);
> > > > > +				conf);
> > > > >  		if (ret)
> > > > >  			return ret;
> > > > >
> > > > > @@ -1030,8 +1067,8 @@ rte_event_crypto_adapter_queue_pair_add(uint8_t id,
> > > > >  		rte_service_component_runstate_set(adapter->service_id, 1);
> > > > >  	}
> > > > >
> > > > > -	rte_eventdev_trace_crypto_adapter_queue_pair_add(id, cdev_id, event,
> > > > > -		queue_pair_id);
> > > > > +	rte_eventdev_trace_crypto_adapter_queue_pair_add(id, cdev_id,
> > > > > +		queue_pair_id, conf);
> > > > >  	return 0;
> > > > >  }
> > > > >
> > > > > @@ -1290,3 +1327,44 @@ rte_event_crypto_adapter_event_port_get(uint8_t id, uint8_t *event_port_id)
> > > > >
> > > > >  	return 0;
> > > > >  }
> > > > > +
> > > > > +int
> > > > > +rte_event_crypto_adapter_vector_limits_get(
> > > > > +	uint8_t dev_id, uint16_t cdev_id,
> > > > > +	struct rte_event_crypto_adapter_vector_limits *limits)
> > > > > +{
> > > > > +	struct rte_cryptodev *cdev;
> > > > > +	struct rte_eventdev *dev;
> > > > > +	uint32_t cap;
> > > > > +	int ret;
> > > > > +
> > > > > +	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
> > > > > +
> > > > > +	if (!rte_cryptodev_is_valid_dev(cdev_id)) {
> > > > > +		RTE_EDEV_LOG_ERR("Invalid dev_id=%" PRIu8, cdev_id);
> > > > > +		return -EINVAL;
> > > > > +	}
> > > > > +
> > > > > +	if (limits == NULL)
> > > > > +		return -EINVAL;
> > > > Add appropriate error message like above?
> > > Ack, will add.
> > >
> > > > > +
> > > > > +	dev = &rte_eventdevs[dev_id];
> > > > > +	cdev = rte_cryptodev_pmd_get_dev(cdev_id);
> > > > > +
> > > > > +	ret = rte_event_crypto_adapter_caps_get(dev_id, cdev_id, &cap);
> > > > > +	if (ret) {
> > > > > +		RTE_EDEV_LOG_ERR("Failed to get adapter caps edev %" PRIu8
> > > > > +				 "cdev %" PRIu16, dev_id, cdev_id);
> > > > > +		return ret;
> > > > > +	}
> > > > > +
> > > > > +	if (!(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_EVENT_VECTOR))
> > > > > +		return -ENOTSUP;
> > > > Same here.
> > > Ack, will add.
> > >
> > > > > +
> > > > > +	RTE_FUNC_PTR_OR_ERR_RET(
> > > > > +		*dev->dev_ops->crypto_adapter_vector_limits_get,
> > > > > +		-ENOTSUP);
> > > > > +
> > > > > +	return dev->dev_ops->crypto_adapter_vector_limits_get(
> > > > > +		dev, cdev, limits);
> > > > > +}
> > > > > diff --git a/lib/eventdev/rte_event_crypto_adapter.h b/lib/eventdev/rte_event_crypto_adapter.h
> > > > > index d90a19e72c..7dd6171b9b 100644
> > > > > --- a/lib/eventdev/rte_event_crypto_adapter.h
> > > > > +++ b/lib/eventdev/rte_event_crypto_adapter.h
> > > > > @@ -253,6 +253,78 @@ struct rte_event_crypto_adapter_conf {
> > > > >  	 */
> > > > >  };
> > > > >
> > > > > +#define RTE_EVENT_CRYPTO_ADAPTER_EVENT_VECTOR	0x1
> > > > > +/**< This flag indicates that crypto operations processed on the crypto
> > > > > + * adapter need to be vectorized
> > > > > + * @see rte_event_crypto_adapter_queue_conf::flags
> > > > > + */
> > > > > +
> > > > > +/**
> > > > > + * Adapter queue configuration structure
> > > > > + */
> > > > > +struct rte_event_crypto_adapter_queue_conf {
> > > > > +	uint32_t flags;
> > > > > +	/**< Flags for handling crypto operations
> > > > > +	 * @see RTE_EVENT_CRYPTO_ADAPTER_EVENT_VECTOR
> > > > > +	 */
> > > > > +	struct rte_event ev;
> > > > > +	/**< If HW supports cryptodev queue pair to event queue binding,
> > > > > +	 * application is expected to fill in event information.
> > > > > +	 * @see RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND
> > > > > +	 */
> > > > > +	uint16_t vector_sz;
> > > > > +	/**< Indicates the maximum number for crypto operations to combine and
> > > > > +	 * form a vector.
> > > > > +	 * @see rte_event_crypto_adapter_vector_limits::min_sz
> > > > > +	 * @see rte_event_crypto_adapter_vector_limits::max_sz
> > > > > +	 * Valid when RTE_EVENT_CRYPTO_ADAPTER_EVENT_VECTOR flag is set in
> > > > > +	 * @see rte_event_crypto_adapter_queue_conf::rx_queue_flags
> > > > rx_queue_flags??
> > > Typo, should be conf::flags.
> > >
> > > > > +	 */
> > > > > +	uint64_t vector_timeout_ns;
> > > > > +	/**<
> > > > > +	 * Indicates the maximum number of nanoseconds to wait for aggregating
> > > > > +	 * crypto operations. Should be within vectorization limits of the
> > > > > +	 * adapter
> > > > > +	 * @see rte_event_crypto_adapter_vector_limits::min_timeout_ns
> > > > > +	 * @see rte_event_crypto_adapter_vector_limits::max_timeout_ns
> > > > > +	 * Valid when RTE_EVENT_CRYPTO_ADAPTER_EVENT_VECTOR flag is set in
> > > > > +	 * @see rte_event_crypto_adapter_queue_conf::flags
> > > > > +	 */
> > > > > +	struct rte_mempool *vector_mp;
> > > > > +	/**< Indicates the mempool that should be used for allocating
> > > > > +	 * rte_event_vector container.
> > > > > +	 * Should be created by using `rte_event_vector_pool_create`.
> > > > > +	 * Valid when RTE_EVENT_CRYPTO_ADAPTER_EVENT_VECTOR flag is set in
> > > > > +	 * @see rte_event_crypto_adapter_queue_conf::flags.
> > > > > +	 */
> > > > > +};
> > > > > +
> > > > > +/**
> > > > > + * A structure used to retrieve event crypto adapter vector limits.
> > > > > + */
> > > > > +struct rte_event_crypto_adapter_vector_limits {
> > > > > +	uint16_t min_sz;
> > > > > +	/**< Minimum vector limit configurable.
> > > > > +	 * @see rte_event_crypto_adapter_queue_conf::vector_sz
> > > > > +	 */
> > > > > +	uint16_t max_sz;
> > > > > +	/**< Maximum vector limit configurable.
> > > > > +	 * @see rte_event_crypto_adapter_queue_conf::vector_sz
> > > > > +	 */
> > > > > +	uint8_t log2_sz;
> > > > > +	/**< True if the size configured should be in log2.
> > > > > +	 * @see rte_event_crypto_adapter_queue_conf::vector_sz
> > > > > +	 */
> > > > > +	uint64_t min_timeout_ns;
> > > > > +	/**< Minimum vector timeout configurable.
> > > > > +	 * @see rte_event_crypto_adapter_queue_conf::vector_timeout_ns
> > > > > +	 */
> > > > > +	uint64_t max_timeout_ns;
> > > > > +	/**< Maximum vector timeout configurable.
> > > > > +	 * @see rte_event_crypto_adapter_queue_conf::vector_timeout_ns
> > > > > +	 */
> > > > > +};
> > > > > +
> > > > >  /**
> > > > >   * Function type used for adapter configuration callback. The callback is
> > > > >   * used to fill in members of the struct rte_event_crypto_adapter_conf, this
> > > > > @@ -392,10 +464,9 @@ rte_event_crypto_adapter_free(uint8_t id);
> > > > >   *  Cryptodev queue pair identifier. If queue_pair_id is set -1,
> > > > >   *  adapter adds all the pre configured queue pairs to the instance.
> > > > >   *
> > > > > - * @param event
> > > > > - *  if HW supports cryptodev queue pair to event queue binding, application is
> > > > > - *  expected to fill in event information, else it will be NULL.
> > > > > - *  @see RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND
> > > > > + * @param conf
> > > > > + *  Additional configuration structure of type
> > > > > + *  *rte_event_crypto_adapter_queue_conf*
> > > > >   *
> > > > >   * @return
> > > > >   *  - 0: Success, queue pair added correctly.
> > > > > @@ -405,7 +476,7 @@ int
> > > > >  rte_event_crypto_adapter_queue_pair_add(uint8_t id,
> > > > >  			uint8_t cdev_id,
> > > > >  			int32_t queue_pair_id,
> > > > > -			const struct rte_event *event);
> > > > > +			const struct rte_event_crypto_adapter_queue_conf *conf);
> > > > >
> > > > >  /**
> > > > >   * Delete a queue pair from an event crypto adapter.
> > > > > @@ -523,6 +594,26 @@ rte_event_crypto_adapter_service_id_get(uint8_t id, uint32_t *service_id);
> > > > >  int rte_event_crypto_adapter_event_port_get(uint8_t id, uint8_t *event_port_id);
> > > > >
> > > > > +/**
> > > > > + * Retrieve vector limits for a given event dev and crypto dev pair.
> > > > > + * @see rte_event_crypto_adapter_vector_limits
> > > > > + *
> > > > > + * @param dev_id
> > > > > + *  Event device identifier.
> > > > > + * @param cdev_id
> > > > > + *  Crypto device identifier.
> > > > > + * @param [out] limits
> > > > > + *  A pointer to rte_event_crypto_adapter_vector_limits structure that has to
> > > > > + * be filled.
> > > > Space missing before "be filled"
> > > Ack
> > >
> > > > > + *
> > > > > + * @return
> > > > > + *  - 0: Success.
> > > > > + *  - <0: Error code on failure.
> > > > > + */
> > > > > +int rte_event_crypto_adapter_vector_limits_get(
> > > > > +	uint8_t dev_id, uint16_t cdev_id,
> > > > > +	struct rte_event_crypto_adapter_vector_limits *limits);
> > > > > +
> > > > >  /**
> > > > >   * Enqueue a burst of crypto operations as event objects supplied in *rte_event*
> > > > >   * structure on an event crypto adapter designated by its event *dev_id* through
> > > > > diff --git a/lib/eventdev/rte_event_eth_rx_adapter.h b/lib/eventdev/rte_event_eth_rx_adapter.h
> > > > > index 3608a7b2cf..c8f2936866 100644
> > > > > --- a/lib/eventdev/rte_event_eth_rx_adapter.h
> > > > > +++ b/lib/eventdev/rte_event_eth_rx_adapter.h
> > > > > @@ -457,7 +457,8 @@ int rte_event_eth_rx_adapter_free(uint8_t id);
> > > > >   * @see RTE_EVENT_ETH_RX_ADAPTER_CAP_MULTI_EVENTQ
> > > > >   *
> > > > >   * @param conf
> > > > > - *  Additional configuration structure of type *rte_event_eth_rx_adapter_conf*
> > > > > + *  Additional configuration structure of type
> > > > > + *  *rte_event_eth_rx_adapter_queue_conf*
> > > > These changes are not relevant. Please consider sending separate patch.
> > > >
> > > Ack, Will send this change as a separate patch.
> > >
> > > > >   *
> > > > >   * @return
> > > > >   *  - 0: Success, Receive queue added correctly.
> > > > > diff --git a/lib/eventdev/rte_eventdev.h b/lib/eventdev/rte_eventdev.h
> > > > > index 6a6f6ea4c1..1a737bf851 100644
> > > > > --- a/lib/eventdev/rte_eventdev.h
> > > > > +++ b/lib/eventdev/rte_eventdev.h
> > > > > @@ -1203,6 +1203,9 @@ struct rte_event_vector {
> > > > >  #define RTE_EVENT_TYPE_ETH_RX_ADAPTER_VECTOR                                  \
> > > > >  	(RTE_EVENT_TYPE_VECTOR | RTE_EVENT_TYPE_ETH_RX_ADAPTER)
> > > > >  /**< The event vector generated from eth Rx adapter. */
> > > > > +#define RTE_EVENT_TYPE_CRYPTODEV_VECTOR                                        \
> > > > > +	(RTE_EVENT_TYPE_VECTOR | RTE_EVENT_TYPE_CRYPTODEV)
> > > > > +/**< The event vector generated from cryptodev adapter. */
> > > > >
> > > > >  #define RTE_EVENT_TYPE_MAX              0x10
> > > > >  /**< Maximum number of event types */
> > > > > @@ -1420,6 +1423,11 @@ rte_event_timer_adapter_caps_get(uint8_t dev_id, uint32_t *caps);
> > > > >   * the private data information along with the crypto session.
> > > > >   */
> > > > >
> > > > > +#define RTE_EVENT_CRYPTO_ADAPTER_CAP_EVENT_VECTOR   0x10
> > > > > +/**< Flag indicates HW is capable of aggregating processed
> > > > > + * crypto operations into rte_event_vector.
> > > > > + */
> > > > > +
> > > > >  /**
> > > > >   * Retrieve the event device's crypto adapter capabilities for the
> > > > >   * specified cryptodev device
> > > > > --
> > > > > 2.25.1
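
A note on the elt_size check quoted above: it encodes the smallest mempool element able to hold a vector of crypto operation pointers. A worked example of the arithmetic, with an illustrative vector size of 64:

    uint16_t vector_sz = 64;    /* illustrative value */
    size_t min_elt_size = sizeof(struct rte_event_vector) +
                          sizeof(uintptr_t) * vector_sz;

    /* vector_mp->elt_size must be >= min_elt_size, otherwise
     * rte_event_crypto_adapter_queue_pair_add() returns -EINVAL.
     * rte_event_vector_pool_create(), recommended in the header comments
     * above, sizes its elements this way automatically. */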


^ permalink raw reply	[flat|nested] 36+ messages in thread

* Re: [PATCH 1/3] eventdev: introduce event cryptodev vector type
  2022-09-27  9:38             ` Gujjar, Abhinandan S
@ 2022-09-27 13:26               ` Jerin Jacob
  2022-09-28 14:43                 ` Gujjar, Abhinandan S
  0 siblings, 1 reply; 36+ messages in thread
From: Jerin Jacob @ 2022-09-27 13:26 UTC (permalink / raw)
  To: Gujjar, Abhinandan S
  Cc: Volodymyr Fialko, dev, Jerin Jacob Kollanukkaran,
	Pavan Nikhilesh Bhagavatula, Shijith Thotton, Hemant Agrawal,
	Sachin Saxena, Jayatheerthan, Jay, Akhil Goyal, Anoob Joseph

> > > > >
> > > > Right now we are targeting crypto_cn10k PMD and ipsec-secgw event
> > > > mode to support vectorization.
> > > Is there a way to test this? When can be dataplane changes expected?
> > >
> > If the spec looks okay, support in s/w crypto adapter and other h/w PMDs can
> > be added by respective maintainers. Currently, we are adding library change,
> > support for one PMD and an application to test the feature. Feature is exposed
> > with capability flag to not break existing functionality.
> Got it. How do I test this feature without data plane changes?


Hi @Gujjar, Abhinandan S

> If there is a way to test this, please let me know.

Dataplane changes can be tested on the cn10k platform.
This feature is a hardware-assisted feature.

> This design is right now tested only for cn10k; I am not sure whether it works for the SW adapter.

SW driver support is not added in this series because, in order to accept an
API change, one would need:
1) the API spec,
2) at least one driver implementation, and
3) a test application to exercise the API.

It is a similar case for all ethdev and rte_flow features, etc.
Community can add SW driver support just like any other subsystem APIs.

Also, the proposed library changes don't differentiate between SW and HW PMDs.
The new behaviour is exposed behind a capability flag, so the SW crypto
adapter will not see any performance impact.

> I need to have perf data with and without vectorization support to approve.

On the cn10k platform, we see nearly 2.5x performance with
vectorization. Eth rx adapter already supports vectorization and this
spec change is in line with that.

Also IPsec gateway update to exercise these APIs. See
http://patches.dpdk.org/project/dpdk/patch/20220804103626.102688-6-vfialko@marvell.com/

Command to test on drivers which have this functionality.

./dpdk-ipsec-secgw -c 0xff0000 -a 0002:01:00.1 -a 0002:20:00.1 -a
0002:1e:00.0 -- -P -p 0x1 -P  --transfer-mode event -l
--event-schedule-type parallel --desc-nb 8192 --event-vector -f
simple.conf

sample.conf

sp ipv4 out esp protect 19 pri 1 dst 192.18.0.0/32 sport 0:65535 dport 0:65535
sa out 19 aead_algo aes-128-gcm aead_key
73:69:78:74:65:65:6e:62:79:74:65:73:20:6b:65:79:64:70:64:6b mode
ipv4-tunnel src 2.1.1.1 dst 1.1.1.1 type lookaside-protocol-offload
port_id 0

neigh port 0 d0:37:45:02:b0:d3
rt ipv4 dst 1.1.0.0/16 port 0

In order to make forward progress and merge the patch in RC1, I would request:
1) Review the API-specific patch (eventdev: introduce event cryptodev
vector type); if the spec needs to be changed to adapt any other driver (SW
or HW), the author should address that.
2) If you think the API usage is not exercised enough by the dpdk-ipsec-secgw
application, the author should update the test-eventdev application to
support the new mode, which can be merged after RC1 as it is a test
application change.

Let us know what you think to make forward progress.
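
For reviewers weighing the spec, the consumer side of the new event type looks roughly like the sketch below, based on the definitions in the patch; evdev_id, port_id and process_crypto_op() are placeholders:

    struct rte_event ev;
    uint16_t i;

    if (rte_event_dequeue_burst(evdev_id, port_id, &ev, 1, 0) &&
        ev.event_type == RTE_EVENT_TYPE_CRYPTODEV_VECTOR) {
        struct rte_event_vector *vec = ev.vec;

        /* vec->ptrs[] carries the aggregated rte_crypto_op pointers,
         * instead of one op per event. */
        for (i = 0; i < vec->nb_elem; i++)
            process_crypto_op(vec->ptrs[i]);
        /* Return the vector container to its mempool. */
        rte_mempool_put(rte_mempool_from_obj(vec), vec);
    }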

^ permalink raw reply	[flat|nested] 36+ messages in thread

* RE: [PATCH 1/3] eventdev: introduce event cryptodev vector type
  2022-09-27 13:26               ` Jerin Jacob
@ 2022-09-28 14:43                 ` Gujjar, Abhinandan S
  2022-09-28 16:13                   ` Jerin Jacob
  0 siblings, 1 reply; 36+ messages in thread
From: Gujjar, Abhinandan S @ 2022-09-28 14:43 UTC (permalink / raw)
  To: Jerin Jacob, Volodymyr Fialko, Jayatheerthan, Jay
  Cc: dev, Jerin Jacob Kollanukkaran, Pavan Nikhilesh Bhagavatula,
	Shijith Thotton, Hemant Agrawal, Sachin Saxena, Akhil Goyal,
	Anoob Joseph



> -----Original Message-----
> From: Jerin Jacob <jerinjacobk@gmail.com>
> Sent: Tuesday, September 27, 2022 6:56 PM
> To: Gujjar, Abhinandan S <abhinandan.gujjar@intel.com>
> Cc: Volodymyr Fialko <vfialko@marvell.com>; dev@dpdk.org; Jerin Jacob
> Kollanukkaran <jerinj@marvell.com>; Pavan Nikhilesh Bhagavatula
> <pbhagavatula@marvell.com>; Shijith Thotton <sthotton@marvell.com>;
> Hemant Agrawal <hemant.agrawal@nxp.com>; Sachin Saxena
> <sachin.saxena@oss.nxp.com>; Jayatheerthan, Jay
> <jay.jayatheerthan@intel.com>; Akhil Goyal <gakhil@marvell.com>; Anoob
> Joseph <anoobj@marvell.com>
> Subject: Re: [PATCH 1/3] eventdev: introduce event cryptodev vector type
> 
> > > > > >
> > > > > Right now we are targeting crypto_cn10k PMD and ipsec-secgw
> > > > > event mode to support vectorization.
> > > > Is there a way to test this? When can be dataplane changes expected?
> > > >
> > > If the spec looks okay, support in s/w crypto adapter and other h/w
> > > PMDs can be added by respective maintainers. Currently, we are
> > > adding library change, support for one PMD and an application to
> > > test the feature. Feature is exposed with capability flag to not break
> existing functionality.
> > Got it. How do I test this feature without data plane changes?
> 
> 
> Hi @Gujjar, Abhinandan S
> 
> > If there is a way to test this, please let me know.
> 
> Dataplane changes can be tested on the cn10k platform.
> This feature is a hardware assisted feature.
> 
> > This design is right now tested for cn10k, I am not sure this works for sw
> adapter.
> 
> SW driver support is not added in this series as in order to accept a API change,
> one would need, 1)API spec 2)One of the driver 3)Test application to exercise
> the API.
> 
> It is a similar case for all ethdev, rte_flow features etc.
> Community can add SW driver support just like any other subsystem APIs.
> 
> Also, The proposed library changes don't differentiate between SW & HW
> PMDs.
> The proposed changes are exposed with a capability flag and so SW crypto
> adapter will not have any perf impact.
> 
> > I need to have perf data with and without vectorization support to approve.
> 
> On the cn10k platform, we see nearly 2.5x performance with vectorization. Eth
> rx adapter already supports vectorization and this spec change is in line with
> that.
> 
> Also IPsec gateway update to exercise these APIs. See
> http://patches.dpdk.org/project/dpdk/patch/20220804103626.102688-6-
> vfialko@marvell.com/
> 
> Command to test on drivers which have this functionality.
> 
> ./dpdk-ipsec-secgw -c 0xff0000 -a 0002:01:00.1 -a 0002:20:00.1 -a
> 0002:1e:00.0 -- -P -p 0x1 -P  --transfer-mode event -l --event-schedule-type
> parallel --desc-nb 8192 --event-vector -f simple.conf
> 
> sample.conf
> 
> sp ipv4 out esp protect 19 pri 1 dst 192.18.0.0/32 sport 0:65535 dport 0:65535
> sa out 19 aead_algo aes-128-gcm aead_key
> 73:69:78:74:65:65:6e:62:79:74:65:73:20:6b:65:79:64:70:64:6b mode ipv4-
> tunnel src 2.1.1.1 dst 1.1.1.1 type lookaside-protocol-offload port_id 0
> 
> neigh port 0 d0:37:45:02:b0:d3
> rt ipv4 dst 1.1.0.0/16 port 0
> 
> In order to make forward progress and merge patch in RC1, I would request
> 1)Review the API specific patch(eventdev: introduce event cryptodev vector
> type), If spec needs to be changed to adapt any other driver(SW or HW) then
> the author should address that.
> 2)If you think, API usage is not enough with dpdk-ipsec-secgw application, I
> think, author should update the test-eventdev application to support the new
> mode.Which can be merged after RC1 as it is a test application change.
> 
> Let us know what you think to make forward progress.

Hi @Jerin Jacob & @Volodymyr Fialko,
Thanks for clarifying. We would like to have a call to discuss this.
Could you please set up a call and include @Jayatheerthan, Jay in it as well?

Thanks
Abhinandan 

^ permalink raw reply	[flat|nested] 36+ messages in thread

* Re: [PATCH 1/3] eventdev: introduce event cryptodev vector type
  2022-09-28 14:43                 ` Gujjar, Abhinandan S
@ 2022-09-28 16:13                   ` Jerin Jacob
  0 siblings, 0 replies; 36+ messages in thread
From: Jerin Jacob @ 2022-09-28 16:13 UTC (permalink / raw)
  To: Gujjar, Abhinandan S
  Cc: Volodymyr Fialko, Jayatheerthan, Jay, dev,
	Jerin Jacob Kollanukkaran, Pavan Nikhilesh Bhagavatula,
	Shijith Thotton, Hemant Agrawal, Sachin Saxena, Akhil Goyal,
	Anoob Joseph

On Wed, Sep 28, 2022 at 8:14 PM Gujjar, Abhinandan S
<abhinandan.gujjar@intel.com> wrote:
>
>
>
> > -----Original Message-----
> > From: Jerin Jacob <jerinjacobk@gmail.com>
> > Sent: Tuesday, September 27, 2022 6:56 PM
> > To: Gujjar, Abhinandan S <abhinandan.gujjar@intel.com>
> > Cc: Volodymyr Fialko <vfialko@marvell.com>; dev@dpdk.org; Jerin Jacob
> > Kollanukkaran <jerinj@marvell.com>; Pavan Nikhilesh Bhagavatula
> > <pbhagavatula@marvell.com>; Shijith Thotton <sthotton@marvell.com>;
> > Hemant Agrawal <hemant.agrawal@nxp.com>; Sachin Saxena
> > <sachin.saxena@oss.nxp.com>; Jayatheerthan, Jay
> > <jay.jayatheerthan@intel.com>; Akhil Goyal <gakhil@marvell.com>; Anoob
> > Joseph <anoobj@marvell.com>
> > Subject: Re: [PATCH 1/3] eventdev: introduce event cryptodev vector type
> >
> > > > > > >
> > > > > > Right now we are targeting crypto_cn10k PMD and ipsec-secgw
> > > > > > event mode to support vectorization.
> > > > > Is there a way to test this? When can be dataplane changes expected?
> > > > >
> > > > If the spec looks okay, support in s/w crypto adapter and other h/w
> > > > PMDs can be added by respective maintainers. Currently, we are
> > > > adding library change, support for one PMD and an application to
> > > > test the feature. Feature is exposed with capability flag to not break
> > existing functionality.
> > > Got it. How do I test this feature without data plane changes?
> >
> >
> > Hi @Gujjar, Abhinandan S
> >
> > > If there is a way to test this, please let me know.
> >
> > Dataplane changes can be tested on the cn10k platform.
> > This feature is a hardware assisted feature.
> >
> > > This design is right now tested for cn10k, I am not sure this works for sw
> > adapter.
> >
> > SW driver support is not added in this series as in order to accept a API change,
> > one would need, 1)API spec 2)One of the driver 3)Test application to exercise
> > the API.
> >
> > It is a similar case for all ethdev, rte_flow features etc.
> > Community can add SW driver support just like any other subsystem APIs.
> >
> > Also, The proposed library changes don't differentiate between SW & HW
> > PMDs.
> > The proposed changes are exposed with a capability flag and so SW crypto
> > adapter will not have any perf impact.
> >
> > > I need to have perf data with and without vectorization support to approve.
> >
> > On the cn10k platform, we see nearly 2.5x performance with vectorization. Eth
> > rx adapter already supports vectorization and this spec change is in line with
> > that.
> >
> > Also IPsec gateway update to exercise these APIs. See
> > http://patches.dpdk.org/project/dpdk/patch/20220804103626.102688-6-
> > vfialko@marvell.com/
> >
> > Command to test on drivers which have this functionality.
> >
> > ./dpdk-ipsec-secgw -c 0xff0000 -a 0002:01:00.1 -a 0002:20:00.1 -a
> > 0002:1e:00.0 -- -P -p 0x1 -P  --transfer-mode event -l --event-schedule-type
> > parallel --desc-nb 8192 --event-vector -f simple.conf
> >
> > sample.conf
> >
> > sp ipv4 out esp protect 19 pri 1 dst 192.18.0.0/32 sport 0:65535 dport 0:65535
> > sa out 19 aead_algo aes-128-gcm aead_key
> > 73:69:78:74:65:65:6e:62:79:74:65:73:20:6b:65:79:64:70:64:6b mode ipv4-
> > tunnel src 2.1.1.1 dst 1.1.1.1 type lookaside-protocol-offload port_id 0
> >
> > neigh port 0 d0:37:45:02:b0:d3
> > rt ipv4 dst 1.1.0.0/16 port 0
> >
> > In order to make forward progress and merge patch in RC1, I would request
> > 1)Review the API specific patch(eventdev: introduce event cryptodev vector
> > type), If spec needs to be changed to adapt any other driver(SW or HW) then
> > the author should address that.
> > 2)If you think, API usage is not enough with dpdk-ipsec-secgw application, I
> > think, author should update the test-eventdev application to support the new
> > mode.Which can be merged after RC1 as it is a test application change.
> >
> > Let us know what you think to make forward progress.
>
> Hi @Jerin Jacob & @Volodymyr Fialko,
> Thanks for clarifying. We would like to have a call to discuss on this.
> Could you please setup a call & include @Jayatheerthan, Jay also into it?

Sure. Setting up one at 4 PM IST on 29 Oct.

Please find the details.


Join Zoom Meeting:
https://marvell.zoom.us/j/9901077677?pwd=T2lTTGMwYlc1YTQzMnR4eGRWQXR6QT09
    Password: 339888



>
> Thanks
> Abhinandan

^ permalink raw reply	[flat|nested] 36+ messages in thread

* [PATCH v3 0/2] Vector support for event crypto adapter
  2022-09-26 11:36   ` [PATCH v2 0/3] Vector support for event crypto adapter Volodymyr Fialko
                       ` (2 preceding siblings ...)
  2022-09-26 11:36     ` [PATCH v2 3/3] crypto/cnxk: add vectorization " Volodymyr Fialko
@ 2022-10-01  0:42     ` Volodymyr Fialko
  2022-10-01  0:42       ` [PATCH v3 1/2] eventdev: introduce event cryptodev vector type Volodymyr Fialko
                         ` (2 more replies)
  3 siblings, 3 replies; 36+ messages in thread
From: Volodymyr Fialko @ 2022-10-01  0:42 UTC (permalink / raw)
  To: dev; +Cc: jerinj, gakhil, anoobj, abhinandan.gujjar, Volodymyr Fialko

Introduce the ability to aggregate crypto operations processed by the event
crypto adapter into a single event containing an rte_event_vector whose
event type is RTE_EVENT_TYPE_CRYPTODEV_VECTOR.
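
For context, a minimal sketch of how an application event loop could consume
the aggregated events (evdev_id, port_id and process_crypto_op() are
illustrative placeholders, not part of this series):

	struct rte_event ev;
	uint16_t i;

	while (rte_event_dequeue_burst(evdev_id, port_id, &ev, 1, 0)) {
		if (ev.event_type == RTE_EVENT_TYPE_CRYPTODEV_VECTOR) {
			struct rte_event_vector *vec = ev.vec;

			/* Each vector element is a completed crypto op */
			for (i = 0; i < vec->nb_elem; i++)
				process_crypto_op(vec->ptrs[i]);
			/* Return the vector container to its pool */
			rte_mempool_put(rte_mempool_from_obj(vec), vec);
		} else if (ev.event_type == RTE_EVENT_TYPE_CRYPTODEV) {
			process_crypto_op(ev.event_ptr);
		}
	}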

* Changes since v1
- Fix missing newline/space issues
- Add missing error messages
- Remove double check of conf input parameter

* Changes since v2
- Rebase on top of dpdk-next-eventdev branch
- Remove already merged patch

Volodymyr Fialko (2):
  eventdev: introduce event cryptodev vector type
  crypto/cnxk: add vectorization for event crypto

 app/test-eventdev/test_perf_common.c          |  11 +-
 app/test/test_event_crypto_adapter.c          |  12 +-
 .../prog_guide/event_crypto_adapter.rst       |  23 +-
 drivers/crypto/cnxk/cn10k_cryptodev_ops.c     | 398 ++++++++++++++++--
 drivers/crypto/cnxk/cn10k_cryptodev_ops.h     |   2 +
 drivers/crypto/cnxk/cnxk_cryptodev_ops.h      |   9 +-
 drivers/crypto/cnxk/version.map               |   1 +
 drivers/event/cnxk/cn10k_eventdev.c           |  33 +-
 drivers/event/cnxk/cn10k_worker.h             |   6 +-
 drivers/event/cnxk/cn9k_eventdev.c            |  10 +-
 drivers/event/cnxk/cnxk_eventdev.h            |   4 +-
 drivers/event/cnxk/cnxk_eventdev_adptr.c      |  17 +-
 drivers/event/dpaa/dpaa_eventdev.c            |   9 +-
 drivers/event/dpaa2/dpaa2_eventdev.c          |   9 +-
 drivers/event/octeontx/ssovf_evdev.c          |   4 +-
 lib/eventdev/eventdev_pmd.h                   |  35 +-
 lib/eventdev/eventdev_trace.h                 |   6 +-
 lib/eventdev/rte_event_crypto_adapter.c       | 105 ++++-
 lib/eventdev/rte_event_crypto_adapter.h       | 101 ++++-
 lib/eventdev/rte_eventdev.h                   |   8 +
 20 files changed, 698 insertions(+), 105 deletions(-)

-- 
2.25.1


^ permalink raw reply	[flat|nested] 36+ messages in thread

* [PATCH v3 1/2] eventdev: introduce event cryptodev vector type
  2022-10-01  0:42     ` [PATCH v3 0/2] Vector support for event crypto adapter Volodymyr Fialko
@ 2022-10-01  0:42       ` Volodymyr Fialko
  2022-10-01  0:42       ` [PATCH v3 2/2] crypto/cnxk: add vectorization for event crypto Volodymyr Fialko
  2022-10-01  3:42       ` [PATCH v3 0/2] Vector support for event crypto adapter Akhil Goyal
  2 siblings, 0 replies; 36+ messages in thread
From: Volodymyr Fialko @ 2022-10-01  0:42 UTC (permalink / raw)
  To: dev, Jerin Jacob, Abhinandan Gujjar, Pavan Nikhilesh,
	Shijith Thotton, Hemant Agrawal, Sachin Saxena
  Cc: gakhil, anoobj, Volodymyr Fialko

Introduce the ability to aggregate crypto operations processed by the event
crypto adapter into a single event containing an rte_event_vector whose
event type is RTE_EVENT_TYPE_CRYPTODEV_VECTOR.

To enable vectorization, the application should set
RTE_EVENT_CRYPTO_ADAPTER_EVENT_VECTOR in
rte_event_crypto_adapter_queue_conf::flags and provide a vector
configuration within the bounds of rte_event_crypto_adapter_vector_limits,
which can be obtained by calling rte_event_crypto_adapter_vector_limits_get().

The event crypto adapter is responsible for vectorizing the crypto
operations based on the response information provided in
rte_event_crypto_metadata::response_info.

Drivers and tests are updated according to the new API.

Signed-off-by: Volodymyr Fialko <vfialko@marvell.com>
---
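A minimal usage sketch of the configuration path added by this patch (the
ids, pool size and ATOMIC scheduling type are illustrative assumptions, and
error handling is omitted):

	struct rte_event_crypto_adapter_vector_limits limits;
	struct rte_event_crypto_adapter_queue_conf qconf;
	struct rte_mempool *vec_mp;
	uint32_t cap;

	memset(&qconf, 0, sizeof(qconf));
	rte_event_crypto_adapter_caps_get(evdev_id, cdev_id, &cap);
	if (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_EVENT_VECTOR) {
		rte_event_crypto_adapter_vector_limits_get(evdev_id, cdev_id,
							   &limits);
		/* Pool elements must fit rte_event_vector + vector_sz pointers */
		vec_mp = rte_event_vector_pool_create("ca_vec_pool", 16384, 0,
						      limits.max_sz,
						      rte_socket_id());
		qconf.flags = RTE_EVENT_CRYPTO_ADAPTER_EVENT_VECTOR;
		qconf.vector_sz = limits.max_sz;
		qconf.vector_timeout_ns = limits.min_timeout_ns;
		qconf.vector_mp = vec_mp;
	}
	qconf.ev.queue_id = ev_queue_id;
	qconf.ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
	rte_event_crypto_adapter_queue_pair_add(adapter_id, cdev_id, -1, &qconf);
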
 app/test-eventdev/test_perf_common.c          |  11 +-
 app/test/test_event_crypto_adapter.c          |  12 +-
 .../prog_guide/event_crypto_adapter.rst       |  23 +++-
 drivers/event/cnxk/cn10k_eventdev.c           |   4 +-
 drivers/event/cnxk/cn9k_eventdev.c            |   5 +-
 drivers/event/dpaa/dpaa_eventdev.c            |   9 +-
 drivers/event/dpaa2/dpaa2_eventdev.c          |   9 +-
 drivers/event/octeontx/ssovf_evdev.c          |   4 +-
 lib/eventdev/eventdev_pmd.h                   |  35 +++++-
 lib/eventdev/eventdev_trace.h                 |   6 +-
 lib/eventdev/rte_event_crypto_adapter.c       | 105 ++++++++++++++++--
 lib/eventdev/rte_event_crypto_adapter.h       | 101 ++++++++++++++++-
 lib/eventdev/rte_eventdev.h                   |   8 ++
 13 files changed, 285 insertions(+), 47 deletions(-)

diff --git a/app/test-eventdev/test_perf_common.c b/app/test-eventdev/test_perf_common.c
index 81420be73a..8472a87b99 100644
--- a/app/test-eventdev/test_perf_common.c
+++ b/app/test-eventdev/test_perf_common.c
@@ -837,14 +837,13 @@ perf_event_crypto_adapter_setup(struct test_perf *t, struct prod_data *p)
 	}
 
 	if (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND) {
-		struct rte_event response_info;
+		struct rte_event_crypto_adapter_queue_conf conf;
 
-		response_info.event = 0;
-		response_info.sched_type = RTE_SCHED_TYPE_ATOMIC;
-		response_info.queue_id = p->queue_id;
+		memset(&conf, 0, sizeof(conf));
+		conf.ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
+		conf.ev.queue_id = p->queue_id;
 		ret = rte_event_crypto_adapter_queue_pair_add(
-			TEST_PERF_CA_ID, p->ca.cdev_id, p->ca.cdev_qp_id,
-			&response_info);
+			TEST_PERF_CA_ID, p->ca.cdev_id, p->ca.cdev_qp_id, &conf);
 	} else {
 		ret = rte_event_crypto_adapter_queue_pair_add(
 			TEST_PERF_CA_ID, p->ca.cdev_id, p->ca.cdev_qp_id, NULL);
diff --git a/app/test/test_event_crypto_adapter.c b/app/test/test_event_crypto_adapter.c
index 2ecc7e2cea..bb617c1042 100644
--- a/app/test/test_event_crypto_adapter.c
+++ b/app/test/test_event_crypto_adapter.c
@@ -1175,6 +1175,10 @@ test_crypto_adapter_create(void)
 static int
 test_crypto_adapter_qp_add_del(void)
 {
+	struct rte_event_crypto_adapter_queue_conf queue_conf = {
+		.ev = response_info,
+	};
+
 	uint32_t cap;
 	int ret;
 
@@ -1183,7 +1187,7 @@ test_crypto_adapter_qp_add_del(void)
 
 	if (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND) {
 		ret = rte_event_crypto_adapter_queue_pair_add(TEST_ADAPTER_ID,
-				TEST_CDEV_ID, TEST_CDEV_QP_ID, &response_info);
+				TEST_CDEV_ID, TEST_CDEV_QP_ID, &queue_conf);
 	} else
 		ret = rte_event_crypto_adapter_queue_pair_add(TEST_ADAPTER_ID,
 					TEST_CDEV_ID, TEST_CDEV_QP_ID, NULL);
@@ -1206,6 +1210,10 @@ configure_event_crypto_adapter(enum rte_event_crypto_adapter_mode mode)
 		.new_event_threshold = 1200,
 	};
 
+	struct rte_event_crypto_adapter_queue_conf queue_conf = {
+		.ev = response_info,
+	};
+
 	uint32_t cap;
 	int ret;
 
@@ -1238,7 +1246,7 @@ configure_event_crypto_adapter(enum rte_event_crypto_adapter_mode mode)
 
 	if (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND) {
 		ret = rte_event_crypto_adapter_queue_pair_add(TEST_ADAPTER_ID,
-				TEST_CDEV_ID, TEST_CDEV_QP_ID, &response_info);
+				TEST_CDEV_ID, TEST_CDEV_QP_ID, &queue_conf);
 	} else
 		ret = rte_event_crypto_adapter_queue_pair_add(TEST_ADAPTER_ID,
 				TEST_CDEV_ID, TEST_CDEV_QP_ID, NULL);
diff --git a/doc/guides/prog_guide/event_crypto_adapter.rst b/doc/guides/prog_guide/event_crypto_adapter.rst
index 4fb5c688e0..554df7e358 100644
--- a/doc/guides/prog_guide/event_crypto_adapter.rst
+++ b/doc/guides/prog_guide/event_crypto_adapter.rst
@@ -201,10 +201,10 @@ capability, event information must be passed to the add API.
 
         ret = rte_event_crypto_adapter_caps_get(id, evdev, &cap);
         if (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND) {
-                struct rte_event event;
+                struct rte_event_crypto_adapter_queue_conf conf;
 
-                // Fill in event information & pass it to add API
-                rte_event_crypto_adapter_queue_pair_add(id, cdev_id, qp_id, &event);
+                // Fill in conf.event information & pass it to add API
+                rte_event_crypto_adapter_queue_pair_add(id, cdev_id, qp_id, &conf);
         } else
                 rte_event_crypto_adapter_queue_pair_add(id, cdev_id, qp_id, NULL);
 
@@ -291,6 +291,23 @@ the ``rte_crypto_op``.
                 rte_memcpy(op + len, &m_data, sizeof(m_data));
         }
 
+Enable event vectorization
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The event crypto adapter can aggregate outgoing crypto operations based on
+the response information provided in ``rte_event_crypto_metadata::response_info``
+and generate a ``rte_event`` containing an ``rte_event_vector`` whose event
+type is ``RTE_EVENT_TYPE_CRYPTODEV_VECTOR``.
+To enable vectorization, the application should set
+RTE_EVENT_CRYPTO_ADAPTER_EVENT_VECTOR in
+``rte_event_crypto_adapter_queue_conf::flags`` and provide the vector
+configuration (size, mempool, etc.) within the limits of
+``rte_event_crypto_adapter_vector_limits``, which can be obtained by calling
+``rte_event_crypto_adapter_vector_limits_get()``.
+
+The RTE_EVENT_CRYPTO_ADAPTER_CAP_EVENT_VECTOR capability indicates whether
+the PMD supports this feature.
+
 Start the adapter instance
 ~~~~~~~~~~~~~~~~~~~~~~~~~~
 
diff --git a/drivers/event/cnxk/cn10k_eventdev.c b/drivers/event/cnxk/cn10k_eventdev.c
index bbaa6d0361..c55d69724b 100644
--- a/drivers/event/cnxk/cn10k_eventdev.c
+++ b/drivers/event/cnxk/cn10k_eventdev.c
@@ -1034,12 +1034,12 @@ static int
 cn10k_crypto_adapter_qp_add(const struct rte_eventdev *event_dev,
 			    const struct rte_cryptodev *cdev,
 			    int32_t queue_pair_id,
-			    const struct rte_event *event)
+			    const struct rte_event_crypto_adapter_queue_conf *conf)
 {
 	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
 	int ret;
 
-	RTE_SET_USED(event);
+	RTE_SET_USED(conf);
 
 	CNXK_VALID_DEV_OR_ERR_RET(event_dev->dev, "event_cn10k");
 	CNXK_VALID_DEV_OR_ERR_RET(cdev->device, "crypto_cn10k");
diff --git a/drivers/event/cnxk/cn9k_eventdev.c b/drivers/event/cnxk/cn9k_eventdev.c
index 764963db85..fca7b5f3a5 100644
--- a/drivers/event/cnxk/cn9k_eventdev.c
+++ b/drivers/event/cnxk/cn9k_eventdev.c
@@ -1125,12 +1125,13 @@ cn9k_crypto_adapter_caps_get(const struct rte_eventdev *event_dev,
 static int
 cn9k_crypto_adapter_qp_add(const struct rte_eventdev *event_dev,
 			   const struct rte_cryptodev *cdev,
-			   int32_t queue_pair_id, const struct rte_event *event)
+			   int32_t queue_pair_id,
+			   const struct rte_event_crypto_adapter_queue_conf *conf)
 {
 	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
 	int ret;
 
-	RTE_SET_USED(event);
+	RTE_SET_USED(conf);
 
 	CNXK_VALID_DEV_OR_ERR_RET(event_dev->dev, "event_cn9k");
 	CNXK_VALID_DEV_OR_ERR_RET(cdev->device, "crypto_cn9k");
diff --git a/drivers/event/dpaa/dpaa_eventdev.c b/drivers/event/dpaa/dpaa_eventdev.c
index 8e470584ea..4b3d16735b 100644
--- a/drivers/event/dpaa/dpaa_eventdev.c
+++ b/drivers/event/dpaa/dpaa_eventdev.c
@@ -26,6 +26,7 @@
 #include <rte_eventdev.h>
 #include <eventdev_pmd_vdev.h>
 #include <rte_ethdev.h>
+#include <rte_event_crypto_adapter.h>
 #include <rte_event_eth_rx_adapter.h>
 #include <rte_event_eth_tx_adapter.h>
 #include <cryptodev_pmd.h>
@@ -775,10 +776,10 @@ static int
 dpaa_eventdev_crypto_queue_add(const struct rte_eventdev *dev,
 		const struct rte_cryptodev *cryptodev,
 		int32_t rx_queue_id,
-		const struct rte_event *ev)
+		const struct rte_event_crypto_adapter_queue_conf *conf)
 {
 	struct dpaa_eventdev *priv = dev->data->dev_private;
-	uint8_t ev_qid = ev->queue_id;
+	uint8_t ev_qid = conf->ev.queue_id;
 	u16 ch_id = priv->evq_info[ev_qid].ch_id;
 	int ret;
 
@@ -786,10 +787,10 @@ dpaa_eventdev_crypto_queue_add(const struct rte_eventdev *dev,
 
 	if (rx_queue_id == -1)
 		return dpaa_eventdev_crypto_queue_add_all(dev,
-				cryptodev, ev);
+				cryptodev, &conf->ev);
 
 	ret = dpaa_sec_eventq_attach(cryptodev, rx_queue_id,
-			ch_id, ev);
+			ch_id, &conf->ev);
 	if (ret) {
 		DPAA_EVENTDEV_ERR(
 			"dpaa_sec_eventq_attach failed: ret: %d\n", ret);
diff --git a/drivers/event/dpaa2/dpaa2_eventdev.c b/drivers/event/dpaa2/dpaa2_eventdev.c
index 1001297cda..f499d0d015 100644
--- a/drivers/event/dpaa2/dpaa2_eventdev.c
+++ b/drivers/event/dpaa2/dpaa2_eventdev.c
@@ -26,6 +26,7 @@
 #include <bus_vdev_driver.h>
 #include <ethdev_driver.h>
 #include <cryptodev_pmd.h>
+#include <rte_event_crypto_adapter.h>
 #include <rte_event_eth_rx_adapter.h>
 #include <rte_event_eth_tx_adapter.h>
 
@@ -865,10 +866,10 @@ static int
 dpaa2_eventdev_crypto_queue_add(const struct rte_eventdev *dev,
 		const struct rte_cryptodev *cryptodev,
 		int32_t rx_queue_id,
-		const struct rte_event *ev)
+		const struct rte_event_crypto_adapter_queue_conf *conf)
 {
 	struct dpaa2_eventdev *priv = dev->data->dev_private;
-	uint8_t ev_qid = ev->queue_id;
+	uint8_t ev_qid = conf->ev.queue_id;
 	struct dpaa2_dpcon_dev *dpcon = priv->evq_info[ev_qid].dpcon;
 	int ret;
 
@@ -876,10 +877,10 @@ dpaa2_eventdev_crypto_queue_add(const struct rte_eventdev *dev,
 
 	if (rx_queue_id == -1)
 		return dpaa2_eventdev_crypto_queue_add_all(dev,
-				cryptodev, ev);
+				cryptodev, &conf->ev);
 
 	ret = dpaa2_sec_eventq_attach(cryptodev, rx_queue_id,
-				      dpcon, ev);
+				      dpcon, &conf->ev);
 	if (ret) {
 		DPAA2_EVENTDEV_ERR(
 			"dpaa2_sec_eventq_attach failed: ret: %d\n", ret);
diff --git a/drivers/event/octeontx/ssovf_evdev.c b/drivers/event/octeontx/ssovf_evdev.c
index 9d4347a16a..650266b996 100644
--- a/drivers/event/octeontx/ssovf_evdev.c
+++ b/drivers/event/octeontx/ssovf_evdev.c
@@ -746,12 +746,12 @@ static int
 ssovf_crypto_adapter_qp_add(const struct rte_eventdev *dev,
 			    const struct rte_cryptodev *cdev,
 			    int32_t queue_pair_id,
-			    const struct rte_event *event)
+			    const struct rte_event_crypto_adapter_queue_conf *conf)
 {
 	struct cpt_instance *qp;
 	uint8_t qp_id;
 
-	RTE_SET_USED(event);
+	RTE_SET_USED(conf);
 
 	if (queue_pair_id == -1) {
 		for (qp_id = 0; qp_id < cdev->data->nb_queue_pairs; qp_id++) {
diff --git a/lib/eventdev/eventdev_pmd.h b/lib/eventdev/eventdev_pmd.h
index 2c74332c4a..e49ff23db5 100644
--- a/lib/eventdev/eventdev_pmd.h
+++ b/lib/eventdev/eventdev_pmd.h
@@ -910,6 +910,7 @@ rte_event_pmd_selftest_seqn(struct rte_mbuf *mbuf)
 }
 
 struct rte_cryptodev;
+struct rte_event_crypto_adapter_queue_conf;
 
 /**
  * This API may change without prior notice
@@ -964,11 +965,11 @@ typedef int (*eventdev_crypto_adapter_caps_get_t)
  *   - <0: Error code returned by the driver function.
  *
  */
-typedef int (*eventdev_crypto_adapter_queue_pair_add_t)
-			(const struct rte_eventdev *dev,
-			 const struct rte_cryptodev *cdev,
-			 int32_t queue_pair_id,
-			 const struct rte_event *event);
+typedef int (*eventdev_crypto_adapter_queue_pair_add_t)(
+		const struct rte_eventdev *dev,
+		const struct rte_cryptodev *cdev,
+		int32_t queue_pair_id,
+		const struct rte_event_crypto_adapter_queue_conf *queue_conf);
 
 
 /**
@@ -1077,6 +1078,27 @@ typedef int (*eventdev_crypto_adapter_stats_reset)
 			(const struct rte_eventdev *dev,
 			 const struct rte_cryptodev *cdev);
 
+struct rte_event_crypto_adapter_vector_limits;
+/**
+ * Get event vector limits for a given event, crypto device pair.
+ *
+ * @param dev
+ *   Event device pointer
+ *
+ * @param cdev
+ *   Crypto device pointer
+ *
+ * @param[out] limits
+ *   Pointer to the limits structure to be filled.
+ *
+ * @return
+ *   - 0: Success.
+ *   - <0: Error code returned by the driver function.
+ */
+typedef int (*eventdev_crypto_adapter_vector_limits_get_t)(
+	const struct rte_eventdev *dev, const struct rte_cryptodev *cdev,
+	struct rte_event_crypto_adapter_vector_limits *limits);
+
 /**
  * Retrieve the event device's eth Tx adapter capabilities.
  *
@@ -1402,6 +1424,9 @@ struct eventdev_ops {
 	/**< Get crypto stats */
 	eventdev_crypto_adapter_stats_reset crypto_adapter_stats_reset;
 	/**< Reset crypto stats */
+	eventdev_crypto_adapter_vector_limits_get_t
+		crypto_adapter_vector_limits_get;
+	/**< Get event vector limits for the crypto adapter */
 
 	eventdev_eth_rx_adapter_q_stats_get eth_rx_adapter_queue_stats_get;
 	/**< Get ethernet Rx queue stats */
diff --git a/lib/eventdev/eventdev_trace.h b/lib/eventdev/eventdev_trace.h
index 5ec43d80ee..d48cd58850 100644
--- a/lib/eventdev/eventdev_trace.h
+++ b/lib/eventdev/eventdev_trace.h
@@ -18,6 +18,7 @@ extern "C" {
 #include <rte_trace_point.h>
 
 #include "rte_eventdev.h"
+#include "rte_event_crypto_adapter.h"
 #include "rte_event_eth_rx_adapter.h"
 #include "rte_event_timer_adapter.h"
 
@@ -271,11 +272,12 @@ RTE_TRACE_POINT(
 RTE_TRACE_POINT(
 	rte_eventdev_trace_crypto_adapter_queue_pair_add,
 	RTE_TRACE_POINT_ARGS(uint8_t adptr_id, uint8_t cdev_id,
-		const void *event, int32_t queue_pair_id),
+		int32_t queue_pair_id,
+		const struct rte_event_crypto_adapter_queue_conf *conf),
 	rte_trace_point_emit_u8(adptr_id);
 	rte_trace_point_emit_u8(cdev_id);
 	rte_trace_point_emit_i32(queue_pair_id);
-	rte_trace_point_emit_ptr(event);
+	rte_trace_point_emit_ptr(conf);
 )
 
 RTE_TRACE_POINT(
diff --git a/lib/eventdev/rte_event_crypto_adapter.c b/lib/eventdev/rte_event_crypto_adapter.c
index a8ef5bac06..49e5305800 100644
--- a/lib/eventdev/rte_event_crypto_adapter.c
+++ b/lib/eventdev/rte_event_crypto_adapter.c
@@ -921,11 +921,12 @@ int
 rte_event_crypto_adapter_queue_pair_add(uint8_t id,
 			uint8_t cdev_id,
 			int32_t queue_pair_id,
-			const struct rte_event *event)
+			const struct rte_event_crypto_adapter_queue_conf *conf)
 {
+	struct rte_event_crypto_adapter_vector_limits limits;
 	struct event_crypto_adapter *adapter;
-	struct rte_eventdev *dev;
 	struct crypto_device_info *dev_info;
+	struct rte_eventdev *dev;
 	uint32_t cap;
 	int ret;
 
@@ -950,11 +951,49 @@ rte_event_crypto_adapter_queue_pair_add(uint8_t id,
 		return ret;
 	}
 
-	if ((cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND) &&
-	    (event == NULL)) {
-		RTE_EDEV_LOG_ERR("Conf value can not be NULL for dev_id=%u",
-				  cdev_id);
-		return -EINVAL;
+	if (conf == NULL) {
+		if (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND) {
+			RTE_EDEV_LOG_ERR("Conf value can not be NULL for dev_id=%u",
+					 cdev_id);
+			return -EINVAL;
+		}
+	} else {
+		if (conf->flags & RTE_EVENT_CRYPTO_ADAPTER_EVENT_VECTOR) {
+			if ((cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_EVENT_VECTOR) == 0) {
+				RTE_EDEV_LOG_ERR("Event vectorization is not supported,"
+						 "dev %" PRIu8 " cdev %" PRIu8, id,
+						 cdev_id);
+				return -ENOTSUP;
+			}
+
+			ret = rte_event_crypto_adapter_vector_limits_get(
+				adapter->eventdev_id, cdev_id, &limits);
+			if (ret < 0) {
+				RTE_EDEV_LOG_ERR("Failed to get event device vector "
+						 "limits, dev %" PRIu8 " cdev %" PRIu8,
+						 id, cdev_id);
+				return -EINVAL;
+			}
+
+			if (conf->vector_sz < limits.min_sz ||
+			    conf->vector_sz > limits.max_sz ||
+			    conf->vector_timeout_ns < limits.min_timeout_ns ||
+			    conf->vector_timeout_ns > limits.max_timeout_ns ||
+			    conf->vector_mp == NULL) {
+				RTE_EDEV_LOG_ERR("Invalid event vector configuration,"
+						" dev %" PRIu8 " cdev %" PRIu8,
+						id, cdev_id);
+				return -EINVAL;
+			}
+
+			if (conf->vector_mp->elt_size < (sizeof(struct rte_event_vector) +
+			    (sizeof(uintptr_t) * conf->vector_sz))) {
+				RTE_EDEV_LOG_ERR("Invalid event vector configuration,"
+						" dev %" PRIu8 " cdev %" PRIu8,
+						id, cdev_id);
+				return -EINVAL;
+			}
+		}
 	}
 
 	dev_info = &adapter->cdevs[cdev_id];
@@ -989,7 +1028,7 @@ rte_event_crypto_adapter_queue_pair_add(uint8_t id,
 		ret = (*dev->dev_ops->crypto_adapter_queue_pair_add)(dev,
 				dev_info->dev,
 				queue_pair_id,
-				event);
+				conf);
 		if (ret)
 			return ret;
 
@@ -1029,8 +1068,8 @@ rte_event_crypto_adapter_queue_pair_add(uint8_t id,
 		rte_service_component_runstate_set(adapter->service_id, 1);
 	}
 
-	rte_eventdev_trace_crypto_adapter_queue_pair_add(id, cdev_id, event,
-		queue_pair_id);
+	rte_eventdev_trace_crypto_adapter_queue_pair_add(id, cdev_id,
+		queue_pair_id, conf);
 	return 0;
 }
 
@@ -1288,3 +1327,49 @@ rte_event_crypto_adapter_event_port_get(uint8_t id, uint8_t *event_port_id)
 
 	return 0;
 }
+
+int
+rte_event_crypto_adapter_vector_limits_get(
+	uint8_t dev_id, uint16_t cdev_id,
+	struct rte_event_crypto_adapter_vector_limits *limits)
+{
+	struct rte_cryptodev *cdev;
+	struct rte_eventdev *dev;
+	uint32_t cap;
+	int ret;
+
+	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
+
+	if (!rte_cryptodev_is_valid_dev(cdev_id)) {
+		RTE_EDEV_LOG_ERR("Invalid dev_id=%" PRIu8, cdev_id);
+		return -EINVAL;
+	}
+
+	if (limits == NULL) {
+		RTE_EDEV_LOG_ERR("Invalid limits storage provided");
+		return -EINVAL;
+	}
+
+	dev = &rte_eventdevs[dev_id];
+	cdev = rte_cryptodev_pmd_get_dev(cdev_id);
+
+	ret = rte_event_crypto_adapter_caps_get(dev_id, cdev_id, &cap);
+	if (ret) {
+		RTE_EDEV_LOG_ERR("Failed to get adapter caps edev %" PRIu8
+				 "cdev %" PRIu16, dev_id, cdev_id);
+		return ret;
+	}
+
+	if (!(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_EVENT_VECTOR)) {
+		RTE_EDEV_LOG_ERR("Event vectorization is not supported,"
+				 "dev %" PRIu8 " cdev %" PRIu8, dev_id, cdev_id);
+		return -ENOTSUP;
+	}
+
+	RTE_FUNC_PTR_OR_ERR_RET(
+		*dev->dev_ops->crypto_adapter_vector_limits_get,
+		-ENOTSUP);
+
+	return dev->dev_ops->crypto_adapter_vector_limits_get(
+		dev, cdev, limits);
+}
diff --git a/lib/eventdev/rte_event_crypto_adapter.h b/lib/eventdev/rte_event_crypto_adapter.h
index d90a19e72c..83d154a6ce 100644
--- a/lib/eventdev/rte_event_crypto_adapter.h
+++ b/lib/eventdev/rte_event_crypto_adapter.h
@@ -253,6 +253,78 @@ struct rte_event_crypto_adapter_conf {
 	 */
 };
 
+#define RTE_EVENT_CRYPTO_ADAPTER_EVENT_VECTOR	0x1
+/**< This flag indicates that crypto operations processed on the crypto
+ * adapter need to be vectorized
+ * @see rte_event_crypto_adapter_queue_conf::flags
+ */
+
+/**
+ * Adapter queue configuration structure
+ */
+struct rte_event_crypto_adapter_queue_conf {
+	uint32_t flags;
+	/**< Flags for handling crypto operations
+	 * @see RTE_EVENT_CRYPTO_ADAPTER_EVENT_VECTOR
+	 */
+	struct rte_event ev;
+	/**< If HW supports cryptodev queue pair to event queue binding,
+	 * application is expected to fill in event information.
+	 * @see RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND
+	 */
+	uint16_t vector_sz;
+	/**< Indicates the maximum number of crypto operations to combine and
+	 * form a vector.
+	 * @see rte_event_crypto_adapter_vector_limits::min_sz
+	 * @see rte_event_crypto_adapter_vector_limits::max_sz
+	 * Valid when RTE_EVENT_CRYPTO_ADAPTER_EVENT_VECTOR flag is set in
+	 * @see rte_event_crypto_adapter_queue_conf::flags
+	 */
+	uint64_t vector_timeout_ns;
+	/**<
+	 * Indicates the maximum number of nanoseconds to wait for aggregating
+	 * crypto operations. Should be within vectorization limits of the
+	 * adapter
+	 * @see rte_event_crypto_adapter_vector_limits::min_timeout_ns
+	 * @see rte_event_crypto_adapter_vector_limits::max_timeout_ns
+	 * Valid when RTE_EVENT_CRYPTO_ADAPTER_EVENT_VECTOR flag is set in
+	 * @see rte_event_crypto_adapter_queue_conf::flags
+	 */
+	struct rte_mempool *vector_mp;
+	/**< Indicates the mempool that should be used for allocating
+	 * rte_event_vector container.
+	 * Should be created by using `rte_event_vector_pool_create`.
+	 * Valid when RTE_EVENT_CRYPTO_ADAPTER_EVENT_VECTOR flag is set in
+	 * @see rte_event_crypto_adapter_queue_conf::flags.
+	 */
+};
+
+/**
+ * A structure used to retrieve event crypto adapter vector limits.
+ */
+struct rte_event_crypto_adapter_vector_limits {
+	uint16_t min_sz;
+	/**< Minimum vector limit configurable.
+	 * @see rte_event_crypto_adapter_queue_conf::vector_sz
+	 */
+	uint16_t max_sz;
+	/**< Maximum vector limit configurable.
+	 * @see rte_event_crypto_adapter_queue_conf::vector_sz
+	 */
+	uint8_t log2_sz;
+	/**< True if the size configured should be in log2.
+	 * @see rte_event_crypto_adapter_queue_conf::vector_sz
+	 */
+	uint64_t min_timeout_ns;
+	/**< Minimum vector timeout configurable.
+	 * @see rte_event_crypto_adapter_queue_conf::vector_timeout_ns
+	 */
+	uint64_t max_timeout_ns;
+	/**< Maximum vector timeout configurable.
+	 * @see rte_event_crypto_adapter_queue_conf::vector_timeout_ns
+	 */
+};
+
 /**
  * Function type used for adapter configuration callback. The callback is
  * used to fill in members of the struct rte_event_crypto_adapter_conf, this
@@ -392,10 +464,9 @@ rte_event_crypto_adapter_free(uint8_t id);
  *  Cryptodev queue pair identifier. If queue_pair_id is set -1,
  *  adapter adds all the pre configured queue pairs to the instance.
  *
- * @param event
- *  if HW supports cryptodev queue pair to event queue binding, application is
- *  expected to fill in event information, else it will be NULL.
- *  @see RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND
+ * @param conf
+ *  Additional configuration structure of type
+ *  *rte_event_crypto_adapter_queue_conf*
  *
  * @return
  *  - 0: Success, queue pair added correctly.
@@ -405,7 +476,7 @@ int
 rte_event_crypto_adapter_queue_pair_add(uint8_t id,
 			uint8_t cdev_id,
 			int32_t queue_pair_id,
-			const struct rte_event *event);
+			const struct rte_event_crypto_adapter_queue_conf *conf);
 
 /**
  * Delete a queue pair from an event crypto adapter.
@@ -523,6 +594,26 @@ rte_event_crypto_adapter_service_id_get(uint8_t id, uint32_t *service_id);
 int
 rte_event_crypto_adapter_event_port_get(uint8_t id, uint8_t *event_port_id);
 
+/**
+ * Retrieve vector limits for a given event dev and crypto dev pair.
+ * @see rte_event_crypto_adapter_vector_limits
+ *
+ * @param dev_id
+ *  Event device identifier.
+ * @param cdev_id
+ *  Crypto device identifier.
+ * @param [out] limits
+ *  A pointer to rte_event_crypto_adapter_vector_limits structure that has to
+ *  be filled.
+ *
+ * @return
+ *  - 0: Success.
+ *  - <0: Error code on failure.
+ */
+int rte_event_crypto_adapter_vector_limits_get(
+	uint8_t dev_id, uint16_t cdev_id,
+	struct rte_event_crypto_adapter_vector_limits *limits);
+
 /**
  * Enqueue a burst of crypto operations as event objects supplied in *rte_event*
  * structure on an event crypto adapter designated by its event *dev_id* through
diff --git a/lib/eventdev/rte_eventdev.h b/lib/eventdev/rte_eventdev.h
index 88e7c809c0..60e9043ac4 100644
--- a/lib/eventdev/rte_eventdev.h
+++ b/lib/eventdev/rte_eventdev.h
@@ -1220,6 +1220,9 @@ struct rte_event_vector {
 #define RTE_EVENT_TYPE_ETH_RX_ADAPTER_VECTOR                                   \
 	(RTE_EVENT_TYPE_VECTOR | RTE_EVENT_TYPE_ETH_RX_ADAPTER)
 /**< The event vector generated from eth Rx adapter. */
+#define RTE_EVENT_TYPE_CRYPTODEV_VECTOR                                        \
+	(RTE_EVENT_TYPE_VECTOR | RTE_EVENT_TYPE_CRYPTODEV)
+/**< The event vector generated from cryptodev adapter. */
 
 #define RTE_EVENT_TYPE_MAX              0x10
 /**< Maximum number of event types */
@@ -1437,6 +1440,11 @@ rte_event_timer_adapter_caps_get(uint8_t dev_id, uint32_t *caps);
  * the private data information along with the crypto session.
  */
 
+#define RTE_EVENT_CRYPTO_ADAPTER_CAP_EVENT_VECTOR   0x10
+/**< Flag indicates HW is capable of aggregating processed
+ * crypto operations into rte_event_vector.
+ */
+
 /**
  * Retrieve the event device's crypto adapter capabilities for the
  * specified cryptodev device
-- 
2.25.1


^ permalink raw reply	[flat|nested] 36+ messages in thread

* [PATCH v3 2/2] crypto/cnxk: add vectorization for event crypto
  2022-10-01  0:42     ` [PATCH v3 0/2] Vector support for event crypto adapter Volodymyr Fialko
  2022-10-01  0:42       ` [PATCH v3 1/2] eventdev: introduce event cryptodev vector type Volodymyr Fialko
@ 2022-10-01  0:42       ` Volodymyr Fialko
  2022-10-01  3:42       ` [PATCH v3 0/2] Vector support for event crypto adapter Akhil Goyal
  2 siblings, 0 replies; 36+ messages in thread
From: Volodymyr Fialko @ 2022-10-01  0:42 UTC (permalink / raw)
  To: dev, Ankur Dwivedi, Anoob Joseph, Tejasree Kondoj, Ray Kinsella,
	Pavan Nikhilesh, Shijith Thotton
  Cc: jerinj, gakhil, abhinandan.gujjar, Volodymyr Fialko

Add support for vector aggregation of crypto operations for cn10k.
Crypto operations will be grouped by the sub event type, flow id, scheduler
type and queue id fields from rte_event_crypto_metadata::response_info.

Signed-off-by: Volodymyr Fialko <vfialko@marvell.com>
---
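The grouping rule above amounts to the comparison below between the response
events of two operations; this helper is only an illustration and does not
appear in the driver, which packs these fields into the CPT w2 word instead:

	static inline bool
	same_vector_group(const struct rte_event *a, const struct rte_event *b)
	{
		/* Ops are aggregated only when every field that forms the
		 * cn10k w2 word matches (and they target the same queue pair).
		 */
		return a->sub_event_type == b->sub_event_type &&
		       a->flow_id == b->flow_id &&
		       a->sched_type == b->sched_type &&
		       a->queue_id == b->queue_id;
	}
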
 drivers/crypto/cnxk/cn10k_cryptodev_ops.c | 398 +++++++++++++++++++---
 drivers/crypto/cnxk/cn10k_cryptodev_ops.h |   2 +
 drivers/crypto/cnxk/cnxk_cryptodev_ops.h  |   9 +-
 drivers/crypto/cnxk/version.map           |   1 +
 drivers/event/cnxk/cn10k_eventdev.c       |  31 +-
 drivers/event/cnxk/cn10k_worker.h         |   6 +-
 drivers/event/cnxk/cn9k_eventdev.c        |   7 +-
 drivers/event/cnxk/cnxk_eventdev.h        |   4 +-
 drivers/event/cnxk/cnxk_eventdev_adptr.c  |  17 +-
 9 files changed, 415 insertions(+), 60 deletions(-)

diff --git a/drivers/crypto/cnxk/cn10k_cryptodev_ops.c b/drivers/crypto/cnxk/cn10k_cryptodev_ops.c
index 586941cd70..7bbe8726e3 100644
--- a/drivers/crypto/cnxk/cn10k_cryptodev_ops.c
+++ b/drivers/crypto/cnxk/cn10k_cryptodev_ops.c
@@ -19,6 +19,25 @@
 
 #include "roc_api.h"
 
+#define PKTS_PER_LOOP	32
+#define PKTS_PER_STEORL 16
+
+/* Holds information required to send crypto operations in one burst */
+struct ops_burst {
+	struct rte_crypto_op *op[PKTS_PER_LOOP];
+	uint64_t w2[PKTS_PER_LOOP];
+	struct cn10k_sso_hws *ws;
+	struct cnxk_cpt_qp *qp;
+	uint16_t nb_ops;
+};
+
+/* Holds information required to send vector of operations */
+struct vec_request {
+	struct cpt_inflight_req *req;
+	struct rte_event_vector *vec;
+	uint64_t w2;
+};
+
 static inline struct cnxk_se_sess *
 cn10k_cpt_sym_temp_sess_create(struct cnxk_cpt_qp *qp, struct rte_crypto_op *op)
 {
@@ -164,9 +183,6 @@ cn10k_cpt_fill_inst(struct cnxk_cpt_qp *qp, struct rte_crypto_op *ops[],
 	return 1;
 }
 
-#define PKTS_PER_LOOP	32
-#define PKTS_PER_STEORL 16
-
 static uint16_t
 cn10k_cpt_enqueue_burst(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops)
 {
@@ -267,9 +283,9 @@ cn10k_cpt_crypto_adapter_ev_mdata_set(struct rte_cryptodev *dev __rte_unused,
 	union rte_event_crypto_metadata *ec_mdata = mdata;
 	struct rte_event *rsp_info;
 	struct cnxk_cpt_qp *qp;
+	uint64_t w2, tag_type;
 	uint8_t cdev_id;
 	int16_t qp_id;
-	uint64_t w2;
 
 	/* Get queue pair */
 	cdev_id = ec_mdata->request_info.cdev_id;
@@ -277,9 +293,9 @@ cn10k_cpt_crypto_adapter_ev_mdata_set(struct rte_cryptodev *dev __rte_unused,
 	qp = rte_cryptodevs[cdev_id].data->queue_pairs[qp_id];
 
 	/* Prepare w2 */
+	tag_type = qp->ca.vector_sz ? RTE_EVENT_TYPE_CRYPTODEV_VECTOR : RTE_EVENT_TYPE_CRYPTODEV;
 	rsp_info = &ec_mdata->response_info;
-	w2 = CNXK_CPT_INST_W2((RTE_EVENT_TYPE_CRYPTODEV << 28) |
-				      (rsp_info->sub_event_type << 20) |
+	w2 = CNXK_CPT_INST_W2((tag_type << 28) | (rsp_info->sub_event_type << 20) |
 				      rsp_info->flow_id,
 			      rsp_info->sched_type, rsp_info->queue_id, 0);
 
@@ -373,18 +389,236 @@ cn10k_ca_meta_info_extract(struct rte_crypto_op *op,
 	return 0;
 }
 
+static inline void
+cn10k_cpt_vec_inst_fill(struct vec_request *vec_req, struct cpt_inst_s *inst,
+			struct cnxk_cpt_qp *qp)
+{
+	const union cpt_res_s res = {.cn10k.compcode = CPT_COMP_NOT_DONE};
+	struct cpt_inflight_req *infl_req = vec_req->req;
+
+	const union cpt_inst_w4 w4 = {
+		.s.opcode_major = ROC_SE_MAJOR_OP_MISC,
+		.s.opcode_minor = ROC_SE_MISC_MINOR_OP_PASSTHROUGH,
+		.s.param1 = 1,
+		.s.param2 = 1,
+		.s.dlen = 0,
+	};
+
+	infl_req->vec = vec_req->vec;
+	infl_req->qp = qp;
+
+	inst->res_addr = (uint64_t)&infl_req->res;
+	__atomic_store_n(&infl_req->res.u64[0], res.u64[0], __ATOMIC_RELAXED);
+
+	inst->w0.u64 = 0;
+	inst->w2.u64 = vec_req->w2;
+	inst->w3.u64 = CNXK_CPT_INST_W3(1, infl_req);
+	inst->w4.u64 = w4.u64;
+	inst->w7.u64 = ROC_CPT_DFLT_ENG_GRP_SE << 61;
+}
+
+static void
+cn10k_cpt_vec_pkt_submission_timeout_handle(void)
+{
+	plt_dp_err("Vector packet submission timedout");
+	abort();
+}
+
+static inline void
+cn10k_cpt_vec_submit(struct vec_request vec_tbl[], uint16_t vec_tbl_len, struct cnxk_cpt_qp *qp)
+{
+	uint64_t lmt_base, lmt_arg, lmt_id, io_addr;
+	union cpt_fc_write_s fc;
+	struct cpt_inst_s *inst;
+	uint16_t burst_size;
+	uint64_t *fc_addr;
+	int i;
+
+	if (vec_tbl_len == 0)
+		return;
+
+	const uint32_t fc_thresh = qp->lmtline.fc_thresh;
+	/*
+	 * Use 10 mins timeout for the poll. It is not possible to recover from partial submission
+	 * of vector packet. Actual packets for processing are submitted to CPT prior to this
+	 * routine. Hence, any failure for submission of vector packet would indicate an
+	 * unrecoverable error for the application.
+	 */
+	const uint64_t timeout = rte_get_timer_cycles() + 10 * 60 * rte_get_timer_hz();
+
+	lmt_base = qp->lmtline.lmt_base;
+	io_addr = qp->lmtline.io_addr;
+	fc_addr = qp->lmtline.fc_addr;
+	ROC_LMT_BASE_ID_GET(lmt_base, lmt_id);
+	inst = (struct cpt_inst_s *)lmt_base;
+
+again:
+	burst_size = RTE_MIN(PKTS_PER_STEORL, vec_tbl_len);
+	for (i = 0; i < burst_size; i++)
+		cn10k_cpt_vec_inst_fill(&vec_tbl[i], &inst[i * 2], qp);
+
+	do {
+		fc.u64[0] = __atomic_load_n(fc_addr, __ATOMIC_RELAXED);
+		if (likely(fc.s.qsize < fc_thresh))
+			break;
+		if (unlikely(rte_get_timer_cycles() > timeout))
+			cn10k_cpt_vec_pkt_submission_timeout_handle();
+	} while (true);
+
+	lmt_arg = ROC_CN10K_CPT_LMT_ARG | (i - 1) << 12 | lmt_id;
+	roc_lmt_submit_steorl(lmt_arg, io_addr);
+
+	rte_io_wmb();
+
+	vec_tbl_len -= i;
+
+	if (vec_tbl_len > 0) {
+		vec_tbl += i;
+		goto again;
+	}
+}
+
+static inline int
+ca_lmtst_vec_submit(struct ops_burst *burst, struct vec_request vec_tbl[], uint16_t *vec_tbl_len)
+{
+	struct cpt_inflight_req *infl_reqs[PKTS_PER_LOOP];
+	uint64_t lmt_base, lmt_arg, io_addr;
+	uint16_t lmt_id, len = *vec_tbl_len;
+	struct cpt_inst_s *inst, *inst_base;
+	struct cpt_inflight_req *infl_req;
+	struct rte_event_vector *vec;
+	union cpt_fc_write_s fc;
+	struct cnxk_cpt_qp *qp;
+	uint64_t *fc_addr;
+	int ret, i, vi;
+
+	qp = burst->qp;
+
+	lmt_base = qp->lmtline.lmt_base;
+	io_addr = qp->lmtline.io_addr;
+	fc_addr = qp->lmtline.fc_addr;
+
+	const uint32_t fc_thresh = qp->lmtline.fc_thresh;
+
+	ROC_LMT_BASE_ID_GET(lmt_base, lmt_id);
+	inst_base = (struct cpt_inst_s *)lmt_base;
+
+#ifdef CNXK_CRYPTODEV_DEBUG
+	if (unlikely(!qp->ca.enabled)) {
+		rte_errno = EINVAL;
+		return 0;
+	}
+#endif
+
+	/* Perform fc check before putting packets into vectors */
+	fc.u64[0] = __atomic_load_n(fc_addr, __ATOMIC_RELAXED);
+	if (unlikely(fc.s.qsize > fc_thresh)) {
+		rte_errno = EAGAIN;
+		return 0;
+	}
+
+	if (unlikely(rte_mempool_get_bulk(qp->ca.req_mp, (void **)infl_reqs, burst->nb_ops))) {
+		rte_errno = ENOMEM;
+		return 0;
+	}
+
+	for (i = 0; i < burst->nb_ops; i++) {
+		inst = &inst_base[2 * i];
+		infl_req = infl_reqs[i];
+		infl_req->op_flags = 0;
+
+		ret = cn10k_cpt_fill_inst(qp, &burst->op[i], inst, infl_req);
+		if (unlikely(ret != 1)) {
+			plt_cpt_dbg("Could not process op: %p", burst->op[i]);
+			if (i != 0)
+				goto submit;
+			else
+				goto put;
+		}
+
+		infl_req->res.cn10k.compcode = CPT_COMP_NOT_DONE;
+		infl_req->qp = qp;
+		inst->w3.u64 = 0x1;
+
+		/* Lookup for existing vector by w2 */
+		for (vi = len - 1; vi >= 0; vi--) {
+			if (vec_tbl[vi].w2 != burst->w2[i])
+				continue;
+			vec = vec_tbl[vi].vec;
+			if (unlikely(vec->nb_elem == qp->ca.vector_sz))
+				continue;
+			vec->ptrs[vec->nb_elem++] = infl_req;
+			goto next_op; /* continue outer loop */
+		}
+
+		/* No available vectors found, allocate a new one */
+		if (unlikely(rte_mempool_get(qp->ca.vector_mp, (void **)&vec_tbl[len].vec))) {
+			rte_errno = ENOMEM;
+			if (i != 0)
+				goto submit;
+			else
+				goto put;
+		}
+		/* Also preallocate the in-flight request that will be used to
+		 * submit the misc passthrough instruction.
+		 */
+		if (unlikely(rte_mempool_get(qp->ca.req_mp, (void **)&vec_tbl[len].req))) {
+			rte_mempool_put(qp->ca.vector_mp, vec_tbl[len].vec);
+			rte_errno = ENOMEM;
+			if (i != 0)
+				goto submit;
+			else
+				goto put;
+		}
+		vec_tbl[len].w2 = burst->w2[i];
+		vec_tbl[len].vec->ptrs[0] = infl_req;
+		vec_tbl[len].vec->nb_elem = 1;
+		len++;
+
+next_op:;
+	}
+
+	/* Submit operations in burst */
+submit:
+	if (CNXK_TT_FROM_TAG(burst->ws->gw_rdata) == SSO_TT_ORDERED)
+		roc_sso_hws_head_wait(burst->ws->base);
+
+	if (i > PKTS_PER_STEORL) {
+		lmt_arg = ROC_CN10K_CPT_LMT_ARG | (PKTS_PER_STEORL - 1) << 12 | (uint64_t)lmt_id;
+		roc_lmt_submit_steorl(lmt_arg, io_addr);
+		lmt_arg = ROC_CN10K_CPT_LMT_ARG | (i - PKTS_PER_STEORL - 1) << 12 |
+			  (uint64_t)(lmt_id + PKTS_PER_STEORL);
+		roc_lmt_submit_steorl(lmt_arg, io_addr);
+	} else {
+		lmt_arg = ROC_CN10K_CPT_LMT_ARG | (i - 1) << 12 | (uint64_t)lmt_id;
+		roc_lmt_submit_steorl(lmt_arg, io_addr);
+	}
+
+	rte_io_wmb();
+
+put:
+	if (i != burst->nb_ops)
+		rte_mempool_put_bulk(qp->ca.req_mp, (void *)&infl_reqs[i], burst->nb_ops - i);
+
+	*vec_tbl_len = len;
+
+	return i;
+}
+
 static inline uint16_t
-ca_lmtst_burst_submit(struct cn10k_sso_hws *ws, uint64_t w2[], struct cnxk_cpt_qp *qp,
-		      struct rte_crypto_op *op[], uint16_t nb_ops)
+ca_lmtst_burst_submit(struct ops_burst *burst)
 {
 	struct cpt_inflight_req *infl_reqs[PKTS_PER_LOOP];
 	uint64_t lmt_base, lmt_arg, io_addr;
 	struct cpt_inst_s *inst, *inst_base;
 	struct cpt_inflight_req *infl_req;
 	union cpt_fc_write_s fc;
+	struct cnxk_cpt_qp *qp;
 	uint64_t *fc_addr;
 	uint16_t lmt_id;
-	int ret, i;
+	int ret, i, j;
+
+	qp = burst->qp;
 
 	lmt_base = qp->lmtline.lmt_base;
 	io_addr = qp->lmtline.io_addr;
@@ -395,24 +629,26 @@ ca_lmtst_burst_submit(struct cn10k_sso_hws *ws, uint64_t w2[], struct cnxk_cpt_q
 	ROC_LMT_BASE_ID_GET(lmt_base, lmt_id);
 	inst_base = (struct cpt_inst_s *)lmt_base;
 
+#ifdef CNXK_CRYPTODEV_DEBUG
 	if (unlikely(!qp->ca.enabled)) {
 		rte_errno = EINVAL;
 		return 0;
 	}
+#endif
 
-	if (unlikely(rte_mempool_get_bulk(qp->ca.req_mp, (void **)infl_reqs, nb_ops))) {
+	if (unlikely(rte_mempool_get_bulk(qp->ca.req_mp, (void **)infl_reqs, burst->nb_ops))) {
 		rte_errno = ENOMEM;
 		return 0;
 	}
 
-	for (i = 0; i < nb_ops; i++) {
+	for (i = 0; i < burst->nb_ops; i++) {
 		inst = &inst_base[2 * i];
 		infl_req = infl_reqs[i];
 		infl_req->op_flags = 0;
 
-		ret = cn10k_cpt_fill_inst(qp, &op[i], inst, infl_req);
+		ret = cn10k_cpt_fill_inst(qp, &burst->op[i], inst, infl_req);
 		if (unlikely(ret != 1)) {
-			plt_dp_dbg("Could not process op: %p", op[i]);
+			plt_dp_dbg("Could not process op: %p", burst->op[i]);
 			if (i != 0)
 				goto submit;
 			else
@@ -423,20 +659,25 @@ ca_lmtst_burst_submit(struct cn10k_sso_hws *ws, uint64_t w2[], struct cnxk_cpt_q
 		infl_req->qp = qp;
 		inst->w0.u64 = 0;
 		inst->res_addr = (uint64_t)&infl_req->res;
-		inst->w2.u64 = w2[i];
+		inst->w2.u64 = burst->w2[i];
 		inst->w3.u64 = CNXK_CPT_INST_W3(1, infl_req);
 	}
 
 	fc.u64[0] = __atomic_load_n(fc_addr, __ATOMIC_RELAXED);
 	if (unlikely(fc.s.qsize > fc_thresh)) {
 		rte_errno = EAGAIN;
+		for (j = 0; j < i; j++) {
+			infl_req = infl_reqs[j];
+			if (unlikely(infl_req->op_flags & CPT_OP_FLAGS_METABUF))
+				rte_mempool_put(qp->meta_info.pool, infl_req->mdata);
+		}
 		i = 0;
 		goto put;
 	}
 
 submit:
-	if (CNXK_TT_FROM_TAG(ws->gw_rdata) == SSO_TT_ORDERED)
-		roc_sso_hws_head_wait(ws->base);
+	if (CNXK_TT_FROM_TAG(burst->ws->gw_rdata) == SSO_TT_ORDERED)
+		roc_sso_hws_head_wait(burst->ws->base);
 
 	if (i > PKTS_PER_STEORL) {
 		lmt_arg = ROC_CN10K_CPT_LMT_ARG | (PKTS_PER_STEORL - 1) << 12 | (uint64_t)lmt_id;
@@ -452,8 +693,8 @@ ca_lmtst_burst_submit(struct cn10k_sso_hws *ws, uint64_t w2[], struct cnxk_cpt_q
 	rte_io_wmb();
 
 put:
-	if (unlikely(i != nb_ops))
-		rte_mempool_put_bulk(qp->ca.req_mp, (void *)&infl_reqs[i], nb_ops - i);
+	if (unlikely(i != burst->nb_ops))
+		rte_mempool_put_bulk(qp->ca.req_mp, (void *)&infl_reqs[i], burst->nb_ops - i);
 
 	return i;
 }
@@ -461,42 +702,76 @@ ca_lmtst_burst_submit(struct cn10k_sso_hws *ws, uint64_t w2[], struct cnxk_cpt_q
 uint16_t __rte_hot
 cn10k_cpt_crypto_adapter_enqueue(void *ws, struct rte_event ev[], uint16_t nb_events)
 {
-	struct rte_crypto_op *ops[PKTS_PER_LOOP], *op;
-	struct cnxk_cpt_qp *qp, *curr_qp = NULL;
-	uint64_t w2s[PKTS_PER_LOOP], w2;
-	uint16_t submitted, count = 0;
-	int ret, i, ops_len = 0;
+	uint16_t submitted, count = 0, vec_tbl_len = 0;
+	struct vec_request vec_tbl[nb_events];
+	struct rte_crypto_op *op;
+	struct ops_burst burst;
+	struct cnxk_cpt_qp *qp;
+	bool is_vector = false;
+	uint64_t w2;
+	int ret, i;
+
+	burst.ws = ws;
+	burst.qp = NULL;
+	burst.nb_ops = 0;
 
 	for (i = 0; i < nb_events; i++) {
 		op = ev[i].event_ptr;
 		ret = cn10k_ca_meta_info_extract(op, &qp, &w2);
 		if (unlikely(ret)) {
 			rte_errno = EINVAL;
-			return count;
+			goto vec_submit;
 		}
 
-		if (qp != curr_qp) {
-			if (ops_len) {
-				submitted = ca_lmtst_burst_submit(ws, w2s, curr_qp, ops, ops_len);
+		/* Queue pair change check */
+		if (qp != burst.qp) {
+			if (burst.nb_ops) {
+				if (is_vector) {
+					submitted =
+						ca_lmtst_vec_submit(&burst, vec_tbl, &vec_tbl_len);
+					/*
+					 * Vector submission is required on qp change, but not in
+					 * other cases, since we could send several vectors per
+					 * lmtst instruction only for same qp
+					 */
+					cn10k_cpt_vec_submit(vec_tbl, vec_tbl_len, burst.qp);
+					vec_tbl_len = 0;
+				} else {
+					submitted = ca_lmtst_burst_submit(&burst);
+				}
 				count += submitted;
-				if (unlikely(submitted != ops_len))
-					return count;
-				ops_len = 0;
+				if (unlikely(submitted != burst.nb_ops))
+					goto vec_submit;
+				burst.nb_ops = 0;
 			}
-			curr_qp = qp;
+			is_vector = qp->ca.vector_sz;
+			burst.qp = qp;
 		}
-		w2s[ops_len] = w2;
-		ops[ops_len] = op;
-		if (++ops_len == PKTS_PER_LOOP) {
-			submitted = ca_lmtst_burst_submit(ws, w2s, curr_qp, ops, ops_len);
+		burst.w2[burst.nb_ops] = w2;
+		burst.op[burst.nb_ops] = op;
+
+		/* Max nb_ops per burst check */
+		if (++burst.nb_ops == PKTS_PER_LOOP) {
+			if (is_vector)
+				submitted = ca_lmtst_vec_submit(&burst, vec_tbl, &vec_tbl_len);
+			else
+				submitted = ca_lmtst_burst_submit(&burst);
 			count += submitted;
-			if (unlikely(submitted != ops_len))
-				return count;
-			ops_len = 0;
+			if (unlikely(submitted != burst.nb_ops))
+				goto vec_submit;
+			burst.nb_ops = 0;
 		}
 	}
-	if (ops_len)
-		count += ca_lmtst_burst_submit(ws, w2s, curr_qp, ops, ops_len);
+	/* Submit the rest of crypto operations */
+	if (burst.nb_ops) {
+		if (is_vector)
+			count += ca_lmtst_vec_submit(&burst, vec_tbl, &vec_tbl_len);
+		else
+			count += ca_lmtst_burst_submit(&burst);
+	}
+
+vec_submit:
+	cn10k_cpt_vec_submit(vec_tbl, vec_tbl_len, burst.qp);
 	return count;
 }
 
@@ -654,6 +929,49 @@ cn10k_cpt_crypto_adapter_dequeue(uintptr_t get_work1)
 	return (uintptr_t)cop;
 }
 
+uintptr_t
+cn10k_cpt_crypto_adapter_vector_dequeue(uintptr_t get_work1)
+{
+	struct cpt_inflight_req *infl_req, *vec_infl_req;
+	struct rte_mempool *meta_mp, *req_mp;
+	struct rte_event_vector *vec;
+	struct rte_crypto_op *cop;
+	struct cnxk_cpt_qp *qp;
+	union cpt_res_s res;
+	int i;
+
+	vec_infl_req = (struct cpt_inflight_req *)(get_work1);
+
+	vec = vec_infl_req->vec;
+	qp = vec_infl_req->qp;
+	meta_mp = qp->meta_info.pool;
+	req_mp = qp->ca.req_mp;
+
+#ifdef CNXK_CRYPTODEV_DEBUG
+	res.u64[0] = __atomic_load_n(&vec_infl_req->res.u64[0], __ATOMIC_RELAXED);
+	PLT_ASSERT(res.cn10k.compcode == CPT_COMP_WARN);
+	PLT_ASSERT(res.cn10k.uc_compcode == 0);
+#endif
+
+	for (i = 0; i < vec->nb_elem; i++) {
+		infl_req = vec->ptrs[i];
+		cop = infl_req->cop;
+
+		res.u64[0] = __atomic_load_n(&infl_req->res.u64[0], __ATOMIC_RELAXED);
+		cn10k_cpt_dequeue_post_process(qp, cop, infl_req, &res.cn10k);
+
+		vec->ptrs[i] = cop;
+		if (unlikely(infl_req->op_flags & CPT_OP_FLAGS_METABUF))
+			rte_mempool_put(meta_mp, infl_req->mdata);
+
+		rte_mempool_put(req_mp, infl_req);
+	}
+
+	rte_mempool_put(req_mp, vec_infl_req);
+
+	return (uintptr_t)vec;
+}
+
 static uint16_t
 cn10k_cpt_dequeue_burst(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops)
 {
diff --git a/drivers/crypto/cnxk/cn10k_cryptodev_ops.h b/drivers/crypto/cnxk/cn10k_cryptodev_ops.h
index 628d6a567c..8104310c30 100644
--- a/drivers/crypto/cnxk/cn10k_cryptodev_ops.h
+++ b/drivers/crypto/cnxk/cn10k_cryptodev_ops.h
@@ -18,5 +18,7 @@ uint16_t __rte_hot cn10k_cpt_crypto_adapter_enqueue(void *ws, struct rte_event e
 		uint16_t nb_events);
 __rte_internal
 uintptr_t cn10k_cpt_crypto_adapter_dequeue(uintptr_t get_work1);
+__rte_internal
+uintptr_t cn10k_cpt_crypto_adapter_vector_dequeue(uintptr_t get_work1);
 
 #endif /* _CN10K_CRYPTODEV_OPS_H_ */
diff --git a/drivers/crypto/cnxk/cnxk_cryptodev_ops.h b/drivers/crypto/cnxk/cnxk_cryptodev_ops.h
index ffe4ae19aa..d9ed43b40b 100644
--- a/drivers/crypto/cnxk/cnxk_cryptodev_ops.h
+++ b/drivers/crypto/cnxk/cnxk_cryptodev_ops.h
@@ -37,7 +37,10 @@ struct cpt_qp_meta_info {
 
 struct cpt_inflight_req {
 	union cpt_res_s res;
-	struct rte_crypto_op *cop;
+	union {
+		struct rte_crypto_op *cop;
+		struct rte_event_vector *vec;
+	};
 	void *mdata;
 	uint8_t op_flags;
 	void *qp;
@@ -63,6 +66,10 @@ struct crypto_adpter_info {
 	/**< Set if queue pair is added to crypto adapter */
 	struct rte_mempool *req_mp;
 	/**< CPT inflight request mempool */
+	uint16_t vector_sz;
+	/** Maximum number of cops to combine into single vector */
+	struct rte_mempool *vector_mp;
+	/** Pool for allocating rte_event_vector */
 };
 
 struct cnxk_cpt_qp {
diff --git a/drivers/crypto/cnxk/version.map b/drivers/crypto/cnxk/version.map
index 0178c416ec..4735e70550 100644
--- a/drivers/crypto/cnxk/version.map
+++ b/drivers/crypto/cnxk/version.map
@@ -5,6 +5,7 @@ INTERNAL {
 	cn9k_cpt_crypto_adapter_dequeue;
 	cn10k_cpt_crypto_adapter_enqueue;
 	cn10k_cpt_crypto_adapter_dequeue;
+	cn10k_cpt_crypto_adapter_vector_dequeue;
 
 	local: *;
 };
diff --git a/drivers/event/cnxk/cn10k_eventdev.c b/drivers/event/cnxk/cn10k_eventdev.c
index c55d69724b..742e43a5c6 100644
--- a/drivers/event/cnxk/cn10k_eventdev.c
+++ b/drivers/event/cnxk/cn10k_eventdev.c
@@ -1025,7 +1025,8 @@ cn10k_crypto_adapter_caps_get(const struct rte_eventdev *event_dev,
 	CNXK_VALID_DEV_OR_ERR_RET(cdev->device, "crypto_cn10k");
 
 	*caps = RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD |
-		RTE_EVENT_CRYPTO_ADAPTER_CAP_SESSION_PRIVATE_DATA;
+		RTE_EVENT_CRYPTO_ADAPTER_CAP_SESSION_PRIVATE_DATA |
+		RTE_EVENT_CRYPTO_ADAPTER_CAP_EVENT_VECTOR;
 
 	return 0;
 }
@@ -1039,23 +1040,20 @@ cn10k_crypto_adapter_qp_add(const struct rte_eventdev *event_dev,
 	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
 	int ret;
 
-	RTE_SET_USED(conf);
-
 	CNXK_VALID_DEV_OR_ERR_RET(event_dev->dev, "event_cn10k");
 	CNXK_VALID_DEV_OR_ERR_RET(cdev->device, "crypto_cn10k");
 
 	dev->is_ca_internal_port = 1;
 	cn10k_sso_fp_fns_set((struct rte_eventdev *)(uintptr_t)event_dev);
 
-	ret = cnxk_crypto_adapter_qp_add(event_dev, cdev, queue_pair_id);
+	ret = cnxk_crypto_adapter_qp_add(event_dev, cdev, queue_pair_id, conf);
 	cn10k_sso_set_priv_mem(event_dev, NULL, 0);
 
 	return ret;
 }
 
 static int
-cn10k_crypto_adapter_qp_del(const struct rte_eventdev *event_dev,
-			    const struct rte_cryptodev *cdev,
+cn10k_crypto_adapter_qp_del(const struct rte_eventdev *event_dev, const struct rte_cryptodev *cdev,
 			    int32_t queue_pair_id)
 {
 	CNXK_VALID_DEV_OR_ERR_RET(event_dev->dev, "event_cn10k");
@@ -1072,6 +1070,26 @@ cn10k_tim_caps_get(const struct rte_eventdev *evdev, uint64_t flags,
 				 cn10k_sso_set_priv_mem);
 }
 
+static int
+cn10k_crypto_adapter_vec_limits(const struct rte_eventdev *event_dev,
+				const struct rte_cryptodev *cdev,
+				struct rte_event_crypto_adapter_vector_limits *limits)
+{
+	CNXK_VALID_DEV_OR_ERR_RET(event_dev->dev, "event_cn10k");
+	CNXK_VALID_DEV_OR_ERR_RET(cdev->device, "crypto_cn10k");
+
+	limits->log2_sz = false;
+	limits->min_sz = 0;
+	limits->max_sz = UINT16_MAX;
+	/* Timeout is unused; in the software implementation we aggregate all
+	 * crypto operations passed to the enqueue function.
+	 */
+	limits->min_timeout_ns = 0;
+	limits->max_timeout_ns = 0;
+
+	return 0;
+}
+
 static struct eventdev_ops cn10k_sso_dev_ops = {
 	.dev_infos_get = cn10k_sso_info_get,
 	.dev_configure = cn10k_sso_dev_configure,
@@ -1109,6 +1127,7 @@ static struct eventdev_ops cn10k_sso_dev_ops = {
 	.crypto_adapter_caps_get = cn10k_crypto_adapter_caps_get,
 	.crypto_adapter_queue_pair_add = cn10k_crypto_adapter_qp_add,
 	.crypto_adapter_queue_pair_del = cn10k_crypto_adapter_qp_del,
+	.crypto_adapter_vector_limits_get = cn10k_crypto_adapter_vec_limits,
 
 	.xstats_get = cnxk_sso_xstats_get,
 	.xstats_reset = cnxk_sso_xstats_reset,
diff --git a/drivers/event/cnxk/cn10k_worker.h b/drivers/event/cnxk/cn10k_worker.h
index 41b6ba8912..7a82dd352a 100644
--- a/drivers/event/cnxk/cn10k_worker.h
+++ b/drivers/event/cnxk/cn10k_worker.h
@@ -230,6 +230,9 @@ cn10k_sso_hws_post_process(struct cn10k_sso_hws *ws, uint64_t *u64,
 	if ((flags & CPT_RX_WQE_F) &&
 	    (CNXK_EVENT_TYPE_FROM_TAG(u64[0]) == RTE_EVENT_TYPE_CRYPTODEV)) {
 		u64[1] = cn10k_cpt_crypto_adapter_dequeue(u64[1]);
+	} else if ((flags & CPT_RX_WQE_F) &&
+		   (CNXK_EVENT_TYPE_FROM_TAG(u64[0]) == RTE_EVENT_TYPE_CRYPTODEV_VECTOR)) {
+		u64[1] = cn10k_cpt_crypto_adapter_vector_dequeue(u64[1]);
 	} else if (CNXK_EVENT_TYPE_FROM_TAG(u64[0]) == RTE_EVENT_TYPE_ETHDEV) {
 		uint8_t port = CNXK_SUB_EVENT_FROM_TAG(u64[0]);
 		uint64_t mbuf;
@@ -272,8 +275,7 @@ cn10k_sso_hws_post_process(struct cn10k_sso_hws *ws, uint64_t *u64,
 			cn10k_sso_process_tstamp(u64[1], mbuf,
 						 ws->tstamp[port]);
 		u64[1] = mbuf;
-	} else if (CNXK_EVENT_TYPE_FROM_TAG(u64[0]) ==
-		   RTE_EVENT_TYPE_ETHDEV_VECTOR) {
+	} else if (CNXK_EVENT_TYPE_FROM_TAG(u64[0]) == RTE_EVENT_TYPE_ETHDEV_VECTOR) {
 		uint8_t port = CNXK_SUB_EVENT_FROM_TAG(u64[0]);
 		__uint128_t vwqe_hdr = *(__uint128_t *)u64[1];
 
diff --git a/drivers/event/cnxk/cn9k_eventdev.c b/drivers/event/cnxk/cn9k_eventdev.c
index fca7b5f3a5..f5a42a86f8 100644
--- a/drivers/event/cnxk/cn9k_eventdev.c
+++ b/drivers/event/cnxk/cn9k_eventdev.c
@@ -1131,23 +1131,20 @@ cn9k_crypto_adapter_qp_add(const struct rte_eventdev *event_dev,
 	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
 	int ret;
 
-	RTE_SET_USED(conf);
-
 	CNXK_VALID_DEV_OR_ERR_RET(event_dev->dev, "event_cn9k");
 	CNXK_VALID_DEV_OR_ERR_RET(cdev->device, "crypto_cn9k");
 
 	dev->is_ca_internal_port = 1;
 	cn9k_sso_fp_fns_set((struct rte_eventdev *)(uintptr_t)event_dev);
 
-	ret = cnxk_crypto_adapter_qp_add(event_dev, cdev, queue_pair_id);
+	ret = cnxk_crypto_adapter_qp_add(event_dev, cdev, queue_pair_id, conf);
 	cn9k_sso_set_priv_mem(event_dev, NULL, 0);
 
 	return ret;
 }
 
 static int
-cn9k_crypto_adapter_qp_del(const struct rte_eventdev *event_dev,
-			   const struct rte_cryptodev *cdev,
+cn9k_crypto_adapter_qp_del(const struct rte_eventdev *event_dev, const struct rte_cryptodev *cdev,
 			   int32_t queue_pair_id)
 {
 	CNXK_VALID_DEV_OR_ERR_RET(event_dev->dev, "event_cn9k");
diff --git a/drivers/event/cnxk/cnxk_eventdev.h b/drivers/event/cnxk/cnxk_eventdev.h
index 293e0fff3f..f68c2aee23 100644
--- a/drivers/event/cnxk/cnxk_eventdev.h
+++ b/drivers/event/cnxk/cnxk_eventdev.h
@@ -10,6 +10,7 @@
 #include <cryptodev_pmd.h>
 #include <rte_devargs.h>
 #include <rte_ethdev.h>
+#include <rte_event_crypto_adapter.h>
 #include <rte_event_eth_rx_adapter.h>
 #include <rte_event_eth_tx_adapter.h>
 #include <rte_kvargs.h>
@@ -305,7 +306,8 @@ int cnxk_sso_tx_adapter_start(uint8_t id, const struct rte_eventdev *event_dev);
 int cnxk_sso_tx_adapter_stop(uint8_t id, const struct rte_eventdev *event_dev);
 int cnxk_sso_tx_adapter_free(uint8_t id, const struct rte_eventdev *event_dev);
 int cnxk_crypto_adapter_qp_add(const struct rte_eventdev *event_dev,
-			       const struct rte_cryptodev *cdev, int32_t queue_pair_id);
+			       const struct rte_cryptodev *cdev, int32_t queue_pair_id,
+			       const struct rte_event_crypto_adapter_queue_conf *conf);
 int cnxk_crypto_adapter_qp_del(const struct rte_cryptodev *cdev, int32_t queue_pair_id);
 
 #endif /* __CNXK_EVENTDEV_H__ */
diff --git a/drivers/event/cnxk/cnxk_eventdev_adptr.c b/drivers/event/cnxk/cnxk_eventdev_adptr.c
index 3ba5b246f0..5ec436382c 100644
--- a/drivers/event/cnxk/cnxk_eventdev_adptr.c
+++ b/drivers/event/cnxk/cnxk_eventdev_adptr.c
@@ -641,7 +641,8 @@ cnxk_sso_tx_adapter_free(uint8_t id __rte_unused,
 }
 
 static int
-crypto_adapter_qp_setup(const struct rte_cryptodev *cdev, struct cnxk_cpt_qp *qp)
+crypto_adapter_qp_setup(const struct rte_cryptodev *cdev, struct cnxk_cpt_qp *qp,
+			const struct rte_event_crypto_adapter_queue_conf *conf)
 {
 	char name[RTE_MEMPOOL_NAMESIZE];
 	uint32_t cache_size, nb_req;
@@ -674,6 +675,10 @@ crypto_adapter_qp_setup(const struct rte_cryptodev *cdev, struct cnxk_cpt_qp *qp
 	if (qp->ca.req_mp == NULL)
 		return -ENOMEM;
 
+	if (conf != NULL) {
+		qp->ca.vector_sz = conf->vector_sz;
+		qp->ca.vector_mp = conf->vector_mp;
+	}
 	qp->ca.enabled = true;
 
 	return 0;
@@ -681,7 +686,8 @@ crypto_adapter_qp_setup(const struct rte_cryptodev *cdev, struct cnxk_cpt_qp *qp
 
 int
 cnxk_crypto_adapter_qp_add(const struct rte_eventdev *event_dev, const struct rte_cryptodev *cdev,
-			   int32_t queue_pair_id)
+			   int32_t queue_pair_id,
+			   const struct rte_event_crypto_adapter_queue_conf *conf)
 {
 	struct cnxk_sso_evdev *sso_evdev = cnxk_sso_pmd_priv(event_dev);
 	uint32_t adptr_xae_cnt = 0;
@@ -693,7 +699,7 @@ cnxk_crypto_adapter_qp_add(const struct rte_eventdev *event_dev, const struct rt
 
 		for (qp_id = 0; qp_id < cdev->data->nb_queue_pairs; qp_id++) {
 			qp = cdev->data->queue_pairs[qp_id];
-			ret = crypto_adapter_qp_setup(cdev, qp);
+			ret = crypto_adapter_qp_setup(cdev, qp, conf);
 			if (ret) {
 				cnxk_crypto_adapter_qp_del(cdev, -1);
 				return ret;
@@ -702,7 +708,7 @@ cnxk_crypto_adapter_qp_add(const struct rte_eventdev *event_dev, const struct rt
 		}
 	} else {
 		qp = cdev->data->queue_pairs[queue_pair_id];
-		ret = crypto_adapter_qp_setup(cdev, qp);
+		ret = crypto_adapter_qp_setup(cdev, qp, conf);
 		if (ret)
 			return ret;
 		adptr_xae_cnt = qp->ca.req_mp->size;
@@ -733,7 +739,8 @@ crypto_adapter_qp_free(struct cnxk_cpt_qp *qp)
 }
 
 int
-cnxk_crypto_adapter_qp_del(const struct rte_cryptodev *cdev, int32_t queue_pair_id)
+cnxk_crypto_adapter_qp_del(const struct rte_cryptodev *cdev,
+			   int32_t queue_pair_id)
 {
 	struct cnxk_cpt_qp *qp;
 
-- 
2.25.1


^ permalink raw reply	[flat|nested] 36+ messages in thread
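
To make the new queue-pair configuration concrete, here is a minimal
application-side sketch, assuming the v3 API of this series
(rte_event_crypto_adapter_vector_limits_get() and the
rte_event_crypto_adapter_queue_conf argument to queue pair add). The pool
name, sizes, event queue and scheduling type are illustrative choices, not
values taken from the patch:

#include <errno.h>
#include <string.h>

#include <rte_common.h>
#include <rte_eventdev.h>
#include <rte_event_crypto_adapter.h>
#include <rte_lcore.h>
#include <rte_mempool.h>

/* Hypothetical helper: enable crypto-op vectorization on all queue pairs
 * of a cryptodev attached to event crypto adapter "id".
 */
static int
enable_ca_vectorization(uint8_t id, uint8_t evdev_id, uint8_t cdev_id)
{
	struct rte_event_crypto_adapter_vector_limits limits;
	struct rte_event_crypto_adapter_queue_conf conf;
	struct rte_mempool *vec_mp;
	int ret;

	ret = rte_event_crypto_adapter_vector_limits_get(evdev_id, cdev_id,
							 &limits);
	if (ret)
		return ret;

	/* Pool of rte_event_vector objects; each element holds up to 64
	 * pointers here, which must cover the vector_sz requested below.
	 */
	vec_mp = rte_event_vector_pool_create("ca_vec_pool", 1024, 0, 64,
					      rte_socket_id());
	if (vec_mp == NULL)
		return -ENOMEM;

	memset(&conf, 0, sizeof(conf));
	conf.flags = RTE_EVENT_CRYPTO_ADAPTER_EVENT_VECTOR;
	conf.vector_sz = RTE_MIN(64, limits.max_sz);
	conf.vector_timeout_ns = limits.min_timeout_ns;
	conf.vector_mp = vec_mp;
	conf.ev.queue_id = 0;
	conf.ev.sched_type = RTE_SCHED_TYPE_ATOMIC;

	/* queue_pair_id of -1 adds all queue pairs of the cryptodev */
	return rte_event_crypto_adapter_queue_pair_add(id, cdev_id, -1,
						       &conf);
}

Note that cn10k_crypto_adapter_vec_limits() above reports max_sz = UINT16_MAX
and zero timeouts, so on cn10k the timeout is effectively unused, matching the
comment in the patch.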

* RE: [PATCH v3 0/2] Vector support for event crypto adapter
  2022-10-01  0:42     ` [PATCH v3 0/2] Vector support for event crypto adapter Volodymyr Fialko
  2022-10-01  0:42       ` [PATCH v3 1/2] eventdev: introduce event cryptodev vector type Volodymyr Fialko
  2022-10-01  0:42       ` [PATCH v3 2/2] crypto/cnxk: add vectorization for event crypto Volodymyr Fialko
@ 2022-10-01  3:42       ` Akhil Goyal
  2022-10-01  8:00         ` Gujjar, Abhinandan S
  2 siblings, 1 reply; 36+ messages in thread
From: Akhil Goyal @ 2022-10-01  3:42 UTC (permalink / raw)
  To: Volodymyr Fialko, dev
  Cc: Jerin Jacob Kollanukkaran, Anoob Joseph, abhinandan.gujjar,
	Volodymyr Fialko

> Subject: [PATCH v3 0/2] Vector support for event crypto adapter
> 
> Introduce the ability to aggregate crypto operations processed by the event
> crypto adapter into a single event containing an rte_event_vector whose
> event type is RTE_EVENT_TYPE_CRYPTODEV_VECTOR.
> 
> * Changes since v1
> - Fix newline/space issues
> - Add missing error messages
> - Remove double check of conf input parameter
> 
> * Changes since v2
> - Rebase on top of dpdk-next-eventdev branch
> - Remove already merged patch
> 

Series Acked-by: Akhil Goyal <gakhil@marvell.com>

Since there is no response and we are approaching the RC1 deadline, the patches
are applied to dpdk-next-crypto with the below changes.

Updated release notes and removed deprecation notice.
Added a build fix because of removal of a macro RTE_FUNC_PTR_OR_ERR_RET.

Thanks.

^ permalink raw reply	[flat|nested] 36+ messages in thread
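
On the consumer side, aggregated completions arrive as a single event of type
RTE_EVENT_TYPE_CRYPTODEV_VECTOR carrying an rte_event_vector, as described in
the cover letter above. A hedged sketch of handling such events follows; the
use of rte_event_vector::ptrs for the crypto op pointers, the mempool free of
the vector, and the process_crypto_op() handler are illustrative assumptions,
not code from the series:

#include <rte_crypto.h>
#include <rte_eventdev.h>
#include <rte_mempool.h>

/* Hypothetical completion handler; a real application would check
 * op->status and continue its pipeline instead of just freeing.
 */
static void
process_crypto_op(struct rte_crypto_op *op)
{
	rte_crypto_op_free(op);
}

static void
handle_event(struct rte_event *ev)
{
	if (ev->event_type == RTE_EVENT_TYPE_CRYPTODEV_VECTOR) {
		struct rte_event_vector *vec = ev->vec;
		uint16_t i;

		/* Aggregated path: one event carries many crypto ops */
		for (i = 0; i < vec->nb_elem; i++)
			process_crypto_op(vec->ptrs[i]);

		/* Return the vector to the mempool it came from */
		rte_mempool_put(rte_mempool_from_obj(vec), vec);
	} else if (ev->event_type == RTE_EVENT_TYPE_CRYPTODEV) {
		/* Non-vectorized path: one crypto op per event */
		process_crypto_op(ev->event_ptr);
	}
}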

* RE: [PATCH v3 0/2] Vector support for event crypto adapter
  2022-10-01  3:42       ` [PATCH v3 0/2] Vector support for event crypto adapter Akhil Goyal
@ 2022-10-01  8:00         ` Gujjar, Abhinandan S
  2022-10-01  8:47           ` Akhil Goyal
  0 siblings, 1 reply; 36+ messages in thread
From: Gujjar, Abhinandan S @ 2022-10-01  8:00 UTC (permalink / raw)
  To: Akhil Goyal, Volodymyr Fialko, dev
  Cc: Jerin Jacob Kollanukkaran, Anoob Joseph, Volodymyr Fialko,
	Jayatheerthan,  Jay



> -----Original Message-----
> From: Akhil Goyal <gakhil@marvell.com>
> Sent: Saturday, October 1, 2022 9:12 AM
> To: Volodymyr Fialko <vfialko@marvell.com>; dev@dpdk.org
> Cc: Jerin Jacob Kollanukkaran <jerinj@marvell.com>; Anoob Joseph
> <anoobj@marvell.com>; Gujjar, Abhinandan S <abhinandan.gujjar@intel.com>;
> Volodymyr Fialko <vfialko@marvell.com>
> Subject: RE: [PATCH v3 0/2] Vector support for event crypto adapter
> 
> > Subject: [PATCH v3 0/2] Vector support for event crypto adapter
> >
> > Introduce the ability to aggregate crypto operations processed by the
> > event crypto adapter into a single event containing an rte_event_vector
> > whose event type is RTE_EVENT_TYPE_CRYPTODEV_VECTOR.
> >
> > * Changes since v1
> > - Fix newline/space issues
> > - Add missing error messages
> > - Remove double check of conf input parameter
> >
> > * Changes since v2
> > - Rebase on top of dpdk-next-eventdev branch
> > - Remove already merged patch
> >
> 
> Series Acked-by: Akhil Goyal <gakhil@marvell.com>
> 
> Since there is no response and we are approaching the RC1 deadline, the
> patches are applied to dpdk-next-crypto with the below changes.
Hi @Akhil Goyal,

Did we not discuss this in the call, where I committed to ack the patch by
Saturday? You can't call that no response!
> 
> Updated release notes and removed deprecation notice.
> Added a build fix because of removal of a macro RTE_FUNC_PTR_OR_ERR_RET.
> 
> Thanks.

^ permalink raw reply	[flat|nested] 36+ messages in thread

* RE: [PATCH v3 0/2] Vector support for event crypto adapter
  2022-10-01  8:00         ` Gujjar, Abhinandan S
@ 2022-10-01  8:47           ` Akhil Goyal
  2022-10-02  1:56             ` Gujjar, Abhinandan S
  0 siblings, 1 reply; 36+ messages in thread
From: Akhil Goyal @ 2022-10-01  8:47 UTC (permalink / raw)
  To: Gujjar, Abhinandan S, Volodymyr Fialko, dev
  Cc: Jerin Jacob Kollanukkaran, Anoob Joseph, Volodymyr Fialko,
	Jayatheerthan, Jay

> > > Subject: [PATCH v3 0/2] Vector support for event crypto adapter
> > >
> > > Introduce the ability to aggregate crypto operations processed by the
> > > event crypto adapter into a single event containing an rte_event_vector
> > > whose event type is RTE_EVENT_TYPE_CRYPTODEV_VECTOR.
> > >
> > > * Changes since v1
> > > - Fix newline/space issues
> > > - Add missing error messages
> > > - Remove double check of conf input parameter
> > >
> > > * Changes since v2
> > > - Rebase on top of dpdk-next-eventdev branch
> > > - Remove already merged patch
> > >
> >
> > Series Acked-by: Akhil Goyal <gakhil@marvell.com>
> >
> > Since there is no response and we are approaching the RC1 deadline, the
> > patches are applied to dpdk-next-crypto with the below changes.
> Hi @Akhil Goyal,
> 
> Did we not discuss this in the call, where I committed to ack the patch by
> Saturday? You can't call that no response!

I mentioned in the call that we have the RC1 deadline and that an ack/nack
should be provided as soon as possible.
It was agreed that it would come by Friday. I cannot spend all of my weekend
on a particular series. There are other series as well.
The patches have been there for about 2 months (v1 was shared on August 4).
As per DPDK guidelines, I could merge a patch if there is no response for more
than 3 weeks, even without asking anybody.
I reached out to you on Sep 22 just to be courteous, and even after that your
responses were pretty slow.
You should give ample time for rework to happen before the deadline.

From next time onwards, I will not ask for a review. You should be proactive
within 3 weeks.
> >
> > Updated release notes and removed deprecation notice.
> > Added a build fix because of removal of a macro
> RTE_FUNC_PTR_OR_ERR_RET.
> >
> > Thanks.

^ permalink raw reply	[flat|nested] 36+ messages in thread

* RE: [PATCH v3 0/2] Vector support for event crypto adapter
  2022-10-01  8:47           ` Akhil Goyal
@ 2022-10-02  1:56             ` Gujjar, Abhinandan S
  0 siblings, 0 replies; 36+ messages in thread
From: Gujjar, Abhinandan S @ 2022-10-02  1:56 UTC (permalink / raw)
  To: Akhil Goyal, Volodymyr Fialko, dev
  Cc: Jerin Jacob Kollanukkaran, Anoob Joseph, Volodymyr Fialko,
	Jayatheerthan,  Jay



> -----Original Message-----
> From: Akhil Goyal <gakhil@marvell.com>
> Sent: Saturday, October 1, 2022 2:18 PM
> To: Gujjar, Abhinandan S <abhinandan.gujjar@intel.com>; Volodymyr Fialko
> <vfialko@marvell.com>; dev@dpdk.org
> Cc: Jerin Jacob Kollanukkaran <jerinj@marvell.com>; Anoob Joseph
> <anoobj@marvell.com>; Volodymyr Fialko <vfialko@marvell.com>;
> Jayatheerthan, Jay <jay.jayatheerthan@intel.com>
> Subject: RE: [PATCH v3 0/2] Vector support for event crypto adapter
> 
> > > > Subject: [PATCH v3 0/2] Vector support for event crypto adapter
> > > >
> > > > Introduce the ability to aggregate crypto operations processed by the
> > > > event crypto adapter into a single event containing an rte_event_vector
> > > > whose event type is RTE_EVENT_TYPE_CRYPTODEV_VECTOR.
> > > >
> > > > * Changes since v1
> > > > - Fix newline/space issues
> > > > - Add missing error messages
> > > > - Remove double check of conf input parameter
> > > >
> > > > * Changes since v2
> > > > - Rebase on top of dpdk-next-eventdev branch
> > > > - Remove already merged patch
> > > >
> > >
> > > Series Acked-by: Akhil Goyal <gakhil@marvell.com>
> > >
> > > Since there is no response and we are approaching the RC1 deadline, the
> > > patches are applied to dpdk-next-crypto with the below changes.
> > Hi @Akhil Goyal,
> >
> > Did we not discuss this in the call, where I committed to ack the patch
> > by Saturday? You can't call that no response!
> 
> I mentioned in the call that we have the RC1 deadline and that an ack/nack
> should be provided as soon as possible.
> It was agreed that it would come by Friday. I cannot spend all of my weekend
> on a particular series. There are other series as well.
> The patches have been there for about 2 months (v1 was shared on August 4).
> As per DPDK guidelines, I could merge a patch if there is no response for
> more than 3 weeks, even without asking anybody.
> I reached out to you on Sep 22 just to be courteous, and even after that
> your responses were pretty slow.
> You should give ample time for rework to happen before the deadline.
But this time a few things were not clear, so we had a call.
I would not have sent this mail if I had agreed on Friday.
> 
> From next time onwards, I will not ask for a review. You should be proactive
> within 3 weeks.
BTW, there is nothing wrong in sending a reminder for a review.
It helps in prioritizing tasks in a busy schedule.
> > >
> > > Updated release notes and removed deprecation notice.
> > > Added a build fix because of removal of a macro
> > RTE_FUNC_PTR_OR_ERR_RET.
> > >
> > > Thanks.

^ permalink raw reply	[flat|nested] 36+ messages in thread

end of thread, other threads:[~2022-10-02  1:56 UTC | newest]

Thread overview: 36+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2022-06-22  1:38 [PATCH v2] app/test: add event inline security tests Volodymyr Fialko
2022-06-22 11:33 ` [PATCH v3] " Volodymyr Fialko
2022-06-22 16:32   ` Anoob Joseph
2022-06-28  8:29   ` Akhil Goyal
2022-06-28 12:09 ` [PATCH] doc: announce change in crypto adapter queue add Volodymyr Fialko
2022-06-28 12:40   ` Akhil Goyal
2022-07-11 14:56     ` Jerin Jacob
2022-07-12  5:31       ` [EXT] " Akhil Goyal
2022-07-13  6:49         ` Gujjar, Abhinandan S
2022-07-14  9:04   ` Hemant Agrawal
2022-07-17 11:32   ` Thomas Monjalon
2022-08-04  9:59 ` [PATCH 0/3] Vector support for event crypto adapter Volodymyr Fialko
2022-08-04  9:59   ` [PATCH 1/3] eventdev: introduce event cryptodev vector type Volodymyr Fialko
2022-09-21 18:32     ` Akhil Goyal
2022-09-22  4:53       ` Gujjar, Abhinandan S
2022-09-24  8:43     ` Gujjar, Abhinandan S
2022-09-26 11:02       ` Volodymyr Fialko
2022-09-27  9:05         ` Gujjar, Abhinandan S
2022-09-27  9:24           ` Volodymyr Fialko
2022-09-27  9:38             ` Gujjar, Abhinandan S
2022-09-27 13:26               ` Jerin Jacob
2022-09-28 14:43                 ` Gujjar, Abhinandan S
2022-09-28 16:13                   ` Jerin Jacob
2022-08-04  9:59   ` [PATCH 2/3] crypto/cnxk: add burst enqueue for event crypto Volodymyr Fialko
2022-08-04  9:59   ` [PATCH 3/3] crypto/cnxk: add vectorization " Volodymyr Fialko
2022-09-26 11:36   ` [PATCH v2 0/3] Vector support for event crypto adapter Volodymyr Fialko
2022-09-26 11:36     ` [PATCH v2 1/3] eventdev: introduce event cryptodev vector type Volodymyr Fialko
2022-09-26 11:36     ` [PATCH v2 2/3] crypto/cnxk: add burst enqueue for event crypto Volodymyr Fialko
2022-09-26 11:36     ` [PATCH v2 3/3] crypto/cnxk: add vectorization " Volodymyr Fialko
2022-10-01  0:42     ` [PATCH v3 0/2] Vector support for event crypto adapter Volodymyr Fialko
2022-10-01  0:42       ` [PATCH v3 1/2] eventdev: introduce event cryptodev vector type Volodymyr Fialko
2022-10-01  0:42       ` [PATCH v3 2/2] crypto/cnxk: add vectorization for event crypto Volodymyr Fialko
2022-10-01  3:42       ` [PATCH v3 0/2] Vector support for event crypto adapter Akhil Goyal
2022-10-01  8:00         ` Gujjar, Abhinandan S
2022-10-01  8:47           ` Akhil Goyal
2022-10-02  1:56             ` Gujjar, Abhinandan S

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).