From: Volodymyr Fialko <vfialko@marvell.com>
To: <dev@dpdk.org>, Ankur Dwivedi <adwivedi@marvell.com>,
	Anoob Joseph <anoobj@marvell.com>,
	Tejasree Kondoj <ktejasree@marvell.com>,
	Ray Kinsella <mdr@ashroe.eu>,
	Pavan Nikhilesh <pbhagavatula@marvell.com>,
	Shijith Thotton <sthotton@marvell.com>
Cc: <jerinj@marvell.com>, <gakhil@marvell.com>,
	Volodymyr Fialko <vfialko@marvell.com>
Subject: [PATCH 3/3] crypto/cnxk: add vectorization for event crypto
Date: Thu, 4 Aug 2022 11:59:07 +0200
Message-ID: <20220804095907.97895-4-vfialko@marvell.com>
In-Reply-To: <20220804095907.97895-1-vfialko@marvell.com>

Add support for vector aggregation of crypto operations for cn10k.
Crypto operations are grouped by the sub event type, flow id, scheduler
type and queue id fields of rte_event_crypto_metadata::response_info.

Signed-off-by: Volodymyr Fialko <vfialko@marvell.com>
---
Depends-on: Series-24099
Depends-on: Series-24066
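
A minimal usage sketch of the resulting application flow follows (illustration
only, not part of the patch). It assumes the vector_sz/vector_mp queue
configuration fields, the RTE_EVENT_CRYPTO_ADAPTER_EVENT_VECTOR flag and the
vector limits API introduced by the dependent eventdev series (Series-24099);
names may change on rebase.

/*
 * Usage sketch only, assuming the event vector additions from the dependent
 * eventdev series; requires <rte_event_crypto_adapter.h> and <rte_eventdev.h>.
 * evdev_id, cdev_id, qp_id, adapter_id, port_id and process_crypto_op() are
 * application-side placeholders.
 */
struct rte_event_crypto_adapter_vector_limits limits;
struct rte_event_crypto_adapter_queue_conf qconf = {0};
uint32_t caps;

rte_event_crypto_adapter_caps_get(evdev_id, cdev_id, &caps);
if (caps & RTE_EVENT_CRYPTO_ADAPTER_CAP_EVENT_VECTOR) {
	rte_event_crypto_adapter_vector_limits_get(evdev_id, cdev_id, &limits);
	qconf.flags = RTE_EVENT_CRYPTO_ADAPTER_EVENT_VECTOR;
	qconf.vector_sz = RTE_MIN(32, limits.max_sz);
	/* Pool of rte_event_vector objects, each holding up to vector_sz pointers */
	qconf.vector_mp = rte_event_vector_pool_create("ca_vec_pool", 16384, 0,
						       qconf.vector_sz, rte_socket_id());
}
rte_event_crypto_adapter_queue_pair_add(adapter_id, cdev_id, qp_id, &qconf);

/*
 * Completed operations are delivered as RTE_EVENT_TYPE_CRYPTODEV_VECTOR events;
 * each rte_event_vector groups ops sharing the sub event type, flow id,
 * scheduler type and queue id taken from response_info.
 */
struct rte_event ev;
uint16_t i;

while (rte_event_dequeue_burst(evdev_id, port_id, &ev, 1, 0)) {
	if (ev.event_type == RTE_EVENT_TYPE_CRYPTODEV_VECTOR) {
		struct rte_event_vector *vec = ev.vec;

		for (i = 0; i < vec->nb_elem; i++)
			process_crypto_op(vec->ptrs[i]); /* app-defined handler */
		rte_mempool_put(rte_mempool_from_obj(vec), vec);
	}
}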

 drivers/crypto/cnxk/cn10k_cryptodev_ops.c | 398 +++++++++++++++++++---
 drivers/crypto/cnxk/cn10k_cryptodev_ops.h |   2 +
 drivers/crypto/cnxk/cnxk_cryptodev_ops.h  |   9 +-
 drivers/crypto/cnxk/version.map           |   1 +
 drivers/event/cnxk/cn10k_eventdev.c       |  43 ++-
 drivers/event/cnxk/cn10k_worker.h         |   6 +-
 drivers/event/cnxk/cn9k_eventdev.c        |  11 +-
 drivers/event/cnxk/cnxk_eventdev.h        |   4 +-
 drivers/event/cnxk/cnxk_eventdev_adptr.c  |  17 +-
 9 files changed, 431 insertions(+), 60 deletions(-)

diff --git a/drivers/crypto/cnxk/cn10k_cryptodev_ops.c b/drivers/crypto/cnxk/cn10k_cryptodev_ops.c
index bfa6374005..c94a32f268 100644
--- a/drivers/crypto/cnxk/cn10k_cryptodev_ops.c
+++ b/drivers/crypto/cnxk/cn10k_cryptodev_ops.c
@@ -19,6 +19,25 @@
 
 #include "roc_api.h"
 
+#define PKTS_PER_LOOP	32
+#define PKTS_PER_STEORL 16
+
+/* Holds information required to send crypto operations in one burst */
+struct ops_burst {
+	struct rte_crypto_op *op[PKTS_PER_LOOP];
+	uint64_t w2[PKTS_PER_LOOP];
+	struct cn10k_sso_hws *ws;
+	struct cnxk_cpt_qp *qp;
+	uint16_t nb_ops;
+};
+
+/* Holds information required to send a vector of operations */
+struct vec_request {
+	struct cpt_inflight_req *req;
+	struct rte_event_vector *vec;
+	uint64_t w2;
+};
+
 static inline struct cnxk_se_sess *
 cn10k_cpt_sym_temp_sess_create(struct cnxk_cpt_qp *qp, struct rte_crypto_op *op)
 {
@@ -183,9 +202,6 @@ cn10k_cpt_fill_inst(struct cnxk_cpt_qp *qp, struct rte_crypto_op *ops[],
 	return 1;
 }
 
-#define PKTS_PER_LOOP	32
-#define PKTS_PER_STEORL 16
-
 static uint16_t
 cn10k_cpt_enqueue_burst(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops)
 {
@@ -286,9 +302,9 @@ cn10k_cpt_crypto_adapter_ev_mdata_set(struct rte_cryptodev *dev __rte_unused,
 	union rte_event_crypto_metadata *ec_mdata = mdata;
 	struct rte_event *rsp_info;
 	struct cnxk_cpt_qp *qp;
+	uint64_t w2, tag_type;
 	uint8_t cdev_id;
 	int16_t qp_id;
-	uint64_t w2;
 
 	/* Get queue pair */
 	cdev_id = ec_mdata->request_info.cdev_id;
@@ -296,9 +312,9 @@ cn10k_cpt_crypto_adapter_ev_mdata_set(struct rte_cryptodev *dev __rte_unused,
 	qp = rte_cryptodevs[cdev_id].data->queue_pairs[qp_id];
 
 	/* Prepare w2 */
+	tag_type = qp->ca.vector_sz ? RTE_EVENT_TYPE_CRYPTODEV_VECTOR : RTE_EVENT_TYPE_CRYPTODEV;
 	rsp_info = &ec_mdata->response_info;
-	w2 = CNXK_CPT_INST_W2((RTE_EVENT_TYPE_CRYPTODEV << 28) |
-				      (rsp_info->sub_event_type << 20) |
+	w2 = CNXK_CPT_INST_W2((tag_type << 28) | (rsp_info->sub_event_type << 20) |
 				      rsp_info->flow_id,
 			      rsp_info->sched_type, rsp_info->queue_id, 0);
 
@@ -392,18 +408,236 @@ cn10k_ca_meta_info_extract(struct rte_crypto_op *op,
 	return 0;
 }
 
+static inline void
+cn10k_cpt_vec_inst_fill(struct vec_request *vec_req, struct cpt_inst_s *inst,
+			struct cnxk_cpt_qp *qp)
+{
+	const union cpt_res_s res = {.cn10k.compcode = CPT_COMP_NOT_DONE};
+	struct cpt_inflight_req *infl_req = vec_req->req;
+
+	const union cpt_inst_w4 w4 = {
+		.s.opcode_major = ROC_SE_MAJOR_OP_MISC,
+		.s.opcode_minor = ROC_SE_MISC_MINOR_OP_PASSTHROUGH,
+		.s.param1 = 1,
+		.s.param2 = 1,
+		.s.dlen = 0,
+	};
+
+	infl_req->vec = vec_req->vec;
+	infl_req->qp = qp;
+
+	inst->res_addr = (uint64_t)&infl_req->res;
+	__atomic_store_n(&infl_req->res.u64[0], res.u64[0], __ATOMIC_RELAXED);
+
+	inst->w0.u64 = 0;
+	inst->w2.u64 = vec_req->w2;
+	inst->w3.u64 = CNXK_CPT_INST_W3(1, infl_req);
+	inst->w4.u64 = w4.u64;
+	inst->w7.u64 = ROC_CPT_DFLT_ENG_GRP_SE << 61;
+}
+
+static void
+cn10k_cpt_vec_pkt_submission_timeout_handle(void)
+{
+	plt_dp_err("Vector packet submission timed out");
+	abort();
+}
+
+static inline void
+cn10k_cpt_vec_submit(struct vec_request vec_tbl[], uint16_t vec_tbl_len, struct cnxk_cpt_qp *qp)
+{
+	uint64_t lmt_base, lmt_arg, lmt_id, io_addr;
+	union cpt_fc_write_s fc;
+	struct cpt_inst_s *inst;
+	uint16_t burst_size;
+	uint64_t *fc_addr;
+	int i;
+
+	if (vec_tbl_len == 0)
+		return;
+
+	const uint32_t fc_thresh = qp->lmtline.fc_thresh;
+	/*
+	 * Use a 10 minute timeout for the poll. It is not possible to recover from a partial
+	 * submission of a vector packet. The actual packets for processing are submitted to CPT
+	 * before this routine, so any failure to submit the vector packet indicates an
+	 * unrecoverable error for the application.
+	 */
+	const uint64_t timeout = rte_get_timer_cycles() + 10 * 60 * rte_get_timer_hz();
+
+	lmt_base = qp->lmtline.lmt_base;
+	io_addr = qp->lmtline.io_addr;
+	fc_addr = qp->lmtline.fc_addr;
+	ROC_LMT_BASE_ID_GET(lmt_base, lmt_id);
+	inst = (struct cpt_inst_s *)lmt_base;
+
+again:
+	burst_size = RTE_MIN(PKTS_PER_STEORL, vec_tbl_len);
+	for (i = 0; i < burst_size; i++)
+		cn10k_cpt_vec_inst_fill(&vec_tbl[i], &inst[i * 2], qp);
+
+	do {
+		fc.u64[0] = __atomic_load_n(fc_addr, __ATOMIC_RELAXED);
+		if (likely(fc.s.qsize < fc_thresh))
+			break;
+		if (unlikely(rte_get_timer_cycles() > timeout))
+			cn10k_cpt_vec_pkt_submission_timeout_handle();
+	} while (true);
+
+	lmt_arg = ROC_CN10K_CPT_LMT_ARG | (i - 1) << 12 | lmt_id;
+	roc_lmt_submit_steorl(lmt_arg, io_addr);
+
+	rte_io_wmb();
+
+	vec_tbl_len -= i;
+
+	if (vec_tbl_len > 0) {
+		vec_tbl += i;
+		goto again;
+	}
+}
+
+static inline int
+ca_lmtst_vec_submit(struct ops_burst *burst, struct vec_request vec_tbl[], uint16_t *vec_tbl_len)
+{
+	struct cpt_inflight_req *infl_reqs[PKTS_PER_LOOP];
+	uint64_t lmt_base, lmt_arg, io_addr;
+	uint16_t lmt_id, len = *vec_tbl_len;
+	struct cpt_inst_s *inst, *inst_base;
+	struct cpt_inflight_req *infl_req;
+	struct rte_event_vector *vec;
+	union cpt_fc_write_s fc;
+	struct cnxk_cpt_qp *qp;
+	uint64_t *fc_addr;
+	int ret, i, vi;
+
+	qp = burst->qp;
+
+	lmt_base = qp->lmtline.lmt_base;
+	io_addr = qp->lmtline.io_addr;
+	fc_addr = qp->lmtline.fc_addr;
+
+	const uint32_t fc_thresh = qp->lmtline.fc_thresh;
+
+	ROC_LMT_BASE_ID_GET(lmt_base, lmt_id);
+	inst_base = (struct cpt_inst_s *)lmt_base;
+
+#ifdef CNXK_CRYPTODEV_DEBUG
+	if (unlikely(!qp->ca.enabled)) {
+		rte_errno = EINVAL;
+		return 0;
+	}
+#endif
+
+	/* Perform fc check before putting packets into vectors */
+	fc.u64[0] = __atomic_load_n(fc_addr, __ATOMIC_RELAXED);
+	if (unlikely(fc.s.qsize > fc_thresh)) {
+		rte_errno = EAGAIN;
+		return 0;
+	}
+
+	if (unlikely(rte_mempool_get_bulk(qp->ca.req_mp, (void **)infl_reqs, burst->nb_ops))) {
+		rte_errno = ENOMEM;
+		return 0;
+	}
+
+	for (i = 0; i < burst->nb_ops; i++) {
+		inst = &inst_base[2 * i];
+		infl_req = infl_reqs[i];
+		infl_req->op_flags = 0;
+
+		ret = cn10k_cpt_fill_inst(qp, &burst->op[i], inst, infl_req);
+		if (unlikely(ret != 1)) {
+			plt_dp_dbg("Could not process op: %p", burst->op[i]);
+			if (i != 0)
+				goto submit;
+			else
+				goto put;
+		}
+
+		infl_req->res.cn10k.compcode = CPT_COMP_NOT_DONE;
+		infl_req->qp = qp;
+		inst->w3.u64 = 0x1;
+
+		/* Lookup for existing vector by w2 */
+		for (vi = len - 1; vi >= 0; vi--) {
+			if (vec_tbl[vi].w2 != burst->w2[i])
+				continue;
+			vec = vec_tbl[vi].vec;
+			if (unlikely(vec->nb_elem == qp->ca.vector_sz))
+				continue;
+			vec->ptrs[vec->nb_elem++] = infl_req;
+			goto next_op; /* continue outer loop */
+		}
+
+		/* No available vectors found, allocate a new one */
+		if (unlikely(rte_mempool_get(qp->ca.vector_mp, (void **)&vec_tbl[len].vec))) {
+			rte_errno = ENOMEM;
+			if (i != 0)
+				goto submit;
+			else
+				goto put;
+		}
+		/* Also preallocate an in-flight request that will be used to
+		 * submit the misc passthrough instruction
+		 */
+		if (unlikely(rte_mempool_get(qp->ca.req_mp, (void **)&vec_tbl[len].req))) {
+			rte_mempool_put(qp->ca.vector_mp, vec_tbl[len].vec);
+			rte_errno = ENOMEM;
+			if (i != 0)
+				goto submit;
+			else
+				goto put;
+		}
+		vec_tbl[len].w2 = burst->w2[i];
+		vec_tbl[len].vec->ptrs[0] = infl_req;
+		vec_tbl[len].vec->nb_elem = 1;
+		len++;
+
+next_op:;
+	}
+
+	/* Submit operations in burst */
+submit:
+	if (CNXK_TT_FROM_TAG(burst->ws->gw_rdata) == SSO_TT_ORDERED)
+		roc_sso_hws_head_wait(burst->ws->base);
+
+	if (i > PKTS_PER_STEORL) {
+		lmt_arg = ROC_CN10K_CPT_LMT_ARG | (PKTS_PER_STEORL - 1) << 12 | (uint64_t)lmt_id;
+		roc_lmt_submit_steorl(lmt_arg, io_addr);
+		lmt_arg = ROC_CN10K_CPT_LMT_ARG | (i - PKTS_PER_STEORL - 1) << 12 |
+			  (uint64_t)(lmt_id + PKTS_PER_STEORL);
+		roc_lmt_submit_steorl(lmt_arg, io_addr);
+	} else {
+		lmt_arg = ROC_CN10K_CPT_LMT_ARG | (i - 1) << 12 | (uint64_t)lmt_id;
+		roc_lmt_submit_steorl(lmt_arg, io_addr);
+	}
+
+	rte_io_wmb();
+
+put:
+	if (i != burst->nb_ops)
+		rte_mempool_put_bulk(qp->ca.req_mp, (void *)&infl_reqs[i], burst->nb_ops - i);
+
+	*vec_tbl_len = len;
+
+	return i;
+}
+
 static inline uint16_t
-ca_lmtst_burst_submit(struct cn10k_sso_hws *ws, uint64_t w2[], struct cnxk_cpt_qp *qp,
-		      struct rte_crypto_op *op[], uint16_t nb_ops)
+ca_lmtst_burst_submit(struct ops_burst *burst)
 {
 	struct cpt_inflight_req *infl_reqs[PKTS_PER_LOOP];
 	uint64_t lmt_base, lmt_arg, io_addr;
 	struct cpt_inst_s *inst, *inst_base;
 	struct cpt_inflight_req *infl_req;
 	union cpt_fc_write_s fc;
+	struct cnxk_cpt_qp *qp;
 	uint64_t *fc_addr;
 	uint16_t lmt_id;
-	int ret, i;
+	int ret, i, j;
+
+	qp = burst->qp;
 
 	lmt_base = qp->lmtline.lmt_base;
 	io_addr = qp->lmtline.io_addr;
@@ -414,24 +648,26 @@ ca_lmtst_burst_submit(struct cn10k_sso_hws *ws, uint64_t w2[], struct cnxk_cpt_q
 	ROC_LMT_BASE_ID_GET(lmt_base, lmt_id);
 	inst_base = (struct cpt_inst_s *)lmt_base;
 
+#ifdef CNXK_CRYPTODEV_DEBUG
 	if (unlikely(!qp->ca.enabled)) {
 		rte_errno = EINVAL;
 		return 0;
 	}
+#endif
 
-	if (unlikely(rte_mempool_get_bulk(qp->ca.req_mp, (void **)infl_reqs, nb_ops))) {
+	if (unlikely(rte_mempool_get_bulk(qp->ca.req_mp, (void **)infl_reqs, burst->nb_ops))) {
 		rte_errno = ENOMEM;
 		return 0;
 	}
 
-	for (i = 0; i < nb_ops; i++) {
+	for (i = 0; i < burst->nb_ops; i++) {
 		inst = &inst_base[2 * i];
 		infl_req = infl_reqs[i];
 		infl_req->op_flags = 0;
 
-		ret = cn10k_cpt_fill_inst(qp, &op[i], inst, infl_req);
+		ret = cn10k_cpt_fill_inst(qp, &burst->op[i], inst, infl_req);
 		if (unlikely(ret != 1)) {
-			plt_dp_err("Could not process op: %p", op[i]);
+			plt_dp_dbg("Could not process op: %p", burst->op[i]);
 			if (i != 0)
 				goto submit;
 			else
@@ -442,20 +678,25 @@ ca_lmtst_burst_submit(struct cn10k_sso_hws *ws, uint64_t w2[], struct cnxk_cpt_q
 		infl_req->qp = qp;
 		inst->w0.u64 = 0;
 		inst->res_addr = (uint64_t)&infl_req->res;
-		inst->w2.u64 = w2[i];
+		inst->w2.u64 = burst->w2[i];
 		inst->w3.u64 = CNXK_CPT_INST_W3(1, infl_req);
 	}
 
 	fc.u64[0] = __atomic_load_n(fc_addr, __ATOMIC_RELAXED);
 	if (unlikely(fc.s.qsize > fc_thresh)) {
 		rte_errno = EAGAIN;
+		for (j = 0; j < i; j++) {
+			infl_req = infl_reqs[j];
+			if (unlikely(infl_req->op_flags & CPT_OP_FLAGS_METABUF))
+				rte_mempool_put(qp->meta_info.pool, infl_req->mdata);
+		}
 		i = 0;
 		goto put;
 	}
 
 submit:
-	if (CNXK_TT_FROM_TAG(ws->gw_rdata) == SSO_TT_ORDERED)
-		roc_sso_hws_head_wait(ws->base);
+	if (CNXK_TT_FROM_TAG(burst->ws->gw_rdata) == SSO_TT_ORDERED)
+		roc_sso_hws_head_wait(burst->ws->base);
 
 	if (i > PKTS_PER_STEORL) {
 		lmt_arg = ROC_CN10K_CPT_LMT_ARG | (PKTS_PER_STEORL - 1) << 12 | (uint64_t)lmt_id;
@@ -471,8 +712,8 @@ ca_lmtst_burst_submit(struct cn10k_sso_hws *ws, uint64_t w2[], struct cnxk_cpt_q
 	rte_io_wmb();
 
 put:
-	if (unlikely(i != nb_ops))
-		rte_mempool_put_bulk(qp->ca.req_mp, (void *)&infl_reqs[i], nb_ops - i);
+	if (unlikely(i != burst->nb_ops))
+		rte_mempool_put_bulk(qp->ca.req_mp, (void *)&infl_reqs[i], burst->nb_ops - i);
 
 	return i;
 }
@@ -480,42 +721,76 @@ ca_lmtst_burst_submit(struct cn10k_sso_hws *ws, uint64_t w2[], struct cnxk_cpt_q
 uint16_t __rte_hot
 cn10k_cpt_crypto_adapter_enqueue(void *ws, struct rte_event ev[], uint16_t nb_events)
 {
-	struct rte_crypto_op *ops[PKTS_PER_LOOP], *op;
-	struct cnxk_cpt_qp *qp, *curr_qp = NULL;
-	uint64_t w2s[PKTS_PER_LOOP], w2;
-	uint16_t submitted, count = 0;
-	int ret, i, ops_len = 0;
+	uint16_t submitted, count = 0, vec_tbl_len = 0;
+	struct vec_request vec_tbl[nb_events];
+	struct rte_crypto_op *op;
+	struct ops_burst burst;
+	struct cnxk_cpt_qp *qp;
+	bool is_vector = false;
+	uint64_t w2;
+	int ret, i;
+
+	burst.ws = ws;
+	burst.qp = NULL;
+	burst.nb_ops = 0;
 
 	for (i = 0; i < nb_events; i++) {
 		op = ev[i].event_ptr;
 		ret = cn10k_ca_meta_info_extract(op, &qp, &w2);
 		if (unlikely(ret)) {
 			rte_errno = EINVAL;
-			return count;
+			goto vec_submit;
 		}
 
-		if (qp != curr_qp) {
-			if (ops_len) {
-				submitted = ca_lmtst_burst_submit(ws, w2s, curr_qp, ops, ops_len);
+		/* Queue pair change check */
+		if (qp != burst.qp) {
+			if (burst.nb_ops) {
+				if (is_vector) {
+					submitted =
+						ca_lmtst_vec_submit(&burst, vec_tbl, &vec_tbl_len);
+					/*
+					 * Vector submission is required on a qp change (but not
+					 * otherwise), since several vectors per LMTST instruction
+					 * can be sent only for the same qp
+					 */
+					cn10k_cpt_vec_submit(vec_tbl, vec_tbl_len, burst.qp);
+					vec_tbl_len = 0;
+				} else {
+					submitted = ca_lmtst_burst_submit(&burst);
+				}
 				count += submitted;
-				if (unlikely(submitted != ops_len))
-					return count;
-				ops_len = 0;
+				if (unlikely(submitted != burst.nb_ops))
+					goto vec_submit;
+				burst.nb_ops = 0;
 			}
-			curr_qp = qp;
+			is_vector = qp->ca.vector_sz;
+			burst.qp = qp;
 		}
-		w2s[ops_len] = w2;
-		ops[ops_len] = op;
-		if (++ops_len == PKTS_PER_LOOP) {
-			submitted = ca_lmtst_burst_submit(ws, w2s, curr_qp, ops, ops_len);
+		burst.w2[burst.nb_ops] = w2;
+		burst.op[burst.nb_ops] = op;
+
+		/* Max nb_ops per burst check */
+		if (++burst.nb_ops == PKTS_PER_LOOP) {
+			if (is_vector)
+				submitted = ca_lmtst_vec_submit(&burst, vec_tbl, &vec_tbl_len);
+			else
+				submitted = ca_lmtst_burst_submit(&burst);
 			count += submitted;
-			if (unlikely(submitted != ops_len))
-				return count;
-			ops_len = 0;
+			if (unlikely(submitted != burst.nb_ops))
+				goto vec_submit;
+			burst.nb_ops = 0;
 		}
 	}
-	if (ops_len)
-		count += ca_lmtst_burst_submit(ws, w2s, curr_qp, ops, ops_len);
+	/* Submit the rest of crypto operations */
+	if (burst.nb_ops) {
+		if (is_vector)
+			count += ca_lmtst_vec_submit(&burst, vec_tbl, &vec_tbl_len);
+		else
+			count += ca_lmtst_burst_submit(&burst);
+	}
+
+vec_submit:
+	cn10k_cpt_vec_submit(vec_tbl, vec_tbl_len, burst.qp);
 	return count;
 }
 
@@ -673,6 +948,49 @@ cn10k_cpt_crypto_adapter_dequeue(uintptr_t get_work1)
 	return (uintptr_t)cop;
 }
 
+uintptr_t
+cn10k_cpt_crypto_adapter_vector_dequeue(uintptr_t get_work1)
+{
+	struct cpt_inflight_req *infl_req, *vec_infl_req;
+	struct rte_mempool *meta_mp, *req_mp;
+	struct rte_event_vector *vec;
+	struct rte_crypto_op *cop;
+	struct cnxk_cpt_qp *qp;
+	union cpt_res_s res;
+	int i;
+
+	vec_infl_req = (struct cpt_inflight_req *)(get_work1);
+
+	vec = vec_infl_req->vec;
+	qp = vec_infl_req->qp;
+	meta_mp = qp->meta_info.pool;
+	req_mp = qp->ca.req_mp;
+
+#ifdef CNXK_CRYPTODEV_DEBUG
+	res.u64[0] = __atomic_load_n(&vec_infl_req->res.u64[0], __ATOMIC_RELAXED);
+	PLT_ASSERT(res.cn10k.compcode == CPT_COMP_WARN);
+	PLT_ASSERT(res.cn10k.uc_compcode == 0);
+#endif
+
+	for (i = 0; i < vec->nb_elem; i++) {
+		infl_req = vec->ptrs[i];
+		cop = infl_req->cop;
+
+		res.u64[0] = __atomic_load_n(&infl_req->res.u64[0], __ATOMIC_RELAXED);
+		cn10k_cpt_dequeue_post_process(qp, cop, infl_req, &res.cn10k);
+
+		vec->ptrs[i] = cop;
+		if (unlikely(infl_req->op_flags & CPT_OP_FLAGS_METABUF))
+			rte_mempool_put(meta_mp, infl_req->mdata);
+
+		rte_mempool_put(req_mp, infl_req);
+	}
+
+	rte_mempool_put(req_mp, vec_infl_req);
+
+	return (uintptr_t)vec;
+}
+
 static uint16_t
 cn10k_cpt_dequeue_burst(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops)
 {
diff --git a/drivers/crypto/cnxk/cn10k_cryptodev_ops.h b/drivers/crypto/cnxk/cn10k_cryptodev_ops.h
index 628d6a567c..8104310c30 100644
--- a/drivers/crypto/cnxk/cn10k_cryptodev_ops.h
+++ b/drivers/crypto/cnxk/cn10k_cryptodev_ops.h
@@ -18,5 +18,7 @@ uint16_t __rte_hot cn10k_cpt_crypto_adapter_enqueue(void *ws, struct rte_event e
 		uint16_t nb_events);
 __rte_internal
 uintptr_t cn10k_cpt_crypto_adapter_dequeue(uintptr_t get_work1);
+__rte_internal
+uintptr_t cn10k_cpt_crypto_adapter_vector_dequeue(uintptr_t get_work1);
 
 #endif /* _CN10K_CRYPTODEV_OPS_H_ */
diff --git a/drivers/crypto/cnxk/cnxk_cryptodev_ops.h b/drivers/crypto/cnxk/cnxk_cryptodev_ops.h
index ffe4ae19aa..d9ed43b40b 100644
--- a/drivers/crypto/cnxk/cnxk_cryptodev_ops.h
+++ b/drivers/crypto/cnxk/cnxk_cryptodev_ops.h
@@ -37,7 +37,10 @@ struct cpt_qp_meta_info {
 
 struct cpt_inflight_req {
 	union cpt_res_s res;
-	struct rte_crypto_op *cop;
+	union {
+		struct rte_crypto_op *cop;
+		struct rte_event_vector *vec;
+	};
 	void *mdata;
 	uint8_t op_flags;
 	void *qp;
@@ -63,6 +66,10 @@ struct crypto_adpter_info {
 	/**< Set if queue pair is added to crypto adapter */
 	struct rte_mempool *req_mp;
 	/**< CPT inflight request mempool */
+	uint16_t vector_sz;
+	/**< Maximum number of crypto ops to combine into a single vector */
+	struct rte_mempool *vector_mp;
+	/**< Pool for allocating rte_event_vector */
 };
 
 struct cnxk_cpt_qp {
diff --git a/drivers/crypto/cnxk/version.map b/drivers/crypto/cnxk/version.map
index 0178c416ec..4735e70550 100644
--- a/drivers/crypto/cnxk/version.map
+++ b/drivers/crypto/cnxk/version.map
@@ -5,6 +5,7 @@ INTERNAL {
 	cn9k_cpt_crypto_adapter_dequeue;
 	cn10k_cpt_crypto_adapter_enqueue;
 	cn10k_cpt_crypto_adapter_dequeue;
+	cn10k_cpt_crypto_adapter_vector_dequeue;
 
 	local: *;
 };
diff --git a/drivers/event/cnxk/cn10k_eventdev.c b/drivers/event/cnxk/cn10k_eventdev.c
index 3651af9ca8..067248fc77 100644
--- a/drivers/event/cnxk/cn10k_eventdev.c
+++ b/drivers/event/cnxk/cn10k_eventdev.c
@@ -880,7 +880,8 @@ cn10k_crypto_adapter_caps_get(const struct rte_eventdev *event_dev,
 	CNXK_VALID_DEV_OR_ERR_RET(cdev->device, "crypto_cn10k");
 
 	*caps = RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD |
-		RTE_EVENT_CRYPTO_ADAPTER_CAP_SESSION_PRIVATE_DATA;
+		RTE_EVENT_CRYPTO_ADAPTER_CAP_SESSION_PRIVATE_DATA |
+		RTE_EVENT_CRYPTO_ADAPTER_CAP_EVENT_VECTOR;
 
 	return 0;
 }
@@ -892,8 +893,7 @@ cn10k_crypto_adapter_qp_add(const struct rte_eventdev *event_dev,
 			    const struct rte_event_crypto_adapter_queue_conf *conf)
 {
 	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
-
-	RTE_SET_USED(conf);
+	int ret;
 
 	CNXK_VALID_DEV_OR_ERR_RET(event_dev->dev, "event_cn10k");
 	CNXK_VALID_DEV_OR_ERR_RET(cdev->device, "crypto_cn10k");
@@ -901,12 +901,14 @@ cn10k_crypto_adapter_qp_add(const struct rte_eventdev *event_dev,
 	dev->is_ca_internal_port = 1;
 	cn10k_sso_fp_fns_set((struct rte_eventdev *)(uintptr_t)event_dev);
 
-	return cnxk_crypto_adapter_qp_add(event_dev, cdev, queue_pair_id);
+	ret = cnxk_crypto_adapter_qp_add(event_dev, cdev, queue_pair_id, conf);
+	cn10k_sso_set_priv_mem(event_dev, NULL, 0);
+
+	return ret;
 }
 
 static int
-cn10k_crypto_adapter_qp_del(const struct rte_eventdev *event_dev,
-			    const struct rte_cryptodev *cdev,
+cn10k_crypto_adapter_qp_del(const struct rte_eventdev *event_dev, const struct rte_cryptodev *cdev,
 			    int32_t queue_pair_id)
 {
 	CNXK_VALID_DEV_OR_ERR_RET(event_dev->dev, "event_cn10k");
@@ -915,6 +917,34 @@ cn10k_crypto_adapter_qp_del(const struct rte_eventdev *event_dev,
 	return cnxk_crypto_adapter_qp_del(cdev, queue_pair_id);
 }
 
+static int
+cn10k_tim_caps_get(const struct rte_eventdev *evdev, uint64_t flags,
+		   uint32_t *caps, const struct event_timer_adapter_ops **ops)
+{
+	return cnxk_tim_caps_get(evdev, flags, caps, ops,
+				 cn10k_sso_set_priv_mem);
+}
+
+static int
+cn10k_crypto_adapter_vec_limits(const struct rte_eventdev *event_dev,
+				const struct rte_cryptodev *cdev,
+				struct rte_event_crypto_adapter_vector_limits *limits)
+{
+	CNXK_VALID_DEV_OR_ERR_RET(event_dev->dev, "event_cn10k");
+	CNXK_VALID_DEV_OR_ERR_RET(cdev->device, "crypto_cn10k");
+
+	limits->log2_sz = false;
+	limits->min_sz = 0;
+	limits->max_sz = UINT16_MAX;
+	/* Timeout is not used; the software implementation aggregates all
+	 * crypto operations passed to a single enqueue call
+	 */
+	limits->min_timeout_ns = 0;
+	limits->max_timeout_ns = 0;
+
+	return 0;
+}
+
 static struct eventdev_ops cn10k_sso_dev_ops = {
 	.dev_infos_get = cn10k_sso_info_get,
 	.dev_configure = cn10k_sso_dev_configure,
@@ -953,6 +983,7 @@ static struct eventdev_ops cn10k_sso_dev_ops = {
 	.crypto_adapter_caps_get = cn10k_crypto_adapter_caps_get,
 	.crypto_adapter_queue_pair_add = cn10k_crypto_adapter_qp_add,
 	.crypto_adapter_queue_pair_del = cn10k_crypto_adapter_qp_del,
+	.crypto_adapter_vector_limits_get = cn10k_crypto_adapter_vec_limits,
 
 	.dump = cnxk_sso_dump,
 	.dev_start = cn10k_sso_start,
diff --git a/drivers/event/cnxk/cn10k_worker.h b/drivers/event/cnxk/cn10k_worker.h
index 65bb08c0a1..ddd11b3336 100644
--- a/drivers/event/cnxk/cn10k_worker.h
+++ b/drivers/event/cnxk/cn10k_worker.h
@@ -207,6 +207,9 @@ cn10k_sso_hws_post_process(struct cn10k_sso_hws *ws, uint64_t *u64,
 	if ((flags & CPT_RX_WQE_F) &&
 	    (CNXK_EVENT_TYPE_FROM_TAG(u64[0]) == RTE_EVENT_TYPE_CRYPTODEV)) {
 		u64[1] = cn10k_cpt_crypto_adapter_dequeue(u64[1]);
+	} else if ((flags & CPT_RX_WQE_F) &&
+		   (CNXK_EVENT_TYPE_FROM_TAG(u64[0]) == RTE_EVENT_TYPE_CRYPTODEV_VECTOR)) {
+		u64[1] = cn10k_cpt_crypto_adapter_vector_dequeue(u64[1]);
 	} else if (CNXK_EVENT_TYPE_FROM_TAG(u64[0]) == RTE_EVENT_TYPE_ETHDEV) {
 		uint8_t port = CNXK_SUB_EVENT_FROM_TAG(u64[0]);
 		uint64_t mbuf;
@@ -253,8 +256,7 @@ cn10k_sso_hws_post_process(struct cn10k_sso_hws *ws, uint64_t *u64,
 					 flags & NIX_RX_OFFLOAD_TSTAMP_F,
 					 (uint64_t *)tstamp_ptr);
 		u64[1] = mbuf;
-	} else if (CNXK_EVENT_TYPE_FROM_TAG(u64[0]) ==
-		   RTE_EVENT_TYPE_ETHDEV_VECTOR) {
+	} else if (CNXK_EVENT_TYPE_FROM_TAG(u64[0]) == RTE_EVENT_TYPE_ETHDEV_VECTOR) {
 		uint8_t port = CNXK_SUB_EVENT_FROM_TAG(u64[0]);
 		__uint128_t vwqe_hdr = *(__uint128_t *)u64[1];
 
diff --git a/drivers/event/cnxk/cn9k_eventdev.c b/drivers/event/cnxk/cn9k_eventdev.c
index 45ed547cb0..bd130d40aa 100644
--- a/drivers/event/cnxk/cn9k_eventdev.c
+++ b/drivers/event/cnxk/cn9k_eventdev.c
@@ -1124,8 +1124,7 @@ cn9k_crypto_adapter_qp_add(const struct rte_eventdev *event_dev,
 			   const struct rte_event_crypto_adapter_queue_conf *conf)
 {
 	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
-
-	RTE_SET_USED(conf);
+	int ret;
 
 	CNXK_VALID_DEV_OR_ERR_RET(event_dev->dev, "event_cn9k");
 	CNXK_VALID_DEV_OR_ERR_RET(cdev->device, "crypto_cn9k");
@@ -1133,12 +1132,14 @@ cn9k_crypto_adapter_qp_add(const struct rte_eventdev *event_dev,
 	dev->is_ca_internal_port = 1;
 	cn9k_sso_fp_fns_set((struct rte_eventdev *)(uintptr_t)event_dev);
 
-	return cnxk_crypto_adapter_qp_add(event_dev, cdev, queue_pair_id);
+	ret = cnxk_crypto_adapter_qp_add(event_dev, cdev, queue_pair_id, conf);
+	cn9k_sso_set_priv_mem(event_dev, NULL, 0);
+
+	return ret;
 }
 
 static int
-cn9k_crypto_adapter_qp_del(const struct rte_eventdev *event_dev,
-			   const struct rte_cryptodev *cdev,
+cn9k_crypto_adapter_qp_del(const struct rte_eventdev *event_dev, const struct rte_cryptodev *cdev,
 			   int32_t queue_pair_id)
 {
 	CNXK_VALID_DEV_OR_ERR_RET(event_dev->dev, "event_cn9k");
diff --git a/drivers/event/cnxk/cnxk_eventdev.h b/drivers/event/cnxk/cnxk_eventdev.h
index c9a0686b4d..41d9c0b7e7 100644
--- a/drivers/event/cnxk/cnxk_eventdev.h
+++ b/drivers/event/cnxk/cnxk_eventdev.h
@@ -10,6 +10,7 @@
 #include <cryptodev_pmd.h>
 #include <rte_devargs.h>
 #include <rte_ethdev.h>
+#include <rte_event_crypto_adapter.h>
 #include <rte_event_eth_rx_adapter.h>
 #include <rte_event_eth_tx_adapter.h>
 #include <rte_kvargs.h>
@@ -312,7 +313,8 @@ int cnxk_sso_tx_adapter_start(uint8_t id, const struct rte_eventdev *event_dev);
 int cnxk_sso_tx_adapter_stop(uint8_t id, const struct rte_eventdev *event_dev);
 int cnxk_sso_tx_adapter_free(uint8_t id, const struct rte_eventdev *event_dev);
 int cnxk_crypto_adapter_qp_add(const struct rte_eventdev *event_dev,
-			       const struct rte_cryptodev *cdev, int32_t queue_pair_id);
+			       const struct rte_cryptodev *cdev, int32_t queue_pair_id,
+			       const struct rte_event_crypto_adapter_queue_conf *conf);
 int cnxk_crypto_adapter_qp_del(const struct rte_cryptodev *cdev, int32_t queue_pair_id);
 
 #endif /* __CNXK_EVENTDEV_H__ */
diff --git a/drivers/event/cnxk/cnxk_eventdev_adptr.c b/drivers/event/cnxk/cnxk_eventdev_adptr.c
index 3f46e79ba8..cd238fe074 100644
--- a/drivers/event/cnxk/cnxk_eventdev_adptr.c
+++ b/drivers/event/cnxk/cnxk_eventdev_adptr.c
@@ -631,7 +631,8 @@ cnxk_sso_tx_adapter_free(uint8_t id __rte_unused,
 }
 
 static int
-crypto_adapter_qp_setup(const struct rte_cryptodev *cdev, struct cnxk_cpt_qp *qp)
+crypto_adapter_qp_setup(const struct rte_cryptodev *cdev, struct cnxk_cpt_qp *qp,
+			const struct rte_event_crypto_adapter_queue_conf *conf)
 {
 	char name[RTE_MEMPOOL_NAMESIZE];
 	uint32_t cache_size, nb_req;
@@ -664,6 +665,10 @@ crypto_adapter_qp_setup(const struct rte_cryptodev *cdev, struct cnxk_cpt_qp *qp
 	if (qp->ca.req_mp == NULL)
 		return -ENOMEM;
 
+	if (conf != NULL) {
+		qp->ca.vector_sz = conf->vector_sz;
+		qp->ca.vector_mp = conf->vector_mp;
+	}
 	qp->ca.enabled = true;
 
 	return 0;
@@ -671,7 +676,8 @@ crypto_adapter_qp_setup(const struct rte_cryptodev *cdev, struct cnxk_cpt_qp *qp
 
 int
 cnxk_crypto_adapter_qp_add(const struct rte_eventdev *event_dev, const struct rte_cryptodev *cdev,
-			   int32_t queue_pair_id)
+			   int32_t queue_pair_id,
+			   const struct rte_event_crypto_adapter_queue_conf *conf)
 {
 	struct cnxk_sso_evdev *sso_evdev = cnxk_sso_pmd_priv(event_dev);
 	uint32_t adptr_xae_cnt = 0;
@@ -683,7 +689,7 @@ cnxk_crypto_adapter_qp_add(const struct rte_eventdev *event_dev, const struct rt
 
 		for (qp_id = 0; qp_id < cdev->data->nb_queue_pairs; qp_id++) {
 			qp = cdev->data->queue_pairs[qp_id];
-			ret = crypto_adapter_qp_setup(cdev, qp);
+			ret = crypto_adapter_qp_setup(cdev, qp, conf);
 			if (ret) {
 				cnxk_crypto_adapter_qp_del(cdev, -1);
 				return ret;
@@ -692,7 +698,7 @@ cnxk_crypto_adapter_qp_add(const struct rte_eventdev *event_dev, const struct rt
 		}
 	} else {
 		qp = cdev->data->queue_pairs[queue_pair_id];
-		ret = crypto_adapter_qp_setup(cdev, qp);
+		ret = crypto_adapter_qp_setup(cdev, qp, conf);
 		if (ret)
 			return ret;
 		adptr_xae_cnt = qp->ca.req_mp->size;
@@ -723,7 +729,8 @@ crypto_adapter_qp_free(struct cnxk_cpt_qp *qp)
 }
 
 int
-cnxk_crypto_adapter_qp_del(const struct rte_cryptodev *cdev, int32_t queue_pair_id)
+cnxk_crypto_adapter_qp_del(const struct rte_cryptodev *cdev,
+			   int32_t queue_pair_id)
 {
 	struct cnxk_cpt_qp *qp;
 
-- 
2.25.1


Thread overview: 36+ messages
2022-06-22  1:38 [PATCH v2] app/test: add event inline security tests Volodymyr Fialko
2022-06-22 11:33 ` [PATCH v3] " Volodymyr Fialko
2022-06-22 16:32   ` Anoob Joseph
2022-06-28  8:29   ` Akhil Goyal
2022-06-28 12:09 ` [PATCH] doc: announce change in crypto adapter queue add Volodymyr Fialko
2022-06-28 12:40   ` Akhil Goyal
2022-07-11 14:56     ` Jerin Jacob
2022-07-12  5:31       ` [EXT] " Akhil Goyal
2022-07-13  6:49         ` Gujjar, Abhinandan S
2022-07-14  9:04   ` Hemant Agrawal
2022-07-17 11:32   ` Thomas Monjalon
2022-08-04  9:59 ` [PATCH 0/3] Vector support for event crypto adapter Volodymyr Fialko
2022-08-04  9:59   ` [PATCH 1/3] eventdev: introduce event cryptodev vector type Volodymyr Fialko
2022-09-21 18:32     ` Akhil Goyal
2022-09-22  4:53       ` Gujjar, Abhinandan S
2022-09-24  8:43     ` Gujjar, Abhinandan S
2022-09-26 11:02       ` Volodymyr Fialko
2022-09-27  9:05         ` Gujjar, Abhinandan S
2022-09-27  9:24           ` Volodymyr Fialko
2022-09-27  9:38             ` Gujjar, Abhinandan S
2022-09-27 13:26               ` Jerin Jacob
2022-09-28 14:43                 ` Gujjar, Abhinandan S
2022-09-28 16:13                   ` Jerin Jacob
2022-08-04  9:59   ` [PATCH 2/3] crypto/cnxk: add burst enqueue for event crypto Volodymyr Fialko
2022-08-04  9:59   ` Volodymyr Fialko [this message]
2022-09-26 11:36   ` [PATCH v2 0/3] Vector support for event crypto adapter Volodymyr Fialko
2022-09-26 11:36     ` [PATCH v2 1/3] eventdev: introduce event cryptodev vector type Volodymyr Fialko
2022-09-26 11:36     ` [PATCH v2 2/3] crypto/cnxk: add burst enqueue for event crypto Volodymyr Fialko
2022-09-26 11:36     ` [PATCH v2 3/3] crypto/cnxk: add vectorization " Volodymyr Fialko
2022-10-01  0:42     ` [PATCH v3 0/2] Vector support for event crypto adapter Volodymyr Fialko
2022-10-01  0:42       ` [PATCH v3 1/2] eventdev: introduce event cryptodev vector type Volodymyr Fialko
2022-10-01  0:42       ` [PATCH v3 2/2] crypto/cnxk: add vectorization for event crypto Volodymyr Fialko
2022-10-01  3:42       ` [PATCH v3 0/2] Vector support for event crypto adapter Akhil Goyal
2022-10-01  8:00         ` Gujjar, Abhinandan S
2022-10-01  8:47           ` Akhil Goyal
2022-10-02  1:56             ` Gujjar, Abhinandan S
