DPDK patches and discussions
From: Anoob Joseph <anoobj@marvell.com>
To: Akhil Goyal <gakhil@marvell.com>, Thomas Monjalon <thomas@monjalon.net>
Cc: Kiran Kumar K <kirankumark@marvell.com>,
	Jerin Jacob <jerinj@marvell.com>,
	 Ankur Dwivedi <adwivedi@marvell.com>,
	Tejasree Kondoj <ktejasree@marvell.com>, <dev@dpdk.org>
Subject: [dpdk-dev] [PATCH v3 6/8] crypto/cnxk: add asymmetric datapath ops
Date: Tue, 29 Jun 2021 13:04:34 +0530	[thread overview]
Message-ID: <1624952076-30928-7-git-send-email-anoobj@marvell.com> (raw)
In-Reply-To: <1624952076-30928-1-git-send-email-anoobj@marvell.com>

From: Kiran Kumar K <kirankumark@marvell.com>

Add asymmetric crypto datapath operations for both cn9k and cn10k:
enqueue/dequeue handling plus CPT instruction preparation for modular
exponentiation, RSA (with and without CRT), ECDSA sign/verify and EC
point multiplication.

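For context, a minimal usage sketch against the generic cryptodev
asymmetric API (dev_id, qp_id, op_pool, the modex session setup and the
operand buffers are assumed here, not part of this patch):

    struct rte_crypto_op *op;

    op = rte_crypto_op_alloc(op_pool, RTE_CRYPTO_OP_TYPE_ASYMMETRIC);
    rte_crypto_op_attach_asym_session(op, sess); /* modex session */

    /* base is the input operand; result must hold modulus-length bytes */
    op->asym->modex.base.data = base;
    op->asym->modex.base.length = base_len;
    op->asym->modex.result.data = result;
    op->asym->modex.result.length = result_len;

    while (rte_cryptodev_enqueue_burst(dev_id, qp_id, &op, 1) == 0)
        ; /* queue full, retry */

    while (rte_cryptodev_dequeue_burst(dev_id, qp_id, &op, 1) == 0)
        ; /* poll until the CPT engine completes */

    /*
     * On success, op->status is RTE_CRYPTO_OP_STATUS_SUCCESS and
     * modex.result holds (base ^ exponent) mod modulus.
     */
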
Signed-off-by: Kiran Kumar K <kirankumark@marvell.com>
---
 drivers/crypto/cnxk/cn10k_cryptodev_ops.c |  27 ++
 drivers/crypto/cnxk/cn9k_cryptodev_ops.c  |  29 +-
 drivers/crypto/cnxk/cnxk_ae.h             | 625 ++++++++++++++++++++++++++++++
 3 files changed, 679 insertions(+), 2 deletions(-)

diff --git a/drivers/crypto/cnxk/cn10k_cryptodev_ops.c b/drivers/crypto/cnxk/cn10k_cryptodev_ops.c
index aa615b2..6d322a9 100644
--- a/drivers/crypto/cnxk/cn10k_cryptodev_ops.c
+++ b/drivers/crypto/cnxk/cn10k_cryptodev_ops.c
@@ -10,6 +10,7 @@
 #include "cn10k_cryptodev_ops.h"
 #include "cn10k_ipsec_la_ops.h"
 #include "cn10k_ipsec.h"
+#include "cnxk_ae.h"
 #include "cnxk_cryptodev.h"
 #include "cnxk_cryptodev_ops.h"
 #include "cnxk_se.h"
@@ -100,7 +101,9 @@ cn10k_cpt_fill_inst(struct cnxk_cpt_qp *qp, struct rte_crypto_op *ops[],
 		    struct cpt_inst_s inst[], struct cpt_inflight_req *infl_req)
 {
 	struct cn10k_sec_session *sec_sess;
+	struct rte_crypto_asym_op *asym_op;
 	struct rte_crypto_sym_op *sym_op;
+	struct cnxk_ae_sess *ae_sess;
 	struct cnxk_se_sess *sess;
 	struct rte_crypto_op *op;
 	uint64_t w7;
@@ -148,6 +151,21 @@ cn10k_cpt_fill_inst(struct cnxk_cpt_qp *qp, struct rte_crypto_op *ops[],
 			}
 			w7 = sess->cpt_inst_w7;
 		}
+	} else if (op->type == RTE_CRYPTO_OP_TYPE_ASYMMETRIC) {
+		if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
+			asym_op = op->asym;
+			ae_sess = get_asym_session_private_data(
+				asym_op->session, cn10k_cryptodev_driver_id);
+			ret = cnxk_ae_enqueue(qp, op, infl_req, &inst[0],
+					      ae_sess);
+			if (unlikely(ret))
+				return 0;
+			w7 = ae_sess->cpt_inst_w7;
+		} else {
+			plt_dp_err("Asymmetric op without session is not supported");
+			return 0;
+		}
 	} else {
 		plt_dp_err("Unsupported op type");
 		return 0;
@@ -303,6 +321,15 @@ cn10k_cpt_dequeue_post_process(struct cnxk_cpt_qp *qp,
 				compl_auth_verify(cop, (uint8_t *)rsp[0],
 						  rsp[1]);
 			}
+		} else if (cop->type == RTE_CRYPTO_OP_TYPE_ASYMMETRIC) {
+			struct rte_crypto_asym_op *op = cop->asym;
+			uintptr_t *mdata = infl_req->mdata;
+			struct cnxk_ae_sess *sess;
+
+			sess = get_asym_session_private_data(
+				op->session, cn10k_cryptodev_driver_id);
+
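+			/* First 8B of the meta buffer hold RPTR, stashed at enqueue */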
+			cnxk_ae_post_process(cop, sess, (uint8_t *)mdata[0]);
 		}
 	} else {
 		cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
diff --git a/drivers/crypto/cnxk/cn9k_cryptodev_ops.c b/drivers/crypto/cnxk/cn9k_cryptodev_ops.c
index 6d1537b..724965b 100644
--- a/drivers/crypto/cnxk/cn9k_cryptodev_ops.c
+++ b/drivers/crypto/cnxk/cn9k_cryptodev_ops.c
@@ -7,6 +7,7 @@
 
 #include "cn9k_cryptodev.h"
 #include "cn9k_cryptodev_ops.h"
+#include "cnxk_ae.h"
 #include "cnxk_cryptodev.h"
 #include "cnxk_cryptodev_ops.h"
 #include "cnxk_se.h"
@@ -65,11 +66,11 @@ static uint16_t
 cn9k_cpt_enqueue_burst(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops)
 {
 	struct cpt_inflight_req *infl_req;
+	struct rte_crypto_asym_op *asym_op;
 	struct rte_crypto_sym_op *sym_op;
 	uint16_t nb_allowed, count = 0;
 	struct cnxk_cpt_qp *qp = qptr;
 	struct pending_queue *pend_q;
-	struct cnxk_se_sess *sess;
 	struct rte_crypto_op *op;
 	struct cpt_inst_s inst;
 	uint64_t lmt_status;
@@ -95,6 +96,8 @@ cn9k_cpt_enqueue_burst(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops)
 		infl_req->op_flags = 0;
 
 		if (op->type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
+			struct cnxk_se_sess *sess;
+
 			if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
 				sym_op = op->sym;
 				sess = get_sym_session_private_data(
@@ -120,6 +123,20 @@ cn9k_cpt_enqueue_burst(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops)
 							op->sym->session);
 				}
 			}
+			inst.w7.u64 = sess->cpt_inst_w7;
+		} else if (op->type == RTE_CRYPTO_OP_TYPE_ASYMMETRIC) {
+			struct cnxk_ae_sess *sess;
+
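+			/* Sessionless asymmetric ops are not supported */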
+			ret = -EINVAL;
+			if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
+				asym_op = op->asym;
+				sess = get_asym_session_private_data(
+					asym_op->session,
+					cn9k_cryptodev_driver_id);
+				ret = cnxk_ae_enqueue(qp, op, infl_req, &inst,
+						      sess);
+				inst.w7.u64 = sess->cpt_inst_w7;
+			}
 		} else {
 			plt_dp_err("Unsupported op type");
 			break;
@@ -134,7 +151,6 @@ cn9k_cpt_enqueue_burst(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops)
 
 		infl_req->res.cn9k.compcode = CPT_COMP_NOT_DONE;
 		inst.res_addr = (uint64_t)&infl_req->res;
-		inst.w7.u64 = sess->cpt_inst_w7;
 
 		do {
 			/* Copy CPT command to LMTLINE */
@@ -189,6 +205,15 @@ cn9k_cpt_dequeue_post_process(struct cnxk_cpt_qp *qp, struct rte_crypto_op *cop,
 				compl_auth_verify(cop, (uint8_t *)rsp[0],
 						  rsp[1]);
 			}
+		} else if (cop->type == RTE_CRYPTO_OP_TYPE_ASYMMETRIC) {
+			struct rte_crypto_asym_op *op = cop->asym;
+			uintptr_t *mdata = infl_req->mdata;
+			struct cnxk_ae_sess *sess;
+
+			sess = get_asym_session_private_data(
+				op->session, cn9k_cryptodev_driver_id);
+
+			cnxk_ae_post_process(cop, sess, (uint8_t *)mdata[0]);
 		}
 	} else {
 		cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
diff --git a/drivers/crypto/cnxk/cnxk_ae.h b/drivers/crypto/cnxk/cnxk_ae.h
index e3dd63b..c752e62 100644
--- a/drivers/crypto/cnxk/cnxk_ae.h
+++ b/drivers/crypto/cnxk/cnxk_ae.h
@@ -208,4 +208,629 @@ cnxk_ae_free_session_parameters(struct cnxk_ae_sess *sess)
 		break;
 	}
 }
+
+static __rte_always_inline int
+cnxk_ae_modex_prep(struct rte_crypto_op *op, struct roc_ae_buf_ptr *meta_buf,
+		   struct rte_crypto_modex_xform *mod, struct cpt_inst_s *inst)
+{
+	uint32_t exp_len = mod->exponent.length;
+	uint32_t mod_len = mod->modulus.length;
+	struct rte_crypto_mod_op_param mod_op;
+	uint64_t total_key_len;
+	union cpt_inst_w4 w4;
+	uint32_t base_len;
+	uint32_t dlen;
+	uint8_t *dptr;
+
+	mod_op = op->asym->modex;
+
+	base_len = mod_op.base.length;
+	if (unlikely(base_len > mod_len)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -ENOTSUP;
+	}
+
+	total_key_len = mod_len + exp_len;
+
+	/* Input buffer */
+	dptr = meta_buf->vaddr;
+	inst->dptr = (uintptr_t)dptr;
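+	/* The session stores the modulus and exponent contiguously, so a
+	 * single copy of total_key_len bytes covers both.
+	 */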
+	memcpy(dptr, mod->modulus.data, total_key_len);
+	dptr += total_key_len;
+	memcpy(dptr, mod_op.base.data, base_len);
+	dptr += base_len;
+	dlen = total_key_len + base_len;
+
+	/* Setup opcodes */
+	w4.s.opcode_major = ROC_AE_MAJOR_OP_MODEX;
+	w4.s.opcode_minor = ROC_AE_MINOR_OP_MODEX;
+
+	w4.s.param1 = mod_len;
+	w4.s.param2 = exp_len;
+	w4.s.dlen = dlen;
+
+	inst->w4.u64 = w4.u64;
+	inst->rptr = (uintptr_t)dptr;
+
+	return 0;
+}
+
+static __rte_always_inline void
+cnxk_ae_rsa_prep(struct rte_crypto_op *op, struct roc_ae_buf_ptr *meta_buf,
+		 struct rte_crypto_rsa_xform *rsa,
+		 rte_crypto_param *crypto_param, struct cpt_inst_s *inst)
+{
+	struct rte_crypto_rsa_op_param rsa_op;
+	uint32_t mod_len = rsa->n.length;
+	uint32_t exp_len = rsa->e.length;
+	uint64_t total_key_len;
+	union cpt_inst_w4 w4;
+	uint32_t in_size;
+	uint32_t dlen;
+	uint8_t *dptr;
+
+	rsa_op = op->asym->rsa;
+	total_key_len = mod_len + exp_len;
+
+	/* Input buffer */
+	dptr = meta_buf->vaddr;
+	inst->dptr = (uintptr_t)dptr;
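+	/* Session setup places the public exponent contiguously after the
+	 * modulus, so a single copy covers both n and e.
+	 */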
+	memcpy(dptr, rsa->n.data, total_key_len);
+	dptr += total_key_len;
+
+	in_size = crypto_param->length;
+	memcpy(dptr, crypto_param->data, in_size);
+
+	dptr += in_size;
+	dlen = total_key_len + in_size;
+
+	if (rsa_op.pad == RTE_CRYPTO_RSA_PADDING_NONE) {
+		/* Use mod_exp operation for no_padding type */
+		w4.s.opcode_minor = ROC_AE_MINOR_OP_MODEX;
+		w4.s.param2 = exp_len;
+	} else {
+		if (rsa_op.op_type == RTE_CRYPTO_ASYM_OP_ENCRYPT) {
+			w4.s.opcode_minor = ROC_AE_MINOR_OP_PKCS_ENC;
+			/* Public key encrypt, use BT2 */
+			w4.s.param2 = ROC_AE_CPT_BLOCK_TYPE2 |
+				      ((uint16_t)(exp_len) << 1);
+		} else if (rsa_op.op_type == RTE_CRYPTO_ASYM_OP_VERIFY) {
+			w4.s.opcode_minor = ROC_AE_MINOR_OP_PKCS_DEC;
+			/* Public key decrypt, use BT1 */
+			w4.s.param2 = ROC_AE_CPT_BLOCK_TYPE1;
+		}
+	}
+
+	w4.s.opcode_major = ROC_AE_MAJOR_OP_MODEX;
+
+	w4.s.param1 = mod_len;
+	w4.s.dlen = dlen;
+
+	inst->w4.u64 = w4.u64;
+	inst->rptr = (uintptr_t)dptr;
+}
+
+static __rte_always_inline void
+cnxk_ae_rsa_crt_prep(struct rte_crypto_op *op, struct roc_ae_buf_ptr *meta_buf,
+		     struct rte_crypto_rsa_xform *rsa,
+		     rte_crypto_param *crypto_param, struct cpt_inst_s *inst)
+{
+	uint32_t qInv_len = rsa->qt.qInv.length;
+	struct rte_crypto_rsa_op_param rsa_op;
+	uint32_t dP_len = rsa->qt.dP.length;
+	uint32_t dQ_len = rsa->qt.dQ.length;
+	uint32_t p_len = rsa->qt.p.length;
+	uint32_t q_len = rsa->qt.q.length;
+	uint32_t mod_len = rsa->n.length;
+	uint64_t total_key_len;
+	union cpt_inst_w4 w4;
+	uint32_t in_size;
+	uint32_t dlen;
+	uint8_t *dptr;
+
+	rsa_op = op->asym->rsa;
+	total_key_len = p_len + q_len + dP_len + dQ_len + qInv_len;
+
+	/* Input buffer */
+	dptr = meta_buf->vaddr;
+	inst->dptr = (uintptr_t)dptr;
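+	/* The five CRT components are stored contiguously in the session,
+	 * so a single copy starting at q.data covers the whole key.
+	 */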
+	memcpy(dptr, rsa->qt.q.data, total_key_len);
+	dptr += total_key_len;
+
+	in_size = crypto_param->length;
+	memcpy(dptr, crypto_param->data, in_size);
+
+	dptr += in_size;
+	dlen = total_key_len + in_size;
+
+	if (rsa_op.pad == RTE_CRYPTO_RSA_PADDING_NONE) {
+		/* Use mod_exp operation for no_padding type */
+		w4.s.opcode_minor = ROC_AE_MINOR_OP_MODEX_CRT;
+	} else {
+		if (rsa_op.op_type == RTE_CRYPTO_ASYM_OP_SIGN) {
+			w4.s.opcode_minor = ROC_AE_MINOR_OP_PKCS_ENC_CRT;
+			/* Private encrypt, use BT1 */
+			w4.s.param2 = ROC_AE_CPT_BLOCK_TYPE1;
+		} else if (rsa_op.op_type == RTE_CRYPTO_ASYM_OP_DECRYPT) {
+			w4.s.opcode_minor = ROC_AE_MINOR_OP_PKCS_DEC_CRT;
+			/* Private decrypt, use BT2 */
+			w4.s.param2 = ROC_AE_CPT_BLOCK_TYPE2;
+		}
+	}
+
+	w4.s.opcode_major = ROC_AE_MAJOR_OP_MODEX;
+
+	w4.s.param1 = mod_len;
+	w4.s.dlen = dlen;
+
+	inst->w4.u64 = w4.u64;
+	inst->rptr = (uintptr_t)dptr;
+}
+
+static __rte_always_inline int __rte_hot
+cnxk_ae_enqueue_rsa_op(struct rte_crypto_op *op,
+		       struct roc_ae_buf_ptr *meta_buf,
+		       struct cnxk_ae_sess *sess, struct cpt_inst_s *inst)
+{
+	struct rte_crypto_rsa_op_param *rsa = &op->asym->rsa;
+
+	switch (rsa->op_type) {
+	case RTE_CRYPTO_ASYM_OP_VERIFY:
+		cnxk_ae_rsa_prep(op, meta_buf, &sess->rsa_ctx, &rsa->sign,
+				 inst);
+		break;
+	case RTE_CRYPTO_ASYM_OP_ENCRYPT:
+		cnxk_ae_rsa_prep(op, meta_buf, &sess->rsa_ctx, &rsa->message,
+				 inst);
+		break;
+	case RTE_CRYPTO_ASYM_OP_SIGN:
+		cnxk_ae_rsa_crt_prep(op, meta_buf, &sess->rsa_ctx,
+				     &rsa->message, inst);
+		break;
+	case RTE_CRYPTO_ASYM_OP_DECRYPT:
+		cnxk_ae_rsa_crt_prep(op, meta_buf, &sess->rsa_ctx, &rsa->cipher,
+				     inst);
+		break;
+	default:
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static __rte_always_inline void
+cnxk_ae_ecdsa_sign_prep(struct rte_crypto_ecdsa_op_param *ecdsa,
+			struct roc_ae_buf_ptr *meta_buf,
+			uint64_t fpm_table_iova, struct roc_ae_ec_group *ec_grp,
+			uint8_t curveid, struct cpt_inst_s *inst)
+{
+	uint16_t message_len = ecdsa->message.length;
+	uint16_t pkey_len = ecdsa->pkey.length;
+	uint16_t p_align, k_align, m_align;
+	uint16_t k_len = ecdsa->k.length;
+	uint16_t order_len, prime_len;
+	uint16_t o_offset, pk_offset;
+	union cpt_inst_w4 w4;
+	uint16_t dlen;
+	uint8_t *dptr;
+
+	prime_len = ec_grp->prime.length;
+	order_len = ec_grp->order.length;
+
+	/* Truncate input length to curve prime length */
+	if (message_len > prime_len)
+		message_len = prime_len;
+	m_align = RTE_ALIGN_CEIL(message_len, 8);
+
+	p_align = RTE_ALIGN_CEIL(prime_len, 8);
+	k_align = RTE_ALIGN_CEIL(k_len, 8);
+
+	/* Set write offset for order and private key */
+	o_offset = prime_len - order_len;
+	pk_offset = prime_len - pkey_len;
+
+	/* Input buffer */
+	dptr = meta_buf->vaddr;
+	inst->dptr = (uintptr_t)dptr;
+
+	/*
+	 * Set dlen = sum(sizeof(fpm address), ROUNDUP8(scalar len, input len),
+	 * ROUNDUP8(priv key len, prime len, order len)).
+	 * Note that the private key and order cannot exceed the prime
+	 * length, i.e., 3 * p_align.
+	 */
+	dlen = sizeof(fpm_table_iova) + k_align + m_align + p_align * 3;
+
+	memset(dptr, 0, dlen);
+
+	*(uint64_t *)dptr = fpm_table_iova;
+	dptr += sizeof(fpm_table_iova);
+
+	memcpy(dptr, ecdsa->k.data, k_len);
+	dptr += k_align;
+
+	memcpy(dptr, ec_grp->prime.data, prime_len);
+	dptr += p_align;
+
+	memcpy(dptr + o_offset, ec_grp->order.data, order_len);
+	dptr += p_align;
+
+	memcpy(dptr + pk_offset, ecdsa->pkey.data, pkey_len);
+	dptr += p_align;
+
+	memcpy(dptr, ecdsa->message.data, message_len);
+	dptr += m_align;
+
+	/* Setup opcodes */
+	w4.s.opcode_major = ROC_AE_MAJOR_OP_ECDSA;
+	w4.s.opcode_minor = ROC_AE_MINOR_OP_ECDSA_SIGN;
+
+	w4.s.param1 = curveid | (message_len << 8);
+	w4.s.param2 = k_len;
+	w4.s.dlen = dlen;
+
+	inst->w4.u64 = w4.u64;
+	inst->rptr = (uintptr_t)dptr;
+}
+
+static __rte_always_inline void
+cnxk_ae_ecdsa_verify_prep(struct rte_crypto_ecdsa_op_param *ecdsa,
+			  struct roc_ae_buf_ptr *meta_buf,
+			  uint64_t fpm_table_iova,
+			  struct roc_ae_ec_group *ec_grp, uint8_t curveid,
+			  struct cpt_inst_s *inst)
+{
+	uint32_t message_len = ecdsa->message.length;
+	uint16_t o_offset, r_offset, s_offset;
+	uint16_t qx_len = ecdsa->q.x.length;
+	uint16_t qy_len = ecdsa->q.y.length;
+	uint16_t r_len = ecdsa->r.length;
+	uint16_t s_len = ecdsa->s.length;
+	uint16_t order_len, prime_len;
+	uint16_t qx_offset, qy_offset;
+	uint16_t p_align, m_align;
+	union cpt_inst_w4 w4;
+	uint16_t dlen;
+	uint8_t *dptr;
+
+	prime_len = ec_grp->prime.length;
+	order_len = ec_grp->order.length;
+
+	/* Truncate input length to curve prime length */
+	if (message_len > prime_len)
+		message_len = prime_len;
+
+	m_align = RTE_ALIGN_CEIL(message_len, 8);
+	p_align = RTE_ALIGN_CEIL(prime_len, 8);
+
+	/* Set write offset for sign, order and public key coordinates */
+	o_offset = prime_len - order_len;
+	qx_offset = prime_len - qx_len;
+	qy_offset = prime_len - qy_len;
+	r_offset = prime_len - r_len;
+	s_offset = prime_len - s_len;
+
+	/* Input buffer */
+	dptr = meta_buf->vaddr;
+	inst->dptr = (uintptr_t)dptr;
+
+	/*
+	 * Set dlen = sum(sizeof(fpm address), ROUNDUP8(message len),
+	 * ROUNDUP8(sign len(r and s), public key len(x and y coordinates),
+	 * prime len, order len)).
+	 * Note that the sign, public key and order cannot exceed the prime
+	 * length, i.e., 6 * p_align.
+	 */
+	dlen = sizeof(fpm_table_iova) + m_align + (6 * p_align);
+
+	memset(dptr, 0, dlen);
+
+	*(uint64_t *)dptr = fpm_table_iova;
+	dptr += sizeof(fpm_table_iova);
+
+	memcpy(dptr + r_offset, ecdsa->r.data, r_len);
+	dptr += p_align;
+
+	memcpy(dptr + s_offset, ecdsa->s.data, s_len);
+	dptr += p_align;
+
+	memcpy(dptr, ecdsa->message.data, message_len);
+	dptr += m_align;
+
+	memcpy(dptr + o_offset, ec_grp->order.data, order_len);
+	dptr += p_align;
+
+	memcpy(dptr, ec_grp->prime.data, prime_len);
+	dptr += p_align;
+
+	memcpy(dptr + qx_offset, ecdsa->q.x.data, qx_len);
+	dptr += p_align;
+
+	memcpy(dptr + qy_offset, ecdsa->q.y.data, qy_len);
+	dptr += p_align;
+
+	/* Setup opcodes */
+	w4.s.opcode_major = ROC_AE_MAJOR_OP_ECDSA;
+	w4.s.opcode_minor = ROC_AE_MINOR_OP_ECDSA_VERIFY;
+
+	w4.s.param1 = curveid | (message_len << 8);
+	w4.s.param2 = 0;
+	w4.s.dlen = dlen;
+
+	inst->w4.u64 = w4.u64;
+	inst->rptr = (uintptr_t)dptr;
+}
+
+static __rte_always_inline int __rte_hot
+cnxk_ae_enqueue_ecdsa_op(struct rte_crypto_op *op,
+			 struct roc_ae_buf_ptr *meta_buf,
+			 struct cnxk_ae_sess *sess, uint64_t *fpm_iova,
+			 struct roc_ae_ec_group **ec_grp,
+			 struct cpt_inst_s *inst)
+{
+	struct rte_crypto_ecdsa_op_param *ecdsa = &op->asym->ecdsa;
+	uint8_t curveid = sess->ec_ctx.curveid;
+
+	if (ecdsa->op_type == RTE_CRYPTO_ASYM_OP_SIGN)
+		cnxk_ae_ecdsa_sign_prep(ecdsa, meta_buf, fpm_iova[curveid],
+					ec_grp[curveid], curveid, inst);
+	else if (ecdsa->op_type == RTE_CRYPTO_ASYM_OP_VERIFY)
+		cnxk_ae_ecdsa_verify_prep(ecdsa, meta_buf, fpm_iova[curveid],
+					  ec_grp[curveid], curveid, inst);
+	else {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static __rte_always_inline int
+cnxk_ae_ecpm_prep(struct rte_crypto_ecpm_op_param *ecpm,
+		  struct roc_ae_buf_ptr *meta_buf,
+		  struct roc_ae_ec_group *ec_grp, uint8_t curveid,
+		  struct cpt_inst_s *inst)
+{
+	uint16_t x1_len = ecpm->p.x.length;
+	uint16_t y1_len = ecpm->p.y.length;
+	uint16_t scalar_align, p_align;
+	uint16_t x1_offset, y1_offset;
+	uint16_t dlen, prime_len;
+	union cpt_inst_w4 w4;
+	uint8_t *dptr;
+
+	prime_len = ec_grp->prime.length;
+
+	/* Input buffer */
+	dptr = meta_buf->vaddr;
+	inst->dptr = (uintptr_t)dptr;
+
+	p_align = RTE_ALIGN_CEIL(prime_len, 8);
+	scalar_align = RTE_ALIGN_CEIL(ecpm->scalar.length, 8);
+
+	/*
+	 * Set dlen = sum(ROUNDUP8(input point (x and y coordinates), prime,
+	 * scalar length)).
+	 * Note that the point coordinate length equals the curve prime
+	 * length, i.e., dlen = 3 * p_align + scalar_align.
+	 */
+	dlen = 3 * p_align + scalar_align;
+
+	x1_offset = prime_len - x1_len;
+	y1_offset = prime_len - y1_len;
+
+	memset(dptr, 0, dlen);
+
+	/* Copy input point, scalar, prime */
+	memcpy(dptr + x1_offset, ecpm->p.x.data, x1_len);
+	dptr += p_align;
+	memcpy(dptr + y1_offset, ecpm->p.y.data, y1_len);
+	dptr += p_align;
+	memcpy(dptr, ecpm->scalar.data, ecpm->scalar.length);
+	dptr += scalar_align;
+	memcpy(dptr, ec_grp->prime.data, ec_grp->prime.length);
+	dptr += p_align;
+
+	/* Setup opcodes */
+	w4.s.opcode_major = ROC_AE_MAJOR_OP_ECC;
+	w4.s.opcode_minor = ROC_AE_MINOR_OP_ECC_UMP;
+
+	w4.s.param1 = curveid;
+	w4.s.param2 = ecpm->scalar.length;
+	w4.s.dlen = dlen;
+
+	inst->w4.u64 = w4.u64;
+	inst->rptr = (uintptr_t)dptr;
+
+	return 0;
+}
+
+static __rte_always_inline void
+cnxk_ae_dequeue_rsa_op(struct rte_crypto_op *cop, uint8_t *rptr,
+		       struct rte_crypto_rsa_xform *rsa_ctx)
+{
+	struct rte_crypto_rsa_op_param *rsa = &cop->asym->rsa;
+
+	switch (rsa->op_type) {
+	case RTE_CRYPTO_ASYM_OP_ENCRYPT:
+		rsa->cipher.length = rsa_ctx->n.length;
+		memcpy(rsa->cipher.data, rptr, rsa->cipher.length);
+		break;
+	case RTE_CRYPTO_ASYM_OP_DECRYPT:
+		if (rsa->pad == RTE_CRYPTO_RSA_PADDING_NONE) {
+			rsa->message.length = rsa_ctx->n.length;
+			memcpy(rsa->message.data, rptr, rsa->message.length);
+		} else {
+			/* Get length of decrypted output */
+			rsa->message.length =
+				rte_cpu_to_be_16(*((uint16_t *)rptr));
+			/*
+			 * Offset output data pointer by length field
+			 * (2 bytes) and copy decrypted data.
+			 */
+			memcpy(rsa->message.data, rptr + 2,
+			       rsa->message.length);
+		}
+		break;
+	case RTE_CRYPTO_ASYM_OP_SIGN:
+		rsa->sign.length = rsa_ctx->n.length;
+		memcpy(rsa->sign.data, rptr, rsa->sign.length);
+		break;
+	case RTE_CRYPTO_ASYM_OP_VERIFY:
+		if (rsa->pad == RTE_CRYPTO_RSA_PADDING_NONE) {
+			rsa->sign.length = rsa_ctx->n.length;
+			memcpy(rsa->sign.data, rptr, rsa->sign.length);
+		} else {
+			/* Get length of signed output */
+			rsa->sign.length =
+				rte_cpu_to_be_16(*((uint16_t *)rptr));
+			/*
+			 * Offset output data pointer by length field
+			 * (2 bytes) and copy signed data.
+			 */
+			memcpy(rsa->sign.data, rptr + 2, rsa->sign.length);
+		}
+		if (memcmp(rsa->sign.data, rsa->message.data,
+			   rsa->message.length)) {
+			cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
+		}
+		break;
+	default:
+		cop->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		break;
+	}
+}
+
+static __rte_always_inline void
+cnxk_ae_dequeue_ecdsa_op(struct rte_crypto_ecdsa_op_param *ecdsa, uint8_t *rptr,
+			 struct roc_ae_ec_ctx *ec,
+			 struct roc_ae_ec_group **ec_grp)
+{
+	int prime_len = ec_grp[ec->curveid]->prime.length;
+
+	if (ecdsa->op_type == RTE_CRYPTO_ASYM_OP_VERIFY)
+		return;
+
+	/* Separate out sign r and s components */
+	memcpy(ecdsa->r.data, rptr, prime_len);
+	memcpy(ecdsa->s.data, rptr + RTE_ALIGN_CEIL(prime_len, 8), prime_len);
+	ecdsa->r.length = prime_len;
+	ecdsa->s.length = prime_len;
+}
+
+static __rte_always_inline void
+cnxk_ae_dequeue_ecpm_op(struct rte_crypto_ecpm_op_param *ecpm, uint8_t *rptr,
+			struct roc_ae_ec_ctx *ec,
+			struct roc_ae_ec_group **ec_grp)
+{
+	int prime_len = ec_grp[ec->curveid]->prime.length;
+
+	memcpy(ecpm->r.x.data, rptr, prime_len);
+	memcpy(ecpm->r.y.data, rptr + RTE_ALIGN_CEIL(prime_len, 8), prime_len);
+	ecpm->r.x.length = prime_len;
+	ecpm->r.y.length = prime_len;
+}
+
+static __rte_always_inline void *
+cnxk_ae_alloc_meta(struct roc_ae_buf_ptr *buf,
+		   struct rte_mempool *cpt_meta_pool,
+		   struct cpt_inflight_req *infl_req)
+{
+	uint8_t *mdata;
+
+	if (unlikely(rte_mempool_get(cpt_meta_pool, (void **)&mdata) < 0))
+		return NULL;
+
+	buf->vaddr = mdata;
+
+	infl_req->mdata = mdata;
+	infl_req->op_flags |= CPT_OP_FLAGS_METABUF;
+
+	return mdata;
+}
+
+static __rte_always_inline int32_t __rte_hot
+cnxk_ae_enqueue(struct cnxk_cpt_qp *qp, struct rte_crypto_op *op,
+		struct cpt_inflight_req *infl_req, struct cpt_inst_s *inst,
+		struct cnxk_ae_sess *sess)
+{
+	struct cpt_qp_meta_info *minfo = &qp->meta_info;
+	struct rte_crypto_asym_op *asym_op = op->asym;
+	struct roc_ae_buf_ptr meta_buf;
+	uint64_t *mop;
+	void *mdata;
+	int ret;
+
+	mdata = cnxk_ae_alloc_meta(&meta_buf, minfo->pool, infl_req);
+	if (mdata == NULL)
+		return -ENOMEM;
+
+	/* Reserve 8B for RPTR */
+	meta_buf.vaddr = PLT_PTR_ADD(mdata, sizeof(uint64_t));
+
+	switch (sess->xfrm_type) {
+	case RTE_CRYPTO_ASYM_XFORM_MODEX:
+		ret = cnxk_ae_modex_prep(op, &meta_buf, &sess->mod_ctx, inst);
+		if (unlikely(ret))
+			goto req_fail;
+		break;
+	case RTE_CRYPTO_ASYM_XFORM_RSA:
+		ret = cnxk_ae_enqueue_rsa_op(op, &meta_buf, sess, inst);
+		if (unlikely(ret))
+			goto req_fail;
+		break;
+	case RTE_CRYPTO_ASYM_XFORM_ECDSA:
+		ret = cnxk_ae_enqueue_ecdsa_op(op, &meta_buf, sess,
+					       sess->cnxk_fpm_iova,
+					       sess->ec_grp, inst);
+		if (unlikely(ret))
+			goto req_fail;
+		break;
+	case RTE_CRYPTO_ASYM_XFORM_ECPM:
+		ret = cnxk_ae_ecpm_prep(&asym_op->ecpm, &meta_buf,
+					sess->ec_grp[sess->ec_ctx.curveid],
+					sess->ec_ctx.curveid, inst);
+		if (unlikely(ret))
+			goto req_fail;
+		break;
+	default:
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		ret = -EINVAL;
+		goto req_fail;
+	}
+
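+	/* Stash RPTR in the reserved first 8B of the meta buffer so that
+	 * dequeue can recover the result address via infl_req->mdata.
+	 */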
+	mop = mdata;
+	mop[0] = inst->rptr;
+	return 0;
+
+req_fail:
+	rte_mempool_put(minfo->pool, infl_req->mdata);
+	return ret;
+}
+
+static __rte_always_inline void
+cnxk_ae_post_process(struct rte_crypto_op *cop, struct cnxk_ae_sess *sess,
+		     uint8_t *rptr)
+{
+	struct rte_crypto_asym_op *op = cop->asym;
+
+	switch (sess->xfrm_type) {
+	case RTE_CRYPTO_ASYM_XFORM_RSA:
+		cnxk_ae_dequeue_rsa_op(cop, rptr, &sess->rsa_ctx);
+		break;
+	case RTE_CRYPTO_ASYM_XFORM_MODEX:
+		op->modex.result.length = sess->mod_ctx.modulus.length;
+		memcpy(op->modex.result.data, rptr, op->modex.result.length);
+		break;
+	case RTE_CRYPTO_ASYM_XFORM_ECDSA:
+		cnxk_ae_dequeue_ecdsa_op(&op->ecdsa, rptr, &sess->ec_ctx,
+					 sess->ec_grp);
+		break;
+	case RTE_CRYPTO_ASYM_XFORM_ECPM:
+		cnxk_ae_dequeue_ecpm_op(&op->ecpm, rptr, &sess->ec_ctx,
+					sess->ec_grp);
+		break;
+	default:
+		cop->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		break;
+	}
+}
 #endif /* _CNXK_AE_H_ */
-- 
2.7.4

