DPDK patches and discussions
 help / color / mirror / Atom feed
* [dpdk-dev] [PATCH 0/3] Add asymmetric ops in crypto cnxk PMDs
@ 2021-06-02 17:46 Anoob Joseph
  2021-06-02 17:46 ` [dpdk-dev] [PATCH 1/3] crypto/cnxk: add asymmetric session ops Anoob Joseph
                   ` (3 more replies)
  0 siblings, 4 replies; 11+ messages in thread
From: Anoob Joseph @ 2021-06-02 17:46 UTC (permalink / raw)
  To: Akhil Goyal, Thomas Monjalon
  Cc: Anoob Joseph, Jerin Jacob, Ankur Dwivedi, Tejasree Kondoj, dev

Add support for asymmetric operations in crypto cnxk PMDs.
The following operations are supported:
- RSA
- DSA
- ECDSA
- ECPM
- Modular Exponentiation

Depends-on: series-17212 ("Add CPT in Marvell CNXK common driver")
Depends-on: series-17213 ("Add Marvell CNXK crypto PMDs")
Depends-on: series-17214 ("Add rte_security in crypto_cn10k PMD")

Kiran Kumar K (3):
  crypto/cnxk: add asymmetric session ops
  crypto/cnxk: add asymmetric datapath ops
  app/test: adding cnxk asymmetric autotest

 app/test/test_cryptodev_asym.c            |  30 ++
 drivers/crypto/cnxk/cn10k_cryptodev.c     |   2 +
 drivers/crypto/cnxk/cn10k_cryptodev_ops.c | 114 ++++-
 drivers/crypto/cnxk/cn9k_cryptodev.c      |   4 +-
 drivers/crypto/cnxk/cn9k_cryptodev_ops.c  | 118 ++++-
 drivers/crypto/cnxk/cnxk_ae.h             | 825 ++++++++++++++++++++++++++++++
 drivers/crypto/cnxk/cnxk_cpt_ops_helper.c |  14 +
 drivers/crypto/cnxk/cnxk_cpt_ops_helper.h |   4 +
 drivers/crypto/cnxk/cnxk_cryptodev.h      |   3 +-
 drivers/crypto/cnxk/cnxk_cryptodev_ops.c  |  75 +++
 drivers/crypto/cnxk/cnxk_cryptodev_ops.h  |   8 +
 11 files changed, 1187 insertions(+), 10 deletions(-)
 create mode 100644 drivers/crypto/cnxk/cnxk_ae.h

-- 
2.7.4


^ permalink raw reply	[flat|nested] 11+ messages in thread

* [dpdk-dev] [PATCH 1/3] crypto/cnxk: add asymmetric session ops
  2021-06-02 17:46 [dpdk-dev] [PATCH 0/3] Add asymmetric ops in crypto cnxk PMDs Anoob Joseph
@ 2021-06-02 17:46 ` Anoob Joseph
  2021-06-16 20:21   ` Akhil Goyal
  2021-06-02 17:46 ` [dpdk-dev] [PATCH 2/3] crypto/cnxk: add asymmetric datapath ops Anoob Joseph
                   ` (2 subsequent siblings)
  3 siblings, 1 reply; 11+ messages in thread
From: Anoob Joseph @ 2021-06-02 17:46 UTC (permalink / raw)
  To: Akhil Goyal, Thomas Monjalon
  Cc: Kiran Kumar K, Jerin Jacob, Ankur Dwivedi, Tejasree Kondoj, dev

From: Kiran Kumar K <kirankumark@marvell.com>

Adding asymmetric crypto session ops.

Signed-off-by: Kiran Kumar K <kirankumark@marvell.com>
---
 drivers/crypto/cnxk/cn10k_cryptodev.c     |   2 +
 drivers/crypto/cnxk/cn10k_cryptodev_ops.c |   6 +-
 drivers/crypto/cnxk/cn9k_cryptodev.c      |   4 +-
 drivers/crypto/cnxk/cn9k_cryptodev_ops.c  |   6 +-
 drivers/crypto/cnxk/cnxk_ae.h             | 210 ++++++++++++++++++++++++++++++
 drivers/crypto/cnxk/cnxk_cpt_ops_helper.c |  14 ++
 drivers/crypto/cnxk/cnxk_cpt_ops_helper.h |   4 +
 drivers/crypto/cnxk/cnxk_cryptodev.h      |   3 +-
 drivers/crypto/cnxk/cnxk_cryptodev_ops.c  |  75 +++++++++++
 drivers/crypto/cnxk/cnxk_cryptodev_ops.h  |   8 ++
 10 files changed, 324 insertions(+), 8 deletions(-)
 create mode 100644 drivers/crypto/cnxk/cnxk_ae.h

diff --git a/drivers/crypto/cnxk/cn10k_cryptodev.c b/drivers/crypto/cnxk/cn10k_cryptodev.c
index 9517e62..84a1a3a 100644
--- a/drivers/crypto/cnxk/cn10k_cryptodev.c
+++ b/drivers/crypto/cnxk/cn10k_cryptodev.c
@@ -87,7 +87,9 @@ cn10k_cpt_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
 	cnxk_cpt_caps_populate(vf);
 
 	dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
+			     RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO |
 			     RTE_CRYPTODEV_FF_HW_ACCELERATED |
+			     RTE_CRYPTODEV_FF_RSA_PRIV_OP_KEY_QT |
 			     RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
 			     RTE_CRYPTODEV_FF_IN_PLACE_SGL |
 			     RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT |
diff --git a/drivers/crypto/cnxk/cn10k_cryptodev_ops.c b/drivers/crypto/cnxk/cn10k_cryptodev_ops.c
index 68093ea..f98028e 100644
--- a/drivers/crypto/cnxk/cn10k_cryptodev_ops.c
+++ b/drivers/crypto/cnxk/cn10k_cryptodev_ops.c
@@ -426,8 +426,8 @@ struct rte_cryptodev_ops cn10k_cpt_ops = {
 	.sym_session_clear = cnxk_cpt_sym_session_clear,
 
 	/* Asymmetric crypto ops */
-	.asym_session_get_size = NULL,
-	.asym_session_configure = NULL,
-	.asym_session_clear = NULL,
+	.asym_session_get_size = cnxk_ae_session_size_get,
+	.asym_session_configure = cnxk_ae_session_cfg,
+	.asym_session_clear = cnxk_ae_session_clear,
 
 };
diff --git a/drivers/crypto/cnxk/cn9k_cryptodev.c b/drivers/crypto/cnxk/cn9k_cryptodev.c
index ffa01a2..6002fa6 100644
--- a/drivers/crypto/cnxk/cn9k_cryptodev.c
+++ b/drivers/crypto/cnxk/cn9k_cryptodev.c
@@ -78,6 +78,7 @@ cn9k_cpt_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
 	cnxk_cpt_caps_populate(vf);
 
 	dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
+			     RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO |
 			     RTE_CRYPTODEV_FF_HW_ACCELERATED |
 			     RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
 			     RTE_CRYPTODEV_FF_IN_PLACE_SGL |
@@ -85,7 +86,8 @@ cn9k_cpt_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
 			     RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
 			     RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT |
 			     RTE_CRYPTODEV_FF_SYM_SESSIONLESS |
-			     RTE_CRYPTODEV_FF_DIGEST_ENCRYPTED;
+			     RTE_CRYPTODEV_FF_DIGEST_ENCRYPTED |
+			     RTE_CRYPTODEV_FF_RSA_PRIV_OP_KEY_QT;
 
 	cn9k_cpt_set_enqdeq_fns(dev);
 
diff --git a/drivers/crypto/cnxk/cn9k_cryptodev_ops.c b/drivers/crypto/cnxk/cn9k_cryptodev_ops.c
index 23b596f..07be33d 100644
--- a/drivers/crypto/cnxk/cn9k_cryptodev_ops.c
+++ b/drivers/crypto/cnxk/cn9k_cryptodev_ops.c
@@ -312,8 +312,8 @@ struct rte_cryptodev_ops cn9k_cpt_ops = {
 	.sym_session_clear = cnxk_cpt_sym_session_clear,
 
 	/* Asymmetric crypto ops */
-	.asym_session_get_size = NULL,
-	.asym_session_configure = NULL,
-	.asym_session_clear = NULL,
+	.asym_session_get_size = cnxk_ae_session_size_get,
+	.asym_session_configure = cnxk_ae_session_cfg,
+	.asym_session_clear = cnxk_ae_session_clear,
 
 };
diff --git a/drivers/crypto/cnxk/cnxk_ae.h b/drivers/crypto/cnxk/cnxk_ae.h
new file mode 100644
index 0000000..5659c0d
--- /dev/null
+++ b/drivers/crypto/cnxk/cnxk_ae.h
@@ -0,0 +1,210 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2021 Marvell.
+ */
+
+#ifndef _CNXK_AE_H_
+#define _CNXK_AE_H_
+
+#include <rte_common.h>
+#include <rte_crypto_asym.h>
+#include <rte_malloc.h>
+
+#include "roc_ae.h"
+#include "roc_ae_fpm_tables.h"
+
+struct cnxk_ae_sess {
+	enum rte_crypto_asym_xform_type xfrm_type;
+	union {
+		struct rte_crypto_rsa_xform rsa_ctx;
+		struct rte_crypto_modex_xform mod_ctx;
+		struct roc_ae_ec_ctx ec_ctx;
+	};
+	uint64_t *cnxk_fpm_iova;
+	uint64_t cpt_inst_w7;
+};
+
+static __rte_always_inline void
+cnxk_ae_modex_param_normalize(uint8_t **data, size_t *len)
+{
+	size_t i;
+
+	/* Strip leading NUL bytes */
+	for (i = 0; i < *len; i++) {
+		if ((*data)[i] != 0)
+			break;
+	}
+	*data += i;
+	*len -= i;
+}
+
+static __rte_always_inline int
+cnxk_ae_fill_modex_params(struct cnxk_ae_sess *sess,
+			  struct rte_crypto_asym_xform *xform)
+{
+	struct rte_crypto_modex_xform *ctx = &sess->mod_ctx;
+	size_t exp_len = xform->modex.exponent.length;
+	size_t mod_len = xform->modex.modulus.length;
+	uint8_t *exp = xform->modex.exponent.data;
+	uint8_t *mod = xform->modex.modulus.data;
+
+	cnxk_ae_modex_param_normalize(&mod, &mod_len);
+	cnxk_ae_modex_param_normalize(&exp, &exp_len);
+
+	if (unlikely(exp_len == 0 || mod_len == 0))
+		return -EINVAL;
+
+	if (unlikely(exp_len > mod_len))
+		return -ENOTSUP;
+
+	/* Allocate buffer to hold modexp params */
+	ctx->modulus.data = rte_malloc(NULL, mod_len + exp_len, 0);
+	if (ctx->modulus.data == NULL)
+		return -ENOMEM;
+
+	/* Set up modexp prime modulus and private exponent */
+	memcpy(ctx->modulus.data, mod, mod_len);
+	ctx->exponent.data = ctx->modulus.data + mod_len;
+	memcpy(ctx->exponent.data, exp, exp_len);
+
+	ctx->modulus.length = mod_len;
+	ctx->exponent.length = exp_len;
+
+	return 0;
+}
+
+static __rte_always_inline int
+cnxk_ae_fill_rsa_params(struct cnxk_ae_sess *sess,
+			struct rte_crypto_asym_xform *xform)
+{
+	struct rte_crypto_rsa_priv_key_qt qt = xform->rsa.qt;
+	struct rte_crypto_rsa_xform *xfrm_rsa = &xform->rsa;
+	struct rte_crypto_rsa_xform *rsa = &sess->rsa_ctx;
+	size_t mod_len = xfrm_rsa->n.length;
+	size_t exp_len = xfrm_rsa->e.length;
+	uint64_t total_size;
+	size_t len = 0;
+
+	/* Make sure key length used is not more than mod_len/2 */
+	if (qt.p.data != NULL)
+		len = (((mod_len / 2) < qt.p.length) ? len : qt.p.length);
+
+	/* Total size required for RSA key params(n,e,(q,dQ,p,dP,qInv)) */
+	total_size = mod_len + exp_len + 5 * len;
+
+	/* Allocate buffer to hold all RSA keys */
+	rsa->n.data = rte_malloc(NULL, total_size, 0);
+	if (rsa->n.data == NULL)
+		return -ENOMEM;
+
+	/* Set up RSA prime modulus and public key exponent */
+	memcpy(rsa->n.data, xfrm_rsa->n.data, mod_len);
+	rsa->e.data = rsa->n.data + mod_len;
+	memcpy(rsa->e.data, xfrm_rsa->e.data, exp_len);
+
+	/* Private key in quintuple format */
+	if (len != 0) {
+		rsa->qt.q.data = rsa->e.data + exp_len;
+		memcpy(rsa->qt.q.data, qt.q.data, qt.q.length);
+		rsa->qt.dQ.data = rsa->qt.q.data + qt.q.length;
+		memcpy(rsa->qt.dQ.data, qt.dQ.data, qt.dQ.length);
+		rsa->qt.p.data = rsa->qt.dQ.data + qt.dQ.length;
+		memcpy(rsa->qt.p.data, qt.p.data, qt.p.length);
+		rsa->qt.dP.data = rsa->qt.p.data + qt.p.length;
+		memcpy(rsa->qt.dP.data, qt.dP.data, qt.dP.length);
+		rsa->qt.qInv.data = rsa->qt.dP.data + qt.dP.length;
+		memcpy(rsa->qt.qInv.data, qt.qInv.data, qt.qInv.length);
+
+		rsa->qt.q.length = qt.q.length;
+		rsa->qt.dQ.length = qt.dQ.length;
+		rsa->qt.p.length = qt.p.length;
+		rsa->qt.dP.length = qt.dP.length;
+		rsa->qt.qInv.length = qt.qInv.length;
+	}
+	rsa->n.length = mod_len;
+	rsa->e.length = exp_len;
+
+	return 0;
+}
+
+static __rte_always_inline int
+cnxk_ae_fill_ec_params(struct cnxk_ae_sess *sess,
+		       struct rte_crypto_asym_xform *xform)
+{
+	struct roc_ae_ec_ctx *ec = &sess->ec_ctx;
+
+	switch (xform->ec.curve_id) {
+	case RTE_CRYPTO_EC_GROUP_SECP192R1:
+		ec->curveid = ROC_AE_EC_ID_P192;
+		break;
+	case RTE_CRYPTO_EC_GROUP_SECP224R1:
+		ec->curveid = ROC_AE_EC_ID_P224;
+		break;
+	case RTE_CRYPTO_EC_GROUP_SECP256R1:
+		ec->curveid = ROC_AE_EC_ID_P256;
+		break;
+	case RTE_CRYPTO_EC_GROUP_SECP384R1:
+		ec->curveid = ROC_AE_EC_ID_P384;
+		break;
+	case RTE_CRYPTO_EC_GROUP_SECP521R1:
+		ec->curveid = ROC_AE_EC_ID_P521;
+		break;
+	default:
+		/* Only NIST curves (FIPS 186-4) are supported */
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static __rte_always_inline int
+cnxk_ae_fill_session_parameters(struct cnxk_ae_sess *sess,
+				struct rte_crypto_asym_xform *xform)
+{
+	int ret;
+
+	sess->xfrm_type = xform->xform_type;
+
+	switch (xform->xform_type) {
+	case RTE_CRYPTO_ASYM_XFORM_RSA:
+		ret = cnxk_ae_fill_rsa_params(sess, xform);
+		break;
+	case RTE_CRYPTO_ASYM_XFORM_MODEX:
+		ret = cnxk_ae_fill_modex_params(sess, xform);
+		break;
+	case RTE_CRYPTO_ASYM_XFORM_ECDSA:
+		/* Fall through */
+	case RTE_CRYPTO_ASYM_XFORM_ECPM:
+		ret = cnxk_ae_fill_ec_params(sess, xform);
+		break;
+	default:
+		return -ENOTSUP;
+	}
+	return ret;
+}
+
+static inline void
+cnxk_ae_free_session_parameters(struct cnxk_ae_sess *sess)
+{
+	struct rte_crypto_modex_xform *mod;
+	struct rte_crypto_rsa_xform *rsa;
+
+	switch (sess->xfrm_type) {
+	case RTE_CRYPTO_ASYM_XFORM_RSA:
+		rsa = &sess->rsa_ctx;
+		if (rsa->n.data)
+			rte_free(rsa->n.data);
+		break;
+	case RTE_CRYPTO_ASYM_XFORM_MODEX:
+		mod = &sess->mod_ctx;
+		if (mod->modulus.data)
+			rte_free(mod->modulus.data);
+		break;
+	case RTE_CRYPTO_ASYM_XFORM_ECDSA:
+		/* Fall through */
+	case RTE_CRYPTO_ASYM_XFORM_ECPM:
+		break;
+	default:
+		break;
+	}
+}
+#endif /* _CNXK_AE_H_ */
diff --git a/drivers/crypto/cnxk/cnxk_cpt_ops_helper.c b/drivers/crypto/cnxk/cnxk_cpt_ops_helper.c
index 103195e..73e28a7 100644
--- a/drivers/crypto/cnxk/cnxk_cpt_ops_helper.c
+++ b/drivers/crypto/cnxk/cnxk_cpt_ops_helper.c
@@ -26,3 +26,17 @@ cnxk_cpt_ops_helper_get_mlen(void)
 
 	return len;
 }
+
+int
+cnxk_cpt_ops_helper_asym_get_mlen(void)
+{
+	uint32_t len;
+
+	/* To hold RPTR */
+	len = sizeof(uint64_t);
+
+	/* Get meta len for asymmetric operations */
+	len += CNXK_CPT_MAX_ASYM_OP_NUM_PARAMS * CNXK_CPT_MAX_ASYM_OP_MOD_LEN;
+
+	return len;
+}
diff --git a/drivers/crypto/cnxk/cnxk_cpt_ops_helper.h b/drivers/crypto/cnxk/cnxk_cpt_ops_helper.h
index 23c6fed..daed6db 100644
--- a/drivers/crypto/cnxk/cnxk_cpt_ops_helper.h
+++ b/drivers/crypto/cnxk/cnxk_cpt_ops_helper.h
@@ -9,6 +9,9 @@
 #define CPT_OFFSET_CONTROL_BYTES 8
 #define SG_ENTRY_SIZE		 sizeof(struct roc_se_sglist_comp)
 
+#define CNXK_CPT_MAX_ASYM_OP_NUM_PARAMS 5
+#define CNXK_CPT_MAX_ASYM_OP_MOD_LEN	1024
+
 /*
  * Get size of contiguous meta buffer to be allocated
  *
@@ -17,4 +20,5 @@
  */
 int cnxk_cpt_ops_helper_get_mlen(void);
 
+int cnxk_cpt_ops_helper_asym_get_mlen(void);
 #endif /* _CNXK_CPT_OPS_HELPER_H_ */
diff --git a/drivers/crypto/cnxk/cnxk_cryptodev.h b/drivers/crypto/cnxk/cnxk_cryptodev.h
index 1568be3..a39fe25 100644
--- a/drivers/crypto/cnxk/cnxk_cryptodev.h
+++ b/drivers/crypto/cnxk/cnxk_cryptodev.h
@@ -25,7 +25,7 @@
 #define CNXK_CPT_MAX_CAPS	 34
 #define CNXK_SEC_CRYPTO_MAX_CAPS 4
 #define CNXK_SEC_MAX_CAPS	 3
-
+#define CNXK_AE_EC_ID_MAX	 5
 /**
  * Device private data
  */
@@ -35,6 +35,7 @@ struct cnxk_cpt_vf {
 	struct rte_cryptodev_capabilities
 		sec_crypto_caps[CNXK_SEC_CRYPTO_MAX_CAPS];
 	struct rte_security_capability sec_caps[CNXK_SEC_MAX_CAPS];
+	uint64_t cnxk_fpm_iova[CNXK_AE_EC_ID_MAX];
 };
 
 int cnxk_cpt_eng_grp_add(struct roc_cpt *roc_cpt);
diff --git a/drivers/crypto/cnxk/cnxk_cryptodev_ops.c b/drivers/crypto/cnxk/cnxk_cryptodev_ops.c
index 3b7cd44..374b394 100644
--- a/drivers/crypto/cnxk/cnxk_cryptodev_ops.c
+++ b/drivers/crypto/cnxk/cnxk_cryptodev_ops.c
@@ -8,6 +8,7 @@
 
 #include "roc_cpt.h"
 
+#include "cnxk_ae.h"
 #include "cnxk_cpt_ops_helper.h"
 #include "cnxk_cryptodev.h"
 #include "cnxk_cryptodev_ops.h"
@@ -111,6 +112,12 @@ cnxk_cpt_metabuf_mempool_create(const struct rte_cryptodev *dev,
 		mlen = cnxk_cpt_ops_helper_get_mlen();
 	}
 
+	if (dev->feature_flags & RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO) {
+
+		/* Get meta len required for asymmetric operations */
+		mlen = cnxk_cpt_ops_helper_asym_get_mlen();
+	}
+
 	cache_sz = RTE_MIN(RTE_MEMPOOL_CACHE_MAX_SIZE, nb_elements / 1.5);
 
 	/* Allocate mempool */
@@ -254,6 +261,15 @@ cnxk_cpt_queue_pair_release(struct rte_cryptodev *dev, uint16_t qp_id)
 	roc_cpt->lf[qp_id] = NULL;
 	dev->data->queue_pairs[qp_id] = NULL;
 
+	if (dev->feature_flags & RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO) {
+		/* Initialize shared FPM table */
+		ret = roc_ae_fpm_get(vf->cnxk_fpm_iova);
+		if (ret) {
+			plt_err("Could not get FPM table");
+			return ret;
+		}
+	}
+
 	return 0;
 }
 
@@ -532,3 +548,62 @@ cnxk_cpt_sym_session_clear(struct rte_cryptodev *dev,
 {
 	return sym_session_clear(dev->driver_id, sess);
 }
+
+unsigned int
+cnxk_ae_session_size_get(struct rte_cryptodev *dev __rte_unused)
+{
+	return sizeof(struct cnxk_ae_sess);
+}
+
+void
+cnxk_ae_session_clear(struct rte_cryptodev *dev,
+		      struct rte_cryptodev_asym_session *sess)
+{
+	struct rte_mempool *sess_mp;
+	struct cnxk_ae_sess *priv;
+
+	priv = get_asym_session_private_data(sess, dev->driver_id);
+	if (priv == NULL)
+		return;
+
+	/* Free resources allocated in session_cfg */
+	cnxk_ae_free_session_parameters(priv);
+
+	/* Reset and free object back to pool */
+	memset(priv, 0, cnxk_ae_session_size_get(dev));
+	sess_mp = rte_mempool_from_obj(priv);
+	set_asym_session_private_data(sess, dev->driver_id, NULL);
+	rte_mempool_put(sess_mp, priv);
+}
+
+int
+cnxk_ae_session_cfg(struct rte_cryptodev *dev,
+		    struct rte_crypto_asym_xform *xform,
+		    struct rte_cryptodev_asym_session *sess,
+		    struct rte_mempool *pool)
+{
+	struct cnxk_cpt_vf *vf = dev->data->dev_private;
+	struct roc_cpt *roc_cpt = &vf->cpt;
+	struct cnxk_ae_sess *priv;
+	union cpt_inst_w7 w7;
+	int ret;
+
+	if (rte_mempool_get(pool, (void **)&priv))
+		return -ENOMEM;
+
+	memset(priv, 0, sizeof(struct cnxk_ae_sess));
+
+	ret = cnxk_ae_fill_session_parameters(priv, xform);
+	if (ret) {
+		rte_mempool_put(pool, priv);
+		return ret;
+	}
+
+	w7.u64 = 0;
+	w7.s.egrp = roc_cpt->eng_grp[CPT_ENG_TYPE_AE];
+	priv->cpt_inst_w7 = w7.u64;
+	priv->cnxk_fpm_iova = vf->cnxk_fpm_iova;
+	set_asym_session_private_data(sess, dev->driver_id, priv);
+
+	return 0;
+}
diff --git a/drivers/crypto/cnxk/cnxk_cryptodev_ops.h b/drivers/crypto/cnxk/cnxk_cryptodev_ops.h
index b252c52..fc731fb 100644
--- a/drivers/crypto/cnxk/cnxk_cryptodev_ops.h
+++ b/drivers/crypto/cnxk/cnxk_cryptodev_ops.h
@@ -106,4 +106,12 @@ void cnxk_cpt_sym_session_clear(struct rte_cryptodev *dev,
 
 void sym_session_clear(int driver_id, struct rte_cryptodev_sym_session *sess);
 
+unsigned int cnxk_ae_session_size_get(struct rte_cryptodev *dev __rte_unused);
+
+void cnxk_ae_session_clear(struct rte_cryptodev *dev,
+			   struct rte_cryptodev_asym_session *sess);
+int cnxk_ae_session_cfg(struct rte_cryptodev *dev,
+			struct rte_crypto_asym_xform *xform,
+			struct rte_cryptodev_asym_session *sess,
+			struct rte_mempool *pool);
 #endif /* _CNXK_CRYPTODEV_OPS_H_ */
-- 
2.7.4


^ permalink raw reply	[flat|nested] 11+ messages in thread

* [dpdk-dev] [PATCH 2/3] crypto/cnxk: add asymmetric datapath ops
  2021-06-02 17:46 [dpdk-dev] [PATCH 0/3] Add asymmetric ops in crypto cnxk PMDs Anoob Joseph
  2021-06-02 17:46 ` [dpdk-dev] [PATCH 1/3] crypto/cnxk: add asymmetric session ops Anoob Joseph
@ 2021-06-02 17:46 ` Anoob Joseph
  2021-06-02 17:46 ` [dpdk-dev] [PATCH 3/3] app/test: adding cnxk asymmetric autotest Anoob Joseph
  2021-06-25  6:25 ` [dpdk-dev] [PATCH v2 0/4] Add asymmetric ops in crypto cnxk PMDs Anoob Joseph
  3 siblings, 0 replies; 11+ messages in thread
From: Anoob Joseph @ 2021-06-02 17:46 UTC (permalink / raw)
  To: Akhil Goyal, Thomas Monjalon
  Cc: Kiran Kumar K, Jerin Jacob, Ankur Dwivedi, Tejasree Kondoj, dev

From: Kiran Kumar K <kirankumark@marvell.com>

Adding asymmetric crypto datapath ops.

Signed-off-by: Kiran Kumar K <kirankumark@marvell.com>
---
 drivers/crypto/cnxk/cn10k_cryptodev_ops.c | 108 ++++++
 drivers/crypto/cnxk/cn9k_cryptodev_ops.c  | 112 +++++-
 drivers/crypto/cnxk/cnxk_ae.h             | 615 ++++++++++++++++++++++++++++++
 3 files changed, 833 insertions(+), 2 deletions(-)

diff --git a/drivers/crypto/cnxk/cn10k_cryptodev_ops.c b/drivers/crypto/cnxk/cn10k_cryptodev_ops.c
index f98028e..68818a6 100644
--- a/drivers/crypto/cnxk/cn10k_cryptodev_ops.c
+++ b/drivers/crypto/cnxk/cn10k_cryptodev_ops.c
@@ -10,6 +10,7 @@
 #include "cn10k_cryptodev_ops.h"
 #include "cn10k_ipsec_la_ops.h"
 #include "cn10k_ipsec.h"
+#include "cnxk_ae.h"
 #include "cnxk_cryptodev.h"
 #include "cnxk_cryptodev_ops.h"
 #include "cnxk_se.h"
@@ -95,12 +96,71 @@ cpt_sym_inst_fill(struct cnxk_cpt_qp *qp, struct rte_crypto_op *op,
 	return ret;
 }
 
+static __rte_always_inline int32_t __rte_hot
+cn10k_ae_enqueue(struct cnxk_cpt_qp *qp, struct rte_crypto_op *op,
+		 struct cpt_inflight_req *infl_req, struct cpt_inst_s *inst,
+		 struct cnxk_ae_sess *sess)
+{
+	struct cpt_qp_meta_info *minfo = &qp->meta_info;
+	struct rte_crypto_asym_op *asym_op = op->asym;
+	struct cnxk_ae_buf_ptr meta_buf;
+	uint64_t *mop;
+	void *mdata;
+	int ret;
+
+	mdata = cnxk_ae_alloc_meta(&meta_buf, minfo->pool, infl_req);
+	if (mdata == NULL)
+		return -ENOMEM;
+
+	/* Reserve 8B for RPTR */
+	meta_buf.vaddr = PLT_PTR_ADD(mdata, sizeof(uint64_t));
+
+	switch (sess->xfrm_type) {
+	case RTE_CRYPTO_ASYM_XFORM_MODEX:
+		ret = cnxk_ae_modex_prep(op, &meta_buf, &sess->mod_ctx, inst);
+		if (unlikely(ret))
+			goto req_fail;
+		break;
+	case RTE_CRYPTO_ASYM_XFORM_RSA:
+		ret = cnxk_ae_enqueue_rsa_op(op, &meta_buf, sess, inst);
+		if (unlikely(ret))
+			goto req_fail;
+		break;
+	case RTE_CRYPTO_ASYM_XFORM_ECDSA:
+		ret = cnxk_ae_enqueue_ecdsa_op(op, &meta_buf, sess,
+					       sess->cnxk_fpm_iova, inst);
+		if (unlikely(ret))
+			goto req_fail;
+		break;
+	case RTE_CRYPTO_ASYM_XFORM_ECPM:
+		ret = cnxk_ae_ecpm_prep(&asym_op->ecpm, &meta_buf,
+					sess->ec_ctx.curveid, inst);
+		if (unlikely(ret))
+			goto req_fail;
+		break;
+	default:
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		ret = -EINVAL;
+		goto req_fail;
+	}
+
+	mop = mdata;
+	mop[0] = inst->rptr;
+	return 0;
+
+req_fail:
+	rte_mempool_put(minfo->pool, infl_req->mdata);
+	return ret;
+}
+
 static inline int
 cn10k_cpt_fill_inst(struct cnxk_cpt_qp *qp, struct rte_crypto_op *ops[],
 		    struct cpt_inst_s inst[], struct cpt_inflight_req *infl_req)
 {
 	struct cn10k_sec_session *sec_sess;
+	struct rte_crypto_asym_op *asym_op;
 	struct rte_crypto_sym_op *sym_op;
+	struct cnxk_ae_sess *ae_sess;
 	struct cnxk_se_sess *sess;
 	struct rte_crypto_op *op;
 	uint64_t w7;
@@ -148,6 +208,21 @@ cn10k_cpt_fill_inst(struct cnxk_cpt_qp *qp, struct rte_crypto_op *ops[],
 			}
 			w7 = sess->cpt_inst_w7;
 		}
+	} else if (op->type == RTE_CRYPTO_OP_TYPE_ASYMMETRIC) {
+
+		if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
+			asym_op = op->asym;
+			ae_sess = get_asym_session_private_data(
+				asym_op->session, cn10k_cryptodev_driver_id);
+			ret = cn10k_ae_enqueue(qp, op, infl_req, &inst[0],
+					       ae_sess);
+			if (unlikely(ret))
+				return 0;
+			w7 = ae_sess->cpt_inst_w7;
+		} else {
+			CPT_LOG_DP_ERR("Not supported Asym op without session");
+			return 0;
+		}
 	} else {
 		CPT_LOG_DP_ERR("Unsupported op type");
 		return 0;
@@ -239,6 +314,35 @@ cn10k_cpt_enqueue_burst(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops)
 	return count + i;
 }
 
+static void
+cnxk_ae_post_process(struct rte_crypto_op *cop, uint8_t *rptr)
+{
+	struct rte_crypto_asym_op *op = cop->asym;
+	struct cnxk_ae_sess *sess;
+
+	sess = get_asym_session_private_data(op->session,
+					     cn10k_cryptodev_driver_id);
+
+	switch (sess->xfrm_type) {
+	case RTE_CRYPTO_ASYM_XFORM_RSA:
+		cnxk_ae_dequeue_rsa_op(cop, rptr, &sess->rsa_ctx);
+		break;
+	case RTE_CRYPTO_ASYM_XFORM_MODEX:
+		op->modex.result.length = sess->mod_ctx.modulus.length;
+		memcpy(op->modex.result.data, rptr, op->modex.result.length);
+		break;
+	case RTE_CRYPTO_ASYM_XFORM_ECDSA:
+		cnxk_ae_dequeue_ecdsa_op(&op->ecdsa, rptr, &sess->ec_ctx);
+		break;
+	case RTE_CRYPTO_ASYM_XFORM_ECPM:
+		cnxk_ae_dequeue_ecpm_op(&op->ecpm, rptr, &sess->ec_ctx);
+		break;
+	default:
+		cop->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		break;
+	}
+}
+
 static inline void
 cn10k_cpt_sec_post_process(struct rte_crypto_op *cop,
 			   struct cpt_inflight_req *infl_req)
@@ -303,6 +407,10 @@ cn10k_cpt_dequeue_post_process(struct cnxk_cpt_qp *qp,
 				compl_auth_verify(cop, (uint8_t *)rsp[0],
 						  rsp[1]);
 			}
+		} else if (cop->type == RTE_CRYPTO_OP_TYPE_ASYMMETRIC) {
+			uintptr_t *mdata = infl_req->mdata;
+
+			cnxk_ae_post_process(cop, (uint8_t *)mdata[0]);
 		}
 	} else {
 		cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
diff --git a/drivers/crypto/cnxk/cn9k_cryptodev_ops.c b/drivers/crypto/cnxk/cn9k_cryptodev_ops.c
index 07be33d..c4b5832 100644
--- a/drivers/crypto/cnxk/cn9k_cryptodev_ops.c
+++ b/drivers/crypto/cnxk/cn9k_cryptodev_ops.c
@@ -7,10 +7,97 @@
 
 #include "cn9k_cryptodev.h"
 #include "cn9k_cryptodev_ops.h"
+#include "cnxk_ae.h"
 #include "cnxk_cryptodev.h"
 #include "cnxk_cryptodev_ops.h"
 #include "cnxk_se.h"
 
+static __rte_always_inline int32_t __rte_hot
+cn9k_ae_enqueue(struct cnxk_cpt_qp *qp, struct rte_crypto_op *op,
+		struct cpt_inflight_req *infl_req, struct cpt_inst_s *inst,
+		struct cnxk_ae_sess *sess)
+{
+	struct cpt_qp_meta_info *minfo = &qp->meta_info;
+	struct rte_crypto_asym_op *asym_op = op->asym;
+	struct cnxk_ae_buf_ptr meta_buf;
+	uint64_t *mop;
+	void *mdata;
+	int ret;
+
+	mdata = cnxk_ae_alloc_meta(&meta_buf, minfo->pool, infl_req);
+	if (mdata == NULL)
+		return -ENOMEM;
+
+	/* Reserve 8B for RPTR */
+	meta_buf.vaddr = PLT_PTR_ADD(mdata, sizeof(uint64_t));
+
+	switch (sess->xfrm_type) {
+	case RTE_CRYPTO_ASYM_XFORM_MODEX:
+		ret = cnxk_ae_modex_prep(op, &meta_buf, &sess->mod_ctx, inst);
+		if (unlikely(ret))
+			goto req_fail;
+		break;
+	case RTE_CRYPTO_ASYM_XFORM_RSA:
+		ret = cnxk_ae_enqueue_rsa_op(op, &meta_buf, sess, inst);
+		if (unlikely(ret))
+			goto req_fail;
+		break;
+	case RTE_CRYPTO_ASYM_XFORM_ECDSA:
+		ret = cnxk_ae_enqueue_ecdsa_op(op, &meta_buf, sess,
+					       sess->cnxk_fpm_iova, inst);
+		if (unlikely(ret))
+			goto req_fail;
+		break;
+	case RTE_CRYPTO_ASYM_XFORM_ECPM:
+		ret = cnxk_ae_ecpm_prep(&asym_op->ecpm, &meta_buf,
+					sess->ec_ctx.curveid, inst);
+		if (unlikely(ret))
+			goto req_fail;
+		break;
+	default:
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		ret = -EINVAL;
+		goto req_fail;
+	}
+
+	mop = mdata;
+	mop[0] = inst->rptr;
+	return 0;
+
+req_fail:
+	rte_mempool_put(minfo->pool, infl_req->mdata);
+	return ret;
+}
+
+static void
+cnxk_ae_post_process(struct rte_crypto_op *cop, uint8_t *rptr)
+{
+	struct rte_crypto_asym_op *op = cop->asym;
+	struct cnxk_ae_sess *sess;
+
+	sess = get_asym_session_private_data(op->session,
+					     cn9k_cryptodev_driver_id);
+
+	switch (sess->xfrm_type) {
+	case RTE_CRYPTO_ASYM_XFORM_RSA:
+		cnxk_ae_dequeue_rsa_op(cop, rptr, &sess->rsa_ctx);
+		break;
+	case RTE_CRYPTO_ASYM_XFORM_MODEX:
+		op->modex.result.length = sess->mod_ctx.modulus.length;
+		memcpy(op->modex.result.data, rptr, op->modex.result.length);
+		break;
+	case RTE_CRYPTO_ASYM_XFORM_ECDSA:
+		cnxk_ae_dequeue_ecdsa_op(&op->ecdsa, rptr, &sess->ec_ctx);
+		break;
+	case RTE_CRYPTO_ASYM_XFORM_ECPM:
+		cnxk_ae_dequeue_ecpm_op(&op->ecpm, rptr, &sess->ec_ctx);
+		break;
+	default:
+		cop->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		break;
+	}
+}
+
 static __rte_always_inline int __rte_hot
 cn9k_cpt_sym_inst_fill(struct cnxk_cpt_qp *qp, struct rte_crypto_op *op,
 		       struct cnxk_se_sess *sess,
@@ -65,11 +152,11 @@ static uint16_t
 cn9k_cpt_enqueue_burst(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops)
 {
 	struct cpt_inflight_req *infl_req;
+	struct rte_crypto_asym_op *asym_op;
 	struct rte_crypto_sym_op *sym_op;
 	uint16_t nb_allowed, count = 0;
 	struct cnxk_cpt_qp *qp = qptr;
 	struct pending_queue *pend_q;
-	struct cnxk_se_sess *sess;
 	struct rte_crypto_op *op;
 	struct cpt_inst_s inst;
 	uint64_t lmt_status;
@@ -95,6 +182,8 @@ cn9k_cpt_enqueue_burst(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops)
 		infl_req->op_flags = 0;
 
 		if (op->type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
+			struct cnxk_se_sess *sess;
+
 			if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
 				sym_op = op->sym;
 				sess = get_sym_session_private_data(
@@ -120,6 +209,22 @@ cn9k_cpt_enqueue_burst(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops)
 							op->sym->session);
 				}
 			}
+			inst.w7.u64 = sess->cpt_inst_w7;
+		} else if (op->type == RTE_CRYPTO_OP_TYPE_ASYMMETRIC) {
+			struct cnxk_ae_sess *sess;
+
+			ret = -EINVAL;
+			if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
+				asym_op = op->asym;
+				sess = get_asym_session_private_data(
+					asym_op->session,
+					cn9k_cryptodev_driver_id);
+				ret = cn9k_ae_enqueue(qp, op, infl_req, &inst,
+						      sess);
+				if (unlikely(ret))
+					return 0;
+				inst.w7.u64 = sess->cpt_inst_w7;
+			}
 		} else {
 			CPT_LOG_DP_ERR("Unsupported op type");
 			break;
@@ -134,7 +239,6 @@ cn9k_cpt_enqueue_burst(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops)
 
 		infl_req->res.cn9k.compcode = CPT_COMP_NOT_DONE;
 		inst.res_addr = (uint64_t)&infl_req->res;
-		inst.w7.u64 = sess->cpt_inst_w7;
 
 		do {
 			/* Copy CPT command to LMTLINE */
@@ -189,6 +293,10 @@ cn9k_cpt_dequeue_post_process(struct cnxk_cpt_qp *qp, struct rte_crypto_op *cop,
 				compl_auth_verify(cop, (uint8_t *)rsp[0],
 						  rsp[1]);
 			}
+		} else if (cop->type == RTE_CRYPTO_OP_TYPE_ASYMMETRIC) {
+			uintptr_t *mdata = infl_req->mdata;
+
+			cnxk_ae_post_process(cop, (uint8_t *)mdata[0]);
 		}
 	} else {
 		cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
diff --git a/drivers/crypto/cnxk/cnxk_ae.h b/drivers/crypto/cnxk/cnxk_ae.h
index 5659c0d..0724429 100644
--- a/drivers/crypto/cnxk/cnxk_ae.h
+++ b/drivers/crypto/cnxk/cnxk_ae.h
@@ -12,6 +12,8 @@
 #include "roc_ae.h"
 #include "roc_ae_fpm_tables.h"
 
+#include "cnxk_cryptodev_ops.h"
+
 struct cnxk_ae_sess {
 	enum rte_crypto_asym_xform_type xfrm_type;
 	union {
@@ -23,6 +25,11 @@ struct cnxk_ae_sess {
 	uint64_t cpt_inst_w7;
 };
 
+/* Buffer pointer */
+struct cnxk_ae_buf_ptr {
+	void *vaddr;
+};
+
 static __rte_always_inline void
 cnxk_ae_modex_param_normalize(uint8_t **data, size_t *len)
 {
@@ -207,4 +214,612 @@ cnxk_ae_free_session_parameters(struct cnxk_ae_sess *sess)
 		break;
 	}
 }
+
+static const struct roc_ae_ec_group cnxk_ae_ec_grp[ROC_AE_EC_ID_PMAX] = {
+	{
+		.prime = {.data = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+				   0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+				   0xFF, 0xFF, 0xFF, 0xFE, 0xFF, 0xFF,
+				   0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF},
+			  .length = 24},
+		.order = {.data = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+				   0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+				   0x99, 0xDE, 0xF8, 0x36, 0x14, 0x6B,
+				   0xC9, 0xB1, 0xB4, 0xD2, 0x28, 0x31},
+			  .length = 24},
+	},
+	{
+		.prime = {.data = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+				   0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+				   0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
+				   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01},
+			  .length = 28},
+		.order = {.data = {0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF,
+				   0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF,
+				   0X16, 0XA2, 0XE0, 0XB8, 0XF0, 0X3E, 0X13,
+				   0XDD, 0X29, 0X45, 0X5C, 0X5C, 0X2A, 0X3D},
+			  .length = 28},
+	},
+	{
+		.prime = {.data = {0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00,
+				   0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+				   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF,
+				   0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+				   0xFF, 0xFF, 0xFF, 0xFF},
+			  .length = 32},
+		.order = {.data = {0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00,
+				   0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+				   0xFF, 0xFF, 0xBC, 0xE6, 0xFA, 0xAD, 0xA7,
+				   0x17, 0x9E, 0x84, 0xF3, 0xB9, 0xCA, 0xC2,
+				   0xFC, 0x63, 0x25, 0x51},
+			  .length = 32},
+	},
+	{.prime = {.data = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+			    0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+			    0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+			    0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFE,
+			    0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00,
+			    0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF},
+		   .length = 48},
+	 .order = {.data = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+			    0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+			    0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+			    0xC7, 0x63, 0x4D, 0x81, 0xF4, 0x37, 0x2D, 0xDF,
+			    0x58, 0x1A, 0x0D, 0xB2, 0x48, 0xB0, 0xA7, 0x7A,
+			    0xEC, 0xEC, 0x19, 0x6A, 0xCC, 0xC5, 0x29, 0x73},
+		   .length = 48}},
+	{.prime = {.data = {0x01, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+			    0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+			    0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+			    0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+			    0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+			    0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+			    0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+			    0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+			    0xFF, 0xFF},
+		   .length = 66},
+	 .order = {.data = {0x01, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+			    0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+			    0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+			    0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+			    0xFF, 0xFA, 0x51, 0x86, 0x87, 0x83, 0xBF, 0x2F,
+			    0x96, 0x6B, 0x7F, 0xCC, 0x01, 0x48, 0xF7, 0x09,
+			    0xA5, 0xD0, 0x3B, 0xB5, 0xC9, 0xB8, 0x89, 0x9C,
+			    0x47, 0xAE, 0xBB, 0x6F, 0xB7, 0x1E, 0x91, 0x38,
+			    0x64, 0x09},
+		   .length = 66}}};
+
+static __rte_always_inline int
+cnxk_ae_modex_prep(struct rte_crypto_op *op, struct cnxk_ae_buf_ptr *meta_buf,
+		   struct rte_crypto_modex_xform *mod, struct cpt_inst_s *inst)
+{
+	uint32_t exp_len = mod->exponent.length;
+	uint32_t mod_len = mod->modulus.length;
+	struct rte_crypto_mod_op_param mod_op;
+	uint64_t total_key_len;
+	union cpt_inst_w4 w4;
+	uint32_t base_len;
+	uint32_t dlen;
+	uint8_t *dptr;
+
+	mod_op = op->asym->modex;
+
+	base_len = mod_op.base.length;
+	if (unlikely(base_len > mod_len)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -ENOTSUP;
+	}
+
+	total_key_len = mod_len + exp_len;
+
+	/* Input buffer */
+	dptr = meta_buf->vaddr;
+	inst->dptr = (uintptr_t)dptr;
+	memcpy(dptr, mod->modulus.data, total_key_len);
+	dptr += total_key_len;
+	memcpy(dptr, mod_op.base.data, base_len);
+	dptr += base_len;
+	dlen = total_key_len + base_len;
+
+	/* Setup opcodes */
+	w4.s.opcode_major = ROC_AE_MAJOR_OP_MODEX;
+	w4.s.opcode_minor = ROC_AE_MINOR_OP_MODEX;
+
+	w4.s.param1 = mod_len;
+	w4.s.param2 = exp_len;
+	w4.s.dlen = dlen;
+
+	inst->w4.u64 = w4.u64;
+	inst->rptr = (uintptr_t)dptr;
+
+	return 0;
+}
+
+static __rte_always_inline void
+cnxk_ae_rsa_prep(struct rte_crypto_op *op, struct cnxk_ae_buf_ptr *meta_buf,
+		 struct rte_crypto_rsa_xform *rsa,
+		 rte_crypto_param *crypto_param, struct cpt_inst_s *inst)
+{
+	struct rte_crypto_rsa_op_param rsa_op;
+	uint32_t mod_len = rsa->n.length;
+	uint32_t exp_len = rsa->e.length;
+	uint64_t total_key_len;
+	union cpt_inst_w4 w4;
+	uint32_t in_size;
+	uint32_t dlen;
+	uint8_t *dptr;
+
+	rsa_op = op->asym->rsa;
+	total_key_len = mod_len + exp_len;
+
+	/* Input buffer */
+	dptr = meta_buf->vaddr;
+	inst->dptr = (uintptr_t)dptr;
+	memcpy(dptr, rsa->n.data, total_key_len);
+	dptr += total_key_len;
+
+	in_size = crypto_param->length;
+	memcpy(dptr, crypto_param->data, in_size);
+
+	dptr += in_size;
+	dlen = total_key_len + in_size;
+
+	if (rsa_op.pad == RTE_CRYPTO_RSA_PADDING_NONE) {
+		/* Use mod_exp operation for no_padding type */
+		w4.s.opcode_minor = ROC_AE_MINOR_OP_MODEX;
+		w4.s.param2 = exp_len;
+	} else {
+		if (rsa_op.op_type == RTE_CRYPTO_ASYM_OP_ENCRYPT) {
+			w4.s.opcode_minor = ROC_AE_MINOR_OP_PKCS_ENC;
+			/* Public key encrypt, use BT2*/
+			w4.s.param2 = ROC_AE_CPT_BLOCK_TYPE2 |
+				      ((uint16_t)(exp_len) << 1);
+		} else if (rsa_op.op_type == RTE_CRYPTO_ASYM_OP_VERIFY) {
+			w4.s.opcode_minor = ROC_AE_MINOR_OP_PKCS_DEC;
+			/* Public key decrypt, use BT1 */
+			w4.s.param2 = ROC_AE_CPT_BLOCK_TYPE1;
+		}
+	}
+
+	w4.s.opcode_major = ROC_AE_MAJOR_OP_MODEX;
+
+	w4.s.param1 = mod_len;
+	w4.s.dlen = dlen;
+
+	inst->w4.u64 = w4.u64;
+	inst->rptr = (uintptr_t)dptr;
+}
+
+static __rte_always_inline void
+cnxk_ae_rsa_crt_prep(struct rte_crypto_op *op, struct cnxk_ae_buf_ptr *meta_buf,
+		     struct rte_crypto_rsa_xform *rsa,
+		     rte_crypto_param *crypto_param, struct cpt_inst_s *inst)
+{
+	uint32_t qInv_len = rsa->qt.qInv.length;
+	struct rte_crypto_rsa_op_param rsa_op;
+	uint32_t dP_len = rsa->qt.dP.length;
+	uint32_t dQ_len = rsa->qt.dQ.length;
+	uint32_t p_len = rsa->qt.p.length;
+	uint32_t q_len = rsa->qt.q.length;
+	uint32_t mod_len = rsa->n.length;
+	uint64_t total_key_len;
+	union cpt_inst_w4 w4;
+	uint32_t in_size;
+	uint32_t dlen;
+	uint8_t *dptr;
+
+	rsa_op = op->asym->rsa;
+	total_key_len = p_len + q_len + dP_len + dQ_len + qInv_len;
+
+	/* Input buffer */
+	dptr = meta_buf->vaddr;
+	inst->dptr = (uintptr_t)dptr;
+	memcpy(dptr, rsa->qt.q.data, total_key_len);
+	dptr += total_key_len;
+
+	in_size = crypto_param->length;
+	memcpy(dptr, crypto_param->data, in_size);
+
+	dptr += in_size;
+	dlen = total_key_len + in_size;
+
+	if (rsa_op.pad == RTE_CRYPTO_RSA_PADDING_NONE) {
+		/*Use mod_exp operation for no_padding type */
+		w4.s.opcode_minor = ROC_AE_MINOR_OP_MODEX_CRT;
+	} else {
+		if (rsa_op.op_type == RTE_CRYPTO_ASYM_OP_SIGN) {
+			w4.s.opcode_minor = ROC_AE_MINOR_OP_PKCS_ENC_CRT;
+			/* Private encrypt, use BT1 */
+			w4.s.param2 = ROC_AE_CPT_BLOCK_TYPE1;
+		} else if (rsa_op.op_type == RTE_CRYPTO_ASYM_OP_DECRYPT) {
+			w4.s.opcode_minor = ROC_AE_MINOR_OP_PKCS_DEC_CRT;
+			/* Private decrypt, use BT2 */
+			w4.s.param2 = ROC_AE_CPT_BLOCK_TYPE2;
+		}
+	}
+
+	w4.s.opcode_major = ROC_AE_MAJOR_OP_MODEX;
+
+	w4.s.param1 = mod_len;
+	w4.s.dlen = dlen;
+
+	inst->w4.u64 = w4.u64;
+	inst->rptr = (uintptr_t)dptr;
+}
+
+static __rte_always_inline int __rte_hot
+cnxk_ae_enqueue_rsa_op(struct rte_crypto_op *op,
+		       struct cnxk_ae_buf_ptr *meta_buf,
+		       struct cnxk_ae_sess *sess, struct cpt_inst_s *inst)
+{
+	struct rte_crypto_rsa_op_param *rsa = &op->asym->rsa;
+
+	switch (rsa->op_type) {
+	case RTE_CRYPTO_ASYM_OP_VERIFY:
+		cnxk_ae_rsa_prep(op, meta_buf, &sess->rsa_ctx, &rsa->sign,
+				 inst);
+		break;
+	case RTE_CRYPTO_ASYM_OP_ENCRYPT:
+		cnxk_ae_rsa_prep(op, meta_buf, &sess->rsa_ctx, &rsa->message,
+				 inst);
+		break;
+	case RTE_CRYPTO_ASYM_OP_SIGN:
+		cnxk_ae_rsa_crt_prep(op, meta_buf, &sess->rsa_ctx,
+				     &rsa->message, inst);
+		break;
+	case RTE_CRYPTO_ASYM_OP_DECRYPT:
+		cnxk_ae_rsa_crt_prep(op, meta_buf, &sess->rsa_ctx, &rsa->cipher,
+				     inst);
+		break;
+	default:
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static __rte_always_inline void
+cnxk_ae_ecdsa_sign_prep(struct rte_crypto_ecdsa_op_param *ecdsa,
+			struct cnxk_ae_buf_ptr *meta_buf,
+			uint64_t fpm_table_iova, uint8_t curveid,
+			struct cpt_inst_s *inst)
+{
+	uint16_t message_len = ecdsa->message.length;
+	uint16_t pkey_len = ecdsa->pkey.length;
+	uint16_t p_align, k_align, m_align;
+	uint16_t k_len = ecdsa->k.length;
+	uint16_t order_len, prime_len;
+	uint16_t o_offset, pk_offset;
+	union cpt_inst_w4 w4;
+	uint16_t dlen;
+	uint8_t *dptr;
+
+	prime_len = cnxk_ae_ec_grp[curveid].prime.length;
+	order_len = cnxk_ae_ec_grp[curveid].order.length;
+
+	/* Truncate input length to curve prime length */
+	if (message_len > prime_len)
+		message_len = prime_len;
+	m_align = RTE_ALIGN_CEIL(message_len, 8);
+
+	p_align = RTE_ALIGN_CEIL(prime_len, 8);
+	k_align = RTE_ALIGN_CEIL(k_len, 8);
+
+	/* Set write offset for order and private key */
+	o_offset = prime_len - order_len;
+	pk_offset = prime_len - pkey_len;
+
+	/* Input buffer */
+	dptr = meta_buf->vaddr;
+	inst->dptr = (uintptr_t)dptr;
+
+	/*
+	 * Set dlen = sum(sizeof(fpm address), ROUNDUP8(scalar len, input len),
+	 * ROUNDUP8(priv key len, prime len, order len)).
+	 * Please note, private key, order cannot exceed prime
+	 * length i.e 3 * p_align.
+	 */
+	dlen = sizeof(fpm_table_iova) + k_align + m_align + p_align * 3;
+
+	memset(dptr, 0, dlen);
+
+	*(uint64_t *)dptr = fpm_table_iova;
+	dptr += sizeof(fpm_table_iova);
+
+	memcpy(dptr, ecdsa->k.data, k_len);
+	dptr += k_align;
+
+	memcpy(dptr, cnxk_ae_ec_grp[curveid].prime.data, prime_len);
+	dptr += p_align;
+
+	memcpy(dptr + o_offset, cnxk_ae_ec_grp[curveid].order.data, order_len);
+	dptr += p_align;
+
+	memcpy(dptr + pk_offset, ecdsa->pkey.data, pkey_len);
+	dptr += p_align;
+
+	memcpy(dptr, ecdsa->message.data, message_len);
+	dptr += m_align;
+
+	/* Setup opcodes */
+	w4.s.opcode_major = ROC_AE_MAJOR_OP_ECDSA;
+	w4.s.opcode_minor = ROC_AE_MINOR_OP_ECDSA_SIGN;
+
+	w4.s.param1 = curveid | (message_len << 8);
+	w4.s.param2 = k_len;
+	w4.s.dlen = dlen;
+
+	inst->w4.u64 = w4.u64;
+	inst->rptr = (uintptr_t)dptr;
+}
+
+static __rte_always_inline void
+cnxk_ae_ecdsa_verify_prep(struct rte_crypto_ecdsa_op_param *ecdsa,
+			  struct cnxk_ae_buf_ptr *meta_buf,
+			  uint64_t fpm_table_iova, uint8_t curveid,
+			  struct cpt_inst_s *inst)
+{
+	uint32_t message_len = ecdsa->message.length;
+	uint16_t o_offset, r_offset, s_offset;
+	uint16_t qx_len = ecdsa->q.x.length;
+	uint16_t qy_len = ecdsa->q.y.length;
+	uint16_t r_len = ecdsa->r.length;
+	uint16_t s_len = ecdsa->s.length;
+	uint16_t order_len, prime_len;
+	uint16_t qx_offset, qy_offset;
+	uint16_t p_align, m_align;
+	union cpt_inst_w4 w4;
+	uint16_t dlen;
+	uint8_t *dptr;
+
+	prime_len = cnxk_ae_ec_grp[curveid].prime.length;
+	order_len = cnxk_ae_ec_grp[curveid].order.length;
+
+	/* Truncate input length to curve prime length */
+	if (message_len > prime_len)
+		message_len = prime_len;
+
+	m_align = RTE_ALIGN_CEIL(message_len, 8);
+	p_align = RTE_ALIGN_CEIL(prime_len, 8);
+
+	/* Set write offset for sign, order and public key coordinates */
+	o_offset = prime_len - order_len;
+	qx_offset = prime_len - qx_len;
+	qy_offset = prime_len - qy_len;
+	r_offset = prime_len - r_len;
+	s_offset = prime_len - s_len;
+
+	/* Input buffer */
+	dptr = meta_buf->vaddr;
+	inst->dptr = (uintptr_t)dptr;
+
+	/*
+	 * Set dlen = sum(sizeof(fpm address), ROUNDUP8(message len),
+	 * ROUNDUP8(sign len(r and s), public key len(x and y coordinates),
+	 * prime len, order len)).
+	 * Please note sign, public key and order can not exceed prime length
+	 * i.e. 6 * p_align
+	 */
+	dlen = sizeof(fpm_table_iova) + m_align + (6 * p_align);
+
+	memset(dptr, 0, dlen);
+
+	*(uint64_t *)dptr = fpm_table_iova;
+	dptr += sizeof(fpm_table_iova);
+
+	memcpy(dptr + r_offset, ecdsa->r.data, r_len);
+	dptr += p_align;
+
+	memcpy(dptr + s_offset, ecdsa->s.data, s_len);
+	dptr += p_align;
+
+	memcpy(dptr, ecdsa->message.data, message_len);
+	dptr += m_align;
+
+	memcpy(dptr + o_offset, cnxk_ae_ec_grp[curveid].order.data, order_len);
+	dptr += p_align;
+
+	memcpy(dptr, cnxk_ae_ec_grp[curveid].prime.data, prime_len);
+	dptr += p_align;
+
+	memcpy(dptr + qx_offset, ecdsa->q.x.data, qx_len);
+	dptr += p_align;
+
+	memcpy(dptr + qy_offset, ecdsa->q.y.data, qy_len);
+	dptr += p_align;
+
+	/* Setup opcodes */
+	w4.s.opcode_major = ROC_AE_MAJOR_OP_ECDSA;
+	w4.s.opcode_minor = ROC_AE_MINOR_OP_ECDSA_VERIFY;
+
+	w4.s.param1 = curveid | (message_len << 8);
+	w4.s.param2 = 0;
+	w4.s.dlen = dlen;
+
+	inst->w4.u64 = w4.u64;
+	inst->rptr = (uintptr_t)dptr;
+}
+
+static __rte_always_inline int __rte_hot
+cnxk_ae_enqueue_ecdsa_op(struct rte_crypto_op *op,
+			 struct cnxk_ae_buf_ptr *meta_buf,
+			 struct cnxk_ae_sess *sess, uint64_t *fpm_iova,
+			 struct cpt_inst_s *inst)
+{
+	struct rte_crypto_ecdsa_op_param *ecdsa = &op->asym->ecdsa;
+	uint8_t curveid = sess->ec_ctx.curveid;
+
+	if (ecdsa->op_type == RTE_CRYPTO_ASYM_OP_SIGN)
+		cnxk_ae_ecdsa_sign_prep(ecdsa, meta_buf, fpm_iova[curveid],
+					curveid, inst);
+	else if (ecdsa->op_type == RTE_CRYPTO_ASYM_OP_VERIFY)
+		cnxk_ae_ecdsa_verify_prep(ecdsa, meta_buf, fpm_iova[curveid],
+					  curveid, inst);
+	else {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static __rte_always_inline int
+cnxk_ae_ecpm_prep(struct rte_crypto_ecpm_op_param *ecpm,
+		  struct cnxk_ae_buf_ptr *meta_buf, uint8_t curveid,
+		  struct cpt_inst_s *inst)
+{
+	uint16_t x1_len = ecpm->p.x.length;
+	uint16_t y1_len = ecpm->p.y.length;
+	uint16_t scalar_align, p_align;
+	uint16_t x1_offset, y1_offset;
+	uint16_t dlen, prime_len;
+	union cpt_inst_w4 w4;
+	uint8_t *dptr;
+
+	prime_len = cnxk_ae_ec_grp[curveid].prime.length;
+
+	/* Input buffer */
+	dptr = meta_buf->vaddr;
+	inst->dptr = (uintptr_t)dptr;
+
+	p_align = RTE_ALIGN_CEIL(prime_len, 8);
+	scalar_align = RTE_ALIGN_CEIL(ecpm->scalar.length, 8);
+
+	/*
+	 * Set dlen = sum(ROUNDUP8(input point(x and y coordinates), prime,
+	 * scalar length),
+	 * Please note point length is equivalent to prime of the curve
+	 */
+	dlen = 3 * p_align + scalar_align;
+
+	x1_offset = prime_len - x1_len;
+	y1_offset = prime_len - y1_len;
+
+	memset(dptr, 0, dlen);
+
+	/* Copy input point, scalar, prime */
+	memcpy(dptr + x1_offset, ecpm->p.x.data, x1_len);
+	dptr += p_align;
+	memcpy(dptr + y1_offset, ecpm->p.y.data, y1_len);
+	dptr += p_align;
+	memcpy(dptr, ecpm->scalar.data, ecpm->scalar.length);
+	dptr += scalar_align;
+	memcpy(dptr, cnxk_ae_ec_grp[curveid].prime.data,
+	       cnxk_ae_ec_grp[curveid].prime.length);
+	dptr += p_align;
+
+	/* Setup opcodes */
+	w4.s.opcode_major = ROC_AE_MAJOR_OP_ECC;
+	w4.s.opcode_minor = ROC_AE_MINOR_OP_ECC_UMP;
+
+	w4.s.param1 = curveid;
+	w4.s.param2 = ecpm->scalar.length;
+	w4.s.dlen = dlen;
+
+	inst->w4.u64 = w4.u64;
+	inst->rptr = (uintptr_t)dptr;
+
+	return 0;
+}
+
+static __rte_always_inline void
+cnxk_ae_dequeue_rsa_op(struct rte_crypto_op *cop, uint8_t *rptr,
+		       struct rte_crypto_rsa_xform *rsa_ctx)
+{
+	struct rte_crypto_rsa_op_param *rsa = &cop->asym->rsa;
+
+	switch (rsa->op_type) {
+	case RTE_CRYPTO_ASYM_OP_ENCRYPT:
+		rsa->cipher.length = rsa_ctx->n.length;
+		memcpy(rsa->cipher.data, rptr, rsa->cipher.length);
+		break;
+	case RTE_CRYPTO_ASYM_OP_DECRYPT:
+		if (rsa->pad == RTE_CRYPTO_RSA_PADDING_NONE) {
+			rsa->message.length = rsa_ctx->n.length;
+			memcpy(rsa->message.data, rptr, rsa->message.length);
+		} else {
+			/* Get length of decrypted output */
+			rsa->message.length =
+				rte_cpu_to_be_16(*((uint16_t *)rptr));
+			/*
+			 * Offset output data pointer by length field
+			 * (2 bytes) and copy decrypted data.
+			 */
+			memcpy(rsa->message.data, rptr + 2,
+			       rsa->message.length);
+		}
+		break;
+	case RTE_CRYPTO_ASYM_OP_SIGN:
+		rsa->sign.length = rsa_ctx->n.length;
+		memcpy(rsa->sign.data, rptr, rsa->sign.length);
+		break;
+	case RTE_CRYPTO_ASYM_OP_VERIFY:
+		if (rsa->pad == RTE_CRYPTO_RSA_PADDING_NONE) {
+			rsa->sign.length = rsa_ctx->n.length;
+			memcpy(rsa->sign.data, rptr, rsa->sign.length);
+		} else {
+			/* Get length of signed output */
+			rsa->sign.length =
+				rte_cpu_to_be_16(*((uint16_t *)rptr));
+			/*
+			 * Offset output data pointer by length field
+			 * (2 bytes) and copy signed data.
+			 */
+			memcpy(rsa->sign.data, rptr + 2, rsa->sign.length);
+		}
+		if (memcmp(rsa->sign.data, rsa->message.data,
+			   rsa->message.length)) {
+			cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
+		}
+		break;
+	default:
+		cop->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		break;
+	}
+}
+
+static __rte_always_inline void
+cnxk_ae_dequeue_ecdsa_op(struct rte_crypto_ecdsa_op_param *ecdsa, uint8_t *rptr,
+			 struct roc_ae_ec_ctx *ec)
+{
+	int prime_len = cnxk_ae_ec_grp[ec->curveid].prime.length;
+
+	if (ecdsa->op_type == RTE_CRYPTO_ASYM_OP_VERIFY)
+		return;
+
+	/* Separate out sign r and s components */
+	memcpy(ecdsa->r.data, rptr, prime_len);
+	memcpy(ecdsa->s.data, rptr + RTE_ALIGN_CEIL(prime_len, 8), prime_len);
+	ecdsa->r.length = prime_len;
+	ecdsa->s.length = prime_len;
+}
+
+static __rte_always_inline void
+cnxk_ae_dequeue_ecpm_op(struct rte_crypto_ecpm_op_param *ecpm, uint8_t *rptr,
+			struct roc_ae_ec_ctx *ec)
+{
+	int prime_len = cnxk_ae_ec_grp[ec->curveid].prime.length;
+
+	memcpy(ecpm->r.x.data, rptr, prime_len);
+	memcpy(ecpm->r.y.data, rptr + RTE_ALIGN_CEIL(prime_len, 8), prime_len);
+	ecpm->r.x.length = prime_len;
+	ecpm->r.y.length = prime_len;
+}
+
+static __rte_always_inline void *
+cnxk_ae_alloc_meta(struct cnxk_ae_buf_ptr *buf,
+		   struct rte_mempool *cpt_meta_pool,
+		   struct cpt_inflight_req *infl_req)
+{
+	uint8_t *mdata;
+
+	if (unlikely(rte_mempool_get(cpt_meta_pool, (void **)&mdata) < 0))
+		return NULL;
+
+	buf->vaddr = mdata;
+
+	infl_req->mdata = mdata;
+	infl_req->op_flags |= CPT_OP_FLAGS_METABUF;
+
+	return mdata;
+}
 #endif /* _CNXK_AE_H_ */
-- 
2.7.4


^ permalink raw reply	[flat|nested] 11+ messages in thread

* [dpdk-dev] [PATCH 3/3] app/test: adding cnxk asymmetric autotest
  2021-06-02 17:46 [dpdk-dev] [PATCH 0/3] Add asymmetric ops in crypto cnxk PMDs Anoob Joseph
  2021-06-02 17:46 ` [dpdk-dev] [PATCH 1/3] crypto/cnxk: add asymmetric session ops Anoob Joseph
  2021-06-02 17:46 ` [dpdk-dev] [PATCH 2/3] crypto/cnxk: add asymmetric datapath ops Anoob Joseph
@ 2021-06-02 17:46 ` Anoob Joseph
  2021-06-16 20:23   ` Akhil Goyal
  2021-06-25  6:25 ` [dpdk-dev] [PATCH v2 0/4] Add asymmetric ops in crypto cnxk PMDs Anoob Joseph
  3 siblings, 1 reply; 11+ messages in thread
From: Anoob Joseph @ 2021-06-02 17:46 UTC (permalink / raw)
  To: Akhil Goyal, Thomas Monjalon
  Cc: Kiran Kumar K, Jerin Jacob, Ankur Dwivedi, Tejasree Kondoj, dev

From: Kiran Kumar K <kirankumark@marvell.com>

Adding autotest for cn9k and cn10k.

Signed-off-by: Kiran Kumar K <kirankumark@marvell.com>
---
 app/test/test_cryptodev_asym.c | 30 ++++++++++++++++++++++++++++++
 1 file changed, 30 insertions(+)

diff --git a/app/test/test_cryptodev_asym.c b/app/test/test_cryptodev_asym.c
index b36eec9..847b074 100644
--- a/app/test/test_cryptodev_asym.c
+++ b/app/test/test_cryptodev_asym.c
@@ -2390,6 +2390,34 @@ test_cryptodev_octeontx2_asym(void)
 	return unit_test_suite_runner(&cryptodev_octeontx_asym_testsuite);
 }
 
+static int
+test_cryptodev_cn9k_asym(void)
+{
+	gbl_driver_id = rte_cryptodev_driver_id_get(
+			RTE_STR(CRYPTODEV_NAME_CN9K_PMD));
+	if (gbl_driver_id == -1) {
+		RTE_LOG(ERR, USER1, "CN9K PMD must be loaded.\n");
+		return TEST_FAILED;
+	}
+
+	/* Use test suite registered for crypto_octeontx PMD */
+	return unit_test_suite_runner(&cryptodev_octeontx_asym_testsuite);
+}
+
+static int
+test_cryptodev_cn10k_asym(void)
+{
+	gbl_driver_id = rte_cryptodev_driver_id_get(
+			RTE_STR(CRYPTODEV_NAME_CN10K_PMD));
+	if (gbl_driver_id == -1) {
+		RTE_LOG(ERR, USER1, "CN10K PMD must be loaded.\n");
+		return TEST_FAILED;
+	}
+
+	/* Use test suite registered for crypto_octeontx PMD */
+	return unit_test_suite_runner(&cryptodev_octeontx_asym_testsuite);
+}
+
 REGISTER_TEST_COMMAND(cryptodev_openssl_asym_autotest,
 					  test_cryptodev_openssl_asym);
 
@@ -2400,3 +2428,5 @@ REGISTER_TEST_COMMAND(cryptodev_octeontx_asym_autotest,
 
 REGISTER_TEST_COMMAND(cryptodev_octeontx2_asym_autotest,
 					  test_cryptodev_octeontx2_asym);
+REGISTER_TEST_COMMAND(cryptodev_cn9k_asym_autotest, test_cryptodev_cn9k_asym);
+REGISTER_TEST_COMMAND(cryptodev_cn10k_asym_autotest, test_cryptodev_cn10k_asym);
-- 
2.7.4


^ permalink raw reply	[flat|nested] 11+ messages in thread

* Re: [dpdk-dev] [PATCH 1/3] crypto/cnxk: add asymmetric session ops
  2021-06-02 17:46 ` [dpdk-dev] [PATCH 1/3] crypto/cnxk: add asymmetric session ops Anoob Joseph
@ 2021-06-16 20:21   ` Akhil Goyal
  0 siblings, 0 replies; 11+ messages in thread
From: Akhil Goyal @ 2021-06-16 20:21 UTC (permalink / raw)
  To: Anoob Joseph, Thomas Monjalon
  Cc: Kiran Kumar Kokkilagadda, Jerin Jacob Kollanukkaran,
	Ankur Dwivedi, Tejasree Kondoj, dev

> From: Kiran Kumar K <kirankumark@marvell.com>
> 
> Adding asymmetric crypto session ops.
> 
> Signed-off-by: Kiran Kumar K <kirankumark@marvell.com>
> ---
>  drivers/crypto/cnxk/cn10k_cryptodev.c     |   2 +
>  drivers/crypto/cnxk/cn10k_cryptodev_ops.c |   6 +-
>  drivers/crypto/cnxk/cn9k_cryptodev.c      |   4 +-
>  drivers/crypto/cnxk/cn9k_cryptodev_ops.c  |   6 +-
>  drivers/crypto/cnxk/cnxk_ae.h             | 210
> ++++++++++++++++++++++++++++++
>  drivers/crypto/cnxk/cnxk_cpt_ops_helper.c |  14 ++
>  drivers/crypto/cnxk/cnxk_cpt_ops_helper.h |   4 +
>  drivers/crypto/cnxk/cnxk_cryptodev.h      |   3 +-
>  drivers/crypto/cnxk/cnxk_cryptodev_ops.c  |  75 +++++++++++
>  drivers/crypto/cnxk/cnxk_cryptodev_ops.h  |   8 ++
>  10 files changed, 324 insertions(+), 8 deletions(-)
>  create mode 100644 drivers/crypto/cnxk/cnxk_ae.h
> 
> diff --git a/drivers/crypto/cnxk/cn10k_cryptodev.c
> b/drivers/crypto/cnxk/cn10k_cryptodev.c
> index 9517e62..84a1a3a 100644
> --- a/drivers/crypto/cnxk/cn10k_cryptodev.c
> +++ b/drivers/crypto/cnxk/cn10k_cryptodev.c
> @@ -87,7 +87,9 @@ cn10k_cpt_pci_probe(struct rte_pci_driver *pci_drv
> __rte_unused,
>  	cnxk_cpt_caps_populate(vf);
> 
>  	dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
> +			     RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO |
>  			     RTE_CRYPTODEV_FF_HW_ACCELERATED |
> +			     RTE_CRYPTODEV_FF_RSA_PRIV_OP_KEY_QT |
>  			     RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING
> |
>  			     RTE_CRYPTODEV_FF_IN_PLACE_SGL |
>  			     RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT |

Same comment as in the ipsec series.

Documentation update
Move above change and .ini file update in patch 2/3
Release notes update.



^ permalink raw reply	[flat|nested] 11+ messages in thread

* Re: [dpdk-dev] [PATCH 3/3] app/test: adding cnxk asymmetric autotest
  2021-06-02 17:46 ` [dpdk-dev] [PATCH 3/3] app/test: adding cnxk asymmetric autotest Anoob Joseph
@ 2021-06-16 20:23   ` Akhil Goyal
  0 siblings, 0 replies; 11+ messages in thread
From: Akhil Goyal @ 2021-06-16 20:23 UTC (permalink / raw)
  To: Anoob Joseph, Thomas Monjalon
  Cc: Kiran Kumar Kokkilagadda, Jerin Jacob Kollanukkaran,
	Ankur Dwivedi, Tejasree Kondoj, dev

> Subject: [PATCH 3/3] app/test: adding cnxk asymmetric autotest
> 
Title should be 
test/crypto: add cnxk for asymmetric cases

> From: Kiran Kumar K <kirankumark@marvell.com>
> 
> Adding autotest for cn9k and cn10k.
> 
> Signed-off-by: Kiran Kumar K <kirankumark@marvell.com>


^ permalink raw reply	[flat|nested] 11+ messages in thread

* [dpdk-dev] [PATCH v2 0/4] Add asymmetric ops in crypto cnxk PMDs
  2021-06-02 17:46 [dpdk-dev] [PATCH 0/3] Add asymmetric ops in crypto cnxk PMDs Anoob Joseph
                   ` (2 preceding siblings ...)
  2021-06-02 17:46 ` [dpdk-dev] [PATCH 3/3] app/test: adding cnxk asymmetric autotest Anoob Joseph
@ 2021-06-25  6:25 ` Anoob Joseph
  2021-06-25  6:25   ` [dpdk-dev] [PATCH v2 1/4] crypto/cnxk: add asymmetric session ops Anoob Joseph
                     ` (3 more replies)
  3 siblings, 4 replies; 11+ messages in thread
From: Anoob Joseph @ 2021-06-25  6:25 UTC (permalink / raw)
  To: Akhil Goyal, Thomas Monjalon
  Cc: Anoob Joseph, Jerin Jacob, Ankur Dwivedi, Tejasree Kondoj, dev

Add support for asymmetric operations in crypto cnxk PMDs.
Following operations are supported,
- RSA
- DSA
- ECDSA
- ECPM
- Modular Exponentiation

Depends-on: series-17482 ("Add CPT in Marvell CNXK common driver")
Depends-on: series-17483 ("Add Marvell CNXK crypto PMDs")
Depends-on: series-17484 ("Add rte_security in crypto_cn10k PMD")

Changes in v2:
- Added documentation
- Added asymmetric capabilities as separate patch

Anoob Joseph (1):
  crypto/cnxk: add asymmetric capabilities

Kiran Kumar K (3):
  crypto/cnxk: add asymmetric session ops
  crypto/cnxk: add asymmetric datapath ops
  test/crypto: add cnxk for asymmetric cases

 app/test/test_cryptodev_asym.c                    |  30 +
 doc/guides/cryptodevs/cnxk.rst                    |  23 +
 doc/guides/cryptodevs/features/cn10k.ini          |  13 +
 doc/guides/cryptodevs/features/cn9k.ini           |  13 +
 drivers/crypto/cnxk/cn10k_cryptodev.c             |   2 +
 drivers/crypto/cnxk/cn10k_cryptodev_ops.c         |  33 +-
 drivers/crypto/cnxk/cn9k_cryptodev.c              |   4 +-
 drivers/crypto/cnxk/cn9k_cryptodev_ops.c          |  35 +-
 drivers/crypto/cnxk/cnxk_ae.h                     | 836 ++++++++++++++++++++++
 drivers/crypto/cnxk/cnxk_cryptodev.h              |   4 +-
 drivers/crypto/cnxk/cnxk_cryptodev_capabilities.c |  56 ++
 drivers/crypto/cnxk/cnxk_cryptodev_ops.c          | 106 +++
 drivers/crypto/cnxk/cnxk_cryptodev_ops.h          |   8 +
 13 files changed, 1153 insertions(+), 10 deletions(-)
 create mode 100644 drivers/crypto/cnxk/cnxk_ae.h

-- 
2.7.4


^ permalink raw reply	[flat|nested] 11+ messages in thread

* [dpdk-dev] [PATCH v2 1/4] crypto/cnxk: add asymmetric session ops
  2021-06-25  6:25 ` [dpdk-dev] [PATCH v2 0/4] Add asymmetric ops in crypto cnxk PMDs Anoob Joseph
@ 2021-06-25  6:25   ` Anoob Joseph
  2021-06-25  6:25   ` [dpdk-dev] [PATCH v2 2/4] crypto/cnxk: add asymmetric datapath ops Anoob Joseph
                     ` (2 subsequent siblings)
  3 siblings, 0 replies; 11+ messages in thread
From: Anoob Joseph @ 2021-06-25  6:25 UTC (permalink / raw)
  To: Akhil Goyal, Thomas Monjalon
  Cc: Kiran Kumar K, Jerin Jacob, Ankur Dwivedi, Tejasree Kondoj, dev

From: Kiran Kumar K <kirankumark@marvell.com>

Add asymmetric crypto session ops.

Signed-off-by: Kiran Kumar K <kirankumark@marvell.com>
---
 doc/guides/cryptodevs/features/cn10k.ini  |  13 ++
 doc/guides/cryptodevs/features/cn9k.ini   |  13 ++
 drivers/crypto/cnxk/cn10k_cryptodev.c     |   2 +
 drivers/crypto/cnxk/cn10k_cryptodev_ops.c |   6 +-
 drivers/crypto/cnxk/cn9k_cryptodev.c      |   4 +-
 drivers/crypto/cnxk/cn9k_cryptodev_ops.c  |   6 +-
 drivers/crypto/cnxk/cnxk_ae.h             | 211 ++++++++++++++++++++++++++++++
 drivers/crypto/cnxk/cnxk_cryptodev.h      |   4 +-
 drivers/crypto/cnxk/cnxk_cryptodev_ops.c  | 106 +++++++++++++++
 drivers/crypto/cnxk/cnxk_cryptodev_ops.h  |   8 ++
 10 files changed, 365 insertions(+), 8 deletions(-)
 create mode 100644 drivers/crypto/cnxk/cnxk_ae.h

diff --git a/doc/guides/cryptodevs/features/cn10k.ini b/doc/guides/cryptodevs/features/cn10k.ini
index b268f84..f5552fe 100644
--- a/doc/guides/cryptodevs/features/cn10k.ini
+++ b/doc/guides/cryptodevs/features/cn10k.ini
@@ -5,6 +5,7 @@
 ;
 [Features]
 Symmetric crypto       = Y
+Asymmetric crypto      = Y
 Sym operation chaining = Y
 HW Accelerated         = Y
 Protocol offload       = Y
@@ -65,3 +66,15 @@ AES GCM (128)     = Y
 AES GCM (192)     = Y
 AES GCM (256)     = Y
 CHACHA20-POLY1305 = Y
+
+;
+; Supported Asymmetric algorithms of the 'cn10k' crypto driver.
+;
+[Asymmetric]
+RSA                     = Y
+DSA                     =
+Modular Exponentiation  = Y
+Modular Inversion       =
+Diffie-hellman          =
+ECDSA                   = Y
+ECPM                    = Y
diff --git a/doc/guides/cryptodevs/features/cn9k.ini b/doc/guides/cryptodevs/features/cn9k.ini
index 7b310e6..d69dbe8 100644
--- a/doc/guides/cryptodevs/features/cn9k.ini
+++ b/doc/guides/cryptodevs/features/cn9k.ini
@@ -5,6 +5,7 @@
 ;
 [Features]
 Symmetric crypto       = Y
+Asymmetric crypto      = Y
 Sym operation chaining = Y
 HW Accelerated         = Y
 In Place SGL           = Y
@@ -64,3 +65,15 @@ AES GCM (128)     = Y
 AES GCM (192)     = Y
 AES GCM (256)     = Y
 CHACHA20-POLY1305 = Y
+
+;
+; Supported Asymmetric algorithms of the 'cn9k' crypto driver.
+;
+[Asymmetric]
+RSA                     = Y
+DSA                     =
+Modular Exponentiation  = Y
+Modular Inversion       =
+Diffie-hellman          =
+ECDSA                   = Y
+ECPM                    = Y
diff --git a/drivers/crypto/cnxk/cn10k_cryptodev.c b/drivers/crypto/cnxk/cn10k_cryptodev.c
index 22ae810..10a621f 100644
--- a/drivers/crypto/cnxk/cn10k_cryptodev.c
+++ b/drivers/crypto/cnxk/cn10k_cryptodev.c
@@ -92,7 +92,9 @@ cn10k_cpt_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
 	dev->driver_id = cn10k_cryptodev_driver_id;
 
 	dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
+			     RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO |
 			     RTE_CRYPTODEV_FF_HW_ACCELERATED |
+			     RTE_CRYPTODEV_FF_RSA_PRIV_OP_KEY_QT |
 			     RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
 			     RTE_CRYPTODEV_FF_IN_PLACE_SGL |
 			     RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT |
diff --git a/drivers/crypto/cnxk/cn10k_cryptodev_ops.c b/drivers/crypto/cnxk/cn10k_cryptodev_ops.c
index 29525cd..8b33764 100644
--- a/drivers/crypto/cnxk/cn10k_cryptodev_ops.c
+++ b/drivers/crypto/cnxk/cn10k_cryptodev_ops.c
@@ -426,8 +426,8 @@ struct rte_cryptodev_ops cn10k_cpt_ops = {
 	.sym_session_clear = cnxk_cpt_sym_session_clear,
 
 	/* Asymmetric crypto ops */
-	.asym_session_get_size = NULL,
-	.asym_session_configure = NULL,
-	.asym_session_clear = NULL,
+	.asym_session_get_size = cnxk_ae_session_size_get,
+	.asym_session_configure = cnxk_ae_session_cfg,
+	.asym_session_clear = cnxk_ae_session_clear,
 
 };
diff --git a/drivers/crypto/cnxk/cn9k_cryptodev.c b/drivers/crypto/cnxk/cn9k_cryptodev.c
index d3dc084..e74e739 100644
--- a/drivers/crypto/cnxk/cn9k_cryptodev.c
+++ b/drivers/crypto/cnxk/cn9k_cryptodev.c
@@ -83,6 +83,7 @@ cn9k_cpt_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
 	cnxk_cpt_caps_populate(vf);
 
 	dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
+			     RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO |
 			     RTE_CRYPTODEV_FF_HW_ACCELERATED |
 			     RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
 			     RTE_CRYPTODEV_FF_IN_PLACE_SGL |
@@ -90,7 +91,8 @@ cn9k_cpt_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
 			     RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
 			     RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT |
 			     RTE_CRYPTODEV_FF_SYM_SESSIONLESS |
-			     RTE_CRYPTODEV_FF_DIGEST_ENCRYPTED;
+			     RTE_CRYPTODEV_FF_DIGEST_ENCRYPTED |
+			     RTE_CRYPTODEV_FF_RSA_PRIV_OP_KEY_QT;
 
 	cn9k_cpt_set_enqdeq_fns(dev);
 
diff --git a/drivers/crypto/cnxk/cn9k_cryptodev_ops.c b/drivers/crypto/cnxk/cn9k_cryptodev_ops.c
index da13c7d..d8b2aea 100644
--- a/drivers/crypto/cnxk/cn9k_cryptodev_ops.c
+++ b/drivers/crypto/cnxk/cn9k_cryptodev_ops.c
@@ -312,8 +312,8 @@ struct rte_cryptodev_ops cn9k_cpt_ops = {
 	.sym_session_clear = cnxk_cpt_sym_session_clear,
 
 	/* Asymmetric crypto ops */
-	.asym_session_get_size = NULL,
-	.asym_session_configure = NULL,
-	.asym_session_clear = NULL,
+	.asym_session_get_size = cnxk_ae_session_size_get,
+	.asym_session_configure = cnxk_ae_session_cfg,
+	.asym_session_clear = cnxk_ae_session_clear,
 
 };
diff --git a/drivers/crypto/cnxk/cnxk_ae.h b/drivers/crypto/cnxk/cnxk_ae.h
new file mode 100644
index 0000000..e3dd63b
--- /dev/null
+++ b/drivers/crypto/cnxk/cnxk_ae.h
@@ -0,0 +1,211 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2021 Marvell.
+ */
+
+#ifndef _CNXK_AE_H_
+#define _CNXK_AE_H_
+
+#include <rte_common.h>
+#include <rte_crypto_asym.h>
+#include <rte_malloc.h>
+
+#include "roc_api.h"
+#include "cnxk_cryptodev_ops.h"
+
+struct cnxk_ae_sess {
+	enum rte_crypto_asym_xform_type xfrm_type;
+	union {
+		struct rte_crypto_rsa_xform rsa_ctx;
+		struct rte_crypto_modex_xform mod_ctx;
+		struct roc_ae_ec_ctx ec_ctx;
+	};
+	uint64_t *cnxk_fpm_iova;
+	struct roc_ae_ec_group **ec_grp;
+	uint64_t cpt_inst_w7;
+};
+
+static __rte_always_inline void
+cnxk_ae_modex_param_normalize(uint8_t **data, size_t *len)
+{
+	size_t i;
+
+	/* Strip leading NUL bytes */
+	for (i = 0; i < *len; i++) {
+		if ((*data)[i] != 0)
+			break;
+	}
+	*data += i;
+	*len -= i;
+}
+
+static __rte_always_inline int
+cnxk_ae_fill_modex_params(struct cnxk_ae_sess *sess,
+			  struct rte_crypto_asym_xform *xform)
+{
+	struct rte_crypto_modex_xform *ctx = &sess->mod_ctx;
+	size_t exp_len = xform->modex.exponent.length;
+	size_t mod_len = xform->modex.modulus.length;
+	uint8_t *exp = xform->modex.exponent.data;
+	uint8_t *mod = xform->modex.modulus.data;
+
+	cnxk_ae_modex_param_normalize(&mod, &mod_len);
+	cnxk_ae_modex_param_normalize(&exp, &exp_len);
+
+	if (unlikely(exp_len == 0 || mod_len == 0))
+		return -EINVAL;
+
+	if (unlikely(exp_len > mod_len))
+		return -ENOTSUP;
+
+	/* Allocate buffer to hold modexp params */
+	ctx->modulus.data = rte_malloc(NULL, mod_len + exp_len, 0);
+	if (ctx->modulus.data == NULL)
+		return -ENOMEM;
+
+	/* Set up modexp prime modulus and private exponent */
+	memcpy(ctx->modulus.data, mod, mod_len);
+	ctx->exponent.data = ctx->modulus.data + mod_len;
+	memcpy(ctx->exponent.data, exp, exp_len);
+
+	ctx->modulus.length = mod_len;
+	ctx->exponent.length = exp_len;
+
+	return 0;
+}
+
+static __rte_always_inline int
+cnxk_ae_fill_rsa_params(struct cnxk_ae_sess *sess,
+			struct rte_crypto_asym_xform *xform)
+{
+	struct rte_crypto_rsa_priv_key_qt qt = xform->rsa.qt;
+	struct rte_crypto_rsa_xform *xfrm_rsa = &xform->rsa;
+	struct rte_crypto_rsa_xform *rsa = &sess->rsa_ctx;
+	size_t mod_len = xfrm_rsa->n.length;
+	size_t exp_len = xfrm_rsa->e.length;
+	size_t len = (mod_len / 2);
+	uint64_t total_size;
+
+	/* Make sure key length used is not more than mod_len/2 */
+	if (qt.p.data != NULL)
+		len = RTE_MIN(len, qt.p.length);
+
+	/* Total size required for RSA key params(n,e,(q,dQ,p,dP,qInv)) */
+	total_size = mod_len + exp_len + 5 * len;
+
+	/* Allocate buffer to hold all RSA keys */
+	rsa->n.data = rte_malloc(NULL, total_size, 0);
+	if (rsa->n.data == NULL)
+		return -ENOMEM;
+
+	/* Set up RSA prime modulus and public key exponent */
+	memcpy(rsa->n.data, xfrm_rsa->n.data, mod_len);
+	rsa->e.data = rsa->n.data + mod_len;
+	memcpy(rsa->e.data, xfrm_rsa->e.data, exp_len);
+
+	/* Private key in quintuple format */
+	if (len != 0) {
+		rsa->qt.q.data = rsa->e.data + exp_len;
+		memcpy(rsa->qt.q.data, qt.q.data, qt.q.length);
+		rsa->qt.dQ.data = rsa->qt.q.data + qt.q.length;
+		memcpy(rsa->qt.dQ.data, qt.dQ.data, qt.dQ.length);
+		rsa->qt.p.data = rsa->qt.dQ.data + qt.dQ.length;
+		memcpy(rsa->qt.p.data, qt.p.data, qt.p.length);
+		rsa->qt.dP.data = rsa->qt.p.data + qt.p.length;
+		memcpy(rsa->qt.dP.data, qt.dP.data, qt.dP.length);
+		rsa->qt.qInv.data = rsa->qt.dP.data + qt.dP.length;
+		memcpy(rsa->qt.qInv.data, qt.qInv.data, qt.qInv.length);
+
+		rsa->qt.q.length = qt.q.length;
+		rsa->qt.dQ.length = qt.dQ.length;
+		rsa->qt.p.length = qt.p.length;
+		rsa->qt.dP.length = qt.dP.length;
+		rsa->qt.qInv.length = qt.qInv.length;
+	}
+	rsa->n.length = mod_len;
+	rsa->e.length = exp_len;
+
+	return 0;
+}
+
+static __rte_always_inline int
+cnxk_ae_fill_ec_params(struct cnxk_ae_sess *sess,
+		       struct rte_crypto_asym_xform *xform)
+{
+	struct roc_ae_ec_ctx *ec = &sess->ec_ctx;
+
+	switch (xform->ec.curve_id) {
+	case RTE_CRYPTO_EC_GROUP_SECP192R1:
+		ec->curveid = ROC_AE_EC_ID_P192;
+		break;
+	case RTE_CRYPTO_EC_GROUP_SECP224R1:
+		ec->curveid = ROC_AE_EC_ID_P224;
+		break;
+	case RTE_CRYPTO_EC_GROUP_SECP256R1:
+		ec->curveid = ROC_AE_EC_ID_P256;
+		break;
+	case RTE_CRYPTO_EC_GROUP_SECP384R1:
+		ec->curveid = ROC_AE_EC_ID_P384;
+		break;
+	case RTE_CRYPTO_EC_GROUP_SECP521R1:
+		ec->curveid = ROC_AE_EC_ID_P521;
+		break;
+	default:
+		/* Only NIST curves (FIPS 186-4) are supported */
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static __rte_always_inline int
+cnxk_ae_fill_session_parameters(struct cnxk_ae_sess *sess,
+				struct rte_crypto_asym_xform *xform)
+{
+	int ret;
+
+	sess->xfrm_type = xform->xform_type;
+
+	switch (xform->xform_type) {
+	case RTE_CRYPTO_ASYM_XFORM_RSA:
+		ret = cnxk_ae_fill_rsa_params(sess, xform);
+		break;
+	case RTE_CRYPTO_ASYM_XFORM_MODEX:
+		ret = cnxk_ae_fill_modex_params(sess, xform);
+		break;
+	case RTE_CRYPTO_ASYM_XFORM_ECDSA:
+		/* Fall through */
+	case RTE_CRYPTO_ASYM_XFORM_ECPM:
+		ret = cnxk_ae_fill_ec_params(sess, xform);
+		break;
+	default:
+		return -ENOTSUP;
+	}
+	return ret;
+}
+
+static inline void
+cnxk_ae_free_session_parameters(struct cnxk_ae_sess *sess)
+{
+	struct rte_crypto_modex_xform *mod;
+	struct rte_crypto_rsa_xform *rsa;
+
+	switch (sess->xfrm_type) {
+	case RTE_CRYPTO_ASYM_XFORM_RSA:
+		rsa = &sess->rsa_ctx;
+		if (rsa->n.data)
+			rte_free(rsa->n.data);
+		break;
+	case RTE_CRYPTO_ASYM_XFORM_MODEX:
+		mod = &sess->mod_ctx;
+		if (mod->modulus.data)
+			rte_free(mod->modulus.data);
+		break;
+	case RTE_CRYPTO_ASYM_XFORM_ECDSA:
+		/* Fall through */
+	case RTE_CRYPTO_ASYM_XFORM_ECPM:
+		break;
+	default:
+		break;
+	}
+}
+#endif /* _CNXK_AE_H_ */
diff --git a/drivers/crypto/cnxk/cnxk_cryptodev.h b/drivers/crypto/cnxk/cnxk_cryptodev.h
index 6760c13..5e38933 100644
--- a/drivers/crypto/cnxk/cnxk_cryptodev.h
+++ b/drivers/crypto/cnxk/cnxk_cryptodev.h
@@ -13,7 +13,7 @@
 #define CNXK_CPT_MAX_CAPS	 34
 #define CNXK_SEC_CRYPTO_MAX_CAPS 4
 #define CNXK_SEC_MAX_CAPS	 3
-
+#define CNXK_AE_EC_ID_MAX	 5
 /**
  * Device private data
  */
@@ -23,6 +23,8 @@ struct cnxk_cpt_vf {
 	struct rte_cryptodev_capabilities
 		sec_crypto_caps[CNXK_SEC_CRYPTO_MAX_CAPS];
 	struct rte_security_capability sec_caps[CNXK_SEC_MAX_CAPS];
+	uint64_t cnxk_fpm_iova[CNXK_AE_EC_ID_MAX];
+	struct roc_ae_ec_group *ec_grp[CNXK_AE_EC_ID_MAX];
 };
 
 int cnxk_cpt_eng_grp_add(struct roc_cpt *roc_cpt);
diff --git a/drivers/crypto/cnxk/cnxk_cryptodev_ops.c b/drivers/crypto/cnxk/cnxk_cryptodev_ops.c
index 0d81785..7322539 100644
--- a/drivers/crypto/cnxk/cnxk_cryptodev_ops.c
+++ b/drivers/crypto/cnxk/cnxk_cryptodev_ops.c
@@ -8,11 +8,15 @@
 
 #include "roc_cpt.h"
 
+#include "cnxk_ae.h"
 #include "cnxk_cryptodev.h"
 #include "cnxk_cryptodev_ops.h"
 #include "cnxk_cryptodev_capabilities.h"
 #include "cnxk_se.h"
 
+#define CNXK_CPT_MAX_ASYM_OP_NUM_PARAMS 5
+#define CNXK_CPT_MAX_ASYM_OP_MOD_LEN	1024
+
 static int
 cnxk_cpt_get_mlen(void)
 {
@@ -31,6 +35,20 @@ cnxk_cpt_get_mlen(void)
 	return len;
 }
 
+static int
+cnxk_cpt_asym_get_mlen(void)
+{
+	uint32_t len;
+
+	/* To hold RPTR */
+	len = sizeof(uint64_t);
+
+	/* Get meta len for asymmetric operations */
+	len += CNXK_CPT_MAX_ASYM_OP_NUM_PARAMS * CNXK_CPT_MAX_ASYM_OP_MOD_LEN;
+
+	return len;
+}
+
 int
 cnxk_cpt_dev_config(struct rte_cryptodev *dev,
 		    struct rte_cryptodev_config *conf)
@@ -54,6 +72,23 @@ cnxk_cpt_dev_config(struct rte_cryptodev *dev,
 		return ret;
 	}
 
+	if (dev->feature_flags & RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO) {
+		/* Initialize shared FPM table */
+		ret = roc_ae_fpm_get(vf->cnxk_fpm_iova);
+		if (ret) {
+			plt_err("Could not get FPM table");
+			return ret;
+		}
+
+		/* Init EC grp table */
+		ret = roc_ae_ec_grp_get(vf->ec_grp);
+		if (ret) {
+			plt_err("Could not get EC grp table");
+			roc_ae_fpm_put();
+			return ret;
+		}
+	}
+
 	return 0;
 }
 
@@ -86,6 +121,11 @@ cnxk_cpt_dev_close(struct rte_cryptodev *dev)
 		}
 	}
 
+	if (dev->feature_flags & RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO) {
+		roc_ae_fpm_put();
+		roc_ae_ec_grp_put();
+	}
+
 	roc_cpt_dev_clear(&vf->cpt);
 
 	return 0;
@@ -128,6 +168,12 @@ cnxk_cpt_metabuf_mempool_create(const struct rte_cryptodev *dev,
 		mlen = cnxk_cpt_get_mlen();
 	}
 
+	if (dev->feature_flags & RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO) {
+
+		/* Get meta len required for asymmetric operations */
+		mlen = RTE_MAX(mlen, cnxk_cpt_asym_get_mlen());
+	}
+
 	cache_sz = RTE_MIN(RTE_MEMPOOL_CACHE_MAX_SIZE, nb_elements / 1.5);
 
 	/* Allocate mempool */
@@ -549,3 +595,63 @@ cnxk_cpt_sym_session_clear(struct rte_cryptodev *dev,
 {
 	return sym_session_clear(dev->driver_id, sess);
 }
+
+unsigned int
+cnxk_ae_session_size_get(struct rte_cryptodev *dev __rte_unused)
+{
+	return sizeof(struct cnxk_ae_sess);
+}
+
+void
+cnxk_ae_session_clear(struct rte_cryptodev *dev,
+		      struct rte_cryptodev_asym_session *sess)
+{
+	struct rte_mempool *sess_mp;
+	struct cnxk_ae_sess *priv;
+
+	priv = get_asym_session_private_data(sess, dev->driver_id);
+	if (priv == NULL)
+		return;
+
+	/* Free resources allocated in session_cfg */
+	cnxk_ae_free_session_parameters(priv);
+
+	/* Reset and free object back to pool */
+	memset(priv, 0, cnxk_ae_session_size_get(dev));
+	sess_mp = rte_mempool_from_obj(priv);
+	set_asym_session_private_data(sess, dev->driver_id, NULL);
+	rte_mempool_put(sess_mp, priv);
+}
+
+int
+cnxk_ae_session_cfg(struct rte_cryptodev *dev,
+		    struct rte_crypto_asym_xform *xform,
+		    struct rte_cryptodev_asym_session *sess,
+		    struct rte_mempool *pool)
+{
+	struct cnxk_cpt_vf *vf = dev->data->dev_private;
+	struct roc_cpt *roc_cpt = &vf->cpt;
+	struct cnxk_ae_sess *priv;
+	union cpt_inst_w7 w7;
+	int ret;
+
+	if (rte_mempool_get(pool, (void **)&priv))
+		return -ENOMEM;
+
+	memset(priv, 0, sizeof(struct cnxk_ae_sess));
+
+	ret = cnxk_ae_fill_session_parameters(priv, xform);
+	if (ret) {
+		rte_mempool_put(pool, priv);
+		return ret;
+	}
+
+	w7.u64 = 0;
+	w7.s.egrp = roc_cpt->eng_grp[CPT_ENG_TYPE_AE];
+	priv->cpt_inst_w7 = w7.u64;
+	priv->cnxk_fpm_iova = vf->cnxk_fpm_iova;
+	priv->ec_grp = vf->ec_grp;
+	set_asym_session_private_data(sess, dev->driver_id, priv);
+
+	return 0;
+}
diff --git a/drivers/crypto/cnxk/cnxk_cryptodev_ops.h b/drivers/crypto/cnxk/cnxk_cryptodev_ops.h
index 7995959..c317f40 100644
--- a/drivers/crypto/cnxk/cnxk_cryptodev_ops.h
+++ b/drivers/crypto/cnxk/cnxk_cryptodev_ops.h
@@ -105,4 +105,12 @@ void cnxk_cpt_sym_session_clear(struct rte_cryptodev *dev,
 
 void sym_session_clear(int driver_id, struct rte_cryptodev_sym_session *sess);
 
+unsigned int cnxk_ae_session_size_get(struct rte_cryptodev *dev __rte_unused);
+
+void cnxk_ae_session_clear(struct rte_cryptodev *dev,
+			   struct rte_cryptodev_asym_session *sess);
+int cnxk_ae_session_cfg(struct rte_cryptodev *dev,
+			struct rte_crypto_asym_xform *xform,
+			struct rte_cryptodev_asym_session *sess,
+			struct rte_mempool *pool);
 #endif /* _CNXK_CRYPTODEV_OPS_H_ */
-- 
2.7.4


^ permalink raw reply	[flat|nested] 11+ messages in thread

* [dpdk-dev] [PATCH v2 2/4] crypto/cnxk: add asymmetric datapath ops
  2021-06-25  6:25 ` [dpdk-dev] [PATCH v2 0/4] Add asymmetric ops in crypto cnxk PMDs Anoob Joseph
  2021-06-25  6:25   ` [dpdk-dev] [PATCH v2 1/4] crypto/cnxk: add asymmetric session ops Anoob Joseph
@ 2021-06-25  6:25   ` Anoob Joseph
  2021-06-25  6:25   ` [dpdk-dev] [PATCH v2 3/4] crypto/cnxk: add asymmetric capabilities Anoob Joseph
  2021-06-25  6:25   ` [dpdk-dev] [PATCH v2 4/4] test/crypto: add cnxk for asymmetric cases Anoob Joseph
  3 siblings, 0 replies; 11+ messages in thread
From: Anoob Joseph @ 2021-06-25  6:25 UTC (permalink / raw)
  To: Akhil Goyal, Thomas Monjalon
  Cc: Kiran Kumar K, Jerin Jacob, Ankur Dwivedi, Tejasree Kondoj, dev

From: Kiran Kumar K <kirankumark@marvell.com>

Add asymmetric crypto datapath ops.


Signed-off-by: Kiran Kumar K <kirankumark@marvell.com>
---
 drivers/crypto/cnxk/cn10k_cryptodev_ops.c |  27 ++
 drivers/crypto/cnxk/cn9k_cryptodev_ops.c  |  29 +-
 drivers/crypto/cnxk/cnxk_ae.h             | 625 ++++++++++++++++++++++++++++++
 3 files changed, 679 insertions(+), 2 deletions(-)

diff --git a/drivers/crypto/cnxk/cn10k_cryptodev_ops.c b/drivers/crypto/cnxk/cn10k_cryptodev_ops.c
index 8b33764..6d12cc3 100644
--- a/drivers/crypto/cnxk/cn10k_cryptodev_ops.c
+++ b/drivers/crypto/cnxk/cn10k_cryptodev_ops.c
@@ -10,6 +10,7 @@
 #include "cn10k_cryptodev_ops.h"
 #include "cn10k_ipsec_la_ops.h"
 #include "cn10k_ipsec.h"
+#include "cnxk_ae.h"
 #include "cnxk_cryptodev.h"
 #include "cnxk_cryptodev_ops.h"
 #include "cnxk_se.h"
@@ -100,7 +101,9 @@ cn10k_cpt_fill_inst(struct cnxk_cpt_qp *qp, struct rte_crypto_op *ops[],
 		    struct cpt_inst_s inst[], struct cpt_inflight_req *infl_req)
 {
 	struct cn10k_sec_session *sec_sess;
+	struct rte_crypto_asym_op *asym_op;
 	struct rte_crypto_sym_op *sym_op;
+	struct cnxk_ae_sess *ae_sess;
 	struct cnxk_se_sess *sess;
 	struct rte_crypto_op *op;
 	uint64_t w7;
@@ -148,6 +151,21 @@ cn10k_cpt_fill_inst(struct cnxk_cpt_qp *qp, struct rte_crypto_op *ops[],
 			}
 			w7 = sess->cpt_inst_w7;
 		}
+	} else if (op->type == RTE_CRYPTO_OP_TYPE_ASYMMETRIC) {
+
+		if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
+			asym_op = op->asym;
+			ae_sess = get_asym_session_private_data(
+				asym_op->session, cn10k_cryptodev_driver_id);
+			ret = cnxk_ae_enqueue(qp, op, infl_req, &inst[0],
+					      ae_sess);
+			if (unlikely(ret))
+				return 0;
+			w7 = ae_sess->cpt_inst_w7;
+		} else {
+			plt_dp_err("Not supported Asym op without session");
+			return 0;
+		}
 	} else {
 		plt_dp_err("Unsupported op type");
 		return 0;
@@ -303,6 +321,15 @@ cn10k_cpt_dequeue_post_process(struct cnxk_cpt_qp *qp,
 				compl_auth_verify(cop, (uint8_t *)rsp[0],
 						  rsp[1]);
 			}
+		} else if (cop->type == RTE_CRYPTO_OP_TYPE_ASYMMETRIC) {
+			struct rte_crypto_asym_op *op = cop->asym;
+			uintptr_t *mdata = infl_req->mdata;
+			struct cnxk_ae_sess *sess;
+
+			sess = get_asym_session_private_data(
+				op->session, cn10k_cryptodev_driver_id);
+
+			cnxk_ae_post_process(cop, sess, (uint8_t *)mdata[0]);
 		}
 	} else {
 		cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
diff --git a/drivers/crypto/cnxk/cn9k_cryptodev_ops.c b/drivers/crypto/cnxk/cn9k_cryptodev_ops.c
index d8b2aea..e367cc4 100644
--- a/drivers/crypto/cnxk/cn9k_cryptodev_ops.c
+++ b/drivers/crypto/cnxk/cn9k_cryptodev_ops.c
@@ -7,6 +7,7 @@
 
 #include "cn9k_cryptodev.h"
 #include "cn9k_cryptodev_ops.h"
+#include "cnxk_ae.h"
 #include "cnxk_cryptodev.h"
 #include "cnxk_cryptodev_ops.h"
 #include "cnxk_se.h"
@@ -65,11 +66,11 @@ static uint16_t
 cn9k_cpt_enqueue_burst(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops)
 {
 	struct cpt_inflight_req *infl_req;
+	struct rte_crypto_asym_op *asym_op;
 	struct rte_crypto_sym_op *sym_op;
 	uint16_t nb_allowed, count = 0;
 	struct cnxk_cpt_qp *qp = qptr;
 	struct pending_queue *pend_q;
-	struct cnxk_se_sess *sess;
 	struct rte_crypto_op *op;
 	struct cpt_inst_s inst;
 	uint64_t lmt_status;
@@ -95,6 +96,8 @@ cn9k_cpt_enqueue_burst(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops)
 		infl_req->op_flags = 0;
 
 		if (op->type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
+			struct cnxk_se_sess *sess;
+
 			if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
 				sym_op = op->sym;
 				sess = get_sym_session_private_data(
@@ -120,6 +123,20 @@ cn9k_cpt_enqueue_burst(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops)
 							op->sym->session);
 				}
 			}
+			inst.w7.u64 = sess->cpt_inst_w7;
+		} else if (op->type == RTE_CRYPTO_OP_TYPE_ASYMMETRIC) {
+			struct cnxk_ae_sess *sess;
+
+			ret = -EINVAL;
+			if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
+				asym_op = op->asym;
+				sess = get_asym_session_private_data(
+					asym_op->session,
+					cn9k_cryptodev_driver_id);
+				ret = cnxk_ae_enqueue(qp, op, infl_req, &inst,
+						      sess);
+				inst.w7.u64 = sess->cpt_inst_w7;
+			}
 		} else {
 			plt_dp_err("Unsupported op type");
 			break;
@@ -134,7 +151,6 @@ cn9k_cpt_enqueue_burst(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops)
 
 		infl_req->res.cn9k.compcode = CPT_COMP_NOT_DONE;
 		inst.res_addr = (uint64_t)&infl_req->res;
-		inst.w7.u64 = sess->cpt_inst_w7;
 
 		do {
 			/* Copy CPT command to LMTLINE */
@@ -189,6 +205,15 @@ cn9k_cpt_dequeue_post_process(struct cnxk_cpt_qp *qp, struct rte_crypto_op *cop,
 				compl_auth_verify(cop, (uint8_t *)rsp[0],
 						  rsp[1]);
 			}
+		} else if (cop->type == RTE_CRYPTO_OP_TYPE_ASYMMETRIC) {
+			struct rte_crypto_asym_op *op = cop->asym;
+			uintptr_t *mdata = infl_req->mdata;
+			struct cnxk_ae_sess *sess;
+
+			sess = get_asym_session_private_data(
+				op->session, cn9k_cryptodev_driver_id);
+
+			cnxk_ae_post_process(cop, sess, (uint8_t *)mdata[0]);
 		}
 	} else {
 		cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
diff --git a/drivers/crypto/cnxk/cnxk_ae.h b/drivers/crypto/cnxk/cnxk_ae.h
index e3dd63b..c752e62 100644
--- a/drivers/crypto/cnxk/cnxk_ae.h
+++ b/drivers/crypto/cnxk/cnxk_ae.h
@@ -208,4 +208,629 @@ cnxk_ae_free_session_parameters(struct cnxk_ae_sess *sess)
 		break;
 	}
 }
+
+static __rte_always_inline int
+cnxk_ae_modex_prep(struct rte_crypto_op *op, struct roc_ae_buf_ptr *meta_buf,
+		   struct rte_crypto_modex_xform *mod, struct cpt_inst_s *inst)
+{
+	uint32_t exp_len = mod->exponent.length;
+	uint32_t mod_len = mod->modulus.length;
+	struct rte_crypto_mod_op_param mod_op;
+	uint64_t total_key_len;
+	union cpt_inst_w4 w4;
+	uint32_t base_len;
+	uint32_t dlen;
+	uint8_t *dptr;
+
+	mod_op = op->asym->modex;
+
+	base_len = mod_op.base.length;
+	if (unlikely(base_len > mod_len)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -ENOTSUP;
+	}
+
+	total_key_len = mod_len + exp_len;
+
+	/* Input buffer */
+	dptr = meta_buf->vaddr;
+	inst->dptr = (uintptr_t)dptr;
+	memcpy(dptr, mod->modulus.data, total_key_len);
+	dptr += total_key_len;
+	memcpy(dptr, mod_op.base.data, base_len);
+	dptr += base_len;
+	dlen = total_key_len + base_len;
+
+	/* Setup opcodes */
+	w4.s.opcode_major = ROC_AE_MAJOR_OP_MODEX;
+	w4.s.opcode_minor = ROC_AE_MINOR_OP_MODEX;
+
+	w4.s.param1 = mod_len;
+	w4.s.param2 = exp_len;
+	w4.s.dlen = dlen;
+
+	inst->w4.u64 = w4.u64;
+	inst->rptr = (uintptr_t)dptr;
+
+	return 0;
+}
+
+static __rte_always_inline void
+cnxk_ae_rsa_prep(struct rte_crypto_op *op, struct roc_ae_buf_ptr *meta_buf,
+		 struct rte_crypto_rsa_xform *rsa,
+		 rte_crypto_param *crypto_param, struct cpt_inst_s *inst)
+{
+	struct rte_crypto_rsa_op_param rsa_op;
+	uint32_t mod_len = rsa->n.length;
+	uint32_t exp_len = rsa->e.length;
+	uint64_t total_key_len;
+	union cpt_inst_w4 w4;
+	uint32_t in_size;
+	uint32_t dlen;
+	uint8_t *dptr;
+
+	rsa_op = op->asym->rsa;
+	total_key_len = mod_len + exp_len;
+
+	/* Input buffer */
+	dptr = meta_buf->vaddr;
+	inst->dptr = (uintptr_t)dptr;
+	memcpy(dptr, rsa->n.data, total_key_len);
+	dptr += total_key_len;
+
+	in_size = crypto_param->length;
+	memcpy(dptr, crypto_param->data, in_size);
+
+	dptr += in_size;
+	dlen = total_key_len + in_size;
+
+	if (rsa_op.pad == RTE_CRYPTO_RSA_PADDING_NONE) {
+		/* Use mod_exp operation for no_padding type */
+		w4.s.opcode_minor = ROC_AE_MINOR_OP_MODEX;
+		w4.s.param2 = exp_len;
+	} else {
+		if (rsa_op.op_type == RTE_CRYPTO_ASYM_OP_ENCRYPT) {
+			w4.s.opcode_minor = ROC_AE_MINOR_OP_PKCS_ENC;
+			/* Public key encrypt, use BT2*/
+			w4.s.param2 = ROC_AE_CPT_BLOCK_TYPE2 |
+				      ((uint16_t)(exp_len) << 1);
+		} else if (rsa_op.op_type == RTE_CRYPTO_ASYM_OP_VERIFY) {
+			w4.s.opcode_minor = ROC_AE_MINOR_OP_PKCS_DEC;
+			/* Public key decrypt, use BT1 */
+			w4.s.param2 = ROC_AE_CPT_BLOCK_TYPE1;
+		}
+	}
+
+	w4.s.opcode_major = ROC_AE_MAJOR_OP_MODEX;
+
+	w4.s.param1 = mod_len;
+	w4.s.dlen = dlen;
+
+	inst->w4.u64 = w4.u64;
+	inst->rptr = (uintptr_t)dptr;
+}
+
+static __rte_always_inline void
+cnxk_ae_rsa_crt_prep(struct rte_crypto_op *op, struct roc_ae_buf_ptr *meta_buf,
+		     struct rte_crypto_rsa_xform *rsa,
+		     rte_crypto_param *crypto_param, struct cpt_inst_s *inst)
+{
+	uint32_t qInv_len = rsa->qt.qInv.length;
+	struct rte_crypto_rsa_op_param rsa_op;
+	uint32_t dP_len = rsa->qt.dP.length;
+	uint32_t dQ_len = rsa->qt.dQ.length;
+	uint32_t p_len = rsa->qt.p.length;
+	uint32_t q_len = rsa->qt.q.length;
+	uint32_t mod_len = rsa->n.length;
+	uint64_t total_key_len;
+	union cpt_inst_w4 w4;
+	uint32_t in_size;
+	uint32_t dlen;
+	uint8_t *dptr;
+
+	rsa_op = op->asym->rsa;
+	total_key_len = p_len + q_len + dP_len + dQ_len + qInv_len;
+
+	/* Input buffer */
+	dptr = meta_buf->vaddr;
+	inst->dptr = (uintptr_t)dptr;
+	memcpy(dptr, rsa->qt.q.data, total_key_len);
+	dptr += total_key_len;
+
+	in_size = crypto_param->length;
+	memcpy(dptr, crypto_param->data, in_size);
+
+	dptr += in_size;
+	dlen = total_key_len + in_size;
+
+	if (rsa_op.pad == RTE_CRYPTO_RSA_PADDING_NONE) {
+		/*Use mod_exp operation for no_padding type */
+		w4.s.opcode_minor = ROC_AE_MINOR_OP_MODEX_CRT;
+	} else {
+		if (rsa_op.op_type == RTE_CRYPTO_ASYM_OP_SIGN) {
+			w4.s.opcode_minor = ROC_AE_MINOR_OP_PKCS_ENC_CRT;
+			/* Private encrypt, use BT1 */
+			w4.s.param2 = ROC_AE_CPT_BLOCK_TYPE1;
+		} else if (rsa_op.op_type == RTE_CRYPTO_ASYM_OP_DECRYPT) {
+			w4.s.opcode_minor = ROC_AE_MINOR_OP_PKCS_DEC_CRT;
+			/* Private decrypt, use BT2 */
+			w4.s.param2 = ROC_AE_CPT_BLOCK_TYPE2;
+		}
+	}
+
+	w4.s.opcode_major = ROC_AE_MAJOR_OP_MODEX;
+
+	w4.s.param1 = mod_len;
+	w4.s.dlen = dlen;
+
+	inst->w4.u64 = w4.u64;
+	inst->rptr = (uintptr_t)dptr;
+}
+
+static __rte_always_inline int __rte_hot
+cnxk_ae_enqueue_rsa_op(struct rte_crypto_op *op,
+		       struct roc_ae_buf_ptr *meta_buf,
+		       struct cnxk_ae_sess *sess, struct cpt_inst_s *inst)
+{
+	struct rte_crypto_rsa_op_param *rsa = &op->asym->rsa;
+
+	switch (rsa->op_type) {
+	case RTE_CRYPTO_ASYM_OP_VERIFY:
+		cnxk_ae_rsa_prep(op, meta_buf, &sess->rsa_ctx, &rsa->sign,
+				 inst);
+		break;
+	case RTE_CRYPTO_ASYM_OP_ENCRYPT:
+		cnxk_ae_rsa_prep(op, meta_buf, &sess->rsa_ctx, &rsa->message,
+				 inst);
+		break;
+	case RTE_CRYPTO_ASYM_OP_SIGN:
+		cnxk_ae_rsa_crt_prep(op, meta_buf, &sess->rsa_ctx,
+				     &rsa->message, inst);
+		break;
+	case RTE_CRYPTO_ASYM_OP_DECRYPT:
+		cnxk_ae_rsa_crt_prep(op, meta_buf, &sess->rsa_ctx, &rsa->cipher,
+				     inst);
+		break;
+	default:
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static __rte_always_inline void
+cnxk_ae_ecdsa_sign_prep(struct rte_crypto_ecdsa_op_param *ecdsa,
+			struct roc_ae_buf_ptr *meta_buf,
+			uint64_t fpm_table_iova, struct roc_ae_ec_group *ec_grp,
+			uint8_t curveid, struct cpt_inst_s *inst)
+{
+	uint16_t message_len = ecdsa->message.length;
+	uint16_t pkey_len = ecdsa->pkey.length;
+	uint16_t p_align, k_align, m_align;
+	uint16_t k_len = ecdsa->k.length;
+	uint16_t order_len, prime_len;
+	uint16_t o_offset, pk_offset;
+	union cpt_inst_w4 w4;
+	uint16_t dlen;
+	uint8_t *dptr;
+
+	prime_len = ec_grp->prime.length;
+	order_len = ec_grp->order.length;
+
+	/* Truncate input length to curve prime length */
+	if (message_len > prime_len)
+		message_len = prime_len;
+	m_align = RTE_ALIGN_CEIL(message_len, 8);
+
+	p_align = RTE_ALIGN_CEIL(prime_len, 8);
+	k_align = RTE_ALIGN_CEIL(k_len, 8);
+
+	/* Set write offset for order and private key */
+	o_offset = prime_len - order_len;
+	pk_offset = prime_len - pkey_len;
+
+	/* Input buffer */
+	dptr = meta_buf->vaddr;
+	inst->dptr = (uintptr_t)dptr;
+
+	/*
+	 * Set dlen = sum(sizeof(fpm address), ROUNDUP8(scalar len, input len),
+	 * ROUNDUP8(priv key len, prime len, order len)).
+	 * Please note, private key, order cannot exceed prime
+	 * length i.e 3 * p_align.
+	 */
+	dlen = sizeof(fpm_table_iova) + k_align + m_align + p_align * 3;
+
+	memset(dptr, 0, dlen);
+
+	*(uint64_t *)dptr = fpm_table_iova;
+	dptr += sizeof(fpm_table_iova);
+
+	memcpy(dptr, ecdsa->k.data, k_len);
+	dptr += k_align;
+
+	memcpy(dptr, ec_grp->prime.data, prime_len);
+	dptr += p_align;
+
+	memcpy(dptr + o_offset, ec_grp->order.data, order_len);
+	dptr += p_align;
+
+	memcpy(dptr + pk_offset, ecdsa->pkey.data, pkey_len);
+	dptr += p_align;
+
+	memcpy(dptr, ecdsa->message.data, message_len);
+	dptr += m_align;
+
+	/* Setup opcodes */
+	w4.s.opcode_major = ROC_AE_MAJOR_OP_ECDSA;
+	w4.s.opcode_minor = ROC_AE_MINOR_OP_ECDSA_SIGN;
+
+	w4.s.param1 = curveid | (message_len << 8);
+	w4.s.param2 = k_len;
+	w4.s.dlen = dlen;
+
+	inst->w4.u64 = w4.u64;
+	inst->rptr = (uintptr_t)dptr;
+}
+
+static __rte_always_inline void
+cnxk_ae_ecdsa_verify_prep(struct rte_crypto_ecdsa_op_param *ecdsa,
+			  struct roc_ae_buf_ptr *meta_buf,
+			  uint64_t fpm_table_iova,
+			  struct roc_ae_ec_group *ec_grp, uint8_t curveid,
+			  struct cpt_inst_s *inst)
+{
+	uint32_t message_len = ecdsa->message.length;
+	uint16_t o_offset, r_offset, s_offset;
+	uint16_t qx_len = ecdsa->q.x.length;
+	uint16_t qy_len = ecdsa->q.y.length;
+	uint16_t r_len = ecdsa->r.length;
+	uint16_t s_len = ecdsa->s.length;
+	uint16_t order_len, prime_len;
+	uint16_t qx_offset, qy_offset;
+	uint16_t p_align, m_align;
+	union cpt_inst_w4 w4;
+	uint16_t dlen;
+	uint8_t *dptr;
+
+	prime_len = ec_grp->prime.length;
+	order_len = ec_grp->order.length;
+
+	/* Truncate input length to curve prime length */
+	if (message_len > prime_len)
+		message_len = prime_len;
+
+	m_align = RTE_ALIGN_CEIL(message_len, 8);
+	p_align = RTE_ALIGN_CEIL(prime_len, 8);
+
+	/* Set write offset for sign, order and public key coordinates */
+	o_offset = prime_len - order_len;
+	qx_offset = prime_len - qx_len;
+	qy_offset = prime_len - qy_len;
+	r_offset = prime_len - r_len;
+	s_offset = prime_len - s_len;
+
+	/* Input buffer */
+	dptr = meta_buf->vaddr;
+	inst->dptr = (uintptr_t)dptr;
+
+	/*
+	 * Set dlen = sum(sizeof(fpm address), ROUNDUP8(message len),
+	 * ROUNDUP8(sign len(r and s), public key len(x and y coordinates),
+	 * prime len, order len)).
+	 * Please note sign, public key and order can not exceed prime length
+	 * i.e. 6 * p_align
+	 */
+	dlen = sizeof(fpm_table_iova) + m_align + (6 * p_align);
+
+	memset(dptr, 0, dlen);
+
+	*(uint64_t *)dptr = fpm_table_iova;
+	dptr += sizeof(fpm_table_iova);
+
+	memcpy(dptr + r_offset, ecdsa->r.data, r_len);
+	dptr += p_align;
+
+	memcpy(dptr + s_offset, ecdsa->s.data, s_len);
+	dptr += p_align;
+
+	memcpy(dptr, ecdsa->message.data, message_len);
+	dptr += m_align;
+
+	memcpy(dptr + o_offset, ec_grp->order.data, order_len);
+	dptr += p_align;
+
+	memcpy(dptr, ec_grp->prime.data, prime_len);
+	dptr += p_align;
+
+	memcpy(dptr + qx_offset, ecdsa->q.x.data, qx_len);
+	dptr += p_align;
+
+	memcpy(dptr + qy_offset, ecdsa->q.y.data, qy_len);
+	dptr += p_align;
+
+	/* Setup opcodes */
+	w4.s.opcode_major = ROC_AE_MAJOR_OP_ECDSA;
+	w4.s.opcode_minor = ROC_AE_MINOR_OP_ECDSA_VERIFY;
+
+	w4.s.param1 = curveid | (message_len << 8);
+	w4.s.param2 = 0;
+	w4.s.dlen = dlen;
+
+	inst->w4.u64 = w4.u64;
+	inst->rptr = (uintptr_t)dptr;
+}
+
+static __rte_always_inline int __rte_hot
+cnxk_ae_enqueue_ecdsa_op(struct rte_crypto_op *op,
+			 struct roc_ae_buf_ptr *meta_buf,
+			 struct cnxk_ae_sess *sess, uint64_t *fpm_iova,
+			 struct roc_ae_ec_group **ec_grp,
+			 struct cpt_inst_s *inst)
+{
+	struct rte_crypto_ecdsa_op_param *ecdsa = &op->asym->ecdsa;
+	uint8_t curveid = sess->ec_ctx.curveid;
+
+	if (ecdsa->op_type == RTE_CRYPTO_ASYM_OP_SIGN)
+		cnxk_ae_ecdsa_sign_prep(ecdsa, meta_buf, fpm_iova[curveid],
+					ec_grp[curveid], curveid, inst);
+	else if (ecdsa->op_type == RTE_CRYPTO_ASYM_OP_VERIFY)
+		cnxk_ae_ecdsa_verify_prep(ecdsa, meta_buf, fpm_iova[curveid],
+					  ec_grp[curveid], curveid, inst);
+	else {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static __rte_always_inline int
+cnxk_ae_ecpm_prep(struct rte_crypto_ecpm_op_param *ecpm,
+		  struct roc_ae_buf_ptr *meta_buf,
+		  struct roc_ae_ec_group *ec_grp, uint8_t curveid,
+		  struct cpt_inst_s *inst)
+{
+	uint16_t x1_len = ecpm->p.x.length;
+	uint16_t y1_len = ecpm->p.y.length;
+	uint16_t scalar_align, p_align;
+	uint16_t x1_offset, y1_offset;
+	uint16_t dlen, prime_len;
+	union cpt_inst_w4 w4;
+	uint8_t *dptr;
+
+	prime_len = ec_grp->prime.length;
+
+	/* Input buffer */
+	dptr = meta_buf->vaddr;
+	inst->dptr = (uintptr_t)dptr;
+
+	p_align = RTE_ALIGN_CEIL(prime_len, 8);
+	scalar_align = RTE_ALIGN_CEIL(ecpm->scalar.length, 8);
+
+	/*
+	 * Set dlen = sum(ROUNDUP8(input point(x and y coordinates), prime,
+	 * scalar length),
+	 * Please note point length is equivalent to prime of the curve
+	 */
+	dlen = 3 * p_align + scalar_align;
+
+	x1_offset = prime_len - x1_len;
+	y1_offset = prime_len - y1_len;
+
+	memset(dptr, 0, dlen);
+
+	/* Copy input point, scalar, prime */
+	memcpy(dptr + x1_offset, ecpm->p.x.data, x1_len);
+	dptr += p_align;
+	memcpy(dptr + y1_offset, ecpm->p.y.data, y1_len);
+	dptr += p_align;
+	memcpy(dptr, ecpm->scalar.data, ecpm->scalar.length);
+	dptr += scalar_align;
+	memcpy(dptr, ec_grp->prime.data, ec_grp->prime.length);
+	dptr += p_align;
+
+	/* Setup opcodes */
+	w4.s.opcode_major = ROC_AE_MAJOR_OP_ECC;
+	w4.s.opcode_minor = ROC_AE_MINOR_OP_ECC_UMP;
+
+	w4.s.param1 = curveid;
+	w4.s.param2 = ecpm->scalar.length;
+	w4.s.dlen = dlen;
+
+	inst->w4.u64 = w4.u64;
+	inst->rptr = (uintptr_t)dptr;
+
+	return 0;
+}
+
+static __rte_always_inline void
+cnxk_ae_dequeue_rsa_op(struct rte_crypto_op *cop, uint8_t *rptr,
+		       struct rte_crypto_rsa_xform *rsa_ctx)
+{
+	struct rte_crypto_rsa_op_param *rsa = &cop->asym->rsa;
+
+	switch (rsa->op_type) {
+	case RTE_CRYPTO_ASYM_OP_ENCRYPT:
+		rsa->cipher.length = rsa_ctx->n.length;
+		memcpy(rsa->cipher.data, rptr, rsa->cipher.length);
+		break;
+	case RTE_CRYPTO_ASYM_OP_DECRYPT:
+		if (rsa->pad == RTE_CRYPTO_RSA_PADDING_NONE) {
+			rsa->message.length = rsa_ctx->n.length;
+			memcpy(rsa->message.data, rptr, rsa->message.length);
+		} else {
+			/* Get length of decrypted output */
+			rsa->message.length =
+				rte_cpu_to_be_16(*((uint16_t *)rptr));
+			/*
+			 * Offset output data pointer by length field
+			 * (2 bytes) and copy decrypted data.
+			 */
+			memcpy(rsa->message.data, rptr + 2,
+			       rsa->message.length);
+		}
+		break;
+	case RTE_CRYPTO_ASYM_OP_SIGN:
+		rsa->sign.length = rsa_ctx->n.length;
+		memcpy(rsa->sign.data, rptr, rsa->sign.length);
+		break;
+	case RTE_CRYPTO_ASYM_OP_VERIFY:
+		if (rsa->pad == RTE_CRYPTO_RSA_PADDING_NONE) {
+			rsa->sign.length = rsa_ctx->n.length;
+			memcpy(rsa->sign.data, rptr, rsa->sign.length);
+		} else {
+			/* Get length of signed output */
+			rsa->sign.length =
+				rte_cpu_to_be_16(*((uint16_t *)rptr));
+			/*
+			 * Offset output data pointer by length field
+			 * (2 bytes) and copy signed data.
+			 */
+			memcpy(rsa->sign.data, rptr + 2, rsa->sign.length);
+		}
+		if (memcmp(rsa->sign.data, rsa->message.data,
+			   rsa->message.length)) {
+			cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
+		}
+		break;
+	default:
+		cop->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		break;
+	}
+}
+
+static __rte_always_inline void
+cnxk_ae_dequeue_ecdsa_op(struct rte_crypto_ecdsa_op_param *ecdsa, uint8_t *rptr,
+			 struct roc_ae_ec_ctx *ec,
+			 struct roc_ae_ec_group **ec_grp)
+{
+	int prime_len = ec_grp[ec->curveid]->prime.length;
+
+	if (ecdsa->op_type == RTE_CRYPTO_ASYM_OP_VERIFY)
+		return;
+
+	/* Separate out sign r and s components */
+	memcpy(ecdsa->r.data, rptr, prime_len);
+	memcpy(ecdsa->s.data, rptr + RTE_ALIGN_CEIL(prime_len, 8), prime_len);
+	ecdsa->r.length = prime_len;
+	ecdsa->s.length = prime_len;
+}
+
+static __rte_always_inline void
+cnxk_ae_dequeue_ecpm_op(struct rte_crypto_ecpm_op_param *ecpm, uint8_t *rptr,
+			struct roc_ae_ec_ctx *ec,
+			struct roc_ae_ec_group **ec_grp)
+{
+	int prime_len = ec_grp[ec->curveid]->prime.length;
+
+	memcpy(ecpm->r.x.data, rptr, prime_len);
+	memcpy(ecpm->r.y.data, rptr + RTE_ALIGN_CEIL(prime_len, 8), prime_len);
+	ecpm->r.x.length = prime_len;
+	ecpm->r.y.length = prime_len;
+}
+
+static __rte_always_inline void *
+cnxk_ae_alloc_meta(struct roc_ae_buf_ptr *buf,
+		   struct rte_mempool *cpt_meta_pool,
+		   struct cpt_inflight_req *infl_req)
+{
+	uint8_t *mdata;
+
+	if (unlikely(rte_mempool_get(cpt_meta_pool, (void **)&mdata) < 0))
+		return NULL;
+
+	buf->vaddr = mdata;
+
+	infl_req->mdata = mdata;
+	infl_req->op_flags |= CPT_OP_FLAGS_METABUF;
+
+	return mdata;
+}
+
+static __rte_always_inline int32_t __rte_hot
+cnxk_ae_enqueue(struct cnxk_cpt_qp *qp, struct rte_crypto_op *op,
+		struct cpt_inflight_req *infl_req, struct cpt_inst_s *inst,
+		struct cnxk_ae_sess *sess)
+{
+	struct cpt_qp_meta_info *minfo = &qp->meta_info;
+	struct rte_crypto_asym_op *asym_op = op->asym;
+	struct roc_ae_buf_ptr meta_buf;
+	uint64_t *mop;
+	void *mdata;
+	int ret;
+
+	mdata = cnxk_ae_alloc_meta(&meta_buf, minfo->pool, infl_req);
+	if (mdata == NULL)
+		return -ENOMEM;
+
+	/* Reserve 8B for RPTR */
+	meta_buf.vaddr = PLT_PTR_ADD(mdata, sizeof(uint64_t));
+
+	switch (sess->xfrm_type) {
+	case RTE_CRYPTO_ASYM_XFORM_MODEX:
+		ret = cnxk_ae_modex_prep(op, &meta_buf, &sess->mod_ctx, inst);
+		if (unlikely(ret))
+			goto req_fail;
+		break;
+	case RTE_CRYPTO_ASYM_XFORM_RSA:
+		ret = cnxk_ae_enqueue_rsa_op(op, &meta_buf, sess, inst);
+		if (unlikely(ret))
+			goto req_fail;
+		break;
+	case RTE_CRYPTO_ASYM_XFORM_ECDSA:
+		ret = cnxk_ae_enqueue_ecdsa_op(op, &meta_buf, sess,
+					       sess->cnxk_fpm_iova,
+					       sess->ec_grp, inst);
+		if (unlikely(ret))
+			goto req_fail;
+		break;
+	case RTE_CRYPTO_ASYM_XFORM_ECPM:
+		ret = cnxk_ae_ecpm_prep(&asym_op->ecpm, &meta_buf,
+					sess->ec_grp[sess->ec_ctx.curveid],
+					sess->ec_ctx.curveid, inst);
+		if (unlikely(ret))
+			goto req_fail;
+		break;
+	default:
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		ret = -EINVAL;
+		goto req_fail;
+	}
+
+	mop = mdata;
+	mop[0] = inst->rptr;
+	return 0;
+
+req_fail:
+	rte_mempool_put(minfo->pool, infl_req->mdata);
+	return ret;
+}
+
+static __rte_always_inline void
+cnxk_ae_post_process(struct rte_crypto_op *cop, struct cnxk_ae_sess *sess,
+		     uint8_t *rptr)
+{
+	struct rte_crypto_asym_op *op = cop->asym;
+
+	switch (sess->xfrm_type) {
+	case RTE_CRYPTO_ASYM_XFORM_RSA:
+		cnxk_ae_dequeue_rsa_op(cop, rptr, &sess->rsa_ctx);
+		break;
+	case RTE_CRYPTO_ASYM_XFORM_MODEX:
+		op->modex.result.length = sess->mod_ctx.modulus.length;
+		memcpy(op->modex.result.data, rptr, op->modex.result.length);
+		break;
+	case RTE_CRYPTO_ASYM_XFORM_ECDSA:
+		cnxk_ae_dequeue_ecdsa_op(&op->ecdsa, rptr, &sess->ec_ctx,
+					 sess->ec_grp);
+		break;
+	case RTE_CRYPTO_ASYM_XFORM_ECPM:
+		cnxk_ae_dequeue_ecpm_op(&op->ecpm, rptr, &sess->ec_ctx,
+					sess->ec_grp);
+		break;
+	default:
+		cop->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		break;
+	}
+}
 #endif /* _CNXK_AE_H_ */
-- 
2.7.4


^ permalink raw reply	[flat|nested] 11+ messages in thread

* [dpdk-dev] [PATCH v2 3/4] crypto/cnxk: add asymmetric capabilities
  2021-06-25  6:25 ` [dpdk-dev] [PATCH v2 0/4] Add asymmetric ops in crypto cnxk PMDs Anoob Joseph
  2021-06-25  6:25   ` [dpdk-dev] [PATCH v2 1/4] crypto/cnxk: add asymmetric session ops Anoob Joseph
  2021-06-25  6:25   ` [dpdk-dev] [PATCH v2 2/4] crypto/cnxk: add asymmetric datapath ops Anoob Joseph
@ 2021-06-25  6:25   ` Anoob Joseph
  2021-06-25  6:25   ` [dpdk-dev] [PATCH v2 4/4] test/crypto: add cnxk for asymmetric cases Anoob Joseph
  3 siblings, 0 replies; 11+ messages in thread
From: Anoob Joseph @ 2021-06-25  6:25 UTC (permalink / raw)
  To: Akhil Goyal, Thomas Monjalon
  Cc: Anoob Joseph, Jerin Jacob, Ankur Dwivedi, Tejasree Kondoj, dev

Add supported asymmetric capabilities.

Signed-off-by: Anoob Joseph <anoobj@marvell.com>
---
 doc/guides/cryptodevs/cnxk.rst                    |  6 +++
 drivers/crypto/cnxk/cnxk_cryptodev_capabilities.c | 56 +++++++++++++++++++++++
 2 files changed, 62 insertions(+)

diff --git a/doc/guides/cryptodevs/cnxk.rst b/doc/guides/cryptodevs/cnxk.rst
index db949fa..bbc6daa 100644
--- a/doc/guides/cryptodevs/cnxk.rst
+++ b/doc/guides/cryptodevs/cnxk.rst
@@ -67,6 +67,12 @@ AEAD algorithms:
 * ``RTE_CRYPTO_AEAD_AES_GCM``
 * ``RTE_CRYPTO_AEAD_CHACHA20_POLY1305``
 
+Asymmetric Crypto Algorithms
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+* ``RTE_CRYPTO_ASYM_XFORM_RSA``
+* ``RTE_CRYPTO_ASYM_XFORM_MODEX``
+
 Installation
 ------------
 
diff --git a/drivers/crypto/cnxk/cnxk_cryptodev_capabilities.c b/drivers/crypto/cnxk/cnxk_cryptodev_capabilities.c
index d52fa89..ab37f9c 100644
--- a/drivers/crypto/cnxk/cnxk_cryptodev_capabilities.c
+++ b/drivers/crypto/cnxk/cnxk_cryptodev_capabilities.c
@@ -28,6 +28,61 @@
 				     RTE_DIM(sec_caps_##name));                \
 	} while (0)
 
+static const struct rte_cryptodev_capabilities caps_mul[] = {
+	{	/* RSA */
+		.op = RTE_CRYPTO_OP_TYPE_ASYMMETRIC,
+		{.asym = {
+			.xform_capa = {
+				.xform_type = RTE_CRYPTO_ASYM_XFORM_RSA,
+				.op_types = ((1 << RTE_CRYPTO_ASYM_OP_SIGN) |
+					(1 << RTE_CRYPTO_ASYM_OP_VERIFY) |
+					(1 << RTE_CRYPTO_ASYM_OP_ENCRYPT) |
+					(1 << RTE_CRYPTO_ASYM_OP_DECRYPT)),
+				{.modlen = {
+					.min = 17,
+					.max = 1024,
+					.increment = 1
+				}, }
+			}
+		}, }
+	},
+	{	/* MOD_EXP */
+		.op = RTE_CRYPTO_OP_TYPE_ASYMMETRIC,
+		{.asym = {
+			.xform_capa = {
+				.xform_type = RTE_CRYPTO_ASYM_XFORM_MODEX,
+				.op_types = 0,
+				{.modlen = {
+					.min = 17,
+					.max = 1024,
+					.increment = 1
+				}, }
+			}
+		}, }
+	},
+	{	/* ECDSA */
+		.op = RTE_CRYPTO_OP_TYPE_ASYMMETRIC,
+		{.asym = {
+			.xform_capa = {
+				.xform_type = RTE_CRYPTO_ASYM_XFORM_ECDSA,
+				.op_types = ((1 << RTE_CRYPTO_ASYM_OP_SIGN) |
+					(1 << RTE_CRYPTO_ASYM_OP_VERIFY)),
+				}
+			},
+		}
+	},
+	{	/* ECPM */
+		.op = RTE_CRYPTO_OP_TYPE_ASYMMETRIC,
+		{.asym = {
+			.xform_capa = {
+				.xform_type = RTE_CRYPTO_ASYM_XFORM_ECPM,
+				.op_types = 0
+				}
+			},
+		}
+	},
+};
+
 static const struct rte_cryptodev_capabilities caps_sha1_sha2[] = {
 	{	/* SHA1 */
 		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
@@ -748,6 +803,7 @@ crypto_caps_populate(struct rte_cryptodev_capabilities cnxk_caps[],
 {
 	int cur_pos = 0;
 
+	CPT_CAPS_ADD(cnxk_caps, &cur_pos, hw_caps, mul);
 	CPT_CAPS_ADD(cnxk_caps, &cur_pos, hw_caps, sha1_sha2);
 	CPT_CAPS_ADD(cnxk_caps, &cur_pos, hw_caps, chacha20);
 	CPT_CAPS_ADD(cnxk_caps, &cur_pos, hw_caps, zuc_snow3g);
-- 
2.7.4


^ permalink raw reply	[flat|nested] 11+ messages in thread

* [dpdk-dev] [PATCH v2 4/4] test/crypto: add cnxk for asymmetric cases
  2021-06-25  6:25 ` [dpdk-dev] [PATCH v2 0/4] Add asymmetric ops in crypto cnxk PMDs Anoob Joseph
                     ` (2 preceding siblings ...)
  2021-06-25  6:25   ` [dpdk-dev] [PATCH v2 3/4] crypto/cnxk: add asymmetric capabilities Anoob Joseph
@ 2021-06-25  6:25   ` Anoob Joseph
  3 siblings, 0 replies; 11+ messages in thread
From: Anoob Joseph @ 2021-06-25  6:25 UTC (permalink / raw)
  To: Akhil Goyal, Thomas Monjalon
  Cc: Kiran Kumar K, Jerin Jacob, Ankur Dwivedi, Tejasree Kondoj, dev

From: Kiran Kumar K <kirankumark@marvell.com>

Add autotest for cn9k and cn10k.

Signed-off-by: Kiran Kumar K <kirankumark@marvell.com>
---
 app/test/test_cryptodev_asym.c | 30 ++++++++++++++++++++++++++++++
 doc/guides/cryptodevs/cnxk.rst | 17 +++++++++++++++++
 2 files changed, 47 insertions(+)

diff --git a/app/test/test_cryptodev_asym.c b/app/test/test_cryptodev_asym.c
index b36eec9..847b074 100644
--- a/app/test/test_cryptodev_asym.c
+++ b/app/test/test_cryptodev_asym.c
@@ -2390,6 +2390,34 @@ test_cryptodev_octeontx2_asym(void)
 	return unit_test_suite_runner(&cryptodev_octeontx_asym_testsuite);
 }
 
+static int
+test_cryptodev_cn9k_asym(void)
+{
+	gbl_driver_id = rte_cryptodev_driver_id_get(
+			RTE_STR(CRYPTODEV_NAME_CN9K_PMD));
+	if (gbl_driver_id == -1) {
+		RTE_LOG(ERR, USER1, "CN9K PMD must be loaded.\n");
+		return TEST_FAILED;
+	}
+
+	/* Use test suite registered for crypto_octeontx PMD */
+	return unit_test_suite_runner(&cryptodev_octeontx_asym_testsuite);
+}
+
+static int
+test_cryptodev_cn10k_asym(void)
+{
+	gbl_driver_id = rte_cryptodev_driver_id_get(
+			RTE_STR(CRYPTODEV_NAME_CN10K_PMD));
+	if (gbl_driver_id == -1) {
+		RTE_LOG(ERR, USER1, "CN10K PMD must be loaded.\n");
+		return TEST_FAILED;
+	}
+
+	/* Use test suite registered for crypto_octeontx PMD */
+	return unit_test_suite_runner(&cryptodev_octeontx_asym_testsuite);
+}
+
 REGISTER_TEST_COMMAND(cryptodev_openssl_asym_autotest,
 					  test_cryptodev_openssl_asym);
 
@@ -2400,3 +2428,5 @@ REGISTER_TEST_COMMAND(cryptodev_octeontx_asym_autotest,
 
 REGISTER_TEST_COMMAND(cryptodev_octeontx2_asym_autotest,
 					  test_cryptodev_octeontx2_asym);
+REGISTER_TEST_COMMAND(cryptodev_cn9k_asym_autotest, test_cryptodev_cn9k_asym);
+REGISTER_TEST_COMMAND(cryptodev_cn10k_asym_autotest, test_cryptodev_cn10k_asym);
diff --git a/doc/guides/cryptodevs/cnxk.rst b/doc/guides/cryptodevs/cnxk.rst
index bbc6daa..98c7118 100644
--- a/doc/guides/cryptodevs/cnxk.rst
+++ b/doc/guides/cryptodevs/cnxk.rst
@@ -191,6 +191,23 @@ running the test application:
     ./dpdk-test
     RTE>>cryptodev_cn10k_autotest
 
+The asymmetric crypto operations on OCTEON cnxk crypto PMD may be verified by
+running the test application:
+
+``CN9K``
+
+.. code-block:: console
+
+    ./dpdk-test
+    RTE>>cryptodev_cn9k_asym_autotest
+
+``CN10K``
+
+.. code-block:: console
+
+    ./dpdk-test
+    RTE>>cryptodev_cn10k_asym_autotest
+
 Lookaside IPsec Support
 -----------------------
 
-- 
2.7.4


^ permalink raw reply	[flat|nested] 11+ messages in thread

end of thread, other threads:[~2021-06-25  6:25 UTC | newest]

Thread overview: 11+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2021-06-02 17:46 [dpdk-dev] [PATCH 0/3] Add asymmetric ops in crypto cnxk PMDs Anoob Joseph
2021-06-02 17:46 ` [dpdk-dev] [PATCH 1/3] crypto/cnxk: add asymmetric session ops Anoob Joseph
2021-06-16 20:21   ` Akhil Goyal
2021-06-02 17:46 ` [dpdk-dev] [PATCH 2/3] crypto/cnxk: add asymmetric datapath ops Anoob Joseph
2021-06-02 17:46 ` [dpdk-dev] [PATCH 3/3] app/test: adding cnxk asymmetric autotest Anoob Joseph
2021-06-16 20:23   ` Akhil Goyal
2021-06-25  6:25 ` [dpdk-dev] [PATCH v2 0/4] Add asymmetric ops in crypto cnxk PMDs Anoob Joseph
2021-06-25  6:25   ` [dpdk-dev] [PATCH v2 1/4] crypto/cnxk: add asymmetric session ops Anoob Joseph
2021-06-25  6:25   ` [dpdk-dev] [PATCH v2 2/4] crypto/cnxk: add asymmetric datapath ops Anoob Joseph
2021-06-25  6:25   ` [dpdk-dev] [PATCH v2 3/4] crypto/cnxk: add asymmetric capabilities Anoob Joseph
2021-06-25  6:25   ` [dpdk-dev] [PATCH v2 4/4] test/crypto: add cnxk for asymmetric cases Anoob Joseph

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).