DPDK patches and discussions
* [PATCH 0/8] fixes and improvements to CNXK crypto PMD
@ 2023-06-19 12:45 Tejasree Kondoj
  2023-06-19 12:45 ` [PATCH 1/8] crypto/cnxk: check for null pointer Tejasree Kondoj
                   ` (4 more replies)
  0 siblings, 5 replies; 6+ messages in thread
From: Tejasree Kondoj @ 2023-06-19 12:45 UTC (permalink / raw)
  To: Akhil Goyal
  Cc: Anoob Joseph, Aakash Sasidharan, Gowrishankar Muthukrishnan,
	Vidya Sagar Velumuri, dev

This series adds SM4 and raw cryptodev API support, along with fixes,
to the CNXK crypto PMD.

Aakash Sasidharan (1):
  crypto/cnxk: use pt inst for null cipher with null auth

Anoob Joseph (3):
  crypto/cnxk: remove packet length checks in crypto offload
  crypto/cnxk: add support for raw APIs
  test/crypto: enable raw crypto tests for crypto_cn10k

Gowrishankar Muthukrishnan (1):
  crypto/cnxk: fix order of ECFPM parameters

Tejasree Kondoj (2):
  crypto/cnxk: check for null pointer
  crypto/cnxk: enable context cache for 103XX

Vidya Sagar Velumuri (1):
  crypto/cnxk: add support for sm4

 app/test/test_cryptodev.c                     |   8 +
 doc/guides/cryptodevs/cnxk.rst                |   1 +
 doc/guides/cryptodevs/features/cn10k.ini      |   1 +
 drivers/common/cnxk/hw/cpt.h                  |   5 +-
 drivers/common/cnxk/roc_se.c                  |   3 +
 drivers/common/cnxk/roc_se.h                  |  20 +
 drivers/crypto/cnxk/cn10k_cryptodev_ops.c     | 459 ++++++++++++
 drivers/crypto/cnxk/cnxk_ae.h                 |   3 +-
 drivers/crypto/cnxk/cnxk_cryptodev.c          |  20 +-
 drivers/crypto/cnxk/cnxk_cryptodev.h          |   2 +-
 .../crypto/cnxk/cnxk_cryptodev_capabilities.c | 113 ++-
 drivers/crypto/cnxk/cnxk_cryptodev_ops.c      |  29 +-
 drivers/crypto/cnxk/cnxk_cryptodev_ops.h      |   9 +
 drivers/crypto/cnxk/cnxk_se.h                 | 696 ++++++++++++++++--
 14 files changed, 1262 insertions(+), 107 deletions(-)

-- 
2.25.1


* [PATCH 1/8] crypto/cnxk: check for null pointer
  2023-06-19 12:45 [PATCH 0/8] fixes and improvements to CNXK crypto PMD Tejasree Kondoj
@ 2023-06-19 12:45 ` Tejasree Kondoj
  2023-06-19 12:45 ` [PATCH 2/8] crypto/cnxk: remove packet length checks in crypto offload Tejasree Kondoj
                   ` (3 subsequent siblings)
  4 siblings, 0 replies; 6+ messages in thread
From: Tejasree Kondoj @ 2023-06-19 12:45 UTC (permalink / raw)
  To: Akhil Goyal
  Cc: Anoob Joseph, Aakash Sasidharan, Gowrishankar Muthukrishnan,
	Vidya Sagar Velumuri, dev

Add a check to avoid dereferencing a NULL xform->next pointer.

Signed-off-by: Tejasree Kondoj <ktejasree@marvell.com>
---
 drivers/crypto/cnxk/cnxk_se.h | 14 ++++++++------
 1 file changed, 8 insertions(+), 6 deletions(-)

diff --git a/drivers/crypto/cnxk/cnxk_se.h b/drivers/crypto/cnxk/cnxk_se.h
index c66ab80749..a85e4c5170 100644
--- a/drivers/crypto/cnxk/cnxk_se.h
+++ b/drivers/crypto/cnxk/cnxk_se.h
@@ -2185,12 +2185,14 @@ fill_sess_auth(struct rte_crypto_sym_xform *xform, struct cnxk_se_sess *sess)
 
 	if (zsk_flag && sess->roc_se_ctx.auth_then_ciph) {
 		struct rte_crypto_cipher_xform *c_form;
-		c_form = &xform->next->cipher;
-		if (c_form->op != RTE_CRYPTO_CIPHER_OP_ENCRYPT &&
-		    a_form->op != RTE_CRYPTO_AUTH_OP_GENERATE) {
-			plt_dp_err("Crypto: PDCP auth then cipher must use"
-				   " options: encrypt and generate");
-			return -EINVAL;
+		if (xform->next != NULL) {
+			c_form = &xform->next->cipher;
+			if ((c_form != NULL) && (c_form->op != RTE_CRYPTO_CIPHER_OP_ENCRYPT) &&
+			    a_form->op != RTE_CRYPTO_AUTH_OP_GENERATE) {
+				plt_dp_err("Crypto: PDCP auth then cipher must use"
+					   " options: encrypt and generate");
+				return -EINVAL;
+			}
 		}
 	}
 
-- 
2.25.1


* [PATCH 2/8] crypto/cnxk: remove packet length checks in crypto offload
  2023-06-19 12:45 [PATCH 0/8] fixes and improvements to CNXK crypto PMD Tejasree Kondoj
  2023-06-19 12:45 ` [PATCH 1/8] crypto/cnxk: check for null pointer Tejasree Kondoj
@ 2023-06-19 12:45 ` Tejasree Kondoj
  2023-06-19 12:45 ` [PATCH 3/8] crypto/cnxk: use pt inst for null cipher with null auth Tejasree Kondoj
                   ` (2 subsequent siblings)
  4 siblings, 0 replies; 6+ messages in thread
From: Tejasree Kondoj @ 2023-06-19 12:45 UTC (permalink / raw)
  To: Akhil Goyal
  Cc: Anoob Joseph, Aakash Sasidharan, Gowrishankar Muthukrishnan,
	Vidya Sagar Velumuri, dev

From: Anoob Joseph <anoobj@marvell.com>

When performing crypto offload, the packet length of the input/output
buffer does not matter. The length that matters is the
cipher/authentication range specified in the crypto op. Since the
application can request ciphering of only a small portion of the buffer,
the extra comparison of buffer lengths may result in false failures
during enqueue of OOP operations.
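
As an illustration (hypothetical helper and offsets, not part of this
patch), an out-of-place request need not make m_dst as long as m_src;
only the cipher range programmed in the op has to be covered:

#include <rte_crypto.h>
#include <rte_mbuf.h>

/*
 * Hypothetical OOP request that ciphers only a 64-byte region of the
 * source packet. The PMD no longer requires
 * rte_pktmbuf_pkt_len(m_dst) >= rte_pktmbuf_pkt_len(m_src); m_dst only
 * needs room for the range that is actually processed.
 */
static void
fill_oop_cipher_op(struct rte_crypto_op *op, struct rte_mbuf *m_src,
		   struct rte_mbuf *m_dst)
{
	op->sym->m_src = m_src;            /* e.g. full-sized packet */
	op->sym->m_dst = m_dst;            /* may be shorter than m_src */
	op->sym->cipher.data.offset = 128; /* start of region to cipher */
	op->sym->cipher.data.length = 64;  /* only this range is processed */
}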

Signed-off-by: Anoob Joseph <anoobj@marvell.com>
---
 drivers/crypto/cnxk/cnxk_se.h | 54 +++--------------------------------
 1 file changed, 4 insertions(+), 50 deletions(-)

diff --git a/drivers/crypto/cnxk/cnxk_se.h b/drivers/crypto/cnxk/cnxk_se.h
index a85e4c5170..87414eb131 100644
--- a/drivers/crypto/cnxk/cnxk_se.h
+++ b/drivers/crypto/cnxk/cnxk_se.h
@@ -2539,23 +2539,6 @@ fill_fc_params(struct rte_crypto_op *cop, struct cnxk_se_sess *sess,
 		}
 
 		if (unlikely(m_dst != NULL)) {
-			uint32_t pkt_len;
-
-			/* Try to make room as much as src has */
-			pkt_len = rte_pktmbuf_pkt_len(m_dst);
-
-			if (unlikely(pkt_len < rte_pktmbuf_pkt_len(m_src))) {
-				pkt_len = rte_pktmbuf_pkt_len(m_src) - pkt_len;
-				if (!rte_pktmbuf_append(m_dst, pkt_len)) {
-					plt_dp_err("Not enough space in "
-						   "m_dst %p, need %u"
-						   " more",
-						   m_dst, pkt_len);
-					ret = -EINVAL;
-					goto err_exit;
-				}
-			}
-
 			if (prepare_iov_from_pkt(m_dst, fc_params.dst_iov, 0)) {
 				plt_dp_err("Prepare dst iov failed for "
 					   "m_dst %p",
@@ -2650,32 +2633,18 @@ fill_pdcp_params(struct rte_crypto_op *cop, struct cnxk_se_sess *sess,
 		fc_params.dst_iov = fc_params.src_iov = (void *)src;
 		prepare_iov_from_pkt_inplace(m_src, &fc_params, &flags);
 	} else {
-		uint32_t pkt_len;
-
 		/* Out of place processing */
+
 		fc_params.src_iov = (void *)src;
 		fc_params.dst_iov = (void *)dst;
 
 		/* Store SG I/O in the api for reuse */
-		if (prepare_iov_from_pkt(m_src, fc_params.src_iov, 0)) {
+		if (unlikely(prepare_iov_from_pkt(m_src, fc_params.src_iov, 0))) {
 			plt_dp_err("Prepare src iov failed");
 			ret = -EINVAL;
 			goto err_exit;
 		}
 
-		/* Try to make room as much as src has */
-		pkt_len = rte_pktmbuf_pkt_len(m_dst);
-
-		if (unlikely(pkt_len < rte_pktmbuf_pkt_len(m_src))) {
-			pkt_len = rte_pktmbuf_pkt_len(m_src) - pkt_len;
-			if (unlikely(rte_pktmbuf_append(m_dst, pkt_len) == NULL)) {
-				plt_dp_err("Not enough space in m_dst %p, need %u more", m_dst,
-					   pkt_len);
-				ret = -EINVAL;
-				goto err_exit;
-			}
-		}
-
 		if (unlikely(prepare_iov_from_pkt(m_dst, fc_params.dst_iov, 0))) {
 			plt_dp_err("Prepare dst iov failed for m_dst %p", m_dst);
 			ret = -EINVAL;
@@ -2689,7 +2658,8 @@ fill_pdcp_params(struct rte_crypto_op *cop, struct cnxk_se_sess *sess,
 		mdata = alloc_op_meta(&fc_params.meta_buf, m_info->mlen, m_info->pool, infl_req);
 		if (mdata == NULL) {
 			plt_dp_err("Could not allocate meta buffer");
-			return -ENOMEM;
+			ret = -ENOMEM;
+			goto err_exit;
 		}
 	}
 
@@ -2798,22 +2768,6 @@ fill_pdcp_chain_params(struct rte_crypto_op *cop, struct cnxk_se_sess *sess,
 		}
 
 		if (unlikely(m_dst != NULL)) {
-			uint32_t pkt_len;
-
-			/* Try to make room as much as src has */
-			pkt_len = rte_pktmbuf_pkt_len(m_dst);
-
-			if (unlikely(pkt_len < rte_pktmbuf_pkt_len(m_src))) {
-				pkt_len = rte_pktmbuf_pkt_len(m_src) - pkt_len;
-				if (!rte_pktmbuf_append(m_dst, pkt_len)) {
-					plt_dp_err("Not enough space in m_dst "
-						   "%p, need %u more",
-						   m_dst, pkt_len);
-					ret = -EINVAL;
-					goto err_exit;
-				}
-			}
-
 			if (unlikely(prepare_iov_from_pkt(m_dst, fc_params.dst_iov, 0))) {
 				plt_dp_err("Could not prepare m_dst iov %p", m_dst);
 				ret = -EINVAL;
-- 
2.25.1


* [PATCH 3/8] crypto/cnxk: use pt inst for null cipher with null auth
  2023-06-19 12:45 [PATCH 0/8] fixes and improvements to CNXK crypto PMD Tejasree Kondoj
  2023-06-19 12:45 ` [PATCH 1/8] crypto/cnxk: check for null pointer Tejasree Kondoj
  2023-06-19 12:45 ` [PATCH 2/8] crypto/cnxk: remove packet length checks in crypto offload Tejasree Kondoj
@ 2023-06-19 12:45 ` Tejasree Kondoj
  2023-06-19 12:45 ` [PATCH 4/8] crypto/cnxk: enable context cache for 103XX Tejasree Kondoj
  2023-06-19 12:45 ` [PATCH 5/8] crypto/cnxk: add support for raw APIs Tejasree Kondoj
  4 siblings, 0 replies; 6+ messages in thread
From: Tejasree Kondoj @ 2023-06-19 12:45 UTC (permalink / raw)
  To: Akhil Goyal
  Cc: Aakash Sasidharan, Anoob Joseph, Gowrishankar Muthukrishnan,
	Vidya Sagar Velumuri, dev

From: Aakash Sasidharan <asasidharan@marvell.com>

Use the passthrough instruction for the NULL cipher with NULL auth
combination.
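
For reference, a sketch (hypothetical helper, not part of this patch)
of the NULL cipher plus NULL auth transform chain that now takes the
passthrough path in the PMD:

#include <string.h>
#include <rte_crypto_sym.h>

/*
 * Hypothetical xform chain: NULL cipher chained with NULL auth. A
 * session created from such a chain is now marked as passthrough and
 * serviced with the MISC/passthrough CPT instruction.
 */
static void
build_null_null_chain(struct rte_crypto_sym_xform *cipher,
		      struct rte_crypto_sym_xform *auth)
{
	memset(cipher, 0, sizeof(*cipher));
	memset(auth, 0, sizeof(*auth));

	cipher->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
	cipher->cipher.algo = RTE_CRYPTO_CIPHER_NULL;
	cipher->cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
	cipher->next = auth;

	auth->type = RTE_CRYPTO_SYM_XFORM_AUTH;
	auth->auth.algo = RTE_CRYPTO_AUTH_NULL;
	auth->auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
	auth->next = NULL;
}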

Signed-off-by: Aakash Sasidharan <asasidharan@marvell.com>
---
 drivers/crypto/cnxk/cnxk_cryptodev_ops.c | 20 ++++----
 drivers/crypto/cnxk/cnxk_se.h            | 59 ++++++++++++++++--------
 2 files changed, 50 insertions(+), 29 deletions(-)

diff --git a/drivers/crypto/cnxk/cnxk_cryptodev_ops.c b/drivers/crypto/cnxk/cnxk_cryptodev_ops.c
index d405786668..2018b0eba5 100644
--- a/drivers/crypto/cnxk/cnxk_cryptodev_ops.c
+++ b/drivers/crypto/cnxk/cnxk_cryptodev_ops.c
@@ -526,16 +526,13 @@ cnxk_sess_fill(struct roc_cpt *roc_cpt, struct rte_crypto_sym_xform *xform,
 		return -EINVAL;
 	}
 
-	if ((c_xfrm == NULL || c_xfrm->cipher.algo == RTE_CRYPTO_CIPHER_NULL) &&
-	    a_xfrm != NULL && a_xfrm->auth.algo == RTE_CRYPTO_AUTH_NULL &&
-	    a_xfrm->auth.op == RTE_CRYPTO_AUTH_OP_VERIFY) {
-		plt_dp_err("Null cipher + null auth verify is not supported");
-		return -ENOTSUP;
-	}
+	if ((aead_xfrm == NULL) &&
+	    (c_xfrm == NULL || c_xfrm->cipher.algo == RTE_CRYPTO_CIPHER_NULL) &&
+	    (a_xfrm == NULL || a_xfrm->auth.algo == RTE_CRYPTO_AUTH_NULL))
+		sess->passthrough = 1;
 
 	/* Cipher only */
-	if (c_xfrm != NULL &&
-	    (a_xfrm == NULL || a_xfrm->auth.algo == RTE_CRYPTO_AUTH_NULL)) {
+	if (c_xfrm != NULL && (a_xfrm == NULL || a_xfrm->auth.algo == RTE_CRYPTO_AUTH_NULL)) {
 		if (fill_sess_cipher(c_xfrm, sess))
 			return -ENOTSUP;
 		else
@@ -662,7 +659,8 @@ cnxk_cpt_inst_w7_get(struct cnxk_se_sess *sess, struct roc_cpt *roc_cpt)
 		inst_w7.s.cptr += 8;
 
 	/* Set the engine group */
-	if (sess->zsk_flag || sess->aes_ctr_eea2 || sess->is_sha3 || sess->is_sm3)
+	if (sess->zsk_flag || sess->aes_ctr_eea2 || sess->is_sha3 || sess->is_sm3 ||
+	    sess->passthrough)
 		inst_w7.s.egrp = roc_cpt->eng_grp[CPT_ENG_TYPE_SE];
 	else
 		inst_w7.s.egrp = roc_cpt->eng_grp[CPT_ENG_TYPE_IE];
@@ -687,7 +685,9 @@ sym_session_configure(struct roc_cpt *roc_cpt, struct rte_crypto_sym_xform *xfor
 
 	sess_priv->lf = roc_cpt->lf[0];
 
-	if (sess_priv->cpt_op & ROC_SE_OP_CIPHER_MASK) {
+	if (sess_priv->passthrough)
+		thr_type = CPT_DP_THREAD_TYPE_PT;
+	else if (sess_priv->cpt_op & ROC_SE_OP_CIPHER_MASK) {
 		switch (sess_priv->roc_se_ctx.fc_type) {
 		case ROC_SE_FC_GEN:
 			if (sess_priv->aes_gcm || sess_priv->aes_ccm || sess_priv->chacha_poly)
diff --git a/drivers/crypto/cnxk/cnxk_se.h b/drivers/crypto/cnxk/cnxk_se.h
index 87414eb131..ceb50fa3b6 100644
--- a/drivers/crypto/cnxk/cnxk_se.h
+++ b/drivers/crypto/cnxk/cnxk_se.h
@@ -24,6 +24,8 @@ enum cpt_dp_thread_type {
 	CPT_DP_THREAD_TYPE_PDCP_CHAIN,
 	CPT_DP_THREAD_TYPE_KASUMI,
 	CPT_DP_THREAD_AUTH_ONLY,
+	CPT_DP_THREAD_GENERIC,
+	CPT_DP_THREAD_TYPE_PT,
 };
 
 struct cnxk_se_sess {
@@ -46,7 +48,8 @@ struct cnxk_se_sess {
 	uint8_t is_sha3 : 1;
 	uint8_t short_iv : 1;
 	uint8_t is_sm3 : 1;
-	uint8_t rsvd : 5;
+	uint8_t passthrough : 1;
+	uint8_t rsvd : 4;
 	uint8_t mac_len;
 	uint8_t iv_length;
 	uint8_t auth_iv_length;
@@ -636,15 +639,6 @@ cpt_digest_gen_sg_ver1_prep(uint32_t flags, uint64_t d_lens, struct roc_se_fc_pa
 		cpt_inst_w4.s.dlen = data_len;
 	}
 
-	/* Null auth only case enters the if */
-	if (unlikely(!hash_type && !ctx->enc_cipher)) {
-		cpt_inst_w4.s.opcode_major = ROC_SE_MAJOR_OP_MISC;
-		/* Minor op is passthrough */
-		cpt_inst_w4.s.opcode_minor = 0x03;
-		/* Send out completion code only */
-		cpt_inst_w4.s.param2 = 0x1;
-	}
-
 	/* DPTR has SG list */
 	in_buffer = m_vaddr;
 
@@ -758,15 +752,6 @@ cpt_digest_gen_sg_ver2_prep(uint32_t flags, uint64_t d_lens, struct roc_se_fc_pa
 		cpt_inst_w4.s.dlen = data_len;
 	}
 
-	/* Null auth only case enters the if */
-	if (unlikely(!hash_type && !ctx->enc_cipher)) {
-		cpt_inst_w4.s.opcode_major = ROC_SE_MAJOR_OP_MISC;
-		/* Minor op is passthrough */
-		cpt_inst_w4.s.opcode_minor = 0x03;
-		/* Send out completion code only */
-		cpt_inst_w4.s.param2 = 0x1;
-	}
-
 	/* DPTR has SG list */
 
 	/* TODO Add error check if space will be sufficient */
@@ -2376,6 +2361,7 @@ prepare_iov_from_pkt_inplace(struct rte_mbuf *pkt,
 	iovec->buf_cnt = index;
 	return;
 }
+
 static __rte_always_inline int
 fill_fc_params(struct rte_crypto_op *cop, struct cnxk_se_sess *sess,
 	       struct cpt_qp_meta_info *m_info, struct cpt_inflight_req *infl_req,
@@ -2592,6 +2578,38 @@ fill_fc_params(struct rte_crypto_op *cop, struct cnxk_se_sess *sess,
 	return ret;
 }
 
+static inline int
+fill_passthrough_params(struct rte_crypto_op *cop, struct cpt_inst_s *inst)
+{
+	struct rte_crypto_sym_op *sym_op = cop->sym;
+	struct rte_mbuf *m_src, *m_dst;
+
+	const union cpt_inst_w4 w4 = {
+		.s.opcode_major = ROC_SE_MAJOR_OP_MISC,
+		.s.opcode_minor = ROC_SE_MISC_MINOR_OP_PASSTHROUGH,
+		.s.param1 = 1,
+		.s.param2 = 1,
+		.s.dlen = 0,
+	};
+
+	m_src = sym_op->m_src;
+	m_dst = sym_op->m_dst;
+
+	if (unlikely(m_dst != NULL && m_dst != m_src)) {
+		void *src = rte_pktmbuf_mtod_offset(m_src, void *, cop->sym->cipher.data.offset);
+		void *dst = rte_pktmbuf_mtod(m_dst, void *);
+		int data_len = cop->sym->cipher.data.length;
+
+		rte_memcpy(dst, src, data_len);
+	}
+
+	inst->w0.u64 = 0;
+	inst->w5.u64 = 0;
+	inst->w4.u64 = w4.u64;
+
+	return 0;
+}
+
 static __rte_always_inline int
 fill_pdcp_params(struct rte_crypto_op *cop, struct cnxk_se_sess *sess,
 		 struct cpt_qp_meta_info *m_info, struct cpt_inflight_req *infl_req,
@@ -3012,6 +3030,9 @@ cpt_sym_inst_fill(struct cnxk_cpt_qp *qp, struct rte_crypto_op *op, struct cnxk_
 	int ret;
 
 	switch (sess->dp_thr_type) {
+	case CPT_DP_THREAD_TYPE_PT:
+		ret = fill_passthrough_params(op, inst);
+		break;
 	case CPT_DP_THREAD_TYPE_PDCP:
 		ret = fill_pdcp_params(op, sess, &qp->meta_info, infl_req, inst, is_sg_ver2);
 		break;
-- 
2.25.1


* [PATCH 4/8] crypto/cnxk: enable context cache for 103XX
  2023-06-19 12:45 [PATCH 0/8] fixes and improvements to CNXK crypto PMD Tejasree Kondoj
                   ` (2 preceding siblings ...)
  2023-06-19 12:45 ` [PATCH 3/8] crypto/cnxk: use pt inst for null cipher with null auth Tejasree Kondoj
@ 2023-06-19 12:45 ` Tejasree Kondoj
  2023-06-19 12:45 ` [PATCH 5/8] crypto/cnxk: add support for raw APIs Tejasree Kondoj
  4 siblings, 0 replies; 6+ messages in thread
From: Tejasree Kondoj @ 2023-06-19 12:45 UTC (permalink / raw)
  To: Akhil Goyal
  Cc: Anoob Joseph, Aakash Sasidharan, Gowrishankar Muthukrishnan,
	Vidya Sagar Velumuri, dev

Enable the context cache for SE instructions on 106B0 and 103XX.

Signed-off-by: Tejasree Kondoj <ktejasree@marvell.com>
---
 drivers/crypto/cnxk/cnxk_cryptodev_ops.c | 6 +++---
 drivers/crypto/cnxk/cnxk_cryptodev_ops.h | 8 ++++++++
 2 files changed, 11 insertions(+), 3 deletions(-)

diff --git a/drivers/crypto/cnxk/cnxk_cryptodev_ops.c b/drivers/crypto/cnxk/cnxk_cryptodev_ops.c
index 2018b0eba5..d0c99d37e8 100644
--- a/drivers/crypto/cnxk/cnxk_cryptodev_ops.c
+++ b/drivers/crypto/cnxk/cnxk_cryptodev_ops.c
@@ -653,7 +653,7 @@ cnxk_cpt_inst_w7_get(struct cnxk_se_sess *sess, struct roc_cpt *roc_cpt)
 
 	inst_w7.s.cptr = (uint64_t)&sess->roc_se_ctx.se_ctx;
 
-	if (roc_errata_cpt_hang_on_mixed_ctx_val())
+	if (hw_ctx_cache_enable())
 		inst_w7.s.ctx_val = 1;
 	else
 		inst_w7.s.cptr += 8;
@@ -729,7 +729,7 @@ sym_session_configure(struct roc_cpt *roc_cpt, struct rte_crypto_sym_xform *xfor
 
 	sess_priv->cpt_inst_w7 = cnxk_cpt_inst_w7_get(sess_priv, roc_cpt);
 
-	if (roc_errata_cpt_hang_on_mixed_ctx_val())
+	if (hw_ctx_cache_enable())
 		roc_se_ctx_init(&sess_priv->roc_se_ctx);
 
 	return 0;
@@ -755,7 +755,7 @@ sym_session_clear(struct rte_cryptodev_sym_session *sess, bool is_session_less)
 	struct cnxk_se_sess *sess_priv = (struct cnxk_se_sess *)sess;
 
 	/* Trigger CTX flush + invalidate to remove from CTX_CACHE */
-	if (roc_errata_cpt_hang_on_mixed_ctx_val())
+	if (hw_ctx_cache_enable())
 		roc_cpt_lf_ctx_flush(sess_priv->lf, &sess_priv->roc_se_ctx.se_ctx, true);
 
 	if (sess_priv->roc_se_ctx.auth_key != NULL)
diff --git a/drivers/crypto/cnxk/cnxk_cryptodev_ops.h b/drivers/crypto/cnxk/cnxk_cryptodev_ops.h
index b1a40e8e25..6ee4cbda70 100644
--- a/drivers/crypto/cnxk/cnxk_cryptodev_ops.h
+++ b/drivers/crypto/cnxk/cnxk_cryptodev_ops.h
@@ -13,6 +13,7 @@
 #include "roc_constants.h"
 #include "roc_cpt.h"
 #include "roc_cpt_sg.h"
+#include "roc_errata.h"
 #include "roc_se.h"
 
 #define CNXK_CPT_MIN_HEADROOM_REQ	 32
@@ -180,4 +181,11 @@ alloc_op_meta(struct roc_se_buf_ptr *buf, int32_t len, struct rte_mempool *cpt_m
 
 	return mdata;
 }
+
+static __rte_always_inline bool
+hw_ctx_cache_enable(void)
+{
+	return roc_errata_cpt_hang_on_mixed_ctx_val() || roc_model_is_cn10ka_b0() ||
+	       roc_model_is_cn10kb_a0();
+}
 #endif /* _CNXK_CRYPTODEV_OPS_H_ */
-- 
2.25.1


* [PATCH 5/8] crypto/cnxk: add support for raw APIs
  2023-06-19 12:45 [PATCH 0/8] fixes and improvements to CNXK crypto PMD Tejasree Kondoj
                   ` (3 preceding siblings ...)
  2023-06-19 12:45 ` [PATCH 4/8] crypto/cnxk: enable context cache for 103XX Tejasree Kondoj
@ 2023-06-19 12:45 ` Tejasree Kondoj
  4 siblings, 0 replies; 6+ messages in thread
From: Tejasree Kondoj @ 2023-06-19 12:45 UTC (permalink / raw)
  To: Akhil Goyal
  Cc: Anoob Joseph, Aakash Sasidharan, Gowrishankar Muthukrishnan,
	Vidya Sagar Velumuri, dev

From: Anoob Joseph <anoobj@marvell.com>

Add crypto RAW API support in the cnxk PMD.
Enable the feature flag to allow execution of the raw test suite.
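
For context, a sketch (hypothetical helper; the device, queue pair and
symmetric session are assumed to be set up already) of how an
application reaches the new callbacks through the generic, experimental
cryptodev raw data-path API:

#include <stdlib.h>
#include <rte_cryptodev.h>

/*
 * Hypothetical setup of a raw data-path context for a symmetric
 * session. This lands in the PMD's sym_get_raw_dp_ctx_size and
 * sym_configure_raw_dp_ctx callbacks added by this patch.
 */
static struct rte_crypto_raw_dp_ctx *
setup_raw_dp_ctx(uint8_t dev_id, uint16_t qp_id, void *sym_session)
{
	union rte_cryptodev_session_ctx sess_ctx;
	struct rte_crypto_raw_dp_ctx *ctx;
	int size;

	size = rte_cryptodev_get_raw_dp_ctx_size(dev_id);
	if (size < 0)
		return NULL; /* raw data-path API not supported */

	ctx = calloc(1, size);
	if (ctx == NULL)
		return NULL;

	sess_ctx.crypto_sess = sym_session;
	if (rte_cryptodev_configure_raw_dp_ctx(dev_id, qp_id, ctx,
					       RTE_CRYPTO_OP_WITH_SESSION,
					       sess_ctx, 0) < 0) {
		free(ctx);
		return NULL;
	}

	return ctx;
}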

Signed-off-by: Vidya Sagar Velumuri <vvelumuri@marvell.com>
Signed-off-by: Anoob Joseph <anoobj@marvell.com>
---
 drivers/crypto/cnxk/cn10k_cryptodev_ops.c | 459 ++++++++++++++++++++++
 drivers/crypto/cnxk/cnxk_cryptodev.c      |  20 +-
 drivers/crypto/cnxk/cnxk_cryptodev_ops.h  |   1 +
 drivers/crypto/cnxk/cnxk_se.h             | 293 ++++++++++++++
 4 files changed, 760 insertions(+), 13 deletions(-)

diff --git a/drivers/crypto/cnxk/cn10k_cryptodev_ops.c b/drivers/crypto/cnxk/cn10k_cryptodev_ops.c
index e405a2ad9f..47b0e3a6f3 100644
--- a/drivers/crypto/cnxk/cn10k_cryptodev_ops.c
+++ b/drivers/crypto/cnxk/cn10k_cryptodev_ops.c
@@ -1064,6 +1064,461 @@ cn10k_cpt_dev_info_get(struct rte_cryptodev *dev,
 	}
 }
 
+static inline int
+cn10k_cpt_raw_fill_inst(struct cnxk_iov *iov, struct cnxk_cpt_qp *qp,
+			struct cnxk_sym_dp_ctx *dp_ctx, struct cpt_inst_s inst[],
+			struct cpt_inflight_req *infl_req, void *opaque, const bool is_sg_ver2)
+{
+	struct cnxk_se_sess *sess;
+	int ret;
+
+	const union cpt_res_s res = {
+		.cn10k.compcode = CPT_COMP_NOT_DONE,
+	};
+
+	inst[0].w0.u64 = 0;
+	inst[0].w2.u64 = 0;
+	inst[0].w3.u64 = 0;
+
+	sess = dp_ctx->sess;
+
+	switch (sess->dp_thr_type) {
+	case CPT_DP_THREAD_TYPE_PT:
+		ret = fill_raw_passthrough_params(iov, inst);
+		break;
+	case CPT_DP_THREAD_TYPE_FC_CHAIN:
+		ret = fill_raw_fc_params(iov, sess, &qp->meta_info, infl_req, &inst[0], false,
+					 false, is_sg_ver2);
+		break;
+	case CPT_DP_THREAD_TYPE_FC_AEAD:
+		ret = fill_raw_fc_params(iov, sess, &qp->meta_info, infl_req, &inst[0], false, true,
+					 is_sg_ver2);
+		break;
+	case CPT_DP_THREAD_AUTH_ONLY:
+		ret = fill_raw_digest_params(iov, sess, &qp->meta_info, infl_req, &inst[0],
+					     is_sg_ver2);
+		break;
+	default:
+		ret = -EINVAL;
+	}
+
+	if (unlikely(ret))
+		return 0;
+
+	inst[0].res_addr = (uint64_t)&infl_req->res;
+	__atomic_store_n(&infl_req->res.u64[0], res.u64[0], __ATOMIC_RELAXED);
+	infl_req->opaque = opaque;
+
+	inst[0].w7.u64 = sess->cpt_inst_w7;
+
+	return 1;
+}
+
+static uint32_t
+cn10k_cpt_raw_enqueue_burst(void *qpair, uint8_t *drv_ctx, struct rte_crypto_sym_vec *vec,
+			    union rte_crypto_sym_ofs ofs, void *user_data[], int *enqueue_status,
+			    const bool is_sgv2)
+{
+	uint16_t lmt_id, nb_allowed, nb_ops = vec->num;
+	uint64_t lmt_base, lmt_arg, io_addr, head;
+	struct cpt_inflight_req *infl_req;
+	struct cnxk_cpt_qp *qp = qpair;
+	struct cnxk_sym_dp_ctx *dp_ctx;
+	struct pending_queue *pend_q;
+	uint32_t count = 0, index;
+	union cpt_fc_write_s fc;
+	struct cpt_inst_s *inst;
+	uint64_t *fc_addr;
+	int ret, i;
+
+	pend_q = &qp->pend_q;
+	const uint64_t pq_mask = pend_q->pq_mask;
+
+	head = pend_q->head;
+	nb_allowed = pending_queue_free_cnt(head, pend_q->tail, pq_mask);
+	nb_ops = RTE_MIN(nb_ops, nb_allowed);
+
+	if (unlikely(nb_ops == 0))
+		return 0;
+
+	lmt_base = qp->lmtline.lmt_base;
+	io_addr = qp->lmtline.io_addr;
+	fc_addr = qp->lmtline.fc_addr;
+
+	const uint32_t fc_thresh = qp->lmtline.fc_thresh;
+
+	ROC_LMT_BASE_ID_GET(lmt_base, lmt_id);
+	inst = (struct cpt_inst_s *)lmt_base;
+
+	dp_ctx = (struct cnxk_sym_dp_ctx *)drv_ctx;
+again:
+	fc.u64[0] = __atomic_load_n(fc_addr, __ATOMIC_RELAXED);
+	if (unlikely(fc.s.qsize > fc_thresh)) {
+		i = 0;
+		goto pend_q_commit;
+	}
+
+	for (i = 0; i < RTE_MIN(PKTS_PER_LOOP, nb_ops); i++) {
+		struct cnxk_iov iov;
+
+		index = count + i;
+		infl_req = &pend_q->req_queue[head];
+		infl_req->op_flags = 0;
+
+		cnxk_raw_burst_to_iov(vec, &ofs, index, &iov);
+		ret = cn10k_cpt_raw_fill_inst(&iov, qp, dp_ctx, &inst[2 * i], infl_req,
+					      user_data[index], is_sgv2);
+		if (unlikely(ret != 1)) {
+			plt_dp_err("Could not process vec: %d", index);
+			if (i == 0 && count == 0)
+				return -1;
+			else if (i == 0)
+				goto pend_q_commit;
+			else
+				break;
+		}
+		pending_queue_advance(&head, pq_mask);
+	}
+
+	if (i > PKTS_PER_STEORL) {
+		lmt_arg = ROC_CN10K_CPT_LMT_ARG | (PKTS_PER_STEORL - 1) << 12 | (uint64_t)lmt_id;
+		roc_lmt_submit_steorl(lmt_arg, io_addr);
+		lmt_arg = ROC_CN10K_CPT_LMT_ARG | (i - PKTS_PER_STEORL - 1) << 12 |
+			  (uint64_t)(lmt_id + PKTS_PER_STEORL);
+		roc_lmt_submit_steorl(lmt_arg, io_addr);
+	} else {
+		lmt_arg = ROC_CN10K_CPT_LMT_ARG | (i - 1) << 12 | (uint64_t)lmt_id;
+		roc_lmt_submit_steorl(lmt_arg, io_addr);
+	}
+
+	rte_io_wmb();
+
+	if (nb_ops - i > 0 && i == PKTS_PER_LOOP) {
+		nb_ops -= i;
+		count += i;
+		goto again;
+	}
+
+pend_q_commit:
+	rte_atomic_thread_fence(__ATOMIC_RELEASE);
+
+	pend_q->head = head;
+	pend_q->time_out = rte_get_timer_cycles() + DEFAULT_COMMAND_TIMEOUT * rte_get_timer_hz();
+
+	*enqueue_status = 1;
+	return count + i;
+}
+
+static uint32_t
+cn10k_cpt_raw_enqueue_burst_sgv2(void *qpair, uint8_t *drv_ctx, struct rte_crypto_sym_vec *vec,
+				 union rte_crypto_sym_ofs ofs, void *user_data[],
+				 int *enqueue_status)
+{
+	return cn10k_cpt_raw_enqueue_burst(qpair, drv_ctx, vec, ofs, user_data, enqueue_status,
+					   true);
+}
+
+static uint32_t
+cn10k_cpt_raw_enqueue_burst_sgv1(void *qpair, uint8_t *drv_ctx, struct rte_crypto_sym_vec *vec,
+				 union rte_crypto_sym_ofs ofs, void *user_data[],
+				 int *enqueue_status)
+{
+	return cn10k_cpt_raw_enqueue_burst(qpair, drv_ctx, vec, ofs, user_data, enqueue_status,
+					   false);
+}
+
+static int
+cn10k_cpt_raw_enqueue(void *qpair, uint8_t *drv_ctx, struct rte_crypto_vec *data_vec,
+		      uint16_t n_data_vecs, union rte_crypto_sym_ofs ofs,
+		      struct rte_crypto_va_iova_ptr *iv, struct rte_crypto_va_iova_ptr *digest,
+		      struct rte_crypto_va_iova_ptr *aad_or_auth_iv, void *user_data,
+		      const bool is_sgv2)
+{
+	uint64_t lmt_base, lmt_arg, io_addr, head;
+	struct cpt_inflight_req *infl_req;
+	struct cnxk_cpt_qp *qp = qpair;
+	struct cnxk_sym_dp_ctx *dp_ctx;
+	uint16_t lmt_id, nb_allowed;
+	struct cpt_inst_s *inst;
+	union cpt_fc_write_s fc;
+	struct cnxk_iov iov;
+	uint64_t *fc_addr;
+	int ret;
+
+	struct pending_queue *pend_q = &qp->pend_q;
+	const uint64_t pq_mask = pend_q->pq_mask;
+	const uint32_t fc_thresh = qp->lmtline.fc_thresh;
+
+	head = pend_q->head;
+	nb_allowed = pending_queue_free_cnt(head, pend_q->tail, pq_mask);
+
+	if (unlikely(nb_allowed == 0))
+		return -1;
+
+	cnxk_raw_to_iov(data_vec, n_data_vecs, &ofs, iv, digest, aad_or_auth_iv, &iov);
+
+	lmt_base = qp->lmtline.lmt_base;
+	io_addr = qp->lmtline.io_addr;
+	fc_addr = qp->lmtline.fc_addr;
+
+	ROC_LMT_BASE_ID_GET(lmt_base, lmt_id);
+	inst = (struct cpt_inst_s *)lmt_base;
+
+	fc.u64[0] = __atomic_load_n(fc_addr, __ATOMIC_RELAXED);
+	if (unlikely(fc.s.qsize > fc_thresh))
+		return -1;
+
+	dp_ctx = (struct cnxk_sym_dp_ctx *)drv_ctx;
+	infl_req = &pend_q->req_queue[head];
+	infl_req->op_flags = 0;
+
+	ret = cn10k_cpt_raw_fill_inst(&iov, qp, dp_ctx, &inst[0], infl_req, user_data, is_sgv2);
+	if (unlikely(ret != 1)) {
+		plt_dp_err("Could not process vec");
+		return -1;
+	}
+
+	pending_queue_advance(&head, pq_mask);
+
+	lmt_arg = ROC_CN10K_CPT_LMT_ARG | (uint64_t)lmt_id;
+	roc_lmt_submit_steorl(lmt_arg, io_addr);
+
+	rte_io_wmb();
+
+	pend_q->head = head;
+	pend_q->time_out = rte_get_timer_cycles() + DEFAULT_COMMAND_TIMEOUT * rte_get_timer_hz();
+
+	return 1;
+}
+
+static int
+cn10k_cpt_raw_enqueue_sgv2(void *qpair, uint8_t *drv_ctx, struct rte_crypto_vec *data_vec,
+			   uint16_t n_data_vecs, union rte_crypto_sym_ofs ofs,
+			   struct rte_crypto_va_iova_ptr *iv, struct rte_crypto_va_iova_ptr *digest,
+			   struct rte_crypto_va_iova_ptr *aad_or_auth_iv, void *user_data)
+{
+	return cn10k_cpt_raw_enqueue(qpair, drv_ctx, data_vec, n_data_vecs, ofs, iv, digest,
+				     aad_or_auth_iv, user_data, true);
+}
+
+static int
+cn10k_cpt_raw_enqueue_sgv1(void *qpair, uint8_t *drv_ctx, struct rte_crypto_vec *data_vec,
+			   uint16_t n_data_vecs, union rte_crypto_sym_ofs ofs,
+			   struct rte_crypto_va_iova_ptr *iv, struct rte_crypto_va_iova_ptr *digest,
+			   struct rte_crypto_va_iova_ptr *aad_or_auth_iv, void *user_data)
+{
+	return cn10k_cpt_raw_enqueue(qpair, drv_ctx, data_vec, n_data_vecs, ofs, iv, digest,
+				     aad_or_auth_iv, user_data, false);
+}
+
+static inline int
+cn10k_cpt_raw_dequeue_post_process(struct cpt_cn10k_res_s *res)
+{
+	const uint8_t uc_compcode = res->uc_compcode;
+	const uint8_t compcode = res->compcode;
+	int ret = 1;
+
+	if (likely(compcode == CPT_COMP_GOOD)) {
+		if (unlikely(uc_compcode))
+			plt_dp_info("Request failed with microcode error: 0x%x", res->uc_compcode);
+		else
+			ret = 0;
+	}
+
+	return ret;
+}
+
+static uint32_t
+cn10k_cpt_sym_raw_dequeue_burst(void *qptr, uint8_t *drv_ctx,
+				rte_cryptodev_raw_get_dequeue_count_t get_dequeue_count,
+				uint32_t max_nb_to_dequeue,
+				rte_cryptodev_raw_post_dequeue_t post_dequeue, void **out_user_data,
+				uint8_t is_user_data_array, uint32_t *n_success,
+				int *dequeue_status)
+{
+	struct cpt_inflight_req *infl_req;
+	struct cnxk_cpt_qp *qp = qptr;
+	struct pending_queue *pend_q;
+	uint64_t infl_cnt, pq_tail;
+	union cpt_res_s res;
+	int is_op_success;
+	uint16_t nb_ops;
+	void *opaque;
+	int i = 0;
+
+	pend_q = &qp->pend_q;
+
+	const uint64_t pq_mask = pend_q->pq_mask;
+
+	RTE_SET_USED(drv_ctx);
+	pq_tail = pend_q->tail;
+	infl_cnt = pending_queue_infl_cnt(pend_q->head, pq_tail, pq_mask);
+
+	/* Ensure infl_cnt isn't read before data lands */
+	rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
+
+	infl_req = &pend_q->req_queue[pq_tail];
+
+	opaque = infl_req->opaque;
+	if (get_dequeue_count)
+		nb_ops = get_dequeue_count(opaque);
+	else
+		nb_ops = max_nb_to_dequeue;
+	nb_ops = RTE_MIN(nb_ops, infl_cnt);
+
+	for (i = 0; i < nb_ops; i++) {
+		is_op_success = 0;
+		infl_req = &pend_q->req_queue[pq_tail];
+
+		res.u64[0] = __atomic_load_n(&infl_req->res.u64[0], __ATOMIC_RELAXED);
+
+		if (unlikely(res.cn10k.compcode == CPT_COMP_NOT_DONE)) {
+			if (unlikely(rte_get_timer_cycles() > pend_q->time_out)) {
+				plt_err("Request timed out");
+				cnxk_cpt_dump_on_err(qp);
+				pend_q->time_out = rte_get_timer_cycles() +
+						   DEFAULT_COMMAND_TIMEOUT * rte_get_timer_hz();
+			}
+			break;
+		}
+
+		pending_queue_advance(&pq_tail, pq_mask);
+
+		if (!cn10k_cpt_raw_dequeue_post_process(&res.cn10k)) {
+			is_op_success = 1;
+			*n_success += 1;
+		}
+
+		if (is_user_data_array) {
+			out_user_data[i] = infl_req->opaque;
+			post_dequeue(out_user_data[i], i, is_op_success);
+		} else {
+			if (i == 0)
+				out_user_data[0] = opaque;
+			post_dequeue(out_user_data[0], i, is_op_success);
+		}
+
+		if (unlikely(infl_req->op_flags & CPT_OP_FLAGS_METABUF))
+			rte_mempool_put(qp->meta_info.pool, infl_req->mdata);
+	}
+
+	pend_q->tail = pq_tail;
+	*dequeue_status = 1;
+
+	return i;
+}
+
+static void *
+cn10k_cpt_sym_raw_dequeue(void *qptr, uint8_t *drv_ctx, int *dequeue_status,
+			  enum rte_crypto_op_status *op_status)
+{
+	struct cpt_inflight_req *infl_req;
+	struct cnxk_cpt_qp *qp = qptr;
+	struct pending_queue *pend_q;
+	uint64_t pq_tail;
+	union cpt_res_s res;
+	void *opaque = NULL;
+
+	pend_q = &qp->pend_q;
+
+	const uint64_t pq_mask = pend_q->pq_mask;
+
+	RTE_SET_USED(drv_ctx);
+
+	pq_tail = pend_q->tail;
+
+	rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
+
+	infl_req = &pend_q->req_queue[pq_tail];
+
+	res.u64[0] = __atomic_load_n(&infl_req->res.u64[0], __ATOMIC_RELAXED);
+
+	if (unlikely(res.cn10k.compcode == CPT_COMP_NOT_DONE)) {
+		if (unlikely(rte_get_timer_cycles() > pend_q->time_out)) {
+			plt_err("Request timed out");
+			cnxk_cpt_dump_on_err(qp);
+			pend_q->time_out = rte_get_timer_cycles() +
+					   DEFAULT_COMMAND_TIMEOUT * rte_get_timer_hz();
+		}
+		goto exit;
+	}
+
+	pending_queue_advance(&pq_tail, pq_mask);
+
+	opaque = infl_req->opaque;
+
+	if (!cn10k_cpt_raw_dequeue_post_process(&res.cn10k))
+		*op_status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+	else
+		*op_status = RTE_CRYPTO_OP_STATUS_ERROR;
+
+	if (unlikely(infl_req->op_flags & CPT_OP_FLAGS_METABUF))
+		rte_mempool_put(qp->meta_info.pool, infl_req->mdata);
+
+	*dequeue_status = 1;
+exit:
+	return opaque;
+}
+
+static int
+cn10k_sym_get_raw_dp_ctx_size(struct rte_cryptodev *dev __rte_unused)
+{
+	return sizeof(struct cnxk_sym_dp_ctx);
+}
+
+static int
+cn10k_sym_configure_raw_dp_ctx(struct rte_cryptodev *dev, uint16_t qp_id,
+			       struct rte_crypto_raw_dp_ctx *raw_dp_ctx,
+			       enum rte_crypto_op_sess_type sess_type,
+			       union rte_cryptodev_session_ctx session_ctx, uint8_t is_update)
+{
+	struct cnxk_se_sess *sess = (struct cnxk_se_sess *)session_ctx.crypto_sess;
+	struct cnxk_sym_dp_ctx *dp_ctx;
+
+	if (sess_type != RTE_CRYPTO_OP_WITH_SESSION)
+		return -ENOTSUP;
+
+	if (sess == NULL)
+		return -EINVAL;
+
+	if ((sess->dp_thr_type == CPT_DP_THREAD_TYPE_PDCP) ||
+	    (sess->dp_thr_type == CPT_DP_THREAD_TYPE_PDCP_CHAIN) ||
+	    (sess->dp_thr_type == CPT_DP_THREAD_TYPE_KASUMI))
+		return -ENOTSUP;
+
+	if ((sess->dp_thr_type == CPT_DP_THREAD_AUTH_ONLY) &&
+	    ((sess->roc_se_ctx.fc_type == ROC_SE_KASUMI) ||
+	     (sess->roc_se_ctx.fc_type == ROC_SE_PDCP)))
+		return -ENOTSUP;
+
+	if ((sess->roc_se_ctx.hash_type == ROC_SE_GMAC_TYPE) ||
+	    (sess->roc_se_ctx.hash_type == ROC_SE_SHA1_TYPE))
+		return -ENOTSUP;
+
+	dp_ctx = (struct cnxk_sym_dp_ctx *)raw_dp_ctx->drv_ctx_data;
+	dp_ctx->sess = sess;
+
+	if (!is_update) {
+		struct cnxk_cpt_vf *vf;
+
+		raw_dp_ctx->qp_data = (struct cnxk_cpt_qp *)dev->data->queue_pairs[qp_id];
+		raw_dp_ctx->dequeue = cn10k_cpt_sym_raw_dequeue;
+		raw_dp_ctx->dequeue_burst = cn10k_cpt_sym_raw_dequeue_burst;
+
+		vf = dev->data->dev_private;
+		if (vf->cpt.hw_caps[CPT_ENG_TYPE_SE].sg_ver2 &&
+		    vf->cpt.hw_caps[CPT_ENG_TYPE_IE].sg_ver2) {
+			raw_dp_ctx->enqueue = cn10k_cpt_raw_enqueue_sgv2;
+			raw_dp_ctx->enqueue_burst = cn10k_cpt_raw_enqueue_burst_sgv2;
+		} else {
+			raw_dp_ctx->enqueue = cn10k_cpt_raw_enqueue_sgv1;
+			raw_dp_ctx->enqueue_burst = cn10k_cpt_raw_enqueue_burst_sgv1;
+		}
+	}
+
+	return 0;
+}
+
 struct rte_cryptodev_ops cn10k_cpt_ops = {
 	/* Device control ops */
 	.dev_configure = cnxk_cpt_dev_config,
@@ -1090,4 +1545,8 @@ struct rte_cryptodev_ops cn10k_cpt_ops = {
 	/* Event crypto ops */
 	.session_ev_mdata_set = cn10k_cpt_crypto_adapter_ev_mdata_set,
 	.queue_pair_event_error_query = cnxk_cpt_queue_pair_event_error_query,
+
+	/* Raw data-path API related operations */
+	.sym_get_raw_dp_ctx_size = cn10k_sym_get_raw_dp_ctx_size,
+	.sym_configure_raw_dp_ctx = cn10k_sym_configure_raw_dp_ctx,
 };
diff --git a/drivers/crypto/cnxk/cnxk_cryptodev.c b/drivers/crypto/cnxk/cnxk_cryptodev.c
index 4fa1907cea..4819a14184 100644
--- a/drivers/crypto/cnxk/cnxk_cryptodev.c
+++ b/drivers/crypto/cnxk/cnxk_cryptodev.c
@@ -13,22 +13,16 @@
 uint64_t
 cnxk_cpt_default_ff_get(void)
 {
-	uint64_t ff = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
-		      RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO |
-		      RTE_CRYPTODEV_FF_HW_ACCELERATED |
-		      RTE_CRYPTODEV_FF_RSA_PRIV_OP_KEY_QT |
+	uint64_t ff = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO | RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO |
+		      RTE_CRYPTODEV_FF_HW_ACCELERATED | RTE_CRYPTODEV_FF_RSA_PRIV_OP_KEY_QT |
 		      RTE_CRYPTODEV_FF_RSA_PRIV_OP_KEY_EXP |
-		      RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
-		      RTE_CRYPTODEV_FF_IN_PLACE_SGL |
-		      RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT |
-		      RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
-		      RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT |
-		      RTE_CRYPTODEV_FF_SYM_SESSIONLESS |
-		      RTE_CRYPTODEV_FF_DIGEST_ENCRYPTED |
-		      RTE_CRYPTODEV_FF_SECURITY;
+		      RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING | RTE_CRYPTODEV_FF_IN_PLACE_SGL |
+		      RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT | RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
+		      RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT | RTE_CRYPTODEV_FF_SYM_SESSIONLESS |
+		      RTE_CRYPTODEV_FF_DIGEST_ENCRYPTED | RTE_CRYPTODEV_FF_SECURITY;
 
 	if (roc_model_is_cn10k())
-		ff |= RTE_CRYPTODEV_FF_SECURITY_INNER_CSUM;
+		ff |= RTE_CRYPTODEV_FF_SECURITY_INNER_CSUM | RTE_CRYPTODEV_FF_SYM_RAW_DP;
 
 	return ff;
 }
diff --git a/drivers/crypto/cnxk/cnxk_cryptodev_ops.h b/drivers/crypto/cnxk/cnxk_cryptodev_ops.h
index 6ee4cbda70..4a8eb0890b 100644
--- a/drivers/crypto/cnxk/cnxk_cryptodev_ops.h
+++ b/drivers/crypto/cnxk/cnxk_cryptodev_ops.h
@@ -44,6 +44,7 @@ struct cpt_qp_meta_info {
 struct cpt_inflight_req {
 	union cpt_res_s res;
 	union {
+		void *opaque;
 		struct rte_crypto_op *cop;
 		struct rte_event_vector *vec;
 	};
diff --git a/drivers/crypto/cnxk/cnxk_se.h b/drivers/crypto/cnxk/cnxk_se.h
index ceb50fa3b6..9f3bff3e68 100644
--- a/drivers/crypto/cnxk/cnxk_se.h
+++ b/drivers/crypto/cnxk/cnxk_se.h
@@ -63,6 +63,23 @@ struct cnxk_se_sess {
 	struct roc_cpt_lf *lf;
 } __rte_aligned(ROC_ALIGN);
 
+struct cnxk_sym_dp_ctx {
+	struct cnxk_se_sess *sess;
+};
+
+struct cnxk_iov {
+	char src[SRC_IOV_SIZE];
+	char dst[SRC_IOV_SIZE];
+	void *iv_buf;
+	void *aad_buf;
+	void *mac_buf;
+	uint16_t c_head;
+	uint16_t c_tail;
+	uint16_t a_head;
+	uint16_t a_tail;
+	int data_len;
+};
+
 static __rte_always_inline int fill_sess_gmac(struct rte_crypto_sym_xform *xform,
 					      struct cnxk_se_sess *sess);
 
@@ -3061,4 +3078,280 @@ cpt_sym_inst_fill(struct cnxk_cpt_qp *qp, struct rte_crypto_op *op, struct cnxk_
 	return ret;
 }
 
+static __rte_always_inline uint32_t
+prepare_iov_from_raw_vec(struct rte_crypto_vec *vec, struct roc_se_iov_ptr *iovec, uint32_t num)
+{
+	uint32_t i, total_len = 0;
+
+	for (i = 0; i < num; i++) {
+		iovec->bufs[i].vaddr = vec[i].base;
+		iovec->bufs[i].size = vec[i].len;
+
+		total_len += vec[i].len;
+	}
+
+	iovec->buf_cnt = i;
+	return total_len;
+}
+
+static __rte_always_inline void
+cnxk_raw_burst_to_iov(struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs *ofs, int index,
+		      struct cnxk_iov *iov)
+{
+	iov->iv_buf = vec->iv[index].va;
+	iov->aad_buf = vec->aad[index].va;
+	iov->mac_buf = vec->digest[index].va;
+
+	iov->data_len =
+		prepare_iov_from_raw_vec(vec->src_sgl[index].vec, (struct roc_se_iov_ptr *)iov->src,
+					 vec->src_sgl[index].num);
+
+	if (vec->dest_sgl == NULL)
+		prepare_iov_from_raw_vec(vec->src_sgl[index].vec, (struct roc_se_iov_ptr *)iov->dst,
+					 vec->src_sgl[index].num);
+	else
+		prepare_iov_from_raw_vec(vec->dest_sgl[index].vec,
+					 (struct roc_se_iov_ptr *)iov->dst,
+					 vec->dest_sgl[index].num);
+
+	iov->c_head = ofs->ofs.cipher.head;
+	iov->c_tail = ofs->ofs.cipher.tail;
+
+	iov->a_head = ofs->ofs.auth.head;
+	iov->a_tail = ofs->ofs.auth.tail;
+}
+
+static __rte_always_inline void
+cnxk_raw_to_iov(struct rte_crypto_vec *data_vec, uint16_t n_vecs, union rte_crypto_sym_ofs *ofs,
+		struct rte_crypto_va_iova_ptr *iv, struct rte_crypto_va_iova_ptr *digest,
+		struct rte_crypto_va_iova_ptr *aad, struct cnxk_iov *iov)
+{
+	iov->iv_buf = iv->va;
+	iov->aad_buf = aad->va;
+	iov->mac_buf = digest->va;
+
+	iov->data_len =
+		prepare_iov_from_raw_vec(data_vec, (struct roc_se_iov_ptr *)iov->src, n_vecs);
+	prepare_iov_from_raw_vec(data_vec, (struct roc_se_iov_ptr *)iov->dst, n_vecs);
+
+	iov->c_head = ofs->ofs.cipher.head;
+	iov->c_tail = ofs->ofs.cipher.tail;
+
+	iov->a_head = ofs->ofs.auth.head;
+	iov->a_tail = ofs->ofs.auth.tail;
+}
+
+static inline void
+raw_memcpy(struct cnxk_iov *iov)
+{
+	struct roc_se_iov_ptr *src = (struct roc_se_iov_ptr *)iov->src;
+	struct roc_se_iov_ptr *dst = (struct roc_se_iov_ptr *)iov->dst;
+	int num = src->buf_cnt;
+	int i;
+
+	/* skip copy in case of inplace */
+	if (dst->bufs[0].vaddr == src->bufs[0].vaddr)
+		return;
+
+	for (i = 0; i < num; i++) {
+		rte_memcpy(dst->bufs[i].vaddr, src->bufs[i].vaddr, src->bufs[i].size);
+		dst->bufs[i].size = src->bufs[i].size;
+	}
+}
+
+static inline int
+fill_raw_passthrough_params(struct cnxk_iov *iov, struct cpt_inst_s *inst)
+{
+	const union cpt_inst_w4 w4 = {
+		.s.opcode_major = ROC_SE_MAJOR_OP_MISC,
+		.s.opcode_minor = ROC_SE_MISC_MINOR_OP_PASSTHROUGH,
+		.s.param1 = 1,
+		.s.param2 = 1,
+		.s.dlen = 0,
+	};
+
+	inst->w0.u64 = 0;
+	inst->w5.u64 = 0;
+	inst->w4.u64 = w4.u64;
+
+	raw_memcpy(iov);
+
+	return 0;
+}
+
+static __rte_always_inline int
+fill_raw_fc_params(struct cnxk_iov *iov, struct cnxk_se_sess *sess, struct cpt_qp_meta_info *m_info,
+		   struct cpt_inflight_req *infl_req, struct cpt_inst_s *inst, const bool is_kasumi,
+		   const bool is_aead, const bool is_sg_ver2)
+{
+	uint32_t cipher_len, auth_len = 0;
+	struct roc_se_fc_params fc_params;
+	uint8_t cpt_op = sess->cpt_op;
+	uint64_t d_offs, d_lens;
+	uint8_t ccm_iv_buf[16];
+	uint32_t flags = 0;
+	void *mdata = NULL;
+	uint32_t iv_buf[4];
+	int ret;
+
+	fc_params.cipher_iv_len = sess->iv_length;
+	fc_params.ctx = &sess->roc_se_ctx;
+	fc_params.auth_iv_buf = NULL;
+	fc_params.auth_iv_len = 0;
+	fc_params.mac_buf.size = 0;
+	fc_params.mac_buf.vaddr = 0;
+	fc_params.iv_buf = NULL;
+
+	if (likely(is_kasumi || sess->iv_length)) {
+		flags |= ROC_SE_VALID_IV_BUF;
+		fc_params.iv_buf = iov->iv_buf;
+
+		if (sess->short_iv) {
+			memcpy((uint8_t *)iv_buf, iov->iv_buf, 12);
+			iv_buf[3] = rte_cpu_to_be_32(0x1);
+			fc_params.iv_buf = iv_buf;
+		}
+
+		if (sess->aes_ccm) {
+			memcpy((uint8_t *)ccm_iv_buf, iov->iv_buf, sess->iv_length + 1);
+			ccm_iv_buf[0] = 14 - sess->iv_length;
+			fc_params.iv_buf = ccm_iv_buf;
+		}
+	}
+
+	fc_params.src_iov = (void *)iov->src;
+	fc_params.dst_iov = (void *)iov->dst;
+
+	cipher_len = iov->data_len - iov->c_head - iov->c_tail;
+	auth_len = iov->data_len - iov->a_head - iov->a_tail;
+
+	d_offs = (iov->c_head << 16) | iov->a_head;
+	d_lens = ((uint64_t)cipher_len << 32) | auth_len;
+
+	if (is_aead) {
+		uint16_t aad_len = sess->aad_length;
+
+		if (likely(aad_len == 0)) {
+			d_offs = (iov->c_head << 16) | iov->c_head;
+			d_lens = ((uint64_t)cipher_len << 32) | cipher_len;
+		} else {
+			flags |= ROC_SE_VALID_AAD_BUF;
+			fc_params.aad_buf.size = sess->aad_length;
+			/* For AES CCM, AAD is written 18B after aad.data as per API */
+			if (sess->aes_ccm)
+				fc_params.aad_buf.vaddr = PLT_PTR_ADD((uint8_t *)iov->aad_buf, 18);
+			else
+				fc_params.aad_buf.vaddr = iov->aad_buf;
+
+			d_offs = (iov->c_head << 16);
+			d_lens = ((uint64_t)cipher_len << 32);
+		}
+	}
+
+	if (likely(sess->mac_len)) {
+		flags |= ROC_SE_VALID_MAC_BUF;
+		fc_params.mac_buf.size = sess->mac_len;
+		fc_params.mac_buf.vaddr = iov->mac_buf;
+	}
+
+	fc_params.meta_buf.vaddr = NULL;
+	mdata = alloc_op_meta(&fc_params.meta_buf, m_info->mlen, m_info->pool, infl_req);
+	if (mdata == NULL) {
+		plt_dp_err("Error allocating meta buffer for request");
+		return -ENOMEM;
+	}
+
+	if (is_kasumi) {
+		if (cpt_op & ROC_SE_OP_ENCODE)
+			ret = cpt_enc_hmac_prep(flags, d_offs, d_lens, &fc_params, inst,
+						is_sg_ver2);
+		else
+			ret = cpt_dec_hmac_prep(flags, d_offs, d_lens, &fc_params, inst,
+						is_sg_ver2);
+	} else {
+		if (cpt_op & ROC_SE_OP_ENCODE)
+			ret = cpt_enc_hmac_prep(flags, d_offs, d_lens, &fc_params, inst,
+						is_sg_ver2);
+		else
+			ret = cpt_dec_hmac_prep(flags, d_offs, d_lens, &fc_params, inst,
+						is_sg_ver2);
+	}
+
+	if (unlikely(ret)) {
+		plt_dp_err("Preparing request failed due to bad input arg");
+		goto free_mdata_and_exit;
+	}
+
+	return 0;
+
+free_mdata_and_exit:
+	rte_mempool_put(m_info->pool, infl_req->mdata);
+	return ret;
+}
+
+static __rte_always_inline int
+fill_raw_digest_params(struct cnxk_iov *iov, struct cnxk_se_sess *sess,
+		       struct cpt_qp_meta_info *m_info, struct cpt_inflight_req *infl_req,
+		       struct cpt_inst_s *inst, const bool is_sg_ver2)
+{
+	uint16_t auth_op = sess->cpt_op & ROC_SE_OP_AUTH_MASK;
+	struct roc_se_fc_params fc_params;
+	uint16_t mac_len = sess->mac_len;
+	uint64_t d_offs, d_lens;
+	uint32_t auth_len = 0;
+	uint32_t flags = 0;
+	void *mdata = NULL;
+	uint32_t space = 0;
+	int ret;
+
+	memset(&fc_params, 0, sizeof(struct roc_se_fc_params));
+	fc_params.cipher_iv_len = sess->iv_length;
+	fc_params.ctx = &sess->roc_se_ctx;
+
+	mdata = alloc_op_meta(&fc_params.meta_buf, m_info->mlen, m_info->pool, infl_req);
+	if (mdata == NULL) {
+		plt_dp_err("Error allocating meta buffer for request");
+		ret = -ENOMEM;
+		goto err_exit;
+	}
+
+	flags |= ROC_SE_VALID_MAC_BUF;
+	fc_params.src_iov = (void *)iov->src;
+	auth_len = iov->data_len - iov->a_head - iov->a_tail;
+	d_lens = auth_len;
+	d_offs = iov->a_head;
+
+	if (auth_op == ROC_SE_OP_AUTH_GENERATE) {
+		fc_params.mac_buf.size = sess->mac_len;
+		fc_params.mac_buf.vaddr = iov->mac_buf;
+	} else {
+		uint64_t *op = mdata;
+
+		/* Need space for storing generated mac */
+		space += 2 * sizeof(uint64_t);
+
+		fc_params.mac_buf.vaddr = (uint8_t *)mdata + space;
+		fc_params.mac_buf.size = mac_len;
+		space += RTE_ALIGN_CEIL(mac_len, 8);
+		op[0] = (uintptr_t)iov->mac_buf;
+		op[1] = mac_len;
+		infl_req->op_flags |= CPT_OP_FLAGS_AUTH_VERIFY;
+	}
+
+	fc_params.meta_buf.vaddr = (uint8_t *)mdata + space;
+	fc_params.meta_buf.size -= space;
+
+	ret = cpt_fc_enc_hmac_prep(flags, d_offs, d_lens, &fc_params, inst, is_sg_ver2);
+	if (ret)
+		goto free_mdata_and_exit;
+
+	return 0;
+
+free_mdata_and_exit:
+	if (infl_req->op_flags & CPT_OP_FLAGS_METABUF)
+		rte_mempool_put(m_info->pool, infl_req->mdata);
+err_exit:
+	return ret;
+}
+
 #endif /*_CNXK_SE_H_ */
-- 
2.25.1

