DPDK patches and discussions
* [dpdk-dev] [RFC 00/16] crypto: add raw vector support in DPAAx
From: Hemant Agrawal @ 2021-08-12  7:12 UTC
  To: dev, gakhil

This patch series adds support for the raw vector API in the dpaax_sec
drivers. It also enhances the raw vector APIs to support out-of-place
(OOP) processing and security protocols.
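
For context, a minimal application-side sketch of the raw data-path flow
these patches build on (the rte_cryptodev calls are the real experimental
API; the helper, its parameters and the single-segment layout are
illustrative only, and error handling is trimmed):

#include <errno.h>
#include <rte_cryptodev.h>
#include <rte_malloc.h>

static int
raw_dp_enqueue_one(uint8_t dev_id, uint16_t qp_id,
		   struct rte_cryptodev_sym_session *sess,
		   void *va, rte_iova_t iova, uint32_t len,
		   struct rte_crypto_va_iova_ptr *iv,
		   struct rte_crypto_va_iova_ptr *digest,
		   void *user_data)
{
	union rte_cryptodev_session_ctx sess_ctx = { .crypto_sess = sess };
	struct rte_crypto_vec seg = {
		.base = va, .iova = iova,
		.len = len, .tot_len = len, /* no output expansion here */
	};
	struct rte_crypto_sgl sgl = { .vec = &seg, .num = 1 };
	int32_t status = 0;
	struct rte_crypto_sym_vec vec = {
		.num = 1,
		.src_sgl = &sgl,	/* renamed from .sgl by this series */
		.iv = iv,
		.digest = digest,
		.status = &status,
	};
	union rte_crypto_sym_ofs ofs = { .raw = 0 };
	struct rte_crypto_raw_dp_ctx *ctx;
	int enq_status;

	ctx = rte_zmalloc(NULL, rte_cryptodev_get_raw_dp_ctx_size(dev_id), 0);
	if (ctx == NULL)
		return -ENOMEM;
	if (rte_cryptodev_configure_raw_dp_ctx(dev_id, qp_id, ctx,
			RTE_CRYPTO_OP_WITH_SESSION, sess_ctx, 0) < 0)
		return -EINVAL;
	if (rte_cryptodev_raw_enqueue_burst(ctx, &vec, ofs, &user_data,
			&enq_status) != 1)
		return -EIO;
	return rte_cryptodev_raw_enqueue_done(ctx, 1);
}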

Gagandeep Singh (12):
  crypto: add total raw buffer length
  crypto: enhance raw process for protocol offload
  crypto/dpaa2_sec: support raw datapath APIs
  crypto/dpaa2_sec: support AUTH only with raw buffer APIs
  crypto/dpaa2_sec: support AUTHENC with raw buffer APIs
  crypto/dpaa2_sec: support AEAD with raw buffer APIs
  crypto/dpaa2_sec: support OOP with raw buffer API
  crypto/dpaa2_sec: fix ctx memset size
  crypto/dpaa2_sec: enhance error checks with raw buffer APIs
  crypto/dpaa_sec: support raw datapath APIs
  crypto/dpaa_sec: support authonly and chain with raw APIs
  crypto/dpaa_sec: support AEAD and proto with raw APIs

Hemant Agrawal (4):
  crypto: change sgl to src_sgl in vector
  crypto: add dest_sgl in raw vector APIs
  test/crypto: add raw API test for dpaax
  test/crypto: enabling raw API support in 5G algos

 app/test/test_cryptodev.c                   |  179 +++-
 drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c |   13 +-
 drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h   |   82 +-
 drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c | 1045 ++++++++++++++++++
 drivers/crypto/dpaa2_sec/meson.build        |    3 +-
 drivers/crypto/dpaa_sec/dpaa_sec.c          |   23 +-
 drivers/crypto/dpaa_sec/dpaa_sec.h          |   40 +-
 drivers/crypto/dpaa_sec/dpaa_sec_raw_dp.c   | 1052 +++++++++++++++++++
 drivers/crypto/dpaa_sec/meson.build         |    4 +-
 drivers/crypto/qat/qat_sym_hw_dp.c          |   27 +-
 lib/cryptodev/rte_crypto_sym.h              |   13 +-
 lib/ipsec/misc.h                            |    4 +-
 12 files changed, 2390 insertions(+), 95 deletions(-)
 create mode 100644 drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c
 create mode 100644 drivers/crypto/dpaa_sec/dpaa_sec_raw_dp.c

-- 
2.17.1


* [dpdk-dev] [RFC 01/16] crypto: change sgl to src_sgl in vector
From: Hemant Agrawal @ 2021-08-12  7:12 UTC
  To: dev, gakhil

This patch renames sgl to src_sgl to differentiate between the
source and destination sgl arrays.
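
A before/after sketch of the rename (field names as in the diff below;
the helper itself is illustrative and assumes <rte_crypto_sym.h>):

static void
fill_src(struct rte_crypto_sym_vec *symvec, struct rte_crypto_sgl *in_sgl)
{
	/* before this patch the field was named "sgl":
	 *	symvec->sgl = in_sgl;
	 * after the rename the same SGL array is referenced as:
	 */
	symvec->src_sgl = in_sgl;
}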

Signed-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>
---
 app/test/test_cryptodev.c          |  6 +++---
 drivers/crypto/qat/qat_sym_hw_dp.c | 27 ++++++++++++++++-----------
 lib/cryptodev/rte_crypto_sym.h     |  2 +-
 lib/ipsec/misc.h                   |  4 ++--
 4 files changed, 22 insertions(+), 17 deletions(-)

diff --git a/app/test/test_cryptodev.c b/app/test/test_cryptodev.c
index 9ad0b37473..aecd507fa1 100644
--- a/app/test/test_cryptodev.c
+++ b/app/test/test_cryptodev.c
@@ -221,7 +221,7 @@ process_sym_raw_dp_op(uint8_t dev_id, uint16_t qp_id,
 	digest.va = NULL;
 	sgl.vec = data_vec;
 	vec.num = 1;
-	vec.sgl = &sgl;
+	vec.src_sgl = &sgl;
 	vec.iv = &cipher_iv;
 	vec.digest = &digest;
 	vec.aad = &aad_auth_iv;
@@ -385,7 +385,7 @@ process_cpu_aead_op(uint8_t dev_id, struct rte_crypto_op *op)
 
 	sgl.vec = vec;
 	sgl.num = n;
-	symvec.sgl = &sgl;
+	symvec.src_sgl = &sgl;
 	symvec.iv = &iv_ptr;
 	symvec.digest = &digest_ptr;
 	symvec.aad = &aad_ptr;
@@ -431,7 +431,7 @@ process_cpu_crypt_auth_op(uint8_t dev_id, struct rte_crypto_op *op)
 
 	sgl.vec = vec;
 	sgl.num = n;
-	symvec.sgl = &sgl;
+	symvec.src_sgl = &sgl;
 	symvec.iv = &iv_ptr;
 	symvec.digest = &digest_ptr;
 	symvec.status = &st;
diff --git a/drivers/crypto/qat/qat_sym_hw_dp.c b/drivers/crypto/qat/qat_sym_hw_dp.c
index ac9ac05363..4870ebf66a 100644
--- a/drivers/crypto/qat/qat_sym_hw_dp.c
+++ b/drivers/crypto/qat/qat_sym_hw_dp.c
@@ -181,8 +181,9 @@ qat_sym_dp_enqueue_cipher_jobs(void *qp_data, uint8_t *drv_ctx,
 			(uint8_t *)tx_queue->base_addr + tail);
 		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
 
-		data_len = qat_sym_dp_parse_data_vec(qp, req, vec->sgl[i].vec,
-			vec->sgl[i].num);
+		data_len = qat_sym_dp_parse_data_vec(qp, req,
+			vec->src_sgl[i].vec,
+			vec->src_sgl[i].num);
 		if (unlikely(data_len < 0))
 			break;
 		req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data[i];
@@ -302,8 +303,9 @@ qat_sym_dp_enqueue_auth_jobs(void *qp_data, uint8_t *drv_ctx,
 			(uint8_t *)tx_queue->base_addr + tail);
 		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
 
-		data_len = qat_sym_dp_parse_data_vec(qp, req, vec->sgl[i].vec,
-			vec->sgl[i].num);
+		data_len = qat_sym_dp_parse_data_vec(qp, req,
+			vec->src_sgl[i].vec,
+			vec->src_sgl[i].num);
 		if (unlikely(data_len < 0))
 			break;
 		req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data[i];
@@ -484,14 +486,16 @@ qat_sym_dp_enqueue_chain_jobs(void *qp_data, uint8_t *drv_ctx,
 			(uint8_t *)tx_queue->base_addr + tail);
 		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
 
-		data_len = qat_sym_dp_parse_data_vec(qp, req, vec->sgl[i].vec,
-			vec->sgl[i].num);
+		data_len = qat_sym_dp_parse_data_vec(qp, req,
+			vec->src_sgl[i].vec,
+			vec->src_sgl[i].num);
 		if (unlikely(data_len < 0))
 			break;
 		req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data[i];
-		if (unlikely(enqueue_one_chain_job(ctx, req, vec->sgl[i].vec,
-			vec->sgl[i].num, &vec->iv[i], &vec->digest[i],
-				&vec->auth_iv[i], ofs, (uint32_t)data_len)))
+		if (unlikely(enqueue_one_chain_job(ctx, req,
+			vec->src_sgl[i].vec, vec->src_sgl[i].num,
+			&vec->iv[i], &vec->digest[i],
+			&vec->auth_iv[i], ofs, (uint32_t)data_len)))
 			break;
 
 		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
@@ -688,8 +692,9 @@ qat_sym_dp_enqueue_aead_jobs(void *qp_data, uint8_t *drv_ctx,
 			(uint8_t *)tx_queue->base_addr + tail);
 		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
 
-		data_len = qat_sym_dp_parse_data_vec(qp, req, vec->sgl[i].vec,
-			vec->sgl[i].num);
+		data_len = qat_sym_dp_parse_data_vec(qp, req,
+			vec->src_sgl[i].vec,
+			vec->src_sgl[i].num);
 		if (unlikely(data_len < 0))
 			break;
 		req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data[i];
diff --git a/lib/cryptodev/rte_crypto_sym.h b/lib/cryptodev/rte_crypto_sym.h
index 58c0724743..dcc0bd5933 100644
--- a/lib/cryptodev/rte_crypto_sym.h
+++ b/lib/cryptodev/rte_crypto_sym.h
@@ -69,7 +69,7 @@ struct rte_crypto_sym_vec {
 	/** number of operations to perform */
 	uint32_t num;
 	/** array of SGL vectors */
-	struct rte_crypto_sgl *sgl;
+	struct rte_crypto_sgl *src_sgl;
 	/** array of pointers to cipher IV */
 	struct rte_crypto_va_iova_ptr *iv;
 	/** array of pointers to digest */
diff --git a/lib/ipsec/misc.h b/lib/ipsec/misc.h
index 79b9a20762..58ff538141 100644
--- a/lib/ipsec/misc.h
+++ b/lib/ipsec/misc.h
@@ -136,7 +136,7 @@ cpu_crypto_bulk(const struct rte_ipsec_session *ss,
 		/* not enough space in vec[] to hold all segments */
 		if (vcnt < 0) {
 			/* fill the request structure */
-			symvec.sgl = &vecpkt[j];
+			symvec.src_sgl = &vecpkt[j];
 			symvec.iv = &iv[j];
 			symvec.digest = &dgst[j];
 			symvec.aad = &aad[j];
@@ -160,7 +160,7 @@ cpu_crypto_bulk(const struct rte_ipsec_session *ss,
 	}
 
 	/* fill the request structure */
-	symvec.sgl = &vecpkt[j];
+	symvec.src_sgl = &vecpkt[j];
 	symvec.iv = &iv[j];
 	symvec.aad = &aad[j];
 	symvec.digest = &dgst[j];
-- 
2.17.1


* [dpdk-dev] [RFC 02/16] crypto: add total raw buffer length
From: Hemant Agrawal @ 2021-08-12  7:12 UTC
  To: dev, gakhil; +Cc: Gagandeep Singh

From: Gagandeep Singh <g.singh@nxp.com>

The current crypto raw data vector is extended to support
rte_security use cases. In addition to the data length, the
driver/HW needs the total buffer length to know how much extra
space is available in the buffer for writing expanded-size data
after encryption.
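
A sketch of the intended use (the helper and variable names are
illustrative; assumes <rte_crypto_sym.h>):

static void
describe_buf(struct rte_crypto_vec *v, void *va, rte_iova_t iova,
	     uint32_t data_len, uint32_t buf_sz)
{
	v->base = va;
	v->iova = iova;
	v->len = data_len;	/* bytes of valid input data */
	v->tot_len = buf_sz;	/* total room the HW may use when the
				 * output grows, e.g. protocol encap */
}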

Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
---
 lib/cryptodev/rte_crypto_sym.h | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/lib/cryptodev/rte_crypto_sym.h b/lib/cryptodev/rte_crypto_sym.h
index dcc0bd5933..e5cef1fb72 100644
--- a/lib/cryptodev/rte_crypto_sym.h
+++ b/lib/cryptodev/rte_crypto_sym.h
@@ -37,6 +37,8 @@ struct rte_crypto_vec {
 	rte_iova_t iova;
 	/** length of the data buffer */
 	uint32_t len;
+	/** total buffer length*/
+	uint32_t tot_len;
 };
 
 /**
@@ -980,12 +982,14 @@ rte_crypto_mbuf_to_vec(const struct rte_mbuf *mb, uint32_t ofs, uint32_t len,
 	seglen = mb->data_len - ofs;
 	if (len <= seglen) {
 		vec[0].len = len;
+		vec[0].tot_len = mb->buf_len;
 		return 1;
 	}
 
 	/* data spread across segments */
 	vec[0].len = seglen;
 	left = len - seglen;
+	vec[0].tot_len = mb->buf_len;
 	for (i = 1, nseg = mb->next; nseg != NULL; nseg = nseg->next, i++) {
 
 		vec[i].base = rte_pktmbuf_mtod(nseg, void *);
@@ -995,6 +999,7 @@ rte_crypto_mbuf_to_vec(const struct rte_mbuf *mb, uint32_t ofs, uint32_t len,
 		if (left <= seglen) {
 			/* whole requested data is completed */
 			vec[i].len = left;
+			vec[i].tot_len = mb->buf_len;
 			left = 0;
 			break;
 		}
@@ -1002,6 +1007,7 @@ rte_crypto_mbuf_to_vec(const struct rte_mbuf *mb, uint32_t ofs, uint32_t len,
 		/* use whole segment */
 		vec[i].len = seglen;
 		left -= seglen;
+		vec[i].tot_len = mb->buf_len;
 	}
 
 	RTE_ASSERT(left == 0);
-- 
2.17.1


* [dpdk-dev] [RFC 03/16] crypto: add dest_sgl in raw vector APIs
From: Hemant Agrawal @ 2021-08-12  7:12 UTC
  To: dev, gakhil

The structure rte_crypto_sym_vec is extended with a dest_sgl
field to support out-of-place processing.
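
Intended usage sketch (illustrative helper; the NULL-for-in-place
semantics are stated in the diff below):

static void
select_oop(struct rte_crypto_sym_vec *vec,
	   struct rte_crypto_sgl *src, struct rte_crypto_sgl *dst)
{
	vec->src_sgl = src;
	/* NULL keeps processing in place on src_sgl; a non-NULL
	 * array selects out-of-place processing into dst. */
	vec->dest_sgl = dst;
}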

Signed-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>
---
 lib/cryptodev/rte_crypto_sym.h | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/lib/cryptodev/rte_crypto_sym.h b/lib/cryptodev/rte_crypto_sym.h
index e5cef1fb72..978708845f 100644
--- a/lib/cryptodev/rte_crypto_sym.h
+++ b/lib/cryptodev/rte_crypto_sym.h
@@ -72,6 +72,8 @@ struct rte_crypto_sym_vec {
 	uint32_t num;
 	/** array of SGL vectors */
 	struct rte_crypto_sgl *src_sgl;
+	/** array of SGL vectors for OOP, keep it NULL for inplace*/
+	struct rte_crypto_sgl *dest_sgl;
 	/** array of pointers to cipher IV */
 	struct rte_crypto_va_iova_ptr *iv;
 	/** array of pointers to digest */
-- 
2.17.1


* [dpdk-dev] [RFC 04/16] crypto: enhance raw process for protocol offload
From: Hemant Agrawal @ 2021-08-12  7:12 UTC
  To: dev, gakhil; +Cc: Gagandeep Singh

From: Gagandeep Singh <g.singh@nxp.com>

This patch improves the raw vector processing for security
protocols with OOP processing. It also fixes the segment count
returned by rte_crypto_mbuf_to_vec() so that every exit path
returns the number of vector entries actually filled.
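
With the count fixed, callers can rely on the return value directly; a
minimal sketch (illustrative helper):

static int
mbuf_to_sgl(const struct rte_mbuf *mb, struct rte_crypto_vec vec[],
	    uint32_t num, struct rte_crypto_sgl *sgl)
{
	int n = rte_crypto_mbuf_to_vec(mb, 0, rte_pktmbuf_pkt_len(mb),
				       vec, num);

	if (n < 0)
		return n;	/* vec[] too small for all segments */
	sgl->vec = vec;
	sgl->num = n;		/* number of entries actually filled */
	return 0;
}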

Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
---
 lib/cryptodev/rte_crypto_sym.h | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/lib/cryptodev/rte_crypto_sym.h b/lib/cryptodev/rte_crypto_sym.h
index 978708845f..a48228a646 100644
--- a/lib/cryptodev/rte_crypto_sym.h
+++ b/lib/cryptodev/rte_crypto_sym.h
@@ -1003,6 +1003,7 @@ rte_crypto_mbuf_to_vec(const struct rte_mbuf *mb, uint32_t ofs, uint32_t len,
 			vec[i].len = left;
 			vec[i].tot_len = mb->buf_len;
 			left = 0;
+			i++;
 			break;
 		}
 
@@ -1013,7 +1014,7 @@ rte_crypto_mbuf_to_vec(const struct rte_mbuf *mb, uint32_t ofs, uint32_t len,
 	}
 
 	RTE_ASSERT(left == 0);
-	return i + 1;
+	return i;
 }
 
 
-- 
2.17.1


* [dpdk-dev] [RFC 05/16] crypto/dpaa2_sec: support raw datapath APIs
From: Hemant Agrawal @ 2021-08-12  7:12 UTC
  To: dev, gakhil; +Cc: Gagandeep Singh

From: Gagandeep Singh <g.singh@nxp.com>

This patch adds the framework for raw API support.
The initial patch implements and tests only the cipher-only part.
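
The cipher FD builder added below derives the region to process from
the offsets union; a condensed sketch of that computation (mirrors the
diff; the helper itself is illustrative and assumes <rte_crypto_sym.h>
and <errno.h>):

static int
cipher_region(const struct rte_crypto_sgl *sgl, union rte_crypto_sym_ofs ofs,
	      int bit_mode, int *offset, int *len)
{
	int total = 0;
	unsigned int i;

	for (i = 0; i < sgl->num; i++)
		total += sgl->vec[i].len;

	*len = total - ofs.ofs.cipher.head - ofs.ofs.cipher.tail;
	*offset = ofs.ofs.cipher.head;

	/* SNOW3G/ZUC lengths and offsets are in bits; SEC needs
	 * whole bytes */
	if (bit_mode) {
		if ((*len & 7) || (*offset & 7))
			return -ENOTSUP;
		*len >>= 3;
		*offset >>= 3;
	}
	return 0;
}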

Signed-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>
Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
---
 drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c |  13 +-
 drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h   |  60 +-
 drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c | 596 ++++++++++++++++++++
 drivers/crypto/dpaa2_sec/meson.build        |   3 +-
 4 files changed, 643 insertions(+), 29 deletions(-)
 create mode 100644 drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c

diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c b/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
index 1ccead3641..fe90d9d2d8 100644
--- a/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
@@ -49,15 +49,8 @@
 #define FSL_MC_DPSECI_DEVID     3
 
 #define NO_PREFETCH 0
-/* FLE_POOL_NUM_BUFS is set as per the ipsec-secgw application */
-#define FLE_POOL_NUM_BUFS	32000
-#define FLE_POOL_BUF_SIZE	256
-#define FLE_POOL_CACHE_SIZE	512
-#define FLE_SG_MEM_SIZE(num)	(FLE_POOL_BUF_SIZE + ((num) * 32))
-#define SEC_FLC_DHR_OUTBOUND	-114
-#define SEC_FLC_DHR_INBOUND	0
 
-static uint8_t cryptodev_driver_id;
+uint8_t cryptodev_driver_id;
 
 #ifdef RTE_LIB_SECURITY
 static inline int
@@ -3805,6 +3798,9 @@ static struct rte_cryptodev_ops crypto_ops = {
 	.sym_session_get_size     = dpaa2_sec_sym_session_get_size,
 	.sym_session_configure    = dpaa2_sec_sym_session_configure,
 	.sym_session_clear        = dpaa2_sec_sym_session_clear,
+	/* Raw data-path API related operations */
+	.sym_get_raw_dp_ctx_size = dpaa2_sec_get_dp_ctx_size,
+	.sym_configure_raw_dp_ctx = dpaa2_sec_configure_raw_dp_ctx,
 };
 
 #ifdef RTE_LIB_SECURITY
@@ -3887,6 +3883,7 @@ dpaa2_sec_dev_init(struct rte_cryptodev *cryptodev)
 			RTE_CRYPTODEV_FF_HW_ACCELERATED |
 			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
 			RTE_CRYPTODEV_FF_SECURITY |
+			RTE_CRYPTODEV_FF_SYM_RAW_DP |
 			RTE_CRYPTODEV_FF_IN_PLACE_SGL |
 			RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT |
 			RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h b/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h
index 7dbc69f6cb..860c9b6520 100644
--- a/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h
@@ -15,6 +15,16 @@
 #define CRYPTODEV_NAME_DPAA2_SEC_PMD	crypto_dpaa2_sec
 /**< NXP DPAA2 - SEC PMD device name */
 
+extern uint8_t cryptodev_driver_id;
+
+/* FLE_POOL_NUM_BUFS is set as per the ipsec-secgw application */
+#define FLE_POOL_NUM_BUFS	32000
+#define FLE_POOL_BUF_SIZE	256
+#define FLE_POOL_CACHE_SIZE	512
+#define FLE_SG_MEM_SIZE(num)	(FLE_POOL_BUF_SIZE + ((num) * 32))
+#define SEC_FLC_DHR_OUTBOUND	-114
+#define SEC_FLC_DHR_INBOUND	0
+
 #define MAX_QUEUES		64
 #define MAX_DESC_SIZE		64
 /** private data structure for each DPAA2_SEC device */
@@ -158,6 +168,24 @@ struct dpaa2_pdcp_ctxt {
 	uint32_t hfn_threshold;	/*!< HFN Threashold for key renegotiation */
 };
 #endif
+
+typedef int (*dpaa2_sec_build_fd_t)(
+	void *qp, uint8_t *drv_ctx, struct rte_crypto_vec *data_vec,
+	uint16_t n_data_vecs, union rte_crypto_sym_ofs ofs,
+	struct rte_crypto_va_iova_ptr *iv,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *aad_or_auth_iv,
+	void *user_data);
+
+typedef int (*dpaa2_sec_build_raw_dp_fd_t)(uint8_t *drv_ctx,
+		       struct rte_crypto_sgl *sgl,
+		       struct rte_crypto_va_iova_ptr *iv,
+		       struct rte_crypto_va_iova_ptr *digest,
+		       struct rte_crypto_va_iova_ptr *auth_iv,
+		       union rte_crypto_sym_ofs ofs,
+		       void *userdata,
+		       struct qbman_fd *fd);
+
 typedef struct dpaa2_sec_session_entry {
 	void *ctxt;
 	uint8_t ctxt_type;
@@ -165,6 +193,8 @@ typedef struct dpaa2_sec_session_entry {
 	enum rte_crypto_cipher_algorithm cipher_alg; /*!< Cipher Algorithm*/
 	enum rte_crypto_auth_algorithm auth_alg; /*!< Authentication Algorithm*/
 	enum rte_crypto_aead_algorithm aead_alg; /*!< AEAD Algorithm*/
+	dpaa2_sec_build_fd_t build_fd;
+	dpaa2_sec_build_raw_dp_fd_t build_raw_dp_fd;
 	union {
 		struct {
 			uint8_t *data;	/**< pointer to key data */
@@ -547,26 +577,6 @@ static const struct rte_cryptodev_capabilities dpaa2_sec_capabilities[] = {
 			}, }
 		}, }
 	},
-	{	/* NULL (CIPHER) */
-		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
-		{.sym = {
-			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
-			{.cipher = {
-				.algo = RTE_CRYPTO_CIPHER_NULL,
-				.block_size = 1,
-				.key_size = {
-					.min = 0,
-					.max = 0,
-					.increment = 0
-				},
-				.iv_size = {
-					.min = 0,
-					.max = 0,
-					.increment = 0
-				}
-			}, },
-		}, }
-	},
 	{	/* AES CBC */
 		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
 		{.sym = {
@@ -974,4 +984,14 @@ calc_chksum(void *buffer, int len)
 	return  result;
 }
 
+int
+dpaa2_sec_configure_raw_dp_ctx(struct rte_cryptodev *dev, uint16_t qp_id,
+	struct rte_crypto_raw_dp_ctx *raw_dp_ctx,
+	enum rte_crypto_op_sess_type sess_type,
+	union rte_cryptodev_session_ctx session_ctx, uint8_t is_update);
+
+int
+dpaa2_sec_get_dp_ctx_size(struct rte_cryptodev *dev);
+
+
 #endif /* _DPAA2_SEC_PMD_PRIVATE_H_ */
diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c b/drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c
new file mode 100644
index 0000000000..10cb4247b0
--- /dev/null
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c
@@ -0,0 +1,596 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2021 NXP
+ */
+
+#include <rte_cryptodev_pmd.h>
+#include <rte_fslmc.h>
+#include <fslmc_vfio.h>
+#include <dpaa2_hw_pvt.h>
+#include <dpaa2_hw_dpio.h>
+
+#include "dpaa2_sec_priv.h"
+#include "dpaa2_sec_logs.h"
+
+struct dpaa2_sec_raw_dp_ctx {
+	dpaa2_sec_session *session;
+	uint32_t tail;
+	uint32_t head;
+	uint16_t cached_enqueue;
+	uint16_t cached_dequeue;
+};
+
+static int
+build_raw_dp_chain_fd(uint8_t *drv_ctx,
+		       struct rte_crypto_sgl *sgl,
+		       struct rte_crypto_va_iova_ptr *iv,
+		       struct rte_crypto_va_iova_ptr *digest,
+		       struct rte_crypto_va_iova_ptr *auth_iv,
+		       union rte_crypto_sym_ofs ofs,
+		       void *userdata,
+		       struct qbman_fd *fd)
+{
+	RTE_SET_USED(drv_ctx);
+	RTE_SET_USED(sgl);
+	RTE_SET_USED(iv);
+	RTE_SET_USED(digest);
+	RTE_SET_USED(auth_iv);
+	RTE_SET_USED(ofs);
+	RTE_SET_USED(userdata);
+	RTE_SET_USED(fd);
+
+	return 0;
+}
+
+static int
+build_raw_dp_aead_fd(uint8_t *drv_ctx,
+		       struct rte_crypto_sgl *sgl,
+		       struct rte_crypto_va_iova_ptr *iv,
+		       struct rte_crypto_va_iova_ptr *digest,
+		       struct rte_crypto_va_iova_ptr *auth_iv,
+		       union rte_crypto_sym_ofs ofs,
+		       void *userdata,
+		       struct qbman_fd *fd)
+{
+	RTE_SET_USED(drv_ctx);
+	RTE_SET_USED(sgl);
+	RTE_SET_USED(iv);
+	RTE_SET_USED(digest);
+	RTE_SET_USED(auth_iv);
+	RTE_SET_USED(ofs);
+	RTE_SET_USED(userdata);
+	RTE_SET_USED(fd);
+
+	return 0;
+}
+
+static int
+build_raw_dp_auth_fd(uint8_t *drv_ctx,
+		       struct rte_crypto_sgl *sgl,
+		       struct rte_crypto_va_iova_ptr *iv,
+		       struct rte_crypto_va_iova_ptr *digest,
+		       struct rte_crypto_va_iova_ptr *auth_iv,
+		       union rte_crypto_sym_ofs ofs,
+		       void *userdata,
+		       struct qbman_fd *fd)
+{
+	RTE_SET_USED(drv_ctx);
+	RTE_SET_USED(sgl);
+	RTE_SET_USED(iv);
+	RTE_SET_USED(digest);
+	RTE_SET_USED(auth_iv);
+	RTE_SET_USED(ofs);
+	RTE_SET_USED(userdata);
+	RTE_SET_USED(fd);
+
+	return 0;
+}
+
+static int
+build_raw_dp_proto_fd(uint8_t *drv_ctx,
+		       struct rte_crypto_sgl *sgl,
+		       struct rte_crypto_va_iova_ptr *iv,
+		       struct rte_crypto_va_iova_ptr *digest,
+		       struct rte_crypto_va_iova_ptr *auth_iv,
+		       union rte_crypto_sym_ofs ofs,
+		       void *userdata,
+		       struct qbman_fd *fd)
+{
+	RTE_SET_USED(drv_ctx);
+	RTE_SET_USED(sgl);
+	RTE_SET_USED(iv);
+	RTE_SET_USED(digest);
+	RTE_SET_USED(auth_iv);
+	RTE_SET_USED(ofs);
+	RTE_SET_USED(userdata);
+	RTE_SET_USED(fd);
+
+	return 0;
+}
+
+static int
+build_raw_dp_proto_compound_fd(uint8_t *drv_ctx,
+		       struct rte_crypto_sgl *sgl,
+		       struct rte_crypto_va_iova_ptr *iv,
+		       struct rte_crypto_va_iova_ptr *digest,
+		       struct rte_crypto_va_iova_ptr *auth_iv,
+		       union rte_crypto_sym_ofs ofs,
+		       void *userdata,
+		       struct qbman_fd *fd)
+{
+	RTE_SET_USED(drv_ctx);
+	RTE_SET_USED(sgl);
+	RTE_SET_USED(iv);
+	RTE_SET_USED(digest);
+	RTE_SET_USED(auth_iv);
+	RTE_SET_USED(ofs);
+	RTE_SET_USED(userdata);
+	RTE_SET_USED(fd);
+
+	return 0;
+}
+
+static int
+build_raw_dp_cipher_fd(uint8_t *drv_ctx,
+		       struct rte_crypto_sgl *sgl,
+		       struct rte_crypto_va_iova_ptr *iv,
+		       struct rte_crypto_va_iova_ptr *digest,
+		       struct rte_crypto_va_iova_ptr *auth_iv,
+		       union rte_crypto_sym_ofs ofs,
+		       void *userdata,
+		       struct qbman_fd *fd)
+{
+	RTE_SET_USED(digest);
+	RTE_SET_USED(auth_iv);
+
+	dpaa2_sec_session *sess =
+		((struct dpaa2_sec_raw_dp_ctx *)drv_ctx)->session;
+	struct qbman_fle *ip_fle, *op_fle, *sge, *fle;
+	int total_len = 0, data_len = 0, data_offset;
+	struct sec_flow_context *flc;
+	struct ctxt_priv *priv = sess->ctxt;
+	unsigned int i;
+
+	for (i = 0; i < sgl->num; i++)
+		total_len += sgl->vec[i].len;
+
+	data_len = total_len - ofs.ofs.cipher.head - ofs.ofs.cipher.tail;
+	data_offset = ofs.ofs.cipher.head;
+
+	if (sess->cipher_alg == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
+		sess->cipher_alg == RTE_CRYPTO_CIPHER_ZUC_EEA3) {
+		if ((data_len & 7) || (data_offset & 7)) {
+			DPAA2_SEC_ERR("CIPHER: len/offset must be full bytes");
+			return -ENOTSUP;
+		}
+
+		data_len = data_len >> 3;
+		data_offset = data_offset >> 3;
+	}
+
+	/* first FLE entry used to store mbuf and session ctxt */
+	fle = (struct qbman_fle *)rte_malloc(NULL,
+			FLE_SG_MEM_SIZE(2*sgl->num),
+			RTE_CACHE_LINE_SIZE);
+	if (!fle) {
+		DPAA2_SEC_ERR("RAW CIPHER SG: Memory alloc failed for SGE");
+		return -ENOMEM;
+	}
+	memset(fle, 0, FLE_SG_MEM_SIZE(2*sgl->num));
+	/* first FLE entry used to store userdata and session ctxt */
+	DPAA2_SET_FLE_ADDR(fle, (size_t)userdata);
+	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
+
+	op_fle = fle + 1;
+	ip_fle = fle + 2;
+	sge = fle + 3;
+
+	flc = &priv->flc_desc[0].flc;
+
+	DPAA2_SEC_DP_DEBUG(
+		"RAW CIPHER SG: cipher_off: 0x%x/length %d, ivlen=%d\n",
+		data_offset,
+		data_len,
+		sess->iv.length);
+
+	/* o/p fle */
+	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));
+	op_fle->length = data_len;
+	DPAA2_SET_FLE_SG_EXT(op_fle);
+
+	/* o/p 1st seg */
+	DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
+	DPAA2_SET_FLE_OFFSET(sge, data_offset);
+	sge->length = sgl->vec[0].len - data_offset;
+
+	/* o/p segs */
+	for (i = 1; i < sgl->num; i++) {
+		sge++;
+		DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
+		DPAA2_SET_FLE_OFFSET(sge, 0);
+		sge->length = sgl->vec[i].len;
+	}
+	DPAA2_SET_FLE_FIN(sge);
+
+	DPAA2_SEC_DP_DEBUG(
+		"RAW CIPHER SG: 1 - flc = %p, fle = %p FLEaddr = %x-%x, len %d\n",
+		flc, fle, fle->addr_hi, fle->addr_lo,
+		fle->length);
+
+	/* i/p fle */
+	sge++;
+	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
+	ip_fle->length = sess->iv.length + data_len;
+	DPAA2_SET_FLE_SG_EXT(ip_fle);
+
+	/* i/p IV */
+	DPAA2_SET_FLE_ADDR(sge, iv->iova);
+	DPAA2_SET_FLE_OFFSET(sge, 0);
+	sge->length = sess->iv.length;
+
+	sge++;
+
+	/* i/p 1st seg */
+	DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
+	DPAA2_SET_FLE_OFFSET(sge, data_offset);
+	sge->length = sgl->vec[0].len - data_offset;
+
+	/* i/p segs */
+	for (i = 1; i < sgl->num; i++) {
+		sge++;
+		DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
+		DPAA2_SET_FLE_OFFSET(sge, 0);
+		sge->length = sgl->vec[i].len;
+	}
+	DPAA2_SET_FLE_FIN(sge);
+	DPAA2_SET_FLE_FIN(ip_fle);
+
+	/* sg fd */
+	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
+	DPAA2_SET_FD_LEN(fd, ip_fle->length);
+	DPAA2_SET_FD_COMPOUND_FMT(fd);
+	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
+
+	DPAA2_SEC_DP_DEBUG(
+		"RAW CIPHER SG: fdaddr =%" PRIx64 " off =%d, len =%d\n",
+		DPAA2_GET_FD_ADDR(fd),
+		DPAA2_GET_FD_OFFSET(fd),
+		DPAA2_GET_FD_LEN(fd));
+
+	return 0;
+}
+
+static __rte_always_inline uint32_t
+dpaa2_sec_raw_enqueue_burst(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
+	void *user_data[], int *status)
+{
+	RTE_SET_USED(user_data);
+	uint32_t loop;
+	int32_t ret;
+	struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
+	uint32_t frames_to_send, retry_count;
+	struct qbman_eq_desc eqdesc;
+	struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp_data;
+	dpaa2_sec_session *sess =
+		((struct dpaa2_sec_raw_dp_ctx *)drv_ctx)->session;
+	struct qbman_swp *swp;
+	uint16_t num_tx = 0;
+	uint32_t flags[MAX_TX_RING_SLOTS] = {0};
+
+	if (unlikely(vec->num == 0))
+		return 0;
+
+	if (sess == NULL) {
+		DPAA2_SEC_ERR("sessionless raw crypto not supported");
+		return 0;
+	}
+	/*Prepare enqueue descriptor*/
+	qbman_eq_desc_clear(&eqdesc);
+	qbman_eq_desc_set_no_orp(&eqdesc, DPAA2_EQ_RESP_ERR_FQ);
+	qbman_eq_desc_set_response(&eqdesc, 0, 0);
+	qbman_eq_desc_set_fq(&eqdesc, dpaa2_qp->tx_vq.fqid);
+
+	if (!DPAA2_PER_LCORE_DPIO) {
+		ret = dpaa2_affine_qbman_swp();
+		if (ret) {
+			DPAA2_SEC_ERR(
+				"Failed to allocate IO portal, tid: %d\n",
+				rte_gettid());
+			return 0;
+		}
+	}
+	swp = DPAA2_PER_LCORE_PORTAL;
+
+	while (vec->num) {
+		frames_to_send = (vec->num > dpaa2_eqcr_size) ?
+			dpaa2_eqcr_size : vec->num;
+
+		for (loop = 0; loop < frames_to_send; loop++) {
+			/*Clear the unused FD fields before sending*/
+			memset(&fd_arr[loop], 0, sizeof(struct qbman_fd));
+			ret = sess->build_raw_dp_fd(drv_ctx,
+						    &vec->src_sgl[loop],
+						    &vec->iv[loop],
+						    &vec->digest[loop],
+						    &vec->auth_iv[loop],
+						    ofs,
+						    user_data[loop],
+						    &fd_arr[loop]);
+			if (ret) {
+				DPAA2_SEC_ERR("error: Improper packet contents"
+					      " for crypto operation");
+				goto skip_tx;
+			}
+			status[loop] = 1;
+		}
+
+		loop = 0;
+		retry_count = 0;
+		while (loop < frames_to_send) {
+			ret = qbman_swp_enqueue_multiple(swp, &eqdesc,
+							 &fd_arr[loop],
+							 &flags[loop],
+							 frames_to_send - loop);
+			if (unlikely(ret < 0)) {
+				retry_count++;
+				if (retry_count > DPAA2_MAX_TX_RETRY_COUNT) {
+					num_tx += loop;
+					vec->num -= loop;
+					goto skip_tx;
+				}
+			} else {
+				loop += ret;
+				retry_count = 0;
+			}
+		}
+
+		num_tx += loop;
+		vec->num -= loop;
+	}
+skip_tx:
+	dpaa2_qp->tx_vq.tx_pkts += num_tx;
+	dpaa2_qp->tx_vq.err_pkts += vec->num;
+
+	return num_tx;
+}
+
+static __rte_always_inline int
+dpaa2_sec_raw_enqueue(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_vec *data_vec,
+	uint16_t n_data_vecs, union rte_crypto_sym_ofs ofs,
+	struct rte_crypto_va_iova_ptr *iv,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *aad_or_auth_iv,
+	void *user_data)
+{
+	RTE_SET_USED(qp_data);
+	RTE_SET_USED(drv_ctx);
+	RTE_SET_USED(data_vec);
+	RTE_SET_USED(n_data_vecs);
+	RTE_SET_USED(ofs);
+	RTE_SET_USED(iv);
+	RTE_SET_USED(digest);
+	RTE_SET_USED(aad_or_auth_iv);
+	RTE_SET_USED(user_data);
+
+	return 0;
+}
+
+static inline void *
+sec_fd_to_userdata(const struct qbman_fd *fd)
+{
+	struct qbman_fle *fle;
+	void *userdata;
+	fle = (struct qbman_fle *)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));
+
+	DPAA2_SEC_DP_DEBUG("FLE addr = %x - %x, offset = %x\n",
+			   fle->addr_hi, fle->addr_lo, fle->fin_bpid_offset);
+	userdata = (struct rte_crypto_op *)DPAA2_GET_FLE_ADDR((fle - 1));
+	/* free the fle memory */
+	rte_free((void *)(fle-1));
+
+	return userdata;
+}
+
+static __rte_always_inline uint32_t
+dpaa2_sec_raw_dequeue_burst(void *qp_data, uint8_t *drv_ctx,
+	rte_cryptodev_raw_get_dequeue_count_t get_dequeue_count,
+	uint32_t max_nb_to_dequeue,
+	rte_cryptodev_raw_post_dequeue_t post_dequeue,
+	void **out_user_data, uint8_t is_user_data_array,
+	uint32_t *n_success, int *dequeue_status)
+{
+	RTE_SET_USED(drv_ctx);
+	RTE_SET_USED(get_dequeue_count);
+
+	/* Function is responsible to receive frames for a given device and VQ*/
+	struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp_data;
+	struct qbman_result *dq_storage;
+	uint32_t fqid = dpaa2_qp->rx_vq.fqid;
+	int ret, num_rx = 0;
+	uint8_t is_last = 0, status;
+	struct qbman_swp *swp;
+	const struct qbman_fd *fd;
+	struct qbman_pull_desc pulldesc;
+	void *user_data;
+	uint32_t nb_ops = max_nb_to_dequeue;
+
+	if (!DPAA2_PER_LCORE_DPIO) {
+		ret = dpaa2_affine_qbman_swp();
+		if (ret) {
+			DPAA2_SEC_ERR(
+				"Failed to allocate IO portal, tid: %d\n",
+				rte_gettid());
+			return 0;
+		}
+	}
+	swp = DPAA2_PER_LCORE_PORTAL;
+	dq_storage = dpaa2_qp->rx_vq.q_storage->dq_storage[0];
+
+	qbman_pull_desc_clear(&pulldesc);
+	qbman_pull_desc_set_numframes(&pulldesc,
+				      (nb_ops > dpaa2_dqrr_size) ?
+				      dpaa2_dqrr_size : nb_ops);
+	qbman_pull_desc_set_fq(&pulldesc, fqid);
+	qbman_pull_desc_set_storage(&pulldesc, dq_storage,
+				    (uint64_t)DPAA2_VADDR_TO_IOVA(dq_storage),
+				    1);
+
+	/*Issue a volatile dequeue command. */
+	while (1) {
+		if (qbman_swp_pull(swp, &pulldesc)) {
+			DPAA2_SEC_WARN(
+				"SEC VDQ command is not issued : QBMAN busy");
+			/* Portal was busy, try again */
+			continue;
+		}
+		break;
+	};
+
+	/* Receive the packets till Last Dequeue entry is found with
+	 * respect to the above issued PULL command.
+	 */
+	while (!is_last) {
+		/* Check if the previous issued command is completed.
+		 * Also seems like the SWP is shared between the Ethernet Driver
+		 * and the SEC driver.
+		 */
+		while (!qbman_check_command_complete(dq_storage))
+			;
+
+		/* Loop until the dq_storage is updated with
+		 * new token by QBMAN
+		 */
+		while (!qbman_check_new_result(dq_storage))
+			;
+		/* Check whether Last Pull command is Expired and
+		 * setting Condition for Loop termination
+		 */
+		if (qbman_result_DQ_is_pull_complete(dq_storage)) {
+			is_last = 1;
+			/* Check for valid frame. */
+			status = (uint8_t)qbman_result_DQ_flags(dq_storage);
+			if (unlikely(
+				(status & QBMAN_DQ_STAT_VALIDFRAME) == 0)) {
+				DPAA2_SEC_DP_DEBUG("No frame is delivered\n");
+				continue;
+			}
+		}
+
+		fd = qbman_result_DQ_fd(dq_storage);
+		user_data = sec_fd_to_userdata(fd);
+		if (is_user_data_array)
+			out_user_data[num_rx] = user_data;
+		else
+			out_user_data[0] = user_data;
+		if (unlikely(fd->simple.frc)) {
+			/* TODO Parse SEC errors */
+			DPAA2_SEC_ERR("SEC returned Error - %x",
+				      fd->simple.frc);
+			status = RTE_CRYPTO_OP_STATUS_ERROR;
+		} else {
+			status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+		}
+		post_dequeue(user_data, num_rx, status);
+
+		num_rx++;
+		dq_storage++;
+	} /* End of Packet Rx loop */
+
+	dpaa2_qp->rx_vq.rx_pkts += num_rx;
+	*dequeue_status = 1;
+	*n_success = num_rx;
+
+	DPAA2_SEC_DP_DEBUG("SEC Received %d Packets\n", num_rx);
+	/*Return the total number of packets received to DPAA2 app*/
+	return num_rx;
+}
+
+static __rte_always_inline void *
+dpaa2_sec_raw_dequeue(void *qp_data, uint8_t *drv_ctx, int *dequeue_status,
+		enum rte_crypto_op_status *op_status)
+{
+	RTE_SET_USED(qp_data);
+	RTE_SET_USED(drv_ctx);
+	RTE_SET_USED(dequeue_status);
+	RTE_SET_USED(op_status);
+
+	return NULL;
+}
+
+static __rte_always_inline int
+dpaa2_sec_raw_enqueue_done(void *qp_data, uint8_t *drv_ctx, uint32_t n)
+{
+	RTE_SET_USED(qp_data);
+	RTE_SET_USED(drv_ctx);
+	RTE_SET_USED(n);
+
+	return 0;
+}
+
+static __rte_always_inline int
+dpaa2_sec_raw_dequeue_done(void *qp_data, uint8_t *drv_ctx, uint32_t n)
+{
+	RTE_SET_USED(qp_data);
+	RTE_SET_USED(drv_ctx);
+	RTE_SET_USED(n);
+
+	return 0;
+}
+
+int
+dpaa2_sec_configure_raw_dp_ctx(struct rte_cryptodev *dev, uint16_t qp_id,
+	struct rte_crypto_raw_dp_ctx *raw_dp_ctx,
+	enum rte_crypto_op_sess_type sess_type,
+	union rte_cryptodev_session_ctx session_ctx, uint8_t is_update)
+{
+	dpaa2_sec_session *sess;
+	struct dpaa2_sec_raw_dp_ctx *dp_ctx;
+	RTE_SET_USED(qp_id);
+
+	if (!is_update) {
+		memset(raw_dp_ctx, 0, sizeof(*raw_dp_ctx) +
+				sizeof(dpaa2_sec_session));
+		raw_dp_ctx->qp_data = dev->data->queue_pairs[qp_id];
+	}
+
+	if (sess_type == RTE_CRYPTO_OP_SECURITY_SESSION)
+		sess = (dpaa2_sec_session *)get_sec_session_private_data(
+				session_ctx.sec_sess);
+	else if (sess_type == RTE_CRYPTO_OP_WITH_SESSION)
+		sess = (dpaa2_sec_session *)get_sym_session_private_data(
+			session_ctx.crypto_sess, cryptodev_driver_id);
+	else
+		return -ENOTSUP;
+	raw_dp_ctx->dequeue_burst = dpaa2_sec_raw_dequeue_burst;
+	raw_dp_ctx->dequeue = dpaa2_sec_raw_dequeue;
+	raw_dp_ctx->dequeue_done = dpaa2_sec_raw_dequeue_done;
+	raw_dp_ctx->enqueue_burst = dpaa2_sec_raw_enqueue_burst;
+	raw_dp_ctx->enqueue = dpaa2_sec_raw_enqueue;
+	raw_dp_ctx->enqueue_done = dpaa2_sec_raw_enqueue_done;
+
+	if (sess->ctxt_type == DPAA2_SEC_CIPHER_HASH)
+		sess->build_raw_dp_fd = build_raw_dp_chain_fd;
+	else if (sess->ctxt_type == DPAA2_SEC_AEAD)
+		sess->build_raw_dp_fd = build_raw_dp_aead_fd;
+	else if (sess->ctxt_type == DPAA2_SEC_AUTH)
+		sess->build_raw_dp_fd = build_raw_dp_auth_fd;
+	else if (sess->ctxt_type == DPAA2_SEC_CIPHER)
+		sess->build_raw_dp_fd = build_raw_dp_cipher_fd;
+	else if (sess->ctxt_type == DPAA2_SEC_IPSEC)
+		sess->build_raw_dp_fd = build_raw_dp_proto_fd;
+	else if (sess->ctxt_type == DPAA2_SEC_PDCP)
+		sess->build_raw_dp_fd = build_raw_dp_proto_compound_fd;
+	else
+		return -ENOTSUP;
+	dp_ctx = (struct dpaa2_sec_raw_dp_ctx *)raw_dp_ctx->drv_ctx_data;
+	dp_ctx->session = sess;
+
+	return 0;
+}
+
+int
+dpaa2_sec_get_dp_ctx_size(__rte_unused struct rte_cryptodev *dev)
+{
+	return sizeof(struct dpaa2_sec_raw_dp_ctx);
+}
diff --git a/drivers/crypto/dpaa2_sec/meson.build b/drivers/crypto/dpaa2_sec/meson.build
index ea1d73a13d..e6e5abb3c1 100644
--- a/drivers/crypto/dpaa2_sec/meson.build
+++ b/drivers/crypto/dpaa2_sec/meson.build
@@ -1,5 +1,5 @@
 # SPDX-License-Identifier: BSD-3-Clause
-# Copyright 2018 NXP
+# Copyright 2018,2021 NXP
 
 if not is_linux
     build = false
@@ -9,6 +9,7 @@ endif
 deps += ['security', 'mempool_dpaa2']
 sources = files(
         'dpaa2_sec_dpseci.c',
+	'dpaa2_sec_raw_dp.c',
         'mc/dpseci.c',
 )
 
-- 
2.17.1


* [dpdk-dev] [RFC 06/16] crypto/dpaa2_sec: support AUTH only with raw buffer APIs
From: Hemant Agrawal @ 2021-08-12  7:12 UTC
  To: dev, gakhil; +Cc: Gagandeep Singh

From: Gagandeep Singh <g.singh@nxp.com>

This patch adds support for auth-only operations with the raw buffer APIs.
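
Application-side sketch for an auth-only request (illustrative helper;
algorithms that take an IV, e.g. SNOW3G UIA2 or ZUC EIA3, also need
vec.iv populated as the builder below reads it from the session):

static uint32_t
auth_one(struct rte_crypto_raw_dp_ctx *ctx, struct rte_crypto_sgl *sgl,
	 struct rte_crypto_va_iova_ptr *iv,
	 struct rte_crypto_va_iova_ptr *digest, void *user_data)
{
	int32_t status = 0;
	int enq_status;
	struct rte_crypto_sym_vec vec = {
		.num = 1,
		.src_sgl = sgl,
		.iv = iv,
		.digest = digest,	/* written on generation, compared
					 * against on verification */
		.status = &status,
	};
	union rte_crypto_sym_ofs ofs = { .raw = 0 }; /* hash all data */

	return rte_cryptodev_raw_enqueue_burst(ctx, &vec, ofs, &user_data,
					       &enq_status);
}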

Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
---
 drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h   |  21 ----
 drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c | 114 ++++++++++++++++++--
 2 files changed, 108 insertions(+), 27 deletions(-)

diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h b/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h
index 860c9b6520..f6507855e3 100644
--- a/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h
@@ -231,27 +231,6 @@ typedef struct dpaa2_sec_session_entry {
 
 static const struct rte_cryptodev_capabilities dpaa2_sec_capabilities[] = {
 	/* Symmetric capabilities */
-	{	/* NULL (AUTH) */
-		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
-		{.sym = {
-			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
-			{.auth = {
-				.algo = RTE_CRYPTO_AUTH_NULL,
-				.block_size = 1,
-				.key_size = {
-					.min = 0,
-					.max = 0,
-					.increment = 0
-				},
-				.digest_size = {
-					.min = 0,
-					.max = 0,
-					.increment = 0
-				},
-				.iv_size = { 0 }
-			}, },
-		}, },
-	},
 	{	/* MD5 */
 		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
 		{.sym = {
diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c b/drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c
index 10cb4247b0..4c11a79d90 100644
--- a/drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c
@@ -11,6 +11,8 @@
 #include "dpaa2_sec_priv.h"
 #include "dpaa2_sec_logs.h"
 
+#include <desc/algo.h>
+
 struct dpaa2_sec_raw_dp_ctx {
 	dpaa2_sec_session *session;
 	uint32_t tail;
@@ -73,14 +75,114 @@ build_raw_dp_auth_fd(uint8_t *drv_ctx,
 		       void *userdata,
 		       struct qbman_fd *fd)
 {
-	RTE_SET_USED(drv_ctx);
-	RTE_SET_USED(sgl);
 	RTE_SET_USED(iv);
-	RTE_SET_USED(digest);
 	RTE_SET_USED(auth_iv);
-	RTE_SET_USED(ofs);
-	RTE_SET_USED(userdata);
-	RTE_SET_USED(fd);
+
+	dpaa2_sec_session *sess =
+		((struct dpaa2_sec_raw_dp_ctx *)drv_ctx)->session;
+	struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
+	struct sec_flow_context *flc;
+	int total_len = 0, data_len = 0, data_offset;
+	uint8_t *old_digest;
+	struct ctxt_priv *priv = sess->ctxt;
+	unsigned int i;
+
+	for (i = 0; i < sgl->num; i++)
+		total_len += sgl->vec[i].len;
+
+	data_len = total_len - ofs.ofs.auth.head - ofs.ofs.auth.tail;
+	data_offset = ofs.ofs.auth.head;
+
+	if (sess->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
+		sess->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
+		if ((data_len & 7) || (data_offset & 7)) {
+			DPAA2_SEC_ERR("AUTH: len/offset must be full bytes");
+			return -ENOTSUP;
+		}
+
+		data_len = data_len >> 3;
+		data_offset = data_offset >> 3;
+	}
+	fle = (struct qbman_fle *)rte_malloc(NULL,
+		FLE_SG_MEM_SIZE(2 * sgl->num),
+			RTE_CACHE_LINE_SIZE);
+	if (unlikely(!fle)) {
+		DPAA2_SEC_ERR("AUTH SG: Memory alloc failed for SGE");
+		return -ENOMEM;
+	}
+	memset(fle, 0, FLE_SG_MEM_SIZE(2*sgl->num));
+	/* first FLE entry used to store mbuf and session ctxt */
+	DPAA2_SET_FLE_ADDR(fle, (size_t)userdata);
+	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
+	op_fle = fle + 1;
+	ip_fle = fle + 2;
+	sge = fle + 3;
+
+	flc = &priv->flc_desc[DESC_INITFINAL].flc;
+
+	/* sg FD */
+	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
+	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
+	DPAA2_SET_FD_COMPOUND_FMT(fd);
+
+	/* o/p fle */
+	DPAA2_SET_FLE_ADDR(op_fle,
+			DPAA2_VADDR_TO_IOVA(digest->va));
+	op_fle->length = sess->digest_length;
+
+	/* i/p fle */
+	DPAA2_SET_FLE_SG_EXT(ip_fle);
+	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
+	ip_fle->length = data_len;
+
+	if (sess->iv.length) {
+		uint8_t *iv_ptr;
+
+		iv_ptr = rte_crypto_op_ctod_offset(userdata, uint8_t *,
+						sess->iv.offset);
+
+		if (sess->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
+			iv_ptr = conv_to_snow_f9_iv(iv_ptr);
+			sge->length = 12;
+		} else if (sess->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
+			iv_ptr = conv_to_zuc_eia_iv(iv_ptr);
+			sge->length = 8;
+		} else {
+			sge->length = sess->iv.length;
+		}
+		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
+		ip_fle->length += sge->length;
+		sge++;
+	}
+	/* i/p 1st seg */
+	DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
+	DPAA2_SET_FLE_OFFSET(sge, data_offset);
+
+	if (data_len <= (int)(sgl->vec[0].len - data_offset)) {
+		sge->length = data_len;
+		data_len = 0;
+	} else {
+		sge->length = sgl->vec[0].len - data_offset;
+		for (i = 1; i < sgl->num; i++) {
+			sge++;
+			DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
+			DPAA2_SET_FLE_OFFSET(sge, 0);
+			sge->length = sgl->vec[i].len;
+		}
+	}
+	if (sess->dir == DIR_DEC) {
+		/* Digest verification case */
+		sge++;
+		old_digest = (uint8_t *)(sge + 1);
+		rte_memcpy(old_digest, digest->va,
+			sess->digest_length);
+		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_digest));
+		sge->length = sess->digest_length;
+		ip_fle->length += sess->digest_length;
+	}
+	DPAA2_SET_FLE_FIN(sge);
+	DPAA2_SET_FLE_FIN(ip_fle);
+	DPAA2_SET_FD_LEN(fd, ip_fle->length);
 
 	return 0;
 }
-- 
2.17.1


* [dpdk-dev] [RFC 07/16] crypto/dpaa2_sec: support AUTHENC with raw buffer APIs
From: Hemant Agrawal @ 2021-08-12  7:12 UTC
  To: dev, gakhil; +Cc: Gagandeep Singh

From: Gagandeep Singh <g.singh@nxp.com>

This patch adds AUTHENC (cipher + auth chaining) support with the raw buffer APIs.
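
For chained cipher+auth the authenticated region covers the ciphered
region plus an auth-only header (and possibly a trailer); the builder
below packs both auth-only lengths into one word for the SEC
descriptor. Condensed from the diff (illustrative helper):

static uint32_t
authenc_only_len(union rte_crypto_sym_ofs ofs)
{
	uint16_t auth_hdr_len = ofs.ofs.cipher.head - ofs.ofs.auth.head;
	uint16_t auth_tail_len = ofs.ofs.auth.tail;

	return ((uint32_t)auth_tail_len << 16) | auth_hdr_len;
}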

Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
---
 drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c | 128 ++++++++++++++++++--
 1 file changed, 121 insertions(+), 7 deletions(-)

diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c b/drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c
index 4c11a79d90..79ba0ecb81 100644
--- a/drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c
@@ -31,14 +31,128 @@ build_raw_dp_chain_fd(uint8_t *drv_ctx,
 		       void *userdata,
 		       struct qbman_fd *fd)
 {
-	RTE_SET_USED(drv_ctx);
-	RTE_SET_USED(sgl);
-	RTE_SET_USED(iv);
-	RTE_SET_USED(digest);
 	RTE_SET_USED(auth_iv);
-	RTE_SET_USED(ofs);
-	RTE_SET_USED(userdata);
-	RTE_SET_USED(fd);
+
+	dpaa2_sec_session *sess =
+		((struct dpaa2_sec_raw_dp_ctx *)drv_ctx)->session;
+	struct ctxt_priv *priv = sess->ctxt;
+	struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
+	struct sec_flow_context *flc;
+	int data_len = 0, auth_len = 0, cipher_len = 0;
+	unsigned int i = 0;
+	uint16_t auth_hdr_len = ofs.ofs.cipher.head -
+				ofs.ofs.auth.head;
+
+	uint16_t auth_tail_len = ofs.ofs.auth.tail;
+	uint32_t auth_only_len = (auth_tail_len << 16) | auth_hdr_len;
+	int icv_len = sess->digest_length;
+	uint8_t *old_icv;
+	uint8_t *iv_ptr = iv->va;
+
+	for (i = 0; i < sgl->num; i++)
+		data_len += sgl->vec[i].len;
+
+	cipher_len = data_len - ofs.ofs.cipher.head - ofs.ofs.cipher.tail;
+	auth_len = data_len - ofs.ofs.auth.head - ofs.ofs.auth.tail;
+	/* first FLE entry used to store session ctxt */
+	fle = (struct qbman_fle *)rte_malloc(NULL,
+			FLE_SG_MEM_SIZE(2 * sgl->num),
+			RTE_CACHE_LINE_SIZE);
+	if (unlikely(!fle)) {
+		DPAA2_SEC_ERR("AUTHENC SG: Memory alloc failed for SGE");
+		return -ENOMEM;
+	}
+	memset(fle, 0, FLE_SG_MEM_SIZE(2 * sgl->num));
+	DPAA2_SET_FLE_ADDR(fle, (size_t)userdata);
+	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
+
+	op_fle = fle + 1;
+	ip_fle = fle + 2;
+	sge = fle + 3;
+
+	/* Save the shared descriptor */
+	flc = &priv->flc_desc[0].flc;
+
+	/* Configure FD as a FRAME LIST */
+	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
+	DPAA2_SET_FD_COMPOUND_FMT(fd);
+	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
+
+	/* Configure Output FLE with Scatter/Gather Entry */
+	DPAA2_SET_FLE_SG_EXT(op_fle);
+	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));
+
+	if (auth_only_len)
+		DPAA2_SET_FLE_INTERNAL_JD(op_fle, auth_only_len);
+
+	op_fle->length = (sess->dir == DIR_ENC) ?
+			(cipher_len + icv_len) :
+			cipher_len;
+
+	/* Configure Output SGE for Encap/Decap */
+	DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
+	DPAA2_SET_FLE_OFFSET(sge, ofs.ofs.auth.head);
+	sge->length = sgl->vec[0].len - ofs.ofs.auth.head;
+
+	/* o/p segs */
+	for (i = 1; i < sgl->num; i++) {
+		sge++;
+		DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
+		DPAA2_SET_FLE_OFFSET(sge, 0);
+		sge->length = sgl->vec[i].len;
+	}
+
+	if (sess->dir == DIR_ENC) {
+		sge++;
+		DPAA2_SET_FLE_ADDR(sge,
+			digest->iova);
+		sge->length = icv_len;
+	}
+	DPAA2_SET_FLE_FIN(sge);
+
+	sge++;
+
+	/* Configure Input FLE with Scatter/Gather Entry */
+	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
+	DPAA2_SET_FLE_SG_EXT(ip_fle);
+	DPAA2_SET_FLE_FIN(ip_fle);
+
+	ip_fle->length = (sess->dir == DIR_ENC) ?
+			(auth_len + sess->iv.length) :
+			(auth_len + sess->iv.length +
+			icv_len);
+
+	/* Configure Input SGE for Encap/Decap */
+	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
+	sge->length = sess->iv.length;
+
+	sge++;
+	DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
+	DPAA2_SET_FLE_OFFSET(sge, ofs.ofs.auth.head);
+	sge->length = sgl->vec[0].len - ofs.ofs.auth.head;
+
+	for (i = 1; i < sgl->num; i++) {
+		sge++;
+		DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
+		DPAA2_SET_FLE_OFFSET(sge, 0);
+		sge->length = sgl->vec[i].len;
+	}
+
+	if (sess->dir == DIR_DEC) {
+		sge++;
+		old_icv = (uint8_t *)(sge + 1);
+		memcpy(old_icv, digest->va,
+			icv_len);
+		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
+		sge->length = icv_len;
+	}
+
+	DPAA2_SET_FLE_FIN(sge);
+	if (auth_only_len) {
+		DPAA2_SET_FLE_INTERNAL_JD(ip_fle, auth_only_len);
+		DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
+	}
+	DPAA2_SET_FD_LEN(fd, ip_fle->length);
 
 	return 0;
 }
-- 
2.17.1


* [dpdk-dev] [RFC 08/16] crypto/dpaa2_sec: support AEAD with raw buffer APIs
From: Hemant Agrawal @ 2021-08-12  7:12 UTC
  To: dev, gakhil; +Cc: Gagandeep Singh

From: Gagandeep Singh <g.singh@nxp.com>

Add raw vector API support for AEAD algorithms.
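
The compound FD built below lays the AEAD frames out as follows
(summary of the diff; encrypt direction shown):

/*
 * input FLE  (SG): IV | AAD (auth_only_len bytes) | plaintext
 * output FLE (SG): ciphertext | ICV (digest_length bytes)
 *
 * For decryption the received ICV is instead appended to the input
 * frame and checked by SEC; the output frame carries only plaintext.
 */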

Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
---
 drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c | 249 +++++++++++++++++---
 1 file changed, 214 insertions(+), 35 deletions(-)

diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c b/drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c
index 79ba0ecb81..41f24c93b6 100644
--- a/drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c
@@ -167,14 +167,126 @@ build_raw_dp_aead_fd(uint8_t *drv_ctx,
 		       void *userdata,
 		       struct qbman_fd *fd)
 {
-	RTE_SET_USED(drv_ctx);
-	RTE_SET_USED(sgl);
-	RTE_SET_USED(iv);
-	RTE_SET_USED(digest);
-	RTE_SET_USED(auth_iv);
-	RTE_SET_USED(ofs);
-	RTE_SET_USED(userdata);
-	RTE_SET_USED(fd);
+	dpaa2_sec_session *sess =
+		((struct dpaa2_sec_raw_dp_ctx *)drv_ctx)->session;
+	struct ctxt_priv *priv = sess->ctxt;
+	struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
+	struct sec_flow_context *flc;
+	uint32_t auth_only_len = sess->ext_params.aead_ctxt.auth_only_len;
+	int icv_len = sess->digest_length;
+	uint8_t *old_icv;
+	uint8_t *IV_ptr = iv->va;
+	unsigned int i = 0;
+	int data_len = 0, aead_len = 0;
+
+	for (i = 0; i < sgl->num; i++)
+		data_len += sgl->vec[i].len;
+
+	aead_len = data_len - ofs.ofs.cipher.head - ofs.ofs.cipher.tail;
+
+	/* first FLE entry used to store mbuf and session ctxt */
+	fle = (struct qbman_fle *)rte_malloc(NULL,
+			FLE_SG_MEM_SIZE(2 * sgl->num),
+			RTE_CACHE_LINE_SIZE);
+	if (unlikely(!fle)) {
+		DPAA2_SEC_ERR("GCM SG: Memory alloc failed for SGE");
+		return -ENOMEM;
+	}
+	memset(fle, 0, FLE_SG_MEM_SIZE(2 * sgl->num));
+	DPAA2_SET_FLE_ADDR(fle, (size_t)userdata);
+	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
+
+	op_fle = fle + 1;
+	ip_fle = fle + 2;
+	sge = fle + 3;
+
+	/* Save the shared descriptor */
+	flc = &priv->flc_desc[0].flc;
+
+	/* Configure FD as a FRAME LIST */
+	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
+	DPAA2_SET_FD_COMPOUND_FMT(fd);
+	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
+
+	/* Configure Output FLE with Scatter/Gather Entry */
+	DPAA2_SET_FLE_SG_EXT(op_fle);
+	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));
+
+	if (auth_only_len)
+		DPAA2_SET_FLE_INTERNAL_JD(op_fle, auth_only_len);
+
+	op_fle->length = (sess->dir == DIR_ENC) ?
+			(aead_len + icv_len) :
+			aead_len;
+
+	/* Configure Output SGE for Encap/Decap */
+	DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
+	DPAA2_SET_FLE_OFFSET(sge, ofs.ofs.cipher.head);
+	sge->length = sgl->vec[0].len - ofs.ofs.cipher.head;
+
+	/* o/p segs */
+	for (i = 1; i < sgl->num; i++) {
+		sge++;
+		DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
+		DPAA2_SET_FLE_OFFSET(sge, 0);
+		sge->length = sgl->vec[i].len;
+	}
+
+	if (sess->dir == DIR_ENC) {
+		sge++;
+		DPAA2_SET_FLE_ADDR(sge, digest->iova);
+		sge->length = icv_len;
+	}
+	DPAA2_SET_FLE_FIN(sge);
+
+	sge++;
+
+	/* Configure Input FLE with Scatter/Gather Entry */
+	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
+	DPAA2_SET_FLE_SG_EXT(ip_fle);
+	DPAA2_SET_FLE_FIN(ip_fle);
+	ip_fle->length = (sess->dir == DIR_ENC) ?
+		(aead_len + sess->iv.length + auth_only_len) :
+		(aead_len + sess->iv.length + auth_only_len +
+		icv_len);
+
+	/* Configure Input SGE for Encap/Decap */
+	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(IV_ptr));
+	sge->length = sess->iv.length;
+
+	sge++;
+	if (auth_only_len) {
+		DPAA2_SET_FLE_ADDR(sge, auth_iv->iova);
+		sge->length = auth_only_len;
+		sge++;
+	}
+
+	DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
+	DPAA2_SET_FLE_OFFSET(sge, ofs.ofs.cipher.head);
+	sge->length = sgl->vec[0].len - ofs.ofs.cipher.head;
+
+	/* i/p segs */
+	for (i = 1; i < sgl->num; i++) {
+		sge++;
+		DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
+		DPAA2_SET_FLE_OFFSET(sge, 0);
+		sge->length = sgl->vec[i].len;
+	}
+
+	if (sess->dir == DIR_DEC) {
+		sge++;
+		old_icv = (uint8_t *)(sge + 1);
+		memcpy(old_icv,  digest->va, icv_len);
+		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
+		sge->length = icv_len;
+	}
+
+	DPAA2_SET_FLE_FIN(sge);
+	if (auth_only_len) {
+		DPAA2_SET_FLE_INTERNAL_JD(ip_fle, auth_only_len);
+		DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
+	}
+	DPAA2_SET_FD_LEN(fd, ip_fle->length);
 
 	return 0;
 }
@@ -311,36 +423,104 @@ build_raw_dp_proto_fd(uint8_t *drv_ctx,
 		       void *userdata,
 		       struct qbman_fd *fd)
 {
-	RTE_SET_USED(drv_ctx);
-	RTE_SET_USED(sgl);
 	RTE_SET_USED(iv);
 	RTE_SET_USED(digest);
 	RTE_SET_USED(auth_iv);
 	RTE_SET_USED(ofs);
-	RTE_SET_USED(userdata);
-	RTE_SET_USED(fd);
 
-	return 0;
-}
+	dpaa2_sec_session *sess =
+		((struct dpaa2_sec_raw_dp_ctx *)drv_ctx)->session;
+	struct ctxt_priv *priv = sess->ctxt;
+	struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
+	struct sec_flow_context *flc;
+	uint32_t in_len = 0, out_len = 0, i;
 
-static int
-build_raw_dp_proto_compound_fd(uint8_t *drv_ctx,
-		       struct rte_crypto_sgl *sgl,
-		       struct rte_crypto_va_iova_ptr *iv,
-		       struct rte_crypto_va_iova_ptr *digest,
-		       struct rte_crypto_va_iova_ptr *auth_iv,
-		       union rte_crypto_sym_ofs ofs,
-		       void *userdata,
-		       struct qbman_fd *fd)
-{
-	RTE_SET_USED(drv_ctx);
-	RTE_SET_USED(sgl);
-	RTE_SET_USED(iv);
-	RTE_SET_USED(digest);
-	RTE_SET_USED(auth_iv);
-	RTE_SET_USED(ofs);
-	RTE_SET_USED(userdata);
-	RTE_SET_USED(fd);
+	/* first FLE entry used to store mbuf and session ctxt */
+	fle = (struct qbman_fle *)rte_malloc(NULL,
+			FLE_SG_MEM_SIZE(2 * sgl->num),
+			RTE_CACHE_LINE_SIZE);
+	if (unlikely(!fle)) {
+		DPAA2_SEC_DP_ERR("Proto:SG: Memory alloc failed for SGE");
+		return -ENOMEM;
+	}
+	memset(fle, 0, FLE_SG_MEM_SIZE(2 * sgl->num));
+	DPAA2_SET_FLE_ADDR(fle, (size_t)userdata);
+	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
+
+	/* Save the shared descriptor */
+	flc = &priv->flc_desc[0].flc;
+	op_fle = fle + 1;
+	ip_fle = fle + 2;
+	sge = fle + 3;
+
+	DPAA2_SET_FD_IVP(fd);
+	DPAA2_SET_FLE_IVP(op_fle);
+	DPAA2_SET_FLE_IVP(ip_fle);
+
+	/* Configure FD as a FRAME LIST */
+	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
+	DPAA2_SET_FD_COMPOUND_FMT(fd);
+	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
+
+	/* Configure Output FLE with Scatter/Gather Entry */
+	DPAA2_SET_FLE_SG_EXT(op_fle);
+	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));
+
+	/* Configure Output SGE for Encap/Decap */
+	DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
+	DPAA2_SET_FLE_OFFSET(sge, 0);
+	sge->length = sgl->vec[0].len;
+	out_len += sge->length;
+	/* o/p segs */
+	for (i = 1; i < sgl->num; i++) {
+		sge++;
+		DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
+		DPAA2_SET_FLE_OFFSET(sge, 0);
+		sge->length = sgl->vec[i].len;
+		out_len += sge->length;
+	}
+	sge->length = sgl->vec[i - 1].tot_len;
+	out_len += sge->length;
+
+	DPAA2_SET_FLE_FIN(sge);
+	op_fle->length = out_len;
+
+	sge++;
+
+	/* Configure Input FLE with Scatter/Gather Entry */
+	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
+	DPAA2_SET_FLE_SG_EXT(ip_fle);
+	DPAA2_SET_FLE_FIN(ip_fle);
+
+	/* Configure input SGE for Encap/Decap */
+	DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
+	DPAA2_SET_FLE_OFFSET(sge, 0);
+	sge->length = sgl->vec[0].len;
+	in_len += sge->length;
+	/* i/p segs */
+	for (i = 1; i < sgl->num; i++) {
+		sge++;
+		DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
+		DPAA2_SET_FLE_OFFSET(sge, 0);
+		sge->length = sgl->vec[i].len;
+		in_len += sge->length;
+	}
+
+	ip_fle->length = in_len;
+	DPAA2_SET_FLE_FIN(sge);
+
+	/* In case of PDCP, per packet HFN is stored in
+	 * mbuf priv after sym_op.
+	 */
+	if (sess->ctxt_type == DPAA2_SEC_PDCP && sess->pdcp.hfn_ovd) {
+		uint32_t hfn_ovd = *(uint32_t *)((uint8_t *)userdata +
+				sess->pdcp.hfn_ovd_offset);
+		/* enable HFN override */
+		DPAA2_SET_FLE_INTERNAL_JD(ip_fle, hfn_ovd);
+		DPAA2_SET_FLE_INTERNAL_JD(op_fle, hfn_ovd);
+		DPAA2_SET_FD_INTERNAL_JD(fd, hfn_ovd);
+	}
+	DPAA2_SET_FD_LEN(fd, ip_fle->length);
 
 	return 0;
 }
@@ -793,10 +973,9 @@ dpaa2_sec_configure_raw_dp_ctx(struct rte_cryptodev *dev, uint16_t qp_id,
 		sess->build_raw_dp_fd = build_raw_dp_auth_fd;
 	else if (sess->ctxt_type == DPAA2_SEC_CIPHER)
 		sess->build_raw_dp_fd = build_raw_dp_cipher_fd;
-	else if (sess->ctxt_type == DPAA2_SEC_IPSEC)
+	else if (sess->ctxt_type == DPAA2_SEC_IPSEC ||
+		sess->ctxt_type == DPAA2_SEC_PDCP)
 		sess->build_raw_dp_fd = build_raw_dp_proto_fd;
-	else if (sess->ctxt_type == DPAA2_SEC_PDCP)
-		sess->build_raw_dp_fd = build_raw_dp_proto_compound_fd;
 	else
 		return -ENOTSUP;
 	dp_ctx = (struct dpaa2_sec_raw_dp_ctx *)raw_dp_ctx->drv_ctx_data;
-- 
2.17.1


* [dpdk-dev] [RFC 09/16] crypto/dpaa2_sec: support OOP with raw buffer API
From: Hemant Agrawal @ 2021-08-12  7:12 UTC
  To: dev, gakhil; +Cc: Gagandeep Singh

From: Gagandeep Singh <g.singh@nxp.com>

Add support for out-of-place (OOP) processing with raw vector APIs.

Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
---
 drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h   |   1 +
 drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c | 156 +++++++++++++++-----
 2 files changed, 116 insertions(+), 41 deletions(-)

diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h b/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h
index f6507855e3..db72c11a5f 100644
--- a/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h
@@ -179,6 +179,7 @@ typedef int (*dpaa2_sec_build_fd_t)(
 
 typedef int (*dpaa2_sec_build_raw_dp_fd_t)(uint8_t *drv_ctx,
 		       struct rte_crypto_sgl *sgl,
+		       struct rte_crypto_sgl *dest_sgl,
 		       struct rte_crypto_va_iova_ptr *iv,
 		       struct rte_crypto_va_iova_ptr *digest,
 		       struct rte_crypto_va_iova_ptr *auth_iv,
diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c b/drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c
index 41f24c93b6..87a1a10268 100644
--- a/drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c
@@ -24,6 +24,7 @@ struct dpaa2_sec_raw_dp_ctx {
 static int
 build_raw_dp_chain_fd(uint8_t *drv_ctx,
 		       struct rte_crypto_sgl *sgl,
+		       struct rte_crypto_sgl *dest_sgl,
 		       struct rte_crypto_va_iova_ptr *iv,
 		       struct rte_crypto_va_iova_ptr *digest,
 		       struct rte_crypto_va_iova_ptr *auth_iv,
@@ -89,17 +90,33 @@ build_raw_dp_chain_fd(uint8_t *drv_ctx,
 			(cipher_len + icv_len) :
 			cipher_len;
 
-	/* Configure Output SGE for Encap/Decap */
-	DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
-	DPAA2_SET_FLE_OFFSET(sge, ofs.ofs.auth.head);
-	sge->length = sgl->vec[0].len - ofs.ofs.auth.head;
+	/* OOP */
+	if (dest_sgl) {
+		/* Configure Output SGE for Encap/Decap */
+		DPAA2_SET_FLE_ADDR(sge, dest_sgl->vec[0].iova);
+		DPAA2_SET_FLE_OFFSET(sge, ofs.ofs.cipher.head);
+		sge->length = dest_sgl->vec[0].len - ofs.ofs.cipher.head;
 
-	/* o/p segs */
-	for (i = 1; i < sgl->num; i++) {
-		sge++;
-		DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
-		DPAA2_SET_FLE_OFFSET(sge, 0);
-		sge->length = sgl->vec[i].len;
+		/* o/p segs */
+		for (i = 1; i < dest_sgl->num; i++) {
+			sge++;
+			DPAA2_SET_FLE_ADDR(sge, dest_sgl->vec[i].iova);
+			DPAA2_SET_FLE_OFFSET(sge, 0);
+			sge->length = dest_sgl->vec[i].len;
+		}
+	} else {
+		/* Configure Output SGE for Encap/Decap */
+		DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
+		DPAA2_SET_FLE_OFFSET(sge, ofs.ofs.cipher.head);
+		sge->length = sgl->vec[0].len - ofs.ofs.cipher.head;
+
+		/* o/p segs */
+		for (i = 1; i < sgl->num; i++) {
+			sge++;
+			DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
+			DPAA2_SET_FLE_OFFSET(sge, 0);
+			sge->length = sgl->vec[i].len;
+		}
 	}
 
 	if (sess->dir == DIR_ENC) {
@@ -160,6 +177,7 @@ build_raw_dp_chain_fd(uint8_t *drv_ctx,
 static int
 build_raw_dp_aead_fd(uint8_t *drv_ctx,
 		       struct rte_crypto_sgl *sgl,
+		       struct rte_crypto_sgl *dest_sgl,
 		       struct rte_crypto_va_iova_ptr *iv,
 		       struct rte_crypto_va_iova_ptr *digest,
 		       struct rte_crypto_va_iova_ptr *auth_iv,
@@ -219,17 +237,33 @@ build_raw_dp_aead_fd(uint8_t *drv_ctx,
 			(aead_len + icv_len) :
 			aead_len;
 
-	/* Configure Output SGE for Encap/Decap */
-	DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
-	DPAA2_SET_FLE_OFFSET(sge, ofs.ofs.cipher.head);
-	sge->length = sgl->vec[0].len - ofs.ofs.cipher.head;
+	/* OOP */
+	if (dest_sgl) {
+		/* Configure Output SGE for Encap/Decap */
+		DPAA2_SET_FLE_ADDR(sge, dest_sgl->vec[0].iova);
+		DPAA2_SET_FLE_OFFSET(sge, ofs.ofs.cipher.head);
+		sge->length = dest_sgl->vec[0].len - ofs.ofs.cipher.head;
 
-	/* o/p segs */
-	for (i = 1; i < sgl->num; i++) {
-		sge++;
-		DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
-		DPAA2_SET_FLE_OFFSET(sge, 0);
-		sge->length = sgl->vec[i].len;
+		/* o/p segs */
+		for (i = 1; i < dest_sgl->num; i++) {
+			sge++;
+			DPAA2_SET_FLE_ADDR(sge, dest_sgl->vec[i].iova);
+			DPAA2_SET_FLE_OFFSET(sge, 0);
+			sge->length = dest_sgl->vec[i].len;
+		}
+	} else {
+		/* Configure Output SGE for Encap/Decap */
+		DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
+		DPAA2_SET_FLE_OFFSET(sge, ofs.ofs.cipher.head);
+		sge->length = sgl->vec[0].len - ofs.ofs.cipher.head;
+
+		/* o/p segs */
+		for (i = 1; i < sgl->num; i++) {
+			sge++;
+			DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
+			DPAA2_SET_FLE_OFFSET(sge, 0);
+			sge->length = sgl->vec[i].len;
+		}
 	}
 
 	if (sess->dir == DIR_ENC) {
@@ -294,6 +328,7 @@ build_raw_dp_aead_fd(uint8_t *drv_ctx,
 static int
 build_raw_dp_auth_fd(uint8_t *drv_ctx,
 		       struct rte_crypto_sgl *sgl,
+		       struct rte_crypto_sgl *dest_sgl,
 		       struct rte_crypto_va_iova_ptr *iv,
 		       struct rte_crypto_va_iova_ptr *digest,
 		       struct rte_crypto_va_iova_ptr *auth_iv,
@@ -303,6 +338,7 @@ build_raw_dp_auth_fd(uint8_t *drv_ctx,
 {
 	RTE_SET_USED(iv);
 	RTE_SET_USED(auth_iv);
+	RTE_SET_USED(dest_sgl);
 
 	dpaa2_sec_session *sess =
 		((struct dpaa2_sec_raw_dp_ctx *)drv_ctx)->session;
@@ -416,6 +452,7 @@ build_raw_dp_auth_fd(uint8_t *drv_ctx,
 static int
 build_raw_dp_proto_fd(uint8_t *drv_ctx,
 		       struct rte_crypto_sgl *sgl,
+		       struct rte_crypto_sgl *dest_sgl,
 		       struct rte_crypto_va_iova_ptr *iv,
 		       struct rte_crypto_va_iova_ptr *digest,
 		       struct rte_crypto_va_iova_ptr *auth_iv,
@@ -466,20 +503,39 @@ build_raw_dp_proto_fd(uint8_t *drv_ctx,
 	DPAA2_SET_FLE_SG_EXT(op_fle);
 	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));
 
-	/* Configure Output SGE for Encap/Decap */
-	DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
-	DPAA2_SET_FLE_OFFSET(sge, 0);
-	sge->length = sgl->vec[0].len;
-	out_len += sge->length;
-	/* o/p segs */
-	for (i = 1; i < sgl->num; i++) {
-		sge++;
-		DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
+	/* OOP */
+	if (dest_sgl) {
+		/* Configure Output SGE for Encap/Decap */
+		DPAA2_SET_FLE_ADDR(sge, dest_sgl->vec[0].iova);
 		DPAA2_SET_FLE_OFFSET(sge, 0);
-		sge->length = sgl->vec[i].len;
+		sge->length = dest_sgl->vec[0].len;
+		out_len += sge->length;
+		/* o/p segs */
+		for (i = 1; i < dest_sgl->num; i++) {
+			sge++;
+			DPAA2_SET_FLE_ADDR(sge, dest_sgl->vec[i].iova);
+			DPAA2_SET_FLE_OFFSET(sge, 0);
+			sge->length = dest_sgl->vec[i].len;
+			out_len += sge->length;
+		}
+		sge->length = dest_sgl->vec[i - 1].tot_len;
+
+	} else {
+		/* Configure Output SGE for Encap/Decap */
+		DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
+		DPAA2_SET_FLE_OFFSET(sge, 0);
+		sge->length = sgl->vec[0].len;
 		out_len += sge->length;
+		/* o/p segs */
+		for (i = 1; i < sgl->num; i++) {
+			sge++;
+			DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
+			DPAA2_SET_FLE_OFFSET(sge, 0);
+			sge->length = sgl->vec[i].len;
+			out_len += sge->length;
+		}
+		sge->length = sgl->vec[i - 1].tot_len;
 	}
-	sge->length = sgl->vec[i - 1].tot_len;
 	out_len += sge->length;
 
 	DPAA2_SET_FLE_FIN(sge);
@@ -528,6 +584,7 @@ build_raw_dp_proto_fd(uint8_t *drv_ctx,
 static int
 build_raw_dp_cipher_fd(uint8_t *drv_ctx,
 		       struct rte_crypto_sgl *sgl,
+		       struct rte_crypto_sgl *dest_sgl,
 		       struct rte_crypto_va_iova_ptr *iv,
 		       struct rte_crypto_va_iova_ptr *digest,
 		       struct rte_crypto_va_iova_ptr *auth_iv,
@@ -593,17 +650,33 @@ build_raw_dp_cipher_fd(uint8_t *drv_ctx,
 	op_fle->length = data_len;
 	DPAA2_SET_FLE_SG_EXT(op_fle);
 
-	/* o/p 1st seg */
-	DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
-	DPAA2_SET_FLE_OFFSET(sge, data_offset);
-	sge->length = sgl->vec[0].len - data_offset;
+	/* OOP */
+	if (dest_sgl) {
+		/* o/p 1st seg */
+		DPAA2_SET_FLE_ADDR(sge, dest_sgl->vec[0].iova);
+		DPAA2_SET_FLE_OFFSET(sge, data_offset);
+		sge->length = dest_sgl->vec[0].len - data_offset;
 
-	/* o/p segs */
-	for (i = 1; i < sgl->num; i++) {
-		sge++;
-		DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
-		DPAA2_SET_FLE_OFFSET(sge, 0);
-		sge->length = sgl->vec[i].len;
+		/* o/p segs */
+		for (i = 1; i < dest_sgl->num; i++) {
+			sge++;
+			DPAA2_SET_FLE_ADDR(sge, dest_sgl->vec[i].iova);
+			DPAA2_SET_FLE_OFFSET(sge, 0);
+			sge->length = dest_sgl->vec[i].len;
+		}
+	} else {
+		/* o/p 1st seg */
+		DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
+		DPAA2_SET_FLE_OFFSET(sge, data_offset);
+		sge->length = sgl->vec[0].len - data_offset;
+
+		/* o/p segs */
+		for (i = 1; i < sgl->num; i++) {
+			sge++;
+			DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
+			DPAA2_SET_FLE_OFFSET(sge, 0);
+			sge->length = sgl->vec[i].len;
+		}
 	}
 	DPAA2_SET_FLE_FIN(sge);
 
@@ -706,6 +779,7 @@ dpaa2_sec_raw_enqueue_burst(void *qp_data, uint8_t *drv_ctx,
 			memset(&fd_arr[loop], 0, sizeof(struct qbman_fd));
 			ret = sess->build_raw_dp_fd(drv_ctx,
 						    &vec->src_sgl[loop],
+						    &vec->dest_sgl[loop],
 						    &vec->iv[loop],
 						    &vec->digest[loop],
 						    &vec->auth_iv[loop],
-- 
2.17.1
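
A minimal sketch of the out-of-place call this patch enables (hedged:
single-segment source and destination; src_*, dst_* and len are
placeholders, and iv/digest/auth_iv are set up exactly as in the in-place
case):

	struct rte_crypto_vec in_seg = {
		.base = src_va, .iova = src_iova,
		.len = len, .tot_len = len,
	};
	struct rte_crypto_vec out_seg = {
		.base = dst_va, .iova = dst_iova,
		.len = len, .tot_len = len,
	};
	struct rte_crypto_sgl src_sgl = { .vec = &in_seg, .num = 1 };
	struct rte_crypto_sgl dest_sgl = { .vec = &out_seg, .num = 1 };
	struct rte_crypto_sym_vec vec = { 0 };

	vec.num = 1;
	vec.src_sgl = &src_sgl;
	vec.dest_sgl = &dest_sgl;	/* NULL keeps the in-place path */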


^ permalink raw reply	[flat|nested] 115+ messages in thread

* [dpdk-dev] [RFC 10/16] crypto/dpaa2_sec: fix ctx memset size
  2021-08-12  7:12 [dpdk-dev] [RFC 00/16] crypto: add raw vector support in DPAAx Hemant Agrawal
                   ` (8 preceding siblings ...)
  2021-08-12  7:12 ` [dpdk-dev] [RFC 09/16] crypto/dpaa2_sec: support OOP with raw buffer API Hemant Agrawal
@ 2021-08-12  7:12 ` Hemant Agrawal
  2021-08-12  7:12 ` [dpdk-dev] [RFC 11/16] crypto/dpaa2_sec: enhance error checks with raw buffer APIs Hemant Agrawal
                   ` (6 subsequent siblings)
  16 siblings, 0 replies; 115+ messages in thread
From: Hemant Agrawal @ 2021-08-12  7:12 UTC (permalink / raw)
  To: dev, gakhil; +Cc: Gagandeep Singh

From: Gagandeep Singh <g.singh@nxp.com>

The driver was doing a memset on more memory than it had allocated.
This patch fixes the memset size.

Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
---
 drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c b/drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c
index 87a1a10268..51e316cc00 100644
--- a/drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c
@@ -1019,8 +1019,7 @@ dpaa2_sec_configure_raw_dp_ctx(struct rte_cryptodev *dev, uint16_t qp_id,
 	RTE_SET_USED(qp_id);
 
 	if (!is_update) {
-		memset(raw_dp_ctx, 0, sizeof(*raw_dp_ctx) +
-				sizeof(dpaa2_sec_session));
+		memset(raw_dp_ctx, 0, sizeof(*raw_dp_ctx));
 		raw_dp_ctx->qp_data = dev->data->queue_pairs[qp_id];
 	}
 
-- 
2.17.1
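
For reference, a sketch of how a caller sizes and initializes this context
(hedged; error checks omitted and "sess" is a placeholder session). The
driver's private area reported by sym_get_raw_dp_ctx_size() lives in
drv_ctx_data directly behind the generic struct, so the old memset of
sizeof(*raw_dp_ctx) plus a whole dpaa2_sec_session wrote past the end of
this allocation:

	union rte_cryptodev_session_ctx sess_ctx = { .crypto_sess = sess };
	size_t sz = sizeof(struct rte_crypto_raw_dp_ctx) +
			rte_cryptodev_get_raw_dp_ctx_size(dev_id);
	struct rte_crypto_raw_dp_ctx *raw_ctx = rte_zmalloc(NULL, sz, 0);

	rte_cryptodev_configure_raw_dp_ctx(dev_id, qp_id, raw_ctx,
			RTE_CRYPTO_OP_WITH_SESSION, sess_ctx,
			0 /* is_update */);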


^ permalink raw reply	[flat|nested] 115+ messages in thread

* [dpdk-dev] [RFC 11/16] crypto/dpaa2_sec: enhance error checks with raw buffer APIs
  2021-08-12  7:12 [dpdk-dev] [RFC 00/16] crypto: add raw vector support in DPAAx Hemant Agrawal
                   ` (9 preceding siblings ...)
  2021-08-12  7:12 ` [dpdk-dev] [RFC 10/16] crypto/dpaa2_sec: fix ctx memset size Hemant Agrawal
@ 2021-08-12  7:12 ` Hemant Agrawal
  2021-08-12  7:12 ` [dpdk-dev] [RFC 12/16] crypto/dpaa_sec: support raw datapath APIs Hemant Agrawal
                   ` (5 subsequent siblings)
  16 siblings, 0 replies; 115+ messages in thread
From: Hemant Agrawal @ 2021-08-12  7:12 UTC (permalink / raw)
  To: dev, gakhil; +Cc: Gagandeep Singh

From: Gagandeep Singh <g.singh@nxp.com>

This patch improves the error checks and the support of
wireless algos (SNOW3G, ZUC) with raw buffers.

Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
---
 drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c | 31 ++++-----------------
 1 file changed, 6 insertions(+), 25 deletions(-)

diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c b/drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c
index 51e316cc00..25364454c9 100644
--- a/drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c
@@ -355,16 +355,7 @@ build_raw_dp_auth_fd(uint8_t *drv_ctx,
 	data_len = total_len - ofs.ofs.auth.head - ofs.ofs.auth.tail;
 	data_offset = ofs.ofs.auth.head;
 
-	if (sess->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
-		sess->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
-		if ((data_len & 7) || (data_offset & 7)) {
-			DPAA2_SEC_ERR("AUTH: len/offset must be full bytes");
-			return -ENOTSUP;
-		}
-
-		data_len = data_len >> 3;
-		data_offset = data_offset >> 3;
-	}
+	/* For SNOW3G and ZUC, lengths in bits are supported */
 	fle = (struct qbman_fle *)rte_malloc(NULL,
 		FLE_SG_MEM_SIZE(2 * sgl->num),
 			RTE_CACHE_LINE_SIZE);
@@ -609,17 +600,7 @@ build_raw_dp_cipher_fd(uint8_t *drv_ctx,
 	data_len = total_len - ofs.ofs.cipher.head - ofs.ofs.cipher.tail;
 	data_offset = ofs.ofs.cipher.head;
 
-	if (sess->cipher_alg == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
-		sess->cipher_alg == RTE_CRYPTO_CIPHER_ZUC_EEA3) {
-		if ((data_len & 7) || (data_offset & 7)) {
-			DPAA2_SEC_ERR("CIPHER: len/offset must be full bytes");
-			return -ENOTSUP;
-		}
-
-		data_len = data_len >> 3;
-		data_offset = data_offset >> 3;
-	}
-
+	/* For SNOW3G and ZUC, lengths in bits are supported */
 	/* first FLE entry used to store mbuf and session ctxt */
 	fle = (struct qbman_fle *)rte_malloc(NULL,
 			FLE_SG_MEM_SIZE(2*sgl->num),
@@ -878,7 +859,7 @@ dpaa2_sec_raw_dequeue_burst(void *qp_data, uint8_t *drv_ctx,
 	struct qbman_result *dq_storage;
 	uint32_t fqid = dpaa2_qp->rx_vq.fqid;
 	int ret, num_rx = 0;
-	uint8_t is_last = 0, status;
+	uint8_t is_last = 0, status, is_success = 0;
 	struct qbman_swp *swp;
 	const struct qbman_fd *fd;
 	struct qbman_pull_desc pulldesc;
@@ -957,11 +938,11 @@ dpaa2_sec_raw_dequeue_burst(void *qp_data, uint8_t *drv_ctx,
 			/* TODO Parse SEC errors */
 			DPAA2_SEC_ERR("SEC returned Error - %x",
 				      fd->simple.frc);
-			status = RTE_CRYPTO_OP_STATUS_ERROR;
+			is_success = false;
 		} else {
-			status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+			is_success = true;
 		}
-		post_dequeue(user_data, num_rx, status);
+		post_dequeue(user_data, num_rx, is_success);
 
 		num_rx++;
 		dq_storage++;
-- 
2.17.1
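
The status change above matters because the raw API reports per-op results
through a post-dequeue callback that takes a boolean success flag rather
than an rte_crypto_op_status value. A sketch of a matching callback,
assuming the application enqueued its rte_crypto_op pointers as the per-op
user data:

	/* Matches rte_cryptodev_raw_post_dequeue_t; called once per
	 * dequeued descriptor.
	 */
	static void
	post_deq_cb(void *user_data, uint32_t index, uint8_t is_op_success)
	{
		struct rte_crypto_op *op = user_data;

		RTE_SET_USED(index);
		op->status = is_op_success ? RTE_CRYPTO_OP_STATUS_SUCCESS :
				RTE_CRYPTO_OP_STATUS_ERROR;
	}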


^ permalink raw reply	[flat|nested] 115+ messages in thread

* [dpdk-dev] [RFC 12/16] crypto/dpaa_sec: support raw datapath APIs
  2021-08-12  7:12 [dpdk-dev] [RFC 00/16] crypto: add raw vector support in DPAAx Hemant Agrawal
                   ` (10 preceding siblings ...)
  2021-08-12  7:12 ` [dpdk-dev] [RFC 11/16] crypto/dpaa2_sec: enhance error checks with raw buffer APIs Hemant Agrawal
@ 2021-08-12  7:12 ` Hemant Agrawal
  2021-08-12  7:12 ` [dpdk-dev] [RFC 13/16] crypto/dpaa_sec: support authonly and chain with raw APIs Hemant Agrawal
                   ` (4 subsequent siblings)
  16 siblings, 0 replies; 115+ messages in thread
From: Hemant Agrawal @ 2021-08-12  7:12 UTC (permalink / raw)
  To: dev, gakhil; +Cc: Gagandeep Singh

From: Gagandeep Singh <g.singh@nxp.com>

This patch adds the raw vector API framework for the dpaa_sec driver.

Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
---
 drivers/crypto/dpaa_sec/dpaa_sec.c        |  23 +-
 drivers/crypto/dpaa_sec/dpaa_sec.h        |  39 +-
 drivers/crypto/dpaa_sec/dpaa_sec_raw_dp.c | 485 ++++++++++++++++++++++
 drivers/crypto/dpaa_sec/meson.build       |   4 +-
 4 files changed, 537 insertions(+), 14 deletions(-)
 create mode 100644 drivers/crypto/dpaa_sec/dpaa_sec_raw_dp.c

diff --git a/drivers/crypto/dpaa_sec/dpaa_sec.c b/drivers/crypto/dpaa_sec/dpaa_sec.c
index 19d4684e24..7534f80195 100644
--- a/drivers/crypto/dpaa_sec/dpaa_sec.c
+++ b/drivers/crypto/dpaa_sec/dpaa_sec.c
@@ -45,10 +45,7 @@
 #include <dpaa_sec_log.h>
 #include <dpaax_iova_table.h>
 
-static uint8_t cryptodev_driver_id;
-
-static int
-dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess);
+uint8_t dpaa_cryptodev_driver_id;
 
 static inline void
 dpaa_sec_op_ending(struct dpaa_sec_op_ctx *ctx)
@@ -1745,8 +1742,8 @@ dpaa_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
 			case RTE_CRYPTO_OP_WITH_SESSION:
 				ses = (dpaa_sec_session *)
 					get_sym_session_private_data(
-							op->sym->session,
-							cryptodev_driver_id);
+						op->sym->session,
+						dpaa_cryptodev_driver_id);
 				break;
 #ifdef RTE_LIB_SECURITY
 			case RTE_CRYPTO_OP_SECURITY_SESSION:
@@ -2307,7 +2304,7 @@ dpaa_sec_detach_rxq(struct dpaa_sec_dev_private *qi, struct qman_fq *fq)
 	return -1;
 }
 
-static int
+int
 dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess)
 {
 	int ret;
@@ -3115,7 +3112,7 @@ dpaa_sec_dev_infos_get(struct rte_cryptodev *dev,
 		info->feature_flags = dev->feature_flags;
 		info->capabilities = dpaa_sec_capabilities;
 		info->sym.max_nb_sessions = internals->max_nb_sessions;
-		info->driver_id = cryptodev_driver_id;
+		info->driver_id = dpaa_cryptodev_driver_id;
 	}
 }
 
@@ -3311,7 +3308,10 @@ static struct rte_cryptodev_ops crypto_ops = {
 	.queue_pair_release   = dpaa_sec_queue_pair_release,
 	.sym_session_get_size     = dpaa_sec_sym_session_get_size,
 	.sym_session_configure    = dpaa_sec_sym_session_configure,
-	.sym_session_clear        = dpaa_sec_sym_session_clear
+	.sym_session_clear        = dpaa_sec_sym_session_clear,
+	/* Raw data-path API related operations */
+	.sym_get_raw_dp_ctx_size = dpaa_sec_get_dp_ctx_size,
+	.sym_configure_raw_dp_ctx = dpaa_sec_configure_raw_dp_ctx,
 };
 
 #ifdef RTE_LIB_SECURITY
@@ -3362,7 +3362,7 @@ dpaa_sec_dev_init(struct rte_cryptodev *cryptodev)
 
 	PMD_INIT_FUNC_TRACE();
 
-	cryptodev->driver_id = cryptodev_driver_id;
+	cryptodev->driver_id = dpaa_cryptodev_driver_id;
 	cryptodev->dev_ops = &crypto_ops;
 
 	cryptodev->enqueue_burst = dpaa_sec_enqueue_burst;
@@ -3371,6 +3371,7 @@ dpaa_sec_dev_init(struct rte_cryptodev *cryptodev)
 			RTE_CRYPTODEV_FF_HW_ACCELERATED |
 			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
 			RTE_CRYPTODEV_FF_SECURITY |
+			RTE_CRYPTODEV_FF_SYM_RAW_DP |
 			RTE_CRYPTODEV_FF_IN_PLACE_SGL |
 			RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT |
 			RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
@@ -3536,5 +3537,5 @@ static struct cryptodev_driver dpaa_sec_crypto_drv;
 
 RTE_PMD_REGISTER_DPAA(CRYPTODEV_NAME_DPAA_SEC_PMD, rte_dpaa_sec_driver);
 RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa_sec_crypto_drv, rte_dpaa_sec_driver.driver,
-		cryptodev_driver_id);
+		dpaa_cryptodev_driver_id);
 RTE_LOG_REGISTER(dpaa_logtype_sec, pmd.crypto.dpaa, NOTICE);
diff --git a/drivers/crypto/dpaa_sec/dpaa_sec.h b/drivers/crypto/dpaa_sec/dpaa_sec.h
index 368699678b..f6e83d46e7 100644
--- a/drivers/crypto/dpaa_sec/dpaa_sec.h
+++ b/drivers/crypto/dpaa_sec/dpaa_sec.h
@@ -19,6 +19,8 @@
 #define AES_CTR_IV_LEN		16
 #define AES_GCM_IV_LEN		12
 
+extern uint8_t dpaa_cryptodev_driver_id;
+
 #define DPAA_IPv6_DEFAULT_VTC_FLOW	0x60000000
 
 /* Minimum job descriptor consists of a oneword job descriptor HEADER and
@@ -117,6 +119,24 @@ struct sec_pdcp_ctxt {
 	uint32_t hfn_threshold;	/*!< HFN Threshold for key renegotiation */
 };
 #endif
+
+typedef int (*dpaa_sec_build_fd_t)(
+	void *qp, uint8_t *drv_ctx, struct rte_crypto_vec *data_vec,
+	uint16_t n_data_vecs, union rte_crypto_sym_ofs ofs,
+	struct rte_crypto_va_iova_ptr *iv,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *aad_or_auth_iv,
+	void *user_data);
+
+typedef struct dpaa_sec_job* (*dpaa_sec_build_raw_dp_fd_t)(uint8_t *drv_ctx,
+			struct rte_crypto_sgl *sgl,
+			struct rte_crypto_sgl *dest_sgl,
+			struct rte_crypto_va_iova_ptr *iv,
+			struct rte_crypto_va_iova_ptr *digest,
+			struct rte_crypto_va_iova_ptr *auth_iv,
+			union rte_crypto_sym_ofs ofs,
+			void *userdata);
+
 typedef struct dpaa_sec_session_entry {
 	struct sec_cdb cdb;	/**< cmd block associated with qp */
 	struct dpaa_sec_qp *qp[MAX_DPAA_CORES];
@@ -129,6 +149,8 @@ typedef struct dpaa_sec_session_entry {
 #ifdef RTE_LIB_SECURITY
 	enum rte_security_session_protocol proto_alg; /*!< Security Algorithm*/
 #endif
+	dpaa_sec_build_fd_t build_fd;
+	dpaa_sec_build_raw_dp_fd_t build_raw_dp_fd;
 	union {
 		struct {
 			uint8_t *data;	/**< pointer to key data */
@@ -211,7 +233,10 @@ struct dpaa_sec_job {
 #define DPAA_MAX_NB_MAX_DIGEST	32
 struct dpaa_sec_op_ctx {
 	struct dpaa_sec_job job;
-	struct rte_crypto_op *op;
+	union {
+		struct rte_crypto_op *op;
+		void *userdata;
+	};
 	struct rte_mempool *ctx_pool; /* mempool pointer for dpaa_sec_op_ctx */
 	uint32_t fd_status;
 	int64_t vtop_offset;
@@ -803,4 +828,16 @@ calc_chksum(void *buffer, int len)
 	return  result;
 }
 
+int
+dpaa_sec_configure_raw_dp_ctx(struct rte_cryptodev *dev, uint16_t qp_id,
+	struct rte_crypto_raw_dp_ctx *raw_dp_ctx,
+	enum rte_crypto_op_sess_type sess_type,
+	union rte_cryptodev_session_ctx session_ctx, uint8_t is_update);
+
+int
+dpaa_sec_get_dp_ctx_size(struct rte_cryptodev *dev);
+
+int
+dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess);
+
 #endif /* _DPAA_SEC_H_ */
diff --git a/drivers/crypto/dpaa_sec/dpaa_sec_raw_dp.c b/drivers/crypto/dpaa_sec/dpaa_sec_raw_dp.c
new file mode 100644
index 0000000000..ee0ca2e0d5
--- /dev/null
+++ b/drivers/crypto/dpaa_sec/dpaa_sec_raw_dp.c
@@ -0,0 +1,485 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2021 NXP
+ */
+
+#include <rte_byteorder.h>
+#include <rte_common.h>
+#include <rte_cryptodev_pmd.h>
+#include <rte_crypto.h>
+#include <rte_cryptodev.h>
+#ifdef RTE_LIB_SECURITY
+#include <rte_security_driver.h>
+#endif
+
+/* RTA header files */
+#include <desc/ipsec.h>
+
+#include <rte_dpaa_bus.h>
+#include <dpaa_sec.h>
+#include <dpaa_sec_log.h>
+
+struct dpaa_sec_raw_dp_ctx {
+	dpaa_sec_session *session;
+	uint32_t tail;
+	uint32_t head;
+	uint16_t cached_enqueue;
+	uint16_t cached_dequeue;
+};
+
+static __rte_always_inline int
+dpaa_sec_raw_enqueue_done(void *qp_data, uint8_t *drv_ctx, uint32_t n)
+{
+	RTE_SET_USED(qp_data);
+	RTE_SET_USED(drv_ctx);
+	RTE_SET_USED(n);
+
+	return 0;
+}
+
+static __rte_always_inline int
+dpaa_sec_raw_dequeue_done(void *qp_data, uint8_t *drv_ctx, uint32_t n)
+{
+	RTE_SET_USED(qp_data);
+	RTE_SET_USED(drv_ctx);
+	RTE_SET_USED(n);
+
+	return 0;
+}
+
+static inline struct dpaa_sec_op_ctx *
+dpaa_sec_alloc_raw_ctx(dpaa_sec_session *ses, int sg_count)
+{
+	struct dpaa_sec_op_ctx *ctx;
+	int i, retval;
+
+	retval = rte_mempool_get(
+			ses->qp[rte_lcore_id() % MAX_DPAA_CORES]->ctx_pool,
+			(void **)(&ctx));
+	if (!ctx || retval) {
+		DPAA_SEC_DP_WARN("Alloc sec descriptor failed!");
+		return NULL;
+	}
+	/*
+	 * Clear SG memory. There are 16 SG entries of 16 bytes each.
+	 * One call to dcbz_64() clears 64 bytes, hence it is called 4 times
+	 * to clear all the SG entries. dpaa_sec_alloc_ctx() is called for
+	 * each packet, and memset is costlier than dcbz_64().
+	 */
+	for (i = 0; i < sg_count && i < MAX_JOB_SG_ENTRIES; i += 4)
+		dcbz_64(&ctx->job.sg[i]);
+
+	ctx->ctx_pool = ses->qp[rte_lcore_id() % MAX_DPAA_CORES]->ctx_pool;
+	ctx->vtop_offset = (size_t) ctx - rte_mempool_virt2iova(ctx);
+
+	return ctx;
+}
+
+static struct dpaa_sec_job *
+build_dpaa_raw_dp_auth_fd(uint8_t *drv_ctx,
+			struct rte_crypto_sgl *sgl,
+			struct rte_crypto_sgl *dest_sgl,
+			struct rte_crypto_va_iova_ptr *iv,
+			struct rte_crypto_va_iova_ptr *digest,
+			struct rte_crypto_va_iova_ptr *auth_iv,
+			union rte_crypto_sym_ofs ofs,
+			void *userdata)
+{
+	RTE_SET_USED(drv_ctx);
+	RTE_SET_USED(sgl);
+	RTE_SET_USED(dest_sgl);
+	RTE_SET_USED(iv);
+	RTE_SET_USED(digest);
+	RTE_SET_USED(auth_iv);
+	RTE_SET_USED(ofs);
+	RTE_SET_USED(userdata);
+
+	return NULL;
+}
+
+static struct dpaa_sec_job *
+build_dpaa_raw_dp_cipher_fd(uint8_t *drv_ctx,
+			struct rte_crypto_sgl *sgl,
+			struct rte_crypto_sgl *dest_sgl,
+			struct rte_crypto_va_iova_ptr *iv,
+			struct rte_crypto_va_iova_ptr *digest,
+			struct rte_crypto_va_iova_ptr *auth_iv,
+			union rte_crypto_sym_ofs ofs,
+			void *userdata)
+{
+	RTE_SET_USED(digest);
+	RTE_SET_USED(auth_iv);
+	dpaa_sec_session *ses =
+		((struct dpaa_sec_raw_dp_ctx *)drv_ctx)->session;
+	struct dpaa_sec_job *cf;
+	struct dpaa_sec_op_ctx *ctx;
+	struct qm_sg_entry *sg, *out_sg, *in_sg;
+	unsigned int i;
+	uint8_t *IV_ptr = iv->va;
+	int data_len, total_len = 0, data_offset;
+
+	for (i = 0; i < sgl->num; i++)
+		total_len += sgl->vec[i].len;
+
+	data_len = total_len - ofs.ofs.cipher.head - ofs.ofs.cipher.tail;
+	data_offset = ofs.ofs.cipher.head;
+
+	/* Support lengths in bits only for SNOW3G and ZUC */
+	if (sgl->num > MAX_SG_ENTRIES) {
+		DPAA_SEC_DP_ERR("Cipher: Max sec segs supported is %d",
+				MAX_SG_ENTRIES);
+		return NULL;
+	}
+
+	ctx = dpaa_sec_alloc_raw_ctx(ses, sgl->num * 2 + 3);
+	if (!ctx)
+		return NULL;
+
+	cf = &ctx->job;
+	ctx->userdata = (void *)userdata;
+
+	/* output */
+	out_sg = &cf->sg[0];
+	out_sg->extension = 1;
+	out_sg->length = data_len;
+	qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(&cf->sg[2]));
+	cpu_to_hw_sg(out_sg);
+
+	if (dest_sgl) {
+		/* 1st seg */
+		sg = &cf->sg[2];
+		qm_sg_entry_set64(sg, dest_sgl->vec[0].iova);
+		sg->length = dest_sgl->vec[0].len - data_offset;
+		sg->offset = data_offset;
+
+		/* Successive segs */
+		for (i = 1; i < dest_sgl->num; i++) {
+			cpu_to_hw_sg(sg);
+			sg++;
+			qm_sg_entry_set64(sg, dest_sgl->vec[i].iova);
+			sg->length = dest_sgl->vec[i].len;
+		}
+	} else {
+		/* 1st seg */
+		sg = &cf->sg[2];
+		qm_sg_entry_set64(sg, sgl->vec[0].iova);
+		sg->length = sgl->vec[0].len - data_offset;
+		sg->offset = data_offset;
+
+		/* Successive segs */
+		for (i = 1; i < sgl->num; i++) {
+			cpu_to_hw_sg(sg);
+			sg++;
+			qm_sg_entry_set64(sg, sgl->vec[i].iova);
+			sg->length = sgl->vec[i].len;
+		}
+
+	}
+	sg->final = 1;
+	cpu_to_hw_sg(sg);
+
+	/* input */
+	in_sg = &cf->sg[1];
+	in_sg->extension = 1;
+	in_sg->final = 1;
+	in_sg->length = data_len + ses->iv.length;
+
+	sg++;
+	qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));
+	cpu_to_hw_sg(in_sg);
+
+	/* IV */
+	qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
+	sg->length = ses->iv.length;
+	cpu_to_hw_sg(sg);
+
+	/* 1st seg */
+	sg++;
+	qm_sg_entry_set64(sg, sgl->vec[0].iova);
+	sg->length = sgl->vec[0].len - data_offset;
+	sg->offset = data_offset;
+
+	/* Successive segs */
+	for (i = 1; i < sgl->num; i++) {
+		cpu_to_hw_sg(sg);
+		sg++;
+		qm_sg_entry_set64(sg, sgl->vec[i].iova);
+		sg->length = sgl->vec[i].len;
+	}
+	sg->final = 1;
+	cpu_to_hw_sg(sg);
+
+	return cf;
+}
+
+static uint32_t
+dpaa_sec_raw_enqueue_burst(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
+	void *user_data[], int *status)
+{
+	/* Function to transmit the frames to the given device and queue pair */
+	uint32_t loop;
+	struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp_data;
+	uint16_t num_tx = 0;
+	struct qm_fd fds[DPAA_SEC_BURST], *fd;
+	uint32_t frames_to_send;
+	struct dpaa_sec_job *cf;
+	dpaa_sec_session *ses =
+			((struct dpaa_sec_raw_dp_ctx *)drv_ctx)->session;
+	uint32_t flags[DPAA_SEC_BURST] = {0};
+	struct qman_fq *inq[DPAA_SEC_BURST];
+
+	if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
+		if (rte_dpaa_portal_init((void *)0)) {
+			DPAA_SEC_ERR("Failure in affining portal");
+			return 0;
+		}
+	}
+
+	while (vec->num) {
+		frames_to_send = (vec->num > DPAA_SEC_BURST) ?
+				DPAA_SEC_BURST : vec->num;
+		for (loop = 0; loop < frames_to_send; loop++) {
+			if (unlikely(!ses->qp[rte_lcore_id() % MAX_DPAA_CORES])) {
+				if (dpaa_sec_attach_sess_q(dpaa_qp, ses)) {
+					frames_to_send = loop;
+					goto send_pkts;
+				}
+			} else if (unlikely(ses->qp[rte_lcore_id() %
+						MAX_DPAA_CORES] != dpaa_qp)) {
+				DPAA_SEC_DP_ERR("Old:sess->qp = %p"
+					" New qp = %p\n",
+					ses->qp[rte_lcore_id() %
+					MAX_DPAA_CORES], dpaa_qp);
+				frames_to_send = loop;
+				goto send_pkts;
+			}
+
+			/* Clear the unused FD fields before sending */
+			fd = &fds[loop];
+			memset(fd, 0, sizeof(struct qm_fd));
+			cf = ses->build_raw_dp_fd(drv_ctx,
+						&vec->src_sgl[loop],
+						&vec->dest_sgl[loop],
+						&vec->iv[loop],
+						&vec->digest[loop],
+						&vec->auth_iv[loop],
+						ofs,
+						user_data[loop]);
+			if (!cf) {
+				DPAA_SEC_ERR("error: Improper packet contents"
+					" for crypto operation");
+				goto skip_tx;
+			}
+			inq[loop] = ses->inq[rte_lcore_id() % MAX_DPAA_CORES];
+			fd->opaque_addr = 0;
+			fd->cmd = 0;
+			qm_fd_addr_set64(fd, rte_dpaa_mem_vtop(cf->sg));
+			fd->_format1 = qm_fd_compound;
+			fd->length29 = 2 * sizeof(struct qm_sg_entry);
+
+			status[loop] = 1;
+		}
+send_pkts:
+		loop = 0;
+		while (loop < frames_to_send) {
+			loop += qman_enqueue_multi_fq(&inq[loop], &fds[loop],
+					&flags[loop], frames_to_send - loop);
+		}
+		vec->num -= frames_to_send;
+		num_tx += frames_to_send;
+	}
+
+skip_tx:
+	dpaa_qp->tx_pkts += num_tx;
+	dpaa_qp->tx_errs += vec->num - num_tx;
+
+	return num_tx;
+}
+
+static int
+dpaa_sec_deq_raw(struct dpaa_sec_qp *qp, void **out_user_data,
+		uint8_t is_user_data_array,
+		rte_cryptodev_raw_post_dequeue_t post_dequeue,
+		int nb_ops)
+{
+	struct qman_fq *fq;
+	unsigned int pkts = 0;
+	int num_rx_bufs, ret;
+	struct qm_dqrr_entry *dq;
+	uint32_t vdqcr_flags = 0;
+	uint8_t is_success = 0;
+
+	fq = &qp->outq;
+	/*
+	 * For a request of fewer than four buffers, we provide the exact
+	 * number of buffers and set the QM_VDQCR_EXACT flag.
+	 * Otherwise we do not set the flag; this can return up to two more
+	 * buffers than requested, so we request two less in this case.
+	 */
+	if (nb_ops < 4) {
+		vdqcr_flags = QM_VDQCR_EXACT;
+		num_rx_bufs = nb_ops;
+	} else {
+		num_rx_bufs = nb_ops > DPAA_MAX_DEQUEUE_NUM_FRAMES ?
+			(DPAA_MAX_DEQUEUE_NUM_FRAMES - 2) : (nb_ops - 2);
+	}
+	ret = qman_set_vdq(fq, num_rx_bufs, vdqcr_flags);
+	if (ret)
+		return 0;
+
+	do {
+		const struct qm_fd *fd;
+		struct dpaa_sec_job *job;
+		struct dpaa_sec_op_ctx *ctx;
+
+		dq = qman_dequeue(fq);
+		if (!dq)
+			continue;
+
+		fd = &dq->fd;
+		/* sg is embedded in an op ctx,
+		 * sg[0] is for output
+		 * sg[1] for input
+		 */
+		job = rte_dpaa_mem_ptov(qm_fd_addr_get64(fd));
+
+		ctx = container_of(job, struct dpaa_sec_op_ctx, job);
+		ctx->fd_status = fd->status;
+		if (is_user_data_array)
+			out_user_data[pkts] = ctx->userdata;
+		else
+			out_user_data[0] = ctx->userdata;
+
+		if (!ctx->fd_status) {
+			is_success = true;
+		} else {
+			is_success = false;
+			DPAA_SEC_DP_WARN("SEC return err:0x%x", ctx->fd_status);
+		}
+		post_dequeue(ctx->op, pkts, is_success);
+		pkts++;
+
+		/* status already reported via post_dequeue; free the ctx memory */
+		rte_mempool_put(ctx->ctx_pool, (void *)ctx);
+
+		qman_dqrr_consume(fq, dq);
+	} while (fq->flags & QMAN_FQ_STATE_VDQCR);
+
+	return pkts;
+}
+
+
+static __rte_always_inline uint32_t
+dpaa_sec_raw_dequeue_burst(void *qp_data, uint8_t *drv_ctx,
+	rte_cryptodev_raw_get_dequeue_count_t get_dequeue_count,
+	uint32_t max_nb_to_dequeue,
+	rte_cryptodev_raw_post_dequeue_t post_dequeue,
+	void **out_user_data, uint8_t is_user_data_array,
+	uint32_t *n_success, int *dequeue_status)
+{
+	RTE_SET_USED(drv_ctx);
+	RTE_SET_USED(get_dequeue_count);
+	uint16_t num_rx;
+	struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp_data;
+	uint32_t nb_ops = max_nb_to_dequeue;
+
+	if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
+		if (rte_dpaa_portal_init((void *)0)) {
+			DPAA_SEC_ERR("Failure in affining portal");
+			return 0;
+		}
+	}
+
+	num_rx = dpaa_sec_deq_raw(dpaa_qp, out_user_data,
+			is_user_data_array, post_dequeue, nb_ops);
+
+	dpaa_qp->rx_pkts += num_rx;
+	*dequeue_status = 1;
+	*n_success = num_rx;
+
+	DPAA_SEC_DP_DEBUG("SEC Received %d Packets\n", num_rx);
+
+	return num_rx;
+}
+
+static __rte_always_inline int
+dpaa_sec_raw_enqueue(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_vec *data_vec,
+	uint16_t n_data_vecs, union rte_crypto_sym_ofs ofs,
+	struct rte_crypto_va_iova_ptr *iv,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *aad_or_auth_iv,
+	void *user_data)
+{
+	RTE_SET_USED(qp_data);
+	RTE_SET_USED(drv_ctx);
+	RTE_SET_USED(data_vec);
+	RTE_SET_USED(n_data_vecs);
+	RTE_SET_USED(ofs);
+	RTE_SET_USED(iv);
+	RTE_SET_USED(digest);
+	RTE_SET_USED(aad_or_auth_iv);
+	RTE_SET_USED(user_data);
+
+	return 0;
+}
+
+static __rte_always_inline void *
+dpaa_sec_raw_dequeue(void *qp_data, uint8_t *drv_ctx, int *dequeue_status,
+	enum rte_crypto_op_status *op_status)
+{
+	RTE_SET_USED(qp_data);
+	RTE_SET_USED(drv_ctx);
+	RTE_SET_USED(dequeue_status);
+	RTE_SET_USED(op_status);
+
+	return NULL;
+}
+
+int
+dpaa_sec_configure_raw_dp_ctx(struct rte_cryptodev *dev, uint16_t qp_id,
+	struct rte_crypto_raw_dp_ctx *raw_dp_ctx,
+	enum rte_crypto_op_sess_type sess_type,
+	union rte_cryptodev_session_ctx session_ctx, uint8_t is_update)
+{
+	dpaa_sec_session *sess;
+	struct dpaa_sec_raw_dp_ctx *dp_ctx;
+	RTE_SET_USED(qp_id);
+
+	if (!is_update) {
+		memset(raw_dp_ctx, 0, sizeof(*raw_dp_ctx));
+		raw_dp_ctx->qp_data = dev->data->queue_pairs[qp_id];
+	}
+
+	if (sess_type == RTE_CRYPTO_OP_SECURITY_SESSION)
+		sess = (dpaa_sec_session *)get_sec_session_private_data(
+				session_ctx.sec_sess);
+	else if (sess_type == RTE_CRYPTO_OP_WITH_SESSION)
+		sess = (dpaa_sec_session *)get_sym_session_private_data(
+			session_ctx.crypto_sess, dpaa_cryptodev_driver_id);
+	else
+		return -ENOTSUP;
+	raw_dp_ctx->dequeue_burst = dpaa_sec_raw_dequeue_burst;
+	raw_dp_ctx->dequeue = dpaa_sec_raw_dequeue;
+	raw_dp_ctx->dequeue_done = dpaa_sec_raw_dequeue_done;
+	raw_dp_ctx->enqueue_burst = dpaa_sec_raw_enqueue_burst;
+	raw_dp_ctx->enqueue = dpaa_sec_raw_enqueue;
+	raw_dp_ctx->enqueue_done = dpaa_sec_raw_enqueue_done;
+
+	if (sess->ctxt == DPAA_SEC_CIPHER)
+		sess->build_raw_dp_fd = build_dpaa_raw_dp_cipher_fd;
+	else if (sess->ctxt == DPAA_SEC_AUTH)
+		sess->build_raw_dp_fd = build_dpaa_raw_dp_auth_fd;
+	else
+		return -ENOTSUP;
+	dp_ctx = (struct dpaa_sec_raw_dp_ctx *)raw_dp_ctx->drv_ctx_data;
+	dp_ctx->session = sess;
+
+	return 0;
+}
+
+int
+dpaa_sec_get_dp_ctx_size(__rte_unused struct rte_cryptodev *dev)
+{
+	return sizeof(struct dpaa_sec_raw_dp_ctx);
+}
diff --git a/drivers/crypto/dpaa_sec/meson.build b/drivers/crypto/dpaa_sec/meson.build
index 44fd60e5ae..f87ad6c7e7 100644
--- a/drivers/crypto/dpaa_sec/meson.build
+++ b/drivers/crypto/dpaa_sec/meson.build
@@ -1,5 +1,5 @@
 # SPDX-License-Identifier: BSD-3-Clause
-# Copyright 2018 NXP
+# Copyright 2018-2021 NXP
 
 if not is_linux
     build = false
@@ -7,7 +7,7 @@ if not is_linux
 endif
 
 deps += ['bus_dpaa', 'mempool_dpaa', 'security']
-sources = files('dpaa_sec.c')
+sources = files('dpaa_sec.c', 'dpaa_sec_raw_dp.c')
 
 includes += include_directories('../../bus/dpaa/include')
 includes += include_directories('../../common/dpaax')
-- 
2.17.1
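
A sketch of the dequeue side this framework plugs into (hedged; BURST and
post_deq_cb are placeholders). Passing NULL for the get_dequeue_count
callback makes the layer fall back to max_nb_to_dequeue, which is the path
dpaa_sec_raw_dequeue_burst() above takes; its dequeue_done handler is a
no-op, so the trailing call is only for API symmetry:

	void *out_ud[BURST];	/* per-op user_data pointers returned here */
	uint32_t n, n_success;
	int deq_status;

	n = rte_cryptodev_raw_dequeue_burst(raw_ctx,
			NULL /* get_dequeue_count */, BURST,
			post_deq_cb, out_ud,
			1 /* is_user_data_array */,
			&n_success, &deq_status);
	rte_cryptodev_raw_dequeue_done(raw_ctx, n);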


^ permalink raw reply	[flat|nested] 115+ messages in thread

* [dpdk-dev] [RFC 13/16] crypto/dpaa_sec: support authonly and chain with raw APIs
  2021-08-12  7:12 [dpdk-dev] [RFC 00/16] crypto: add raw vector support in DPAAx Hemant Agrawal
                   ` (11 preceding siblings ...)
  2021-08-12  7:12 ` [dpdk-dev] [RFC 12/16] crypto/dpaa_sec: support raw datapath APIs Hemant Agrawal
@ 2021-08-12  7:12 ` Hemant Agrawal
  2021-08-12  7:12 ` [dpdk-dev] [RFC 14/16] crypto/dpaa_sec: support AEAD and proto " Hemant Agrawal
                   ` (3 subsequent siblings)
  16 siblings, 0 replies; 115+ messages in thread
From: Hemant Agrawal @ 2021-08-12  7:12 UTC (permalink / raw)
  To: dev, gakhil; +Cc: Gagandeep Singh

From: Gagandeep Singh <g.singh@nxp.com>

This patch improves the raw vector support in the dpaa_sec driver
for the auth-only and cipher-auth chain use cases.

Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
---
 drivers/crypto/dpaa_sec/dpaa_sec.h        |   3 +-
 drivers/crypto/dpaa_sec/dpaa_sec_raw_dp.c | 296 +++++++++++++++++++++-
 2 files changed, 287 insertions(+), 12 deletions(-)

diff --git a/drivers/crypto/dpaa_sec/dpaa_sec.h b/drivers/crypto/dpaa_sec/dpaa_sec.h
index f6e83d46e7..2e0ab93ff0 100644
--- a/drivers/crypto/dpaa_sec/dpaa_sec.h
+++ b/drivers/crypto/dpaa_sec/dpaa_sec.h
@@ -135,7 +135,8 @@ typedef struct dpaa_sec_job* (*dpaa_sec_build_raw_dp_fd_t)(uint8_t *drv_ctx,
 			struct rte_crypto_va_iova_ptr *digest,
 			struct rte_crypto_va_iova_ptr *auth_iv,
 			union rte_crypto_sym_ofs ofs,
-			void *userdata);
+			void *userdata,
+			struct qm_fd *fd);
 
 typedef struct dpaa_sec_session_entry {
 	struct sec_cdb cdb;	/**< cmd block associated with qp */
diff --git a/drivers/crypto/dpaa_sec/dpaa_sec_raw_dp.c b/drivers/crypto/dpaa_sec/dpaa_sec_raw_dp.c
index ee0ca2e0d5..4e34629f18 100644
--- a/drivers/crypto/dpaa_sec/dpaa_sec_raw_dp.c
+++ b/drivers/crypto/dpaa_sec/dpaa_sec_raw_dp.c
@@ -12,6 +12,7 @@
 #endif
 
 /* RTA header files */
+#include <desc/algo.h>
 #include <desc/ipsec.h>
 
 #include <rte_dpaa_bus.h>
@@ -26,6 +27,17 @@ struct dpaa_sec_raw_dp_ctx {
 	uint16_t cached_dequeue;
 };
 
+static inline int
+is_encode(dpaa_sec_session *ses)
+{
+	return ses->dir == DIR_ENC;
+}
+
+static inline int is_decode(dpaa_sec_session *ses)
+{
+	return ses->dir == DIR_DEC;
+}
+
 static __rte_always_inline int
 dpaa_sec_raw_enqueue_done(void *qp_data, uint8_t *drv_ctx, uint32_t n)
 {
@@ -82,18 +94,276 @@ build_dpaa_raw_dp_auth_fd(uint8_t *drv_ctx,
 			struct rte_crypto_va_iova_ptr *digest,
 			struct rte_crypto_va_iova_ptr *auth_iv,
 			union rte_crypto_sym_ofs ofs,
-			void *userdata)
+			void *userdata,
+			struct qm_fd *fd)
 {
-	RTE_SET_USED(drv_ctx);
-	RTE_SET_USED(sgl);
 	RTE_SET_USED(dest_sgl);
 	RTE_SET_USED(iv);
-	RTE_SET_USED(digest);
 	RTE_SET_USED(auth_iv);
-	RTE_SET_USED(ofs);
-	RTE_SET_USED(userdata);
+	RTE_SET_USED(fd);
 
-	return NULL;
+	dpaa_sec_session *ses =
+		((struct dpaa_sec_raw_dp_ctx *)drv_ctx)->session;
+	struct dpaa_sec_job *cf;
+	struct dpaa_sec_op_ctx *ctx;
+	struct qm_sg_entry *sg, *out_sg, *in_sg;
+	phys_addr_t start_addr;
+	uint8_t *old_digest, extra_segs;
+	int data_len, data_offset, total_len = 0;
+	unsigned int i;
+
+	for (i = 0; i < sgl->num; i++)
+		total_len += sgl->vec[i].len;
+
+	data_len = total_len - ofs.ofs.auth.head - ofs.ofs.auth.tail;
+	data_offset =  ofs.ofs.auth.head;
+
+	/* Support lengths in bits only for SNOW3G and ZUC */
+
+	if (is_decode(ses))
+		extra_segs = 3;
+	else
+		extra_segs = 2;
+
+	if (sgl->num > MAX_SG_ENTRIES) {
+		DPAA_SEC_DP_ERR("Auth: Max sec segs supported is %d",
+				MAX_SG_ENTRIES);
+		return NULL;
+	}
+	ctx = dpaa_sec_alloc_raw_ctx(ses, sgl->num * 2 + extra_segs);
+	if (!ctx)
+		return NULL;
+
+	cf = &ctx->job;
+	ctx->userdata = (void *)userdata;
+	old_digest = ctx->digest;
+
+	/* output */
+	out_sg = &cf->sg[0];
+	qm_sg_entry_set64(out_sg, digest->iova);
+	out_sg->length = ses->digest_length;
+	cpu_to_hw_sg(out_sg);
+
+	/* input */
+	in_sg = &cf->sg[1];
+	/* need to extend the input to a compound frame */
+	in_sg->extension = 1;
+	in_sg->final = 1;
+	in_sg->length = data_len;
+	qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(&cf->sg[2]));
+
+	/* 1st seg */
+	sg = in_sg + 1;
+
+	if (ses->iv.length) {
+		uint8_t *iv_ptr;
+
+		iv_ptr = rte_crypto_op_ctod_offset(userdata, uint8_t *,
+						   ses->iv.offset);
+
+		if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
+			iv_ptr = conv_to_snow_f9_iv(iv_ptr);
+			sg->length = 12;
+		} else if (ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
+			iv_ptr = conv_to_zuc_eia_iv(iv_ptr);
+			sg->length = 8;
+		} else {
+			sg->length = ses->iv.length;
+		}
+		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(iv_ptr));
+		in_sg->length += sg->length;
+		cpu_to_hw_sg(sg);
+		sg++;
+	}
+
+	qm_sg_entry_set64(sg, sgl->vec[0].iova);
+	sg->offset = data_offset;
+
+	if (data_len <= (int)(sgl->vec[0].len - data_offset)) {
+		sg->length = data_len;
+	} else {
+		sg->length = sgl->vec[0].len - data_offset;
+
+		/* remaining i/p segs */
+		for (i = 1; i < sgl->num; i++) {
+			cpu_to_hw_sg(sg);
+			sg++;
+			qm_sg_entry_set64(sg, sgl->vec[i].iova);
+			if (data_len > (int)sgl->vec[i].len)
+				sg->length = sgl->vec[i].len;
+			else
+				sg->length = data_len;
+
+			data_len = data_len - sg->length;
+			if (data_len < 1)
+				break;
+		}
+	}
+
+	if (is_decode(ses)) {
+		/* Digest verification case */
+		cpu_to_hw_sg(sg);
+		sg++;
+		rte_memcpy(old_digest, digest->va,
+				ses->digest_length);
+		start_addr = rte_dpaa_mem_vtop(old_digest);
+		qm_sg_entry_set64(sg, start_addr);
+		sg->length = ses->digest_length;
+		in_sg->length += ses->digest_length;
+	}
+	sg->final = 1;
+	cpu_to_hw_sg(sg);
+	cpu_to_hw_sg(in_sg);
+
+	return cf;
+}
+
+static inline struct dpaa_sec_job *
+build_dpaa_raw_dp_chain_fd(uint8_t *drv_ctx,
+			struct rte_crypto_sgl *sgl,
+			struct rte_crypto_sgl *dest_sgl,
+			struct rte_crypto_va_iova_ptr *iv,
+			struct rte_crypto_va_iova_ptr *digest,
+			struct rte_crypto_va_iova_ptr *auth_iv,
+			union rte_crypto_sym_ofs ofs,
+			void *userdata,
+			struct qm_fd *fd)
+{
+	RTE_SET_USED(auth_iv);
+
+	dpaa_sec_session *ses =
+		((struct dpaa_sec_raw_dp_ctx *)drv_ctx)->session;
+	struct dpaa_sec_job *cf;
+	struct dpaa_sec_op_ctx *ctx;
+	struct qm_sg_entry *sg, *out_sg, *in_sg;
+	uint8_t *IV_ptr = iv->va;
+	unsigned int i;
+	uint16_t auth_hdr_len = ofs.ofs.cipher.head -
+				ofs.ofs.auth.head;
+	uint16_t auth_tail_len = ofs.ofs.auth.tail;
+	uint32_t auth_only_len = (auth_tail_len << 16) | auth_hdr_len;
+	int data_len = 0, auth_len = 0, cipher_len = 0;
+
+	for (i = 0; i < sgl->num; i++)
+		data_len += sgl->vec[i].len;
+
+	cipher_len = data_len - ofs.ofs.cipher.head - ofs.ofs.cipher.tail;
+	auth_len = data_len - ofs.ofs.auth.head - ofs.ofs.auth.tail;
+
+	if (sgl->num > MAX_SG_ENTRIES) {
+		DPAA_SEC_DP_ERR("Cipher-Auth: Max sec segs supported is %d",
+				MAX_SG_ENTRIES);
+		return NULL;
+	}
+
+	ctx = dpaa_sec_alloc_raw_ctx(ses, sgl->num * 2 + 4);
+	if (!ctx)
+		return NULL;
+
+	cf = &ctx->job;
+	ctx->userdata = (void *)userdata;
+
+	rte_prefetch0(cf->sg);
+
+	/* output */
+	out_sg = &cf->sg[0];
+	out_sg->extension = 1;
+	if (is_encode(ses))
+		out_sg->length = cipher_len + ses->digest_length;
+	else
+		out_sg->length = cipher_len;
+
+	/* output sg entries */
+	sg = &cf->sg[2];
+	qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(sg));
+	cpu_to_hw_sg(out_sg);
+
+	/* 1st seg */
+	if (dest_sgl) {
+		qm_sg_entry_set64(sg, dest_sgl->vec[0].iova);
+		sg->length = dest_sgl->vec[0].len - ofs.ofs.cipher.head;
+		sg->offset = ofs.ofs.cipher.head;
+
+		/* Successive segs */
+		for (i = 1; i < dest_sgl->num; i++) {
+			cpu_to_hw_sg(sg);
+			sg++;
+			qm_sg_entry_set64(sg, dest_sgl->vec[i].iova);
+			sg->length = dest_sgl->vec[i].len;
+		}
+	} else {
+		qm_sg_entry_set64(sg, sgl->vec[0].iova);
+		sg->length = sgl->vec[0].len - ofs.ofs.cipher.head;
+		sg->offset = ofs.ofs.cipher.head;
+
+		/* Successive segs */
+		for (i = 1; i < sgl->num; i++) {
+			cpu_to_hw_sg(sg);
+			sg++;
+			qm_sg_entry_set64(sg, sgl->vec[i].iova);
+			sg->length = sgl->vec[i].len;
+		}
+	}
+
+	if (is_encode(ses)) {
+		cpu_to_hw_sg(sg);
+		/* set auth output */
+		sg++;
+		qm_sg_entry_set64(sg, digest->iova);
+		sg->length = ses->digest_length;
+	}
+	sg->final = 1;
+	cpu_to_hw_sg(sg);
+
+	/* input */
+	in_sg = &cf->sg[1];
+	in_sg->extension = 1;
+	in_sg->final = 1;
+	if (is_encode(ses))
+		in_sg->length = ses->iv.length + auth_len;
+	else
+		in_sg->length = ses->iv.length + auth_len
+						+ ses->digest_length;
+
+	/* input sg entries */
+	sg++;
+	qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));
+	cpu_to_hw_sg(in_sg);
+
+	/* 1st seg IV */
+	qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
+	sg->length = ses->iv.length;
+	cpu_to_hw_sg(sg);
+
+	/* 2nd seg */
+	sg++;
+	qm_sg_entry_set64(sg, sgl->vec[0].iova);
+	sg->length = sgl->vec[0].len - ofs.ofs.auth.head;
+	sg->offset = ofs.ofs.auth.head;
+
+	/* Successive segs */
+	for (i = 1; i < sgl->num; i++) {
+		cpu_to_hw_sg(sg);
+		sg++;
+		qm_sg_entry_set64(sg, sgl->vec[i].iova);
+		sg->length = sgl->vec[i].len;
+	}
+
+	if (is_decode(ses)) {
+		cpu_to_hw_sg(sg);
+		sg++;
+		memcpy(ctx->digest, digest->va,
+			ses->digest_length);
+		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(ctx->digest));
+		sg->length = ses->digest_length;
+	}
+	sg->final = 1;
+	cpu_to_hw_sg(sg);
+
+	if (auth_only_len)
+		fd->cmd = 0x80000000 | auth_only_len;
+
+	return cf;
 }
 
 static struct dpaa_sec_job *
@@ -104,10 +374,13 @@ build_dpaa_raw_dp_cipher_fd(uint8_t *drv_ctx,
 			struct rte_crypto_va_iova_ptr *digest,
 			struct rte_crypto_va_iova_ptr *auth_iv,
 			union rte_crypto_sym_ofs ofs,
-			void *userdata)
+			void *userdata,
+			struct qm_fd *fd)
 {
 	RTE_SET_USED(digest);
 	RTE_SET_USED(auth_iv);
+	RTE_SET_USED(fd);
+
 	dpaa_sec_session *ses =
 		((struct dpaa_sec_raw_dp_ctx *)drv_ctx)->session;
 	struct dpaa_sec_job *cf;
@@ -264,15 +537,14 @@ dpaa_sec_raw_enqueue_burst(void *qp_data, uint8_t *drv_ctx,
 						&vec->digest[loop],
 						&vec->auth_iv[loop],
 						ofs,
-						user_data[loop]);
+						user_data[loop],
+						fd);
 			if (!cf) {
 				DPAA_SEC_ERR("error: Improper packet contents"
 					" for crypto operation");
 				goto skip_tx;
 			}
 			inq[loop] = ses->inq[rte_lcore_id() % MAX_DPAA_CORES];
-			fd->opaque_addr = 0;
-			fd->cmd = 0;
 			qm_fd_addr_set64(fd, rte_dpaa_mem_vtop(cf->sg));
 			fd->_format1 = qm_fd_compound;
 			fd->length29 = 2 * sizeof(struct qm_sg_entry);
@@ -470,6 +742,8 @@ dpaa_sec_configure_raw_dp_ctx(struct rte_cryptodev *dev, uint16_t qp_id,
 		sess->build_raw_dp_fd = build_dpaa_raw_dp_cipher_fd;
 	else if (sess->ctxt == DPAA_SEC_AUTH)
 		sess->build_raw_dp_fd = build_dpaa_raw_dp_auth_fd;
+	else if (sess->ctxt == DPAA_SEC_CIPHER_HASH)
+		sess->build_raw_dp_fd = build_dpaa_raw_dp_chain_fd;
 	else
 		return -ENOTSUP;
 	dp_ctx = (struct dpaa_sec_raw_dp_ctx *)raw_dp_ctx->drv_ctx_data;
-- 
2.17.1
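
A sketch of how the offsets drive the chain builder above (hypothetical
numbers). The auth region must enclose the cipher region; the leading and
trailing auth-only bytes are what the builder packs into fd->cmd as
auth_only_len:

	union rte_crypto_sym_ofs ofs = { .raw = 0 };

	/* e.g. authenticate the whole payload but cipher everything
	 * after an 8-byte header
	 */
	ofs.ofs.auth.head = 0;
	ofs.ofs.auth.tail = 0;
	ofs.ofs.cipher.head = 8;	/* auth_hdr_len = 8 - 0 = 8 */
	ofs.ofs.cipher.tail = 0;	/* auth_tail_len = 0 */

	/* driver side, as in the patch:
	 * auth_only_len = (auth_tail_len << 16) | auth_hdr_len;
	 */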


^ permalink raw reply	[flat|nested] 115+ messages in thread

* [dpdk-dev] [RFC 14/16] crypto/dpaa_sec: support AEAD and proto with raw APIs
  2021-08-12  7:12 [dpdk-dev] [RFC 00/16] crypto: add raw vector support in DPAAx Hemant Agrawal
                   ` (12 preceding siblings ...)
  2021-08-12  7:12 ` [dpdk-dev] [RFC 13/16] crypto/dpaa_sec: support authonly and chain with raw APIs Hemant Agrawal
@ 2021-08-12  7:12 ` Hemant Agrawal
  2021-08-12  7:12 ` [dpdk-dev] [RFC 15/16] test/crypto: add raw API test for dpaax Hemant Agrawal
                   ` (2 subsequent siblings)
  16 siblings, 0 replies; 115+ messages in thread
From: Hemant Agrawal @ 2021-08-12  7:12 UTC (permalink / raw)
  To: dev, gakhil; +Cc: Gagandeep Singh

From: Gagandeep Singh <g.singh@nxp.com>

This patch adds support for AEAD and protocol offload with raw APIs
in the dpaa_sec driver.

Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
---
 drivers/crypto/dpaa_sec/dpaa_sec_raw_dp.c | 293 ++++++++++++++++++++++
 1 file changed, 293 insertions(+)

diff --git a/drivers/crypto/dpaa_sec/dpaa_sec_raw_dp.c b/drivers/crypto/dpaa_sec/dpaa_sec_raw_dp.c
index 4e34629f18..b0c22a7c26 100644
--- a/drivers/crypto/dpaa_sec/dpaa_sec_raw_dp.c
+++ b/drivers/crypto/dpaa_sec/dpaa_sec_raw_dp.c
@@ -218,6 +218,163 @@ build_dpaa_raw_dp_auth_fd(uint8_t *drv_ctx,
 	return cf;
 }
 
+static inline struct dpaa_sec_job *
+build_raw_cipher_auth_gcm_sg(uint8_t *drv_ctx,
+			struct rte_crypto_sgl *sgl,
+			struct rte_crypto_sgl *dest_sgl,
+			struct rte_crypto_va_iova_ptr *iv,
+			struct rte_crypto_va_iova_ptr *digest,
+			struct rte_crypto_va_iova_ptr *auth_iv,
+			union rte_crypto_sym_ofs ofs,
+			void *userdata,
+			struct qm_fd *fd)
+{
+	dpaa_sec_session *ses =
+		((struct dpaa_sec_raw_dp_ctx *)drv_ctx)->session;
+	struct dpaa_sec_job *cf;
+	struct dpaa_sec_op_ctx *ctx;
+	struct qm_sg_entry *sg, *out_sg, *in_sg;
+	uint8_t extra_req_segs;
+	uint8_t *IV_ptr = iv->va;
+	int data_len = 0, aead_len = 0;
+	unsigned int i;
+
+	for (i = 0; i < sgl->num; i++)
+		data_len += sgl->vec[i].len;
+
+	extra_req_segs = 4;
+	aead_len = data_len - ofs.ofs.cipher.head - ofs.ofs.cipher.tail;
+
+	if (ses->auth_only_len)
+		extra_req_segs++;
+
+	if (sgl->num > MAX_SG_ENTRIES) {
+		DPAA_SEC_DP_ERR("AEAD: Max sec segs supported is %d",
+				MAX_SG_ENTRIES);
+		return NULL;
+	}
+
+	ctx = dpaa_sec_alloc_raw_ctx(ses,  sgl->num * 2 + extra_req_segs);
+	if (!ctx)
+		return NULL;
+
+	cf = &ctx->job;
+	ctx->userdata = (void *)userdata;
+
+	rte_prefetch0(cf->sg);
+
+	/* output */
+	out_sg = &cf->sg[0];
+	out_sg->extension = 1;
+	if (is_encode(ses))
+		out_sg->length = aead_len + ses->digest_length;
+	else
+		out_sg->length = aead_len;
+
+	/* output sg entries */
+	sg = &cf->sg[2];
+	qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(sg));
+	cpu_to_hw_sg(out_sg);
+
+	if (dest_sgl) {
+		/* 1st seg */
+		qm_sg_entry_set64(sg, dest_sgl->vec[0].iova);
+		sg->length = dest_sgl->vec[0].len - ofs.ofs.cipher.head;
+		sg->offset = ofs.ofs.cipher.head;
+
+		/* Successive segs */
+		for (i = 1; i < dest_sgl->num; i++) {
+			cpu_to_hw_sg(sg);
+			sg++;
+			qm_sg_entry_set64(sg, dest_sgl->vec[i].iova);
+			sg->length = dest_sgl->vec[i].len;
+		}
+	} else {
+		/* 1st seg */
+		qm_sg_entry_set64(sg, sgl->vec[0].iova);
+		sg->length = sgl->vec[0].len - ofs.ofs.cipher.head;
+		sg->offset = ofs.ofs.cipher.head;
+
+		/* Successive segs */
+		for (i = 1; i < sgl->num; i++) {
+			cpu_to_hw_sg(sg);
+			sg++;
+			qm_sg_entry_set64(sg, sgl->vec[i].iova);
+			sg->length = sgl->vec[i].len;
+		}
+
+	}
+
+	if (is_encode(ses)) {
+		cpu_to_hw_sg(sg);
+		/* set auth output */
+		sg++;
+		qm_sg_entry_set64(sg, digest->iova);
+		sg->length = ses->digest_length;
+	}
+	sg->final = 1;
+	cpu_to_hw_sg(sg);
+
+	/* input */
+	in_sg = &cf->sg[1];
+	in_sg->extension = 1;
+	in_sg->final = 1;
+	if (is_encode(ses))
+		in_sg->length = ses->iv.length + aead_len
+						+ ses->auth_only_len;
+	else
+		in_sg->length = ses->iv.length + aead_len
+				+ ses->auth_only_len + ses->digest_length;
+
+	/* input sg entries */
+	sg++;
+	qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));
+	cpu_to_hw_sg(in_sg);
+
+	/* 1st seg IV */
+	qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
+	sg->length = ses->iv.length;
+	cpu_to_hw_sg(sg);
+
+	/* 2nd seg: auth only */
+	if (ses->auth_only_len) {
+		sg++;
+		qm_sg_entry_set64(sg, auth_iv->iova);
+		sg->length = ses->auth_only_len;
+		cpu_to_hw_sg(sg);
+	}
+
+	/* 3rd seg */
+	sg++;
+	qm_sg_entry_set64(sg, sgl->vec[0].iova);
+	sg->length = sgl->vec[0].len - ofs.ofs.cipher.head;
+	sg->offset = ofs.ofs.cipher.head;
+
+	/* Successive segs */
+	for (i = 1; i < sgl->num; i++) {
+		cpu_to_hw_sg(sg);
+		sg++;
+		qm_sg_entry_set64(sg, sgl->vec[i].iova);
+		sg->length =  sgl->vec[i].len;
+	}
+
+	if (is_decode(ses)) {
+		cpu_to_hw_sg(sg);
+		sg++;
+		memcpy(ctx->digest, digest->va,
+			ses->digest_length);
+		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(ctx->digest));
+		sg->length = ses->digest_length;
+	}
+	sg->final = 1;
+	cpu_to_hw_sg(sg);
+
+	if (ses->auth_only_len)
+		fd->cmd = 0x80000000 | ses->auth_only_len;
+
+	return cf;
+}
+
 static inline struct dpaa_sec_job *
 build_dpaa_raw_dp_chain_fd(uint8_t *drv_ctx,
 			struct rte_crypto_sgl *sgl,
@@ -484,6 +641,135 @@ build_dpaa_raw_dp_cipher_fd(uint8_t *drv_ctx,
 	return cf;
 }
 
+#ifdef RTE_LIBRTE_SECURITY
+static inline struct dpaa_sec_job *
+build_dpaa_raw_proto_sg(uint8_t *drv_ctx,
+			struct rte_crypto_sgl *sgl,
+			struct rte_crypto_sgl *dest_sgl,
+			struct rte_crypto_va_iova_ptr *iv,
+			struct rte_crypto_va_iova_ptr *digest,
+			struct rte_crypto_va_iova_ptr *auth_iv,
+			union rte_crypto_sym_ofs ofs,
+			void *userdata,
+			struct qm_fd *fd)
+{
+	RTE_SET_USED(iv);
+	RTE_SET_USED(digest);
+	RTE_SET_USED(auth_iv);
+	RTE_SET_USED(ofs);
+
+	dpaa_sec_session *ses =
+		((struct dpaa_sec_raw_dp_ctx *)drv_ctx)->session;
+	struct dpaa_sec_job *cf;
+	struct dpaa_sec_op_ctx *ctx;
+	struct qm_sg_entry *sg, *out_sg, *in_sg;
+	uint32_t in_len = 0, out_len = 0;
+	unsigned int i;
+
+	if (sgl->num > MAX_SG_ENTRIES) {
+		DPAA_SEC_DP_ERR("Proto: Max sec segs supported is %d",
+				MAX_SG_ENTRIES);
+		return NULL;
+	}
+
+	ctx = dpaa_sec_alloc_raw_ctx(ses, sgl->num * 2 + 4);
+	if (!ctx)
+		return NULL;
+	cf = &ctx->job;
+	ctx->userdata = (void *)userdata;
+	/* output */
+	out_sg = &cf->sg[0];
+	out_sg->extension = 1;
+	qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(&cf->sg[2]));
+
+	if (dest_sgl) {
+		/* 1st seg */
+		sg = &cf->sg[2];
+		qm_sg_entry_set64(sg, dest_sgl->vec[0].iova);
+		sg->offset = 0;
+		sg->length = dest_sgl->vec[0].len;
+		out_len += sg->length;
+
+		for (i = 1; i < dest_sgl->num; i++) {
+		/* Successive segs */
+			cpu_to_hw_sg(sg);
+			sg++;
+			qm_sg_entry_set64(sg, dest_sgl->vec[i].iova);
+			sg->offset = 0;
+			sg->length = dest_sgl->vec[i].len;
+			out_len += sg->length;
+		}
+		sg->length = dest_sgl->vec[i - 1].tot_len;
+	} else {
+		/* 1st seg */
+		sg = &cf->sg[2];
+		qm_sg_entry_set64(sg, sgl->vec[0].iova);
+		sg->offset = 0;
+		sg->length = sgl->vec[0].len;
+		out_len += sg->length;
+
+		for (i = 1; i < sgl->num; i++) {
+		/* Successive segs */
+			cpu_to_hw_sg(sg);
+			sg++;
+			qm_sg_entry_set64(sg, sgl->vec[i].iova);
+			sg->offset = 0;
+			sg->length = sgl->vec[i].len;
+			out_len += sg->length;
+		}
+		sg->length = sgl->vec[i - 1].tot_len;
+
+	}
+	out_len += sg->length;
+	sg->final = 1;
+	cpu_to_hw_sg(sg);
+
+	out_sg->length = out_len;
+	cpu_to_hw_sg(out_sg);
+
+	/* input */
+	in_sg = &cf->sg[1];
+	in_sg->extension = 1;
+	in_sg->final = 1;
+	in_len = sgl->vec[0].len;
+
+	sg++;
+	qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));
+
+	/* 1st seg */
+	qm_sg_entry_set64(sg, sgl->vec[0].iova);
+	sg->length = sgl->vec[0].len;
+	sg->offset = 0;
+
+	/* Successive segs */
+	for (i = 1; i < sgl->num; i++) {
+		cpu_to_hw_sg(sg);
+		sg++;
+		qm_sg_entry_set64(sg, sgl->vec[i].iova);
+		sg->length = sgl->vec[i].len;
+		sg->offset = 0;
+		in_len += sg->length;
+	}
+	sg->final = 1;
+	cpu_to_hw_sg(sg);
+
+	in_sg->length = in_len;
+	cpu_to_hw_sg(in_sg);
+
+	if ((ses->ctxt == DPAA_SEC_PDCP) && ses->pdcp.hfn_ovd) {
+		fd->cmd = 0x80000000 |
+			*((uint32_t *)((uint8_t *)userdata +
+			ses->pdcp.hfn_ovd_offset));
+		DPAA_SEC_DP_DEBUG("Per packet HFN: %x, ovd:%u\n",
+			*((uint32_t *)((uint8_t *)userdata +
+			ses->pdcp.hfn_ovd_offset)),
+			ses->pdcp.hfn_ovd);
+	}
+
+	return cf;
+}
+#endif
+
 static uint32_t
 dpaa_sec_raw_enqueue_burst(void *qp_data, uint8_t *drv_ctx,
 	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
@@ -744,6 +1030,13 @@ dpaa_sec_configure_raw_dp_ctx(struct rte_cryptodev *dev, uint16_t qp_id,
 		sess->build_raw_dp_fd = build_dpaa_raw_dp_auth_fd;
 	else if (sess->ctxt == DPAA_SEC_CIPHER_HASH)
 		sess->build_raw_dp_fd = build_dpaa_raw_dp_chain_fd;
+	else if (sess->ctxt == DPAA_SEC_AEAD)
+		sess->build_raw_dp_fd = build_raw_cipher_auth_gcm_sg;
+#ifdef RTE_LIBRTE_SECURITY
+	else if (sess->ctxt == DPAA_SEC_IPSEC ||
+			sess->ctxt == DPAA_SEC_PDCP)
+		sess->build_raw_dp_fd = build_dpaa_raw_proto_sg;
+#endif
 	else
 		return -ENOTSUP;
 	dp_ctx = (struct dpaa_sec_raw_dp_ctx *)raw_dp_ctx->drv_ctx_data;
-- 
2.17.1
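
A hedged sketch of the caller-side contract for the per-packet HFN
override above ("op" stands for whatever pointer the application passes as
userdata; hfn_ovd and hfn_ovd_offset come from the session's PDCP
configuration). The dpaa2_sec protocol builder earlier in the series reads
the same word the same way:

	/* The driver reads a 32-bit HFN word hfn_ovd_offset bytes past
	 * the userdata pointer, so the application must write it there
	 * before enqueue whenever hfn_ovd is set on the session.
	 */
	uint32_t *hfn_word = (uint32_t *)((uint8_t *)op + hfn_ovd_offset);

	*hfn_word = per_pkt_hfn;	/* lands in fd->cmd | 0x80000000 */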


^ permalink raw reply	[flat|nested] 115+ messages in thread

* [dpdk-dev] [RFC 15/16] test/crypto: add raw API test for dpaax
  2021-08-12  7:12 [dpdk-dev] [RFC 00/16] crypto: add raw vector support in DPAAx Hemant Agrawal
                   ` (13 preceding siblings ...)
  2021-08-12  7:12 ` [dpdk-dev] [RFC 14/16] crypto/dpaa_sec: support AEAD and proto " Hemant Agrawal
@ 2021-08-12  7:12 ` Hemant Agrawal
  2021-08-12  7:12 ` [dpdk-dev] [RFC 16/16] test/crypto: enabling raw API support in 5G algos Hemant Agrawal
  2021-08-25  7:14 ` [dpdk-dev] [PATCH 00/15] crypto: add raw vector support in DPAAx Hemant Agrawal
  16 siblings, 0 replies; 115+ messages in thread
From: Hemant Agrawal @ 2021-08-12  7:12 UTC (permalink / raw)
  To: dev, gakhil; +Cc: Gagandeep Singh

This patch adds raw API test support for the
dpaa_sec and dpaa2_sec platforms.

Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
Signed-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>
---
 app/test/test_cryptodev.c | 116 +++++++++++++++++++++++++++++++++++---
 1 file changed, 109 insertions(+), 7 deletions(-)

diff --git a/app/test/test_cryptodev.c b/app/test/test_cryptodev.c
index aecd507fa1..59bed6e6d2 100644
--- a/app/test/test_cryptodev.c
+++ b/app/test/test_cryptodev.c
@@ -175,11 +175,11 @@ process_sym_raw_dp_op(uint8_t dev_id, uint16_t qp_id,
 {
 	struct rte_crypto_sym_op *sop = op->sym;
 	struct rte_crypto_op *ret_op = NULL;
-	struct rte_crypto_vec data_vec[UINT8_MAX];
+	struct rte_crypto_vec data_vec[UINT8_MAX], dest_data_vec[UINT8_MAX];
 	struct rte_crypto_va_iova_ptr cipher_iv, digest, aad_auth_iv;
 	union rte_crypto_sym_ofs ofs;
 	struct rte_crypto_sym_vec vec;
-	struct rte_crypto_sgl sgl;
+	struct rte_crypto_sgl sgl, dest_sgl;
 	uint32_t max_len;
 	union rte_cryptodev_session_ctx sess;
 	uint32_t count = 0;
@@ -315,6 +315,19 @@ process_sym_raw_dp_op(uint8_t dev_id, uint16_t qp_id,
 	}
 
 	sgl.num = n;
+	/* Out of place */
+	if (sop->m_dst != NULL) {
+		dest_sgl.vec = dest_data_vec;
+		vec.dest_sgl = &dest_sgl;
+		n = rte_crypto_mbuf_to_vec(sop->m_dst, 0, max_len,
+				dest_data_vec, RTE_DIM(dest_data_vec));
+		if (n < 0 || n > sop->m_dst->nb_segs) {
+			op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+			goto exit;
+		}
+		dest_sgl.num = n;
+	} else
+		vec.dest_sgl = NULL;
 
 	if (rte_cryptodev_raw_enqueue_burst(ctx, &vec, ofs, (void **)&op,
 			&enqueue_status) < 1) {
@@ -8305,10 +8318,21 @@ test_pdcp_proto_SGL(int i, int oop,
 	int to_trn_tbl[16];
 	int segs = 1;
 	unsigned int trn_data = 0;
+	struct rte_cryptodev_info dev_info;
+	uint64_t feat_flags;
 	struct rte_security_ctx *ctx = (struct rte_security_ctx *)
 				rte_cryptodev_get_sec_ctx(
 				ts_params->valid_devs[0]);
+	struct rte_mbuf *temp_mbuf;
+
+	rte_cryptodev_info_get(ts_params->valid_devs[0], &dev_info);
+	feat_flags = dev_info.feature_flags;
 
+	if ((global_api_test_type == CRYPTODEV_RAW_API_TEST) &&
+			(!(feat_flags & RTE_CRYPTODEV_FF_SYM_RAW_DP))) {
+		printf("Device does not support RAW data-path APIs.\n");
+		return -ENOTSUP;
+	}
 	/* Verify the capabilities */
 	struct rte_security_capability_idx sec_cap_idx;
 
@@ -8492,8 +8516,23 @@ test_pdcp_proto_SGL(int i, int oop,
 		ut_params->op->sym->m_dst = ut_params->obuf;
 
 	/* Process crypto operation */
-	if (process_crypto_request(ts_params->valid_devs[0], ut_params->op)
-		== NULL) {
+	temp_mbuf = ut_params->op->sym->m_src;
+	if (global_api_test_type == CRYPTODEV_RAW_API_TEST) {
+		/* filling lengths */
+		while (temp_mbuf) {
+			ut_params->op->sym->cipher.data.length
+				+= temp_mbuf->pkt_len;
+			ut_params->op->sym->auth.data.length
+				+= temp_mbuf->pkt_len;
+			temp_mbuf = temp_mbuf->next;
+		}
+		process_sym_raw_dp_op(ts_params->valid_devs[0], 0,
+			ut_params->op, 1, 1, 0, 0);
+	} else {
+		ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+							ut_params->op);
+	}
+	if (ut_params->op == NULL) {
 		printf("TestCase %s()-%d line %d failed %s: ",
 			__func__, i, __LINE__,
 			"failed to process sym crypto op");
@@ -9934,6 +9973,7 @@ test_authenticated_encryption_oop(const struct aead_test_data *tdata)
 	int retval;
 	uint8_t *ciphertext, *auth_tag;
 	uint16_t plaintext_pad_len;
+	struct rte_cryptodev_info dev_info;
 
 	/* Verify the capabilities */
 	struct rte_cryptodev_sym_capability_idx cap_idx;
@@ -9943,7 +9983,11 @@ test_authenticated_encryption_oop(const struct aead_test_data *tdata)
 			&cap_idx) == NULL)
 		return TEST_SKIPPED;
 
-	if (global_api_test_type == CRYPTODEV_RAW_API_TEST)
+	rte_cryptodev_info_get(ts_params->valid_devs[0], &dev_info);
+	uint64_t feat_flags = dev_info.feature_flags;
+
+	if ((global_api_test_type == CRYPTODEV_RAW_API_TEST) &&
+			(!(feat_flags & RTE_CRYPTODEV_FF_SYM_RAW_DP)))
 		return TEST_SKIPPED;
 
 	/* not supported with CPU crypto */
@@ -9980,7 +10024,11 @@ test_authenticated_encryption_oop(const struct aead_test_data *tdata)
 	ut_params->op->sym->m_dst = ut_params->obuf;
 
 	/* Process crypto operation */
-	TEST_ASSERT_NOT_NULL(process_crypto_request(ts_params->valid_devs[0],
+	if (global_api_test_type == CRYPTODEV_RAW_API_TEST)
+		process_sym_raw_dp_op(ts_params->valid_devs[0], 0,
+			ut_params->op, 0, 0, 0, 0);
+	else
+		TEST_ASSERT_NOT_NULL(process_crypto_request(ts_params->valid_devs[0],
 			ut_params->op), "failed to process sym crypto op");
 
 	TEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,
@@ -10026,6 +10074,10 @@ test_authenticated_decryption_oop(const struct aead_test_data *tdata)
 
 	int retval;
 	uint8_t *plaintext;
+	struct rte_cryptodev_info dev_info;
+
+	rte_cryptodev_info_get(ts_params->valid_devs[0], &dev_info);
+	uint64_t feat_flags = dev_info.feature_flags;
 
 	/* Verify the capabilities */
 	struct rte_cryptodev_sym_capability_idx cap_idx;
@@ -10040,6 +10092,12 @@ test_authenticated_decryption_oop(const struct aead_test_data *tdata)
 			global_api_test_type == CRYPTODEV_RAW_API_TEST)
 		return TEST_SKIPPED;
 
+	if ((global_api_test_type == CRYPTODEV_RAW_API_TEST) &&
+			(!(feat_flags & RTE_CRYPTODEV_FF_SYM_RAW_DP))) {
+		printf("Device does not support RAW data-path APIs.\n");
+		return TEST_SKIPPED;
+	}
+
 	/* Create AEAD session */
 	retval = create_aead_session(ts_params->valid_devs[0],
 			tdata->algo,
@@ -10070,7 +10128,11 @@ test_authenticated_decryption_oop(const struct aead_test_data *tdata)
 	ut_params->op->sym->m_dst = ut_params->obuf;
 
 	/* Process crypto operation */
-	TEST_ASSERT_NOT_NULL(process_crypto_request(ts_params->valid_devs[0],
+	if (global_api_test_type == CRYPTODEV_RAW_API_TEST)
+		process_sym_raw_dp_op(ts_params->valid_devs[0], 0,
+				ut_params->op, 0, 0, 0, 0);
+	else
+		TEST_ASSERT_NOT_NULL(process_crypto_request(ts_params->valid_devs[0],
 			ut_params->op), "failed to process sym crypto op");
 
 	TEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,
@@ -14834,6 +14896,46 @@ test_cryptodev_cn10k(void)
 	return run_cryptodev_testsuite(RTE_STR(CRYPTODEV_NAME_CN10K_PMD));
 }
 
+static int
+test_cryptodev_dpaa2_sec_raw_api(void)
+{
+	static const char *pmd_name = RTE_STR(CRYPTODEV_NAME_DPAA2_SEC_PMD);
+	int ret;
+
+	ret = require_feature_flag(pmd_name, RTE_CRYPTODEV_FF_SYM_RAW_DP,
+			"RAW API");
+	if (ret)
+		return ret;
+
+	global_api_test_type = CRYPTODEV_RAW_API_TEST;
+	ret = run_cryptodev_testsuite(pmd_name);
+	global_api_test_type = CRYPTODEV_API_TEST;
+
+	return ret;
+}
+
+static int
+test_cryptodev_dpaa_sec_raw_api(void)
+{
+	static const char *pmd_name = RTE_STR(CRYPTODEV_NAME_DPAA_SEC_PMD);
+	int ret;
+
+	ret = require_feature_flag(pmd_name, RTE_CRYPTODEV_FF_SYM_RAW_DP,
+			"RAW API");
+	if (ret)
+		return ret;
+
+	global_api_test_type = CRYPTODEV_RAW_API_TEST;
+	ret = run_cryptodev_testsuite(pmd_name);
+	global_api_test_type = CRYPTODEV_API_TEST;
+
+	return ret;
+}
+
+REGISTER_TEST_COMMAND(cryptodev_dpaa2_sec_raw_api_autotest,
+		test_cryptodev_dpaa2_sec_raw_api);
+REGISTER_TEST_COMMAND(cryptodev_dpaa_sec_raw_api_autotest,
+		test_cryptodev_dpaa_sec_raw_api);
 REGISTER_TEST_COMMAND(cryptodev_qat_raw_api_autotest,
 		test_cryptodev_qat_raw_api);
 REGISTER_TEST_COMMAND(cryptodev_qat_autotest, test_cryptodev_qat);
-- 
2.17.1


^ permalink raw reply	[flat|nested] 115+ messages in thread
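
The out-of-place branch added to process_sym_raw_dp_op() above generalizes to a caller-side helper; a condensed sketch using the lib/cryptodev names, assuming the dest_sgl field added earlier in this series (fill_oop_vec itself is an illustrative name):

    #include <rte_crypto.h>
    #include <rte_crypto_sym.h>

    /* Build src/dest SGLs for one op: dest_sgl is set only when m_dst
     * exists, otherwise it stays NULL and processing is in-place. */
    static int
    fill_oop_vec(struct rte_crypto_op *op, uint32_t max_len,
                 struct rte_crypto_vec *sv, struct rte_crypto_vec *dv,
                 uint32_t n_vecs, struct rte_crypto_sgl *src,
                 struct rte_crypto_sgl *dst, struct rte_crypto_sym_vec *vec)
    {
        int n = rte_crypto_mbuf_to_vec(op->sym->m_src, 0, max_len,
                                       sv, n_vecs);
        if (n < 0 || n > op->sym->m_src->nb_segs)
            return -1;
        src->vec = sv;
        src->num = n;
        vec->num = 1;
        vec->src_sgl = src;
        if (op->sym->m_dst != NULL) {
            n = rte_crypto_mbuf_to_vec(op->sym->m_dst, 0, max_len,
                                       dv, n_vecs);
            if (n < 0 || n > op->sym->m_dst->nb_segs)
                return -1;
            dst->vec = dv;
            dst->num = n;
            vec->dest_sgl = dst;
        } else {
            vec->dest_sgl = NULL;
        }
        return 0;
    }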

* [dpdk-dev] [RFC 16/16] test/crypto: enabling raw API support in 5G algos
  2021-08-12  7:12 [dpdk-dev] [RFC 00/16] crypto: add raw vector support in DPAAx Hemant Agrawal
                   ` (14 preceding siblings ...)
  2021-08-12  7:12 ` [dpdk-dev] [RFC 15/16] test/crypto: add raw API test for dpaax Hemant Agrawal
@ 2021-08-12  7:12 ` Hemant Agrawal
  2021-08-25  7:14 ` [dpdk-dev] [PATCH 00/15] crypto: add raw vector support in DPAAx Hemant Agrawal
  16 siblings, 0 replies; 115+ messages in thread
From: Hemant Agrawal @ 2021-08-12  7:12 UTC (permalink / raw)
  To: dev, gakhil; +Cc: Gagandeep Singh

This patch adds support for RAW API testing with ZUC
and SNOW 3G test cases.

Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
Signed-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>
---
 app/test/test_cryptodev.c | 57 ++++++++++++++++++++++++++++++++++-----
 1 file changed, 51 insertions(+), 6 deletions(-)

diff --git a/app/test/test_cryptodev.c b/app/test/test_cryptodev.c
index 59bed6e6d2..ab3394bb09 100644
--- a/app/test/test_cryptodev.c
+++ b/app/test/test_cryptodev.c
@@ -368,6 +368,7 @@ process_sym_raw_dp_op(uint8_t dev_id, uint16_t qp_id,
 	}
 
 	op->status = (count == MAX_RAW_DEQUEUE_COUNT + 1 || ret_op != op ||
+			ret_op->status == RTE_CRYPTO_OP_STATUS_ERROR ||
 			n_success < 1) ? RTE_CRYPTO_OP_STATUS_ERROR :
 					RTE_CRYPTO_OP_STATUS_SUCCESS;
 
@@ -4152,6 +4153,16 @@ test_snow3g_encryption_oop(const struct snow3g_test_data *tdata)
 	int retval;
 	unsigned plaintext_pad_len;
 	unsigned plaintext_len;
+	struct rte_cryptodev_info dev_info;
+
+	rte_cryptodev_info_get(ts_params->valid_devs[0], &dev_info);
+	uint64_t feat_flags = dev_info.feature_flags;
+
+	if ((global_api_test_type == CRYPTODEV_RAW_API_TEST) &&
+			(!(feat_flags & RTE_CRYPTODEV_FF_SYM_RAW_DP))) {
+		printf("Device does not support RAW data-path APIs.\n");
+		return -ENOTSUP;
+	}
 
 	/* Verify the capabilities */
 	struct rte_cryptodev_sym_capability_idx cap_idx;
@@ -4207,7 +4218,11 @@ test_snow3g_encryption_oop(const struct snow3g_test_data *tdata)
 	if (retval < 0)
 		return retval;
 
-	ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+	if (global_api_test_type == CRYPTODEV_RAW_API_TEST)
+		process_sym_raw_dp_op(ts_params->valid_devs[0], 0,
+			ut_params->op, 1, 0, 1, tdata->cipher_iv.len);
+	else
+		ut_params->op = process_crypto_request(ts_params->valid_devs[0],
 						ut_params->op);
 	TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
 
@@ -4267,6 +4282,12 @@ test_snow3g_encryption_oop_sgl(const struct snow3g_test_data *tdata)
 		return TEST_SKIPPED;
 	}
 
+	if ((global_api_test_type == CRYPTODEV_RAW_API_TEST) &&
+			(!(feat_flags & RTE_CRYPTODEV_FF_SYM_RAW_DP))) {
+		printf("Device does not support RAW data-path APIs.\n");
+		return -ENOTSUP;
+	}
+
 	/* Create SNOW 3G session */
 	retval = create_wireless_algo_cipher_session(ts_params->valid_devs[0],
 					RTE_CRYPTO_CIPHER_OP_ENCRYPT,
@@ -4301,7 +4322,11 @@ test_snow3g_encryption_oop_sgl(const struct snow3g_test_data *tdata)
 	if (retval < 0)
 		return retval;
 
-	ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+	if (global_api_test_type == CRYPTODEV_RAW_API_TEST)
+		process_sym_raw_dp_op(ts_params->valid_devs[0], 0,
+			ut_params->op, 1, 0, 1, tdata->cipher_iv.len);
+	else
+		ut_params->op = process_crypto_request(ts_params->valid_devs[0],
 						ut_params->op);
 	TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
 
@@ -4428,7 +4453,11 @@ test_snow3g_encryption_offset_oop(const struct snow3g_test_data *tdata)
 	if (retval < 0)
 		return retval;
 
-	ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+	if (global_api_test_type == CRYPTODEV_RAW_API_TEST)
+		process_sym_raw_dp_op(ts_params->valid_devs[0], 0,
+			ut_params->op, 1, 0, 1, tdata->cipher_iv.len);
+	else
+		ut_params->op = process_crypto_request(ts_params->valid_devs[0],
 						ut_params->op);
 	TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
 
@@ -4559,7 +4588,16 @@ static int test_snow3g_decryption_oop(const struct snow3g_test_data *tdata)
 	uint8_t *plaintext, *ciphertext;
 	unsigned ciphertext_pad_len;
 	unsigned ciphertext_len;
+	struct rte_cryptodev_info dev_info;
+
+	rte_cryptodev_info_get(ts_params->valid_devs[0], &dev_info);
+	uint64_t feat_flags = dev_info.feature_flags;
 
+	if ((global_api_test_type == CRYPTODEV_RAW_API_TEST) &&
+			(!(feat_flags & RTE_CRYPTODEV_FF_SYM_RAW_DP))) {
+		printf("Device does not support RAW data-path APIs.\n");
+		return -ENOTSUP;
+	}
 	/* Verify the capabilities */
 	struct rte_cryptodev_sym_capability_idx cap_idx;
 	cap_idx.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
@@ -4617,7 +4655,11 @@ static int test_snow3g_decryption_oop(const struct snow3g_test_data *tdata)
 	if (retval < 0)
 		return retval;
 
-	ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+	if (global_api_test_type == CRYPTODEV_RAW_API_TEST)
+		process_sym_raw_dp_op(ts_params->valid_devs[0], 0,
+			ut_params->op, 1, 0, 1, tdata->cipher_iv.len);
+	else
+		ut_params->op = process_crypto_request(ts_params->valid_devs[0],
 						ut_params->op);
 	TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
 	ut_params->obuf = ut_params->op->sym->m_dst;
@@ -12653,10 +12695,13 @@ test_authenticated_decryption_fail_when_corruption(
 	else {
 		ut_params->op = process_crypto_request(ts_params->valid_devs[0],
 			ut_params->op);
-		TEST_ASSERT_NULL(ut_params->op, "authentication not failed");
 	}
+	if (ut_params->op == NULL)
+		return 0;
+	else if (ut_params->op->status != RTE_CRYPTO_OP_STATUS_SUCCESS)
+		return 0;
 
-	return 0;
+	return -1;
 }
 
 static int
-- 
2.17.1


^ permalink raw reply	[flat|nested] 115+ messages in thread
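
Each test above repeats the same capability guard; as a standalone sketch (the helper name is illustrative):

    #include <rte_cryptodev.h>

    /* Skip raw-API runs unless the device advertises the capability. */
    static int
    raw_dp_supported(uint8_t dev_id)
    {
        struct rte_cryptodev_info info;

        rte_cryptodev_info_get(dev_id, &info);
        return (info.feature_flags & RTE_CRYPTODEV_FF_SYM_RAW_DP) != 0;
    }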

* Re: [dpdk-dev] [EXT] [RFC 04/16] crypto: enhance raw process for protocol offload
  2021-08-12  7:12 ` [dpdk-dev] [RFC 04/16] crypto: enhance raw process for protocol offload Hemant Agrawal
@ 2021-08-12  8:11   ` Akhil Goyal
  0 siblings, 0 replies; 115+ messages in thread
From: Akhil Goyal @ 2021-08-12  8:11 UTC (permalink / raw)
  To: Hemant Agrawal, dev; +Cc: Gagandeep Singh, roy.fan.zhang, konstantin.ananyev


> From: Gagandeep Singh <g.singh@nxp.com>
> 
> This patch improves the raw vector processing for security
> protocol with OOP processing.
> 
> Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
> ---
Can you explain the reasoning for this change in the commit message?
Is this a bug? Do we need to backport it?

++Fan/Konstantin

>  lib/cryptodev/rte_crypto_sym.h | 3 ++-
>  1 file changed, 2 insertions(+), 1 deletion(-)
> 
> diff --git a/lib/cryptodev/rte_crypto_sym.h
> b/lib/cryptodev/rte_crypto_sym.h
> index 978708845f..a48228a646 100644
> --- a/lib/cryptodev/rte_crypto_sym.h
> +++ b/lib/cryptodev/rte_crypto_sym.h
> @@ -1003,6 +1003,7 @@ rte_crypto_mbuf_to_vec(const struct rte_mbuf
> *mb, uint32_t ofs, uint32_t len,
>  			vec[i].len = left;
>  			vec[i].tot_len = mb->buf_len;
>  			left = 0;
> +			i++;
>  			break;
>  		}
> 
> @@ -1013,7 +1014,7 @@ rte_crypto_mbuf_to_vec(const struct rte_mbuf
> *mb, uint32_t ofs, uint32_t len,
>  	}
> 
>  	RTE_ASSERT(left == 0);
> -	return i + 1;
> +	return i;
>  }
> 
> 
> --
> 2.17.1


^ permalink raw reply	[flat|nested] 115+ messages in thread

* Re: [dpdk-dev] [EXT] [RFC 01/16] crypto: change sgl to src_sgl in vector
  2021-08-12  7:12 ` [dpdk-dev] [RFC 01/16] crypto: change sgl to src_sgl in vector Hemant Agrawal
@ 2021-08-12  8:22   ` Akhil Goyal
  0 siblings, 0 replies; 115+ messages in thread
From: Akhil Goyal @ 2021-08-12  8:22 UTC (permalink / raw)
  To: Hemant Agrawal, dev; +Cc: roy.fan.zhang, konstantin.ananyev

> This patch renames the sgl to src_sgl to help differentiating
> between source and destination sgl.
> 
> Signed-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>
> ---
++Fan/Konstantin

^ permalink raw reply	[flat|nested] 115+ messages in thread

* [dpdk-dev] [PATCH 00/15] crypto: add raw vector support in DPAAx
  2021-08-12  7:12 [dpdk-dev] [RFC 00/16] crypto: add raw vector support in DPAAx Hemant Agrawal
                   ` (15 preceding siblings ...)
  2021-08-12  7:12 ` [dpdk-dev] [RFC 16/16] test/crypto: enabling raw API support in 5G algos Hemant Agrawal
@ 2021-08-25  7:14 ` Hemant Agrawal
  2021-08-25  7:14   ` [dpdk-dev] [PATCH 01/15] crypto: change sgl to src_sgl in vector Hemant Agrawal
                     ` (16 more replies)
  16 siblings, 17 replies; 115+ messages in thread
From: Hemant Agrawal @ 2021-08-25  7:14 UTC (permalink / raw)
  To: dev, gakhil; +Cc: konstantin.ananyev, roy.fan.zhang

This patch series adds support for the raw vector API in the dpaax_sec drivers.
It also enhances the raw vector APIs to add OOP and security
protocol support.

Gagandeep Singh (11):
  crypto: add total raw buffer length
  crypto: fix raw process for multi-seg case
  crypto/dpaa2_sec: support raw datapath APIs
  crypto/dpaa2_sec: support AUTH only with raw buffer APIs
  crypto/dpaa2_sec: support AUTHENC with raw buffer APIs
  crypto/dpaa2_sec: support AEAD with raw buffer APIs
  crypto/dpaa2_sec: support OOP with raw buffer API
  crypto/dpaa2_sec: enhance error checks with raw buffer APIs
  crypto/dpaa_sec: support raw datapath APIs
  crypto/dpaa_sec: support authonly and chain with raw APIs
  crypto/dpaa_sec: support AEAD and proto with raw APIs

Hemant Agrawal (4):
  crypto: change sgl to src_sgl in vector
  crypto: add dest_sgl in raw vector APIs
  test/crypto: add raw API test for dpaax
  test/crypto: add raw API support in 5G algos

 app/test/test_cryptodev.c                   |  179 +++-
 drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c |   13 +-
 drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h   |   82 +-
 drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c | 1045 ++++++++++++++++++
 drivers/crypto/dpaa2_sec/meson.build        |    3 +-
 drivers/crypto/dpaa_sec/dpaa_sec.c          |   23 +-
 drivers/crypto/dpaa_sec/dpaa_sec.h          |   40 +-
 drivers/crypto/dpaa_sec/dpaa_sec_raw_dp.c   | 1052 +++++++++++++++++++
 drivers/crypto/dpaa_sec/meson.build         |    4 +-
 drivers/crypto/qat/qat_sym_hw_dp.c          |   27 +-
 lib/cryptodev/rte_crypto_sym.h              |   13 +-
 lib/ipsec/misc.h                            |    4 +-
 12 files changed, 2390 insertions(+), 95 deletions(-)
 create mode 100644 drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c
 create mode 100644 drivers/crypto/dpaa_sec/dpaa_sec_raw_dp.c

-- 
2.17.1


^ permalink raw reply	[flat|nested] 115+ messages in thread

* [dpdk-dev] [PATCH 01/15] crypto: change sgl to src_sgl in vector
  2021-08-25  7:14 ` [dpdk-dev] [PATCH 00/15] crypto: add raw vector support in DPAAx Hemant Agrawal
@ 2021-08-25  7:14   ` Hemant Agrawal
  2021-09-06 18:36     ` [dpdk-dev] [EXT] " Akhil Goyal
  2021-08-25  7:14   ` [dpdk-dev] [PATCH 02/15] crypto: add total raw buffer length Hemant Agrawal
                     ` (15 subsequent siblings)
  16 siblings, 1 reply; 115+ messages in thread
From: Hemant Agrawal @ 2021-08-25  7:14 UTC (permalink / raw)
  To: dev, gakhil; +Cc: konstantin.ananyev, roy.fan.zhang

This patch renames the sgl field to src_sgl to help differentiate
between the source and destination SGLs.

Signed-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>
---
 app/test/test_cryptodev.c          |  6 +++---
 drivers/crypto/qat/qat_sym_hw_dp.c | 27 ++++++++++++++++-----------
 lib/cryptodev/rte_crypto_sym.h     |  2 +-
 lib/ipsec/misc.h                   |  4 ++--
 4 files changed, 22 insertions(+), 17 deletions(-)

diff --git a/app/test/test_cryptodev.c b/app/test/test_cryptodev.c
index 9ad0b37473..aecd507fa1 100644
--- a/app/test/test_cryptodev.c
+++ b/app/test/test_cryptodev.c
@@ -221,7 +221,7 @@ process_sym_raw_dp_op(uint8_t dev_id, uint16_t qp_id,
 	digest.va = NULL;
 	sgl.vec = data_vec;
 	vec.num = 1;
-	vec.sgl = &sgl;
+	vec.src_sgl = &sgl;
 	vec.iv = &cipher_iv;
 	vec.digest = &digest;
 	vec.aad = &aad_auth_iv;
@@ -385,7 +385,7 @@ process_cpu_aead_op(uint8_t dev_id, struct rte_crypto_op *op)
 
 	sgl.vec = vec;
 	sgl.num = n;
-	symvec.sgl = &sgl;
+	symvec.src_sgl = &sgl;
 	symvec.iv = &iv_ptr;
 	symvec.digest = &digest_ptr;
 	symvec.aad = &aad_ptr;
@@ -431,7 +431,7 @@ process_cpu_crypt_auth_op(uint8_t dev_id, struct rte_crypto_op *op)
 
 	sgl.vec = vec;
 	sgl.num = n;
-	symvec.sgl = &sgl;
+	symvec.src_sgl = &sgl;
 	symvec.iv = &iv_ptr;
 	symvec.digest = &digest_ptr;
 	symvec.status = &st;
diff --git a/drivers/crypto/qat/qat_sym_hw_dp.c b/drivers/crypto/qat/qat_sym_hw_dp.c
index ac9ac05363..4870ebf66a 100644
--- a/drivers/crypto/qat/qat_sym_hw_dp.c
+++ b/drivers/crypto/qat/qat_sym_hw_dp.c
@@ -181,8 +181,9 @@ qat_sym_dp_enqueue_cipher_jobs(void *qp_data, uint8_t *drv_ctx,
 			(uint8_t *)tx_queue->base_addr + tail);
 		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
 
-		data_len = qat_sym_dp_parse_data_vec(qp, req, vec->sgl[i].vec,
-			vec->sgl[i].num);
+		data_len = qat_sym_dp_parse_data_vec(qp, req,
+			vec->src_sgl[i].vec,
+			vec->src_sgl[i].num);
 		if (unlikely(data_len < 0))
 			break;
 		req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data[i];
@@ -302,8 +303,9 @@ qat_sym_dp_enqueue_auth_jobs(void *qp_data, uint8_t *drv_ctx,
 			(uint8_t *)tx_queue->base_addr + tail);
 		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
 
-		data_len = qat_sym_dp_parse_data_vec(qp, req, vec->sgl[i].vec,
-			vec->sgl[i].num);
+		data_len = qat_sym_dp_parse_data_vec(qp, req,
+			vec->src_sgl[i].vec,
+			vec->src_sgl[i].num);
 		if (unlikely(data_len < 0))
 			break;
 		req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data[i];
@@ -484,14 +486,16 @@ qat_sym_dp_enqueue_chain_jobs(void *qp_data, uint8_t *drv_ctx,
 			(uint8_t *)tx_queue->base_addr + tail);
 		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
 
-		data_len = qat_sym_dp_parse_data_vec(qp, req, vec->sgl[i].vec,
-			vec->sgl[i].num);
+		data_len = qat_sym_dp_parse_data_vec(qp, req,
+			vec->src_sgl[i].vec,
+			vec->src_sgl[i].num);
 		if (unlikely(data_len < 0))
 			break;
 		req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data[i];
-		if (unlikely(enqueue_one_chain_job(ctx, req, vec->sgl[i].vec,
-			vec->sgl[i].num, &vec->iv[i], &vec->digest[i],
-				&vec->auth_iv[i], ofs, (uint32_t)data_len)))
+		if (unlikely(enqueue_one_chain_job(ctx, req,
+			vec->src_sgl[i].vec, vec->src_sgl[i].num,
+			&vec->iv[i], &vec->digest[i],
+			&vec->auth_iv[i], ofs, (uint32_t)data_len)))
 			break;
 
 		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
@@ -688,8 +692,9 @@ qat_sym_dp_enqueue_aead_jobs(void *qp_data, uint8_t *drv_ctx,
 			(uint8_t *)tx_queue->base_addr + tail);
 		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
 
-		data_len = qat_sym_dp_parse_data_vec(qp, req, vec->sgl[i].vec,
-			vec->sgl[i].num);
+		data_len = qat_sym_dp_parse_data_vec(qp, req,
+			vec->src_sgl[i].vec,
+			vec->src_sgl[i].num);
 		if (unlikely(data_len < 0))
 			break;
 		req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data[i];
diff --git a/lib/cryptodev/rte_crypto_sym.h b/lib/cryptodev/rte_crypto_sym.h
index 58c0724743..dcc0bd5933 100644
--- a/lib/cryptodev/rte_crypto_sym.h
+++ b/lib/cryptodev/rte_crypto_sym.h
@@ -69,7 +69,7 @@ struct rte_crypto_sym_vec {
 	/** number of operations to perform */
 	uint32_t num;
 	/** array of SGL vectors */
-	struct rte_crypto_sgl *sgl;
+	struct rte_crypto_sgl *src_sgl;
 	/** array of pointers to cipher IV */
 	struct rte_crypto_va_iova_ptr *iv;
 	/** array of pointers to digest */
diff --git a/lib/ipsec/misc.h b/lib/ipsec/misc.h
index 79b9a20762..58ff538141 100644
--- a/lib/ipsec/misc.h
+++ b/lib/ipsec/misc.h
@@ -136,7 +136,7 @@ cpu_crypto_bulk(const struct rte_ipsec_session *ss,
 		/* not enough space in vec[] to hold all segments */
 		if (vcnt < 0) {
 			/* fill the request structure */
-			symvec.sgl = &vecpkt[j];
+			symvec.src_sgl = &vecpkt[j];
 			symvec.iv = &iv[j];
 			symvec.digest = &dgst[j];
 			symvec.aad = &aad[j];
@@ -160,7 +160,7 @@ cpu_crypto_bulk(const struct rte_ipsec_session *ss,
 	}
 
 	/* fill the request structure */
-	symvec.sgl = &vecpkt[j];
+	symvec.src_sgl = &vecpkt[j];
 	symvec.iv = &iv[j];
 	symvec.aad = &aad[j];
 	symvec.digest = &dgst[j];
-- 
2.17.1


^ permalink raw reply	[flat|nested] 115+ messages in thread
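
After this rename, every producer of rte_crypto_sym_vec assigns the source chain through src_sgl; a minimal caller-side sketch (the helper name is illustrative):

    #include <rte_crypto_sym.h>

    static void
    fill_sym_vec(struct rte_crypto_sgl *sgl,
                 struct rte_crypto_va_iova_ptr *iv,
                 struct rte_crypto_va_iova_ptr *digest,
                 int32_t *status, struct rte_crypto_sym_vec *symvec)
    {
        symvec->num = 1;
        symvec->src_sgl = sgl;  /* was symvec->sgl before this patch */
        symvec->iv = iv;
        symvec->digest = digest;
        symvec->status = status;
    }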

* [dpdk-dev] [PATCH 02/15] crypto: add total raw buffer length
  2021-08-25  7:14 ` [dpdk-dev] [PATCH 00/15] crypto: add raw vector support in DPAAx Hemant Agrawal
  2021-08-25  7:14   ` [dpdk-dev] [PATCH 01/15] crypto: change sgl to src_sgl in vector Hemant Agrawal
@ 2021-08-25  7:14   ` Hemant Agrawal
  2021-09-06 18:36     ` [dpdk-dev] [EXT] " Akhil Goyal
  2021-08-25  7:14   ` [dpdk-dev] [PATCH 03/15] crypto: add dest_sgl in raw vector APIs Hemant Agrawal
                     ` (14 subsequent siblings)
  16 siblings, 1 reply; 115+ messages in thread
From: Hemant Agrawal @ 2021-08-25  7:14 UTC (permalink / raw)
  To: dev, gakhil; +Cc: konstantin.ananyev, roy.fan.zhang, Gagandeep Singh

From: Gagandeep Singh <g.singh@nxp.com>

The current crypto raw data vector is extended to support
rte_security use cases, where the total buffer length is needed to
know how much additional memory space is available in the buffer
beyond the data length, so that the driver/HW can write expanded-size
data after encryption.

Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
---
 lib/cryptodev/rte_crypto_sym.h | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/lib/cryptodev/rte_crypto_sym.h b/lib/cryptodev/rte_crypto_sym.h
index dcc0bd5933..e5cef1fb72 100644
--- a/lib/cryptodev/rte_crypto_sym.h
+++ b/lib/cryptodev/rte_crypto_sym.h
@@ -37,6 +37,8 @@ struct rte_crypto_vec {
 	rte_iova_t iova;
 	/** length of the data buffer */
 	uint32_t len;
+	/** total buffer length */
+	uint32_t tot_len;
 };
 
 /**
@@ -980,12 +982,14 @@ rte_crypto_mbuf_to_vec(const struct rte_mbuf *mb, uint32_t ofs, uint32_t len,
 	seglen = mb->data_len - ofs;
 	if (len <= seglen) {
 		vec[0].len = len;
+		vec[0].tot_len = mb->buf_len;
 		return 1;
 	}
 
 	/* data spread across segments */
 	vec[0].len = seglen;
 	left = len - seglen;
+	vec[0].tot_len = mb->buf_len;
 	for (i = 1, nseg = mb->next; nseg != NULL; nseg = nseg->next, i++) {
 
 		vec[i].base = rte_pktmbuf_mtod(nseg, void *);
@@ -995,6 +999,7 @@ rte_crypto_mbuf_to_vec(const struct rte_mbuf *mb, uint32_t ofs, uint32_t len,
 		if (left <= seglen) {
 			/* whole requested data is completed */
 			vec[i].len = left;
+			vec[i].tot_len = mb->buf_len;
 			left = 0;
 			break;
 		}
@@ -1002,6 +1007,7 @@ rte_crypto_mbuf_to_vec(const struct rte_mbuf *mb, uint32_t ofs, uint32_t len,
 		/* use whole segment */
 		vec[i].len = seglen;
 		left -= seglen;
+		vec[i].tot_len = mb->buf_len;
 	}
 
 	RTE_ASSERT(left == 0);
-- 
2.17.1


^ permalink raw reply	[flat|nested] 115+ messages in thread
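
With tot_len describing the whole segment buffer and len only the valid data, a driver can bound output expansion before writing. A sketch under the simplifying assumption that the data starts at the buffer head (real code must also account for the data offset; the helper name is illustrative):

    #include <rte_crypto_sym.h>

    /* True when 'growth' extra bytes (padding, ICV, ...) produced by
     * protocol offload still fit in the last segment's buffer. */
    static int
    fits_after_expand(const struct rte_crypto_vec *last, uint32_t growth)
    {
        return (last->tot_len - last->len) >= growth;
    }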

* [dpdk-dev] [PATCH 03/15] crypto: add dest_sgl in raw vector APIs
  2021-08-25  7:14 ` [dpdk-dev] [PATCH 00/15] crypto: add raw vector support in DPAAx Hemant Agrawal
  2021-08-25  7:14   ` [dpdk-dev] [PATCH 01/15] crypto: change sgl to src_sgl in vector Hemant Agrawal
  2021-08-25  7:14   ` [dpdk-dev] [PATCH 02/15] crypto: add total raw buffer length Hemant Agrawal
@ 2021-08-25  7:14   ` Hemant Agrawal
  2021-09-06 18:37     ` [dpdk-dev] [EXT] " Akhil Goyal
  2021-08-25  7:14   ` [dpdk-dev] [PATCH 04/15] crypto: fix raw process for multi-seg case Hemant Agrawal
                     ` (13 subsequent siblings)
  16 siblings, 1 reply; 115+ messages in thread
From: Hemant Agrawal @ 2021-08-25  7:14 UTC (permalink / raw)
  To: dev, gakhil; +Cc: konstantin.ananyev, roy.fan.zhang

The structure rte_crypto_sym_vec is updated to
add dest_sgl to support out of place processing.

Signed-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>
---
 lib/cryptodev/rte_crypto_sym.h | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/lib/cryptodev/rte_crypto_sym.h b/lib/cryptodev/rte_crypto_sym.h
index e5cef1fb72..978708845f 100644
--- a/lib/cryptodev/rte_crypto_sym.h
+++ b/lib/cryptodev/rte_crypto_sym.h
@@ -72,6 +72,8 @@ struct rte_crypto_sym_vec {
 	uint32_t num;
 	/** array of SGL vectors */
 	struct rte_crypto_sgl *src_sgl;
+	/** array of SGL vectors for OOP, keep it NULL for in-place */
+	struct rte_crypto_sgl *dest_sgl;
 	/** array of pointers to cipher IV */
 	struct rte_crypto_va_iova_ptr *iv;
 	/** array of pointers to digest */
-- 
2.17.1


^ permalink raw reply	[flat|nested] 115+ messages in thread
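
On the consuming side, a PMD selects the output chain per element; a one-line sketch of the convention this field establishes (the helper name is illustrative):

    #include <rte_crypto_sym.h>

    /* dest_sgl supplied: out-of-place; NULL: write back into src_sgl. */
    static const struct rte_crypto_sgl *
    out_sgl(const struct rte_crypto_sym_vec *vec, uint32_t i)
    {
        return vec->dest_sgl != NULL ? &vec->dest_sgl[i] : &vec->src_sgl[i];
    }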

* [dpdk-dev] [PATCH 04/15] crypto: fix raw process for multi-seg case
  2021-08-25  7:14 ` [dpdk-dev] [PATCH 00/15] crypto: add raw vector support in DPAAx Hemant Agrawal
                     ` (2 preceding siblings ...)
  2021-08-25  7:14   ` [dpdk-dev] [PATCH 03/15] crypto: add dest_sgl in raw vector APIs Hemant Agrawal
@ 2021-08-25  7:14   ` Hemant Agrawal
  2021-09-06 18:38     ` [dpdk-dev] [EXT] " Akhil Goyal
  2021-08-25  7:15   ` [dpdk-dev] [PATCH 05/15] crypto/dpaa2_sec: support raw datapath APIs Hemant Agrawal
                     ` (12 subsequent siblings)
  16 siblings, 1 reply; 115+ messages in thread
From: Hemant Agrawal @ 2021-08-25  7:14 UTC (permalink / raw)
  To: dev, gakhil
  Cc: konstantin.ananyev, roy.fan.zhang, Gagandeep Singh,
	marcinx.smoczynski, stable

From: Gagandeep Singh <g.singh@nxp.com>

If no next segment is available, the “for” loop is never entered and
the function still returns i+1, i.e. 2, which is wrong as it has
filled only one buffer.

Fixes: 7adf992fb9bf ("cryptodev: introduce CPU crypto API")
Cc: marcinx.smoczynski@intel.com
Cc: stable@dpdk.org

Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
---
 lib/cryptodev/rte_crypto_sym.h | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/lib/cryptodev/rte_crypto_sym.h b/lib/cryptodev/rte_crypto_sym.h
index 978708845f..a48228a646 100644
--- a/lib/cryptodev/rte_crypto_sym.h
+++ b/lib/cryptodev/rte_crypto_sym.h
@@ -1003,6 +1003,7 @@ rte_crypto_mbuf_to_vec(const struct rte_mbuf *mb, uint32_t ofs, uint32_t len,
 			vec[i].len = left;
 			vec[i].tot_len = mb->buf_len;
 			left = 0;
+			i++;
 			break;
 		}
 
@@ -1013,7 +1014,7 @@ rte_crypto_mbuf_to_vec(const struct rte_mbuf *mb, uint32_t ofs, uint32_t len,
 	}
 
 	RTE_ASSERT(left == 0);
-	return i + 1;
+	return i;
 }
 
 
-- 
2.17.1


^ permalink raw reply	[flat|nested] 115+ messages in thread
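
The broken case is easiest to see as a trace, for a single-segment mbuf (mb->next == NULL) whose data does not satisfy the request:

    before the loop: vec[0] filled, i == 1, left > 0
    for (...; nseg != NULL; ...)   -> body never entered
    old code: return i + 1;        -> 2, though only vec[0] was written
    fixed:    return i;            -> 1 (the break path now runs i++ first,
                                        so the multi-segment count stays correct)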

* [dpdk-dev] [PATCH 05/15] crypto/dpaa2_sec: support raw datapath APIs
  2021-08-25  7:14 ` [dpdk-dev] [PATCH 00/15] crypto: add raw vector support in DPAAx Hemant Agrawal
                     ` (3 preceding siblings ...)
  2021-08-25  7:14   ` [dpdk-dev] [PATCH 04/15] crypto: fix raw process for multi-seg case Hemant Agrawal
@ 2021-08-25  7:15   ` Hemant Agrawal
  2021-08-25  7:15   ` [dpdk-dev] [PATCH 06/15] crypto/dpaa2_sec: support AUTH only with raw buffer APIs Hemant Agrawal
                     ` (11 subsequent siblings)
  16 siblings, 0 replies; 115+ messages in thread
From: Hemant Agrawal @ 2021-08-25  7:15 UTC (permalink / raw)
  To: dev, gakhil; +Cc: konstantin.ananyev, roy.fan.zhang, Gagandeep Singh

From: Gagandeep Singh <g.singh@nxp.com>

This patch adds the framework for raw API support.
The initial patch only tests the cipher-only part.

Signed-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>
Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
---
 drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c |  13 +-
 drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h   |  60 +-
 drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c | 595 ++++++++++++++++++++
 drivers/crypto/dpaa2_sec/meson.build        |   3 +-
 4 files changed, 642 insertions(+), 29 deletions(-)
 create mode 100644 drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c

diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c b/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
index 1ccead3641..fe90d9d2d8 100644
--- a/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
@@ -49,15 +49,8 @@
 #define FSL_MC_DPSECI_DEVID     3
 
 #define NO_PREFETCH 0
-/* FLE_POOL_NUM_BUFS is set as per the ipsec-secgw application */
-#define FLE_POOL_NUM_BUFS	32000
-#define FLE_POOL_BUF_SIZE	256
-#define FLE_POOL_CACHE_SIZE	512
-#define FLE_SG_MEM_SIZE(num)	(FLE_POOL_BUF_SIZE + ((num) * 32))
-#define SEC_FLC_DHR_OUTBOUND	-114
-#define SEC_FLC_DHR_INBOUND	0
 
-static uint8_t cryptodev_driver_id;
+uint8_t cryptodev_driver_id;
 
 #ifdef RTE_LIB_SECURITY
 static inline int
@@ -3805,6 +3798,9 @@ static struct rte_cryptodev_ops crypto_ops = {
 	.sym_session_get_size     = dpaa2_sec_sym_session_get_size,
 	.sym_session_configure    = dpaa2_sec_sym_session_configure,
 	.sym_session_clear        = dpaa2_sec_sym_session_clear,
+	/* Raw data-path API related operations */
+	.sym_get_raw_dp_ctx_size = dpaa2_sec_get_dp_ctx_size,
+	.sym_configure_raw_dp_ctx = dpaa2_sec_configure_raw_dp_ctx,
 };
 
 #ifdef RTE_LIB_SECURITY
@@ -3887,6 +3883,7 @@ dpaa2_sec_dev_init(struct rte_cryptodev *cryptodev)
 			RTE_CRYPTODEV_FF_HW_ACCELERATED |
 			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
 			RTE_CRYPTODEV_FF_SECURITY |
+			RTE_CRYPTODEV_FF_SYM_RAW_DP |
 			RTE_CRYPTODEV_FF_IN_PLACE_SGL |
 			RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT |
 			RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h b/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h
index 7dbc69f6cb..860c9b6520 100644
--- a/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h
@@ -15,6 +15,16 @@
 #define CRYPTODEV_NAME_DPAA2_SEC_PMD	crypto_dpaa2_sec
 /**< NXP DPAA2 - SEC PMD device name */
 
+extern uint8_t cryptodev_driver_id;
+
+/* FLE_POOL_NUM_BUFS is set as per the ipsec-secgw application */
+#define FLE_POOL_NUM_BUFS	32000
+#define FLE_POOL_BUF_SIZE	256
+#define FLE_POOL_CACHE_SIZE	512
+#define FLE_SG_MEM_SIZE(num)	(FLE_POOL_BUF_SIZE + ((num) * 32))
+#define SEC_FLC_DHR_OUTBOUND	-114
+#define SEC_FLC_DHR_INBOUND	0
+
 #define MAX_QUEUES		64
 #define MAX_DESC_SIZE		64
 /** private data structure for each DPAA2_SEC device */
@@ -158,6 +168,24 @@ struct dpaa2_pdcp_ctxt {
 	uint32_t hfn_threshold;	/*!< HFN Threashold for key renegotiation */
 };
 #endif
+
+typedef int (*dpaa2_sec_build_fd_t)(
+	void *qp, uint8_t *drv_ctx, struct rte_crypto_vec *data_vec,
+	uint16_t n_data_vecs, union rte_crypto_sym_ofs ofs,
+	struct rte_crypto_va_iova_ptr *iv,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *aad_or_auth_iv,
+	void *user_data);
+
+typedef int (*dpaa2_sec_build_raw_dp_fd_t)(uint8_t *drv_ctx,
+		       struct rte_crypto_sgl *sgl,
+		       struct rte_crypto_va_iova_ptr *iv,
+		       struct rte_crypto_va_iova_ptr *digest,
+		       struct rte_crypto_va_iova_ptr *auth_iv,
+		       union rte_crypto_sym_ofs ofs,
+		       void *userdata,
+		       struct qbman_fd *fd);
+
 typedef struct dpaa2_sec_session_entry {
 	void *ctxt;
 	uint8_t ctxt_type;
@@ -165,6 +193,8 @@ typedef struct dpaa2_sec_session_entry {
 	enum rte_crypto_cipher_algorithm cipher_alg; /*!< Cipher Algorithm*/
 	enum rte_crypto_auth_algorithm auth_alg; /*!< Authentication Algorithm*/
 	enum rte_crypto_aead_algorithm aead_alg; /*!< AEAD Algorithm*/
+	dpaa2_sec_build_fd_t build_fd;
+	dpaa2_sec_build_raw_dp_fd_t build_raw_dp_fd;
 	union {
 		struct {
 			uint8_t *data;	/**< pointer to key data */
@@ -547,26 +577,6 @@ static const struct rte_cryptodev_capabilities dpaa2_sec_capabilities[] = {
 			}, }
 		}, }
 	},
-	{	/* NULL (CIPHER) */
-		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
-		{.sym = {
-			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
-			{.cipher = {
-				.algo = RTE_CRYPTO_CIPHER_NULL,
-				.block_size = 1,
-				.key_size = {
-					.min = 0,
-					.max = 0,
-					.increment = 0
-				},
-				.iv_size = {
-					.min = 0,
-					.max = 0,
-					.increment = 0
-				}
-			}, },
-		}, }
-	},
 	{	/* AES CBC */
 		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
 		{.sym = {
@@ -974,4 +984,14 @@ calc_chksum(void *buffer, int len)
 	return  result;
 }
 
+int
+dpaa2_sec_configure_raw_dp_ctx(struct rte_cryptodev *dev, uint16_t qp_id,
+	struct rte_crypto_raw_dp_ctx *raw_dp_ctx,
+	enum rte_crypto_op_sess_type sess_type,
+	union rte_cryptodev_session_ctx session_ctx, uint8_t is_update);
+
+int
+dpaa2_sec_get_dp_ctx_size(struct rte_cryptodev *dev);
+
+
 #endif /* _DPAA2_SEC_PMD_PRIVATE_H_ */
diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c b/drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c
new file mode 100644
index 0000000000..32abf5a431
--- /dev/null
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c
@@ -0,0 +1,595 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2021 NXP
+ */
+
+#include <rte_cryptodev_pmd.h>
+#include <rte_fslmc.h>
+#include <fslmc_vfio.h>
+#include <dpaa2_hw_pvt.h>
+#include <dpaa2_hw_dpio.h>
+
+#include "dpaa2_sec_priv.h"
+#include "dpaa2_sec_logs.h"
+
+struct dpaa2_sec_raw_dp_ctx {
+	dpaa2_sec_session *session;
+	uint32_t tail;
+	uint32_t head;
+	uint16_t cached_enqueue;
+	uint16_t cached_dequeue;
+};
+
+static int
+build_raw_dp_chain_fd(uint8_t *drv_ctx,
+		       struct rte_crypto_sgl *sgl,
+		       struct rte_crypto_va_iova_ptr *iv,
+		       struct rte_crypto_va_iova_ptr *digest,
+		       struct rte_crypto_va_iova_ptr *auth_iv,
+		       union rte_crypto_sym_ofs ofs,
+		       void *userdata,
+		       struct qbman_fd *fd)
+{
+	RTE_SET_USED(drv_ctx);
+	RTE_SET_USED(sgl);
+	RTE_SET_USED(iv);
+	RTE_SET_USED(digest);
+	RTE_SET_USED(auth_iv);
+	RTE_SET_USED(ofs);
+	RTE_SET_USED(userdata);
+	RTE_SET_USED(fd);
+
+	return 0;
+}
+
+static int
+build_raw_dp_aead_fd(uint8_t *drv_ctx,
+		       struct rte_crypto_sgl *sgl,
+		       struct rte_crypto_va_iova_ptr *iv,
+		       struct rte_crypto_va_iova_ptr *digest,
+		       struct rte_crypto_va_iova_ptr *auth_iv,
+		       union rte_crypto_sym_ofs ofs,
+		       void *userdata,
+		       struct qbman_fd *fd)
+{
+	RTE_SET_USED(drv_ctx);
+	RTE_SET_USED(sgl);
+	RTE_SET_USED(iv);
+	RTE_SET_USED(digest);
+	RTE_SET_USED(auth_iv);
+	RTE_SET_USED(ofs);
+	RTE_SET_USED(userdata);
+	RTE_SET_USED(fd);
+
+	return 0;
+}
+
+static int
+build_raw_dp_auth_fd(uint8_t *drv_ctx,
+		       struct rte_crypto_sgl *sgl,
+		       struct rte_crypto_va_iova_ptr *iv,
+		       struct rte_crypto_va_iova_ptr *digest,
+		       struct rte_crypto_va_iova_ptr *auth_iv,
+		       union rte_crypto_sym_ofs ofs,
+		       void *userdata,
+		       struct qbman_fd *fd)
+{
+	RTE_SET_USED(drv_ctx);
+	RTE_SET_USED(sgl);
+	RTE_SET_USED(iv);
+	RTE_SET_USED(digest);
+	RTE_SET_USED(auth_iv);
+	RTE_SET_USED(ofs);
+	RTE_SET_USED(userdata);
+	RTE_SET_USED(fd);
+
+	return 0;
+}
+
+static int
+build_raw_dp_proto_fd(uint8_t *drv_ctx,
+		       struct rte_crypto_sgl *sgl,
+		       struct rte_crypto_va_iova_ptr *iv,
+		       struct rte_crypto_va_iova_ptr *digest,
+		       struct rte_crypto_va_iova_ptr *auth_iv,
+		       union rte_crypto_sym_ofs ofs,
+		       void *userdata,
+		       struct qbman_fd *fd)
+{
+	RTE_SET_USED(drv_ctx);
+	RTE_SET_USED(sgl);
+	RTE_SET_USED(iv);
+	RTE_SET_USED(digest);
+	RTE_SET_USED(auth_iv);
+	RTE_SET_USED(ofs);
+	RTE_SET_USED(userdata);
+	RTE_SET_USED(fd);
+
+	return 0;
+}
+
+static int
+build_raw_dp_proto_compound_fd(uint8_t *drv_ctx,
+		       struct rte_crypto_sgl *sgl,
+		       struct rte_crypto_va_iova_ptr *iv,
+		       struct rte_crypto_va_iova_ptr *digest,
+		       struct rte_crypto_va_iova_ptr *auth_iv,
+		       union rte_crypto_sym_ofs ofs,
+		       void *userdata,
+		       struct qbman_fd *fd)
+{
+	RTE_SET_USED(drv_ctx);
+	RTE_SET_USED(sgl);
+	RTE_SET_USED(iv);
+	RTE_SET_USED(digest);
+	RTE_SET_USED(auth_iv);
+	RTE_SET_USED(ofs);
+	RTE_SET_USED(userdata);
+	RTE_SET_USED(fd);
+
+	return 0;
+}
+
+static int
+build_raw_dp_cipher_fd(uint8_t *drv_ctx,
+		       struct rte_crypto_sgl *sgl,
+		       struct rte_crypto_va_iova_ptr *iv,
+		       struct rte_crypto_va_iova_ptr *digest,
+		       struct rte_crypto_va_iova_ptr *auth_iv,
+		       union rte_crypto_sym_ofs ofs,
+		       void *userdata,
+		       struct qbman_fd *fd)
+{
+	RTE_SET_USED(digest);
+	RTE_SET_USED(auth_iv);
+
+	dpaa2_sec_session *sess =
+		((struct dpaa2_sec_raw_dp_ctx *)drv_ctx)->session;
+	struct qbman_fle *ip_fle, *op_fle, *sge, *fle;
+	int total_len = 0, data_len = 0, data_offset;
+	struct sec_flow_context *flc;
+	struct ctxt_priv *priv = sess->ctxt;
+	unsigned int i;
+
+	for (i = 0; i < sgl->num; i++)
+		total_len += sgl->vec[i].len;
+
+	data_len = total_len - ofs.ofs.cipher.head - ofs.ofs.cipher.tail;
+	data_offset = ofs.ofs.cipher.head;
+
+	if (sess->cipher_alg == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
+		sess->cipher_alg == RTE_CRYPTO_CIPHER_ZUC_EEA3) {
+		if ((data_len & 7) || (data_offset & 7)) {
+			DPAA2_SEC_ERR("CIPHER: len/offset must be full bytes");
+			return -ENOTSUP;
+		}
+
+		data_len = data_len >> 3;
+		data_offset = data_offset >> 3;
+	}
+
+	/* first FLE entry used to store mbuf and session ctxt */
+	fle = (struct qbman_fle *)rte_malloc(NULL,
+			FLE_SG_MEM_SIZE(2*sgl->num),
+			RTE_CACHE_LINE_SIZE);
+	if (!fle) {
+		DPAA2_SEC_ERR("RAW CIPHER SG: Memory alloc failed for SGE");
+		return -ENOMEM;
+	}
+	memset(fle, 0, FLE_SG_MEM_SIZE(2*sgl->num));
+	/* first FLE entry used to store userdata and session ctxt */
+	DPAA2_SET_FLE_ADDR(fle, (size_t)userdata);
+	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
+
+	op_fle = fle + 1;
+	ip_fle = fle + 2;
+	sge = fle + 3;
+
+	flc = &priv->flc_desc[0].flc;
+
+	DPAA2_SEC_DP_DEBUG(
+		"RAW CIPHER SG: cipher_off: 0x%x/length %d, ivlen=%d\n",
+		data_offset,
+		data_len,
+		sess->iv.length);
+
+	/* o/p fle */
+	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));
+	op_fle->length = data_len;
+	DPAA2_SET_FLE_SG_EXT(op_fle);
+
+	/* o/p 1st seg */
+	DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
+	DPAA2_SET_FLE_OFFSET(sge, data_offset);
+	sge->length = sgl->vec[0].len - data_offset;
+
+	/* o/p segs */
+	for (i = 1; i < sgl->num; i++) {
+		sge++;
+		DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
+		DPAA2_SET_FLE_OFFSET(sge, 0);
+		sge->length = sgl->vec[i].len;
+	}
+	DPAA2_SET_FLE_FIN(sge);
+
+	DPAA2_SEC_DP_DEBUG(
+		"RAW CIPHER SG: 1 - flc = %p, fle = %p FLEaddr = %x-%x, len %d\n",
+		flc, fle, fle->addr_hi, fle->addr_lo,
+		fle->length);
+
+	/* i/p fle */
+	sge++;
+	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
+	ip_fle->length = sess->iv.length + data_len;
+	DPAA2_SET_FLE_SG_EXT(ip_fle);
+
+	/* i/p IV */
+	DPAA2_SET_FLE_ADDR(sge, iv->iova);
+	DPAA2_SET_FLE_OFFSET(sge, 0);
+	sge->length = sess->iv.length;
+
+	sge++;
+
+	/* i/p 1st seg */
+	DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
+	DPAA2_SET_FLE_OFFSET(sge, data_offset);
+	sge->length = sgl->vec[0].len - data_offset;
+
+	/* i/p segs */
+	for (i = 1; i < sgl->num; i++) {
+		sge++;
+		DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
+		DPAA2_SET_FLE_OFFSET(sge, 0);
+		sge->length = sgl->vec[i].len;
+	}
+	DPAA2_SET_FLE_FIN(sge);
+	DPAA2_SET_FLE_FIN(ip_fle);
+
+	/* sg fd */
+	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
+	DPAA2_SET_FD_LEN(fd, ip_fle->length);
+	DPAA2_SET_FD_COMPOUND_FMT(fd);
+	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
+
+	DPAA2_SEC_DP_DEBUG(
+		"RAW CIPHER SG: fdaddr =%" PRIx64 " off =%d, len =%d\n",
+		DPAA2_GET_FD_ADDR(fd),
+		DPAA2_GET_FD_OFFSET(fd),
+		DPAA2_GET_FD_LEN(fd));
+
+	return 0;
+}
+
+static __rte_always_inline uint32_t
+dpaa2_sec_raw_enqueue_burst(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
+	void *user_data[], int *status)
+{
+	RTE_SET_USED(user_data);
+	uint32_t loop;
+	int32_t ret;
+	struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
+	uint32_t frames_to_send, retry_count;
+	struct qbman_eq_desc eqdesc;
+	struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp_data;
+	dpaa2_sec_session *sess =
+		((struct dpaa2_sec_raw_dp_ctx *)drv_ctx)->session;
+	struct qbman_swp *swp;
+	uint16_t num_tx = 0;
+	uint32_t flags[MAX_TX_RING_SLOTS] = {0};
+
+	if (unlikely(vec->num == 0))
+		return 0;
+
+	if (sess == NULL) {
+		DPAA2_SEC_ERR("sessionless raw crypto not supported");
+		return 0;
+	}
+	/*Prepare enqueue descriptor*/
+	qbman_eq_desc_clear(&eqdesc);
+	qbman_eq_desc_set_no_orp(&eqdesc, DPAA2_EQ_RESP_ERR_FQ);
+	qbman_eq_desc_set_response(&eqdesc, 0, 0);
+	qbman_eq_desc_set_fq(&eqdesc, dpaa2_qp->tx_vq.fqid);
+
+	if (!DPAA2_PER_LCORE_DPIO) {
+		ret = dpaa2_affine_qbman_swp();
+		if (ret) {
+			DPAA2_SEC_ERR(
+				"Failed to allocate IO portal, tid: %d\n",
+				rte_gettid());
+			return 0;
+		}
+	}
+	swp = DPAA2_PER_LCORE_PORTAL;
+
+	while (vec->num) {
+		frames_to_send = (vec->num > dpaa2_eqcr_size) ?
+			dpaa2_eqcr_size : vec->num;
+
+		for (loop = 0; loop < frames_to_send; loop++) {
+			/*Clear the unused FD fields before sending*/
+			memset(&fd_arr[loop], 0, sizeof(struct qbman_fd));
+			ret = sess->build_raw_dp_fd(drv_ctx,
+						    &vec->src_sgl[loop],
+						    &vec->iv[loop],
+						    &vec->digest[loop],
+						    &vec->auth_iv[loop],
+						    ofs,
+						    user_data[loop],
+						    &fd_arr[loop]);
+			if (ret) {
+				DPAA2_SEC_ERR("error: Improper packet contents"
+					      " for crypto operation");
+				goto skip_tx;
+			}
+			status[loop] = 1;
+		}
+
+		loop = 0;
+		retry_count = 0;
+		while (loop < frames_to_send) {
+			ret = qbman_swp_enqueue_multiple(swp, &eqdesc,
+							 &fd_arr[loop],
+							 &flags[loop],
+							 frames_to_send - loop);
+			if (unlikely(ret < 0)) {
+				retry_count++;
+				if (retry_count > DPAA2_MAX_TX_RETRY_COUNT) {
+					num_tx += loop;
+					vec->num -= loop;
+					goto skip_tx;
+				}
+			} else {
+				loop += ret;
+				retry_count = 0;
+			}
+		}
+
+		num_tx += loop;
+		vec->num -= loop;
+	}
+skip_tx:
+	dpaa2_qp->tx_vq.tx_pkts += num_tx;
+	dpaa2_qp->tx_vq.err_pkts += vec->num;
+
+	return num_tx;
+}
+
+static __rte_always_inline int
+dpaa2_sec_raw_enqueue(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_vec *data_vec,
+	uint16_t n_data_vecs, union rte_crypto_sym_ofs ofs,
+	struct rte_crypto_va_iova_ptr *iv,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *aad_or_auth_iv,
+	void *user_data)
+{
+	RTE_SET_USED(qp_data);
+	RTE_SET_USED(drv_ctx);
+	RTE_SET_USED(data_vec);
+	RTE_SET_USED(n_data_vecs);
+	RTE_SET_USED(ofs);
+	RTE_SET_USED(iv);
+	RTE_SET_USED(digest);
+	RTE_SET_USED(aad_or_auth_iv);
+	RTE_SET_USED(user_data);
+
+	return 0;
+}
+
+static inline void *
+sec_fd_to_userdata(const struct qbman_fd *fd)
+{
+	struct qbman_fle *fle;
+	void *userdata;
+	fle = (struct qbman_fle *)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));
+
+	DPAA2_SEC_DP_DEBUG("FLE addr = %x - %x, offset = %x\n",
+			   fle->addr_hi, fle->addr_lo, fle->fin_bpid_offset);
+	userdata = (struct rte_crypto_op *)DPAA2_GET_FLE_ADDR((fle - 1));
+	/* free the fle memory */
+	rte_free((void *)(fle-1));
+
+	return userdata;
+}
+
+static __rte_always_inline uint32_t
+dpaa2_sec_raw_dequeue_burst(void *qp_data, uint8_t *drv_ctx,
+	rte_cryptodev_raw_get_dequeue_count_t get_dequeue_count,
+	uint32_t max_nb_to_dequeue,
+	rte_cryptodev_raw_post_dequeue_t post_dequeue,
+	void **out_user_data, uint8_t is_user_data_array,
+	uint32_t *n_success, int *dequeue_status)
+{
+	RTE_SET_USED(drv_ctx);
+	RTE_SET_USED(get_dequeue_count);
+
+	/* Function is responsible to receive frames for a given device and VQ*/
+	struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp_data;
+	struct qbman_result *dq_storage;
+	uint32_t fqid = dpaa2_qp->rx_vq.fqid;
+	int ret, num_rx = 0;
+	uint8_t is_last = 0, status;
+	struct qbman_swp *swp;
+	const struct qbman_fd *fd;
+	struct qbman_pull_desc pulldesc;
+	void *user_data;
+	uint32_t nb_ops = max_nb_to_dequeue;
+
+	if (!DPAA2_PER_LCORE_DPIO) {
+		ret = dpaa2_affine_qbman_swp();
+		if (ret) {
+			DPAA2_SEC_ERR(
+				"Failed to allocate IO portal, tid: %d\n",
+				rte_gettid());
+			return 0;
+		}
+	}
+	swp = DPAA2_PER_LCORE_PORTAL;
+	dq_storage = dpaa2_qp->rx_vq.q_storage->dq_storage[0];
+
+	qbman_pull_desc_clear(&pulldesc);
+	qbman_pull_desc_set_numframes(&pulldesc,
+				      (nb_ops > dpaa2_dqrr_size) ?
+				      dpaa2_dqrr_size : nb_ops);
+	qbman_pull_desc_set_fq(&pulldesc, fqid);
+	qbman_pull_desc_set_storage(&pulldesc, dq_storage,
+				    (uint64_t)DPAA2_VADDR_TO_IOVA(dq_storage),
+				    1);
+
+	/*Issue a volatile dequeue command. */
+	while (1) {
+		if (qbman_swp_pull(swp, &pulldesc)) {
+			DPAA2_SEC_WARN(
+				"SEC VDQ command is not issued : QBMAN busy");
+			/* Portal was busy, try again */
+			continue;
+		}
+		break;
+	};
+
+	/* Receive the packets until the Last Dequeue entry is found with
+	 * respect to the above issued PULL command.
+	 */
+	while (!is_last) {
+		/* Check if the previous issued command is completed.
+		 * Also seems like the SWP is shared between the Ethernet Driver
+		 * and the SEC driver.
+		 */
+		while (!qbman_check_command_complete(dq_storage))
+			;
+
+		/* Loop until the dq_storage is updated with
+		 * new token by QBMAN
+		 */
+		while (!qbman_check_new_result(dq_storage))
+			;
+		/* Check whether Last Pull command is Expired and
+		 * setting Condition for Loop termination
+		 */
+		if (qbman_result_DQ_is_pull_complete(dq_storage)) {
+			is_last = 1;
+			/* Check for valid frame. */
+			status = (uint8_t)qbman_result_DQ_flags(dq_storage);
+			if (unlikely(
+				(status & QBMAN_DQ_STAT_VALIDFRAME) == 0)) {
+				DPAA2_SEC_DP_DEBUG("No frame is delivered\n");
+				continue;
+			}
+		}
+
+		fd = qbman_result_DQ_fd(dq_storage);
+		user_data = sec_fd_to_userdata(fd);
+		if (is_user_data_array)
+			out_user_data[num_rx] = user_data;
+		else
+			out_user_data[0] = user_data;
+		if (unlikely(fd->simple.frc)) {
+			/* TODO Parse SEC errors */
+			DPAA2_SEC_ERR("SEC returned Error - %x",
+				      fd->simple.frc);
+			status = RTE_CRYPTO_OP_STATUS_ERROR;
+		} else {
+			status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+		}
+		post_dequeue(user_data, num_rx, status);
+
+		num_rx++;
+		dq_storage++;
+	} /* End of Packet Rx loop */
+
+	dpaa2_qp->rx_vq.rx_pkts += num_rx;
+	*dequeue_status = 1;
+	*n_success = num_rx;
+
+	DPAA2_SEC_DP_DEBUG("SEC Received %d Packets\n", num_rx);
+	/*Return the total number of packets received to DPAA2 app*/
+	return num_rx;
+}
+
+static __rte_always_inline void *
+dpaa2_sec_raw_dequeue(void *qp_data, uint8_t *drv_ctx, int *dequeue_status,
+		enum rte_crypto_op_status *op_status)
+{
+	RTE_SET_USED(qp_data);
+	RTE_SET_USED(drv_ctx);
+	RTE_SET_USED(dequeue_status);
+	RTE_SET_USED(op_status);
+
+	return NULL;
+}
+
+static __rte_always_inline int
+dpaa2_sec_raw_enqueue_done(void *qp_data, uint8_t *drv_ctx, uint32_t n)
+{
+	RTE_SET_USED(qp_data);
+	RTE_SET_USED(drv_ctx);
+	RTE_SET_USED(n);
+
+	return 0;
+}
+
+static __rte_always_inline int
+dpaa2_sec_raw_dequeue_done(void *qp_data, uint8_t *drv_ctx, uint32_t n)
+{
+	RTE_SET_USED(qp_data);
+	RTE_SET_USED(drv_ctx);
+	RTE_SET_USED(n);
+
+	return 0;
+}
+
+int
+dpaa2_sec_configure_raw_dp_ctx(struct rte_cryptodev *dev, uint16_t qp_id,
+	struct rte_crypto_raw_dp_ctx *raw_dp_ctx,
+	enum rte_crypto_op_sess_type sess_type,
+	union rte_cryptodev_session_ctx session_ctx, uint8_t is_update)
+{
+	dpaa2_sec_session *sess;
+	struct dpaa2_sec_raw_dp_ctx *dp_ctx;
+	RTE_SET_USED(qp_id);
+
+	if (!is_update) {
+		memset(raw_dp_ctx, 0, sizeof(*raw_dp_ctx));
+		raw_dp_ctx->qp_data = dev->data->queue_pairs[qp_id];
+	}
+
+	if (sess_type == RTE_CRYPTO_OP_SECURITY_SESSION)
+		sess = (dpaa2_sec_session *)get_sec_session_private_data(
+				session_ctx.sec_sess);
+	else if (sess_type == RTE_CRYPTO_OP_WITH_SESSION)
+		sess = (dpaa2_sec_session *)get_sym_session_private_data(
+			session_ctx.crypto_sess, cryptodev_driver_id);
+	else
+		return -ENOTSUP;
+	raw_dp_ctx->dequeue_burst = dpaa2_sec_raw_dequeue_burst;
+	raw_dp_ctx->dequeue = dpaa2_sec_raw_dequeue;
+	raw_dp_ctx->dequeue_done = dpaa2_sec_raw_dequeue_done;
+	raw_dp_ctx->enqueue_burst = dpaa2_sec_raw_enqueue_burst;
+	raw_dp_ctx->enqueue = dpaa2_sec_raw_enqueue;
+	raw_dp_ctx->enqueue_done = dpaa2_sec_raw_enqueue_done;
+
+	if (sess->ctxt_type == DPAA2_SEC_CIPHER_HASH)
+		sess->build_raw_dp_fd = build_raw_dp_chain_fd;
+	else if (sess->ctxt_type == DPAA2_SEC_AEAD)
+		sess->build_raw_dp_fd = build_raw_dp_aead_fd;
+	else if (sess->ctxt_type == DPAA2_SEC_AUTH)
+		sess->build_raw_dp_fd = build_raw_dp_auth_fd;
+	else if (sess->ctxt_type == DPAA2_SEC_CIPHER)
+		sess->build_raw_dp_fd = build_raw_dp_cipher_fd;
+	else if (sess->ctxt_type == DPAA2_SEC_IPSEC)
+		sess->build_raw_dp_fd = build_raw_dp_proto_fd;
+	else if (sess->ctxt_type == DPAA2_SEC_PDCP)
+		sess->build_raw_dp_fd = build_raw_dp_proto_compound_fd;
+	else
+		return -ENOTSUP;
+	dp_ctx = (struct dpaa2_sec_raw_dp_ctx *)raw_dp_ctx->drv_ctx_data;
+	dp_ctx->session = sess;
+
+	return 0;
+}
+
+int
+dpaa2_sec_get_dp_ctx_size(__rte_unused struct rte_cryptodev *dev)
+{
+	return sizeof(struct dpaa2_sec_raw_dp_ctx);
+}
diff --git a/drivers/crypto/dpaa2_sec/meson.build b/drivers/crypto/dpaa2_sec/meson.build
index ea1d73a13d..e6e5abb3c1 100644
--- a/drivers/crypto/dpaa2_sec/meson.build
+++ b/drivers/crypto/dpaa2_sec/meson.build
@@ -1,5 +1,5 @@
 # SPDX-License-Identifier: BSD-3-Clause
-# Copyright 2018 NXP
+# Copyright 2018,2021 NXP
 
 if not is_linux
     build = false
@@ -9,6 +9,7 @@ endif
 deps += ['security', 'mempool_dpaa2']
 sources = files(
         'dpaa2_sec_dpseci.c',
+	'dpaa2_sec_raw_dp.c',
         'mc/dpseci.c',
 )
 
-- 
2.17.1


^ permalink raw reply	[flat|nested] 115+ messages in thread
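
For context, the application-side flow that exercises these ops is: query the context size, allocate it, bind a session, then drive the raw enqueue/dequeue burst calls. A sketch with error handling trimmed (setup_raw_ctx is an illustrative name):

    #include <rte_cryptodev.h>
    #include <rte_malloc.h>

    static struct rte_crypto_raw_dp_ctx *
    setup_raw_ctx(uint8_t dev_id, uint16_t qp_id,
                  struct rte_cryptodev_sym_session *sess)
    {
        union rte_cryptodev_session_ctx sess_ctx = { .crypto_sess = sess };
        int sz = rte_cryptodev_get_raw_dp_ctx_size(dev_id);
        struct rte_crypto_raw_dp_ctx *ctx;

        if (sz < 0)
            return NULL;
        ctx = rte_zmalloc(NULL, sz, 0);   /* sz includes the driver data */
        if (ctx == NULL)
            return NULL;
        if (rte_cryptodev_configure_raw_dp_ctx(dev_id, qp_id, ctx,
                RTE_CRYPTO_OP_WITH_SESSION, sess_ctx, 0) < 0) {
            rte_free(ctx);
            return NULL;
        }
        return ctx;
    }

rte_cryptodev_raw_enqueue_burst()/rte_cryptodev_raw_dequeue_burst() then operate on the returned context, with the corresponding *_done() calls committing cached counts on PMDs that batch them.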

* [dpdk-dev] [PATCH 06/15] crypto/dpaa2_sec: support AUTH only with raw buffer APIs
  2021-08-25  7:14 ` [dpdk-dev] [PATCH 00/15] crypto: add raw vector support in DPAAx Hemant Agrawal
                     ` (4 preceding siblings ...)
  2021-08-25  7:15   ` [dpdk-dev] [PATCH 05/15] crypto/dpaa2_sec: support raw datapath APIs Hemant Agrawal
@ 2021-08-25  7:15   ` Hemant Agrawal
  2021-08-25  7:15   ` [dpdk-dev] [PATCH 07/15] crypto/dpaa2_sec: support AUTHENC " Hemant Agrawal
                     ` (10 subsequent siblings)
  16 siblings, 0 replies; 115+ messages in thread
From: Hemant Agrawal @ 2021-08-25  7:15 UTC (permalink / raw)
  To: dev, gakhil; +Cc: konstantin.ananyev, roy.fan.zhang, Gagandeep Singh

From: Gagandeep Singh <g.singh@nxp.com>

This patch adds support for auth-only operations with the raw buffer APIs.

Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
---
 drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h   |  21 ----
 drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c | 114 ++++++++++++++++++--
 2 files changed, 108 insertions(+), 27 deletions(-)

diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h b/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h
index 860c9b6520..f6507855e3 100644
--- a/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h
@@ -231,27 +231,6 @@ typedef struct dpaa2_sec_session_entry {
 
 static const struct rte_cryptodev_capabilities dpaa2_sec_capabilities[] = {
 	/* Symmetric capabilities */
-	{	/* NULL (AUTH) */
-		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
-		{.sym = {
-			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
-			{.auth = {
-				.algo = RTE_CRYPTO_AUTH_NULL,
-				.block_size = 1,
-				.key_size = {
-					.min = 0,
-					.max = 0,
-					.increment = 0
-				},
-				.digest_size = {
-					.min = 0,
-					.max = 0,
-					.increment = 0
-				},
-				.iv_size = { 0 }
-			}, },
-		}, },
-	},
 	{	/* MD5 */
 		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
 		{.sym = {
diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c b/drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c
index 32abf5a431..af052202d9 100644
--- a/drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c
@@ -11,6 +11,8 @@
 #include "dpaa2_sec_priv.h"
 #include "dpaa2_sec_logs.h"
 
+#include <desc/algo.h>
+
 struct dpaa2_sec_raw_dp_ctx {
 	dpaa2_sec_session *session;
 	uint32_t tail;
@@ -73,14 +75,114 @@ build_raw_dp_auth_fd(uint8_t *drv_ctx,
 		       void *userdata,
 		       struct qbman_fd *fd)
 {
-	RTE_SET_USED(drv_ctx);
-	RTE_SET_USED(sgl);
 	RTE_SET_USED(iv);
-	RTE_SET_USED(digest);
 	RTE_SET_USED(auth_iv);
-	RTE_SET_USED(ofs);
-	RTE_SET_USED(userdata);
-	RTE_SET_USED(fd);
+
+	dpaa2_sec_session *sess =
+		((struct dpaa2_sec_raw_dp_ctx *)drv_ctx)->session;
+	struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
+	struct sec_flow_context *flc;
+	int total_len = 0, data_len = 0, data_offset;
+	uint8_t *old_digest;
+	struct ctxt_priv *priv = sess->ctxt;
+	unsigned int i;
+
+	for (i = 0; i < sgl->num; i++)
+		total_len += sgl->vec[i].len;
+
+	data_len = total_len - ofs.ofs.auth.head - ofs.ofs.auth.tail;
+	data_offset = ofs.ofs.auth.head;
+
+	if (sess->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
+		sess->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
+		if ((data_len & 7) || (data_offset & 7)) {
+			DPAA2_SEC_ERR("AUTH: len/offset must be full bytes");
+			return -ENOTSUP;
+		}
+
+		data_len = data_len >> 3;
+		data_offset = data_offset >> 3;
+	}
+	fle = (struct qbman_fle *)rte_malloc(NULL,
+		FLE_SG_MEM_SIZE(2 * sgl->num),
+			RTE_CACHE_LINE_SIZE);
+	if (unlikely(!fle)) {
+		DPAA2_SEC_ERR("AUTH SG: Memory alloc failed for SGE");
+		return -ENOMEM;
+	}
+	memset(fle, 0, FLE_SG_MEM_SIZE(2*sgl->num));
+	/* first FLE entry used to store mbuf and session ctxt */
+	DPAA2_SET_FLE_ADDR(fle, (size_t)userdata);
+	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
+	op_fle = fle + 1;
+	ip_fle = fle + 2;
+	sge = fle + 3;
+
+	flc = &priv->flc_desc[DESC_INITFINAL].flc;
+
+	/* sg FD */
+	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
+	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
+	DPAA2_SET_FD_COMPOUND_FMT(fd);
+
+	/* o/p fle */
+	DPAA2_SET_FLE_ADDR(op_fle,
+			DPAA2_VADDR_TO_IOVA(digest->va));
+	op_fle->length = sess->digest_length;
+
+	/* i/p fle */
+	DPAA2_SET_FLE_SG_EXT(ip_fle);
+	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
+	ip_fle->length = data_len;
+
+	if (sess->iv.length) {
+		uint8_t *iv_ptr;
+
+		iv_ptr = rte_crypto_op_ctod_offset(userdata, uint8_t *,
+						sess->iv.offset);
+
+		if (sess->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
+			iv_ptr = conv_to_snow_f9_iv(iv_ptr);
+			sge->length = 12;
+		} else if (sess->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
+			iv_ptr = conv_to_zuc_eia_iv(iv_ptr);
+			sge->length = 8;
+		} else {
+			sge->length = sess->iv.length;
+		}
+		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
+		ip_fle->length += sge->length;
+		sge++;
+	}
+	/* i/p 1st seg */
+	DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
+	DPAA2_SET_FLE_OFFSET(sge, data_offset);
+
+	if (data_len <= (int)(sgl->vec[0].len - data_offset)) {
+		sge->length = data_len;
+		data_len = 0;
+	} else {
+		sge->length = sgl->vec[0].len - data_offset;
+		for (i = 1; i < sgl->num; i++) {
+			sge++;
+			DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
+			DPAA2_SET_FLE_OFFSET(sge, 0);
+			sge->length = sgl->vec[i].len;
+		}
+	}
+	if (sess->dir == DIR_DEC) {
+		/* Digest verification case */
+		sge++;
+		old_digest = (uint8_t *)(sge + 1);
+		rte_memcpy(old_digest, digest->va,
+			sess->digest_length);
+		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_digest));
+		sge->length = sess->digest_length;
+		ip_fle->length += sess->digest_length;
+	}
+	DPAA2_SET_FLE_FIN(sge);
+	DPAA2_SET_FLE_FIN(ip_fle);
+	DPAA2_SET_FD_LEN(fd, ip_fle->length);
 
 	return 0;
 }
-- 
2.17.1


^ permalink raw reply	[flat|nested] 115+ messages in thread
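
Note the SNOW 3G UIA2 / ZUC EIA3 branch above: for these algorithms the API carries the auth offset and length in bits, and the builder only accepts whole bytes. A sketch of that conversion guard (the helper name is illustrative):

    #include <errno.h>

    /* Bit-granular offsets are rejected; full bytes convert with >> 3. */
    static int
    auth_bits_to_bytes(int *data_len, int *data_offset)
    {
        if ((*data_len & 7) || (*data_offset & 7))
            return -ENOTSUP;
        *data_len >>= 3;
        *data_offset >>= 3;
        return 0;
    }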

* [dpdk-dev] [PATCH 07/15] crypto/dpaa2_sec: support AUTHENC with raw buffer APIs
  2021-08-25  7:14 ` [dpdk-dev] [PATCH 00/15] crypto: add raw vector support in DPAAx Hemant Agrawal
                     ` (5 preceding siblings ...)
  2021-08-25  7:15   ` [dpdk-dev] [PATCH 06/15] crypto/dpaa2_sec: support AUTH only with raw buffer APIs Hemant Agrawal
@ 2021-08-25  7:15   ` Hemant Agrawal
  2021-08-25  7:15   ` [dpdk-dev] [PATCH 08/15] crypto/dpaa2_sec: support AEAD " Hemant Agrawal
                     ` (9 subsequent siblings)
  16 siblings, 0 replies; 115+ messages in thread
From: Hemant Agrawal @ 2021-08-25  7:15 UTC (permalink / raw)
  To: dev, gakhil; +Cc: konstantin.ananyev, roy.fan.zhang, Gagandeep Singh

From: Gagandeep Singh <g.singh@nxp.com>

This patch adds AUTHENC support with raw buffer APIs.

Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
---
 drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c | 128 ++++++++++++++++++--
 1 file changed, 121 insertions(+), 7 deletions(-)

diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c b/drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c
index af052202d9..505431fc23 100644
--- a/drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c
@@ -31,14 +31,128 @@ build_raw_dp_chain_fd(uint8_t *drv_ctx,
 		       void *userdata,
 		       struct qbman_fd *fd)
 {
-	RTE_SET_USED(drv_ctx);
-	RTE_SET_USED(sgl);
-	RTE_SET_USED(iv);
-	RTE_SET_USED(digest);
 	RTE_SET_USED(auth_iv);
-	RTE_SET_USED(ofs);
-	RTE_SET_USED(userdata);
-	RTE_SET_USED(fd);
+
+	dpaa2_sec_session *sess =
+		((struct dpaa2_sec_raw_dp_ctx *)drv_ctx)->session;
+	struct ctxt_priv *priv = sess->ctxt;
+	struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
+	struct sec_flow_context *flc;
+	int data_len = 0, auth_len = 0, cipher_len = 0;
+	unsigned int i = 0;
+	uint16_t auth_hdr_len = ofs.ofs.cipher.head -
+				ofs.ofs.auth.head;
+
+	uint16_t auth_tail_len = ofs.ofs.auth.tail;
+	uint32_t auth_only_len = (auth_tail_len << 16) | auth_hdr_len;
+	int icv_len = sess->digest_length;
+	uint8_t *old_icv;
+	uint8_t *iv_ptr = iv->va;
+
+	for (i = 0; i < sgl->num; i++)
+		data_len += sgl->vec[i].len;
+
+	cipher_len = data_len - ofs.ofs.cipher.head - ofs.ofs.cipher.tail;
+	auth_len = data_len - ofs.ofs.auth.head - ofs.ofs.auth.tail;
+	/* first FLE entry used to store session ctxt */
+	fle = (struct qbman_fle *)rte_malloc(NULL,
+			FLE_SG_MEM_SIZE(2 * sgl->num),
+			RTE_CACHE_LINE_SIZE);
+	if (unlikely(!fle)) {
+		DPAA2_SEC_ERR("AUTHENC SG: Memory alloc failed for SGE");
+		return -ENOMEM;
+	}
+	memset(fle, 0, FLE_SG_MEM_SIZE(2 * sgl->num));
+	DPAA2_SET_FLE_ADDR(fle, (size_t)userdata);
+	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
+
+	op_fle = fle + 1;
+	ip_fle = fle + 2;
+	sge = fle + 3;
+
+	/* Save the shared descriptor */
+	flc = &priv->flc_desc[0].flc;
+
+	/* Configure FD as a FRAME LIST */
+	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
+	DPAA2_SET_FD_COMPOUND_FMT(fd);
+	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
+
+	/* Configure Output FLE with Scatter/Gather Entry */
+	DPAA2_SET_FLE_SG_EXT(op_fle);
+	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));
+
+	if (auth_only_len)
+		DPAA2_SET_FLE_INTERNAL_JD(op_fle, auth_only_len);
+
+	op_fle->length = (sess->dir == DIR_ENC) ?
+			(cipher_len + icv_len) :
+			cipher_len;
+
+	/* Configure Output SGE for Encap/Decap */
+	DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
+	DPAA2_SET_FLE_OFFSET(sge, ofs.ofs.auth.head);
+	sge->length = sgl->vec[0].len - ofs.ofs.auth.head;
+
+	/* o/p segs */
+	for (i = 1; i < sgl->num; i++) {
+		sge++;
+		DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
+		DPAA2_SET_FLE_OFFSET(sge, 0);
+		sge->length = sgl->vec[i].len;
+	}
+
+	if (sess->dir == DIR_ENC) {
+		sge++;
+		DPAA2_SET_FLE_ADDR(sge,
+			digest->iova);
+		sge->length = icv_len;
+	}
+	DPAA2_SET_FLE_FIN(sge);
+
+	sge++;
+
+	/* Configure Input FLE with Scatter/Gather Entry */
+	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
+	DPAA2_SET_FLE_SG_EXT(ip_fle);
+	DPAA2_SET_FLE_FIN(ip_fle);
+
+	ip_fle->length = (sess->dir == DIR_ENC) ?
+			(auth_len + sess->iv.length) :
+			(auth_len + sess->iv.length +
+			icv_len);
+
+	/* Configure Input SGE for Encap/Decap */
+	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
+	sge->length = sess->iv.length;
+
+	sge++;
+	DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
+	DPAA2_SET_FLE_OFFSET(sge, ofs.ofs.auth.head);
+	sge->length = sgl->vec[0].len - ofs.ofs.auth.head;
+
+	for (i = 1; i < sgl->num; i++) {
+		sge++;
+		DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
+		DPAA2_SET_FLE_OFFSET(sge, 0);
+		sge->length = sgl->vec[i].len;
+	}
+
+	if (sess->dir == DIR_DEC) {
+		sge++;
+		old_icv = (uint8_t *)(sge + 1);
+		memcpy(old_icv, digest->va,
+			icv_len);
+		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
+		sge->length = icv_len;
+	}
+
+	DPAA2_SET_FLE_FIN(sge);
+	if (auth_only_len) {
+		DPAA2_SET_FLE_INTERNAL_JD(ip_fle, auth_only_len);
+		DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
+	}
+	DPAA2_SET_FD_LEN(fd, ip_fle->length);
 
 	return 0;
 }
-- 
2.17.1


^ permalink raw reply	[flat|nested] 115+ messages in thread
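
The auth_only_len encoding above is the central contract of this
builder: the cipher region is nested inside the auth region, and the
auth-only head/tail byte counts are packed into one 32-bit word. A
small worked illustration with hypothetical offsets:

	/* |<-auth.head->|<-auth-only hdr->|<-ciphered data->|<-auth.tail->|
	 *               ^ auth region starts   ^ cipher region
	 *
	 * e.g. ofs.ofs.auth.head = 0, ofs.ofs.cipher.head = 8,
	 *      ofs.ofs.auth.tail = 0:
	 * auth_hdr_len  = 8 - 0 = 8
	 * auth_tail_len = 0
	 * auth_only_len = (0 << 16) | 8 = 8
	 */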

* [dpdk-dev] [PATCH 08/15] crypto/dpaa2_sec: support AEAD with raw buffer APIs
  2021-08-25  7:14 ` [dpdk-dev] [PATCH 00/15] crypto: add raw vector support in DPAAx Hemant Agrawal
                     ` (6 preceding siblings ...)
  2021-08-25  7:15   ` [dpdk-dev] [PATCH 07/15] crypto/dpaa2_sec: support AUTHENC " Hemant Agrawal
@ 2021-08-25  7:15   ` Hemant Agrawal
  2021-08-25  7:15   ` [dpdk-dev] [PATCH 09/15] crypto/dpaa2_sec: support OOP with raw buffer API Hemant Agrawal
                     ` (8 subsequent siblings)
  16 siblings, 0 replies; 115+ messages in thread
From: Hemant Agrawal @ 2021-08-25  7:15 UTC (permalink / raw)
  To: dev, gakhil; +Cc: konstantin.ananyev, roy.fan.zhang, Gagandeep Singh

From: Gagandeep Singh <g.singh@nxp.com>

Add raw vector API support for AEAD algos.

Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
---
 drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c | 249 +++++++++++++++++---
 1 file changed, 214 insertions(+), 35 deletions(-)

diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c b/drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c
index 505431fc23..5191e5381c 100644
--- a/drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c
@@ -167,14 +167,126 @@ build_raw_dp_aead_fd(uint8_t *drv_ctx,
 		       void *userdata,
 		       struct qbman_fd *fd)
 {
-	RTE_SET_USED(drv_ctx);
-	RTE_SET_USED(sgl);
-	RTE_SET_USED(iv);
-	RTE_SET_USED(digest);
-	RTE_SET_USED(auth_iv);
-	RTE_SET_USED(ofs);
-	RTE_SET_USED(userdata);
-	RTE_SET_USED(fd);
+	dpaa2_sec_session *sess =
+		((struct dpaa2_sec_raw_dp_ctx *)drv_ctx)->session;
+	struct ctxt_priv *priv = sess->ctxt;
+	struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
+	struct sec_flow_context *flc;
+	uint32_t auth_only_len = sess->ext_params.aead_ctxt.auth_only_len;
+	int icv_len = sess->digest_length;
+	uint8_t *old_icv;
+	uint8_t *IV_ptr = iv->va;
+	unsigned int i = 0;
+	int data_len = 0, aead_len = 0;
+
+	for (i = 0; i < sgl->num; i++)
+		data_len += sgl->vec[i].len;
+
+	aead_len = data_len - ofs.ofs.cipher.head - ofs.ofs.cipher.tail;
+
+	/* first FLE entry used to store userdata and session ctxt */
+	fle = (struct qbman_fle *)rte_malloc(NULL,
+			FLE_SG_MEM_SIZE(2 * sgl->num),
+			RTE_CACHE_LINE_SIZE);
+	if (unlikely(!fle)) {
+		DPAA2_SEC_ERR("GCM SG: Memory alloc failed for SGE");
+		return -ENOMEM;
+	}
+	memset(fle, 0, FLE_SG_MEM_SIZE(2 * sgl->num));
+	DPAA2_SET_FLE_ADDR(fle, (size_t)userdata);
+	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
+
+	op_fle = fle + 1;
+	ip_fle = fle + 2;
+	sge = fle + 3;
+
+	/* Save the shared descriptor */
+	flc = &priv->flc_desc[0].flc;
+
+	/* Configure FD as a FRAME LIST */
+	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
+	DPAA2_SET_FD_COMPOUND_FMT(fd);
+	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
+
+	/* Configure Output FLE with Scatter/Gather Entry */
+	DPAA2_SET_FLE_SG_EXT(op_fle);
+	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));
+
+	if (auth_only_len)
+		DPAA2_SET_FLE_INTERNAL_JD(op_fle, auth_only_len);
+
+	op_fle->length = (sess->dir == DIR_ENC) ?
+			(aead_len + icv_len) :
+			aead_len;
+
+	/* Configure Output SGE for Encap/Decap */
+	DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
+	DPAA2_SET_FLE_OFFSET(sge, ofs.ofs.cipher.head);
+	sge->length = sgl->vec[0].len - ofs.ofs.cipher.head;
+
+	/* o/p segs */
+	for (i = 1; i < sgl->num; i++) {
+		sge++;
+		DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
+		DPAA2_SET_FLE_OFFSET(sge, 0);
+		sge->length = sgl->vec[i].len;
+	}
+
+	if (sess->dir == DIR_ENC) {
+		sge++;
+		DPAA2_SET_FLE_ADDR(sge, digest->iova);
+		sge->length = icv_len;
+	}
+	DPAA2_SET_FLE_FIN(sge);
+
+	sge++;
+
+	/* Configure Input FLE with Scatter/Gather Entry */
+	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
+	DPAA2_SET_FLE_SG_EXT(ip_fle);
+	DPAA2_SET_FLE_FIN(ip_fle);
+	ip_fle->length = (sess->dir == DIR_ENC) ?
+		(aead_len + sess->iv.length + auth_only_len) :
+		(aead_len + sess->iv.length + auth_only_len +
+		icv_len);
+
+	/* Configure Input SGE for Encap/Decap */
+	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(IV_ptr));
+	sge->length = sess->iv.length;
+
+	sge++;
+	if (auth_only_len) {
+		DPAA2_SET_FLE_ADDR(sge, auth_iv->iova);
+		sge->length = auth_only_len;
+		sge++;
+	}
+
+	DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
+	DPAA2_SET_FLE_OFFSET(sge, ofs.ofs.cipher.head);
+	sge->length = sgl->vec[0].len - ofs.ofs.cipher.head;
+
+	/* i/p segs */
+	for (i = 1; i < sgl->num; i++) {
+		sge++;
+		DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
+		DPAA2_SET_FLE_OFFSET(sge, 0);
+		sge->length = sgl->vec[i].len;
+	}
+
+	if (sess->dir == DIR_DEC) {
+		sge++;
+		old_icv = (uint8_t *)(sge + 1);
+		memcpy(old_icv,  digest->va, icv_len);
+		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
+		sge->length = icv_len;
+	}
+
+	DPAA2_SET_FLE_FIN(sge);
+	if (auth_only_len) {
+		DPAA2_SET_FLE_INTERNAL_JD(ip_fle, auth_only_len);
+		DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
+	}
+	DPAA2_SET_FD_LEN(fd, ip_fle->length);
 
 	return 0;
 }
@@ -311,36 +423,104 @@ build_raw_dp_proto_fd(uint8_t *drv_ctx,
 		       void *userdata,
 		       struct qbman_fd *fd)
 {
-	RTE_SET_USED(drv_ctx);
-	RTE_SET_USED(sgl);
 	RTE_SET_USED(iv);
 	RTE_SET_USED(digest);
 	RTE_SET_USED(auth_iv);
 	RTE_SET_USED(ofs);
-	RTE_SET_USED(userdata);
-	RTE_SET_USED(fd);
 
-	return 0;
-}
+	dpaa2_sec_session *sess =
+		((struct dpaa2_sec_raw_dp_ctx *)drv_ctx)->session;
+	struct ctxt_priv *priv = sess->ctxt;
+	struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
+	struct sec_flow_context *flc;
+	uint32_t in_len = 0, out_len = 0, i;
 
-static int
-build_raw_dp_proto_compound_fd(uint8_t *drv_ctx,
-		       struct rte_crypto_sgl *sgl,
-		       struct rte_crypto_va_iova_ptr *iv,
-		       struct rte_crypto_va_iova_ptr *digest,
-		       struct rte_crypto_va_iova_ptr *auth_iv,
-		       union rte_crypto_sym_ofs ofs,
-		       void *userdata,
-		       struct qbman_fd *fd)
-{
-	RTE_SET_USED(drv_ctx);
-	RTE_SET_USED(sgl);
-	RTE_SET_USED(iv);
-	RTE_SET_USED(digest);
-	RTE_SET_USED(auth_iv);
-	RTE_SET_USED(ofs);
-	RTE_SET_USED(userdata);
-	RTE_SET_USED(fd);
+	/* first FLE entry used to store userdata and session ctxt */
+	fle = (struct qbman_fle *)rte_malloc(NULL,
+			FLE_SG_MEM_SIZE(2 * sgl->num),
+			RTE_CACHE_LINE_SIZE);
+	if (unlikely(!fle)) {
+		DPAA2_SEC_DP_ERR("Proto:SG: Memory alloc failed for SGE");
+		return -ENOMEM;
+	}
+	memset(fle, 0, FLE_SG_MEM_SIZE(2 * sgl->num));
+	DPAA2_SET_FLE_ADDR(fle, (size_t)userdata);
+	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
+
+	/* Save the shared descriptor */
+	flc = &priv->flc_desc[0].flc;
+	op_fle = fle + 1;
+	ip_fle = fle + 2;
+	sge = fle + 3;
+
+	DPAA2_SET_FD_IVP(fd);
+	DPAA2_SET_FLE_IVP(op_fle);
+	DPAA2_SET_FLE_IVP(ip_fle);
+
+	/* Configure FD as a FRAME LIST */
+	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
+	DPAA2_SET_FD_COMPOUND_FMT(fd);
+	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
+
+	/* Configure Output FLE with Scatter/Gather Entry */
+	DPAA2_SET_FLE_SG_EXT(op_fle);
+	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));
+
+	/* Configure Output SGE for Encap/Decap */
+	DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
+	DPAA2_SET_FLE_OFFSET(sge, 0);
+	sge->length = sgl->vec[0].len;
+	out_len += sge->length;
+	/* o/p segs */
+	for (i = 1; i < sgl->num; i++) {
+		sge++;
+		DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
+		DPAA2_SET_FLE_OFFSET(sge, 0);
+		sge->length = sgl->vec[i].len;
+		out_len += sge->length;
+	}
+	sge->length = sgl->vec[i - 1].tot_len;
+	out_len += sge->length;
+
+	DPAA2_SET_FLE_FIN(sge);
+	op_fle->length = out_len;
+
+	sge++;
+
+	/* Configure Input FLE with Scatter/Gather Entry */
+	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
+	DPAA2_SET_FLE_SG_EXT(ip_fle);
+	DPAA2_SET_FLE_FIN(ip_fle);
+
+	/* Configure input SGE for Encap/Decap */
+	DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
+	DPAA2_SET_FLE_OFFSET(sge, 0);
+	sge->length = sgl->vec[0].len;
+	in_len += sge->length;
+	/* i/p segs */
+	for (i = 1; i < sgl->num; i++) {
+		sge++;
+		DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
+		DPAA2_SET_FLE_OFFSET(sge, 0);
+		sge->length = sgl->vec[i].len;
+		in_len += sge->length;
+	}
+
+	ip_fle->length = in_len;
+	DPAA2_SET_FLE_FIN(sge);
+
+	/* In case of PDCP, per packet HFN is stored in
+	 * mbuf priv after sym_op.
+	 */
+	if (sess->ctxt_type == DPAA2_SEC_PDCP && sess->pdcp.hfn_ovd) {
+		uint32_t hfn_ovd = *(uint32_t *)((uint8_t *)userdata +
+				sess->pdcp.hfn_ovd_offset);
+		/* enable HFN override */
+		DPAA2_SET_FLE_INTERNAL_JD(ip_fle, hfn_ovd);
+		DPAA2_SET_FLE_INTERNAL_JD(op_fle, hfn_ovd);
+		DPAA2_SET_FD_INTERNAL_JD(fd, hfn_ovd);
+	}
+	DPAA2_SET_FD_LEN(fd, ip_fle->length);
 
 	return 0;
 }
@@ -792,10 +972,9 @@ dpaa2_sec_configure_raw_dp_ctx(struct rte_cryptodev *dev, uint16_t qp_id,
 		sess->build_raw_dp_fd = build_raw_dp_auth_fd;
 	else if (sess->ctxt_type == DPAA2_SEC_CIPHER)
 		sess->build_raw_dp_fd = build_raw_dp_cipher_fd;
-	else if (sess->ctxt_type == DPAA2_SEC_IPSEC)
+	else if (sess->ctxt_type == DPAA2_SEC_IPSEC ||
+		sess->ctxt_type == DPAA2_SEC_PDCP)
 		sess->build_raw_dp_fd = build_raw_dp_proto_fd;
-	else if (sess->ctxt_type == DPAA2_SEC_PDCP)
-		sess->build_raw_dp_fd = build_raw_dp_proto_compound_fd;
 	else
 		return -ENOTSUP;
 	dp_ctx = (struct dpaa2_sec_raw_dp_ctx *)raw_dp_ctx->drv_ctx_data;
-- 
2.17.1


^ permalink raw reply	[flat|nested] 115+ messages in thread
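
One detail worth noting in the AEAD builder: the auth_iv argument
carries the AAD here, since struct rte_crypto_sym_vec keeps aad and
auth_iv as members of one union. A hedged sketch of filling a single
GCM op, with all buffers assumed preallocated by the application:

	struct rte_crypto_va_iova_ptr iv = { .va = iv_buf, .iova = iv_iova };
	struct rte_crypto_va_iova_ptr aad = { .va = aad_buf, .iova = aad_iova };
	struct rte_crypto_va_iova_ptr tag = { .va = tag_buf, .iova = tag_iova };

	vec.num = 1;
	vec.src_sgl = &sgl;	/* payload segments */
	vec.iv = &iv;
	vec.digest = &tag;	/* GCM tag, written on ENC, checked on DEC */
	vec.aad = &aad;		/* same slot as auth_iv in the union */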

* [dpdk-dev] [PATCH 09/15] crypto/dpaa2_sec: support OOP with raw buffer API
  2021-08-25  7:14 ` [dpdk-dev] [PATCH 00/15] crypto: add raw vector support in DPAAx Hemant Agrawal
                     ` (7 preceding siblings ...)
  2021-08-25  7:15   ` [dpdk-dev] [PATCH 08/15] crypto/dpaa2_sec: support AEAD " Hemant Agrawal
@ 2021-08-25  7:15   ` Hemant Agrawal
  2021-08-25  7:15   ` [dpdk-dev] [PATCH 10/15] crypto/dpaa2_sec: enhance error checks with raw buffer APIs Hemant Agrawal
                     ` (7 subsequent siblings)
  16 siblings, 0 replies; 115+ messages in thread
From: Hemant Agrawal @ 2021-08-25  7:15 UTC (permalink / raw)
  To: dev, gakhil; +Cc: konstantin.ananyev, roy.fan.zhang, Gagandeep Singh

From: Gagandeep Singh <g.singh@nxp.com>

Add support for out-of-place (OOP) processing with raw vector APIs.

Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
---
 drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h   |   1 +
 drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c | 156 +++++++++++++++-----
 2 files changed, 116 insertions(+), 41 deletions(-)

diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h b/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h
index f6507855e3..db72c11a5f 100644
--- a/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h
@@ -179,6 +179,7 @@ typedef int (*dpaa2_sec_build_fd_t)(
 
 typedef int (*dpaa2_sec_build_raw_dp_fd_t)(uint8_t *drv_ctx,
 		       struct rte_crypto_sgl *sgl,
+		       struct rte_crypto_sgl *dest_sgl,
 		       struct rte_crypto_va_iova_ptr *iv,
 		       struct rte_crypto_va_iova_ptr *digest,
 		       struct rte_crypto_va_iova_ptr *auth_iv,
diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c b/drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c
index 5191e5381c..51e316cc00 100644
--- a/drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c
@@ -24,6 +24,7 @@ struct dpaa2_sec_raw_dp_ctx {
 static int
 build_raw_dp_chain_fd(uint8_t *drv_ctx,
 		       struct rte_crypto_sgl *sgl,
+		       struct rte_crypto_sgl *dest_sgl,
 		       struct rte_crypto_va_iova_ptr *iv,
 		       struct rte_crypto_va_iova_ptr *digest,
 		       struct rte_crypto_va_iova_ptr *auth_iv,
@@ -89,17 +90,33 @@ build_raw_dp_chain_fd(uint8_t *drv_ctx,
 			(cipher_len + icv_len) :
 			cipher_len;
 
-	/* Configure Output SGE for Encap/Decap */
-	DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
-	DPAA2_SET_FLE_OFFSET(sge, ofs.ofs.auth.head);
-	sge->length = sgl->vec[0].len - ofs.ofs.auth.head;
+	/* OOP */
+	if (dest_sgl) {
+		/* Configure Output SGE for Encap/Decap */
+		DPAA2_SET_FLE_ADDR(sge, dest_sgl->vec[0].iova);
+		DPAA2_SET_FLE_OFFSET(sge, ofs.ofs.cipher.head);
+		sge->length = dest_sgl->vec[0].len - ofs.ofs.cipher.head;
 
-	/* o/p segs */
-	for (i = 1; i < sgl->num; i++) {
-		sge++;
-		DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
-		DPAA2_SET_FLE_OFFSET(sge, 0);
-		sge->length = sgl->vec[i].len;
+		/* o/p segs */
+		for (i = 1; i < dest_sgl->num; i++) {
+			sge++;
+			DPAA2_SET_FLE_ADDR(sge, dest_sgl->vec[i].iova);
+			DPAA2_SET_FLE_OFFSET(sge, 0);
+			sge->length = dest_sgl->vec[i].len;
+		}
+	} else {
+		/* Configure Output SGE for Encap/Decap */
+		DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
+		DPAA2_SET_FLE_OFFSET(sge, ofs.ofs.cipher.head);
+		sge->length = sgl->vec[0].len - ofs.ofs.cipher.head;
+
+		/* o/p segs */
+		for (i = 1; i < sgl->num; i++) {
+			sge++;
+			DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
+			DPAA2_SET_FLE_OFFSET(sge, 0);
+			sge->length = sgl->vec[i].len;
+		}
 	}
 
 	if (sess->dir == DIR_ENC) {
@@ -160,6 +177,7 @@ build_raw_dp_chain_fd(uint8_t *drv_ctx,
 static int
 build_raw_dp_aead_fd(uint8_t *drv_ctx,
 		       struct rte_crypto_sgl *sgl,
+		       struct rte_crypto_sgl *dest_sgl,
 		       struct rte_crypto_va_iova_ptr *iv,
 		       struct rte_crypto_va_iova_ptr *digest,
 		       struct rte_crypto_va_iova_ptr *auth_iv,
@@ -219,17 +237,33 @@ build_raw_dp_aead_fd(uint8_t *drv_ctx,
 			(aead_len + icv_len) :
 			aead_len;
 
-	/* Configure Output SGE for Encap/Decap */
-	DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
-	DPAA2_SET_FLE_OFFSET(sge, ofs.ofs.cipher.head);
-	sge->length = sgl->vec[0].len - ofs.ofs.cipher.head;
+	/* OOP */
+	if (dest_sgl) {
+		/* Configure Output SGE for Encap/Decap */
+		DPAA2_SET_FLE_ADDR(sge, dest_sgl->vec[0].iova);
+		DPAA2_SET_FLE_OFFSET(sge, ofs.ofs.cipher.head);
+		sge->length = dest_sgl->vec[0].len - ofs.ofs.cipher.head;
 
-	/* o/p segs */
-	for (i = 1; i < sgl->num; i++) {
-		sge++;
-		DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
-		DPAA2_SET_FLE_OFFSET(sge, 0);
-		sge->length = sgl->vec[i].len;
+		/* o/p segs */
+		for (i = 1; i < dest_sgl->num; i++) {
+			sge++;
+			DPAA2_SET_FLE_ADDR(sge, dest_sgl->vec[i].iova);
+			DPAA2_SET_FLE_OFFSET(sge, 0);
+			sge->length = dest_sgl->vec[i].len;
+		}
+	} else {
+		/* Configure Output SGE for Encap/Decap */
+		DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
+		DPAA2_SET_FLE_OFFSET(sge, ofs.ofs.cipher.head);
+		sge->length = sgl->vec[0].len - ofs.ofs.cipher.head;
+
+		/* o/p segs */
+		for (i = 1; i < sgl->num; i++) {
+			sge++;
+			DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
+			DPAA2_SET_FLE_OFFSET(sge, 0);
+			sge->length = sgl->vec[i].len;
+		}
 	}
 
 	if (sess->dir == DIR_ENC) {
@@ -294,6 +328,7 @@ build_raw_dp_aead_fd(uint8_t *drv_ctx,
 static int
 build_raw_dp_auth_fd(uint8_t *drv_ctx,
 		       struct rte_crypto_sgl *sgl,
+		       struct rte_crypto_sgl *dest_sgl,
 		       struct rte_crypto_va_iova_ptr *iv,
 		       struct rte_crypto_va_iova_ptr *digest,
 		       struct rte_crypto_va_iova_ptr *auth_iv,
@@ -303,6 +338,7 @@ build_raw_dp_auth_fd(uint8_t *drv_ctx,
 {
 	RTE_SET_USED(iv);
 	RTE_SET_USED(auth_iv);
+	RTE_SET_USED(dest_sgl);
 
 	dpaa2_sec_session *sess =
 		((struct dpaa2_sec_raw_dp_ctx *)drv_ctx)->session;
@@ -416,6 +452,7 @@ build_raw_dp_auth_fd(uint8_t *drv_ctx,
 static int
 build_raw_dp_proto_fd(uint8_t *drv_ctx,
 		       struct rte_crypto_sgl *sgl,
+		       struct rte_crypto_sgl *dest_sgl,
 		       struct rte_crypto_va_iova_ptr *iv,
 		       struct rte_crypto_va_iova_ptr *digest,
 		       struct rte_crypto_va_iova_ptr *auth_iv,
@@ -466,20 +503,39 @@ build_raw_dp_proto_fd(uint8_t *drv_ctx,
 	DPAA2_SET_FLE_SG_EXT(op_fle);
 	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));
 
-	/* Configure Output SGE for Encap/Decap */
-	DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
-	DPAA2_SET_FLE_OFFSET(sge, 0);
-	sge->length = sgl->vec[0].len;
-	out_len += sge->length;
-	/* o/p segs */
-	for (i = 1; i < sgl->num; i++) {
-		sge++;
-		DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
+	/* OOP */
+	if (dest_sgl) {
+		/* Configure Output SGE for Encap/Decap */
+		DPAA2_SET_FLE_ADDR(sge, dest_sgl->vec[0].iova);
 		DPAA2_SET_FLE_OFFSET(sge, 0);
-		sge->length = sgl->vec[i].len;
+		sge->length = dest_sgl->vec[0].len;
+		out_len += sge->length;
+		/* o/p segs */
+		for (i = 1; i < dest_sgl->num; i++) {
+			sge++;
+			DPAA2_SET_FLE_ADDR(sge, dest_sgl->vec[i].iova);
+			DPAA2_SET_FLE_OFFSET(sge, 0);
+			sge->length = dest_sgl->vec[i].len;
+			out_len += sge->length;
+		}
+		sge->length = dest_sgl->vec[i - 1].tot_len;
+
+	} else {
+		/* Configure Output SGE for Encap/Decap */
+		DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
+		DPAA2_SET_FLE_OFFSET(sge, 0);
+		sge->length = sgl->vec[0].len;
 		out_len += sge->length;
+		/* o/p segs */
+		for (i = 1; i < sgl->num; i++) {
+			sge++;
+			DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
+			DPAA2_SET_FLE_OFFSET(sge, 0);
+			sge->length = sgl->vec[i].len;
+			out_len += sge->length;
+		}
+		sge->length = sgl->vec[i - 1].tot_len;
 	}
-	sge->length = sgl->vec[i - 1].tot_len;
 	out_len += sge->length;
 
 	DPAA2_SET_FLE_FIN(sge);
@@ -528,6 +584,7 @@ build_raw_dp_proto_fd(uint8_t *drv_ctx,
 static int
 build_raw_dp_cipher_fd(uint8_t *drv_ctx,
 		       struct rte_crypto_sgl *sgl,
+		       struct rte_crypto_sgl *dest_sgl,
 		       struct rte_crypto_va_iova_ptr *iv,
 		       struct rte_crypto_va_iova_ptr *digest,
 		       struct rte_crypto_va_iova_ptr *auth_iv,
@@ -593,17 +650,33 @@ build_raw_dp_cipher_fd(uint8_t *drv_ctx,
 	op_fle->length = data_len;
 	DPAA2_SET_FLE_SG_EXT(op_fle);
 
-	/* o/p 1st seg */
-	DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
-	DPAA2_SET_FLE_OFFSET(sge, data_offset);
-	sge->length = sgl->vec[0].len - data_offset;
+	/* OOP */
+	if (dest_sgl) {
+		/* o/p 1st seg */
+		DPAA2_SET_FLE_ADDR(sge, dest_sgl->vec[0].iova);
+		DPAA2_SET_FLE_OFFSET(sge, data_offset);
+		sge->length = dest_sgl->vec[0].len - data_offset;
 
-	/* o/p segs */
-	for (i = 1; i < sgl->num; i++) {
-		sge++;
-		DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
-		DPAA2_SET_FLE_OFFSET(sge, 0);
-		sge->length = sgl->vec[i].len;
+		/* o/p segs */
+		for (i = 1; i < dest_sgl->num; i++) {
+			sge++;
+			DPAA2_SET_FLE_ADDR(sge, dest_sgl->vec[i].iova);
+			DPAA2_SET_FLE_OFFSET(sge, 0);
+			sge->length = dest_sgl->vec[i].len;
+		}
+	} else {
+		/* o/p 1st seg */
+		DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
+		DPAA2_SET_FLE_OFFSET(sge, data_offset);
+		sge->length = sgl->vec[0].len - data_offset;
+
+		/* o/p segs */
+		for (i = 1; i < sgl->num; i++) {
+			sge++;
+			DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
+			DPAA2_SET_FLE_OFFSET(sge, 0);
+			sge->length = sgl->vec[i].len;
+		}
 	}
 	DPAA2_SET_FLE_FIN(sge);
 
@@ -706,6 +779,7 @@ dpaa2_sec_raw_enqueue_burst(void *qp_data, uint8_t *drv_ctx,
 			memset(&fd_arr[loop], 0, sizeof(struct qbman_fd));
 			ret = sess->build_raw_dp_fd(drv_ctx,
 						    &vec->src_sgl[loop],
+						    &vec->dest_sgl[loop],
 						    &vec->iv[loop],
 						    &vec->digest[loop],
 						    &vec->auth_iv[loop],
-- 
2.17.1


^ permalink raw reply	[flat|nested] 115+ messages in thread
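
As the fallback branches above show, a NULL dest_sgl keeps the
operation in place; out-of-place processing only requires filling the
destination scatter list (tot_len is the total buffer length field
added earlier in this series). A minimal sketch with assumed dst_*
variables:

	struct rte_crypto_vec dst_vec = {
		.base = dst_buf, .iova = dst_iova,
		.len = dst_len, .tot_len = dst_len,
	};
	struct rte_crypto_sgl dst_sgl = { .vec = &dst_vec, .num = 1 };

	vec.dest_sgl = &dst_sgl;	/* out-of-place */
	/* vec.dest_sgl = NULL;		   in-place */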

* [dpdk-dev] [PATCH 10/15] crypto/dpaa2_sec: enhance error checks with raw buffer APIs
  2021-08-25  7:14 ` [dpdk-dev] [PATCH 00/15] crypto: add raw vector support in DPAAx Hemant Agrawal
                     ` (8 preceding siblings ...)
  2021-08-25  7:15   ` [dpdk-dev] [PATCH 09/15] crypto/dpaa2_sec: support OOP with raw buffer API Hemant Agrawal
@ 2021-08-25  7:15   ` Hemant Agrawal
  2021-08-25  7:15   ` [dpdk-dev] [PATCH 11/15] crypto/dpaa_sec: support raw datapath APIs Hemant Agrawal
                     ` (6 subsequent siblings)
  16 siblings, 0 replies; 115+ messages in thread
From: Hemant Agrawal @ 2021-08-25  7:15 UTC (permalink / raw)
  To: dev, gakhil; +Cc: konstantin.ananyev, roy.fan.zhang, Gagandeep Singh

From: Gagandeep Singh <g.singh@nxp.com>

This patch improves the error condition handling and the support of
wireless algos with raw buffers.

Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
---
 drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c | 31 ++++-----------------
 1 file changed, 6 insertions(+), 25 deletions(-)

diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c b/drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c
index 51e316cc00..25364454c9 100644
--- a/drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c
@@ -355,16 +355,7 @@ build_raw_dp_auth_fd(uint8_t *drv_ctx,
 	data_len = total_len - ofs.ofs.auth.head - ofs.ofs.auth.tail;
 	data_offset = ofs.ofs.auth.head;
 
-	if (sess->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
-		sess->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
-		if ((data_len & 7) || (data_offset & 7)) {
-			DPAA2_SEC_ERR("AUTH: len/offset must be full bytes");
-			return -ENOTSUP;
-		}
-
-		data_len = data_len >> 3;
-		data_offset = data_offset >> 3;
-	}
+	/* For SNOW3G and ZUC, only lengths in bits are supported */
 	fle = (struct qbman_fle *)rte_malloc(NULL,
 		FLE_SG_MEM_SIZE(2 * sgl->num),
 			RTE_CACHE_LINE_SIZE);
@@ -609,17 +600,7 @@ build_raw_dp_cipher_fd(uint8_t *drv_ctx,
 	data_len = total_len - ofs.ofs.cipher.head - ofs.ofs.cipher.tail;
 	data_offset = ofs.ofs.cipher.head;
 
-	if (sess->cipher_alg == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
-		sess->cipher_alg == RTE_CRYPTO_CIPHER_ZUC_EEA3) {
-		if ((data_len & 7) || (data_offset & 7)) {
-			DPAA2_SEC_ERR("CIPHER: len/offset must be full bytes");
-			return -ENOTSUP;
-		}
-
-		data_len = data_len >> 3;
-		data_offset = data_offset >> 3;
-	}
-
+	/* For SNOW3G and ZUC, only lengths in bits are supported */
 	/* first FLE entry used to store mbuf and session ctxt */
 	fle = (struct qbman_fle *)rte_malloc(NULL,
 			FLE_SG_MEM_SIZE(2*sgl->num),
@@ -878,7 +859,7 @@ dpaa2_sec_raw_dequeue_burst(void *qp_data, uint8_t *drv_ctx,
 	struct qbman_result *dq_storage;
 	uint32_t fqid = dpaa2_qp->rx_vq.fqid;
 	int ret, num_rx = 0;
-	uint8_t is_last = 0, status;
+	uint8_t is_last = 0, status, is_success = 0;
 	struct qbman_swp *swp;
 	const struct qbman_fd *fd;
 	struct qbman_pull_desc pulldesc;
@@ -957,11 +938,11 @@ dpaa2_sec_raw_dequeue_burst(void *qp_data, uint8_t *drv_ctx,
 			/* TODO Parse SEC errors */
 			DPAA2_SEC_ERR("SEC returned Error - %x",
 				      fd->simple.frc);
-			status = RTE_CRYPTO_OP_STATUS_ERROR;
+			is_success = false;
 		} else {
-			status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+			is_success = true;
 		}
-		post_dequeue(user_data, num_rx, status);
+		post_dequeue(user_data, num_rx, is_success);
 
 		num_rx++;
 		dq_storage++;
-- 
2.17.1


^ permalink raw reply	[flat|nested] 115+ messages in thread
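
The dequeue change above hands the application's post-dequeue hook a
plain success flag rather than a status enum value. A sketch of a
matching callback, where struct app_op is a hypothetical application
record reached through the per-op userdata:

	static void
	post_deq_cb(void *user_data, uint32_t index, uint8_t is_op_success)
	{
		struct app_op *op = user_data;

		RTE_SET_USED(index);
		op->status = is_op_success ? RTE_CRYPTO_OP_STATUS_SUCCESS :
					     RTE_CRYPTO_OP_STATUS_ERROR;
	}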

* [dpdk-dev] [PATCH 11/15] crypto/dpaa_sec: support raw datapath APIs
  2021-08-25  7:14 ` [dpdk-dev] [PATCH 00/15] crypto: add raw vector support in DPAAx Hemant Agrawal
                     ` (9 preceding siblings ...)
  2021-08-25  7:15   ` [dpdk-dev] [PATCH 10/15] crypto/dpaa2_sec: enhance error checks with raw buffer APIs Hemant Agrawal
@ 2021-08-25  7:15   ` Hemant Agrawal
  2021-08-25  7:15   ` [dpdk-dev] [PATCH 12/15] crypto/dpaa_sec: support authonly and chain with raw APIs Hemant Agrawal
                     ` (5 subsequent siblings)
  16 siblings, 0 replies; 115+ messages in thread
From: Hemant Agrawal @ 2021-08-25  7:15 UTC (permalink / raw)
  To: dev, gakhil; +Cc: konstantin.ananyev, roy.fan.zhang, Gagandeep Singh

From: Gagandeep Singh <g.singh@nxp.com>

This patch adds the raw vector API framework for the dpaa_sec driver.

Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
---
 drivers/crypto/dpaa_sec/dpaa_sec.c        |  23 +-
 drivers/crypto/dpaa_sec/dpaa_sec.h        |  39 +-
 drivers/crypto/dpaa_sec/dpaa_sec_raw_dp.c | 485 ++++++++++++++++++++++
 drivers/crypto/dpaa_sec/meson.build       |   4 +-
 4 files changed, 537 insertions(+), 14 deletions(-)
 create mode 100644 drivers/crypto/dpaa_sec/dpaa_sec_raw_dp.c

diff --git a/drivers/crypto/dpaa_sec/dpaa_sec.c b/drivers/crypto/dpaa_sec/dpaa_sec.c
index 19d4684e24..7534f80195 100644
--- a/drivers/crypto/dpaa_sec/dpaa_sec.c
+++ b/drivers/crypto/dpaa_sec/dpaa_sec.c
@@ -45,10 +45,7 @@
 #include <dpaa_sec_log.h>
 #include <dpaax_iova_table.h>
 
-static uint8_t cryptodev_driver_id;
-
-static int
-dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess);
+uint8_t dpaa_cryptodev_driver_id;
 
 static inline void
 dpaa_sec_op_ending(struct dpaa_sec_op_ctx *ctx)
@@ -1745,8 +1742,8 @@ dpaa_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
 			case RTE_CRYPTO_OP_WITH_SESSION:
 				ses = (dpaa_sec_session *)
 					get_sym_session_private_data(
-							op->sym->session,
-							cryptodev_driver_id);
+						op->sym->session,
+						dpaa_cryptodev_driver_id);
 				break;
 #ifdef RTE_LIB_SECURITY
 			case RTE_CRYPTO_OP_SECURITY_SESSION:
@@ -2307,7 +2304,7 @@ dpaa_sec_detach_rxq(struct dpaa_sec_dev_private *qi, struct qman_fq *fq)
 	return -1;
 }
 
-static int
+int
 dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess)
 {
 	int ret;
@@ -3115,7 +3112,7 @@ dpaa_sec_dev_infos_get(struct rte_cryptodev *dev,
 		info->feature_flags = dev->feature_flags;
 		info->capabilities = dpaa_sec_capabilities;
 		info->sym.max_nb_sessions = internals->max_nb_sessions;
-		info->driver_id = cryptodev_driver_id;
+		info->driver_id = dpaa_cryptodev_driver_id;
 	}
 }
 
@@ -3311,7 +3308,10 @@ static struct rte_cryptodev_ops crypto_ops = {
 	.queue_pair_release   = dpaa_sec_queue_pair_release,
 	.sym_session_get_size     = dpaa_sec_sym_session_get_size,
 	.sym_session_configure    = dpaa_sec_sym_session_configure,
-	.sym_session_clear        = dpaa_sec_sym_session_clear
+	.sym_session_clear        = dpaa_sec_sym_session_clear,
+	/* Raw data-path API related operations */
+	.sym_get_raw_dp_ctx_size = dpaa_sec_get_dp_ctx_size,
+	.sym_configure_raw_dp_ctx = dpaa_sec_configure_raw_dp_ctx,
 };
 
 #ifdef RTE_LIB_SECURITY
@@ -3362,7 +3362,7 @@ dpaa_sec_dev_init(struct rte_cryptodev *cryptodev)
 
 	PMD_INIT_FUNC_TRACE();
 
-	cryptodev->driver_id = cryptodev_driver_id;
+	cryptodev->driver_id = dpaa_cryptodev_driver_id;
 	cryptodev->dev_ops = &crypto_ops;
 
 	cryptodev->enqueue_burst = dpaa_sec_enqueue_burst;
@@ -3371,6 +3371,7 @@ dpaa_sec_dev_init(struct rte_cryptodev *cryptodev)
 			RTE_CRYPTODEV_FF_HW_ACCELERATED |
 			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
 			RTE_CRYPTODEV_FF_SECURITY |
+			RTE_CRYPTODEV_FF_SYM_RAW_DP |
 			RTE_CRYPTODEV_FF_IN_PLACE_SGL |
 			RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT |
 			RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
@@ -3536,5 +3537,5 @@ static struct cryptodev_driver dpaa_sec_crypto_drv;
 
 RTE_PMD_REGISTER_DPAA(CRYPTODEV_NAME_DPAA_SEC_PMD, rte_dpaa_sec_driver);
 RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa_sec_crypto_drv, rte_dpaa_sec_driver.driver,
-		cryptodev_driver_id);
+		dpaa_cryptodev_driver_id);
 RTE_LOG_REGISTER(dpaa_logtype_sec, pmd.crypto.dpaa, NOTICE);
diff --git a/drivers/crypto/dpaa_sec/dpaa_sec.h b/drivers/crypto/dpaa_sec/dpaa_sec.h
index 368699678b..f6e83d46e7 100644
--- a/drivers/crypto/dpaa_sec/dpaa_sec.h
+++ b/drivers/crypto/dpaa_sec/dpaa_sec.h
@@ -19,6 +19,8 @@
 #define AES_CTR_IV_LEN		16
 #define AES_GCM_IV_LEN		12
 
+extern uint8_t dpaa_cryptodev_driver_id;
+
 #define DPAA_IPv6_DEFAULT_VTC_FLOW	0x60000000
 
 /* Minimum job descriptor consists of a oneword job descriptor HEADER and
@@ -117,6 +119,24 @@ struct sec_pdcp_ctxt {
 	uint32_t hfn_threshold;	/*!< HFN Threashold for key renegotiation */
 };
 #endif
+
+typedef int (*dpaa_sec_build_fd_t)(
+	void *qp, uint8_t *drv_ctx, struct rte_crypto_vec *data_vec,
+	uint16_t n_data_vecs, union rte_crypto_sym_ofs ofs,
+	struct rte_crypto_va_iova_ptr *iv,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *aad_or_auth_iv,
+	void *user_data);
+
+typedef struct dpaa_sec_job* (*dpaa_sec_build_raw_dp_fd_t)(uint8_t *drv_ctx,
+			struct rte_crypto_sgl *sgl,
+			struct rte_crypto_sgl *dest_sgl,
+			struct rte_crypto_va_iova_ptr *iv,
+			struct rte_crypto_va_iova_ptr *digest,
+			struct rte_crypto_va_iova_ptr *auth_iv,
+			union rte_crypto_sym_ofs ofs,
+			void *userdata);
+
 typedef struct dpaa_sec_session_entry {
 	struct sec_cdb cdb;	/**< cmd block associated with qp */
 	struct dpaa_sec_qp *qp[MAX_DPAA_CORES];
@@ -129,6 +149,8 @@ typedef struct dpaa_sec_session_entry {
 #ifdef RTE_LIB_SECURITY
 	enum rte_security_session_protocol proto_alg; /*!< Security Algorithm*/
 #endif
+	dpaa_sec_build_fd_t build_fd;
+	dpaa_sec_build_raw_dp_fd_t build_raw_dp_fd;
 	union {
 		struct {
 			uint8_t *data;	/**< pointer to key data */
@@ -211,7 +233,10 @@ struct dpaa_sec_job {
 #define DPAA_MAX_NB_MAX_DIGEST	32
 struct dpaa_sec_op_ctx {
 	struct dpaa_sec_job job;
-	struct rte_crypto_op *op;
+	union {
+		struct rte_crypto_op *op;
+		void *userdata;
+	};
 	struct rte_mempool *ctx_pool; /* mempool pointer for dpaa_sec_op_ctx */
 	uint32_t fd_status;
 	int64_t vtop_offset;
@@ -803,4 +828,16 @@ calc_chksum(void *buffer, int len)
 	return  result;
 }
 
+int
+dpaa_sec_configure_raw_dp_ctx(struct rte_cryptodev *dev, uint16_t qp_id,
+	struct rte_crypto_raw_dp_ctx *raw_dp_ctx,
+	enum rte_crypto_op_sess_type sess_type,
+	union rte_cryptodev_session_ctx session_ctx, uint8_t is_update);
+
+int
+dpaa_sec_get_dp_ctx_size(struct rte_cryptodev *dev);
+
+int
+dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess);
+
 #endif /* _DPAA_SEC_H_ */
diff --git a/drivers/crypto/dpaa_sec/dpaa_sec_raw_dp.c b/drivers/crypto/dpaa_sec/dpaa_sec_raw_dp.c
new file mode 100644
index 0000000000..ee0ca2e0d5
--- /dev/null
+++ b/drivers/crypto/dpaa_sec/dpaa_sec_raw_dp.c
@@ -0,0 +1,485 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2021 NXP
+ */
+
+#include <rte_byteorder.h>
+#include <rte_common.h>
+#include <rte_cryptodev_pmd.h>
+#include <rte_crypto.h>
+#include <rte_cryptodev.h>
+#ifdef RTE_LIB_SECURITY
+#include <rte_security_driver.h>
+#endif
+
+/* RTA header files */
+#include <desc/ipsec.h>
+
+#include <rte_dpaa_bus.h>
+#include <dpaa_sec.h>
+#include <dpaa_sec_log.h>
+
+struct dpaa_sec_raw_dp_ctx {
+	dpaa_sec_session *session;
+	uint32_t tail;
+	uint32_t head;
+	uint16_t cached_enqueue;
+	uint16_t cached_dequeue;
+};
+
+static __rte_always_inline int
+dpaa_sec_raw_enqueue_done(void *qp_data, uint8_t *drv_ctx, uint32_t n)
+{
+	RTE_SET_USED(qp_data);
+	RTE_SET_USED(drv_ctx);
+	RTE_SET_USED(n);
+
+	return 0;
+}
+
+static __rte_always_inline int
+dpaa_sec_raw_dequeue_done(void *qp_data, uint8_t *drv_ctx, uint32_t n)
+{
+	RTE_SET_USED(qp_data);
+	RTE_SET_USED(drv_ctx);
+	RTE_SET_USED(n);
+
+	return 0;
+}
+
+static inline struct dpaa_sec_op_ctx *
+dpaa_sec_alloc_raw_ctx(dpaa_sec_session *ses, int sg_count)
+{
+	struct dpaa_sec_op_ctx *ctx;
+	int i, retval;
+
+	retval = rte_mempool_get(
+			ses->qp[rte_lcore_id() % MAX_DPAA_CORES]->ctx_pool,
+			(void **)(&ctx));
+	if (!ctx || retval) {
+		DPAA_SEC_DP_WARN("Alloc sec descriptor failed!");
+		return NULL;
+	}
+	/*
+	 * Clear the SG memory. There are 16 SG entries of 16 bytes each.
+	 * One call to dcbz_64() clears 64 bytes, so it is called 4 times
+	 * to clear all the SG entries. As dpaa_sec_alloc_ctx() runs for
+	 * every packet, memset() would be costlier than dcbz_64().
+	 */
+	for (i = 0; i < sg_count && i < MAX_JOB_SG_ENTRIES; i += 4)
+		dcbz_64(&ctx->job.sg[i]);
+
+	ctx->ctx_pool = ses->qp[rte_lcore_id() % MAX_DPAA_CORES]->ctx_pool;
+	ctx->vtop_offset = (size_t) ctx - rte_mempool_virt2iova(ctx);
+
+	return ctx;
+}
+
+static struct dpaa_sec_job *
+build_dpaa_raw_dp_auth_fd(uint8_t *drv_ctx,
+			struct rte_crypto_sgl *sgl,
+			struct rte_crypto_sgl *dest_sgl,
+			struct rte_crypto_va_iova_ptr *iv,
+			struct rte_crypto_va_iova_ptr *digest,
+			struct rte_crypto_va_iova_ptr *auth_iv,
+			union rte_crypto_sym_ofs ofs,
+			void *userdata)
+{
+	RTE_SET_USED(drv_ctx);
+	RTE_SET_USED(sgl);
+	RTE_SET_USED(dest_sgl);
+	RTE_SET_USED(iv);
+	RTE_SET_USED(digest);
+	RTE_SET_USED(auth_iv);
+	RTE_SET_USED(ofs);
+	RTE_SET_USED(userdata);
+
+	return NULL;
+}
+
+static struct dpaa_sec_job *
+build_dpaa_raw_dp_cipher_fd(uint8_t *drv_ctx,
+			struct rte_crypto_sgl *sgl,
+			struct rte_crypto_sgl *dest_sgl,
+			struct rte_crypto_va_iova_ptr *iv,
+			struct rte_crypto_va_iova_ptr *digest,
+			struct rte_crypto_va_iova_ptr *auth_iv,
+			union rte_crypto_sym_ofs ofs,
+			void *userdata)
+{
+	RTE_SET_USED(digest);
+	RTE_SET_USED(auth_iv);
+	dpaa_sec_session *ses =
+		((struct dpaa_sec_raw_dp_ctx *)drv_ctx)->session;
+	struct dpaa_sec_job *cf;
+	struct dpaa_sec_op_ctx *ctx;
+	struct qm_sg_entry *sg, *out_sg, *in_sg;
+	unsigned int i;
+	uint8_t *IV_ptr = iv->va;
+	int data_len, total_len = 0, data_offset;
+
+	for (i = 0; i < sgl->num; i++)
+		total_len += sgl->vec[i].len;
+
+	data_len = total_len - ofs.ofs.cipher.head - ofs.ofs.cipher.tail;
+	data_offset = ofs.ofs.cipher.head;
+
+	/* Support lengths in bits only for SNOW3G and ZUC */
+	if (sgl->num > MAX_SG_ENTRIES) {
+		DPAA_SEC_DP_ERR("Cipher: Max sec segs supported is %d",
+				MAX_SG_ENTRIES);
+		return NULL;
+	}
+
+	ctx = dpaa_sec_alloc_raw_ctx(ses, sgl->num * 2 + 3);
+	if (!ctx)
+		return NULL;
+
+	cf = &ctx->job;
+	ctx->userdata = (void *)userdata;
+
+	/* output */
+	out_sg = &cf->sg[0];
+	out_sg->extension = 1;
+	out_sg->length = data_len;
+	qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(&cf->sg[2]));
+	cpu_to_hw_sg(out_sg);
+
+	if (dest_sgl) {
+		/* 1st seg */
+		sg = &cf->sg[2];
+		qm_sg_entry_set64(sg, dest_sgl->vec[0].iova);
+		sg->length = dest_sgl->vec[0].len - data_offset;
+		sg->offset = data_offset;
+
+		/* Successive segs */
+		for (i = 1; i < dest_sgl->num; i++) {
+			cpu_to_hw_sg(sg);
+			sg++;
+			qm_sg_entry_set64(sg, dest_sgl->vec[i].iova);
+			sg->length = dest_sgl->vec[i].len;
+		}
+	} else {
+		/* 1st seg */
+		sg = &cf->sg[2];
+		qm_sg_entry_set64(sg, sgl->vec[0].iova);
+		sg->length = sgl->vec[0].len - data_offset;
+		sg->offset = data_offset;
+
+		/* Successive segs */
+		for (i = 1; i < sgl->num; i++) {
+			cpu_to_hw_sg(sg);
+			sg++;
+			qm_sg_entry_set64(sg, sgl->vec[i].iova);
+			sg->length = sgl->vec[i].len;
+		}
+
+	}
+	sg->final = 1;
+	cpu_to_hw_sg(sg);
+
+	/* input */
+	in_sg = &cf->sg[1];
+	in_sg->extension = 1;
+	in_sg->final = 1;
+	in_sg->length = data_len + ses->iv.length;
+
+	sg++;
+	qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));
+	cpu_to_hw_sg(in_sg);
+
+	/* IV */
+	qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
+	sg->length = ses->iv.length;
+	cpu_to_hw_sg(sg);
+
+	/* 1st seg */
+	sg++;
+	qm_sg_entry_set64(sg, sgl->vec[0].iova);
+	sg->length = sgl->vec[0].len - data_offset;
+	sg->offset = data_offset;
+
+	/* Successive segs */
+	for (i = 1; i < sgl->num; i++) {
+		cpu_to_hw_sg(sg);
+		sg++;
+		qm_sg_entry_set64(sg, sgl->vec[i].iova);
+		sg->length = sgl->vec[i].len;
+	}
+	sg->final = 1;
+	cpu_to_hw_sg(sg);
+
+	return cf;
+}
+
+static uint32_t
+dpaa_sec_raw_enqueue_burst(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
+	void *user_data[], int *status)
+{
+	/* Function to transmit the frames to the given device and queue pair */
+	uint32_t loop;
+	struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp_data;
+	uint16_t num_tx = 0;
+	struct qm_fd fds[DPAA_SEC_BURST], *fd;
+	uint32_t frames_to_send;
+	struct dpaa_sec_job *cf;
+	dpaa_sec_session *ses =
+			((struct dpaa_sec_raw_dp_ctx *)drv_ctx)->session;
+	uint32_t flags[DPAA_SEC_BURST] = {0};
+	struct qman_fq *inq[DPAA_SEC_BURST];
+
+	if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
+		if (rte_dpaa_portal_init((void *)0)) {
+			DPAA_SEC_ERR("Failure in affining portal");
+			return 0;
+		}
+	}
+
+	while (vec->num) {
+		frames_to_send = (vec->num > DPAA_SEC_BURST) ?
+				DPAA_SEC_BURST : vec->num;
+		for (loop = 0; loop < frames_to_send; loop++) {
+			if (unlikely(!ses->qp[rte_lcore_id() % MAX_DPAA_CORES])) {
+				if (dpaa_sec_attach_sess_q(dpaa_qp, ses)) {
+					frames_to_send = loop;
+					goto send_pkts;
+				}
+			} else if (unlikely(ses->qp[rte_lcore_id() %
+						MAX_DPAA_CORES] != dpaa_qp)) {
+				DPAA_SEC_DP_ERR("Old:sess->qp = %p"
+					" New qp = %p\n",
+					ses->qp[rte_lcore_id() %
+					MAX_DPAA_CORES], dpaa_qp);
+				frames_to_send = loop;
+				goto send_pkts;
+			}
+
+			/*Clear the unused FD fields before sending*/
+			fd = &fds[loop];
+			memset(fd, 0, sizeof(struct qm_fd));
+			cf = ses->build_raw_dp_fd(drv_ctx,
+						&vec->src_sgl[loop],
+						&vec->dest_sgl[loop],
+						&vec->iv[loop],
+						&vec->digest[loop],
+						&vec->auth_iv[loop],
+						ofs,
+						user_data[loop]);
+			if (!cf) {
+				DPAA_SEC_ERR("error: Improper packet contents"
+					" for crypto operation");
+				goto skip_tx;
+			}
+			inq[loop] = ses->inq[rte_lcore_id() % MAX_DPAA_CORES];
+			fd->opaque_addr = 0;
+			fd->cmd = 0;
+			qm_fd_addr_set64(fd, rte_dpaa_mem_vtop(cf->sg));
+			fd->_format1 = qm_fd_compound;
+			fd->length29 = 2 * sizeof(struct qm_sg_entry);
+
+			status[loop] = 1;
+		}
+send_pkts:
+		loop = 0;
+		while (loop < frames_to_send) {
+			loop += qman_enqueue_multi_fq(&inq[loop], &fds[loop],
+					&flags[loop], frames_to_send - loop);
+		}
+		vec->num -= frames_to_send;
+		num_tx += frames_to_send;
+	}
+
+skip_tx:
+	dpaa_qp->tx_pkts += num_tx;
+	dpaa_qp->tx_errs += vec->num - num_tx;
+
+	return num_tx;
+}
+
+static int
+dpaa_sec_deq_raw(struct dpaa_sec_qp *qp, void **out_user_data,
+		uint8_t is_user_data_array,
+		rte_cryptodev_raw_post_dequeue_t post_dequeue,
+		int nb_ops)
+{
+	struct qman_fq *fq;
+	unsigned int pkts = 0;
+	int num_rx_bufs, ret;
+	struct qm_dqrr_entry *dq;
+	uint32_t vdqcr_flags = 0;
+	uint8_t is_success = 0;
+
+	fq = &qp->outq;
+	/*
+	 * For requests of fewer than four buffers, we provide the exact
+	 * number and set the QM_VDQCR_EXACT flag. Without that flag the
+	 * hardware may return up to two more buffers than requested, so
+	 * for larger requests we ask for two fewer.
+	 */
+	if (nb_ops < 4) {
+		vdqcr_flags = QM_VDQCR_EXACT;
+		num_rx_bufs = nb_ops;
+	} else {
+		num_rx_bufs = nb_ops > DPAA_MAX_DEQUEUE_NUM_FRAMES ?
+			(DPAA_MAX_DEQUEUE_NUM_FRAMES - 2) : (nb_ops - 2);
+	}
+	ret = qman_set_vdq(fq, num_rx_bufs, vdqcr_flags);
+	if (ret)
+		return 0;
+
+	do {
+		const struct qm_fd *fd;
+		struct dpaa_sec_job *job;
+		struct dpaa_sec_op_ctx *ctx;
+
+		dq = qman_dequeue(fq);
+		if (!dq)
+			continue;
+
+		fd = &dq->fd;
+		/* sg is embedded in an op ctx,
+		 * sg[0] is for output
+		 * sg[1] for input
+		 */
+		job = rte_dpaa_mem_ptov(qm_fd_addr_get64(fd));
+
+		ctx = container_of(job, struct dpaa_sec_op_ctx, job);
+		ctx->fd_status = fd->status;
+		if (is_user_data_array)
+			out_user_data[pkts] = ctx->userdata;
+		else
+			out_user_data[0] = ctx->userdata;
+
+		if (!ctx->fd_status) {
+			is_success = true;
+		} else {
+			is_success = false;
+			DPAA_SEC_DP_WARN("SEC return err:0x%x", ctx->fd_status);
+		}
+		post_dequeue(ctx->op, pkts, is_success);
+		pkts++;
+
+		/* status was reported via post_dequeue; free the ctx memory */
+		rte_mempool_put(ctx->ctx_pool, (void *)ctx);
+
+		qman_dqrr_consume(fq, dq);
+	} while (fq->flags & QMAN_FQ_STATE_VDQCR);
+
+	return pkts;
+}
+
+
+static __rte_always_inline uint32_t
+dpaa_sec_raw_dequeue_burst(void *qp_data, uint8_t *drv_ctx,
+	rte_cryptodev_raw_get_dequeue_count_t get_dequeue_count,
+	uint32_t max_nb_to_dequeue,
+	rte_cryptodev_raw_post_dequeue_t post_dequeue,
+	void **out_user_data, uint8_t is_user_data_array,
+	uint32_t *n_success, int *dequeue_status)
+{
+	RTE_SET_USED(drv_ctx);
+	RTE_SET_USED(get_dequeue_count);
+	uint16_t num_rx;
+	struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp_data;
+	uint32_t nb_ops = max_nb_to_dequeue;
+
+	if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
+		if (rte_dpaa_portal_init((void *)0)) {
+			DPAA_SEC_ERR("Failure in affining portal");
+			return 0;
+		}
+	}
+
+	num_rx = dpaa_sec_deq_raw(dpaa_qp, out_user_data,
+			is_user_data_array, post_dequeue, nb_ops);
+
+	dpaa_qp->rx_pkts += num_rx;
+	*dequeue_status = 1;
+	*n_success = num_rx;
+
+	DPAA_SEC_DP_DEBUG("SEC Received %d Packets\n", num_rx);
+
+	return num_rx;
+}
+
+static __rte_always_inline int
+dpaa_sec_raw_enqueue(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_vec *data_vec,
+	uint16_t n_data_vecs, union rte_crypto_sym_ofs ofs,
+	struct rte_crypto_va_iova_ptr *iv,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *aad_or_auth_iv,
+	void *user_data)
+{
+	RTE_SET_USED(qp_data);
+	RTE_SET_USED(drv_ctx);
+	RTE_SET_USED(data_vec);
+	RTE_SET_USED(n_data_vecs);
+	RTE_SET_USED(ofs);
+	RTE_SET_USED(iv);
+	RTE_SET_USED(digest);
+	RTE_SET_USED(aad_or_auth_iv);
+	RTE_SET_USED(user_data);
+
+	return 0;
+}
+
+static __rte_always_inline void *
+dpaa_sec_raw_dequeue(void *qp_data, uint8_t *drv_ctx, int *dequeue_status,
+	enum rte_crypto_op_status *op_status)
+{
+	RTE_SET_USED(qp_data);
+	RTE_SET_USED(drv_ctx);
+	RTE_SET_USED(dequeue_status);
+	RTE_SET_USED(op_status);
+
+	return NULL;
+}
+
+int
+dpaa_sec_configure_raw_dp_ctx(struct rte_cryptodev *dev, uint16_t qp_id,
+	struct rte_crypto_raw_dp_ctx *raw_dp_ctx,
+	enum rte_crypto_op_sess_type sess_type,
+	union rte_cryptodev_session_ctx session_ctx, uint8_t is_update)
+{
+	dpaa_sec_session *sess;
+	struct dpaa_sec_raw_dp_ctx *dp_ctx;
+	RTE_SET_USED(qp_id);
+
+	if (!is_update) {
+		memset(raw_dp_ctx, 0, sizeof(*raw_dp_ctx));
+		raw_dp_ctx->qp_data = dev->data->queue_pairs[qp_id];
+	}
+
+	if (sess_type == RTE_CRYPTO_OP_SECURITY_SESSION)
+		sess = (dpaa_sec_session *)get_sec_session_private_data(
+				session_ctx.sec_sess);
+	else if (sess_type == RTE_CRYPTO_OP_WITH_SESSION)
+		sess = (dpaa_sec_session *)get_sym_session_private_data(
+			session_ctx.crypto_sess, dpaa_cryptodev_driver_id);
+	else
+		return -ENOTSUP;
+	raw_dp_ctx->dequeue_burst = dpaa_sec_raw_dequeue_burst;
+	raw_dp_ctx->dequeue = dpaa_sec_raw_dequeue;
+	raw_dp_ctx->dequeue_done = dpaa_sec_raw_dequeue_done;
+	raw_dp_ctx->enqueue_burst = dpaa_sec_raw_enqueue_burst;
+	raw_dp_ctx->enqueue = dpaa_sec_raw_enqueue;
+	raw_dp_ctx->enqueue_done = dpaa_sec_raw_enqueue_done;
+
+	if (sess->ctxt == DPAA_SEC_CIPHER)
+		sess->build_raw_dp_fd = build_dpaa_raw_dp_cipher_fd;
+	else if (sess->ctxt == DPAA_SEC_AUTH)
+		sess->build_raw_dp_fd = build_dpaa_raw_dp_auth_fd;
+	else
+		return -ENOTSUP;
+	dp_ctx = (struct dpaa_sec_raw_dp_ctx *)raw_dp_ctx->drv_ctx_data;
+	dp_ctx->session = sess;
+
+	return 0;
+}
+
+int
+dpaa_sec_get_dp_ctx_size(__rte_unused struct rte_cryptodev *dev)
+{
+	return sizeof(struct dpaa_sec_raw_dp_ctx);
+}
diff --git a/drivers/crypto/dpaa_sec/meson.build b/drivers/crypto/dpaa_sec/meson.build
index 44fd60e5ae..f87ad6c7e7 100644
--- a/drivers/crypto/dpaa_sec/meson.build
+++ b/drivers/crypto/dpaa_sec/meson.build
@@ -1,5 +1,5 @@
 # SPDX-License-Identifier: BSD-3-Clause
-# Copyright 2018 NXP
+# Copyright 2018-2021 NXP
 
 if not is_linux
     build = false
@@ -7,7 +7,7 @@ if not is_linux
 endif
 
 deps += ['bus_dpaa', 'mempool_dpaa', 'security']
-sources = files('dpaa_sec.c')
+sources = files('dpaa_sec.c', 'dpaa_sec_raw_dp.c')
 
 includes += include_directories('../../bus/dpaa/include')
 includes += include_directories('../../common/dpaax')
-- 
2.17.1


^ permalink raw reply	[flat|nested] 115+ messages in thread
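
Completing the caller's loop on the dequeue side, a minimal sketch
against the callbacks registered above; ctx is the raw DP context
configured earlier, BURST is an assumed application constant, and a
NULL get_dequeue_count makes the library fall back to the
max_nb_to_dequeue argument:

	void *out_ud[BURST];	/* receives the per-op userdata pointers */
	uint32_t n, n_success = 0;
	int deq_status;

	n = rte_cryptodev_raw_dequeue_burst(ctx, NULL, BURST, post_deq_cb,
			out_ud, 1, &n_success, &deq_status);
	rte_cryptodev_raw_dequeue_done(ctx, n);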

* [dpdk-dev] [PATCH 12/15] crypto/dpaa_sec: support authonly and chain with raw APIs
  2021-08-25  7:14 ` [dpdk-dev] [PATCH 00/15] crypto: add raw vector support in DPAAx Hemant Agrawal
                     ` (10 preceding siblings ...)
  2021-08-25  7:15   ` [dpdk-dev] [PATCH 11/15] crypto/dpaa_sec: support raw datapath APIs Hemant Agrawal
@ 2021-08-25  7:15   ` Hemant Agrawal
  2021-08-25  7:15   ` [dpdk-dev] [PATCH 13/15] crypto/dpaa_sec: support AEAD and proto " Hemant Agrawal
                     ` (4 subsequent siblings)
  16 siblings, 0 replies; 115+ messages in thread
From: Hemant Agrawal @ 2021-08-25  7:15 UTC (permalink / raw)
  To: dev, gakhil; +Cc: konstantin.ananyev, roy.fan.zhang, Gagandeep Singh

From: Gagandeep Singh <g.singh@nxp.com>

This patch improves the raw vector support in the dpaa_sec driver
for the auth-only and chain use cases.

Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
---
 drivers/crypto/dpaa_sec/dpaa_sec.h        |   3 +-
 drivers/crypto/dpaa_sec/dpaa_sec_raw_dp.c | 296 +++++++++++++++++++++-
 2 files changed, 287 insertions(+), 12 deletions(-)

diff --git a/drivers/crypto/dpaa_sec/dpaa_sec.h b/drivers/crypto/dpaa_sec/dpaa_sec.h
index f6e83d46e7..2e0ab93ff0 100644
--- a/drivers/crypto/dpaa_sec/dpaa_sec.h
+++ b/drivers/crypto/dpaa_sec/dpaa_sec.h
@@ -135,7 +135,8 @@ typedef struct dpaa_sec_job* (*dpaa_sec_build_raw_dp_fd_t)(uint8_t *drv_ctx,
 			struct rte_crypto_va_iova_ptr *digest,
 			struct rte_crypto_va_iova_ptr *auth_iv,
 			union rte_crypto_sym_ofs ofs,
-			void *userdata);
+			void *userdata,
+			struct qm_fd *fd);
 
 typedef struct dpaa_sec_session_entry {
 	struct sec_cdb cdb;	/**< cmd block associated with qp */
diff --git a/drivers/crypto/dpaa_sec/dpaa_sec_raw_dp.c b/drivers/crypto/dpaa_sec/dpaa_sec_raw_dp.c
index ee0ca2e0d5..4e34629f18 100644
--- a/drivers/crypto/dpaa_sec/dpaa_sec_raw_dp.c
+++ b/drivers/crypto/dpaa_sec/dpaa_sec_raw_dp.c
@@ -12,6 +12,7 @@
 #endif
 
 /* RTA header files */
+#include <desc/algo.h>
 #include <desc/ipsec.h>
 
 #include <rte_dpaa_bus.h>
@@ -26,6 +27,17 @@ struct dpaa_sec_raw_dp_ctx {
 	uint16_t cached_dequeue;
 };
 
+static inline int
+is_encode(dpaa_sec_session *ses)
+{
+	return ses->dir == DIR_ENC;
+}
+
+static inline int is_decode(dpaa_sec_session *ses)
+{
+	return ses->dir == DIR_DEC;
+}
+
 static __rte_always_inline int
 dpaa_sec_raw_enqueue_done(void *qp_data, uint8_t *drv_ctx, uint32_t n)
 {
@@ -82,18 +94,276 @@ build_dpaa_raw_dp_auth_fd(uint8_t *drv_ctx,
 			struct rte_crypto_va_iova_ptr *digest,
 			struct rte_crypto_va_iova_ptr *auth_iv,
 			union rte_crypto_sym_ofs ofs,
-			void *userdata)
+			void *userdata,
+			struct qm_fd *fd)
 {
-	RTE_SET_USED(drv_ctx);
-	RTE_SET_USED(sgl);
 	RTE_SET_USED(dest_sgl);
 	RTE_SET_USED(iv);
-	RTE_SET_USED(digest);
 	RTE_SET_USED(auth_iv);
-	RTE_SET_USED(ofs);
-	RTE_SET_USED(userdata);
+	RTE_SET_USED(fd);
 
-	return NULL;
+	dpaa_sec_session *ses =
+		((struct dpaa_sec_raw_dp_ctx *)drv_ctx)->session;
+	struct dpaa_sec_job *cf;
+	struct dpaa_sec_op_ctx *ctx;
+	struct qm_sg_entry *sg, *out_sg, *in_sg;
+	phys_addr_t start_addr;
+	uint8_t *old_digest, extra_segs;
+	int data_len, data_offset, total_len = 0;
+	unsigned int i;
+
+	for (i = 0; i < sgl->num; i++)
+		total_len += sgl->vec[i].len;
+
+	data_len = total_len - ofs.ofs.auth.head - ofs.ofs.auth.tail;
+	data_offset =  ofs.ofs.auth.head;
+
+	/* Support only length in bits for SNOW3G and ZUC */
+
+	if (is_decode(ses))
+		extra_segs = 3;
+	else
+		extra_segs = 2;
+
+	if (sgl->num > MAX_SG_ENTRIES) {
+		DPAA_SEC_DP_ERR("Auth: Max sec segs supported is %d",
+				MAX_SG_ENTRIES);
+		return NULL;
+	}
+	ctx = dpaa_sec_alloc_raw_ctx(ses, sgl->num * 2 + extra_segs);
+	if (!ctx)
+		return NULL;
+
+	cf = &ctx->job;
+	ctx->userdata = (void *)userdata;
+	old_digest = ctx->digest;
+
+	/* output */
+	out_sg = &cf->sg[0];
+	qm_sg_entry_set64(out_sg, digest->iova);
+	out_sg->length = ses->digest_length;
+	cpu_to_hw_sg(out_sg);
+
+	/* input */
+	in_sg = &cf->sg[1];
+	/* need to extend the input to a compound frame */
+	in_sg->extension = 1;
+	in_sg->final = 1;
+	in_sg->length = data_len;
+	qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(&cf->sg[2]));
+
+	/* 1st seg */
+	sg = in_sg + 1;
+
+	if (ses->iv.length) {
+		uint8_t *iv_ptr;
+
+		iv_ptr = rte_crypto_op_ctod_offset(userdata, uint8_t *,
+						   ses->iv.offset);
+
+		if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
+			iv_ptr = conv_to_snow_f9_iv(iv_ptr);
+			sg->length = 12;
+		} else if (ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
+			iv_ptr = conv_to_zuc_eia_iv(iv_ptr);
+			sg->length = 8;
+		} else {
+			sg->length = ses->iv.length;
+		}
+		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(iv_ptr));
+		in_sg->length += sg->length;
+		cpu_to_hw_sg(sg);
+		sg++;
+	}
+
+	qm_sg_entry_set64(sg, sgl->vec[0].iova);
+	sg->offset = data_offset;
+
+	if (data_len <= (int)(sgl->vec[0].len - data_offset)) {
+		sg->length = data_len;
+	} else {
+		sg->length = sgl->vec[0].len - data_offset;
+
+		/* remaining i/p segs */
+		for (i = 1; i < sgl->num; i++) {
+			cpu_to_hw_sg(sg);
+			sg++;
+			qm_sg_entry_set64(sg, sgl->vec[i].iova);
+			if (data_len > (int)sgl->vec[i].len)
+				sg->length = sgl->vec[i].len;
+			else
+				sg->length = data_len;
+
+			data_len = data_len - sg->length;
+			if (data_len < 1)
+				break;
+		}
+	}
+
+	if (is_decode(ses)) {
+		/* Digest verification case */
+		cpu_to_hw_sg(sg);
+		sg++;
+		rte_memcpy(old_digest, digest->va,
+				ses->digest_length);
+		start_addr = rte_dpaa_mem_vtop(old_digest);
+		qm_sg_entry_set64(sg, start_addr);
+		sg->length = ses->digest_length;
+		in_sg->length += ses->digest_length;
+	}
+	sg->final = 1;
+	cpu_to_hw_sg(sg);
+	cpu_to_hw_sg(in_sg);
+
+	return cf;
+}
+
+static inline struct dpaa_sec_job *
+build_dpaa_raw_dp_chain_fd(uint8_t *drv_ctx,
+			struct rte_crypto_sgl *sgl,
+			struct rte_crypto_sgl *dest_sgl,
+			struct rte_crypto_va_iova_ptr *iv,
+			struct rte_crypto_va_iova_ptr *digest,
+			struct rte_crypto_va_iova_ptr *auth_iv,
+			union rte_crypto_sym_ofs ofs,
+			void *userdata,
+			struct qm_fd *fd)
+{
+	RTE_SET_USED(auth_iv);
+
+	dpaa_sec_session *ses =
+		((struct dpaa_sec_raw_dp_ctx *)drv_ctx)->session;
+	struct dpaa_sec_job *cf;
+	struct dpaa_sec_op_ctx *ctx;
+	struct qm_sg_entry *sg, *out_sg, *in_sg;
+	uint8_t *IV_ptr = iv->va;
+	unsigned int i;
+	uint16_t auth_hdr_len = ofs.ofs.cipher.head -
+				ofs.ofs.auth.head;
+	uint16_t auth_tail_len = ofs.ofs.auth.tail;
+	uint32_t auth_only_len = (auth_tail_len << 16) | auth_hdr_len;
+	int data_len = 0, auth_len = 0, cipher_len = 0;
+
+	for (i = 0; i < sgl->num; i++)
+		data_len += sgl->vec[i].len;
+
+	cipher_len = data_len - ofs.ofs.cipher.head - ofs.ofs.cipher.tail;
+	auth_len = data_len - ofs.ofs.auth.head - ofs.ofs.auth.tail;
+
+	if (sgl->num > MAX_SG_ENTRIES) {
+		DPAA_SEC_DP_ERR("Cipher-Auth: Max sec segs supported is %d",
+				MAX_SG_ENTRIES);
+		return NULL;
+	}
+
+	ctx = dpaa_sec_alloc_raw_ctx(ses, sgl->num * 2 + 4);
+	if (!ctx)
+		return NULL;
+
+	cf = &ctx->job;
+	ctx->userdata = (void *)userdata;
+
+	rte_prefetch0(cf->sg);
+
+	/* output */
+	out_sg = &cf->sg[0];
+	out_sg->extension = 1;
+	if (is_encode(ses))
+		out_sg->length = cipher_len + ses->digest_length;
+	else
+		out_sg->length = cipher_len;
+
+	/* output sg entries */
+	sg = &cf->sg[2];
+	qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(sg));
+	cpu_to_hw_sg(out_sg);
+
+	/* 1st seg */
+	if (dest_sgl) {
+		qm_sg_entry_set64(sg, dest_sgl->vec[0].iova);
+		sg->length = dest_sgl->vec[0].len - ofs.ofs.cipher.head;
+		sg->offset = ofs.ofs.cipher.head;
+
+		/* Successive segs */
+		for (i = 1; i < dest_sgl->num; i++) {
+			cpu_to_hw_sg(sg);
+			sg++;
+			qm_sg_entry_set64(sg, dest_sgl->vec[i].iova);
+			sg->length = dest_sgl->vec[i].len;
+		}
+	} else {
+		qm_sg_entry_set64(sg, sgl->vec[0].iova);
+		sg->length = sgl->vec[0].len - ofs.ofs.cipher.head;
+		sg->offset = ofs.ofs.cipher.head;
+
+		/* Successive segs */
+		for (i = 1; i < sgl->num; i++) {
+			cpu_to_hw_sg(sg);
+			sg++;
+			qm_sg_entry_set64(sg, sgl->vec[i].iova);
+			sg->length = sgl->vec[i].len;
+		}
+	}
+
+	if (is_encode(ses)) {
+		cpu_to_hw_sg(sg);
+		/* set auth output */
+		sg++;
+		qm_sg_entry_set64(sg, digest->iova);
+		sg->length = ses->digest_length;
+	}
+	sg->final = 1;
+	cpu_to_hw_sg(sg);
+
+	/* input */
+	in_sg = &cf->sg[1];
+	in_sg->extension = 1;
+	in_sg->final = 1;
+	if (is_encode(ses))
+		in_sg->length = ses->iv.length + auth_len;
+	else
+		in_sg->length = ses->iv.length + auth_len
+						+ ses->digest_length;
+
+	/* input sg entries */
+	sg++;
+	qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));
+	cpu_to_hw_sg(in_sg);
+
+	/* 1st seg IV */
+	qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
+	sg->length = ses->iv.length;
+	cpu_to_hw_sg(sg);
+
+	/* 2 seg */
+	sg++;
+	qm_sg_entry_set64(sg, sgl->vec[0].iova);
+	sg->length = sgl->vec[0].len - ofs.ofs.auth.head;
+	sg->offset = ofs.ofs.auth.head;
+
+	/* Successive segs */
+	for (i = 1; i < sgl->num; i++) {
+		cpu_to_hw_sg(sg);
+		sg++;
+		qm_sg_entry_set64(sg, sgl->vec[i].iova);
+		sg->length = sgl->vec[i].len;
+	}
+
+	if (is_decode(ses)) {
+		cpu_to_hw_sg(sg);
+		sg++;
+		memcpy(ctx->digest, digest->va,
+			ses->digest_length);
+		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(ctx->digest));
+		sg->length = ses->digest_length;
+	}
+	sg->final = 1;
+	cpu_to_hw_sg(sg);
+
+	if (auth_only_len)
+		fd->cmd = 0x80000000 | auth_only_len;
+
+	return cf;
 }
 
 static struct dpaa_sec_job *
@@ -104,10 +374,13 @@ build_dpaa_raw_dp_cipher_fd(uint8_t *drv_ctx,
 			struct rte_crypto_va_iova_ptr *digest,
 			struct rte_crypto_va_iova_ptr *auth_iv,
 			union rte_crypto_sym_ofs ofs,
-			void *userdata)
+			void *userdata,
+			struct qm_fd *fd)
 {
 	RTE_SET_USED(digest);
 	RTE_SET_USED(auth_iv);
+	RTE_SET_USED(fd);
+
 	dpaa_sec_session *ses =
 		((struct dpaa_sec_raw_dp_ctx *)drv_ctx)->session;
 	struct dpaa_sec_job *cf;
@@ -264,15 +537,14 @@ dpaa_sec_raw_enqueue_burst(void *qp_data, uint8_t *drv_ctx,
 						&vec->digest[loop],
 						&vec->auth_iv[loop],
 						ofs,
-						user_data[loop]);
+						user_data[loop],
+						fd);
 			if (!cf) {
 				DPAA_SEC_ERR("error: Improper packet contents"
 					" for crypto operation");
 				goto skip_tx;
 			}
 			inq[loop] = ses->inq[rte_lcore_id() % MAX_DPAA_CORES];
-			fd->opaque_addr = 0;
-			fd->cmd = 0;
 			qm_fd_addr_set64(fd, rte_dpaa_mem_vtop(cf->sg));
 			fd->_format1 = qm_fd_compound;
 			fd->length29 = 2 * sizeof(struct qm_sg_entry);
@@ -470,6 +742,8 @@ dpaa_sec_configure_raw_dp_ctx(struct rte_cryptodev *dev, uint16_t qp_id,
 		sess->build_raw_dp_fd = build_dpaa_raw_dp_cipher_fd;
 	else if (sess->ctxt == DPAA_SEC_AUTH)
 		sess->build_raw_dp_fd = build_dpaa_raw_dp_auth_fd;
+	else if (sess->ctxt == DPAA_SEC_CIPHER_HASH)
+		sess->build_raw_dp_fd = build_dpaa_raw_dp_chain_fd;
 	else
 		return -ENOTSUP;
 	dp_ctx = (struct dpaa_sec_raw_dp_ctx *)raw_dp_ctx->drv_ctx_data;
-- 
2.17.1


^ permalink raw reply	[flat|nested] 115+ messages in thread

* [dpdk-dev] [PATCH 13/15] crypto/dpaa_sec: support AEAD and proto with raw APIs
  2021-08-25  7:14 ` [dpdk-dev] [PATCH 00/15] crypto: add raw vector support in DPAAx Hemant Agrawal
                     ` (11 preceding siblings ...)
  2021-08-25  7:15   ` [dpdk-dev] [PATCH 12/15] crypto/dpaa_sec: support authonly and chain with raw APIs Hemant Agrawal
@ 2021-08-25  7:15   ` Hemant Agrawal
  2021-08-25  7:15   ` [dpdk-dev] [PATCH 14/15] test/crypto: add raw API test for dpaax Hemant Agrawal
                     ` (3 subsequent siblings)
  16 siblings, 0 replies; 115+ messages in thread
From: Hemant Agrawal @ 2021-08-25  7:15 UTC (permalink / raw)
  To: dev, gakhil; +Cc: konstantin.ananyev, roy.fan.zhang, Gagandeep Singh

From: Gagandeep Singh <g.singh@nxp.com>

This adds support for AEAD and protocol offload with raw APIs
in the dpaa_sec driver.

Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
---
 drivers/crypto/dpaa_sec/dpaa_sec_raw_dp.c | 293 ++++++++++++++++++++++
 1 file changed, 293 insertions(+)
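
Note: a minimal application-side sketch of driving the raw data path
for an AEAD session, showing how the pieces below fit together. It
assumes a raw DP context `ctx` already configured for the session;
buf_va, buf_iova, plain_len, buf_len and opaque are hypothetical
placeholders.

	struct rte_crypto_vec data_vec[1];
	struct rte_crypto_sgl sgl;
	struct rte_crypto_sym_vec vec;
	struct rte_crypto_va_iova_ptr iv, digest, aad;
	union rte_crypto_sym_ofs ofs;
	int32_t st;
	int enq_status;
	void *opaque = NULL;	/* returned as user data on dequeue */

	/* iv/digest/aad va+iova must be filled beforehand (omitted) */
	data_vec[0].base = buf_va;	/* plaintext VA (hypothetical) */
	data_vec[0].iova = buf_iova;
	data_vec[0].len = plain_len;
	data_vec[0].tot_len = buf_len;	/* room for the appended tag */

	sgl.vec = data_vec;
	sgl.num = 1;

	vec.num = 1;
	vec.src_sgl = &sgl;
	vec.dest_sgl = NULL;	/* in-place; set it for OOP */
	vec.iv = &iv;
	vec.digest = &digest;
	vec.aad = &aad;
	vec.status = &st;

	ofs.raw = 0;	/* no head/tail trimming */

	if (rte_cryptodev_raw_enqueue_burst(ctx, &vec, ofs,
			&opaque, &enq_status) < 1)
		; /* nothing enqueued: back off and retry */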

diff --git a/drivers/crypto/dpaa_sec/dpaa_sec_raw_dp.c b/drivers/crypto/dpaa_sec/dpaa_sec_raw_dp.c
index 4e34629f18..b0c22a7c26 100644
--- a/drivers/crypto/dpaa_sec/dpaa_sec_raw_dp.c
+++ b/drivers/crypto/dpaa_sec/dpaa_sec_raw_dp.c
@@ -218,6 +218,163 @@ build_dpaa_raw_dp_auth_fd(uint8_t *drv_ctx,
 	return cf;
 }
 
+static inline struct dpaa_sec_job *
+build_raw_cipher_auth_gcm_sg(uint8_t *drv_ctx,
+			struct rte_crypto_sgl *sgl,
+			struct rte_crypto_sgl *dest_sgl,
+			struct rte_crypto_va_iova_ptr *iv,
+			struct rte_crypto_va_iova_ptr *digest,
+			struct rte_crypto_va_iova_ptr *auth_iv,
+			union rte_crypto_sym_ofs ofs,
+			void *userdata,
+			struct qm_fd *fd)
+{
+	dpaa_sec_session *ses =
+		((struct dpaa_sec_raw_dp_ctx *)drv_ctx)->session;
+	struct dpaa_sec_job *cf;
+	struct dpaa_sec_op_ctx *ctx;
+	struct qm_sg_entry *sg, *out_sg, *in_sg;
+	uint8_t extra_req_segs;
+	uint8_t *IV_ptr = iv->va;
+	int data_len = 0, aead_len = 0;
+	unsigned int i;
+
+	for (i = 0; i < sgl->num; i++)
+		data_len += sgl->vec[i].len;
+
+	extra_req_segs = 4;
+	aead_len = data_len - ofs.ofs.cipher.head - ofs.ofs.cipher.tail;
+
+	if (ses->auth_only_len)
+		extra_req_segs++;
+
+	if (sgl->num > MAX_SG_ENTRIES) {
+		DPAA_SEC_DP_ERR("AEAD: Max sec segs supported is %d",
+				MAX_SG_ENTRIES);
+		return NULL;
+	}
+
+	ctx = dpaa_sec_alloc_raw_ctx(ses,  sgl->num * 2 + extra_req_segs);
+	if (!ctx)
+		return NULL;
+
+	cf = &ctx->job;
+	ctx->userdata = (void *)userdata;
+
+	rte_prefetch0(cf->sg);
+
+	/* output */
+	out_sg = &cf->sg[0];
+	out_sg->extension = 1;
+	if (is_encode(ses))
+		out_sg->length = aead_len + ses->digest_length;
+	else
+		out_sg->length = aead_len;
+
+	/* output sg entries */
+	sg = &cf->sg[2];
+	qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(sg));
+	cpu_to_hw_sg(out_sg);
+
+	if (dest_sgl) {
+		/* 1st seg */
+		qm_sg_entry_set64(sg, dest_sgl->vec[0].iova);
+		sg->length = dest_sgl->vec[0].len - ofs.ofs.cipher.head;
+		sg->offset = ofs.ofs.cipher.head;
+
+		/* Successive segs */
+		for (i = 1; i < dest_sgl->num; i++) {
+			cpu_to_hw_sg(sg);
+			sg++;
+			qm_sg_entry_set64(sg, dest_sgl->vec[i].iova);
+			sg->length = dest_sgl->vec[i].len;
+		}
+	} else {
+		/* 1st seg */
+		qm_sg_entry_set64(sg, sgl->vec[0].iova);
+		sg->length = sgl->vec[0].len - ofs.ofs.cipher.head;
+		sg->offset = ofs.ofs.cipher.head;
+
+		/* Successive segs */
+		for (i = 1; i < sgl->num; i++) {
+			cpu_to_hw_sg(sg);
+			sg++;
+			qm_sg_entry_set64(sg, sgl->vec[i].iova);
+			sg->length = sgl->vec[i].len;
+		}
+
+	}
+
+	if (is_encode(ses)) {
+		cpu_to_hw_sg(sg);
+		/* set auth output */
+		sg++;
+		qm_sg_entry_set64(sg, digest->iova);
+		sg->length = ses->digest_length;
+	}
+	sg->final = 1;
+	cpu_to_hw_sg(sg);
+
+	/* input */
+	in_sg = &cf->sg[1];
+	in_sg->extension = 1;
+	in_sg->final = 1;
+	if (is_encode(ses))
+		in_sg->length = ses->iv.length + aead_len
+						+ ses->auth_only_len;
+	else
+		in_sg->length = ses->iv.length + aead_len
+				+ ses->auth_only_len + ses->digest_length;
+
+	/* input sg entries */
+	sg++;
+	qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));
+	cpu_to_hw_sg(in_sg);
+
+	/* 1st seg IV */
+	qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
+	sg->length = ses->iv.length;
+	cpu_to_hw_sg(sg);
+
+	/* 2nd seg: auth-only data */
+	if (ses->auth_only_len) {
+		sg++;
+		qm_sg_entry_set64(sg, auth_iv->iova);
+		sg->length = ses->auth_only_len;
+		cpu_to_hw_sg(sg);
+	}
+
+	/* 3rd seg */
+	sg++;
+	qm_sg_entry_set64(sg, sgl->vec[0].iova);
+	sg->length = sgl->vec[0].len - ofs.ofs.cipher.head;
+	sg->offset = ofs.ofs.cipher.head;
+
+	/* Successive segs */
+	for (i = 1; i < sgl->num; i++) {
+		cpu_to_hw_sg(sg);
+		sg++;
+		qm_sg_entry_set64(sg, sgl->vec[i].iova);
+		sg->length = sgl->vec[i].len;
+	}
+
+	if (is_decode(ses)) {
+		cpu_to_hw_sg(sg);
+		sg++;
+		memcpy(ctx->digest, digest->va,
+			ses->digest_length);
+		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(ctx->digest));
+		sg->length = ses->digest_length;
+	}
+	sg->final = 1;
+	cpu_to_hw_sg(sg);
+
+	if (ses->auth_only_len)
+		fd->cmd = 0x80000000 | ses->auth_only_len;
+
+	return cf;
+}
+
 static inline struct dpaa_sec_job *
 build_dpaa_raw_dp_chain_fd(uint8_t *drv_ctx,
 			struct rte_crypto_sgl *sgl,
@@ -484,6 +641,135 @@ build_dpaa_raw_dp_cipher_fd(uint8_t *drv_ctx,
 	return cf;
 }
 
+#ifdef RTE_LIBRTE_SECURITY
+static inline struct dpaa_sec_job *
+build_dpaa_raw_proto_sg(uint8_t *drv_ctx,
+			struct rte_crypto_sgl *sgl,
+			struct rte_crypto_sgl *dest_sgl,
+			struct rte_crypto_va_iova_ptr *iv,
+			struct rte_crypto_va_iova_ptr *digest,
+			struct rte_crypto_va_iova_ptr *auth_iv,
+			union rte_crypto_sym_ofs ofs,
+			void *userdata,
+			struct qm_fd *fd)
+{
+	RTE_SET_USED(iv);
+	RTE_SET_USED(digest);
+	RTE_SET_USED(auth_iv);
+	RTE_SET_USED(ofs);
+
+	dpaa_sec_session *ses =
+		((struct dpaa_sec_raw_dp_ctx *)drv_ctx)->session;
+	struct dpaa_sec_job *cf;
+	struct dpaa_sec_op_ctx *ctx;
+	struct qm_sg_entry *sg, *out_sg, *in_sg;
+	uint32_t in_len = 0, out_len = 0;
+	unsigned int i;
+
+	if (sgl->num > MAX_SG_ENTRIES) {
+		DPAA_SEC_DP_ERR("Proto: Max sec segs supported is %d",
+				MAX_SG_ENTRIES);
+		return NULL;
+	}
+
+	ctx = dpaa_sec_alloc_raw_ctx(ses, sgl->num * 2 + 4);
+	if (!ctx)
+		return NULL;
+	cf = &ctx->job;
+	ctx->userdata = (void *)userdata;
+	/* output */
+	out_sg = &cf->sg[0];
+	out_sg->extension = 1;
+	qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(&cf->sg[2]));
+
+	if (dest_sgl) {
+		/* 1st seg */
+		sg = &cf->sg[2];
+		qm_sg_entry_set64(sg, dest_sgl->vec[0].iova);
+		sg->offset = 0;
+		sg->length = dest_sgl->vec[0].len;
+		out_len += sg->length;
+
+		for (i = 1; i < dest_sgl->num; i++) {
+		/* Successive segs */
+			cpu_to_hw_sg(sg);
+			sg++;
+			qm_sg_entry_set64(sg, dest_sgl->vec[i].iova);
+			sg->offset = 0;
+			sg->length = dest_sgl->vec[i].len;
+			out_len += sg->length;
+		}
+		sg->length = dest_sgl->vec[i - 1].tot_len;
+	} else {
+		/* 1st seg */
+		sg = &cf->sg[2];
+		qm_sg_entry_set64(sg, sgl->vec[0].iova);
+		sg->offset = 0;
+		sg->length = sgl->vec[0].len;
+		out_len += sg->length;
+
+		for (i = 1; i < sgl->num; i++) {
+		/* Successive segs */
+			cpu_to_hw_sg(sg);
+			sg++;
+			qm_sg_entry_set64(sg, sgl->vec[i].iova);
+			sg->offset = 0;
+			sg->length = sgl->vec[i].len;
+			out_len += sg->length;
+		}
+		sg->length = sgl->vec[i - 1].tot_len;
+
+	}
+	out_len += sg->length;
+	sg->final = 1;
+	cpu_to_hw_sg(sg);
+
+	out_sg->length = out_len;
+	cpu_to_hw_sg(out_sg);
+
+	/* input */
+	in_sg = &cf->sg[1];
+	in_sg->extension = 1;
+	in_sg->final = 1;
+	in_len = sgl->vec[0].len;
+
+	sg++;
+	qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));
+
+	/* 1st seg */
+	qm_sg_entry_set64(sg, sgl->vec[0].iova);
+	sg->length = sgl->vec[0].len;
+	sg->offset = 0;
+
+	/* Successive segs */
+	for (i = 1; i < sgl->num; i++) {
+		cpu_to_hw_sg(sg);
+		sg++;
+		qm_sg_entry_set64(sg, sgl->vec[i].iova);
+		sg->length = sgl->vec[i].len;
+		sg->offset = 0;
+		in_len += sg->length;
+	}
+	sg->final = 1;
+	cpu_to_hw_sg(sg);
+
+	in_sg->length = in_len;
+	cpu_to_hw_sg(in_sg);
+
+	if ((ses->ctxt == DPAA_SEC_PDCP) && ses->pdcp.hfn_ovd) {
+		fd->cmd = 0x80000000 |
+			*((uint32_t *)((uint8_t *)userdata +
+			ses->pdcp.hfn_ovd_offset));
+		DPAA_SEC_DP_DEBUG("Per packet HFN: %x, ovd:%u\n",
+			*((uint32_t *)((uint8_t *)userdata +
+			ses->pdcp.hfn_ovd_offset)),
+			ses->pdcp.hfn_ovd);
+	}
+
+	return cf;
+}
+#endif
+
 static uint32_t
 dpaa_sec_raw_enqueue_burst(void *qp_data, uint8_t *drv_ctx,
 	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
@@ -744,6 +1030,13 @@ dpaa_sec_configure_raw_dp_ctx(struct rte_cryptodev *dev, uint16_t qp_id,
 		sess->build_raw_dp_fd = build_dpaa_raw_dp_auth_fd;
 	else if (sess->ctxt == DPAA_SEC_CIPHER_HASH)
 		sess->build_raw_dp_fd = build_dpaa_raw_dp_chain_fd;
+	else if (sess->ctxt == DPAA_SEC_AEAD)
+		sess->build_raw_dp_fd = build_raw_cipher_auth_gcm_sg;
+#ifdef RTE_LIBRTE_SECURITY
+	else if (sess->ctxt == DPAA_SEC_IPSEC ||
+			sess->ctxt == DPAA_SEC_PDCP)
+		sess->build_raw_dp_fd = build_dpaa_raw_proto_sg;
+#endif
 	else
 		return -ENOTSUP;
 	dp_ctx = (struct dpaa_sec_raw_dp_ctx *)raw_dp_ctx->drv_ctx_data;
-- 
2.17.1


^ permalink raw reply	[flat|nested] 115+ messages in thread

* [dpdk-dev] [PATCH 14/15] test/crypto: add raw API test for dpaax
  2021-08-25  7:14 ` [dpdk-dev] [PATCH 00/15] crypto: add raw vector support in DPAAx Hemant Agrawal
                     ` (12 preceding siblings ...)
  2021-08-25  7:15   ` [dpdk-dev] [PATCH 13/15] crypto/dpaa_sec: support AEAD and proto " Hemant Agrawal
@ 2021-08-25  7:15   ` Hemant Agrawal
  2021-08-25  7:15   ` [dpdk-dev] [PATCH 15/15] test/crypto: add raw API support in 5G algos Hemant Agrawal
                     ` (2 subsequent siblings)
  16 siblings, 0 replies; 115+ messages in thread
From: Hemant Agrawal @ 2021-08-25  7:15 UTC (permalink / raw)
  To: dev, gakhil; +Cc: konstantin.ananyev, roy.fan.zhang, Gagandeep Singh

This patch adds support for raw API tests on the
dpaa_sec and dpaa2_sec platforms.

Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
Signed-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>
---
 app/test/test_cryptodev.c | 116 +++++++++++++++++++++++++++++++++++---
 1 file changed, 109 insertions(+), 7 deletions(-)
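
Note: the per-case gating added below follows one pattern; condensed
here for reference (a sketch, not a new helper; the hunks below
variously return TEST_SKIPPED or -ENOTSUP):

	struct rte_cryptodev_info dev_info;
	uint64_t feat_flags;

	rte_cryptodev_info_get(ts_params->valid_devs[0], &dev_info);
	feat_flags = dev_info.feature_flags;

	if (global_api_test_type == CRYPTODEV_RAW_API_TEST &&
			!(feat_flags & RTE_CRYPTODEV_FF_SYM_RAW_DP)) {
		/* skip: device does not advertise the raw data path */
		printf("Device does not support RAW data-path APIs.\n");
		return TEST_SKIPPED;
	}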

diff --git a/app/test/test_cryptodev.c b/app/test/test_cryptodev.c
index aecd507fa1..59bed6e6d2 100644
--- a/app/test/test_cryptodev.c
+++ b/app/test/test_cryptodev.c
@@ -175,11 +175,11 @@ process_sym_raw_dp_op(uint8_t dev_id, uint16_t qp_id,
 {
 	struct rte_crypto_sym_op *sop = op->sym;
 	struct rte_crypto_op *ret_op = NULL;
-	struct rte_crypto_vec data_vec[UINT8_MAX];
+	struct rte_crypto_vec data_vec[UINT8_MAX], dest_data_vec[UINT8_MAX];
 	struct rte_crypto_va_iova_ptr cipher_iv, digest, aad_auth_iv;
 	union rte_crypto_sym_ofs ofs;
 	struct rte_crypto_sym_vec vec;
-	struct rte_crypto_sgl sgl;
+	struct rte_crypto_sgl sgl, dest_sgl;
 	uint32_t max_len;
 	union rte_cryptodev_session_ctx sess;
 	uint32_t count = 0;
@@ -315,6 +315,19 @@ process_sym_raw_dp_op(uint8_t dev_id, uint16_t qp_id,
 	}
 
 	sgl.num = n;
+	/* Out of place */
+	if (sop->m_dst != NULL) {
+		dest_sgl.vec = dest_data_vec;
+		vec.dest_sgl = &dest_sgl;
+		n = rte_crypto_mbuf_to_vec(sop->m_dst, 0, max_len,
+				dest_data_vec, RTE_DIM(dest_data_vec));
+		if (n < 0 || n > sop->m_dst->nb_segs) {
+			op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+			goto exit;
+		}
+		dest_sgl.num = n;
+	} else
+		vec.dest_sgl = NULL;
 
 	if (rte_cryptodev_raw_enqueue_burst(ctx, &vec, ofs, (void **)&op,
 			&enqueue_status) < 1) {
@@ -8305,10 +8318,21 @@ test_pdcp_proto_SGL(int i, int oop,
 	int to_trn_tbl[16];
 	int segs = 1;
 	unsigned int trn_data = 0;
+	struct rte_cryptodev_info dev_info;
+	uint64_t feat_flags;
 	struct rte_security_ctx *ctx = (struct rte_security_ctx *)
 				rte_cryptodev_get_sec_ctx(
 				ts_params->valid_devs[0]);
+	struct rte_mbuf *temp_mbuf;
+
+	rte_cryptodev_info_get(ts_params->valid_devs[0], &dev_info);
+	feat_flags = dev_info.feature_flags;
 
+	if ((global_api_test_type == CRYPTODEV_RAW_API_TEST) &&
+			(!(feat_flags & RTE_CRYPTODEV_FF_SYM_RAW_DP))) {
+		printf("Device does not support RAW data-path APIs.\n");
+		return -ENOTSUP;
+	}
 	/* Verify the capabilities */
 	struct rte_security_capability_idx sec_cap_idx;
 
@@ -8492,8 +8516,23 @@ test_pdcp_proto_SGL(int i, int oop,
 		ut_params->op->sym->m_dst = ut_params->obuf;
 
 	/* Process crypto operation */
-	if (process_crypto_request(ts_params->valid_devs[0], ut_params->op)
-		== NULL) {
+	temp_mbuf = ut_params->op->sym->m_src;
+	if (global_api_test_type == CRYPTODEV_RAW_API_TEST) {
+		/* filling lengths */
+		while (temp_mbuf) {
+			ut_params->op->sym->cipher.data.length
+				+= temp_mbuf->pkt_len;
+			ut_params->op->sym->auth.data.length
+				+= temp_mbuf->pkt_len;
+			temp_mbuf = temp_mbuf->next;
+		}
+		process_sym_raw_dp_op(ts_params->valid_devs[0], 0,
+			ut_params->op, 1, 1, 0, 0);
+	} else {
+		ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+							ut_params->op);
+	}
+	if (ut_params->op == NULL) {
 		printf("TestCase %s()-%d line %d failed %s: ",
 			__func__, i, __LINE__,
 			"failed to process sym crypto op");
@@ -9934,6 +9973,7 @@ test_authenticated_encryption_oop(const struct aead_test_data *tdata)
 	int retval;
 	uint8_t *ciphertext, *auth_tag;
 	uint16_t plaintext_pad_len;
+	struct rte_cryptodev_info dev_info;
 
 	/* Verify the capabilities */
 	struct rte_cryptodev_sym_capability_idx cap_idx;
@@ -9943,7 +9983,11 @@ test_authenticated_encryption_oop(const struct aead_test_data *tdata)
 			&cap_idx) == NULL)
 		return TEST_SKIPPED;
 
-	if (global_api_test_type == CRYPTODEV_RAW_API_TEST)
+	rte_cryptodev_info_get(ts_params->valid_devs[0], &dev_info);
+	uint64_t feat_flags = dev_info.feature_flags;
+
+	if ((global_api_test_type == CRYPTODEV_RAW_API_TEST) &&
+			(!(feat_flags & RTE_CRYPTODEV_FF_SYM_RAW_DP)))
 		return TEST_SKIPPED;
 
 	/* not supported with CPU crypto */
@@ -9980,7 +10024,11 @@ test_authenticated_encryption_oop(const struct aead_test_data *tdata)
 	ut_params->op->sym->m_dst = ut_params->obuf;
 
 	/* Process crypto operation */
-	TEST_ASSERT_NOT_NULL(process_crypto_request(ts_params->valid_devs[0],
+	if (global_api_test_type == CRYPTODEV_RAW_API_TEST)
+		process_sym_raw_dp_op(ts_params->valid_devs[0], 0,
+			ut_params->op, 0, 0, 0, 0);
+	else
+		TEST_ASSERT_NOT_NULL(process_crypto_request(ts_params->valid_devs[0],
 			ut_params->op), "failed to process sym crypto op");
 
 	TEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,
@@ -10026,6 +10074,10 @@ test_authenticated_decryption_oop(const struct aead_test_data *tdata)
 
 	int retval;
 	uint8_t *plaintext;
+	struct rte_cryptodev_info dev_info;
+
+	rte_cryptodev_info_get(ts_params->valid_devs[0], &dev_info);
+	uint64_t feat_flags = dev_info.feature_flags;
 
 	/* Verify the capabilities */
 	struct rte_cryptodev_sym_capability_idx cap_idx;
@@ -10040,6 +10092,12 @@ test_authenticated_decryption_oop(const struct aead_test_data *tdata)
 			global_api_test_type == CRYPTODEV_RAW_API_TEST)
 		return TEST_SKIPPED;
 
+	if ((global_api_test_type == CRYPTODEV_RAW_API_TEST) &&
+			(!(feat_flags & RTE_CRYPTODEV_FF_SYM_RAW_DP))) {
+		printf("Device does not support RAW data-path APIs.\n");
+		return TEST_SKIPPED;
+	}
+
 	/* Create AEAD session */
 	retval = create_aead_session(ts_params->valid_devs[0],
 			tdata->algo,
@@ -10070,7 +10128,11 @@ test_authenticated_decryption_oop(const struct aead_test_data *tdata)
 	ut_params->op->sym->m_dst = ut_params->obuf;
 
 	/* Process crypto operation */
-	TEST_ASSERT_NOT_NULL(process_crypto_request(ts_params->valid_devs[0],
+	if (global_api_test_type == CRYPTODEV_RAW_API_TEST)
+		process_sym_raw_dp_op(ts_params->valid_devs[0], 0,
+				ut_params->op, 0, 0, 0, 0);
+	else
+		TEST_ASSERT_NOT_NULL(process_crypto_request(ts_params->valid_devs[0],
 			ut_params->op), "failed to process sym crypto op");
 
 	TEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,
@@ -14834,6 +14896,46 @@ test_cryptodev_cn10k(void)
 	return run_cryptodev_testsuite(RTE_STR(CRYPTODEV_NAME_CN10K_PMD));
 }
 
+static int
+test_cryptodev_dpaa2_sec_raw_api(void)
+{
+	static const char *pmd_name = RTE_STR(CRYPTODEV_NAME_DPAA2_SEC_PMD);
+	int ret;
+
+	ret = require_feature_flag(pmd_name, RTE_CRYPTODEV_FF_SYM_RAW_DP,
+			"RAW API");
+	if (ret)
+		return ret;
+
+	global_api_test_type = CRYPTODEV_RAW_API_TEST;
+	ret = run_cryptodev_testsuite(pmd_name);
+	global_api_test_type = CRYPTODEV_API_TEST;
+
+	return ret;
+}
+
+static int
+test_cryptodev_dpaa_sec_raw_api(void)
+{
+	static const char *pmd_name = RTE_STR(CRYPTODEV_NAME_DPAA_SEC_PMD);
+	int ret;
+
+	ret = require_feature_flag(pmd_name, RTE_CRYPTODEV_FF_SYM_RAW_DP,
+			"RAW API");
+	if (ret)
+		return ret;
+
+	global_api_test_type = CRYPTODEV_RAW_API_TEST;
+	ret = run_cryptodev_testsuite(pmd_name);
+	global_api_test_type = CRYPTODEV_API_TEST;
+
+	return ret;
+}
+
+REGISTER_TEST_COMMAND(cryptodev_dpaa2_sec_raw_api_autotest,
+		test_cryptodev_dpaa2_sec_raw_api);
+REGISTER_TEST_COMMAND(cryptodev_dpaa_sec_raw_api_autotest,
+		test_cryptodev_dpaa_sec_raw_api);
 REGISTER_TEST_COMMAND(cryptodev_qat_raw_api_autotest,
 		test_cryptodev_qat_raw_api);
 REGISTER_TEST_COMMAND(cryptodev_qat_autotest, test_cryptodev_qat);
-- 
2.17.1


^ permalink raw reply	[flat|nested] 115+ messages in thread

* [dpdk-dev] [PATCH 15/15] test/crypto: add raw API support in 5G algos
  2021-08-25  7:14 ` [dpdk-dev] [PATCH 00/15] crypto: add raw vector support in DPAAx Hemant Agrawal
                     ` (13 preceding siblings ...)
  2021-08-25  7:15   ` [dpdk-dev] [PATCH 14/15] test/crypto: add raw API test for dpaax Hemant Agrawal
@ 2021-08-25  7:15   ` Hemant Agrawal
  2021-09-06 18:45     ` [dpdk-dev] [EXT] " Akhil Goyal
  2021-09-06 18:46   ` [dpdk-dev] [EXT] [PATCH 00/15] crypto: add raw vector support in DPAAx Akhil Goyal
  2021-09-07  7:59   ` [dpdk-dev] [PATCH v2 " Hemant Agrawal
  16 siblings, 1 reply; 115+ messages in thread
From: Hemant Agrawal @ 2021-08-25  7:15 UTC (permalink / raw)
  To: dev, gakhil; +Cc: konstantin.ananyev, roy.fan.zhang, Gagandeep Singh

This patch adds support for RAW API testing with the ZUC
and SNOW 3G test cases.

Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
Signed-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>
---
 app/test/test_cryptodev.c | 57 ++++++++++++++++++++++++++++++++++-----
 1 file changed, 51 insertions(+), 6 deletions(-)
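
Note: every converted wireless case below dispatches through the same
branch; the trailing arguments to process_sym_raw_dp_op() map, per its
signature in this file, to is_cipher=1, is_auth=0, len_in_bits=1 and
the cipher IV length:

	if (global_api_test_type == CRYPTODEV_RAW_API_TEST)
		process_sym_raw_dp_op(ts_params->valid_devs[0], 0,
			ut_params->op, 1, 0, 1, tdata->cipher_iv.len);
	else
		ut_params->op = process_crypto_request(
			ts_params->valid_devs[0], ut_params->op);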

diff --git a/app/test/test_cryptodev.c b/app/test/test_cryptodev.c
index 59bed6e6d2..ab3394bb09 100644
--- a/app/test/test_cryptodev.c
+++ b/app/test/test_cryptodev.c
@@ -368,6 +368,7 @@ process_sym_raw_dp_op(uint8_t dev_id, uint16_t qp_id,
 	}
 
 	op->status = (count == MAX_RAW_DEQUEUE_COUNT + 1 || ret_op != op ||
+			ret_op->status == RTE_CRYPTO_OP_STATUS_ERROR ||
 			n_success < 1) ? RTE_CRYPTO_OP_STATUS_ERROR :
 					RTE_CRYPTO_OP_STATUS_SUCCESS;
 
@@ -4152,6 +4153,16 @@ test_snow3g_encryption_oop(const struct snow3g_test_data *tdata)
 	int retval;
 	unsigned plaintext_pad_len;
 	unsigned plaintext_len;
+	struct rte_cryptodev_info dev_info;
+
+	rte_cryptodev_info_get(ts_params->valid_devs[0], &dev_info);
+	uint64_t feat_flags = dev_info.feature_flags;
+
+	if ((global_api_test_type == CRYPTODEV_RAW_API_TEST) &&
+			(!(feat_flags & RTE_CRYPTODEV_FF_SYM_RAW_DP))) {
+		printf("Device does not support RAW data-path APIs.\n");
+		return -ENOTSUP;
+	}
 
 	/* Verify the capabilities */
 	struct rte_cryptodev_sym_capability_idx cap_idx;
@@ -4207,7 +4218,11 @@ test_snow3g_encryption_oop(const struct snow3g_test_data *tdata)
 	if (retval < 0)
 		return retval;
 
-	ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+	if (global_api_test_type == CRYPTODEV_RAW_API_TEST)
+		process_sym_raw_dp_op(ts_params->valid_devs[0], 0,
+			ut_params->op, 1, 0, 1, tdata->cipher_iv.len);
+	else
+		ut_params->op = process_crypto_request(ts_params->valid_devs[0],
 						ut_params->op);
 	TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
 
@@ -4267,6 +4282,12 @@ test_snow3g_encryption_oop_sgl(const struct snow3g_test_data *tdata)
 		return TEST_SKIPPED;
 	}
 
+	if ((global_api_test_type == CRYPTODEV_RAW_API_TEST) &&
+			(!(feat_flags & RTE_CRYPTODEV_FF_SYM_RAW_DP))) {
+		printf("Device does not support RAW data-path APIs.\n");
+		return -ENOTSUP;
+	}
+
 	/* Create SNOW 3G session */
 	retval = create_wireless_algo_cipher_session(ts_params->valid_devs[0],
 					RTE_CRYPTO_CIPHER_OP_ENCRYPT,
@@ -4301,7 +4322,11 @@ test_snow3g_encryption_oop_sgl(const struct snow3g_test_data *tdata)
 	if (retval < 0)
 		return retval;
 
-	ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+	if (global_api_test_type == CRYPTODEV_RAW_API_TEST)
+		process_sym_raw_dp_op(ts_params->valid_devs[0], 0,
+			ut_params->op, 1, 0, 1, tdata->cipher_iv.len);
+	else
+		ut_params->op = process_crypto_request(ts_params->valid_devs[0],
 						ut_params->op);
 	TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
 
@@ -4428,7 +4453,11 @@ test_snow3g_encryption_offset_oop(const struct snow3g_test_data *tdata)
 	if (retval < 0)
 		return retval;
 
-	ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+	if (global_api_test_type == CRYPTODEV_RAW_API_TEST)
+		process_sym_raw_dp_op(ts_params->valid_devs[0], 0,
+			ut_params->op, 1, 0, 1, tdata->cipher_iv.len);
+	else
+		ut_params->op = process_crypto_request(ts_params->valid_devs[0],
 						ut_params->op);
 	TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
 
@@ -4559,7 +4588,16 @@ static int test_snow3g_decryption_oop(const struct snow3g_test_data *tdata)
 	uint8_t *plaintext, *ciphertext;
 	unsigned ciphertext_pad_len;
 	unsigned ciphertext_len;
+	struct rte_cryptodev_info dev_info;
+
+	rte_cryptodev_info_get(ts_params->valid_devs[0], &dev_info);
+	uint64_t feat_flags = dev_info.feature_flags;
 
+	if ((global_api_test_type == CRYPTODEV_RAW_API_TEST) &&
+			(!(feat_flags & RTE_CRYPTODEV_FF_SYM_RAW_DP))) {
+		printf("Device does not support RAW data-path APIs.\n");
+		return -ENOTSUP;
+	}
 	/* Verify the capabilities */
 	struct rte_cryptodev_sym_capability_idx cap_idx;
 	cap_idx.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
@@ -4617,7 +4655,11 @@ static int test_snow3g_decryption_oop(const struct snow3g_test_data *tdata)
 	if (retval < 0)
 		return retval;
 
-	ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+	if (global_api_test_type == CRYPTODEV_RAW_API_TEST)
+		process_sym_raw_dp_op(ts_params->valid_devs[0], 0,
+			ut_params->op, 1, 0, 1, tdata->cipher_iv.len);
+	else
+		ut_params->op = process_crypto_request(ts_params->valid_devs[0],
 						ut_params->op);
 	TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
 	ut_params->obuf = ut_params->op->sym->m_dst;
@@ -12653,10 +12695,13 @@ test_authenticated_decryption_fail_when_corruption(
 	else {
 		ut_params->op = process_crypto_request(ts_params->valid_devs[0],
 			ut_params->op);
-		TEST_ASSERT_NULL(ut_params->op, "authentication not failed");
 	}
+	if (ut_params->op == NULL)
+		return 0;
+	else if (ut_params->op->status != RTE_CRYPTO_OP_STATUS_SUCCESS)
+		return 0;
 
-	return 0;
+	return -1;
 }
 
 static int
-- 
2.17.1


^ permalink raw reply	[flat|nested] 115+ messages in thread

* Re: [dpdk-dev] [EXT] [PATCH 01/15] crypto: change sgl to src_sgl in vector
  2021-08-25  7:14   ` [dpdk-dev] [PATCH 01/15] crypto: change sgl to src_sgl in vector Hemant Agrawal
@ 2021-09-06 18:36     ` Akhil Goyal
  0 siblings, 0 replies; 115+ messages in thread
From: Akhil Goyal @ 2021-09-06 18:36 UTC (permalink / raw)
  To: Hemant Agrawal, dev; +Cc: konstantin.ananyev, roy.fan.zhang

> This patch renames sgl to src_sgl to help differentiate
> between the source and destination sgl.
> 
> Signed-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>
> ---
Acked-by: Akhil Goyal <gakhil@marvell.com>

^ permalink raw reply	[flat|nested] 115+ messages in thread

* Re: [dpdk-dev] [EXT] [PATCH 02/15] crypto: add total raw buffer length
  2021-08-25  7:14   ` [dpdk-dev] [PATCH 02/15] crypto: add total raw buffer length Hemant Agrawal
@ 2021-09-06 18:36     ` Akhil Goyal
  0 siblings, 0 replies; 115+ messages in thread
From: Akhil Goyal @ 2021-09-06 18:36 UTC (permalink / raw)
  To: Hemant Agrawal, dev; +Cc: konstantin.ananyev, roy.fan.zhang, Gagandeep Singh

> From: Gagandeep Singh <g.singh@nxp.com>
> 
> The current crypto raw data vector is extended to support
> rte_security use cases, where the total data length is needed to know
> how much additional memory space is available in the buffer beyond
> the data length, so that the driver/HW can write expanded-size
> data after encryption.
> 
> Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
> ---
Acked-by: Akhil Goyal <gakhil@marvell.com>

^ permalink raw reply	[flat|nested] 115+ messages in thread

* Re: [dpdk-dev] [EXT] [PATCH 03/15] crypto: add dest_sgl in raw vector APIs
  2021-08-25  7:14   ` [dpdk-dev] [PATCH 03/15] crypto: add dest_sgl in raw vector APIs Hemant Agrawal
@ 2021-09-06 18:37     ` Akhil Goyal
  0 siblings, 0 replies; 115+ messages in thread
From: Akhil Goyal @ 2021-09-06 18:37 UTC (permalink / raw)
  To: Hemant Agrawal, dev; +Cc: konstantin.ananyev, roy.fan.zhang

> The structure rte_crypto_sym_vec is updated to
> add dest_sgl to support out of place processing.
> 
> Signed-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>
> ---
Acked-by: Akhil Goyal <gakhil@marvell.com>

^ permalink raw reply	[flat|nested] 115+ messages in thread

* Re: [dpdk-dev] [EXT] [PATCH 04/15] crypto: fix raw process for multi-seg case
  2021-08-25  7:14   ` [dpdk-dev] [PATCH 04/15] crypto: fix raw process for multi-seg case Hemant Agrawal
@ 2021-09-06 18:38     ` Akhil Goyal
  0 siblings, 0 replies; 115+ messages in thread
From: Akhil Goyal @ 2021-09-06 18:38 UTC (permalink / raw)
  To: Hemant Agrawal, dev, roy.fan.zhang
  Cc: konstantin.ananyev, Gagandeep Singh, marcinx.smoczynski, stable

> From: Gagandeep Singh <g.singh@nxp.com>
> 
> If no next segment is available, the “for” loop exits early and it
> still returns i+1, i.e. 2, which is wrong as it has filled only one buffer.
> 
> Fixes: 7adf992fb9bf ("cryptodev: introduce CPU crypto API")
> Cc: marcinx.smoczynski@intel.com
> Cc: stable@dpdk.org
> 
> Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
@Fan: Could you please review this patch.

^ permalink raw reply	[flat|nested] 115+ messages in thread

* Re: [dpdk-dev] [EXT] [PATCH 15/15] test/crypto: add raw API support in 5G algos
  2021-08-25  7:15   ` [dpdk-dev] [PATCH 15/15] test/crypto: add raw API support in 5G algos Hemant Agrawal
@ 2021-09-06 18:45     ` Akhil Goyal
  0 siblings, 0 replies; 115+ messages in thread
From: Akhil Goyal @ 2021-09-06 18:45 UTC (permalink / raw)
  To: Hemant Agrawal, dev; +Cc: konstantin.ananyev, roy.fan.zhang, Gagandeep Singh

> This patch adds support for RAW API testing with the ZUC
> and SNOW 3G test cases.
> 
> Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
> Signed-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>
> ---
Please fix compilation.



^ permalink raw reply	[flat|nested] 115+ messages in thread

* Re: [dpdk-dev] [EXT] [PATCH 00/15] crypto: add raw vector support in DPAAx
  2021-08-25  7:14 ` [dpdk-dev] [PATCH 00/15] crypto: add raw vector support in DPAAx Hemant Agrawal
                     ` (14 preceding siblings ...)
  2021-08-25  7:15   ` [dpdk-dev] [PATCH 15/15] test/crypto: add raw API support in 5G algos Hemant Agrawal
@ 2021-09-06 18:46   ` Akhil Goyal
  2021-09-07  7:59   ` [dpdk-dev] [PATCH v2 " Hemant Agrawal
  16 siblings, 0 replies; 115+ messages in thread
From: Akhil Goyal @ 2021-09-06 18:46 UTC (permalink / raw)
  To: Hemant Agrawal, dev; +Cc: konstantin.ananyev, roy.fan.zhang

> This patch series adds support for raw vector API in dpaax_sec drivers.
> This also enhances the raw vector APIs to add OOP and security
> protocol support.
> 
> Gagandeep Singh (11):
>   crypto: add total raw buffer length
>   crypto: fix raw process for multi-seg case
>   crypto/dpaa2_sec: support raw datapath APIs
>   crypto/dpaa2_sec: support AUTH only with raw buffer APIs
>   crypto/dpaa2_sec: support AUTHENC with raw buffer APIs
>   crypto/dpaa2_sec: support AEAD with raw buffer APIs
>   crypto/dpaa2_sec: support OOP with raw buffer API
>   crypto/dpaa2_sec: enhance error checks with raw buffer APIs
>   crypto/dpaa_sec: support raw datapath APIs
>   crypto/dpaa_sec: support authonly and chain with raw APIs
>   crypto/dpaa_sec: support AEAD and proto with raw APIs
> 
> Hemant Agrawal (4):
>   crypto: change sgl to src_sgl in vector
>   crypto: add dest_sgl in raw vector APIs
>   test/crypto: add raw API test for dpaax
>   test/crypto: add raw API support in 5G algos
> 
>  app/test/test_cryptodev.c                   |  179 +++-
>  drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c |   13 +-
>  drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h   |   82 +-
>  drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c | 1045 ++++++++++++++++++
>  drivers/crypto/dpaa2_sec/meson.build        |    3 +-
>  drivers/crypto/dpaa_sec/dpaa_sec.c          |   23 +-
>  drivers/crypto/dpaa_sec/dpaa_sec.h          |   40 +-
>  drivers/crypto/dpaa_sec/dpaa_sec_raw_dp.c   | 1052 +++++++++++++++++++
>  drivers/crypto/dpaa_sec/meson.build         |    4 +-
>  drivers/crypto/qat/qat_sym_hw_dp.c          |   27 +-
>  lib/cryptodev/rte_crypto_sym.h              |   13 +-
>  lib/ipsec/misc.h                            |    4 +-
>  12 files changed, 2390 insertions(+), 95 deletions(-)
>  create mode 100644 drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c
>  create mode 100644 drivers/crypto/dpaa_sec/dpaa_sec_raw_dp.c
> 
Release notes update missing.

^ permalink raw reply	[flat|nested] 115+ messages in thread

* [dpdk-dev] [PATCH v2 00/15] crypto: add raw vector support in DPAAx
  2021-08-25  7:14 ` [dpdk-dev] [PATCH 00/15] crypto: add raw vector support in DPAAx Hemant Agrawal
                     ` (15 preceding siblings ...)
  2021-09-06 18:46   ` [dpdk-dev] [EXT] [PATCH 00/15] crypto: add raw vector support in DPAAx Akhil Goyal
@ 2021-09-07  7:59   ` Hemant Agrawal
  2021-09-07  7:59     ` [dpdk-dev] [PATCH v2 01/15] crypto: change sgl to src_sgl in vector Hemant Agrawal
                       ` (16 more replies)
  16 siblings, 17 replies; 115+ messages in thread
From: Hemant Agrawal @ 2021-09-07  7:59 UTC (permalink / raw)
  To: dev, gakhil; +Cc: konstantin.ananyev, roy.fan.zhang

This patch series adds support for the raw vector API in the dpaax_sec
drivers. It also enhances the raw vector APIs to add OOP and security
protocol support.

v2: fix aesni compilation and add release notes.

Gagandeep Singh (11):
  crypto: add total raw buffer length
  crypto: fix raw process for multi-seg case
  crypto/dpaa2_sec: support raw datapath APIs
  crypto/dpaa2_sec: support AUTH only with raw buffer APIs
  crypto/dpaa2_sec: support AUTHENC with raw buffer APIs
  crypto/dpaa2_sec: support AEAD with raw buffer APIs
  crypto/dpaa2_sec: support OOP with raw buffer API
  crypto/dpaa2_sec: enhance error checks with raw buffer APIs
  crypto/dpaa_sec: support raw datapath APIs
  crypto/dpaa_sec: support authonly and chain with raw APIs
  crypto/dpaa_sec: support AEAD and proto with raw APIs

Hemant Agrawal (4):
  crypto: change sgl to src_sgl in vector
  crypto: add dest_sgl in raw vector APIs
  test/crypto: add raw API test for dpaax
  test/crypto: add raw API support in 5G algos

 app/test/test_cryptodev.c                   |  179 +++-
 doc/guides/rel_notes/release_21_11.rst      |    8 +
 drivers/crypto/aesni_gcm/aesni_gcm_pmd.c    |   12 +-
 drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c  |    6 +-
 drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c |   13 +-
 drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h   |   82 +-
 drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c | 1045 ++++++++++++++++++
 drivers/crypto/dpaa2_sec/meson.build        |    3 +-
 drivers/crypto/dpaa_sec/dpaa_sec.c          |   23 +-
 drivers/crypto/dpaa_sec/dpaa_sec.h          |   40 +-
 drivers/crypto/dpaa_sec/dpaa_sec_raw_dp.c   | 1052 +++++++++++++++++++
 drivers/crypto/dpaa_sec/meson.build         |    4 +-
 drivers/crypto/qat/qat_sym_hw_dp.c          |   27 +-
 lib/cryptodev/rte_crypto_sym.h              |   13 +-
 lib/ipsec/misc.h                            |    4 +-
 15 files changed, 2407 insertions(+), 104 deletions(-)
 create mode 100644 drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c
 create mode 100644 drivers/crypto/dpaa_sec/dpaa_sec_raw_dp.c
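
The end-to-end raw data path flow exercised by this series, in sketch
form: size and configure the per-queue-pair context with
rte_cryptodev_get_raw_dp_ctx_size() and
rte_cryptodev_configure_raw_dp_ctx(); describe each burst in a
struct rte_crypto_sym_vec (src_sgl, plus the new dest_sgl for
out-of-place); submit with rte_cryptodev_raw_enqueue_burst() and
rte_cryptodev_raw_enqueue_done(); then collect results with
rte_cryptodev_raw_dequeue_burst() and rte_cryptodev_raw_dequeue_done().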

-- 
2.17.1


^ permalink raw reply	[flat|nested] 115+ messages in thread

* [dpdk-dev] [PATCH v2 01/15] crypto: change sgl to src_sgl in vector
  2021-09-07  7:59   ` [dpdk-dev] [PATCH v2 " Hemant Agrawal
@ 2021-09-07  7:59     ` Hemant Agrawal
  2021-09-16 11:38       ` Ananyev, Konstantin
  2021-09-07  7:59     ` [dpdk-dev] [PATCH v2 02/15] crypto: add total raw buffer length Hemant Agrawal
                       ` (15 subsequent siblings)
  16 siblings, 1 reply; 115+ messages in thread
From: Hemant Agrawal @ 2021-09-07  7:59 UTC (permalink / raw)
  To: dev, gakhil; +Cc: konstantin.ananyev, roy.fan.zhang

This patch renames sgl to src_sgl to help differentiate
between the source and destination sgl.

Signed-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>
Acked-by: Akhil Goyal <gakhil@marvell.com>
---
 app/test/test_cryptodev.c                  |  6 ++---
 drivers/crypto/aesni_gcm/aesni_gcm_pmd.c   | 12 +++++-----
 drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c |  6 ++---
 drivers/crypto/qat/qat_sym_hw_dp.c         | 27 +++++++++++++---------
 lib/cryptodev/rte_crypto_sym.h             |  2 +-
 lib/ipsec/misc.h                           |  4 ++--
 6 files changed, 31 insertions(+), 26 deletions(-)

diff --git a/app/test/test_cryptodev.c b/app/test/test_cryptodev.c
index 843d07ba37..ed63524edc 100644
--- a/app/test/test_cryptodev.c
+++ b/app/test/test_cryptodev.c
@@ -221,7 +221,7 @@ process_sym_raw_dp_op(uint8_t dev_id, uint16_t qp_id,
 	digest.va = NULL;
 	sgl.vec = data_vec;
 	vec.num = 1;
-	vec.sgl = &sgl;
+	vec.src_sgl = &sgl;
 	vec.iv = &cipher_iv;
 	vec.digest = &digest;
 	vec.aad = &aad_auth_iv;
@@ -385,7 +385,7 @@ process_cpu_aead_op(uint8_t dev_id, struct rte_crypto_op *op)
 
 	sgl.vec = vec;
 	sgl.num = n;
-	symvec.sgl = &sgl;
+	symvec.src_sgl = &sgl;
 	symvec.iv = &iv_ptr;
 	symvec.digest = &digest_ptr;
 	symvec.aad = &aad_ptr;
@@ -431,7 +431,7 @@ process_cpu_crypt_auth_op(uint8_t dev_id, struct rte_crypto_op *op)
 
 	sgl.vec = vec;
 	sgl.num = n;
-	symvec.sgl = &sgl;
+	symvec.src_sgl = &sgl;
 	symvec.iv = &iv_ptr;
 	symvec.digest = &digest_ptr;
 	symvec.status = &st;
diff --git a/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c b/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c
index 886e2a5aaa..5fbb9b79f8 100644
--- a/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c
+++ b/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c
@@ -535,7 +535,7 @@ aesni_gcm_sgl_encrypt(struct aesni_gcm_session *s,
 	processed = 0;
 	for (i = 0; i < vec->num; ++i) {
 		aesni_gcm_process_gcm_sgl_op(s, gdata_ctx,
-			&vec->sgl[i], vec->iv[i].va,
+			&vec->src_sgl[i], vec->iv[i].va,
 			vec->aad[i].va);
 		vec->status[i] = aesni_gcm_sgl_op_finalize_encryption(s,
 			gdata_ctx, vec->digest[i].va);
@@ -554,7 +554,7 @@ aesni_gcm_sgl_decrypt(struct aesni_gcm_session *s,
 	processed = 0;
 	for (i = 0; i < vec->num; ++i) {
 		aesni_gcm_process_gcm_sgl_op(s, gdata_ctx,
-			&vec->sgl[i], vec->iv[i].va,
+			&vec->src_sgl[i], vec->iv[i].va,
 			vec->aad[i].va);
 		 vec->status[i] = aesni_gcm_sgl_op_finalize_decryption(s,
 			gdata_ctx, vec->digest[i].va);
@@ -572,13 +572,13 @@ aesni_gmac_sgl_generate(struct aesni_gcm_session *s,
 
 	processed = 0;
 	for (i = 0; i < vec->num; ++i) {
-		if (vec->sgl[i].num != 1) {
+		if (vec->src_sgl[i].num != 1) {
 			vec->status[i] = ENOTSUP;
 			continue;
 		}
 
 		aesni_gcm_process_gmac_sgl_op(s, gdata_ctx,
-			&vec->sgl[i], vec->iv[i].va);
+			&vec->src_sgl[i], vec->iv[i].va);
 		vec->status[i] = aesni_gcm_sgl_op_finalize_encryption(s,
 			gdata_ctx, vec->digest[i].va);
 		processed += (vec->status[i] == 0);
@@ -595,13 +595,13 @@ aesni_gmac_sgl_verify(struct aesni_gcm_session *s,
 
 	processed = 0;
 	for (i = 0; i < vec->num; ++i) {
-		if (vec->sgl[i].num != 1) {
+		if (vec->src_sgl[i].num != 1) {
 			vec->status[i] = ENOTSUP;
 			continue;
 		}
 
 		aesni_gcm_process_gmac_sgl_op(s, gdata_ctx,
-			&vec->sgl[i], vec->iv[i].va);
+			&vec->src_sgl[i], vec->iv[i].va);
 		vec->status[i] = aesni_gcm_sgl_op_finalize_decryption(s,
 			gdata_ctx, vec->digest[i].va);
 		processed += (vec->status[i] == 0);
diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
index a01c826a3c..1b05099446 100644
--- a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
+++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
@@ -2002,14 +2002,14 @@ aesni_mb_cpu_crypto_process_bulk(struct rte_cryptodev *dev,
 	for (i = 0, j = 0, k = 0; i != vec->num; i++) {
 
 
-		ret = check_crypto_sgl(sofs, vec->sgl + i);
+		ret = check_crypto_sgl(sofs, vec->src_sgl + i);
 		if (ret != 0) {
 			vec->status[i] = ret;
 			continue;
 		}
 
-		buf = vec->sgl[i].vec[0].base;
-		len = vec->sgl[i].vec[0].len;
+		buf = vec->src_sgl[i].vec[0].base;
+		len = vec->src_sgl[i].vec[0].len;
 
 		job = IMB_GET_NEXT_JOB(mb_mgr);
 		if (job == NULL) {
diff --git a/drivers/crypto/qat/qat_sym_hw_dp.c b/drivers/crypto/qat/qat_sym_hw_dp.c
index ac9ac05363..4870ebf66a 100644
--- a/drivers/crypto/qat/qat_sym_hw_dp.c
+++ b/drivers/crypto/qat/qat_sym_hw_dp.c
@@ -181,8 +181,9 @@ qat_sym_dp_enqueue_cipher_jobs(void *qp_data, uint8_t *drv_ctx,
 			(uint8_t *)tx_queue->base_addr + tail);
 		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
 
-		data_len = qat_sym_dp_parse_data_vec(qp, req, vec->sgl[i].vec,
-			vec->sgl[i].num);
+		data_len = qat_sym_dp_parse_data_vec(qp, req,
+			vec->src_sgl[i].vec,
+			vec->src_sgl[i].num);
 		if (unlikely(data_len < 0))
 			break;
 		req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data[i];
@@ -302,8 +303,9 @@ qat_sym_dp_enqueue_auth_jobs(void *qp_data, uint8_t *drv_ctx,
 			(uint8_t *)tx_queue->base_addr + tail);
 		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
 
-		data_len = qat_sym_dp_parse_data_vec(qp, req, vec->sgl[i].vec,
-			vec->sgl[i].num);
+		data_len = qat_sym_dp_parse_data_vec(qp, req,
+			vec->src_sgl[i].vec,
+			vec->src_sgl[i].num);
 		if (unlikely(data_len < 0))
 			break;
 		req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data[i];
@@ -484,14 +486,16 @@ qat_sym_dp_enqueue_chain_jobs(void *qp_data, uint8_t *drv_ctx,
 			(uint8_t *)tx_queue->base_addr + tail);
 		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
 
-		data_len = qat_sym_dp_parse_data_vec(qp, req, vec->sgl[i].vec,
-			vec->sgl[i].num);
+		data_len = qat_sym_dp_parse_data_vec(qp, req,
+			vec->src_sgl[i].vec,
+			vec->src_sgl[i].num);
 		if (unlikely(data_len < 0))
 			break;
 		req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data[i];
-		if (unlikely(enqueue_one_chain_job(ctx, req, vec->sgl[i].vec,
-			vec->sgl[i].num, &vec->iv[i], &vec->digest[i],
-				&vec->auth_iv[i], ofs, (uint32_t)data_len)))
+		if (unlikely(enqueue_one_chain_job(ctx, req,
+			vec->src_sgl[i].vec, vec->src_sgl[i].num,
+			&vec->iv[i], &vec->digest[i],
+			&vec->auth_iv[i], ofs, (uint32_t)data_len)))
 			break;
 
 		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
@@ -688,8 +692,9 @@ qat_sym_dp_enqueue_aead_jobs(void *qp_data, uint8_t *drv_ctx,
 			(uint8_t *)tx_queue->base_addr + tail);
 		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
 
-		data_len = qat_sym_dp_parse_data_vec(qp, req, vec->sgl[i].vec,
-			vec->sgl[i].num);
+		data_len = qat_sym_dp_parse_data_vec(qp, req,
+			vec->src_sgl[i].vec,
+			vec->src_sgl[i].num);
 		if (unlikely(data_len < 0))
 			break;
 		req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data[i];
diff --git a/lib/cryptodev/rte_crypto_sym.h b/lib/cryptodev/rte_crypto_sym.h
index 58c0724743..dcc0bd5933 100644
--- a/lib/cryptodev/rte_crypto_sym.h
+++ b/lib/cryptodev/rte_crypto_sym.h
@@ -69,7 +69,7 @@ struct rte_crypto_sym_vec {
 	/** number of operations to perform */
 	uint32_t num;
 	/** array of SGL vectors */
-	struct rte_crypto_sgl *sgl;
+	struct rte_crypto_sgl *src_sgl;
 	/** array of pointers to cipher IV */
 	struct rte_crypto_va_iova_ptr *iv;
 	/** array of pointers to digest */
diff --git a/lib/ipsec/misc.h b/lib/ipsec/misc.h
index 79b9a20762..58ff538141 100644
--- a/lib/ipsec/misc.h
+++ b/lib/ipsec/misc.h
@@ -136,7 +136,7 @@ cpu_crypto_bulk(const struct rte_ipsec_session *ss,
 		/* not enough space in vec[] to hold all segments */
 		if (vcnt < 0) {
 			/* fill the request structure */
-			symvec.sgl = &vecpkt[j];
+			symvec.src_sgl = &vecpkt[j];
 			symvec.iv = &iv[j];
 			symvec.digest = &dgst[j];
 			symvec.aad = &aad[j];
@@ -160,7 +160,7 @@ cpu_crypto_bulk(const struct rte_ipsec_session *ss,
 	}
 
 	/* fill the request structure */
-	symvec.sgl = &vecpkt[j];
+	symvec.src_sgl = &vecpkt[j];
 	symvec.iv = &iv[j];
 	symvec.aad = &aad[j];
 	symvec.digest = &dgst[j];
-- 
2.17.1


^ permalink raw reply	[flat|nested] 115+ messages in thread

* [dpdk-dev] [PATCH v2 02/15] crypto: add total raw buffer length
  2021-09-07  7:59   ` [dpdk-dev] [PATCH v2 " Hemant Agrawal
  2021-09-07  7:59     ` [dpdk-dev] [PATCH v2 01/15] crypto: change sgl to src_sgl in vector Hemant Agrawal
@ 2021-09-07  7:59     ` Hemant Agrawal
  2021-09-16 11:42       ` Ananyev, Konstantin
  2021-09-07  7:59     ` [dpdk-dev] [PATCH v2 03/15] crypto: add dest_sgl in raw vector APIs Hemant Agrawal
                       ` (14 subsequent siblings)
  16 siblings, 1 reply; 115+ messages in thread
From: Hemant Agrawal @ 2021-09-07  7:59 UTC (permalink / raw)
  To: dev, gakhil; +Cc: konstantin.ananyev, roy.fan.zhang, Gagandeep Singh

From: Gagandeep Singh <g.singh@nxp.com>

The current crypto raw data vector is extended to support
rte_security use cases, where the total data length is needed to know
how much additional memory space is available in the buffer beyond
the data length, so that the driver/HW can write expanded-size
data after encryption.

Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
Acked-by: Akhil Goyal <gakhil@marvell.com>
---
 lib/cryptodev/rte_crypto_sym.h | 6 ++++++
 1 file changed, 6 insertions(+)
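
Note: a small illustration of the new field's intent (variable names
are illustrative). For each segment filled by rte_crypto_mbuf_to_vec(),
len carries the payload bytes while tot_len carries the underlying
buffer size (mb->buf_len), so a protocol-offload driver can bound how
far the data may grow:

	struct rte_crypto_vec v;	/* filled by rte_crypto_mbuf_to_vec() */
	uint32_t growth_room;

	/* roughly, ignoring the data offset inside the buffer */
	growth_room = v.tot_len - v.len;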

diff --git a/lib/cryptodev/rte_crypto_sym.h b/lib/cryptodev/rte_crypto_sym.h
index dcc0bd5933..e5cef1fb72 100644
--- a/lib/cryptodev/rte_crypto_sym.h
+++ b/lib/cryptodev/rte_crypto_sym.h
@@ -37,6 +37,8 @@ struct rte_crypto_vec {
 	rte_iova_t iova;
 	/** length of the data buffer */
 	uint32_t len;
+	/** total buffer length*/
+	uint32_t tot_len;
 };
 
 /**
@@ -980,12 +982,14 @@ rte_crypto_mbuf_to_vec(const struct rte_mbuf *mb, uint32_t ofs, uint32_t len,
 	seglen = mb->data_len - ofs;
 	if (len <= seglen) {
 		vec[0].len = len;
+		vec[0].tot_len = mb->buf_len;
 		return 1;
 	}
 
 	/* data spread across segments */
 	vec[0].len = seglen;
 	left = len - seglen;
+	vec[0].tot_len = mb->buf_len;
 	for (i = 1, nseg = mb->next; nseg != NULL; nseg = nseg->next, i++) {
 
 		vec[i].base = rte_pktmbuf_mtod(nseg, void *);
@@ -995,6 +999,7 @@ rte_crypto_mbuf_to_vec(const struct rte_mbuf *mb, uint32_t ofs, uint32_t len,
 		if (left <= seglen) {
 			/* whole requested data is completed */
 			vec[i].len = left;
+			vec[i].tot_len = mb->buf_len;
 			left = 0;
 			break;
 		}
@@ -1002,6 +1007,7 @@ rte_crypto_mbuf_to_vec(const struct rte_mbuf *mb, uint32_t ofs, uint32_t len,
 		/* use whole segment */
 		vec[i].len = seglen;
 		left -= seglen;
+		vec[i].tot_len = mb->buf_len;
 	}
 
 	RTE_ASSERT(left == 0);
-- 
2.17.1


^ permalink raw reply	[flat|nested] 115+ messages in thread

* [dpdk-dev] [PATCH v2 03/15] crypto: add dest_sgl in raw vector APIs
  2021-09-07  7:59   ` [dpdk-dev] [PATCH v2 " Hemant Agrawal
  2021-09-07  7:59     ` [dpdk-dev] [PATCH v2 01/15] crypto: change sgl to src_sgl in vector Hemant Agrawal
  2021-09-07  7:59     ` [dpdk-dev] [PATCH v2 02/15] crypto: add total raw buffer length Hemant Agrawal
@ 2021-09-07  7:59     ` Hemant Agrawal
  2021-09-16 11:44       ` Ananyev, Konstantin
  2021-09-07  7:59     ` [dpdk-dev] [PATCH v2 04/15] crypto: fix raw process for multi-seg case Hemant Agrawal
                       ` (13 subsequent siblings)
  16 siblings, 1 reply; 115+ messages in thread
From: Hemant Agrawal @ 2021-09-07  7:59 UTC (permalink / raw)
  To: dev, gakhil; +Cc: konstantin.ananyev, roy.fan.zhang

The structure rte_crypto_sym_vec is updated to
add dest_sgl to support out-of-place processing.

Signed-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>
Acked-by: Akhil Goyal <gakhil@marvell.com>
---
 lib/cryptodev/rte_crypto_sym.h | 2 ++
 1 file changed, 2 insertions(+)
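
Note: a sketch of how a caller selects in-place vs out-of-place with
the new field (the `oop` flag is a hypothetical placeholder; the test
app later in this series keys off sym->m_dst instead):

	struct rte_crypto_sym_vec vec;
	struct rte_crypto_sgl src_sgl, dest_sgl;

	vec.src_sgl = &src_sgl;
	if (oop)
		vec.dest_sgl = &dest_sgl;	/* separate output SGL */
	else
		vec.dest_sgl = NULL;		/* in-place processing */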

diff --git a/lib/cryptodev/rte_crypto_sym.h b/lib/cryptodev/rte_crypto_sym.h
index e5cef1fb72..978708845f 100644
--- a/lib/cryptodev/rte_crypto_sym.h
+++ b/lib/cryptodev/rte_crypto_sym.h
@@ -72,6 +72,8 @@ struct rte_crypto_sym_vec {
 	uint32_t num;
 	/** array of SGL vectors */
 	struct rte_crypto_sgl *src_sgl;
+	/** array of SGL vectors for OOP, keep it NULL for in-place */
+	struct rte_crypto_sgl *dest_sgl;
 	/** array of pointers to cipher IV */
 	struct rte_crypto_va_iova_ptr *iv;
 	/** array of pointers to digest */
-- 
2.17.1


^ permalink raw reply	[flat|nested] 115+ messages in thread

* [dpdk-dev] [PATCH v2 04/15] crypto: fix raw process for multi-seg case
  2021-09-07  7:59   ` [dpdk-dev] [PATCH v2 " Hemant Agrawal
                       ` (2 preceding siblings ...)
  2021-09-07  7:59     ` [dpdk-dev] [PATCH v2 03/15] crypto: add dest_sgl in raw vector APIs Hemant Agrawal
@ 2021-09-07  7:59     ` Hemant Agrawal
  2021-09-07  7:59     ` [dpdk-dev] [PATCH v2 05/15] crypto/dpaa2_sec: support raw datapath APIs Hemant Agrawal
                       ` (12 subsequent siblings)
  16 siblings, 0 replies; 115+ messages in thread
From: Hemant Agrawal @ 2021-09-07  7:59 UTC (permalink / raw)
  To: dev, gakhil
  Cc: konstantin.ananyev, roy.fan.zhang, Gagandeep Singh,
	marcinx.smoczynski, stable

From: Gagandeep Singh <g.singh@nxp.com>

If no next segment is available, the “for” loop exits early and it still
returns i+1, i.e. 2, which is wrong as it has filled only one buffer.

Fixes: 7adf992fb9bf ("cryptodev: introduce CPU crypto API")
Cc: marcinx.smoczynski@intel.com
Cc: stable@dpdk.org

Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
---
 lib/cryptodev/rte_crypto_sym.h | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
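
Note: a worked trace of the return value, with hypothetical sizes. Take
a single-segment mbuf holding 64 bytes of payload, ofs = 0, len = 100
(the caller asks for more than the chain holds):

	vec[0].len = 64, left = 36, i = 1
	mb->next == NULL, so the loop body never runs
	old code: return i + 1 = 2	(wrong: only vec[0] was filled)
	new code: return i     = 1	(matches the one filled vector)

On the normal early-exit path, the added i++ keeps the count correct
as well.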

diff --git a/lib/cryptodev/rte_crypto_sym.h b/lib/cryptodev/rte_crypto_sym.h
index 978708845f..a48228a646 100644
--- a/lib/cryptodev/rte_crypto_sym.h
+++ b/lib/cryptodev/rte_crypto_sym.h
@@ -1003,6 +1003,7 @@ rte_crypto_mbuf_to_vec(const struct rte_mbuf *mb, uint32_t ofs, uint32_t len,
 			vec[i].len = left;
 			vec[i].tot_len = mb->buf_len;
 			left = 0;
+			i++;
 			break;
 		}
 
@@ -1013,7 +1014,7 @@ rte_crypto_mbuf_to_vec(const struct rte_mbuf *mb, uint32_t ofs, uint32_t len,
 	}
 
 	RTE_ASSERT(left == 0);
-	return i + 1;
+	return i;
 }
 
 
-- 
2.17.1


^ permalink raw reply	[flat|nested] 115+ messages in thread

* [dpdk-dev] [PATCH v2 05/15] crypto/dpaa2_sec: support raw datapath APIs
  2021-09-07  7:59   ` [dpdk-dev] [PATCH v2 " Hemant Agrawal
                       ` (3 preceding siblings ...)
  2021-09-07  7:59     ` [dpdk-dev] [PATCH v2 04/15] crypto: fix raw process for multi-seg case Hemant Agrawal
@ 2021-09-07  7:59     ` Hemant Agrawal
  2021-09-07  7:59     ` [dpdk-dev] [PATCH v2 06/15] crypto/dpaa2_sec: support AUTH only with raw buffer APIs Hemant Agrawal
                       ` (11 subsequent siblings)
  16 siblings, 0 replies; 115+ messages in thread
From: Hemant Agrawal @ 2021-09-07  7:59 UTC (permalink / raw)
  To: dev, gakhil; +Cc: konstantin.ananyev, roy.fan.zhang, Gagandeep Singh

From: Gagandeep Singh <g.singh@nxp.com>

This patch adds the framework for raw API support.
The initial patch only tests the cipher-only part.

Signed-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>
Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
---
 doc/guides/rel_notes/release_21_11.rst      |   4 +
 drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c |  13 +-
 drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h   |  60 +-
 drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c | 595 ++++++++++++++++++++
 drivers/crypto/dpaa2_sec/meson.build        |   3 +-
 5 files changed, 646 insertions(+), 29 deletions(-)
 create mode 100644 drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c
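
Note: application-side setup sketch for the context plumbing added
below, assuming dev_id/qp_id are valid and `sess` is an already
created symmetric session (error handling trimmed):

	struct rte_crypto_raw_dp_ctx *ctx;
	union rte_cryptodev_session_ctx sess_ctx;
	int size;

	size = rte_cryptodev_get_raw_dp_ctx_size(dev_id);
	if (size < 0)
		return size;	/* raw data path not supported */

	ctx = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
	if (ctx == NULL)
		return -ENOMEM;

	sess_ctx.crypto_sess = sess;
	if (rte_cryptodev_configure_raw_dp_ctx(dev_id, qp_id, ctx,
			RTE_CRYPTO_OP_WITH_SESSION, sess_ctx, 0) < 0)
		; /* handle failure and rte_free(ctx) */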

diff --git a/doc/guides/rel_notes/release_21_11.rst b/doc/guides/rel_notes/release_21_11.rst
index b55900936d..9cbe960dbe 100644
--- a/doc/guides/rel_notes/release_21_11.rst
+++ b/doc/guides/rel_notes/release_21_11.rst
@@ -72,6 +72,10 @@ New Features
 
   * Added event crypto adapter OP_FORWARD mode support.
 
+* **Updated NXP dpaa2_sec crypto PMD.**
+
+  * Added raw vector datapath API support
+
 
 Removed Items
 -------------
diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c b/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
index 1ccead3641..fe90d9d2d8 100644
--- a/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
@@ -49,15 +49,8 @@
 #define FSL_MC_DPSECI_DEVID     3
 
 #define NO_PREFETCH 0
-/* FLE_POOL_NUM_BUFS is set as per the ipsec-secgw application */
-#define FLE_POOL_NUM_BUFS	32000
-#define FLE_POOL_BUF_SIZE	256
-#define FLE_POOL_CACHE_SIZE	512
-#define FLE_SG_MEM_SIZE(num)	(FLE_POOL_BUF_SIZE + ((num) * 32))
-#define SEC_FLC_DHR_OUTBOUND	-114
-#define SEC_FLC_DHR_INBOUND	0
 
-static uint8_t cryptodev_driver_id;
+uint8_t cryptodev_driver_id;
 
 #ifdef RTE_LIB_SECURITY
 static inline int
@@ -3805,6 +3798,9 @@ static struct rte_cryptodev_ops crypto_ops = {
 	.sym_session_get_size     = dpaa2_sec_sym_session_get_size,
 	.sym_session_configure    = dpaa2_sec_sym_session_configure,
 	.sym_session_clear        = dpaa2_sec_sym_session_clear,
+	/* Raw data-path API related operations */
+	.sym_get_raw_dp_ctx_size = dpaa2_sec_get_dp_ctx_size,
+	.sym_configure_raw_dp_ctx = dpaa2_sec_configure_raw_dp_ctx,
 };
 
 #ifdef RTE_LIB_SECURITY
@@ -3887,6 +3883,7 @@ dpaa2_sec_dev_init(struct rte_cryptodev *cryptodev)
 			RTE_CRYPTODEV_FF_HW_ACCELERATED |
 			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
 			RTE_CRYPTODEV_FF_SECURITY |
+			RTE_CRYPTODEV_FF_SYM_RAW_DP |
 			RTE_CRYPTODEV_FF_IN_PLACE_SGL |
 			RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT |
 			RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h b/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h
index 7dbc69f6cb..860c9b6520 100644
--- a/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h
@@ -15,6 +15,16 @@
 #define CRYPTODEV_NAME_DPAA2_SEC_PMD	crypto_dpaa2_sec
 /**< NXP DPAA2 - SEC PMD device name */
 
+extern uint8_t cryptodev_driver_id;
+
+/* FLE_POOL_NUM_BUFS is set as per the ipsec-secgw application */
+#define FLE_POOL_NUM_BUFS	32000
+#define FLE_POOL_BUF_SIZE	256
+#define FLE_POOL_CACHE_SIZE	512
+#define FLE_SG_MEM_SIZE(num)	(FLE_POOL_BUF_SIZE + ((num) * 32))
+#define SEC_FLC_DHR_OUTBOUND	-114
+#define SEC_FLC_DHR_INBOUND	0
+
 #define MAX_QUEUES		64
 #define MAX_DESC_SIZE		64
 /** private data structure for each DPAA2_SEC device */
@@ -158,6 +168,24 @@ struct dpaa2_pdcp_ctxt {
 	uint32_t hfn_threshold;	/*!< HFN Threashold for key renegotiation */
 };
 #endif
+
+typedef int (*dpaa2_sec_build_fd_t)(
+	void *qp, uint8_t *drv_ctx, struct rte_crypto_vec *data_vec,
+	uint16_t n_data_vecs, union rte_crypto_sym_ofs ofs,
+	struct rte_crypto_va_iova_ptr *iv,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *aad_or_auth_iv,
+	void *user_data);
+
+typedef int (*dpaa2_sec_build_raw_dp_fd_t)(uint8_t *drv_ctx,
+		       struct rte_crypto_sgl *sgl,
+		       struct rte_crypto_va_iova_ptr *iv,
+		       struct rte_crypto_va_iova_ptr *digest,
+		       struct rte_crypto_va_iova_ptr *auth_iv,
+		       union rte_crypto_sym_ofs ofs,
+		       void *userdata,
+		       struct qbman_fd *fd);
+
 typedef struct dpaa2_sec_session_entry {
 	void *ctxt;
 	uint8_t ctxt_type;
@@ -165,6 +193,8 @@ typedef struct dpaa2_sec_session_entry {
 	enum rte_crypto_cipher_algorithm cipher_alg; /*!< Cipher Algorithm*/
 	enum rte_crypto_auth_algorithm auth_alg; /*!< Authentication Algorithm*/
 	enum rte_crypto_aead_algorithm aead_alg; /*!< AEAD Algorithm*/
+	dpaa2_sec_build_fd_t build_fd;
+	dpaa2_sec_build_raw_dp_fd_t build_raw_dp_fd;
 	union {
 		struct {
 			uint8_t *data;	/**< pointer to key data */
@@ -547,26 +577,6 @@ static const struct rte_cryptodev_capabilities dpaa2_sec_capabilities[] = {
 			}, }
 		}, }
 	},
-	{	/* NULL (CIPHER) */
-		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
-		{.sym = {
-			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
-			{.cipher = {
-				.algo = RTE_CRYPTO_CIPHER_NULL,
-				.block_size = 1,
-				.key_size = {
-					.min = 0,
-					.max = 0,
-					.increment = 0
-				},
-				.iv_size = {
-					.min = 0,
-					.max = 0,
-					.increment = 0
-				}
-			}, },
-		}, }
-	},
 	{	/* AES CBC */
 		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
 		{.sym = {
@@ -974,4 +984,14 @@ calc_chksum(void *buffer, int len)
 	return  result;
 }
 
+int
+dpaa2_sec_configure_raw_dp_ctx(struct rte_cryptodev *dev, uint16_t qp_id,
+	struct rte_crypto_raw_dp_ctx *raw_dp_ctx,
+	enum rte_crypto_op_sess_type sess_type,
+	union rte_cryptodev_session_ctx session_ctx, uint8_t is_update);
+
+int
+dpaa2_sec_get_dp_ctx_size(struct rte_cryptodev *dev);
+
+
 #endif /* _DPAA2_SEC_PMD_PRIVATE_H_ */
diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c b/drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c
new file mode 100644
index 0000000000..32abf5a431
--- /dev/null
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c
@@ -0,0 +1,595 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2021 NXP
+ */
+
+#include <rte_cryptodev_pmd.h>
+#include <rte_fslmc.h>
+#include <fslmc_vfio.h>
+#include <dpaa2_hw_pvt.h>
+#include <dpaa2_hw_dpio.h>
+
+#include "dpaa2_sec_priv.h"
+#include "dpaa2_sec_logs.h"
+
+struct dpaa2_sec_raw_dp_ctx {
+	dpaa2_sec_session *session;
+	uint32_t tail;
+	uint32_t head;
+	uint16_t cached_enqueue;
+	uint16_t cached_dequeue;
+};
+
+static int
+build_raw_dp_chain_fd(uint8_t *drv_ctx,
+		       struct rte_crypto_sgl *sgl,
+		       struct rte_crypto_va_iova_ptr *iv,
+		       struct rte_crypto_va_iova_ptr *digest,
+		       struct rte_crypto_va_iova_ptr *auth_iv,
+		       union rte_crypto_sym_ofs ofs,
+		       void *userdata,
+		       struct qbman_fd *fd)
+{
+	RTE_SET_USED(drv_ctx);
+	RTE_SET_USED(sgl);
+	RTE_SET_USED(iv);
+	RTE_SET_USED(digest);
+	RTE_SET_USED(auth_iv);
+	RTE_SET_USED(ofs);
+	RTE_SET_USED(userdata);
+	RTE_SET_USED(fd);
+
+	return 0;
+}
+
+static int
+build_raw_dp_aead_fd(uint8_t *drv_ctx,
+		       struct rte_crypto_sgl *sgl,
+		       struct rte_crypto_va_iova_ptr *iv,
+		       struct rte_crypto_va_iova_ptr *digest,
+		       struct rte_crypto_va_iova_ptr *auth_iv,
+		       union rte_crypto_sym_ofs ofs,
+		       void *userdata,
+		       struct qbman_fd *fd)
+{
+	RTE_SET_USED(drv_ctx);
+	RTE_SET_USED(sgl);
+	RTE_SET_USED(iv);
+	RTE_SET_USED(digest);
+	RTE_SET_USED(auth_iv);
+	RTE_SET_USED(ofs);
+	RTE_SET_USED(userdata);
+	RTE_SET_USED(fd);
+
+	return 0;
+}
+
+static int
+build_raw_dp_auth_fd(uint8_t *drv_ctx,
+		       struct rte_crypto_sgl *sgl,
+		       struct rte_crypto_va_iova_ptr *iv,
+		       struct rte_crypto_va_iova_ptr *digest,
+		       struct rte_crypto_va_iova_ptr *auth_iv,
+		       union rte_crypto_sym_ofs ofs,
+		       void *userdata,
+		       struct qbman_fd *fd)
+{
+	RTE_SET_USED(drv_ctx);
+	RTE_SET_USED(sgl);
+	RTE_SET_USED(iv);
+	RTE_SET_USED(digest);
+	RTE_SET_USED(auth_iv);
+	RTE_SET_USED(ofs);
+	RTE_SET_USED(userdata);
+	RTE_SET_USED(fd);
+
+	return 0;
+}
+
+static int
+build_raw_dp_proto_fd(uint8_t *drv_ctx,
+		       struct rte_crypto_sgl *sgl,
+		       struct rte_crypto_va_iova_ptr *iv,
+		       struct rte_crypto_va_iova_ptr *digest,
+		       struct rte_crypto_va_iova_ptr *auth_iv,
+		       union rte_crypto_sym_ofs ofs,
+		       void *userdata,
+		       struct qbman_fd *fd)
+{
+	RTE_SET_USED(drv_ctx);
+	RTE_SET_USED(sgl);
+	RTE_SET_USED(iv);
+	RTE_SET_USED(digest);
+	RTE_SET_USED(auth_iv);
+	RTE_SET_USED(ofs);
+	RTE_SET_USED(userdata);
+	RTE_SET_USED(fd);
+
+	return 0;
+}
+
+static int
+build_raw_dp_proto_compound_fd(uint8_t *drv_ctx,
+		       struct rte_crypto_sgl *sgl,
+		       struct rte_crypto_va_iova_ptr *iv,
+		       struct rte_crypto_va_iova_ptr *digest,
+		       struct rte_crypto_va_iova_ptr *auth_iv,
+		       union rte_crypto_sym_ofs ofs,
+		       void *userdata,
+		       struct qbman_fd *fd)
+{
+	RTE_SET_USED(drv_ctx);
+	RTE_SET_USED(sgl);
+	RTE_SET_USED(iv);
+	RTE_SET_USED(digest);
+	RTE_SET_USED(auth_iv);
+	RTE_SET_USED(ofs);
+	RTE_SET_USED(userdata);
+	RTE_SET_USED(fd);
+
+	return 0;
+}
+
+static int
+build_raw_dp_cipher_fd(uint8_t *drv_ctx,
+		       struct rte_crypto_sgl *sgl,
+		       struct rte_crypto_va_iova_ptr *iv,
+		       struct rte_crypto_va_iova_ptr *digest,
+		       struct rte_crypto_va_iova_ptr *auth_iv,
+		       union rte_crypto_sym_ofs ofs,
+		       void *userdata,
+		       struct qbman_fd *fd)
+{
+	RTE_SET_USED(digest);
+	RTE_SET_USED(auth_iv);
+
+	dpaa2_sec_session *sess =
+		((struct dpaa2_sec_raw_dp_ctx *)drv_ctx)->session;
+	struct qbman_fle *ip_fle, *op_fle, *sge, *fle;
+	int total_len = 0, data_len = 0, data_offset;
+	struct sec_flow_context *flc;
+	struct ctxt_priv *priv = sess->ctxt;
+	unsigned int i;
+
+	for (i = 0; i < sgl->num; i++)
+		total_len += sgl->vec[i].len;
+
+	data_len = total_len - ofs.ofs.cipher.head - ofs.ofs.cipher.tail;
+	data_offset = ofs.ofs.cipher.head;
+
+	if (sess->cipher_alg == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
+		sess->cipher_alg == RTE_CRYPTO_CIPHER_ZUC_EEA3) {
+		if ((data_len & 7) || (data_offset & 7)) {
+			DPAA2_SEC_ERR("CIPHER: len/offset must be full bytes");
+			return -ENOTSUP;
+		}
+
+		data_len = data_len >> 3;
+		data_offset = data_offset >> 3;
+	}
+
+	/* first FLE entry used to store mbuf and session ctxt */
+	fle = (struct qbman_fle *)rte_malloc(NULL,
+			FLE_SG_MEM_SIZE(2*sgl->num),
+			RTE_CACHE_LINE_SIZE);
+	if (!fle) {
+		DPAA2_SEC_ERR("RAW CIPHER SG: Memory alloc failed for SGE");
+		return -ENOMEM;
+	}
+	memset(fle, 0, FLE_SG_MEM_SIZE(2*sgl->num));
+	/* first FLE entry used to store userdata and session ctxt */
+	DPAA2_SET_FLE_ADDR(fle, (size_t)userdata);
+	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
+
+	op_fle = fle + 1;
+	ip_fle = fle + 2;
+	sge = fle + 3;
+
+	flc = &priv->flc_desc[0].flc;
+
+	DPAA2_SEC_DP_DEBUG(
+		"RAW CIPHER SG: cipher_off: 0x%x/length %d, ivlen=%d\n",
+		data_offset,
+		data_len,
+		sess->iv.length);
+
+	/* o/p fle */
+	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));
+	op_fle->length = data_len;
+	DPAA2_SET_FLE_SG_EXT(op_fle);
+
+	/* o/p 1st seg */
+	DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
+	DPAA2_SET_FLE_OFFSET(sge, data_offset);
+	sge->length = sgl->vec[0].len - data_offset;
+
+	/* o/p segs */
+	for (i = 1; i < sgl->num; i++) {
+		sge++;
+		DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
+		DPAA2_SET_FLE_OFFSET(sge, 0);
+		sge->length = sgl->vec[i].len;
+	}
+	DPAA2_SET_FLE_FIN(sge);
+
+	DPAA2_SEC_DP_DEBUG(
+		"RAW CIPHER SG: 1 - flc = %p, fle = %p FLEaddr = %x-%x, len %d\n",
+		flc, fle, fle->addr_hi, fle->addr_lo,
+		fle->length);
+
+	/* i/p fle */
+	sge++;
+	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
+	ip_fle->length = sess->iv.length + data_len;
+	DPAA2_SET_FLE_SG_EXT(ip_fle);
+
+	/* i/p IV */
+	DPAA2_SET_FLE_ADDR(sge, iv->iova);
+	DPAA2_SET_FLE_OFFSET(sge, 0);
+	sge->length = sess->iv.length;
+
+	sge++;
+
+	/* i/p 1st seg */
+	DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
+	DPAA2_SET_FLE_OFFSET(sge, data_offset);
+	sge->length = sgl->vec[0].len - data_offset;
+
+	/* i/p segs */
+	for (i = 1; i < sgl->num; i++) {
+		sge++;
+		DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
+		DPAA2_SET_FLE_OFFSET(sge, 0);
+		sge->length = sgl->vec[i].len;
+	}
+	DPAA2_SET_FLE_FIN(sge);
+	DPAA2_SET_FLE_FIN(ip_fle);
+
+	/* sg fd */
+	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
+	DPAA2_SET_FD_LEN(fd, ip_fle->length);
+	DPAA2_SET_FD_COMPOUND_FMT(fd);
+	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
+
+	DPAA2_SEC_DP_DEBUG(
+		"RAW CIPHER SG: fdaddr =%" PRIx64 " off =%d, len =%d\n",
+		DPAA2_GET_FD_ADDR(fd),
+		DPAA2_GET_FD_OFFSET(fd),
+		DPAA2_GET_FD_LEN(fd));
+
+	return 0;
+}
+
+static __rte_always_inline uint32_t
+dpaa2_sec_raw_enqueue_burst(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
+	void *user_data[], int *status)
+{
+	RTE_SET_USED(user_data);
+	uint32_t loop;
+	int32_t ret;
+	struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
+	uint32_t frames_to_send, retry_count;
+	struct qbman_eq_desc eqdesc;
+	struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp_data;
+	dpaa2_sec_session *sess =
+		((struct dpaa2_sec_raw_dp_ctx *)drv_ctx)->session;
+	struct qbman_swp *swp;
+	uint16_t num_tx = 0;
+	uint32_t flags[MAX_TX_RING_SLOTS] = {0};
+
+	if (unlikely(vec->num == 0))
+		return 0;
+
+	if (sess == NULL) {
+		DPAA2_SEC_ERR("sessionless raw crypto not supported");
+		return 0;
+	}
+	/*Prepare enqueue descriptor*/
+	qbman_eq_desc_clear(&eqdesc);
+	qbman_eq_desc_set_no_orp(&eqdesc, DPAA2_EQ_RESP_ERR_FQ);
+	qbman_eq_desc_set_response(&eqdesc, 0, 0);
+	qbman_eq_desc_set_fq(&eqdesc, dpaa2_qp->tx_vq.fqid);
+
+	if (!DPAA2_PER_LCORE_DPIO) {
+		ret = dpaa2_affine_qbman_swp();
+		if (ret) {
+			DPAA2_SEC_ERR(
+				"Failed to allocate IO portal, tid: %d\n",
+				rte_gettid());
+			return 0;
+		}
+	}
+	swp = DPAA2_PER_LCORE_PORTAL;
+
+	while (vec->num) {
+		frames_to_send = (vec->num > dpaa2_eqcr_size) ?
+			dpaa2_eqcr_size : vec->num;
+
+		for (loop = 0; loop < frames_to_send; loop++) {
+			/*Clear the unused FD fields before sending*/
+			memset(&fd_arr[loop], 0, sizeof(struct qbman_fd));
+			ret = sess->build_raw_dp_fd(drv_ctx,
+						    &vec->src_sgl[loop],
+						    &vec->iv[loop],
+						    &vec->digest[loop],
+						    &vec->auth_iv[loop],
+						    ofs,
+						    user_data[loop],
+						    &fd_arr[loop]);
+			if (ret) {
+				DPAA2_SEC_ERR("error: Improper packet contents"
+					      " for crypto operation");
+				goto skip_tx;
+			}
+			status[loop] = 1;
+		}
+
+		loop = 0;
+		retry_count = 0;
+		while (loop < frames_to_send) {
+			ret = qbman_swp_enqueue_multiple(swp, &eqdesc,
+							 &fd_arr[loop],
+							 &flags[loop],
+							 frames_to_send - loop);
+			if (unlikely(ret < 0)) {
+				retry_count++;
+				if (retry_count > DPAA2_MAX_TX_RETRY_COUNT) {
+					num_tx += loop;
+					vec->num -= loop;
+					goto skip_tx;
+				}
+			} else {
+				loop += ret;
+				retry_count = 0;
+			}
+		}
+
+		num_tx += loop;
+		vec->num -= loop;
+	}
+skip_tx:
+	dpaa2_qp->tx_vq.tx_pkts += num_tx;
+	dpaa2_qp->tx_vq.err_pkts += vec->num;
+
+	return num_tx;
+}
+
+static __rte_always_inline int
+dpaa2_sec_raw_enqueue(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_vec *data_vec,
+	uint16_t n_data_vecs, union rte_crypto_sym_ofs ofs,
+	struct rte_crypto_va_iova_ptr *iv,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *aad_or_auth_iv,
+	void *user_data)
+{
+	RTE_SET_USED(qp_data);
+	RTE_SET_USED(drv_ctx);
+	RTE_SET_USED(data_vec);
+	RTE_SET_USED(n_data_vecs);
+	RTE_SET_USED(ofs);
+	RTE_SET_USED(iv);
+	RTE_SET_USED(digest);
+	RTE_SET_USED(aad_or_auth_iv);
+	RTE_SET_USED(user_data);
+
+	return 0;
+}
+
+static inline void *
+sec_fd_to_userdata(const struct qbman_fd *fd)
+{
+	struct qbman_fle *fle;
+	void *userdata;
+	fle = (struct qbman_fle *)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));
+
+	DPAA2_SEC_DP_DEBUG("FLE addr = %x - %x, offset = %x\n",
+			   fle->addr_hi, fle->addr_lo, fle->fin_bpid_offset);
+	userdata = (struct rte_crypto_op *)DPAA2_GET_FLE_ADDR((fle - 1));
+	/* free the fle memory */
+	rte_free((void *)(fle-1));
+
+	return userdata;
+}
+
+static __rte_always_inline uint32_t
+dpaa2_sec_raw_dequeue_burst(void *qp_data, uint8_t *drv_ctx,
+	rte_cryptodev_raw_get_dequeue_count_t get_dequeue_count,
+	uint32_t max_nb_to_dequeue,
+	rte_cryptodev_raw_post_dequeue_t post_dequeue,
+	void **out_user_data, uint8_t is_user_data_array,
+	uint32_t *n_success, int *dequeue_status)
+{
+	RTE_SET_USED(drv_ctx);
+	RTE_SET_USED(get_dequeue_count);
+
+	/* This function receives frames for a given device and VQ */
+	struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp_data;
+	struct qbman_result *dq_storage;
+	uint32_t fqid = dpaa2_qp->rx_vq.fqid;
+	int ret, num_rx = 0;
+	uint8_t is_last = 0, status;
+	struct qbman_swp *swp;
+	const struct qbman_fd *fd;
+	struct qbman_pull_desc pulldesc;
+	void *user_data;
+	uint32_t nb_ops = max_nb_to_dequeue;
+
+	if (!DPAA2_PER_LCORE_DPIO) {
+		ret = dpaa2_affine_qbman_swp();
+		if (ret) {
+			DPAA2_SEC_ERR(
+				"Failed to allocate IO portal, tid: %d\n",
+				rte_gettid());
+			return 0;
+		}
+	}
+	swp = DPAA2_PER_LCORE_PORTAL;
+	dq_storage = dpaa2_qp->rx_vq.q_storage->dq_storage[0];
+
+	qbman_pull_desc_clear(&pulldesc);
+	qbman_pull_desc_set_numframes(&pulldesc,
+				      (nb_ops > dpaa2_dqrr_size) ?
+				      dpaa2_dqrr_size : nb_ops);
+	qbman_pull_desc_set_fq(&pulldesc, fqid);
+	qbman_pull_desc_set_storage(&pulldesc, dq_storage,
+				    (uint64_t)DPAA2_VADDR_TO_IOVA(dq_storage),
+				    1);
+
+	/*Issue a volatile dequeue command. */
+	while (1) {
+		if (qbman_swp_pull(swp, &pulldesc)) {
+			DPAA2_SEC_WARN(
+				"SEC VDQ command is not issued: QBMAN busy");
+			/* Portal was busy, try again */
+			continue;
+		}
+		break;
+	};
+
+	/* Receive the packets till Last Dequeue entry is found with
+	 * respect to the above issued PULL command.
+	 */
+	while (!is_last) {
+		/* Check if the previously issued command is completed.
+		 * Also seems like the SWP is shared between the Ethernet Driver
+		 * and the SEC driver.
+		 */
+		while (!qbman_check_command_complete(dq_storage))
+			;
+
+		/* Loop until the dq_storage is updated with
+		 * new token by QBMAN
+		 */
+		while (!qbman_check_new_result(dq_storage))
+			;
+		/* Check whether Last Pull command is Expired and
+		 * setting Condition for Loop termination
+		 */
+		if (qbman_result_DQ_is_pull_complete(dq_storage)) {
+			is_last = 1;
+			/* Check for valid frame. */
+			status = (uint8_t)qbman_result_DQ_flags(dq_storage);
+			if (unlikely(
+				(status & QBMAN_DQ_STAT_VALIDFRAME) == 0)) {
+				DPAA2_SEC_DP_DEBUG("No frame is delivered\n");
+				continue;
+			}
+		}
+
+		fd = qbman_result_DQ_fd(dq_storage);
+		user_data = sec_fd_to_userdata(fd);
+		if (is_user_data_array)
+			out_user_data[num_rx] = user_data;
+		else
+			out_user_data[0] = user_data;
+		if (unlikely(fd->simple.frc)) {
+			/* TODO Parse SEC errors */
+			DPAA2_SEC_ERR("SEC returned Error - %x",
+				      fd->simple.frc);
+			status = RTE_CRYPTO_OP_STATUS_ERROR;
+		} else {
+			status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+		}
+		post_dequeue(user_data, num_rx, status);
+
+		num_rx++;
+		dq_storage++;
+	} /* End of Packet Rx loop */
+
+	dpaa2_qp->rx_vq.rx_pkts += num_rx;
+	*dequeue_status = 1;
+	*n_success = num_rx;
+
+	DPAA2_SEC_DP_DEBUG("SEC Received %d Packets\n", num_rx);
+	/*Return the total number of packets received to DPAA2 app*/
+	return num_rx;
+}
+
+static __rte_always_inline void *
+dpaa2_sec_raw_dequeue(void *qp_data, uint8_t *drv_ctx, int *dequeue_status,
+		enum rte_crypto_op_status *op_status)
+{
+	RTE_SET_USED(qp_data);
+	RTE_SET_USED(drv_ctx);
+	RTE_SET_USED(dequeue_status);
+	RTE_SET_USED(op_status);
+
+	return NULL;
+}
+
+static __rte_always_inline int
+dpaa2_sec_raw_enqueue_done(void *qp_data, uint8_t *drv_ctx, uint32_t n)
+{
+	RTE_SET_USED(qp_data);
+	RTE_SET_USED(drv_ctx);
+	RTE_SET_USED(n);
+
+	return 0;
+}
+
+static __rte_always_inline int
+dpaa2_sec_raw_dequeue_done(void *qp_data, uint8_t *drv_ctx, uint32_t n)
+{
+	RTE_SET_USED(qp_data);
+	RTE_SET_USED(drv_ctx);
+	RTE_SET_USED(n);
+
+	return 0;
+}
+
+int
+dpaa2_sec_configure_raw_dp_ctx(struct rte_cryptodev *dev, uint16_t qp_id,
+	struct rte_crypto_raw_dp_ctx *raw_dp_ctx,
+	enum rte_crypto_op_sess_type sess_type,
+	union rte_cryptodev_session_ctx session_ctx, uint8_t is_update)
+{
+	dpaa2_sec_session *sess;
+	struct dpaa2_sec_raw_dp_ctx *dp_ctx;
+	RTE_SET_USED(qp_id);
+
+	if (!is_update) {
+		memset(raw_dp_ctx, 0, sizeof(*raw_dp_ctx));
+		raw_dp_ctx->qp_data = dev->data->queue_pairs[qp_id];
+	}
+
+	if (sess_type == RTE_CRYPTO_OP_SECURITY_SESSION)
+		sess = (dpaa2_sec_session *)get_sec_session_private_data(
+				session_ctx.sec_sess);
+	else if (sess_type == RTE_CRYPTO_OP_WITH_SESSION)
+		sess = (dpaa2_sec_session *)get_sym_session_private_data(
+			session_ctx.crypto_sess, cryptodev_driver_id);
+	else
+		return -ENOTSUP;
+	raw_dp_ctx->dequeue_burst = dpaa2_sec_raw_dequeue_burst;
+	raw_dp_ctx->dequeue = dpaa2_sec_raw_dequeue;
+	raw_dp_ctx->dequeue_done = dpaa2_sec_raw_dequeue_done;
+	raw_dp_ctx->enqueue_burst = dpaa2_sec_raw_enqueue_burst;
+	raw_dp_ctx->enqueue = dpaa2_sec_raw_enqueue;
+	raw_dp_ctx->enqueue_done = dpaa2_sec_raw_enqueue_done;
+
+	if (sess->ctxt_type == DPAA2_SEC_CIPHER_HASH)
+		sess->build_raw_dp_fd = build_raw_dp_chain_fd;
+	else if (sess->ctxt_type == DPAA2_SEC_AEAD)
+		sess->build_raw_dp_fd = build_raw_dp_aead_fd;
+	else if (sess->ctxt_type == DPAA2_SEC_AUTH)
+		sess->build_raw_dp_fd = build_raw_dp_auth_fd;
+	else if (sess->ctxt_type == DPAA2_SEC_CIPHER)
+		sess->build_raw_dp_fd = build_raw_dp_cipher_fd;
+	else if (sess->ctxt_type == DPAA2_SEC_IPSEC)
+		sess->build_raw_dp_fd = build_raw_dp_proto_fd;
+	else if (sess->ctxt_type == DPAA2_SEC_PDCP)
+		sess->build_raw_dp_fd = build_raw_dp_proto_compound_fd;
+	else
+		return -ENOTSUP;
+	dp_ctx = (struct dpaa2_sec_raw_dp_ctx *)raw_dp_ctx->drv_ctx_data;
+	dp_ctx->session = sess;
+
+	return 0;
+}
+
+int
+dpaa2_sec_get_dp_ctx_size(__rte_unused struct rte_cryptodev *dev)
+{
+	return sizeof(struct dpaa2_sec_raw_dp_ctx);
+}
diff --git a/drivers/crypto/dpaa2_sec/meson.build b/drivers/crypto/dpaa2_sec/meson.build
index ea1d73a13d..e6e5abb3c1 100644
--- a/drivers/crypto/dpaa2_sec/meson.build
+++ b/drivers/crypto/dpaa2_sec/meson.build
@@ -1,5 +1,5 @@
 # SPDX-License-Identifier: BSD-3-Clause
-# Copyright 2018 NXP
+# Copyright 2018,2021 NXP
 
 if not is_linux
     build = false
@@ -9,6 +9,7 @@ endif
 deps += ['security', 'mempool_dpaa2']
 sources = files(
         'dpaa2_sec_dpseci.c',
+	'dpaa2_sec_raw_dp.c',
         'mc/dpseci.c',
 )
 
-- 
2.17.1


^ permalink raw reply	[flat|nested] 115+ messages in thread

* [dpdk-dev] [PATCH v2 06/15] crypto/dpaa2_sec: support AUTH only with raw buffer APIs
  2021-09-07  7:59   ` [dpdk-dev] [PATCH v2 " Hemant Agrawal
                       ` (4 preceding siblings ...)
  2021-09-07  7:59     ` [dpdk-dev] [PATCH v2 05/15] crypto/dpaa2_sec: support raw datapath APIs Hemant Agrawal
@ 2021-09-07  7:59     ` Hemant Agrawal
  2021-09-07  7:59     ` [dpdk-dev] [PATCH v2 07/15] crypto/dpaa2_sec: support AUTHENC " Hemant Agrawal
                       ` (10 subsequent siblings)
  16 siblings, 0 replies; 115+ messages in thread
From: Hemant Agrawal @ 2021-09-07  7:59 UTC (permalink / raw)
  To: dev, gakhil; +Cc: konstantin.ananyev, roy.fan.zhang, Gagandeep Singh

From: Gagandeep Singh <g.singh@nxp.com>

This patch adds support for auth-only operations with the raw buffer APIs.

Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
---
 drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h   |  21 ----
 drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c | 114 ++++++++++++++++++--
 2 files changed, 108 insertions(+), 27 deletions(-)

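For context, an application reaches the FD builders in this series through
the generic raw datapath API. A minimal sketch of the enqueue side follows
(session creation, queue setup and vector fill are assumed to happen
elsewhere; error handling is omitted):

#include <stdlib.h>
#include <rte_cryptodev.h>

static struct rte_crypto_raw_dp_ctx *
setup_raw_ctx(uint8_t dev_id, uint16_t qp_id,
	      struct rte_cryptodev_sym_session *sess)
{
	union rte_cryptodev_session_ctx sess_ctx = { .crypto_sess = sess };
	struct rte_crypto_raw_dp_ctx *ctx;

	/* per the API docs, the returned size covers the public context
	 * plus the PMD private area reported by the driver
	 */
	ctx = malloc(rte_cryptodev_get_raw_dp_ctx_size(dev_id));
	rte_cryptodev_configure_raw_dp_ctx(dev_id, qp_id, ctx,
			RTE_CRYPTO_OP_WITH_SESSION, sess_ctx, 0);
	return ctx;
}

static uint32_t
enqueue_auth_raw(struct rte_crypto_raw_dp_ctx *ctx,
		 struct rte_crypto_sym_vec *vec, void *udata[])
{
	union rte_crypto_sym_ofs ofs = { .raw = 0 };
	int enq_status;
	uint32_t n;

	/* ofs.ofs.auth.head/.tail trim each SGL to the signed region */
	n = rte_cryptodev_raw_enqueue_burst(ctx, vec, ofs, udata,
			&enq_status);
	if (enq_status == 0)	/* per the raw DP API, 0 means the ops
				 * were only cached: kick the queue
				 */
		rte_cryptodev_raw_enqueue_done(ctx, n);
	return n;
}
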
diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h b/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h
index 860c9b6520..f6507855e3 100644
--- a/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h
@@ -231,27 +231,6 @@ typedef struct dpaa2_sec_session_entry {
 
 static const struct rte_cryptodev_capabilities dpaa2_sec_capabilities[] = {
 	/* Symmetric capabilities */
-	{	/* NULL (AUTH) */
-		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
-		{.sym = {
-			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
-			{.auth = {
-				.algo = RTE_CRYPTO_AUTH_NULL,
-				.block_size = 1,
-				.key_size = {
-					.min = 0,
-					.max = 0,
-					.increment = 0
-				},
-				.digest_size = {
-					.min = 0,
-					.max = 0,
-					.increment = 0
-				},
-				.iv_size = { 0 }
-			}, },
-		}, },
-	},
 	{	/* MD5 */
 		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
 		{.sym = {
diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c b/drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c
index 32abf5a431..af052202d9 100644
--- a/drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c
@@ -11,6 +11,8 @@
 #include "dpaa2_sec_priv.h"
 #include "dpaa2_sec_logs.h"
 
+#include <desc/algo.h>
+
 struct dpaa2_sec_raw_dp_ctx {
 	dpaa2_sec_session *session;
 	uint32_t tail;
@@ -73,14 +75,114 @@ build_raw_dp_auth_fd(uint8_t *drv_ctx,
 		       void *userdata,
 		       struct qbman_fd *fd)
 {
-	RTE_SET_USED(drv_ctx);
-	RTE_SET_USED(sgl);
 	RTE_SET_USED(iv);
-	RTE_SET_USED(digest);
 	RTE_SET_USED(auth_iv);
-	RTE_SET_USED(ofs);
-	RTE_SET_USED(userdata);
-	RTE_SET_USED(fd);
+
+	dpaa2_sec_session *sess =
+		((struct dpaa2_sec_raw_dp_ctx *)drv_ctx)->session;
+	struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
+	struct sec_flow_context *flc;
+	int total_len = 0, data_len = 0, data_offset;
+	uint8_t *old_digest;
+	struct ctxt_priv *priv = sess->ctxt;
+	unsigned int i;
+
+	for (i = 0; i < sgl->num; i++)
+		total_len += sgl->vec[i].len;
+
+	data_len = total_len - ofs.ofs.auth.head - ofs.ofs.auth.tail;
+	data_offset = ofs.ofs.auth.head;
+
+	if (sess->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
+		sess->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
+		if ((data_len & 7) || (data_offset & 7)) {
+			DPAA2_SEC_ERR("AUTH: len/offset must be full bytes");
+			return -ENOTSUP;
+		}
+
+		data_len = data_len >> 3;
+		data_offset = data_offset >> 3;
+	}
+	fle = (struct qbman_fle *)rte_malloc(NULL,
+		FLE_SG_MEM_SIZE(2 * sgl->num),
+			RTE_CACHE_LINE_SIZE);
+	if (unlikely(!fle)) {
+		DPAA2_SEC_ERR("AUTH SG: Memory alloc failed for SGE");
+		return -ENOMEM;
+	}
+	memset(fle, 0, FLE_SG_MEM_SIZE(2*sgl->num));
+	/* first FLE entry used to store mbuf and session ctxt */
+	DPAA2_SET_FLE_ADDR(fle, (size_t)userdata);
+	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
+	op_fle = fle + 1;
+	ip_fle = fle + 2;
+	sge = fle + 3;
+
+	flc = &priv->flc_desc[DESC_INITFINAL].flc;
+
+	/* sg FD */
+	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
+	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
+	DPAA2_SET_FD_COMPOUND_FMT(fd);
+
+	/* o/p fle */
+	DPAA2_SET_FLE_ADDR(op_fle,
+			DPAA2_VADDR_TO_IOVA(digest->va));
+	op_fle->length = sess->digest_length;
+
+	/* i/p fle */
+	DPAA2_SET_FLE_SG_EXT(ip_fle);
+	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
+	ip_fle->length = data_len;
+
+	if (sess->iv.length) {
+		uint8_t *iv_ptr;
+
+		iv_ptr = rte_crypto_op_ctod_offset(userdata, uint8_t *,
+						sess->iv.offset);
+
+		if (sess->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
+			iv_ptr = conv_to_snow_f9_iv(iv_ptr);
+			sge->length = 12;
+		} else if (sess->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
+			iv_ptr = conv_to_zuc_eia_iv(iv_ptr);
+			sge->length = 8;
+		} else {
+			sge->length = sess->iv.length;
+		}
+		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
+		ip_fle->length += sge->length;
+		sge++;
+	}
+	/* i/p 1st seg */
+	DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
+	DPAA2_SET_FLE_OFFSET(sge, data_offset);
+
+	if (data_len <= (int)(sgl->vec[0].len - data_offset)) {
+		sge->length = data_len;
+		data_len = 0;
+	} else {
+		sge->length = sgl->vec[0].len - data_offset;
+		for (i = 1; i < sgl->num; i++) {
+			sge++;
+			DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
+			DPAA2_SET_FLE_OFFSET(sge, 0);
+			sge->length = sgl->vec[i].len;
+		}
+	}
+	if (sess->dir == DIR_DEC) {
+		/* Digest verification case */
+		sge++;
+		old_digest = (uint8_t *)(sge + 1);
+		rte_memcpy(old_digest, digest->va,
+			sess->digest_length);
+		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_digest));
+		sge->length = sess->digest_length;
+		ip_fle->length += sess->digest_length;
+	}
+	DPAA2_SET_FLE_FIN(sge);
+	DPAA2_SET_FLE_FIN(ip_fle);
+	DPAA2_SET_FD_LEN(fd, ip_fle->length);
 
 	return 0;
 }
-- 
2.17.1


^ permalink raw reply	[flat|nested] 115+ messages in thread

* [dpdk-dev] [PATCH v2 07/15] crypto/dpaa2_sec: support AUTHENC with raw buffer APIs
  2021-09-07  7:59   ` [dpdk-dev] [PATCH v2 " Hemant Agrawal
                       ` (5 preceding siblings ...)
  2021-09-07  7:59     ` [dpdk-dev] [PATCH v2 06/15] crypto/dpaa2_sec: support AUTH only with raw buffer APIs Hemant Agrawal
@ 2021-09-07  7:59     ` Hemant Agrawal
  2021-09-07  7:59     ` [dpdk-dev] [PATCH v2 08/15] crypto/dpaa2_sec: support AEAD " Hemant Agrawal
                       ` (9 subsequent siblings)
  16 siblings, 0 replies; 115+ messages in thread
From: Hemant Agrawal @ 2021-09-07  7:59 UTC (permalink / raw)
  To: dev, gakhil; +Cc: konstantin.ananyev, roy.fan.zhang, Gagandeep Singh

From: Gagandeep Singh <g.singh@nxp.com>

This patch adds AUTHENC support with the raw buffer APIs.

Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
---
 drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c | 128 ++++++++++++++++++--
 1 file changed, 121 insertions(+), 7 deletions(-)

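The central bookkeeping here is deriving the auth-only header/trailer
lengths from the two offset pairs. A hedged restatement of that arithmetic,
pulled out of build_raw_dp_chain_fd() for clarity (sketch, not an excerpt):

#include <rte_crypto_sym.h>

/* Mirrors the length derivation in build_raw_dp_chain_fd(); SEC consumes
 * the auth-only trailer length in the upper 16 bits of the word.
 */
static uint32_t
chain_auth_only_len(union rte_crypto_sym_ofs ofs)
{
	uint16_t auth_hdr_len = ofs.ofs.cipher.head - ofs.ofs.auth.head;
	uint16_t auth_tail_len = ofs.ofs.auth.tail;

	return ((uint32_t)auth_tail_len << 16) | auth_hdr_len;
}

For example, a 16-byte authenticated-only header with no trailer yields
auth_only_len == 16.
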
diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c b/drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c
index af052202d9..505431fc23 100644
--- a/drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c
@@ -31,14 +31,128 @@ build_raw_dp_chain_fd(uint8_t *drv_ctx,
 		       void *userdata,
 		       struct qbman_fd *fd)
 {
-	RTE_SET_USED(drv_ctx);
-	RTE_SET_USED(sgl);
-	RTE_SET_USED(iv);
-	RTE_SET_USED(digest);
 	RTE_SET_USED(auth_iv);
-	RTE_SET_USED(ofs);
-	RTE_SET_USED(userdata);
-	RTE_SET_USED(fd);
+
+	dpaa2_sec_session *sess =
+		((struct dpaa2_sec_raw_dp_ctx *)drv_ctx)->session;
+	struct ctxt_priv *priv = sess->ctxt;
+	struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
+	struct sec_flow_context *flc;
+	int data_len = 0, auth_len = 0, cipher_len = 0;
+	unsigned int i = 0;
+	uint16_t auth_hdr_len = ofs.ofs.cipher.head -
+				ofs.ofs.auth.head;
+
+	uint16_t auth_tail_len = ofs.ofs.auth.tail;
+	uint32_t auth_only_len = (auth_tail_len << 16) | auth_hdr_len;
+	int icv_len = sess->digest_length;
+	uint8_t *old_icv;
+	uint8_t *iv_ptr = iv->va;
+
+	for (i = 0; i < sgl->num; i++)
+		data_len += sgl->vec[i].len;
+
+	cipher_len = data_len - ofs.ofs.cipher.head - ofs.ofs.cipher.tail;
+	auth_len = data_len - ofs.ofs.auth.head - ofs.ofs.auth.tail;
+	/* first FLE entry used to store session ctxt */
+	fle = (struct qbman_fle *)rte_malloc(NULL,
+			FLE_SG_MEM_SIZE(2 * sgl->num),
+			RTE_CACHE_LINE_SIZE);
+	if (unlikely(!fle)) {
+		DPAA2_SEC_ERR("AUTHENC SG: Memory alloc failed for SGE");
+		return -ENOMEM;
+	}
+	memset(fle, 0, FLE_SG_MEM_SIZE(2 * sgl->num));
+	DPAA2_SET_FLE_ADDR(fle, (size_t)userdata);
+	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
+
+	op_fle = fle + 1;
+	ip_fle = fle + 2;
+	sge = fle + 3;
+
+	/* Save the shared descriptor */
+	flc = &priv->flc_desc[0].flc;
+
+	/* Configure FD as a FRAME LIST */
+	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
+	DPAA2_SET_FD_COMPOUND_FMT(fd);
+	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
+
+	/* Configure Output FLE with Scatter/Gather Entry */
+	DPAA2_SET_FLE_SG_EXT(op_fle);
+	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));
+
+	if (auth_only_len)
+		DPAA2_SET_FLE_INTERNAL_JD(op_fle, auth_only_len);
+
+	op_fle->length = (sess->dir == DIR_ENC) ?
+			(cipher_len + icv_len) :
+			cipher_len;
+
+	/* Configure Output SGE for Encap/Decap */
+	DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
+	DPAA2_SET_FLE_OFFSET(sge, ofs.ofs.auth.head);
+	sge->length = sgl->vec[0].len - ofs.ofs.auth.head;
+
+	/* o/p segs */
+	for (i = 1; i < sgl->num; i++) {
+		sge++;
+		DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
+		DPAA2_SET_FLE_OFFSET(sge, 0);
+		sge->length = sgl->vec[i].len;
+	}
+
+	if (sess->dir == DIR_ENC) {
+		sge++;
+		DPAA2_SET_FLE_ADDR(sge,
+			digest->iova);
+		sge->length = icv_len;
+	}
+	DPAA2_SET_FLE_FIN(sge);
+
+	sge++;
+
+	/* Configure Input FLE with Scatter/Gather Entry */
+	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
+	DPAA2_SET_FLE_SG_EXT(ip_fle);
+	DPAA2_SET_FLE_FIN(ip_fle);
+
+	ip_fle->length = (sess->dir == DIR_ENC) ?
+			(auth_len + sess->iv.length) :
+			(auth_len + sess->iv.length +
+			icv_len);
+
+	/* Configure Input SGE for Encap/Decap */
+	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
+	sge->length = sess->iv.length;
+
+	sge++;
+	DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
+	DPAA2_SET_FLE_OFFSET(sge, ofs.ofs.auth.head);
+	sge->length = sgl->vec[0].len - ofs.ofs.auth.head;
+
+	for (i = 1; i < sgl->num; i++) {
+		sge++;
+		DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
+		DPAA2_SET_FLE_OFFSET(sge, 0);
+		sge->length = sgl->vec[i].len;
+	}
+
+	if (sess->dir == DIR_DEC) {
+		sge++;
+		old_icv = (uint8_t *)(sge + 1);
+		memcpy(old_icv, digest->va,
+			icv_len);
+		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
+		sge->length = icv_len;
+	}
+
+	DPAA2_SET_FLE_FIN(sge);
+	if (auth_only_len) {
+		DPAA2_SET_FLE_INTERNAL_JD(ip_fle, auth_only_len);
+		DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
+	}
+	DPAA2_SET_FD_LEN(fd, ip_fle->length);
 
 	return 0;
 }
-- 
2.17.1


^ permalink raw reply	[flat|nested] 115+ messages in thread

* [dpdk-dev] [PATCH v2 08/15] crypto/dpaa2_sec: support AEAD with raw buffer APIs
  2021-09-07  7:59   ` [dpdk-dev] [PATCH v2 " Hemant Agrawal
                       ` (6 preceding siblings ...)
  2021-09-07  7:59     ` [dpdk-dev] [PATCH v2 07/15] crypto/dpaa2_sec: support AUTHENC " Hemant Agrawal
@ 2021-09-07  7:59     ` Hemant Agrawal
  2021-09-07  7:59     ` [dpdk-dev] [PATCH v2 09/15] crypto/dpaa2_sec: support OOP with raw buffer API Hemant Agrawal
                       ` (8 subsequent siblings)
  16 siblings, 0 replies; 115+ messages in thread
From: Hemant Agrawal @ 2021-09-07  7:59 UTC (permalink / raw)
  To: dev, gakhil; +Cc: konstantin.ananyev, roy.fan.zhang, Gagandeep Singh

From: Gagandeep Singh <g.singh@nxp.com>

Add raw vector API support for AEAD algos.

Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
---
 drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c | 249 +++++++++++++++++---
 1 file changed, 214 insertions(+), 35 deletions(-)

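The essential difference from the chain case is the length accounting on the
two frame list entries; a hedged restatement (not an excerpt) of what the
patch programs:

/* Sketch: FLE length accounting in build_raw_dp_aead_fd(), where aad_len
 * corresponds to the session's auth_only_len.
 */
static void
aead_fle_lengths(int aead_len, int iv_len, int aad_len, int icv_len,
		 int is_enc, int *op_fle_len, int *ip_fle_len)
{
	/* the output frame list carries the ICV only when encrypting */
	*op_fle_len = is_enc ? aead_len + icv_len : aead_len;
	/* input always carries IV + AAD; decrypt appends the received
	 * ICV so SEC can verify it
	 */
	*ip_fle_len = aead_len + iv_len + aad_len +
			(is_enc ? 0 : icv_len);
}
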
diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c b/drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c
index 505431fc23..5191e5381c 100644
--- a/drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c
@@ -167,14 +167,126 @@ build_raw_dp_aead_fd(uint8_t *drv_ctx,
 		       void *userdata,
 		       struct qbman_fd *fd)
 {
-	RTE_SET_USED(drv_ctx);
-	RTE_SET_USED(sgl);
-	RTE_SET_USED(iv);
-	RTE_SET_USED(digest);
-	RTE_SET_USED(auth_iv);
-	RTE_SET_USED(ofs);
-	RTE_SET_USED(userdata);
-	RTE_SET_USED(fd);
+	dpaa2_sec_session *sess =
+		((struct dpaa2_sec_raw_dp_ctx *)drv_ctx)->session;
+	struct ctxt_priv *priv = sess->ctxt;
+	struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
+	struct sec_flow_context *flc;
+	uint32_t auth_only_len = sess->ext_params.aead_ctxt.auth_only_len;
+	int icv_len = sess->digest_length;
+	uint8_t *old_icv;
+	uint8_t *IV_ptr = iv->va;
+	unsigned int i = 0;
+	int data_len = 0, aead_len = 0;
+
+	for (i = 0; i < sgl->num; i++)
+		data_len += sgl->vec[i].len;
+
+	aead_len = data_len - ofs.ofs.cipher.head - ofs.ofs.cipher.tail;
+
+	/* first FLE entry used to store mbuf and session ctxt */
+	fle = (struct qbman_fle *)rte_malloc(NULL,
+			FLE_SG_MEM_SIZE(2 * sgl->num),
+			RTE_CACHE_LINE_SIZE);
+	if (unlikely(!fle)) {
+		DPAA2_SEC_ERR("GCM SG: Memory alloc failed for SGE");
+		return -ENOMEM;
+	}
+	memset(fle, 0, FLE_SG_MEM_SIZE(2 * sgl->num));
+	DPAA2_SET_FLE_ADDR(fle, (size_t)userdata);
+	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
+
+	op_fle = fle + 1;
+	ip_fle = fle + 2;
+	sge = fle + 3;
+
+	/* Save the shared descriptor */
+	flc = &priv->flc_desc[0].flc;
+
+	/* Configure FD as a FRAME LIST */
+	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
+	DPAA2_SET_FD_COMPOUND_FMT(fd);
+	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
+
+	/* Configure Output FLE with Scatter/Gather Entry */
+	DPAA2_SET_FLE_SG_EXT(op_fle);
+	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));
+
+	if (auth_only_len)
+		DPAA2_SET_FLE_INTERNAL_JD(op_fle, auth_only_len);
+
+	op_fle->length = (sess->dir == DIR_ENC) ?
+			(aead_len + icv_len) :
+			aead_len;
+
+	/* Configure Output SGE for Encap/Decap */
+	DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
+	DPAA2_SET_FLE_OFFSET(sge, ofs.ofs.cipher.head);
+	sge->length = sgl->vec[0].len - ofs.ofs.cipher.head;
+
+	/* o/p segs */
+	for (i = 1; i < sgl->num; i++) {
+		sge++;
+		DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
+		DPAA2_SET_FLE_OFFSET(sge, 0);
+		sge->length = sgl->vec[i].len;
+	}
+
+	if (sess->dir == DIR_ENC) {
+		sge++;
+		DPAA2_SET_FLE_ADDR(sge, digest->iova);
+		sge->length = icv_len;
+	}
+	DPAA2_SET_FLE_FIN(sge);
+
+	sge++;
+
+	/* Configure Input FLE with Scatter/Gather Entry */
+	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
+	DPAA2_SET_FLE_SG_EXT(ip_fle);
+	DPAA2_SET_FLE_FIN(ip_fle);
+	ip_fle->length = (sess->dir == DIR_ENC) ?
+		(aead_len + sess->iv.length + auth_only_len) :
+		(aead_len + sess->iv.length + auth_only_len +
+		icv_len);
+
+	/* Configure Input SGE for Encap/Decap */
+	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(IV_ptr));
+	sge->length = sess->iv.length;
+
+	sge++;
+	if (auth_only_len) {
+		DPAA2_SET_FLE_ADDR(sge, auth_iv->iova);
+		sge->length = auth_only_len;
+		sge++;
+	}
+
+	DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
+	DPAA2_SET_FLE_OFFSET(sge, ofs.ofs.cipher.head);
+	sge->length = sgl->vec[0].len - ofs.ofs.cipher.head;
+
+	/* i/p segs */
+	for (i = 1; i < sgl->num; i++) {
+		sge++;
+		DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
+		DPAA2_SET_FLE_OFFSET(sge, 0);
+		sge->length = sgl->vec[i].len;
+	}
+
+	if (sess->dir == DIR_DEC) {
+		sge++;
+		old_icv = (uint8_t *)(sge + 1);
+		memcpy(old_icv,  digest->va, icv_len);
+		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
+		sge->length = icv_len;
+	}
+
+	DPAA2_SET_FLE_FIN(sge);
+	if (auth_only_len) {
+		DPAA2_SET_FLE_INTERNAL_JD(ip_fle, auth_only_len);
+		DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
+	}
+	DPAA2_SET_FD_LEN(fd, ip_fle->length);
 
 	return 0;
 }
@@ -311,36 +423,104 @@ build_raw_dp_proto_fd(uint8_t *drv_ctx,
 		       void *userdata,
 		       struct qbman_fd *fd)
 {
-	RTE_SET_USED(drv_ctx);
-	RTE_SET_USED(sgl);
 	RTE_SET_USED(iv);
 	RTE_SET_USED(digest);
 	RTE_SET_USED(auth_iv);
 	RTE_SET_USED(ofs);
-	RTE_SET_USED(userdata);
-	RTE_SET_USED(fd);
 
-	return 0;
-}
+	dpaa2_sec_session *sess =
+		((struct dpaa2_sec_raw_dp_ctx *)drv_ctx)->session;
+	struct ctxt_priv *priv = sess->ctxt;
+	struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
+	struct sec_flow_context *flc;
+	uint32_t in_len = 0, out_len = 0, i;
 
-static int
-build_raw_dp_proto_compound_fd(uint8_t *drv_ctx,
-		       struct rte_crypto_sgl *sgl,
-		       struct rte_crypto_va_iova_ptr *iv,
-		       struct rte_crypto_va_iova_ptr *digest,
-		       struct rte_crypto_va_iova_ptr *auth_iv,
-		       union rte_crypto_sym_ofs ofs,
-		       void *userdata,
-		       struct qbman_fd *fd)
-{
-	RTE_SET_USED(drv_ctx);
-	RTE_SET_USED(sgl);
-	RTE_SET_USED(iv);
-	RTE_SET_USED(digest);
-	RTE_SET_USED(auth_iv);
-	RTE_SET_USED(ofs);
-	RTE_SET_USED(userdata);
-	RTE_SET_USED(fd);
+	/* first FLE entry used to store mbuf and session ctxt */
+	fle = (struct qbman_fle *)rte_malloc(NULL,
+			FLE_SG_MEM_SIZE(2 * sgl->num),
+			RTE_CACHE_LINE_SIZE);
+	if (unlikely(!fle)) {
+		DPAA2_SEC_DP_ERR("Proto:SG: Memory alloc failed for SGE");
+		return -ENOMEM;
+	}
+	memset(fle, 0, FLE_SG_MEM_SIZE(2 * sgl->num));
+	DPAA2_SET_FLE_ADDR(fle, (size_t)userdata);
+	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
+
+	/* Save the shared descriptor */
+	flc = &priv->flc_desc[0].flc;
+	op_fle = fle + 1;
+	ip_fle = fle + 2;
+	sge = fle + 3;
+
+	DPAA2_SET_FD_IVP(fd);
+	DPAA2_SET_FLE_IVP(op_fle);
+	DPAA2_SET_FLE_IVP(ip_fle);
+
+	/* Configure FD as a FRAME LIST */
+	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
+	DPAA2_SET_FD_COMPOUND_FMT(fd);
+	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
+
+	/* Configure Output FLE with Scatter/Gather Entry */
+	DPAA2_SET_FLE_SG_EXT(op_fle);
+	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));
+
+	/* Configure Output SGE for Encap/Decap */
+	DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
+	DPAA2_SET_FLE_OFFSET(sge, 0);
+	sge->length = sgl->vec[0].len;
+	out_len += sge->length;
+	/* o/p segs */
+	for (i = 1; i < sgl->num; i++) {
+		sge++;
+		DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
+		DPAA2_SET_FLE_OFFSET(sge, 0);
+		sge->length = sgl->vec[i].len;
+		out_len += sge->length;
+	}
+	sge->length = sgl->vec[i - 1].tot_len;
+	out_len += sge->length;
+
+	DPAA2_SET_FLE_FIN(sge);
+	op_fle->length = out_len;
+
+	sge++;
+
+	/* Configure Input FLE with Scatter/Gather Entry */
+	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
+	DPAA2_SET_FLE_SG_EXT(ip_fle);
+	DPAA2_SET_FLE_FIN(ip_fle);
+
+	/* Configure input SGE for Encap/Decap */
+	DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
+	DPAA2_SET_FLE_OFFSET(sge, 0);
+	sge->length = sgl->vec[0].len;
+	in_len += sge->length;
+	/* i/p segs */
+	for (i = 1; i < sgl->num; i++) {
+		sge++;
+		DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
+		DPAA2_SET_FLE_OFFSET(sge, 0);
+		sge->length = sgl->vec[i].len;
+		in_len += sge->length;
+	}
+
+	ip_fle->length = in_len;
+	DPAA2_SET_FLE_FIN(sge);
+
+	/* In case of PDCP, per packet HFN is stored in
+	 * mbuf priv after sym_op.
+	 */
+	if (sess->ctxt_type == DPAA2_SEC_PDCP && sess->pdcp.hfn_ovd) {
+		uint32_t hfn_ovd = *(uint32_t *)((uint8_t *)userdata +
+				sess->pdcp.hfn_ovd_offset);
+		/* enable HFN override */
+		DPAA2_SET_FLE_INTERNAL_JD(ip_fle, hfn_ovd);
+		DPAA2_SET_FLE_INTERNAL_JD(op_fle, hfn_ovd);
+		DPAA2_SET_FD_INTERNAL_JD(fd, hfn_ovd);
+	}
+	DPAA2_SET_FD_LEN(fd, ip_fle->length);
 
 	return 0;
 }
@@ -792,10 +972,9 @@ dpaa2_sec_configure_raw_dp_ctx(struct rte_cryptodev *dev, uint16_t qp_id,
 		sess->build_raw_dp_fd = build_raw_dp_auth_fd;
 	else if (sess->ctxt_type == DPAA2_SEC_CIPHER)
 		sess->build_raw_dp_fd = build_raw_dp_cipher_fd;
-	else if (sess->ctxt_type == DPAA2_SEC_IPSEC)
+	else if (sess->ctxt_type == DPAA2_SEC_IPSEC ||
+		sess->ctxt_type == DPAA2_SEC_PDCP)
 		sess->build_raw_dp_fd = build_raw_dp_proto_fd;
-	else if (sess->ctxt_type == DPAA2_SEC_PDCP)
-		sess->build_raw_dp_fd = build_raw_dp_proto_compound_fd;
 	else
 		return -ENOTSUP;
 	dp_ctx = (struct dpaa2_sec_raw_dp_ctx *)raw_dp_ctx->drv_ctx_data;
-- 
2.17.1


^ permalink raw reply	[flat|nested] 115+ messages in thread

* [dpdk-dev] [PATCH v2 09/15] crypto/dpaa2_sec: support OOP with raw buffer API
  2021-09-07  7:59   ` [dpdk-dev] [PATCH v2 " Hemant Agrawal
                       ` (7 preceding siblings ...)
  2021-09-07  7:59     ` [dpdk-dev] [PATCH v2 08/15] crypto/dpaa2_sec: support AEAD " Hemant Agrawal
@ 2021-09-07  7:59     ` Hemant Agrawal
  2021-09-07  7:59     ` [dpdk-dev] [PATCH v2 10/15] crypto/dpaa2_sec: enhance error checks with raw buffer APIs Hemant Agrawal
                       ` (7 subsequent siblings)
  16 siblings, 0 replies; 115+ messages in thread
From: Hemant Agrawal @ 2021-09-07  7:59 UTC (permalink / raw)
  To: dev, gakhil; +Cc: konstantin.ananyev, roy.fan.zhang, Gagandeep Singh

From: Gagandeep Singh <g.singh@nxp.com>

Add support for out-of-place (OOP) processing with the raw vector APIs.

Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
---
 drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h   |   1 +
 drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c | 156 +++++++++++++++-----
 2 files changed, 116 insertions(+), 41 deletions(-)

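From the application side, OOP is requested by populating the dest_sgl
array that an earlier patch in this series adds to struct
rte_crypto_sym_vec (the tot_len field likewise comes from this series); a
NULL dest_sgl keeps the in-place path. A minimal sketch, with buffer
pointers and IOVAs assumed to be application state:

#include <rte_cryptodev.h>

static void
fill_oop_vec(struct rte_crypto_sym_vec *vec,
	     struct rte_crypto_vec *src_seg, struct rte_crypto_sgl *src_sgl,
	     struct rte_crypto_vec *dst_seg, struct rte_crypto_sgl *dst_sgl,
	     void *src_buf, rte_iova_t src_iova,
	     void *dst_buf, rte_iova_t dst_iova, uint32_t len)
{
	*src_seg = (struct rte_crypto_vec){ .base = src_buf,
			.iova = src_iova, .len = len, .tot_len = len };
	*dst_seg = (struct rte_crypto_vec){ .base = dst_buf,
			.iova = dst_iova, .len = len, .tot_len = len };
	*src_sgl = (struct rte_crypto_sgl){ .vec = src_seg, .num = 1 };
	*dst_sgl = (struct rte_crypto_sgl){ .vec = dst_seg, .num = 1 };

	vec->num = 1;
	vec->src_sgl = src_sgl;
	vec->dest_sgl = dst_sgl;	/* NULL here selects in-place */
	/* iv/digest/aad arrays are filled exactly as for in-place */
}
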
diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h b/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h
index f6507855e3..db72c11a5f 100644
--- a/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h
@@ -179,6 +179,7 @@ typedef int (*dpaa2_sec_build_fd_t)(
 
 typedef int (*dpaa2_sec_build_raw_dp_fd_t)(uint8_t *drv_ctx,
 		       struct rte_crypto_sgl *sgl,
+		       struct rte_crypto_sgl *dest_sgl,
 		       struct rte_crypto_va_iova_ptr *iv,
 		       struct rte_crypto_va_iova_ptr *digest,
 		       struct rte_crypto_va_iova_ptr *auth_iv,
diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c b/drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c
index 5191e5381c..51e316cc00 100644
--- a/drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c
@@ -24,6 +24,7 @@ struct dpaa2_sec_raw_dp_ctx {
 static int
 build_raw_dp_chain_fd(uint8_t *drv_ctx,
 		       struct rte_crypto_sgl *sgl,
+		       struct rte_crypto_sgl *dest_sgl,
 		       struct rte_crypto_va_iova_ptr *iv,
 		       struct rte_crypto_va_iova_ptr *digest,
 		       struct rte_crypto_va_iova_ptr *auth_iv,
@@ -89,17 +90,33 @@ build_raw_dp_chain_fd(uint8_t *drv_ctx,
 			(cipher_len + icv_len) :
 			cipher_len;
 
-	/* Configure Output SGE for Encap/Decap */
-	DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
-	DPAA2_SET_FLE_OFFSET(sge, ofs.ofs.auth.head);
-	sge->length = sgl->vec[0].len - ofs.ofs.auth.head;
+	/* OOP */
+	if (dest_sgl) {
+		/* Configure Output SGE for Encap/Decap */
+		DPAA2_SET_FLE_ADDR(sge, dest_sgl->vec[0].iova);
+		DPAA2_SET_FLE_OFFSET(sge, ofs.ofs.cipher.head);
+		sge->length = dest_sgl->vec[0].len - ofs.ofs.cipher.head;
 
-	/* o/p segs */
-	for (i = 1; i < sgl->num; i++) {
-		sge++;
-		DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
-		DPAA2_SET_FLE_OFFSET(sge, 0);
-		sge->length = sgl->vec[i].len;
+		/* o/p segs */
+		for (i = 1; i < dest_sgl->num; i++) {
+			sge++;
+			DPAA2_SET_FLE_ADDR(sge, dest_sgl->vec[i].iova);
+			DPAA2_SET_FLE_OFFSET(sge, 0);
+			sge->length = dest_sgl->vec[i].len;
+		}
+	} else {
+		/* Configure Output SGE for Encap/Decap */
+		DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
+		DPAA2_SET_FLE_OFFSET(sge, ofs.ofs.cipher.head);
+		sge->length = sgl->vec[0].len - ofs.ofs.cipher.head;
+
+		/* o/p segs */
+		for (i = 1; i < sgl->num; i++) {
+			sge++;
+			DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
+			DPAA2_SET_FLE_OFFSET(sge, 0);
+			sge->length = sgl->vec[i].len;
+		}
 	}
 
 	if (sess->dir == DIR_ENC) {
@@ -160,6 +177,7 @@ build_raw_dp_chain_fd(uint8_t *drv_ctx,
 static int
 build_raw_dp_aead_fd(uint8_t *drv_ctx,
 		       struct rte_crypto_sgl *sgl,
+		       struct rte_crypto_sgl *dest_sgl,
 		       struct rte_crypto_va_iova_ptr *iv,
 		       struct rte_crypto_va_iova_ptr *digest,
 		       struct rte_crypto_va_iova_ptr *auth_iv,
@@ -219,17 +237,33 @@ build_raw_dp_aead_fd(uint8_t *drv_ctx,
 			(aead_len + icv_len) :
 			aead_len;
 
-	/* Configure Output SGE for Encap/Decap */
-	DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
-	DPAA2_SET_FLE_OFFSET(sge, ofs.ofs.cipher.head);
-	sge->length = sgl->vec[0].len - ofs.ofs.cipher.head;
+	/* OOP */
+	if (dest_sgl) {
+		/* Configure Output SGE for Encap/Decap */
+		DPAA2_SET_FLE_ADDR(sge, dest_sgl->vec[0].iova);
+		DPAA2_SET_FLE_OFFSET(sge, ofs.ofs.cipher.head);
+		sge->length = dest_sgl->vec[0].len - ofs.ofs.cipher.head;
 
-	/* o/p segs */
-	for (i = 1; i < sgl->num; i++) {
-		sge++;
-		DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
-		DPAA2_SET_FLE_OFFSET(sge, 0);
-		sge->length = sgl->vec[i].len;
+		/* o/p segs */
+		for (i = 1; i < dest_sgl->num; i++) {
+			sge++;
+			DPAA2_SET_FLE_ADDR(sge, dest_sgl->vec[i].iova);
+			DPAA2_SET_FLE_OFFSET(sge, 0);
+			sge->length = dest_sgl->vec[i].len;
+		}
+	} else {
+		/* Configure Output SGE for Encap/Decap */
+		DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
+		DPAA2_SET_FLE_OFFSET(sge, ofs.ofs.cipher.head);
+		sge->length = sgl->vec[0].len - ofs.ofs.cipher.head;
+
+		/* o/p segs */
+		for (i = 1; i < sgl->num; i++) {
+			sge++;
+			DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
+			DPAA2_SET_FLE_OFFSET(sge, 0);
+			sge->length = sgl->vec[i].len;
+		}
 	}
 
 	if (sess->dir == DIR_ENC) {
@@ -294,6 +328,7 @@ build_raw_dp_aead_fd(uint8_t *drv_ctx,
 static int
 build_raw_dp_auth_fd(uint8_t *drv_ctx,
 		       struct rte_crypto_sgl *sgl,
+		       struct rte_crypto_sgl *dest_sgl,
 		       struct rte_crypto_va_iova_ptr *iv,
 		       struct rte_crypto_va_iova_ptr *digest,
 		       struct rte_crypto_va_iova_ptr *auth_iv,
@@ -303,6 +338,7 @@ build_raw_dp_auth_fd(uint8_t *drv_ctx,
 {
 	RTE_SET_USED(iv);
 	RTE_SET_USED(auth_iv);
+	RTE_SET_USED(dest_sgl);
 
 	dpaa2_sec_session *sess =
 		((struct dpaa2_sec_raw_dp_ctx *)drv_ctx)->session;
@@ -416,6 +452,7 @@ build_raw_dp_auth_fd(uint8_t *drv_ctx,
 static int
 build_raw_dp_proto_fd(uint8_t *drv_ctx,
 		       struct rte_crypto_sgl *sgl,
+		       struct rte_crypto_sgl *dest_sgl,
 		       struct rte_crypto_va_iova_ptr *iv,
 		       struct rte_crypto_va_iova_ptr *digest,
 		       struct rte_crypto_va_iova_ptr *auth_iv,
@@ -466,20 +503,39 @@ build_raw_dp_proto_fd(uint8_t *drv_ctx,
 	DPAA2_SET_FLE_SG_EXT(op_fle);
 	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));
 
-	/* Configure Output SGE for Encap/Decap */
-	DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
-	DPAA2_SET_FLE_OFFSET(sge, 0);
-	sge->length = sgl->vec[0].len;
-	out_len += sge->length;
-	/* o/p segs */
-	for (i = 1; i < sgl->num; i++) {
-		sge++;
-		DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
+	/* OOP */
+	if (dest_sgl) {
+		/* Configure Output SGE for Encap/Decap */
+		DPAA2_SET_FLE_ADDR(sge, dest_sgl->vec[0].iova);
 		DPAA2_SET_FLE_OFFSET(sge, 0);
-		sge->length = sgl->vec[i].len;
+		sge->length = dest_sgl->vec[0].len;
+		out_len += sge->length;
+		/* o/p segs */
+		for (i = 1; i < dest_sgl->num; i++) {
+			sge++;
+			DPAA2_SET_FLE_ADDR(sge, dest_sgl->vec[i].iova);
+			DPAA2_SET_FLE_OFFSET(sge, 0);
+			sge->length = dest_sgl->vec[i].len;
+			out_len += sge->length;
+		}
+		sge->length = dest_sgl->vec[i - 1].tot_len;
+
+	} else {
+		/* Configure Output SGE for Encap/Decap */
+		DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
+		DPAA2_SET_FLE_OFFSET(sge, 0);
+		sge->length = sgl->vec[0].len;
 		out_len += sge->length;
+		/* o/p segs */
+		for (i = 1; i < sgl->num; i++) {
+			sge++;
+			DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
+			DPAA2_SET_FLE_OFFSET(sge, 0);
+			sge->length = sgl->vec[i].len;
+			out_len += sge->length;
+		}
+		sge->length = sgl->vec[i - 1].tot_len;
 	}
-	sge->length = sgl->vec[i - 1].tot_len;
 	out_len += sge->length;
 
 	DPAA2_SET_FLE_FIN(sge);
@@ -528,6 +584,7 @@ build_raw_dp_proto_fd(uint8_t *drv_ctx,
 static int
 build_raw_dp_cipher_fd(uint8_t *drv_ctx,
 		       struct rte_crypto_sgl *sgl,
+		       struct rte_crypto_sgl *dest_sgl,
 		       struct rte_crypto_va_iova_ptr *iv,
 		       struct rte_crypto_va_iova_ptr *digest,
 		       struct rte_crypto_va_iova_ptr *auth_iv,
@@ -593,17 +650,33 @@ build_raw_dp_cipher_fd(uint8_t *drv_ctx,
 	op_fle->length = data_len;
 	DPAA2_SET_FLE_SG_EXT(op_fle);
 
-	/* o/p 1st seg */
-	DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
-	DPAA2_SET_FLE_OFFSET(sge, data_offset);
-	sge->length = sgl->vec[0].len - data_offset;
+	/* OOP */
+	if (dest_sgl) {
+		/* o/p 1st seg */
+		DPAA2_SET_FLE_ADDR(sge, dest_sgl->vec[0].iova);
+		DPAA2_SET_FLE_OFFSET(sge, data_offset);
+		sge->length = dest_sgl->vec[0].len - data_offset;
 
-	/* o/p segs */
-	for (i = 1; i < sgl->num; i++) {
-		sge++;
-		DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
-		DPAA2_SET_FLE_OFFSET(sge, 0);
-		sge->length = sgl->vec[i].len;
+		/* o/p segs */
+		for (i = 1; i < dest_sgl->num; i++) {
+			sge++;
+			DPAA2_SET_FLE_ADDR(sge, dest_sgl->vec[i].iova);
+			DPAA2_SET_FLE_OFFSET(sge, 0);
+			sge->length = dest_sgl->vec[i].len;
+		}
+	} else {
+		/* o/p 1st seg */
+		DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
+		DPAA2_SET_FLE_OFFSET(sge, data_offset);
+		sge->length = sgl->vec[0].len - data_offset;
+
+		/* o/p segs */
+		for (i = 1; i < sgl->num; i++) {
+			sge++;
+			DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
+			DPAA2_SET_FLE_OFFSET(sge, 0);
+			sge->length = sgl->vec[i].len;
+		}
 	}
 	DPAA2_SET_FLE_FIN(sge);
 
@@ -706,6 +779,7 @@ dpaa2_sec_raw_enqueue_burst(void *qp_data, uint8_t *drv_ctx,
 			memset(&fd_arr[loop], 0, sizeof(struct qbman_fd));
 			ret = sess->build_raw_dp_fd(drv_ctx,
 						    &vec->src_sgl[loop],
+						    &vec->dest_sgl[loop],
 						    &vec->iv[loop],
 						    &vec->digest[loop],
 						    &vec->auth_iv[loop],
-- 
2.17.1


^ permalink raw reply	[flat|nested] 115+ messages in thread

* [dpdk-dev] [PATCH v2 10/15] crypto/dpaa2_sec: enhance error checks with raw buffer APIs
  2021-09-07  7:59   ` [dpdk-dev] [PATCH v2 " Hemant Agrawal
                       ` (8 preceding siblings ...)
  2021-09-07  7:59     ` [dpdk-dev] [PATCH v2 09/15] crypto/dpaa2_sec: support OOP with raw buffer API Hemant Agrawal
@ 2021-09-07  7:59     ` Hemant Agrawal
  2021-09-07  7:59     ` [dpdk-dev] [PATCH v2 11/15] crypto/dpaa_sec: support raw datapath APIs Hemant Agrawal
                       ` (6 subsequent siblings)
  16 siblings, 0 replies; 115+ messages in thread
From: Hemant Agrawal @ 2021-09-07  7:59 UTC (permalink / raw)
  To: dev, gakhil; +Cc: konstantin.ananyev, roy.fan.zhang, Gagandeep Singh

From: Gagandeep Singh <g.singh@nxp.com>

This patch improves the error checks and the support of
wireless algos with raw buffers.

Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
---
 drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c | 31 ++++-----------------
 1 file changed, 6 insertions(+), 25 deletions(-)

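The dequeue change passes a plain success flag to the application's
post-dequeue callback instead of a pre-cooked op status. A sketch of a
matching callback (callback name is illustrative; the typedef is the
generic rte_cryptodev_raw_post_dequeue_t):

#include <rte_common.h>
#include <rte_crypto.h>

static void
post_deq_cb(void *user_data, uint32_t index, uint8_t is_op_success)
{
	struct rte_crypto_op *op = user_data;

	RTE_SET_USED(index);
	op->status = is_op_success ? RTE_CRYPTO_OP_STATUS_SUCCESS :
				     RTE_CRYPTO_OP_STATUS_ERROR;
}
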
diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c b/drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c
index 51e316cc00..25364454c9 100644
--- a/drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c
@@ -355,16 +355,7 @@ build_raw_dp_auth_fd(uint8_t *drv_ctx,
 	data_len = total_len - ofs.ofs.auth.head - ofs.ofs.auth.tail;
 	data_offset = ofs.ofs.auth.head;
 
-	if (sess->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
-		sess->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
-		if ((data_len & 7) || (data_offset & 7)) {
-			DPAA2_SEC_ERR("AUTH: len/offset must be full bytes");
-			return -ENOTSUP;
-		}
-
-		data_len = data_len >> 3;
-		data_offset = data_offset >> 3;
-	}
+	/* For SNOW3G and ZUC, only lengths in bits are supported */
 	fle = (struct qbman_fle *)rte_malloc(NULL,
 		FLE_SG_MEM_SIZE(2 * sgl->num),
 			RTE_CACHE_LINE_SIZE);
@@ -609,17 +600,7 @@ build_raw_dp_cipher_fd(uint8_t *drv_ctx,
 	data_len = total_len - ofs.ofs.cipher.head - ofs.ofs.cipher.tail;
 	data_offset = ofs.ofs.cipher.head;
 
-	if (sess->cipher_alg == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
-		sess->cipher_alg == RTE_CRYPTO_CIPHER_ZUC_EEA3) {
-		if ((data_len & 7) || (data_offset & 7)) {
-			DPAA2_SEC_ERR("CIPHER: len/offset must be full bytes");
-			return -ENOTSUP;
-		}
-
-		data_len = data_len >> 3;
-		data_offset = data_offset >> 3;
-	}
-
+	/* For SNOW3G and ZUC, only lengths in bits are supported */
 	/* first FLE entry used to store mbuf and session ctxt */
 	fle = (struct qbman_fle *)rte_malloc(NULL,
 			FLE_SG_MEM_SIZE(2*sgl->num),
@@ -878,7 +859,7 @@ dpaa2_sec_raw_dequeue_burst(void *qp_data, uint8_t *drv_ctx,
 	struct qbman_result *dq_storage;
 	uint32_t fqid = dpaa2_qp->rx_vq.fqid;
 	int ret, num_rx = 0;
-	uint8_t is_last = 0, status;
+	uint8_t is_last = 0, status, is_success = 0;
 	struct qbman_swp *swp;
 	const struct qbman_fd *fd;
 	struct qbman_pull_desc pulldesc;
@@ -957,11 +938,11 @@ dpaa2_sec_raw_dequeue_burst(void *qp_data, uint8_t *drv_ctx,
 			/* TODO Parse SEC errors */
 			DPAA2_SEC_ERR("SEC returned Error - %x",
 				      fd->simple.frc);
-			status = RTE_CRYPTO_OP_STATUS_ERROR;
+			is_success = false;
 		} else {
-			status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+			is_success = true;
 		}
-		post_dequeue(user_data, num_rx, status);
+		post_dequeue(user_data, num_rx, is_success);
 
 		num_rx++;
 		dq_storage++;
-- 
2.17.1


^ permalink raw reply	[flat|nested] 115+ messages in thread

* [dpdk-dev] [PATCH v2 11/15] crypto/dpaa_sec: support raw datapath APIs
  2021-09-07  7:59   ` [dpdk-dev] [PATCH v2 " Hemant Agrawal
                       ` (9 preceding siblings ...)
  2021-09-07  7:59     ` [dpdk-dev] [PATCH v2 10/15] crypto/dpaa2_sec: enhance error checks with raw buffer APIs Hemant Agrawal
@ 2021-09-07  7:59     ` Hemant Agrawal
  2021-09-07  7:59     ` [dpdk-dev] [PATCH v2 12/15] crypto/dpaa_sec: support authonly and chain with raw APIs Hemant Agrawal
                       ` (5 subsequent siblings)
  16 siblings, 0 replies; 115+ messages in thread
From: Hemant Agrawal @ 2021-09-07  7:59 UTC (permalink / raw)
  To: dev, gakhil; +Cc: konstantin.ananyev, roy.fan.zhang, Gagandeep Singh

From: Gagandeep Singh <g.singh@nxp.com>

This patch adds the raw vector API framework for the dpaa_sec driver.

Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
---
 doc/guides/rel_notes/release_21_11.rst    |   4 +
 drivers/crypto/dpaa_sec/dpaa_sec.c        |  23 +-
 drivers/crypto/dpaa_sec/dpaa_sec.h        |  39 +-
 drivers/crypto/dpaa_sec/dpaa_sec_raw_dp.c | 485 ++++++++++++++++++++++
 drivers/crypto/dpaa_sec/meson.build       |   4 +-
 5 files changed, 541 insertions(+), 14 deletions(-)
 create mode 100644 drivers/crypto/dpaa_sec/dpaa_sec_raw_dp.c

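The framework plugs into the same generic raw DP entry points as the dpaa2
driver earlier in this series; a hedged sketch of the dequeue side an
application would pair with it (BURST and post_deq_cb are illustrative,
and 'ctx' was set up via rte_cryptodev_configure_raw_dp_ctx()):

#include <rte_cryptodev.h>

#define BURST 32	/* illustrative burst size */

static uint32_t
dequeue_raw(struct rte_crypto_raw_dp_ctx *ctx,
	    rte_cryptodev_raw_post_dequeue_t post_deq_cb)
{
	void *out_udata[BURST];
	uint32_t n_success = 0;
	int deq_status;
	uint32_t n;

	n = rte_cryptodev_raw_dequeue_burst(ctx,
			NULL,		/* no count callback: use BURST */
			BURST, post_deq_cb, out_udata,
			1,		/* one user_data slot per op */
			&n_success, &deq_status);
	if (deq_status == 0)	/* per the API, ops are held until an
				 * explicit done
				 */
		rte_cryptodev_raw_dequeue_done(ctx, n);
	return n_success;
}
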
diff --git a/doc/guides/rel_notes/release_21_11.rst b/doc/guides/rel_notes/release_21_11.rst
index 9cbe960dbe..0afd21812f 100644
--- a/doc/guides/rel_notes/release_21_11.rst
+++ b/doc/guides/rel_notes/release_21_11.rst
@@ -76,6 +76,10 @@ New Features
 
   * Added raw vector datapath API support
 
+* **Updated NXP dpaa_sec crypto PMD.**
+
+  * Added raw vector datapath API support
+
 
 Removed Items
 -------------
diff --git a/drivers/crypto/dpaa_sec/dpaa_sec.c b/drivers/crypto/dpaa_sec/dpaa_sec.c
index 19d4684e24..7534f80195 100644
--- a/drivers/crypto/dpaa_sec/dpaa_sec.c
+++ b/drivers/crypto/dpaa_sec/dpaa_sec.c
@@ -45,10 +45,7 @@
 #include <dpaa_sec_log.h>
 #include <dpaax_iova_table.h>
 
-static uint8_t cryptodev_driver_id;
-
-static int
-dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess);
+uint8_t dpaa_cryptodev_driver_id;
 
 static inline void
 dpaa_sec_op_ending(struct dpaa_sec_op_ctx *ctx)
@@ -1745,8 +1742,8 @@ dpaa_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
 			case RTE_CRYPTO_OP_WITH_SESSION:
 				ses = (dpaa_sec_session *)
 					get_sym_session_private_data(
-							op->sym->session,
-							cryptodev_driver_id);
+						op->sym->session,
+						dpaa_cryptodev_driver_id);
 				break;
 #ifdef RTE_LIB_SECURITY
 			case RTE_CRYPTO_OP_SECURITY_SESSION:
@@ -2307,7 +2304,7 @@ dpaa_sec_detach_rxq(struct dpaa_sec_dev_private *qi, struct qman_fq *fq)
 	return -1;
 }
 
-static int
+int
 dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess)
 {
 	int ret;
@@ -3115,7 +3112,7 @@ dpaa_sec_dev_infos_get(struct rte_cryptodev *dev,
 		info->feature_flags = dev->feature_flags;
 		info->capabilities = dpaa_sec_capabilities;
 		info->sym.max_nb_sessions = internals->max_nb_sessions;
-		info->driver_id = cryptodev_driver_id;
+		info->driver_id = dpaa_cryptodev_driver_id;
 	}
 }
 
@@ -3311,7 +3308,10 @@ static struct rte_cryptodev_ops crypto_ops = {
 	.queue_pair_release   = dpaa_sec_queue_pair_release,
 	.sym_session_get_size     = dpaa_sec_sym_session_get_size,
 	.sym_session_configure    = dpaa_sec_sym_session_configure,
-	.sym_session_clear        = dpaa_sec_sym_session_clear
+	.sym_session_clear        = dpaa_sec_sym_session_clear,
+	/* Raw data-path API related operations */
+	.sym_get_raw_dp_ctx_size = dpaa_sec_get_dp_ctx_size,
+	.sym_configure_raw_dp_ctx = dpaa_sec_configure_raw_dp_ctx,
 };
 
 #ifdef RTE_LIB_SECURITY
@@ -3362,7 +3362,7 @@ dpaa_sec_dev_init(struct rte_cryptodev *cryptodev)
 
 	PMD_INIT_FUNC_TRACE();
 
-	cryptodev->driver_id = cryptodev_driver_id;
+	cryptodev->driver_id = dpaa_cryptodev_driver_id;
 	cryptodev->dev_ops = &crypto_ops;
 
 	cryptodev->enqueue_burst = dpaa_sec_enqueue_burst;
@@ -3371,6 +3371,7 @@ dpaa_sec_dev_init(struct rte_cryptodev *cryptodev)
 			RTE_CRYPTODEV_FF_HW_ACCELERATED |
 			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
 			RTE_CRYPTODEV_FF_SECURITY |
+			RTE_CRYPTODEV_FF_SYM_RAW_DP |
 			RTE_CRYPTODEV_FF_IN_PLACE_SGL |
 			RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT |
 			RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
@@ -3536,5 +3537,5 @@ static struct cryptodev_driver dpaa_sec_crypto_drv;
 
 RTE_PMD_REGISTER_DPAA(CRYPTODEV_NAME_DPAA_SEC_PMD, rte_dpaa_sec_driver);
 RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa_sec_crypto_drv, rte_dpaa_sec_driver.driver,
-		cryptodev_driver_id);
+		dpaa_cryptodev_driver_id);
 RTE_LOG_REGISTER(dpaa_logtype_sec, pmd.crypto.dpaa, NOTICE);
diff --git a/drivers/crypto/dpaa_sec/dpaa_sec.h b/drivers/crypto/dpaa_sec/dpaa_sec.h
index 368699678b..f6e83d46e7 100644
--- a/drivers/crypto/dpaa_sec/dpaa_sec.h
+++ b/drivers/crypto/dpaa_sec/dpaa_sec.h
@@ -19,6 +19,8 @@
 #define AES_CTR_IV_LEN		16
 #define AES_GCM_IV_LEN		12
 
+extern uint8_t dpaa_cryptodev_driver_id;
+
 #define DPAA_IPv6_DEFAULT_VTC_FLOW	0x60000000
 
 /* Minimum job descriptor consists of a oneword job descriptor HEADER and
@@ -117,6 +119,24 @@ struct sec_pdcp_ctxt {
 	uint32_t hfn_threshold;	/*!< HFN Threshold for key renegotiation */
 };
 #endif
+
+typedef int (*dpaa_sec_build_fd_t)(
+	void *qp, uint8_t *drv_ctx, struct rte_crypto_vec *data_vec,
+	uint16_t n_data_vecs, union rte_crypto_sym_ofs ofs,
+	struct rte_crypto_va_iova_ptr *iv,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *aad_or_auth_iv,
+	void *user_data);
+
+typedef struct dpaa_sec_job* (*dpaa_sec_build_raw_dp_fd_t)(uint8_t *drv_ctx,
+			struct rte_crypto_sgl *sgl,
+			struct rte_crypto_sgl *dest_sgl,
+			struct rte_crypto_va_iova_ptr *iv,
+			struct rte_crypto_va_iova_ptr *digest,
+			struct rte_crypto_va_iova_ptr *auth_iv,
+			union rte_crypto_sym_ofs ofs,
+			void *userdata);
+
 typedef struct dpaa_sec_session_entry {
 	struct sec_cdb cdb;	/**< cmd block associated with qp */
 	struct dpaa_sec_qp *qp[MAX_DPAA_CORES];
@@ -129,6 +149,8 @@ typedef struct dpaa_sec_session_entry {
 #ifdef RTE_LIB_SECURITY
 	enum rte_security_session_protocol proto_alg; /*!< Security Algorithm*/
 #endif
+	dpaa_sec_build_fd_t build_fd;
+	dpaa_sec_build_raw_dp_fd_t build_raw_dp_fd;
 	union {
 		struct {
 			uint8_t *data;	/**< pointer to key data */
@@ -211,7 +233,10 @@ struct dpaa_sec_job {
 #define DPAA_MAX_NB_MAX_DIGEST	32
 struct dpaa_sec_op_ctx {
 	struct dpaa_sec_job job;
-	struct rte_crypto_op *op;
+	union {
+		struct rte_crypto_op *op;
+		void *userdata;
+	};
 	struct rte_mempool *ctx_pool; /* mempool pointer for dpaa_sec_op_ctx */
 	uint32_t fd_status;
 	int64_t vtop_offset;
@@ -803,4 +828,16 @@ calc_chksum(void *buffer, int len)
 	return  result;
 }
 
+int
+dpaa_sec_configure_raw_dp_ctx(struct rte_cryptodev *dev, uint16_t qp_id,
+	struct rte_crypto_raw_dp_ctx *raw_dp_ctx,
+	enum rte_crypto_op_sess_type sess_type,
+	union rte_cryptodev_session_ctx session_ctx, uint8_t is_update);
+
+int
+dpaa_sec_get_dp_ctx_size(struct rte_cryptodev *dev);
+
+int
+dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess);
+
 #endif /* _DPAA_SEC_H_ */
diff --git a/drivers/crypto/dpaa_sec/dpaa_sec_raw_dp.c b/drivers/crypto/dpaa_sec/dpaa_sec_raw_dp.c
new file mode 100644
index 0000000000..ee0ca2e0d5
--- /dev/null
+++ b/drivers/crypto/dpaa_sec/dpaa_sec_raw_dp.c
@@ -0,0 +1,485 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2021 NXP
+ */
+
+#include <rte_byteorder.h>
+#include <rte_common.h>
+#include <rte_cryptodev_pmd.h>
+#include <rte_crypto.h>
+#include <rte_cryptodev.h>
+#ifdef RTE_LIB_SECURITY
+#include <rte_security_driver.h>
+#endif
+
+/* RTA header files */
+#include <desc/ipsec.h>
+
+#include <rte_dpaa_bus.h>
+#include <dpaa_sec.h>
+#include <dpaa_sec_log.h>
+
+struct dpaa_sec_raw_dp_ctx {
+	dpaa_sec_session *session;
+	uint32_t tail;
+	uint32_t head;
+	uint16_t cached_enqueue;
+	uint16_t cached_dequeue;
+};
+
+static __rte_always_inline int
+dpaa_sec_raw_enqueue_done(void *qp_data, uint8_t *drv_ctx, uint32_t n)
+{
+	RTE_SET_USED(qp_data);
+	RTE_SET_USED(drv_ctx);
+	RTE_SET_USED(n);
+
+	return 0;
+}
+
+static __rte_always_inline int
+dpaa_sec_raw_dequeue_done(void *qp_data, uint8_t *drv_ctx, uint32_t n)
+{
+	RTE_SET_USED(qp_data);
+	RTE_SET_USED(drv_ctx);
+	RTE_SET_USED(n);
+
+	return 0;
+}
+
+static inline struct dpaa_sec_op_ctx *
+dpaa_sec_alloc_raw_ctx(dpaa_sec_session *ses, int sg_count)
+{
+	struct dpaa_sec_op_ctx *ctx;
+	int i, retval;
+
+	retval = rte_mempool_get(
+			ses->qp[rte_lcore_id() % MAX_DPAA_CORES]->ctx_pool,
+			(void **)(&ctx));
+	if (!ctx || retval) {
+		DPAA_SEC_DP_WARN("Alloc sec descriptor failed!");
+		return NULL;
+	}
+	/*
+	 * Clear SG memory. There are 16 SG entries of 16 bytes each.
+	 * One call to dcbz_64() clears 64 bytes, hence it is called 4 times
+	 * to clear all the SG entries. dpaa_sec_alloc_ctx() is called for
+	 * each packet, and memset is costlier than dcbz_64().
+	 */
+	for (i = 0; i < sg_count && i < MAX_JOB_SG_ENTRIES; i += 4)
+		dcbz_64(&ctx->job.sg[i]);
+
+	ctx->ctx_pool = ses->qp[rte_lcore_id() % MAX_DPAA_CORES]->ctx_pool;
+	ctx->vtop_offset = (size_t) ctx - rte_mempool_virt2iova(ctx);
+
+	return ctx;
+}
+
+static struct dpaa_sec_job *
+build_dpaa_raw_dp_auth_fd(uint8_t *drv_ctx,
+			struct rte_crypto_sgl *sgl,
+			struct rte_crypto_sgl *dest_sgl,
+			struct rte_crypto_va_iova_ptr *iv,
+			struct rte_crypto_va_iova_ptr *digest,
+			struct rte_crypto_va_iova_ptr *auth_iv,
+			union rte_crypto_sym_ofs ofs,
+			void *userdata)
+{
+	RTE_SET_USED(drv_ctx);
+	RTE_SET_USED(sgl);
+	RTE_SET_USED(dest_sgl);
+	RTE_SET_USED(iv);
+	RTE_SET_USED(digest);
+	RTE_SET_USED(auth_iv);
+	RTE_SET_USED(ofs);
+	RTE_SET_USED(userdata);
+
+	return NULL;
+}
+
+static struct dpaa_sec_job *
+build_dpaa_raw_dp_cipher_fd(uint8_t *drv_ctx,
+			struct rte_crypto_sgl *sgl,
+			struct rte_crypto_sgl *dest_sgl,
+			struct rte_crypto_va_iova_ptr *iv,
+			struct rte_crypto_va_iova_ptr *digest,
+			struct rte_crypto_va_iova_ptr *auth_iv,
+			union rte_crypto_sym_ofs ofs,
+			void *userdata)
+{
+	RTE_SET_USED(digest);
+	RTE_SET_USED(auth_iv);
+	dpaa_sec_session *ses =
+		((struct dpaa_sec_raw_dp_ctx *)drv_ctx)->session;
+	struct dpaa_sec_job *cf;
+	struct dpaa_sec_op_ctx *ctx;
+	struct qm_sg_entry *sg, *out_sg, *in_sg;
+	unsigned int i;
+	uint8_t *IV_ptr = iv->va;
+	int data_len, total_len = 0, data_offset;
+
+	for (i = 0; i < sgl->num; i++)
+		total_len += sgl->vec[i].len;
+
+	data_len = total_len - ofs.ofs.cipher.head - ofs.ofs.cipher.tail;
+	data_offset = ofs.ofs.cipher.head;
+
+	/* Support lengths in bits only for SNOW3G and ZUC */
+	if (sgl->num > MAX_SG_ENTRIES) {
+		DPAA_SEC_DP_ERR("Cipher: Max sec segs supported is %d",
+				MAX_SG_ENTRIES);
+		return NULL;
+	}
+
+	ctx = dpaa_sec_alloc_raw_ctx(ses, sgl->num * 2 + 3);
+	if (!ctx)
+		return NULL;
+
+	cf = &ctx->job;
+	ctx->userdata = (void *)userdata;
+
+	/* output */
+	out_sg = &cf->sg[0];
+	out_sg->extension = 1;
+	out_sg->length = data_len;
+	qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(&cf->sg[2]));
+	cpu_to_hw_sg(out_sg);
+
+	if (dest_sgl) {
+		/* 1st seg */
+		sg = &cf->sg[2];
+		qm_sg_entry_set64(sg, dest_sgl->vec[0].iova);
+		sg->length = dest_sgl->vec[0].len - data_offset;
+		sg->offset = data_offset;
+
+		/* Successive segs */
+		for (i = 1; i < dest_sgl->num; i++) {
+			cpu_to_hw_sg(sg);
+			sg++;
+			qm_sg_entry_set64(sg, dest_sgl->vec[i].iova);
+			sg->length = dest_sgl->vec[i].len;
+		}
+	} else {
+		/* 1st seg */
+		sg = &cf->sg[2];
+		qm_sg_entry_set64(sg, sgl->vec[0].iova);
+		sg->length = sgl->vec[0].len - data_offset;
+		sg->offset = data_offset;
+
+		/* Successive segs */
+		for (i = 1; i < sgl->num; i++) {
+			cpu_to_hw_sg(sg);
+			sg++;
+			qm_sg_entry_set64(sg, sgl->vec[i].iova);
+			sg->length = sgl->vec[i].len;
+		}
+
+	}
+	sg->final = 1;
+	cpu_to_hw_sg(sg);
+
+	/* input */
+	in_sg = &cf->sg[1];
+	in_sg->extension = 1;
+	in_sg->final = 1;
+	in_sg->length = data_len + ses->iv.length;
+
+	sg++;
+	qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));
+	cpu_to_hw_sg(in_sg);
+
+	/* IV */
+	qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
+	sg->length = ses->iv.length;
+	cpu_to_hw_sg(sg);
+
+	/* 1st seg */
+	sg++;
+	qm_sg_entry_set64(sg, sgl->vec[0].iova);
+	sg->length = sgl->vec[0].len - data_offset;
+	sg->offset = data_offset;
+
+	/* Successive segs */
+	for (i = 1; i < sgl->num; i++) {
+		cpu_to_hw_sg(sg);
+		sg++;
+		qm_sg_entry_set64(sg, sgl->vec[i].iova);
+		sg->length = sgl->vec[i].len;
+	}
+	sg->final = 1;
+	cpu_to_hw_sg(sg);
+
+	return cf;
+}
+
+static uint32_t
+dpaa_sec_raw_enqueue_burst(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
+	void *user_data[], int *status)
+{
+	/* Function to transmit the frames to the given device queue pair */
+	uint32_t loop;
+	struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp_data;
+	uint16_t num_tx = 0;
+	struct qm_fd fds[DPAA_SEC_BURST], *fd;
+	uint32_t frames_to_send;
+	struct dpaa_sec_job *cf;
+	dpaa_sec_session *ses =
+			((struct dpaa_sec_raw_dp_ctx *)drv_ctx)->session;
+	uint32_t flags[DPAA_SEC_BURST] = {0};
+	struct qman_fq *inq[DPAA_SEC_BURST];
+
+	if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
+		if (rte_dpaa_portal_init((void *)0)) {
+			DPAA_SEC_ERR("Failure in affining portal");
+			return 0;
+		}
+	}
+
+	while (vec->num) {
+		frames_to_send = (vec->num > DPAA_SEC_BURST) ?
+				DPAA_SEC_BURST : vec->num;
+		for (loop = 0; loop < frames_to_send; loop++) {
+			if (unlikely(!ses->qp[rte_lcore_id() % MAX_DPAA_CORES])) {
+				if (dpaa_sec_attach_sess_q(dpaa_qp, ses)) {
+					frames_to_send = loop;
+					goto send_pkts;
+				}
+			} else if (unlikely(ses->qp[rte_lcore_id() %
+						MAX_DPAA_CORES] != dpaa_qp)) {
+				DPAA_SEC_DP_ERR("Old:sess->qp = %p"
+					" New qp = %p\n",
+					ses->qp[rte_lcore_id() %
+					MAX_DPAA_CORES], dpaa_qp);
+				frames_to_send = loop;
+				goto send_pkts;
+			}
+
+			/* Clear the unused FD fields before sending */
+			fd = &fds[loop];
+			memset(fd, 0, sizeof(struct qm_fd));
+			cf = ses->build_raw_dp_fd(drv_ctx,
+						&vec->src_sgl[loop],
+						&vec->dest_sgl[loop],
+						&vec->iv[loop],
+						&vec->digest[loop],
+						&vec->auth_iv[loop],
+						ofs,
+						user_data[loop]);
+			if (!cf) {
+				DPAA_SEC_ERR("error: Improper packet contents"
+					" for crypto operation");
+				goto skip_tx;
+			}
+			inq[loop] = ses->inq[rte_lcore_id() % MAX_DPAA_CORES];
+			fd->opaque_addr = 0;
+			fd->cmd = 0;
+			qm_fd_addr_set64(fd, rte_dpaa_mem_vtop(cf->sg));
+			fd->_format1 = qm_fd_compound;
+			fd->length29 = 2 * sizeof(struct qm_sg_entry);
+
+			status[loop] = 1;
+		}
+send_pkts:
+		loop = 0;
+		while (loop < frames_to_send) {
+			loop += qman_enqueue_multi_fq(&inq[loop], &fds[loop],
+					&flags[loop], frames_to_send - loop);
+		}
+		vec->num -= frames_to_send;
+		num_tx += frames_to_send;
+	}
+
+skip_tx:
+	dpaa_qp->tx_pkts += num_tx;
+	dpaa_qp->tx_errs += vec->num - num_tx;
+
+	return num_tx;
+}
+
+static int
+dpaa_sec_deq_raw(struct dpaa_sec_qp *qp, void **out_user_data,
+		uint8_t is_user_data_array,
+		rte_cryptodev_raw_post_dequeue_t post_dequeue,
+		int nb_ops)
+{
+	struct qman_fq *fq;
+	unsigned int pkts = 0;
+	int num_rx_bufs, ret;
+	struct qm_dqrr_entry *dq;
+	uint32_t vdqcr_flags = 0;
+	uint8_t is_success = 0;
+
+	fq = &qp->outq;
+	/*
+	 * For requests of fewer than four buffers, we set the QM_VDQCR_EXACT
+	 * flag and provide exactly the requested number of buffers.
+	 * Otherwise QM_VDQCR_EXACT is not set; the portal may then deliver
+	 * up to two more buffers than requested, so we request two fewer.
+	 */
+	if (nb_ops < 4) {
+		vdqcr_flags = QM_VDQCR_EXACT;
+		num_rx_bufs = nb_ops;
+	} else {
+		num_rx_bufs = nb_ops > DPAA_MAX_DEQUEUE_NUM_FRAMES ?
+			(DPAA_MAX_DEQUEUE_NUM_FRAMES - 2) : (nb_ops - 2);
+	}
+	ret = qman_set_vdq(fq, num_rx_bufs, vdqcr_flags);
+	if (ret)
+		return 0;
+
+	do {
+		const struct qm_fd *fd;
+		struct dpaa_sec_job *job;
+		struct dpaa_sec_op_ctx *ctx;
+
+		dq = qman_dequeue(fq);
+		if (!dq)
+			continue;
+
+		fd = &dq->fd;
+		/* sg[] is embedded in an op ctx:
+		 * sg[0] is for output,
+		 * sg[1] is for input.
+		 */
+		job = rte_dpaa_mem_ptov(qm_fd_addr_get64(fd));
+
+		ctx = container_of(job, struct dpaa_sec_op_ctx, job);
+		ctx->fd_status = fd->status;
+		if (is_user_data_array)
+			out_user_data[pkts] = ctx->userdata;
+		else
+			out_user_data[0] = ctx->userdata;
+
+		if (!ctx->fd_status) {
+			is_success = true;
+		} else {
+			is_success = false;
+			DPAA_SEC_DP_WARN("SEC return err:0x%x", ctx->fd_status);
+		}
+		post_dequeue(ctx->op, pkts, is_success);
+		pkts++;
+
+		/* report status via post_dequeue and then free the ctx memory */
+		rte_mempool_put(ctx->ctx_pool, (void *)ctx);
+
+		qman_dqrr_consume(fq, dq);
+	} while (fq->flags & QMAN_FQ_STATE_VDQCR);
+
+	return pkts;
+}
+
+
+static __rte_always_inline uint32_t
+dpaa_sec_raw_dequeue_burst(void *qp_data, uint8_t *drv_ctx,
+	rte_cryptodev_raw_get_dequeue_count_t get_dequeue_count,
+	uint32_t max_nb_to_dequeue,
+	rte_cryptodev_raw_post_dequeue_t post_dequeue,
+	void **out_user_data, uint8_t is_user_data_array,
+	uint32_t *n_success, int *dequeue_status)
+{
+	RTE_SET_USED(drv_ctx);
+	RTE_SET_USED(get_dequeue_count);
+	uint16_t num_rx;
+	struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp_data;
+	uint32_t nb_ops = max_nb_to_dequeue;
+
+	if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
+		if (rte_dpaa_portal_init((void *)0)) {
+			DPAA_SEC_ERR("Failure in affining portal");
+			return 0;
+		}
+	}
+
+	num_rx = dpaa_sec_deq_raw(dpaa_qp, out_user_data,
+			is_user_data_array, post_dequeue, nb_ops);
+
+	dpaa_qp->rx_pkts += num_rx;
+	*dequeue_status = 1;
+	*n_success = num_rx;
+
+	DPAA_SEC_DP_DEBUG("SEC Received %d Packets\n", num_rx);
+
+	return num_rx;
+}
+
+static __rte_always_inline int
+dpaa_sec_raw_enqueue(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_vec *data_vec,
+	uint16_t n_data_vecs, union rte_crypto_sym_ofs ofs,
+	struct rte_crypto_va_iova_ptr *iv,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *aad_or_auth_iv,
+	void *user_data)
+{
+	RTE_SET_USED(qp_data);
+	RTE_SET_USED(drv_ctx);
+	RTE_SET_USED(data_vec);
+	RTE_SET_USED(n_data_vecs);
+	RTE_SET_USED(ofs);
+	RTE_SET_USED(iv);
+	RTE_SET_USED(digest);
+	RTE_SET_USED(aad_or_auth_iv);
+	RTE_SET_USED(user_data);
+
+	return 0;
+}
+
+static __rte_always_inline void *
+dpaa_sec_raw_dequeue(void *qp_data, uint8_t *drv_ctx, int *dequeue_status,
+	enum rte_crypto_op_status *op_status)
+{
+	RTE_SET_USED(qp_data);
+	RTE_SET_USED(drv_ctx);
+	RTE_SET_USED(dequeue_status);
+	RTE_SET_USED(op_status);
+
+	return NULL;
+}
+
+int
+dpaa_sec_configure_raw_dp_ctx(struct rte_cryptodev *dev, uint16_t qp_id,
+	struct rte_crypto_raw_dp_ctx *raw_dp_ctx,
+	enum rte_crypto_op_sess_type sess_type,
+	union rte_cryptodev_session_ctx session_ctx, uint8_t is_update)
+{
+	dpaa_sec_session *sess;
+	struct dpaa_sec_raw_dp_ctx *dp_ctx;
+	RTE_SET_USED(qp_id);
+
+	if (!is_update) {
+		memset(raw_dp_ctx, 0, sizeof(*raw_dp_ctx));
+		raw_dp_ctx->qp_data = dev->data->queue_pairs[qp_id];
+	}
+
+	if (sess_type == RTE_CRYPTO_OP_SECURITY_SESSION)
+		sess = (dpaa_sec_session *)get_sec_session_private_data(
+				session_ctx.sec_sess);
+	else if (sess_type == RTE_CRYPTO_OP_WITH_SESSION)
+		sess = (dpaa_sec_session *)get_sym_session_private_data(
+			session_ctx.crypto_sess, dpaa_cryptodev_driver_id);
+	else
+		return -ENOTSUP;
+	raw_dp_ctx->dequeue_burst = dpaa_sec_raw_dequeue_burst;
+	raw_dp_ctx->dequeue = dpaa_sec_raw_dequeue;
+	raw_dp_ctx->dequeue_done = dpaa_sec_raw_dequeue_done;
+	raw_dp_ctx->enqueue_burst = dpaa_sec_raw_enqueue_burst;
+	raw_dp_ctx->enqueue = dpaa_sec_raw_enqueue;
+	raw_dp_ctx->enqueue_done = dpaa_sec_raw_enqueue_done;
+
+	if (sess->ctxt == DPAA_SEC_CIPHER)
+		sess->build_raw_dp_fd = build_dpaa_raw_dp_cipher_fd;
+	else if (sess->ctxt == DPAA_SEC_AUTH)
+		sess->build_raw_dp_fd = build_dpaa_raw_dp_auth_fd;
+	else
+		return -ENOTSUP;
+	dp_ctx = (struct dpaa_sec_raw_dp_ctx *)raw_dp_ctx->drv_ctx_data;
+	dp_ctx->session = sess;
+
+	return 0;
+}
+
+int
+dpaa_sec_get_dp_ctx_size(__rte_unused struct rte_cryptodev *dev)
+{
+	return sizeof(struct dpaa_sec_raw_dp_ctx);
+}
diff --git a/drivers/crypto/dpaa_sec/meson.build b/drivers/crypto/dpaa_sec/meson.build
index 44fd60e5ae..f87ad6c7e7 100644
--- a/drivers/crypto/dpaa_sec/meson.build
+++ b/drivers/crypto/dpaa_sec/meson.build
@@ -1,5 +1,5 @@
 # SPDX-License-Identifier: BSD-3-Clause
-# Copyright 2018 NXP
+# Copyright 2018-2021 NXP
 
 if not is_linux
     build = false
@@ -7,7 +7,7 @@ if not is_linux
 endif
 
 deps += ['bus_dpaa', 'mempool_dpaa', 'security']
-sources = files('dpaa_sec.c')
+sources = files('dpaa_sec.c', 'dpaa_sec_raw_dp.c')
 
 includes += include_directories('../../bus/dpaa/include')
 includes += include_directories('../../common/dpaax')
-- 
2.17.1


^ permalink raw reply	[flat|nested] 115+ messages in thread
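The enqueue/dequeue hooks registered above are not called directly by
applications; they are reached through the generic cryptodev raw
datapath API. A minimal application-side sketch, assuming queue pair 0
is already configured and a symmetric session has been created by the
caller; the helper name, the single-element burst and the omission of
the dequeue side are illustrative simplifications, not part of the
patch:

#include <errno.h>
#include <rte_cryptodev.h>
#include <rte_malloc.h>

/* Illustrative helper: push one cipher-only job through the raw
 * datapath of a PMD such as dpaa_sec.
 */
static int
raw_dp_cipher_one(uint8_t dev_id, struct rte_cryptodev_sym_session *sess,
		struct rte_crypto_vec *src_vec, uint32_t n_src,
		struct rte_crypto_va_iova_ptr *cipher_iv, void *udata)
{
	union rte_cryptodev_session_ctx sess_ctx = { .crypto_sess = sess };
	struct rte_crypto_va_iova_ptr digest = {0}, auth_iv = {0};
	struct rte_crypto_sgl sgl = { .vec = src_vec, .num = n_src };
	struct rte_crypto_sym_vec vec = { .num = 1, .src_sgl = &sgl };
	union rte_crypto_sym_ofs ofs = { .raw = 0 };
	struct rte_crypto_raw_dp_ctx *ctx;
	int enq_status, ret;

	ctx = rte_zmalloc(NULL, rte_cryptodev_get_raw_dp_ctx_size(dev_id), 0);
	if (ctx == NULL)
		return -ENOMEM;

	/* Binds the ctx to the session; dpaa_sec fills in the function
	 * pointers and selects the FD builder (cipher/auth) shown above.
	 */
	ret = rte_cryptodev_configure_raw_dp_ctx(dev_id, 0, ctx,
			RTE_CRYPTO_OP_WITH_SESSION, sess_ctx, 0);
	if (ret < 0)
		goto out;

	vec.iv = cipher_iv;
	vec.digest = &digest;	/* unused on the cipher-only path */
	vec.auth_iv = &auth_iv;	/* unused on the cipher-only path */
	ret = rte_cryptodev_raw_enqueue_burst(ctx, &vec, ofs,
			&udata, &enq_status);
out:
	rte_free(ctx);
	return ret;
}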

* [dpdk-dev] [PATCH v2 12/15] crypto/dpaa_sec: support authonly and chain with raw APIs
  2021-09-07  7:59   ` [dpdk-dev] [PATCH v2 " Hemant Agrawal
                       ` (10 preceding siblings ...)
  2021-09-07  7:59     ` [dpdk-dev] [PATCH v2 11/15] crypto/dpaa_sec: support raw datapath APIs Hemant Agrawal
@ 2021-09-07  7:59     ` Hemant Agrawal
  2021-09-07  7:59     ` [dpdk-dev] [PATCH v2 13/15] crypto/dpaa_sec: support AEAD and proto " Hemant Agrawal
                       ` (4 subsequent siblings)
  16 siblings, 0 replies; 115+ messages in thread
From: Hemant Agrawal @ 2021-09-07  7:59 UTC (permalink / raw)
  To: dev, gakhil; +Cc: konstantin.ananyev, roy.fan.zhang, Gagandeep Singh

From: Gagandeep Singh <g.singh@nxp.com>

This patch improves the raw vector support in the dpaa_sec driver
for the auth-only and cipher+auth chain use cases.
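
The chain path derives the authenticate-only region from the single
offset word shared by cipher and auth, and hands it to SEC through
fd->cmd. A worked sketch of that arithmetic with illustrative values
(encrypt-then-MAC over a frame whose first 16 bytes are authenticated
but not encrypted; the numbers are assumptions, not from the patch):

#include <stdio.h>
#include <rte_crypto_sym.h>

int
main(void)
{
	union rte_crypto_sym_ofs ofs = { .raw = 0 };

	ofs.ofs.auth.head = 0;    /* auth starts at byte 0 */
	ofs.ofs.auth.tail = 0;    /* ... and runs to the end */
	ofs.ofs.cipher.head = 16; /* cipher skips the 16-byte header */
	ofs.ofs.cipher.tail = 0;

	/* same derivation as build_dpaa_raw_dp_chain_fd() below */
	uint16_t auth_hdr_len = ofs.ofs.cipher.head - ofs.ofs.auth.head;
	uint16_t auth_tail_len = ofs.ofs.auth.tail;
	uint32_t auth_only_len = (auth_tail_len << 16) | auth_hdr_len;

	printf("auth-only hdr %u, tail %u -> fd->cmd 0x%08x\n",
		auth_hdr_len, auth_tail_len, 0x80000000u | auth_only_len);
	return 0;
}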

Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
---
 drivers/crypto/dpaa_sec/dpaa_sec.h        |   3 +-
 drivers/crypto/dpaa_sec/dpaa_sec_raw_dp.c | 296 +++++++++++++++++++++-
 2 files changed, 287 insertions(+), 12 deletions(-)

diff --git a/drivers/crypto/dpaa_sec/dpaa_sec.h b/drivers/crypto/dpaa_sec/dpaa_sec.h
index f6e83d46e7..2e0ab93ff0 100644
--- a/drivers/crypto/dpaa_sec/dpaa_sec.h
+++ b/drivers/crypto/dpaa_sec/dpaa_sec.h
@@ -135,7 +135,8 @@ typedef struct dpaa_sec_job* (*dpaa_sec_build_raw_dp_fd_t)(uint8_t *drv_ctx,
 			struct rte_crypto_va_iova_ptr *digest,
 			struct rte_crypto_va_iova_ptr *auth_iv,
 			union rte_crypto_sym_ofs ofs,
-			void *userdata);
+			void *userdata,
+			struct qm_fd *fd);
 
 typedef struct dpaa_sec_session_entry {
 	struct sec_cdb cdb;	/**< cmd block associated with qp */
diff --git a/drivers/crypto/dpaa_sec/dpaa_sec_raw_dp.c b/drivers/crypto/dpaa_sec/dpaa_sec_raw_dp.c
index ee0ca2e0d5..4e34629f18 100644
--- a/drivers/crypto/dpaa_sec/dpaa_sec_raw_dp.c
+++ b/drivers/crypto/dpaa_sec/dpaa_sec_raw_dp.c
@@ -12,6 +12,7 @@
 #endif
 
 /* RTA header files */
+#include <desc/algo.h>
 #include <desc/ipsec.h>
 
 #include <rte_dpaa_bus.h>
@@ -26,6 +27,17 @@ struct dpaa_sec_raw_dp_ctx {
 	uint16_t cached_dequeue;
 };
 
+static inline int
+is_encode(dpaa_sec_session *ses)
+{
+	return ses->dir == DIR_ENC;
+}
+
+static inline int is_decode(dpaa_sec_session *ses)
+{
+	return ses->dir == DIR_DEC;
+}
+
 static __rte_always_inline int
 dpaa_sec_raw_enqueue_done(void *qp_data, uint8_t *drv_ctx, uint32_t n)
 {
@@ -82,18 +94,276 @@ build_dpaa_raw_dp_auth_fd(uint8_t *drv_ctx,
 			struct rte_crypto_va_iova_ptr *digest,
 			struct rte_crypto_va_iova_ptr *auth_iv,
 			union rte_crypto_sym_ofs ofs,
-			void *userdata)
+			void *userdata,
+			struct qm_fd *fd)
 {
-	RTE_SET_USED(drv_ctx);
-	RTE_SET_USED(sgl);
 	RTE_SET_USED(dest_sgl);
 	RTE_SET_USED(iv);
-	RTE_SET_USED(digest);
 	RTE_SET_USED(auth_iv);
-	RTE_SET_USED(ofs);
-	RTE_SET_USED(userdata);
+	RTE_SET_USED(fd);
 
-	return NULL;
+	dpaa_sec_session *ses =
+		((struct dpaa_sec_raw_dp_ctx *)drv_ctx)->session;
+	struct dpaa_sec_job *cf;
+	struct dpaa_sec_op_ctx *ctx;
+	struct qm_sg_entry *sg, *out_sg, *in_sg;
+	phys_addr_t start_addr;
+	uint8_t *old_digest, extra_segs;
+	int data_len, data_offset, total_len = 0;
+	unsigned int i;
+
+	for (i = 0; i < sgl->num; i++)
+		total_len += sgl->vec[i].len;
+
+	data_len = total_len - ofs.ofs.auth.head - ofs.ofs.auth.tail;
+	data_offset =  ofs.ofs.auth.head;
+
+	/* Support lengths in bits only for SNOW3G and ZUC */
+
+	if (is_decode(ses))
+		extra_segs = 3;
+	else
+		extra_segs = 2;
+
+	if (sgl->num > MAX_SG_ENTRIES) {
+		DPAA_SEC_DP_ERR("Auth: Max sec segs supported is %d",
+				MAX_SG_ENTRIES);
+		return NULL;
+	}
+	ctx = dpaa_sec_alloc_raw_ctx(ses, sgl->num * 2 + extra_segs);
+	if (!ctx)
+		return NULL;
+
+	cf = &ctx->job;
+	ctx->userdata = (void *)userdata;
+	old_digest = ctx->digest;
+
+	/* output */
+	out_sg = &cf->sg[0];
+	qm_sg_entry_set64(out_sg, digest->iova);
+	out_sg->length = ses->digest_length;
+	cpu_to_hw_sg(out_sg);
+
+	/* input */
+	in_sg = &cf->sg[1];
+	/* need to extend the input to a compound frame */
+	in_sg->extension = 1;
+	in_sg->final = 1;
+	in_sg->length = data_len;
+	qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(&cf->sg[2]));
+
+	/* 1st seg */
+	sg = in_sg + 1;
+
+	if (ses->iv.length) {
+		uint8_t *iv_ptr;
+
+		iv_ptr = rte_crypto_op_ctod_offset(userdata, uint8_t *,
+						   ses->iv.offset);
+
+		if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
+			iv_ptr = conv_to_snow_f9_iv(iv_ptr);
+			sg->length = 12;
+		} else if (ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
+			iv_ptr = conv_to_zuc_eia_iv(iv_ptr);
+			sg->length = 8;
+		} else {
+			sg->length = ses->iv.length;
+		}
+		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(iv_ptr));
+		in_sg->length += sg->length;
+		cpu_to_hw_sg(sg);
+		sg++;
+	}
+
+	qm_sg_entry_set64(sg, sgl->vec[0].iova);
+	sg->offset = data_offset;
+
+	if (data_len <= (int)(sgl->vec[0].len - data_offset)) {
+		sg->length = data_len;
+	} else {
+		sg->length = sgl->vec[0].len - data_offset;
+		data_len -= sg->length;
+
+		/* remaining i/p segs */
+		for (i = 1; i < sgl->num; i++) {
+			cpu_to_hw_sg(sg);
+			sg++;
+			qm_sg_entry_set64(sg, sgl->vec[i].iova);
+			if (data_len > (int)sgl->vec[i].len)
+				sg->length = sgl->vec[i].len;
+			else
+				sg->length = data_len;
+
+			data_len = data_len - sg->length;
+			if (data_len < 1)
+				break;
+		}
+	}
+
+	if (is_decode(ses)) {
+		/* Digest verification case */
+		cpu_to_hw_sg(sg);
+		sg++;
+		rte_memcpy(old_digest, digest->va,
+				ses->digest_length);
+		start_addr = rte_dpaa_mem_vtop(old_digest);
+		qm_sg_entry_set64(sg, start_addr);
+		sg->length = ses->digest_length;
+		in_sg->length += ses->digest_length;
+	}
+	sg->final = 1;
+	cpu_to_hw_sg(sg);
+	cpu_to_hw_sg(in_sg);
+
+	return cf;
+}
+
+static inline struct dpaa_sec_job *
+build_dpaa_raw_dp_chain_fd(uint8_t *drv_ctx,
+			struct rte_crypto_sgl *sgl,
+			struct rte_crypto_sgl *dest_sgl,
+			struct rte_crypto_va_iova_ptr *iv,
+			struct rte_crypto_va_iova_ptr *digest,
+			struct rte_crypto_va_iova_ptr *auth_iv,
+			union rte_crypto_sym_ofs ofs,
+			void *userdata,
+			struct qm_fd *fd)
+{
+	RTE_SET_USED(auth_iv);
+
+	dpaa_sec_session *ses =
+		((struct dpaa_sec_raw_dp_ctx *)drv_ctx)->session;
+	struct dpaa_sec_job *cf;
+	struct dpaa_sec_op_ctx *ctx;
+	struct qm_sg_entry *sg, *out_sg, *in_sg;
+	uint8_t *IV_ptr = iv->va;
+	unsigned int i;
+	uint16_t auth_hdr_len = ofs.ofs.cipher.head -
+				ofs.ofs.auth.head;
+	uint16_t auth_tail_len = ofs.ofs.auth.tail;
+	uint32_t auth_only_len = (auth_tail_len << 16) | auth_hdr_len;
+	int data_len = 0, auth_len = 0, cipher_len = 0;
+
+	for (i = 0; i < sgl->num; i++)
+		data_len += sgl->vec[i].len;
+
+	cipher_len = data_len - ofs.ofs.cipher.head - ofs.ofs.cipher.tail;
+	auth_len = data_len - ofs.ofs.auth.head - ofs.ofs.auth.tail;
+
+	if (sgl->num > MAX_SG_ENTRIES) {
+		DPAA_SEC_DP_ERR("Cipher-Auth: Max sec segs supported is %d",
+				MAX_SG_ENTRIES);
+		return NULL;
+	}
+
+	ctx = dpaa_sec_alloc_raw_ctx(ses, sgl->num * 2 + 4);
+	if (!ctx)
+		return NULL;
+
+	cf = &ctx->job;
+	ctx->userdata = (void *)userdata;
+
+	rte_prefetch0(cf->sg);
+
+	/* output */
+	out_sg = &cf->sg[0];
+	out_sg->extension = 1;
+	if (is_encode(ses))
+		out_sg->length = cipher_len + ses->digest_length;
+	else
+		out_sg->length = cipher_len;
+
+	/* output sg entries */
+	sg = &cf->sg[2];
+	qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(sg));
+	cpu_to_hw_sg(out_sg);
+
+	/* 1st seg */
+	if (dest_sgl) {
+		qm_sg_entry_set64(sg, dest_sgl->vec[0].iova);
+		sg->length = dest_sgl->vec[0].len - ofs.ofs.cipher.head;
+		sg->offset = ofs.ofs.cipher.head;
+
+		/* Successive segs */
+		for (i = 1; i < dest_sgl->num; i++) {
+			cpu_to_hw_sg(sg);
+			sg++;
+			qm_sg_entry_set64(sg, dest_sgl->vec[i].iova);
+			sg->length = dest_sgl->vec[i].len;
+		}
+	} else {
+		qm_sg_entry_set64(sg, sgl->vec[0].iova);
+		sg->length = sgl->vec[0].len - ofs.ofs.cipher.head;
+		sg->offset = ofs.ofs.cipher.head;
+
+		/* Successive segs */
+		for (i = 1; i < sgl->num; i++) {
+			cpu_to_hw_sg(sg);
+			sg++;
+			qm_sg_entry_set64(sg, sgl->vec[i].iova);
+			sg->length = sgl->vec[i].len;
+		}
+	}
+
+	if (is_encode(ses)) {
+		cpu_to_hw_sg(sg);
+		/* set auth output */
+		sg++;
+		qm_sg_entry_set64(sg, digest->iova);
+		sg->length = ses->digest_length;
+	}
+	sg->final = 1;
+	cpu_to_hw_sg(sg);
+
+	/* input */
+	in_sg = &cf->sg[1];
+	in_sg->extension = 1;
+	in_sg->final = 1;
+	if (is_encode(ses))
+		in_sg->length = ses->iv.length + auth_len;
+	else
+		in_sg->length = ses->iv.length + auth_len
+						+ ses->digest_length;
+
+	/* input sg entries */
+	sg++;
+	qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));
+	cpu_to_hw_sg(in_sg);
+
+	/* 1st seg IV */
+	qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
+	sg->length = ses->iv.length;
+	cpu_to_hw_sg(sg);
+
+	/* 2 seg */
+	sg++;
+	qm_sg_entry_set64(sg, sgl->vec[0].iova);
+	sg->length = sgl->vec[0].len - ofs.ofs.auth.head;
+	sg->offset = ofs.ofs.auth.head;
+
+	/* Successive segs */
+	for (i = 1; i < sgl->num; i++) {
+		cpu_to_hw_sg(sg);
+		sg++;
+		qm_sg_entry_set64(sg, sgl->vec[i].iova);
+		sg->length = sgl->vec[i].len;
+	}
+
+	if (is_decode(ses)) {
+		cpu_to_hw_sg(sg);
+		sg++;
+		memcpy(ctx->digest, digest->va,
+			ses->digest_length);
+		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(ctx->digest));
+		sg->length = ses->digest_length;
+	}
+	sg->final = 1;
+	cpu_to_hw_sg(sg);
+
+	if (auth_only_len)
+		fd->cmd = 0x80000000 | auth_only_len;
+
+	return cf;
 }
 
 static struct dpaa_sec_job *
@@ -104,10 +374,13 @@ build_dpaa_raw_dp_cipher_fd(uint8_t *drv_ctx,
 			struct rte_crypto_va_iova_ptr *digest,
 			struct rte_crypto_va_iova_ptr *auth_iv,
 			union rte_crypto_sym_ofs ofs,
-			void *userdata)
+			void *userdata,
+			struct qm_fd *fd)
 {
 	RTE_SET_USED(digest);
 	RTE_SET_USED(auth_iv);
+	RTE_SET_USED(fd);
+
 	dpaa_sec_session *ses =
 		((struct dpaa_sec_raw_dp_ctx *)drv_ctx)->session;
 	struct dpaa_sec_job *cf;
@@ -264,15 +537,14 @@ dpaa_sec_raw_enqueue_burst(void *qp_data, uint8_t *drv_ctx,
 						&vec->digest[loop],
 						&vec->auth_iv[loop],
 						ofs,
-						user_data[loop]);
+						user_data[loop],
+						fd);
 			if (!cf) {
 				DPAA_SEC_ERR("error: Improper packet contents"
 					" for crypto operation");
 				goto skip_tx;
 			}
 			inq[loop] = ses->inq[rte_lcore_id() % MAX_DPAA_CORES];
-			fd->opaque_addr = 0;
-			fd->cmd = 0;
 			qm_fd_addr_set64(fd, rte_dpaa_mem_vtop(cf->sg));
 			fd->_format1 = qm_fd_compound;
 			fd->length29 = 2 * sizeof(struct qm_sg_entry);
@@ -470,6 +742,8 @@ dpaa_sec_configure_raw_dp_ctx(struct rte_cryptodev *dev, uint16_t qp_id,
 		sess->build_raw_dp_fd = build_dpaa_raw_dp_cipher_fd;
 	else if (sess->ctxt == DPAA_SEC_AUTH)
 		sess->build_raw_dp_fd = build_dpaa_raw_dp_auth_fd;
+	else if (sess->ctxt == DPAA_SEC_CIPHER_HASH)
+		sess->build_raw_dp_fd = build_dpaa_raw_dp_chain_fd;
 	else
 		return -ENOTSUP;
 	dp_ctx = (struct dpaa_sec_raw_dp_ctx *)raw_dp_ctx->drv_ctx_data;
-- 
2.17.1


^ permalink raw reply	[flat|nested] 115+ messages in thread
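For readers unfamiliar with QMan compound frames, a condensed map of
what the chain builder above lays out, reconstructed from the code (the
diagram itself is not part of the patch):

/*
 * fd -> cf->sg[0] (output, extension) -> cf->sg[2..]:
 *          [cipher-range output segs][digest seg, encrypt only]
 * fd -> cf->sg[1] (input, extension) -> entries after the output list:
 *          [IV seg][auth-range input segs][digest seg, decrypt only]
 *
 * If the auth range extends beyond the cipher range, the extra head
 * and tail byte counts are packed into fd->cmd as
 * 0x80000000 | (auth_tail_len << 16) | auth_hdr_len.
 */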

* [dpdk-dev] [PATCH v2 13/15] crypto/dpaa_sec: support AEAD and proto with raw APIs
  2021-09-07  7:59   ` [dpdk-dev] [PATCH v2 " Hemant Agrawal
                       ` (11 preceding siblings ...)
  2021-09-07  7:59     ` [dpdk-dev] [PATCH v2 12/15] crypto/dpaa_sec: support authonly and chain with raw APIs Hemant Agrawal
@ 2021-09-07  7:59     ` Hemant Agrawal
  2021-09-07  7:59     ` [dpdk-dev] [PATCH v2 14/15] test/crypto: add raw API test for dpaax Hemant Agrawal
                       ` (3 subsequent siblings)
  16 siblings, 0 replies; 115+ messages in thread
From: Hemant Agrawal @ 2021-09-07  7:59 UTC (permalink / raw)
  To: dev, gakhil; +Cc: konstantin.ananyev, roy.fan.zhang, Gagandeep Singh

From: Gagandeep Singh <g.singh@nxp.com>

This adds support for AEAD and security protocol offload with the
raw APIs in the dpaa_sec driver.
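
The protocol path leans on the tot_len field introduced earlier in the
series: the last output segment is sized to the buffer's total length
so SEC has room to write the expanded (for example ESP- or
PDCP-encapsulated) frame. A minimal sketch of how a caller might
describe such an output buffer; the helper name, the 2048-byte size and
the single-segment layout are assumptions for illustration:

#include <rte_crypto_sym.h>

/* Describe one output buffer for protocol offload: only plain_len
 * bytes hold data, but tot_len advertises the whole buffer so the
 * driver can let SEC grow the frame in place.
 */
static void
fill_dest_sgl(struct rte_crypto_sgl *dest_sgl, struct rte_crypto_vec *v,
		void *buf, rte_iova_t iova, uint32_t plain_len)
{
	v->base = buf;
	v->iova = iova;
	v->len = plain_len;   /* current payload length */
	v->tot_len = 2048;    /* writable space, > len for expansion */

	dest_sgl->vec = v;
	dest_sgl->num = 1;
}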

Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
---
 drivers/crypto/dpaa_sec/dpaa_sec_raw_dp.c | 293 ++++++++++++++++++++++
 1 file changed, 293 insertions(+)

diff --git a/drivers/crypto/dpaa_sec/dpaa_sec_raw_dp.c b/drivers/crypto/dpaa_sec/dpaa_sec_raw_dp.c
index 4e34629f18..b0c22a7c26 100644
--- a/drivers/crypto/dpaa_sec/dpaa_sec_raw_dp.c
+++ b/drivers/crypto/dpaa_sec/dpaa_sec_raw_dp.c
@@ -218,6 +218,163 @@ build_dpaa_raw_dp_auth_fd(uint8_t *drv_ctx,
 	return cf;
 }
 
+static inline struct dpaa_sec_job *
+build_raw_cipher_auth_gcm_sg(uint8_t *drv_ctx,
+			struct rte_crypto_sgl *sgl,
+			struct rte_crypto_sgl *dest_sgl,
+			struct rte_crypto_va_iova_ptr *iv,
+			struct rte_crypto_va_iova_ptr *digest,
+			struct rte_crypto_va_iova_ptr *auth_iv,
+			union rte_crypto_sym_ofs ofs,
+			void *userdata,
+			struct qm_fd *fd)
+{
+	dpaa_sec_session *ses =
+		((struct dpaa_sec_raw_dp_ctx *)drv_ctx)->session;
+	struct dpaa_sec_job *cf;
+	struct dpaa_sec_op_ctx *ctx;
+	struct qm_sg_entry *sg, *out_sg, *in_sg;
+	uint8_t extra_req_segs;
+	uint8_t *IV_ptr = iv->va;
+	int data_len = 0, aead_len = 0;
+	unsigned int i;
+
+	for (i = 0; i < sgl->num; i++)
+		data_len += sgl->vec[i].len;
+
+	extra_req_segs = 4;
+	aead_len = data_len - ofs.ofs.cipher.head - ofs.ofs.cipher.tail;
+
+	if (ses->auth_only_len)
+		extra_req_segs++;
+
+	if (sgl->num > MAX_SG_ENTRIES) {
+		DPAA_SEC_DP_ERR("AEAD: Max sec segs supported is %d",
+				MAX_SG_ENTRIES);
+		return NULL;
+	}
+
+	ctx = dpaa_sec_alloc_raw_ctx(ses,  sgl->num * 2 + extra_req_segs);
+	if (!ctx)
+		return NULL;
+
+	cf = &ctx->job;
+	ctx->userdata = (void *)userdata;
+
+	rte_prefetch0(cf->sg);
+
+	/* output */
+	out_sg = &cf->sg[0];
+	out_sg->extension = 1;
+	if (is_encode(ses))
+		out_sg->length = aead_len + ses->digest_length;
+	else
+		out_sg->length = aead_len;
+
+	/* output sg entries */
+	sg = &cf->sg[2];
+	qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(sg));
+	cpu_to_hw_sg(out_sg);
+
+	if (dest_sgl) {
+		/* 1st seg */
+		qm_sg_entry_set64(sg, dest_sgl->vec[0].iova);
+		sg->length = dest_sgl->vec[0].len - ofs.ofs.cipher.head;
+		sg->offset = ofs.ofs.cipher.head;
+
+		/* Successive segs */
+		for (i = 1; i < dest_sgl->num; i++) {
+			cpu_to_hw_sg(sg);
+			sg++;
+			qm_sg_entry_set64(sg, dest_sgl->vec[i].iova);
+			sg->length = dest_sgl->vec[i].len;
+		}
+	} else {
+		/* 1st seg */
+		qm_sg_entry_set64(sg, sgl->vec[0].iova);
+		sg->length = sgl->vec[0].len - ofs.ofs.cipher.head;
+		sg->offset = ofs.ofs.cipher.head;
+
+		/* Successive segs */
+		for (i = 1; i < sgl->num; i++) {
+			cpu_to_hw_sg(sg);
+			sg++;
+			qm_sg_entry_set64(sg, sgl->vec[i].iova);
+			sg->length = sgl->vec[i].len;
+		}
+
+	}
+
+	if (is_encode(ses)) {
+		cpu_to_hw_sg(sg);
+		/* set auth output */
+		sg++;
+		qm_sg_entry_set64(sg, digest->iova);
+		sg->length = ses->digest_length;
+	}
+	sg->final = 1;
+	cpu_to_hw_sg(sg);
+
+	/* input */
+	in_sg = &cf->sg[1];
+	in_sg->extension = 1;
+	in_sg->final = 1;
+	if (is_encode(ses))
+		in_sg->length = ses->iv.length + aead_len
+						+ ses->auth_only_len;
+	else
+		in_sg->length = ses->iv.length + aead_len
+				+ ses->auth_only_len + ses->digest_length;
+
+	/* input sg entries */
+	sg++;
+	qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));
+	cpu_to_hw_sg(in_sg);
+
+	/* 1st seg IV */
+	qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
+	sg->length = ses->iv.length;
+	cpu_to_hw_sg(sg);
+
+	/* 2 seg auth only */
+	if (ses->auth_only_len) {
+		sg++;
+		qm_sg_entry_set64(sg, auth_iv->iova);
+		sg->length = ses->auth_only_len;
+		cpu_to_hw_sg(sg);
+	}
+
+	/* 3rd seg */
+	sg++;
+	qm_sg_entry_set64(sg, sgl->vec[0].iova);
+	sg->length = sgl->vec[0].len - ofs.ofs.cipher.head;
+	sg->offset = ofs.ofs.cipher.head;
+
+	/* Successive segs */
+	for (i = 1; i < sgl->num; i++) {
+		cpu_to_hw_sg(sg);
+		sg++;
+		qm_sg_entry_set64(sg, sgl->vec[i].iova);
+		sg->length =  sgl->vec[i].len;
+	}
+
+	if (is_decode(ses)) {
+		cpu_to_hw_sg(sg);
+		sg++;
+		memcpy(ctx->digest, digest->va,
+			ses->digest_length);
+		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(ctx->digest));
+		sg->length = ses->digest_length;
+	}
+	sg->final = 1;
+	cpu_to_hw_sg(sg);
+
+	if (ses->auth_only_len)
+		fd->cmd = 0x80000000 | ses->auth_only_len;
+
+	return cf;
+}
+
 static inline struct dpaa_sec_job *
 build_dpaa_raw_dp_chain_fd(uint8_t *drv_ctx,
 			struct rte_crypto_sgl *sgl,
@@ -484,6 +641,135 @@ build_dpaa_raw_dp_cipher_fd(uint8_t *drv_ctx,
 	return cf;
 }
 
+#ifdef RTE_LIBRTE_SECURITY
+static inline struct dpaa_sec_job *
+build_dpaa_raw_proto_sg(uint8_t *drv_ctx,
+			struct rte_crypto_sgl *sgl,
+			struct rte_crypto_sgl *dest_sgl,
+			struct rte_crypto_va_iova_ptr *iv,
+			struct rte_crypto_va_iova_ptr *digest,
+			struct rte_crypto_va_iova_ptr *auth_iv,
+			union rte_crypto_sym_ofs ofs,
+			void *userdata,
+			struct qm_fd *fd)
+{
+	RTE_SET_USED(iv);
+	RTE_SET_USED(digest);
+	RTE_SET_USED(auth_iv);
+	RTE_SET_USED(ofs);
+
+	dpaa_sec_session *ses =
+		((struct dpaa_sec_raw_dp_ctx *)drv_ctx)->session;
+	struct dpaa_sec_job *cf;
+	struct dpaa_sec_op_ctx *ctx;
+	struct qm_sg_entry *sg, *out_sg, *in_sg;
+	uint32_t in_len = 0, out_len = 0;
+	unsigned int i;
+
+	if (sgl->num > MAX_SG_ENTRIES) {
+		DPAA_SEC_DP_ERR("Proto: Max sec segs supported is %d",
+				MAX_SG_ENTRIES);
+		return NULL;
+	}
+
+	ctx = dpaa_sec_alloc_raw_ctx(ses, sgl->num * 2 + 4);
+	if (!ctx)
+		return NULL;
+	cf = &ctx->job;
+	ctx->userdata = (void *)userdata;
+	/* output */
+	out_sg = &cf->sg[0];
+	out_sg->extension = 1;
+	qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(&cf->sg[2]));
+
+	if (dest_sgl) {
+		/* 1st seg */
+		sg = &cf->sg[2];
+		qm_sg_entry_set64(sg, dest_sgl->vec[0].iova);
+		sg->offset = 0;
+		sg->length = dest_sgl->vec[0].len;
+		out_len += sg->length;
+
+		for (i = 1; i < dest_sgl->num; i++) {
+		/* Successive segs */
+			cpu_to_hw_sg(sg);
+			sg++;
+			qm_sg_entry_set64(sg, dest_sgl->vec[i].iova);
+			sg->offset = 0;
+			sg->length = dest_sgl->vec[i].len;
+			out_len += sg->length;
+		}
+		sg->length = dest_sgl->vec[i - 1].tot_len;
+	} else {
+		/* 1st seg */
+		sg = &cf->sg[2];
+		qm_sg_entry_set64(sg, sgl->vec[0].iova);
+		sg->offset = 0;
+		sg->length = sgl->vec[0].len;
+		out_len += sg->length;
+
+		for (i = 1; i < sgl->num; i++) {
+		/* Successive segs */
+			cpu_to_hw_sg(sg);
+			sg++;
+			qm_sg_entry_set64(sg, sgl->vec[i].iova);
+			sg->offset = 0;
+			sg->length = sgl->vec[i].len;
+			out_len += sg->length;
+		}
+		sg->length = sgl->vec[i - 1].tot_len;
+
+	}
+	out_len += sg->length;
+	sg->final = 1;
+	cpu_to_hw_sg(sg);
+
+	out_sg->length = out_len;
+	cpu_to_hw_sg(out_sg);
+
+	/* input */
+	in_sg = &cf->sg[1];
+	in_sg->extension = 1;
+	in_sg->final = 1;
+	in_len = sgl->vec[0].len;
+
+	sg++;
+	qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));
+
+	/* 1st seg */
+	qm_sg_entry_set64(sg, sgl->vec[0].iova);
+	sg->length = sgl->vec[0].len;
+	sg->offset = 0;
+
+	/* Successive segs */
+	for (i = 1; i < sgl->num; i++) {
+		cpu_to_hw_sg(sg);
+		sg++;
+		qm_sg_entry_set64(sg, sgl->vec[i].iova);
+		sg->length = sgl->vec[i].len;
+		sg->offset = 0;
+		in_len += sg->length;
+	}
+	sg->final = 1;
+	cpu_to_hw_sg(sg);
+
+	in_sg->length = in_len;
+	cpu_to_hw_sg(in_sg);
+
+	if ((ses->ctxt == DPAA_SEC_PDCP) && ses->pdcp.hfn_ovd) {
+		fd->cmd = 0x80000000 |
+			*((uint32_t *)((uint8_t *)userdata +
+			ses->pdcp.hfn_ovd_offset));
+		DPAA_SEC_DP_DEBUG("Per packet HFN: %x, ovd:%u\n",
+			*((uint32_t *)((uint8_t *)userdata +
+			ses->pdcp.hfn_ovd_offset)),
+			ses->pdcp.hfn_ovd);
+	}
+
+	return cf;
+}
+#endif
+
 static uint32_t
 dpaa_sec_raw_enqueue_burst(void *qp_data, uint8_t *drv_ctx,
 	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
@@ -744,6 +1030,13 @@ dpaa_sec_configure_raw_dp_ctx(struct rte_cryptodev *dev, uint16_t qp_id,
 		sess->build_raw_dp_fd = build_dpaa_raw_dp_auth_fd;
 	else if (sess->ctxt == DPAA_SEC_CIPHER_HASH)
 		sess->build_raw_dp_fd = build_dpaa_raw_dp_chain_fd;
+	else if (sess->ctxt == DPAA_SEC_AEAD)
+		sess->build_raw_dp_fd = build_raw_cipher_auth_gcm_sg;
+#ifdef RTE_LIBRTE_SECURITY
+	else if (sess->ctxt == DPAA_SEC_IPSEC ||
+			sess->ctxt == DPAA_SEC_PDCP)
+		sess->build_raw_dp_fd = build_dpaa_raw_proto_sg;
+#endif
 	else
 		return -ENOTSUP;
 	dp_ctx = (struct dpaa_sec_raw_dp_ctx *)raw_dp_ctx->drv_ctx_data;
-- 
2.17.1


^ permalink raw reply	[flat|nested] 115+ messages in thread

* [dpdk-dev] [PATCH v2 14/15] test/crypto: add raw API test for dpaax
  2021-09-07  7:59   ` [dpdk-dev] [PATCH v2 " Hemant Agrawal
                       ` (12 preceding siblings ...)
  2021-09-07  7:59     ` [dpdk-dev] [PATCH v2 13/15] crypto/dpaa_sec: support AEAD and proto " Hemant Agrawal
@ 2021-09-07  7:59     ` Hemant Agrawal
  2021-09-07  7:59     ` [dpdk-dev] [PATCH v2 15/15] test/crypto: add raw API support in 5G algos Hemant Agrawal
                       ` (2 subsequent siblings)
  16 siblings, 0 replies; 115+ messages in thread
From: Hemant Agrawal @ 2021-09-07  7:59 UTC (permalink / raw)
  To: dev, gakhil; +Cc: konstantin.ananyev, roy.fan.zhang, Gagandeep Singh

This patch adds support for raw API tests on the
dpaa_sec and dpaa2_sec platforms.
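
The tests gate on the raw datapath capability before exercising it,
both per test case and at suite registration (see the diff below). The
same check, factored as a standalone helper for clarity (the helper
name is an illustration, not part of the patch):

#include <rte_cryptodev.h>

/* Return non-zero when the device advertises the raw datapath API. */
static int
raw_dp_supported(uint8_t dev_id)
{
	struct rte_cryptodev_info info;

	rte_cryptodev_info_get(dev_id, &info);
	return (info.feature_flags & RTE_CRYPTODEV_FF_SYM_RAW_DP) != 0;
}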

Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
Signed-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>
---
 app/test/test_cryptodev.c | 116 +++++++++++++++++++++++++++++++++++---
 1 file changed, 109 insertions(+), 7 deletions(-)

diff --git a/app/test/test_cryptodev.c b/app/test/test_cryptodev.c
index ed63524edc..de4fb0f3d1 100644
--- a/app/test/test_cryptodev.c
+++ b/app/test/test_cryptodev.c
@@ -175,11 +175,11 @@ process_sym_raw_dp_op(uint8_t dev_id, uint16_t qp_id,
 {
 	struct rte_crypto_sym_op *sop = op->sym;
 	struct rte_crypto_op *ret_op = NULL;
-	struct rte_crypto_vec data_vec[UINT8_MAX];
+	struct rte_crypto_vec data_vec[UINT8_MAX], dest_data_vec[UINT8_MAX];
 	struct rte_crypto_va_iova_ptr cipher_iv, digest, aad_auth_iv;
 	union rte_crypto_sym_ofs ofs;
 	struct rte_crypto_sym_vec vec;
-	struct rte_crypto_sgl sgl;
+	struct rte_crypto_sgl sgl, dest_sgl;
 	uint32_t max_len;
 	union rte_cryptodev_session_ctx sess;
 	uint32_t count = 0;
@@ -315,6 +315,19 @@ process_sym_raw_dp_op(uint8_t dev_id, uint16_t qp_id,
 	}
 
 	sgl.num = n;
+	/* Out of place */
+	if (sop->m_dst != NULL) {
+		dest_sgl.vec = dest_data_vec;
+		vec.dest_sgl = &dest_sgl;
+		n = rte_crypto_mbuf_to_vec(sop->m_dst, 0, max_len,
+				dest_data_vec, RTE_DIM(dest_data_vec));
+		if (n < 0 || n > sop->m_dst->nb_segs) {
+			op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+			goto exit;
+		}
+		dest_sgl.num = n;
+	} else
+		vec.dest_sgl = NULL;
 
 	if (rte_cryptodev_raw_enqueue_burst(ctx, &vec, ofs, (void **)&op,
 			&enqueue_status) < 1) {
@@ -8305,10 +8318,21 @@ test_pdcp_proto_SGL(int i, int oop,
 	int to_trn_tbl[16];
 	int segs = 1;
 	unsigned int trn_data = 0;
+	struct rte_cryptodev_info dev_info;
+	uint64_t feat_flags;
 	struct rte_security_ctx *ctx = (struct rte_security_ctx *)
 				rte_cryptodev_get_sec_ctx(
 				ts_params->valid_devs[0]);
+	struct rte_mbuf *temp_mbuf;
+
+	rte_cryptodev_info_get(ts_params->valid_devs[0], &dev_info);
+	feat_flags = dev_info.feature_flags;
 
+	if ((global_api_test_type == CRYPTODEV_RAW_API_TEST) &&
+			(!(feat_flags & RTE_CRYPTODEV_FF_SYM_RAW_DP))) {
+		printf("Device does not support RAW data-path APIs.\n");
+		return -ENOTSUP;
+	}
 	/* Verify the capabilities */
 	struct rte_security_capability_idx sec_cap_idx;
 
@@ -8492,8 +8516,23 @@ test_pdcp_proto_SGL(int i, int oop,
 		ut_params->op->sym->m_dst = ut_params->obuf;
 
 	/* Process crypto operation */
-	if (process_crypto_request(ts_params->valid_devs[0], ut_params->op)
-		== NULL) {
+	temp_mbuf = ut_params->op->sym->m_src;
+	if (global_api_test_type == CRYPTODEV_RAW_API_TEST) {
+		/* filling lengths */
+		while (temp_mbuf) {
+			ut_params->op->sym->cipher.data.length
+				+= temp_mbuf->pkt_len;
+			ut_params->op->sym->auth.data.length
+				+= temp_mbuf->pkt_len;
+			temp_mbuf = temp_mbuf->next;
+		}
+		process_sym_raw_dp_op(ts_params->valid_devs[0], 0,
+			ut_params->op, 1, 1, 0, 0);
+	} else {
+		ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+							ut_params->op);
+	}
+	if (ut_params->op == NULL) {
 		printf("TestCase %s()-%d line %d failed %s: ",
 			__func__, i, __LINE__,
 			"failed to process sym crypto op");
@@ -9934,6 +9973,7 @@ test_authenticated_encryption_oop(const struct aead_test_data *tdata)
 	int retval;
 	uint8_t *ciphertext, *auth_tag;
 	uint16_t plaintext_pad_len;
+	struct rte_cryptodev_info dev_info;
 
 	/* Verify the capabilities */
 	struct rte_cryptodev_sym_capability_idx cap_idx;
@@ -9943,7 +9983,11 @@ test_authenticated_encryption_oop(const struct aead_test_data *tdata)
 			&cap_idx) == NULL)
 		return TEST_SKIPPED;
 
-	if (global_api_test_type == CRYPTODEV_RAW_API_TEST)
+	rte_cryptodev_info_get(ts_params->valid_devs[0], &dev_info);
+	uint64_t feat_flags = dev_info.feature_flags;
+
+	if ((global_api_test_type == CRYPTODEV_RAW_API_TEST) &&
+			(!(feat_flags & RTE_CRYPTODEV_FF_SYM_RAW_DP)))
 		return TEST_SKIPPED;
 
 	/* not supported with CPU crypto */
@@ -9980,7 +10024,11 @@ test_authenticated_encryption_oop(const struct aead_test_data *tdata)
 	ut_params->op->sym->m_dst = ut_params->obuf;
 
 	/* Process crypto operation */
-	TEST_ASSERT_NOT_NULL(process_crypto_request(ts_params->valid_devs[0],
+	if (global_api_test_type == CRYPTODEV_RAW_API_TEST)
+		process_sym_raw_dp_op(ts_params->valid_devs[0], 0,
+			ut_params->op, 0, 0, 0, 0);
+	else
+		TEST_ASSERT_NOT_NULL(process_crypto_request(ts_params->valid_devs[0],
 			ut_params->op), "failed to process sym crypto op");
 
 	TEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,
@@ -10026,6 +10074,10 @@ test_authenticated_decryption_oop(const struct aead_test_data *tdata)
 
 	int retval;
 	uint8_t *plaintext;
+	struct rte_cryptodev_info dev_info;
+
+	rte_cryptodev_info_get(ts_params->valid_devs[0], &dev_info);
+	uint64_t feat_flags = dev_info.feature_flags;
 
 	/* Verify the capabilities */
 	struct rte_cryptodev_sym_capability_idx cap_idx;
@@ -10040,6 +10092,12 @@ test_authenticated_decryption_oop(const struct aead_test_data *tdata)
 			global_api_test_type == CRYPTODEV_RAW_API_TEST)
 		return TEST_SKIPPED;
 
+	if ((global_api_test_type == CRYPTODEV_RAW_API_TEST) &&
+			(!(feat_flags & RTE_CRYPTODEV_FF_SYM_RAW_DP))) {
+		printf("Device does not support RAW data-path APIs.\n");
+		return TEST_SKIPPED;
+	}
+
 	/* Create AEAD session */
 	retval = create_aead_session(ts_params->valid_devs[0],
 			tdata->algo,
@@ -10070,7 +10128,11 @@ test_authenticated_decryption_oop(const struct aead_test_data *tdata)
 	ut_params->op->sym->m_dst = ut_params->obuf;
 
 	/* Process crypto operation */
-	TEST_ASSERT_NOT_NULL(process_crypto_request(ts_params->valid_devs[0],
+	if (global_api_test_type == CRYPTODEV_RAW_API_TEST)
+		process_sym_raw_dp_op(ts_params->valid_devs[0], 0,
+				ut_params->op, 0, 0, 0, 0);
+	else
+		TEST_ASSERT_NOT_NULL(process_crypto_request(ts_params->valid_devs[0],
 			ut_params->op), "failed to process sym crypto op");
 
 	TEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,
@@ -14835,6 +14897,46 @@ test_cryptodev_cn10k(void)
 	return run_cryptodev_testsuite(RTE_STR(CRYPTODEV_NAME_CN10K_PMD));
 }
 
+static int
+test_cryptodev_dpaa2_sec_raw_api(void)
+{
+	static const char *pmd_name = RTE_STR(CRYPTODEV_NAME_DPAA2_SEC_PMD);
+	int ret;
+
+	ret = require_feature_flag(pmd_name, RTE_CRYPTODEV_FF_SYM_RAW_DP,
+			"RAW API");
+	if (ret)
+		return ret;
+
+	global_api_test_type = CRYPTODEV_RAW_API_TEST;
+	ret = run_cryptodev_testsuite(pmd_name);
+	global_api_test_type = CRYPTODEV_API_TEST;
+
+	return ret;
+}
+
+static int
+test_cryptodev_dpaa_sec_raw_api(void)
+{
+	static const char *pmd_name = RTE_STR(CRYPTODEV_NAME_DPAA_SEC_PMD);
+	int ret;
+
+	ret = require_feature_flag(pmd_name, RTE_CRYPTODEV_FF_SYM_RAW_DP,
+			"RAW API");
+	if (ret)
+		return ret;
+
+	global_api_test_type = CRYPTODEV_RAW_API_TEST;
+	ret = run_cryptodev_testsuite(pmd_name);
+	global_api_test_type = CRYPTODEV_API_TEST;
+
+	return ret;
+}
+
+REGISTER_TEST_COMMAND(cryptodev_dpaa2_sec_raw_api_autotest,
+		test_cryptodev_dpaa2_sec_raw_api);
+REGISTER_TEST_COMMAND(cryptodev_dpaa_sec_raw_api_autotest,
+		test_cryptodev_dpaa_sec_raw_api);
 REGISTER_TEST_COMMAND(cryptodev_qat_raw_api_autotest,
 		test_cryptodev_qat_raw_api);
 REGISTER_TEST_COMMAND(cryptodev_qat_autotest, test_cryptodev_qat);
-- 
2.17.1


^ permalink raw reply	[flat|nested] 115+ messages in thread

* [dpdk-dev] [PATCH v2 15/15] test/crypto: add raw API support in 5G algos
  2021-09-07  7:59   ` [dpdk-dev] [PATCH v2 " Hemant Agrawal
                       ` (13 preceding siblings ...)
  2021-09-07  7:59     ` [dpdk-dev] [PATCH v2 14/15] test/crypto: add raw API test for dpaax Hemant Agrawal
@ 2021-09-07  7:59     ` Hemant Agrawal
  2021-09-15 16:01     ` [dpdk-dev] [PATCH v2 00/15] crypto: add raw vector support in DPAAx Troy, Rebecca
  2021-10-13 19:00     ` [dpdk-dev] [PATCH v4 " Hemant Agrawal
  16 siblings, 0 replies; 115+ messages in thread
From: Hemant Agrawal @ 2021-09-07  7:59 UTC (permalink / raw)
  To: dev, gakhil; +Cc: konstantin.ananyev, roy.fan.zhang, Gagandeep Singh

This patch adds support for raw API testing with the ZUC
and SNOW 3G test cases.

Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
Signed-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>
---
 app/test/test_cryptodev.c | 57 ++++++++++++++++++++++++++++++++++-----
 1 file changed, 51 insertions(+), 6 deletions(-)

diff --git a/app/test/test_cryptodev.c b/app/test/test_cryptodev.c
index de4fb0f3d1..0ee603b1b5 100644
--- a/app/test/test_cryptodev.c
+++ b/app/test/test_cryptodev.c
@@ -368,6 +368,7 @@ process_sym_raw_dp_op(uint8_t dev_id, uint16_t qp_id,
 	}
 
 	op->status = (count == MAX_RAW_DEQUEUE_COUNT + 1 || ret_op != op ||
+			ret_op->status == RTE_CRYPTO_OP_STATUS_ERROR ||
 			n_success < 1) ? RTE_CRYPTO_OP_STATUS_ERROR :
 					RTE_CRYPTO_OP_STATUS_SUCCESS;
 
@@ -4152,6 +4153,16 @@ test_snow3g_encryption_oop(const struct snow3g_test_data *tdata)
 	int retval;
 	unsigned plaintext_pad_len;
 	unsigned plaintext_len;
+	struct rte_cryptodev_info dev_info;
+
+	rte_cryptodev_info_get(ts_params->valid_devs[0], &dev_info);
+	uint64_t feat_flags = dev_info.feature_flags;
+
+	if ((global_api_test_type == CRYPTODEV_RAW_API_TEST) &&
+			(!(feat_flags & RTE_CRYPTODEV_FF_SYM_RAW_DP))) {
+		printf("Device does not support RAW data-path APIs.\n");
+		return -ENOTSUP;
+	}
 
 	/* Verify the capabilities */
 	struct rte_cryptodev_sym_capability_idx cap_idx;
@@ -4207,7 +4218,11 @@ test_snow3g_encryption_oop(const struct snow3g_test_data *tdata)
 	if (retval < 0)
 		return retval;
 
-	ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+	if (global_api_test_type == CRYPTODEV_RAW_API_TEST)
+		process_sym_raw_dp_op(ts_params->valid_devs[0], 0,
+			ut_params->op, 1, 0, 1, tdata->cipher_iv.len);
+	else
+		ut_params->op = process_crypto_request(ts_params->valid_devs[0],
 						ut_params->op);
 	TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
 
@@ -4267,6 +4282,12 @@ test_snow3g_encryption_oop_sgl(const struct snow3g_test_data *tdata)
 		return TEST_SKIPPED;
 	}
 
+	if ((global_api_test_type == CRYPTODEV_RAW_API_TEST) &&
+			(!(feat_flags & RTE_CRYPTODEV_FF_SYM_RAW_DP))) {
+		printf("Device does not support RAW data-path APIs.\n");
+		return -ENOTSUP;
+	}
+
 	/* Create SNOW 3G session */
 	retval = create_wireless_algo_cipher_session(ts_params->valid_devs[0],
 					RTE_CRYPTO_CIPHER_OP_ENCRYPT,
@@ -4301,7 +4322,11 @@ test_snow3g_encryption_oop_sgl(const struct snow3g_test_data *tdata)
 	if (retval < 0)
 		return retval;
 
-	ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+	if (global_api_test_type == CRYPTODEV_RAW_API_TEST)
+		process_sym_raw_dp_op(ts_params->valid_devs[0], 0,
+			ut_params->op, 1, 0, 1, tdata->cipher_iv.len);
+	else
+		ut_params->op = process_crypto_request(ts_params->valid_devs[0],
 						ut_params->op);
 	TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
 
@@ -4428,7 +4453,11 @@ test_snow3g_encryption_offset_oop(const struct snow3g_test_data *tdata)
 	if (retval < 0)
 		return retval;
 
-	ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+	if (global_api_test_type == CRYPTODEV_RAW_API_TEST)
+		process_sym_raw_dp_op(ts_params->valid_devs[0], 0,
+			ut_params->op, 1, 0, 1, tdata->cipher_iv.len);
+	else
+		ut_params->op = process_crypto_request(ts_params->valid_devs[0],
 						ut_params->op);
 	TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
 
@@ -4559,7 +4588,16 @@ static int test_snow3g_decryption_oop(const struct snow3g_test_data *tdata)
 	uint8_t *plaintext, *ciphertext;
 	unsigned ciphertext_pad_len;
 	unsigned ciphertext_len;
+	struct rte_cryptodev_info dev_info;
+
+	rte_cryptodev_info_get(ts_params->valid_devs[0], &dev_info);
+	uint64_t feat_flags = dev_info.feature_flags;
 
+	if ((global_api_test_type == CRYPTODEV_RAW_API_TEST) &&
+			(!(feat_flags & RTE_CRYPTODEV_FF_SYM_RAW_DP))) {
+		printf("Device does not support RAW data-path APIs.\n");
+		return -ENOTSUP;
+	}
 	/* Verify the capabilities */
 	struct rte_cryptodev_sym_capability_idx cap_idx;
 	cap_idx.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
@@ -4617,7 +4655,11 @@ static int test_snow3g_decryption_oop(const struct snow3g_test_data *tdata)
 	if (retval < 0)
 		return retval;
 
-	ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+	if (global_api_test_type == CRYPTODEV_RAW_API_TEST)
+		process_sym_raw_dp_op(ts_params->valid_devs[0], 0,
+			ut_params->op, 1, 0, 1, tdata->cipher_iv.len);
+	else
+		ut_params->op = process_crypto_request(ts_params->valid_devs[0],
 						ut_params->op);
 	TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
 	ut_params->obuf = ut_params->op->sym->m_dst;
@@ -12653,10 +12695,13 @@ test_authenticated_decryption_fail_when_corruption(
 	else {
 		ut_params->op = process_crypto_request(ts_params->valid_devs[0],
 			ut_params->op);
-		TEST_ASSERT_NULL(ut_params->op, "authentication not failed");
 	}
+	if (ut_params->op == NULL)
+		return 0;
+	else if (ut_params->op->status != RTE_CRYPTO_OP_STATUS_SUCCESS)
+		return 0;
 
-	return 0;
+	return -1;
 }
 
 static int
-- 
2.17.1


^ permalink raw reply	[flat|nested] 115+ messages in thread

* Re: [dpdk-dev] [PATCH v2 00/15] crypto: add raw vector support in DPAAx
  2021-09-07  7:59   ` [dpdk-dev] [PATCH v2 " Hemant Agrawal
                       ` (14 preceding siblings ...)
  2021-09-07  7:59     ` [dpdk-dev] [PATCH v2 15/15] test/crypto: add raw API support in 5G algos Hemant Agrawal
@ 2021-09-15 16:01     ` Troy, Rebecca
  2021-10-13 19:00     ` [dpdk-dev] [PATCH v4 " Hemant Agrawal
  16 siblings, 0 replies; 115+ messages in thread
From: Troy, Rebecca @ 2021-09-15 16:01 UTC (permalink / raw)
  To: Hemant Agrawal, dev, gakhil; +Cc: Ananyev, Konstantin, Zhang, Roy Fan

-----Original Message-----
From: dev <dev-bounces@dpdk.org> On Behalf Of Hemant Agrawal
Sent: Tuesday 7 September 2021 09:00
To: dev@dpdk.org; gakhil@marvell.com
Cc: Ananyev, Konstantin <konstantin.ananyev@intel.com>; Zhang, Roy Fan <roy.fan.zhang@intel.com>
Subject: [dpdk-dev] [PATCH v2 00/15] crypto: add raw vector support in DPAAx

This patch series adds support for raw vector API in dpaax_sec drivers This also enhances the raw vector APIs to support OOP and security protocol support.

v2: fix aesni compilation and add release notes.

Gagandeep Singh (11):
  crypto: add total raw buffer length
  crypto: fix raw process for multi-seg case
  crypto/dpaa2_sec: support raw datapath APIs
  crypto/dpaa2_sec: support AUTH only with raw buffer APIs
  crypto/dpaa2_sec: support AUTHENC with raw buffer APIs
  crypto/dpaa2_sec: support AEAD with raw buffer APIs
  crypto/dpaa2_sec: support OOP with raw buffer API
  crypto/dpaa2_sec: enhance error checks with raw buffer APIs
  crypto/dpaa_sec: support raw datapath APIs
  crypto/dpaa_sec: support authonly and chain with raw APIs
  crypto/dpaa_sec: support AEAD and proto with raw APIs

Hemant Agrawal (4):
  crypto: change sgl to src_sgl in vector
  crypto: add dest_sgl in raw vector APIs
  test/crypto: add raw API test for dpaax
  test/crypto: add raw API support in 5G algos

 app/test/test_cryptodev.c                   |  179 +++-
 doc/guides/rel_notes/release_21_11.rst      |    8 +
 drivers/crypto/aesni_gcm/aesni_gcm_pmd.c    |   12 +-
 drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c  |    6 +-
 drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c |   13 +-
 drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h   |   82 +-
 drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c | 1045 ++++++++++++++++++
 drivers/crypto/dpaa2_sec/meson.build        |    3 +-
 drivers/crypto/dpaa_sec/dpaa_sec.c          |   23 +-
 drivers/crypto/dpaa_sec/dpaa_sec.h          |   40 +-
 drivers/crypto/dpaa_sec/dpaa_sec_raw_dp.c   | 1052 +++++++++++++++++++
 drivers/crypto/dpaa_sec/meson.build         |    4 +-
 drivers/crypto/qat/qat_sym_hw_dp.c          |   27 +-
 lib/cryptodev/rte_crypto_sym.h              |   13 +-
 lib/ipsec/misc.h                            |    4 +-
 15 files changed, 2407 insertions(+), 104 deletions(-)  create mode 100644 drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c
 create mode 100644 drivers/crypto/dpaa_sec/dpaa_sec_raw_dp.c

--
2.17.1

Series-acked-by: Rebecca Troy <rebecca.troy@intel.com>

^ permalink raw reply	[flat|nested] 115+ messages in thread

* Re: [dpdk-dev] [PATCH v2 01/15] crypto: change sgl to src_sgl in vector
  2021-09-07  7:59     ` [dpdk-dev] [PATCH v2 01/15] crypto: change sgl to src_sgl in vector Hemant Agrawal
@ 2021-09-16 11:38       ` Ananyev, Konstantin
  0 siblings, 0 replies; 115+ messages in thread
From: Ananyev, Konstantin @ 2021-09-16 11:38 UTC (permalink / raw)
  To: Hemant Agrawal, dev, gakhil; +Cc: Zhang, Roy Fan


> This patch renames the sgl to src_sgl to help differentiating
> between source and destination sgl.
> 
> Signed-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>
> Acked-by: Akhil Goyal <gakhil@marvell.com>
> ---
>  app/test/test_cryptodev.c                  |  6 ++---
>  drivers/crypto/aesni_gcm/aesni_gcm_pmd.c   | 12 +++++-----
>  drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c |  6 ++---
>  drivers/crypto/qat/qat_sym_hw_dp.c         | 27 +++++++++++++---------
>  lib/cryptodev/rte_crypto_sym.h             |  2 +-
>  lib/ipsec/misc.h                           |  4 ++--
>  6 files changed, 31 insertions(+), 26 deletions(-)
> 
> diff --git a/app/test/test_cryptodev.c b/app/test/test_cryptodev.c
> index 843d07ba37..ed63524edc 100644
> --- a/app/test/test_cryptodev.c
> +++ b/app/test/test_cryptodev.c
> @@ -221,7 +221,7 @@ process_sym_raw_dp_op(uint8_t dev_id, uint16_t qp_id,
>  	digest.va = NULL;
>  	sgl.vec = data_vec;
>  	vec.num = 1;
> -	vec.sgl = &sgl;
> +	vec.src_sgl = &sgl;
>  	vec.iv = &cipher_iv;
>  	vec.digest = &digest;
>  	vec.aad = &aad_auth_iv;
> @@ -385,7 +385,7 @@ process_cpu_aead_op(uint8_t dev_id, struct rte_crypto_op *op)
> 
>  	sgl.vec = vec;
>  	sgl.num = n;
> -	symvec.sgl = &sgl;
> +	symvec.src_sgl = &sgl;
>  	symvec.iv = &iv_ptr;
>  	symvec.digest = &digest_ptr;
>  	symvec.aad = &aad_ptr;
> @@ -431,7 +431,7 @@ process_cpu_crypt_auth_op(uint8_t dev_id, struct rte_crypto_op *op)
> 
>  	sgl.vec = vec;
>  	sgl.num = n;
> -	symvec.sgl = &sgl;
> +	symvec.src_sgl = &sgl;
>  	symvec.iv = &iv_ptr;
>  	symvec.digest = &digest_ptr;
>  	symvec.status = &st;
> diff --git a/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c b/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c
> index 886e2a5aaa..5fbb9b79f8 100644
> --- a/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c
> +++ b/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c
> @@ -535,7 +535,7 @@ aesni_gcm_sgl_encrypt(struct aesni_gcm_session *s,
>  	processed = 0;
>  	for (i = 0; i < vec->num; ++i) {
>  		aesni_gcm_process_gcm_sgl_op(s, gdata_ctx,
> -			&vec->sgl[i], vec->iv[i].va,
> +			&vec->src_sgl[i], vec->iv[i].va,
>  			vec->aad[i].va);
>  		vec->status[i] = aesni_gcm_sgl_op_finalize_encryption(s,
>  			gdata_ctx, vec->digest[i].va);
> @@ -554,7 +554,7 @@ aesni_gcm_sgl_decrypt(struct aesni_gcm_session *s,
>  	processed = 0;
>  	for (i = 0; i < vec->num; ++i) {
>  		aesni_gcm_process_gcm_sgl_op(s, gdata_ctx,
> -			&vec->sgl[i], vec->iv[i].va,
> +			&vec->src_sgl[i], vec->iv[i].va,
>  			vec->aad[i].va);
>  		 vec->status[i] = aesni_gcm_sgl_op_finalize_decryption(s,
>  			gdata_ctx, vec->digest[i].va);
> @@ -572,13 +572,13 @@ aesni_gmac_sgl_generate(struct aesni_gcm_session *s,
> 
>  	processed = 0;
>  	for (i = 0; i < vec->num; ++i) {
> -		if (vec->sgl[i].num != 1) {
> +		if (vec->src_sgl[i].num != 1) {
>  			vec->status[i] = ENOTSUP;
>  			continue;
>  		}
> 
>  		aesni_gcm_process_gmac_sgl_op(s, gdata_ctx,
> -			&vec->sgl[i], vec->iv[i].va);
> +			&vec->src_sgl[i], vec->iv[i].va);
>  		vec->status[i] = aesni_gcm_sgl_op_finalize_encryption(s,
>  			gdata_ctx, vec->digest[i].va);
>  		processed += (vec->status[i] == 0);
> @@ -595,13 +595,13 @@ aesni_gmac_sgl_verify(struct aesni_gcm_session *s,
> 
>  	processed = 0;
>  	for (i = 0; i < vec->num; ++i) {
> -		if (vec->sgl[i].num != 1) {
> +		if (vec->src_sgl[i].num != 1) {
>  			vec->status[i] = ENOTSUP;
>  			continue;
>  		}
> 
>  		aesni_gcm_process_gmac_sgl_op(s, gdata_ctx,
> -			&vec->sgl[i], vec->iv[i].va);
> +			&vec->src_sgl[i], vec->iv[i].va);
>  		vec->status[i] = aesni_gcm_sgl_op_finalize_decryption(s,
>  			gdata_ctx, vec->digest[i].va);
>  		processed += (vec->status[i] == 0);
> diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
> index a01c826a3c..1b05099446 100644
> --- a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
> +++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
> @@ -2002,14 +2002,14 @@ aesni_mb_cpu_crypto_process_bulk(struct rte_cryptodev *dev,
>  	for (i = 0, j = 0, k = 0; i != vec->num; i++) {
> 
> 
> -		ret = check_crypto_sgl(sofs, vec->sgl + i);
> +		ret = check_crypto_sgl(sofs, vec->src_sgl + i);
>  		if (ret != 0) {
>  			vec->status[i] = ret;
>  			continue;
>  		}
> 
> -		buf = vec->sgl[i].vec[0].base;
> -		len = vec->sgl[i].vec[0].len;
> +		buf = vec->src_sgl[i].vec[0].base;
> +		len = vec->src_sgl[i].vec[0].len;
> 
>  		job = IMB_GET_NEXT_JOB(mb_mgr);
>  		if (job == NULL) {
> diff --git a/drivers/crypto/qat/qat_sym_hw_dp.c b/drivers/crypto/qat/qat_sym_hw_dp.c
> index ac9ac05363..4870ebf66a 100644
> --- a/drivers/crypto/qat/qat_sym_hw_dp.c
> +++ b/drivers/crypto/qat/qat_sym_hw_dp.c
> @@ -181,8 +181,9 @@ qat_sym_dp_enqueue_cipher_jobs(void *qp_data, uint8_t *drv_ctx,
>  			(uint8_t *)tx_queue->base_addr + tail);
>  		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
> 
> -		data_len = qat_sym_dp_parse_data_vec(qp, req, vec->sgl[i].vec,
> -			vec->sgl[i].num);
> +		data_len = qat_sym_dp_parse_data_vec(qp, req,
> +			vec->src_sgl[i].vec,
> +			vec->src_sgl[i].num);
>  		if (unlikely(data_len < 0))
>  			break;
>  		req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data[i];
> @@ -302,8 +303,9 @@ qat_sym_dp_enqueue_auth_jobs(void *qp_data, uint8_t *drv_ctx,
>  			(uint8_t *)tx_queue->base_addr + tail);
>  		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
> 
> -		data_len = qat_sym_dp_parse_data_vec(qp, req, vec->sgl[i].vec,
> -			vec->sgl[i].num);
> +		data_len = qat_sym_dp_parse_data_vec(qp, req,
> +			vec->src_sgl[i].vec,
> +			vec->src_sgl[i].num);
>  		if (unlikely(data_len < 0))
>  			break;
>  		req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data[i];
> @@ -484,14 +486,16 @@ qat_sym_dp_enqueue_chain_jobs(void *qp_data, uint8_t *drv_ctx,
>  			(uint8_t *)tx_queue->base_addr + tail);
>  		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
> 
> -		data_len = qat_sym_dp_parse_data_vec(qp, req, vec->sgl[i].vec,
> -			vec->sgl[i].num);
> +		data_len = qat_sym_dp_parse_data_vec(qp, req,
> +			vec->src_sgl[i].vec,
> +			vec->src_sgl[i].num);
>  		if (unlikely(data_len < 0))
>  			break;
>  		req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data[i];
> -		if (unlikely(enqueue_one_chain_job(ctx, req, vec->sgl[i].vec,
> -			vec->sgl[i].num, &vec->iv[i], &vec->digest[i],
> -				&vec->auth_iv[i], ofs, (uint32_t)data_len)))
> +		if (unlikely(enqueue_one_chain_job(ctx, req,
> +			vec->src_sgl[i].vec, vec->src_sgl[i].num,
> +			&vec->iv[i], &vec->digest[i],
> +			&vec->auth_iv[i], ofs, (uint32_t)data_len)))
>  			break;
> 
>  		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
> @@ -688,8 +692,9 @@ qat_sym_dp_enqueue_aead_jobs(void *qp_data, uint8_t *drv_ctx,
>  			(uint8_t *)tx_queue->base_addr + tail);
>  		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
> 
> -		data_len = qat_sym_dp_parse_data_vec(qp, req, vec->sgl[i].vec,
> -			vec->sgl[i].num);
> +		data_len = qat_sym_dp_parse_data_vec(qp, req,
> +			vec->src_sgl[i].vec,
> +			vec->src_sgl[i].num);
>  		if (unlikely(data_len < 0))
>  			break;
>  		req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data[i];
> diff --git a/lib/cryptodev/rte_crypto_sym.h b/lib/cryptodev/rte_crypto_sym.h
> index 58c0724743..dcc0bd5933 100644
> --- a/lib/cryptodev/rte_crypto_sym.h
> +++ b/lib/cryptodev/rte_crypto_sym.h
> @@ -69,7 +69,7 @@ struct rte_crypto_sym_vec {
>  	/** number of operations to perform */
>  	uint32_t num;
>  	/** array of SGL vectors */
> -	struct rte_crypto_sgl *sgl;
> +	struct rte_crypto_sgl *src_sgl;
>  	/** array of pointers to cipher IV */
>  	struct rte_crypto_va_iova_ptr *iv;
>  	/** array of pointers to digest */
> diff --git a/lib/ipsec/misc.h b/lib/ipsec/misc.h
> index 79b9a20762..58ff538141 100644
> --- a/lib/ipsec/misc.h
> +++ b/lib/ipsec/misc.h
> @@ -136,7 +136,7 @@ cpu_crypto_bulk(const struct rte_ipsec_session *ss,
>  		/* not enough space in vec[] to hold all segments */
>  		if (vcnt < 0) {
>  			/* fill the request structure */
> -			symvec.sgl = &vecpkt[j];
> +			symvec.src_sgl = &vecpkt[j];
>  			symvec.iv = &iv[j];
>  			symvec.digest = &dgst[j];
>  			symvec.aad = &aad[j];
> @@ -160,7 +160,7 @@ cpu_crypto_bulk(const struct rte_ipsec_session *ss,
>  	}
> 
>  	/* fill the request structure */
> -	symvec.sgl = &vecpkt[j];
> +	symvec.src_sgl = &vecpkt[j];
>  	symvec.iv = &iv[j];
>  	symvec.aad = &aad[j];
>  	symvec.digest = &dgst[j];
> --

Acked-by: Konstantin Ananyev <konstantin.ananyev@intel.com>

> 2.17.1


^ permalink raw reply	[flat|nested] 115+ messages in thread
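
For reference, a minimal caller-side sketch of the rename (not from the patch itself), using only the structures it touches; the data, IV and digest buffers are assumed to be set up by the application:

#include <rte_crypto_sym.h>

/* Sketch: populate rte_crypto_sym_vec for one single-segment operation
 * after the sgl -> src_sgl rename. All buffers come from the caller. */
static void
fill_sym_vec(struct rte_crypto_sym_vec *vec,
	     struct rte_crypto_sgl *sgl, struct rte_crypto_vec *seg,
	     void *data, rte_iova_t iova, uint32_t len,
	     struct rte_crypto_va_iova_ptr *iv,
	     struct rte_crypto_va_iova_ptr *digest, int32_t *status)
{
	seg->base = data;
	seg->iova = iova;
	seg->len = len;

	sgl->vec = seg;
	sgl->num = 1;

	vec->num = 1;
	vec->src_sgl = sgl;	/* was vec->sgl before this patch */
	vec->iv = iv;
	vec->digest = digest;
	vec->status = status;
}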

* Re: [dpdk-dev] [PATCH v2 02/15] crypto: add total raw buffer length
  2021-09-07  7:59     ` [dpdk-dev] [PATCH v2 02/15] crypto: add total raw buffer length Hemant Agrawal
@ 2021-09-16 11:42       ` Ananyev, Konstantin
  2021-09-20 19:28         ` Akhil Goyal
  0 siblings, 1 reply; 115+ messages in thread
From: Ananyev, Konstantin @ 2021-09-16 11:42 UTC (permalink / raw)
  To: Hemant Agrawal, dev, gakhil; +Cc: Zhang, Roy Fan, Gagandeep Singh


> From: Gagandeep Singh <g.singh@nxp.com>
> 
> The current crypto raw data vector is extended to support
> rte_security use cases, where we need the total data length to know
> how much additional memory space is available in the buffer beyond
> the data length, so that the driver/HW can write expanded-size
> data after encryption.
> 
> Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
> Acked-by: Akhil Goyal <gakhil@marvell.com>
> ---
>  lib/cryptodev/rte_crypto_sym.h | 6 ++++++
>  1 file changed, 6 insertions(+)
> 
> diff --git a/lib/cryptodev/rte_crypto_sym.h b/lib/cryptodev/rte_crypto_sym.h
> index dcc0bd5933..e5cef1fb72 100644
> --- a/lib/cryptodev/rte_crypto_sym.h
> +++ b/lib/cryptodev/rte_crypto_sym.h
> @@ -37,6 +37,8 @@ struct rte_crypto_vec {
>  	rte_iova_t iova;
>  	/** length of the data buffer */
>  	uint32_t len;
> +	/** total buffer length*/
> +	uint32_t tot_len;
>  };
> 
>  /**
> @@ -980,12 +982,14 @@ rte_crypto_mbuf_to_vec(const struct rte_mbuf *mb, uint32_t ofs, uint32_t len,
>  	seglen = mb->data_len - ofs;
>  	if (len <= seglen) {
>  		vec[0].len = len;
> +		vec[0].tot_len = mb->buf_len;

That doesn't look right.
We should take into account the mbuf headroom and the input offset.
Something like:
vec[0].tot_len = mb->buf_len - rte_pktmbuf_headroom(m) - ofs;
Same in other places below.

>  		return 1;
>  	}
> 
>  	/* data spread across segments */
>  	vec[0].len = seglen;
>  	left = len - seglen;
> +	vec[0].tot_len = mb->buf_len;
>  	for (i = 1, nseg = mb->next; nseg != NULL; nseg = nseg->next, i++) {
> 
>  		vec[i].base = rte_pktmbuf_mtod(nseg, void *);
> @@ -995,6 +999,7 @@ rte_crypto_mbuf_to_vec(const struct rte_mbuf *mb, uint32_t ofs, uint32_t len,
>  		if (left <= seglen) {
>  			/* whole requested data is completed */
>  			vec[i].len = left;
> +			vec[i].tot_len = mb->buf_len;
>  			left = 0;
>  			break;
>  		}
> @@ -1002,6 +1007,7 @@ rte_crypto_mbuf_to_vec(const struct rte_mbuf *mb, uint32_t ofs, uint32_t len,
>  		/* use whole segment */
>  		vec[i].len = seglen;
>  		left -= seglen;
> +		vec[i].tot_len = mb->buf_len;
>  	}
> 
>  	RTE_ASSERT(left == 0);
> --
> 2.17.1


^ permalink raw reply	[flat|nested] 115+ messages in thread
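
The arithmetic behind this comment, as a standalone sketch for a single-segment mbuf (an illustration of the review suggestion, not code from the patch):

#include <rte_mbuf.h>

/* The room a PMD may write into starts at the input offset within the
 * data area, not at buf_addr, so headroom and ofs must be subtracted
 * from buf_len. */
static uint32_t
writable_space(const struct rte_mbuf *mb, uint32_t ofs)
{
	/*
	 * buf_addr                                  buf_addr + buf_len
	 * |<- headroom ->|<- ofs ->|<------- writable ------->|
	 */
	return mb->buf_len - rte_pktmbuf_headroom(mb) - ofs;
}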

* Re: [dpdk-dev] [PATCH v2 03/15] crypto: add dest_sgl in raw vector APIs
  2021-09-07  7:59     ` [dpdk-dev] [PATCH v2 03/15] crypto: add dest_sgl in raw vector APIs Hemant Agrawal
@ 2021-09-16 11:44       ` Ananyev, Konstantin
  0 siblings, 0 replies; 115+ messages in thread
From: Ananyev, Konstantin @ 2021-09-16 11:44 UTC (permalink / raw)
  To: Hemant Agrawal, dev, gakhil; +Cc: Zhang, Roy Fan


> 
> The structure rte_crypto_sym_vec is updated to
> add dest_sgl to support out of place processing.
> 
> Signed-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>
> Acked-by: Akhil Goyal <gakhil@marvell.com>
> ---
>  lib/cryptodev/rte_crypto_sym.h | 2 ++
>  1 file changed, 2 insertions(+)
> 
> diff --git a/lib/cryptodev/rte_crypto_sym.h b/lib/cryptodev/rte_crypto_sym.h
> index e5cef1fb72..978708845f 100644
> --- a/lib/cryptodev/rte_crypto_sym.h
> +++ b/lib/cryptodev/rte_crypto_sym.h
> @@ -72,6 +72,8 @@ struct rte_crypto_sym_vec {
>  	uint32_t num;
>  	/** array of SGL vectors */
>  	struct rte_crypto_sgl *src_sgl;
> +	/** array of SGL vectors for OOP, keep it NULL for inplace*/
> +	struct rte_crypto_sgl *dest_sgl;
>  	/** array of pointers to cipher IV */
>  	struct rte_crypto_va_iova_ptr *iv;
>  	/** array of pointers to digest */
> --

Acked-by: Konstantin Ananyev <konstantin.ananyev@intel.com>

> 2.17.1


^ permalink raw reply	[flat|nested] 115+ messages in thread

* Re: [dpdk-dev] [PATCH v2 02/15] crypto: add total raw buffer length
  2021-09-16 11:42       ` Ananyev, Konstantin
@ 2021-09-20 19:28         ` Akhil Goyal
  2021-09-21  9:59           ` Hemant Agrawal
  0 siblings, 1 reply; 115+ messages in thread
From: Akhil Goyal @ 2021-09-20 19:28 UTC (permalink / raw)
  To: Ananyev, Konstantin, Hemant Agrawal, dev; +Cc: Zhang, Roy Fan, Gagandeep Singh

> 
> > From: Gagandeep Singh <g.singh@nxp.com>
> >
> > The current crypto raw data vector is extended to support
> > rte_security use cases, where we need the total data length to know
> > how much additional memory space is available in the buffer beyond
> > the data length, so that the driver/HW can write expanded-size
> > data after encryption.
> >
> > Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
> > Acked-by: Akhil Goyal <gakhil@marvell.com>
> > ---
> >  lib/cryptodev/rte_crypto_sym.h | 6 ++++++
> >  1 file changed, 6 insertions(+)
> >
> > diff --git a/lib/cryptodev/rte_crypto_sym.h
> b/lib/cryptodev/rte_crypto_sym.h
> > index dcc0bd5933..e5cef1fb72 100644
> > --- a/lib/cryptodev/rte_crypto_sym.h
> > +++ b/lib/cryptodev/rte_crypto_sym.h
> > @@ -37,6 +37,8 @@ struct rte_crypto_vec {
> >  	rte_iova_t iova;
> >  	/** length of the data buffer */
> >  	uint32_t len;
> > +	/** total buffer length*/
> > +	uint32_t tot_len;
> >  };
> >
> >  /**
> > @@ -980,12 +982,14 @@ rte_crypto_mbuf_to_vec(const struct rte_mbuf
> *mb, uint32_t ofs, uint32_t len,
> >  	seglen = mb->data_len - ofs;
> >  	if (len <= seglen) {
> >  		vec[0].len = len;
> > +		vec[0].tot_len = mb->buf_len;
> 
> That doesn't look right.
> We should take into account the mbuf headroom and the input offset.
> Something like:
> vec[0].tot_len = mb->buf_len - rte_pktmbuf_headroom(m) - ofs;
> Same in other places below.
> 
I believe the packet can expand into headroom based on the protocol support.

^ permalink raw reply	[flat|nested] 115+ messages in thread

* Re: [dpdk-dev] [PATCH v2 02/15] crypto: add total raw buffer length
  2021-09-20 19:28         ` Akhil Goyal
@ 2021-09-21  9:59           ` Hemant Agrawal
  2021-09-21 10:04             ` Ananyev, Konstantin
  0 siblings, 1 reply; 115+ messages in thread
From: Hemant Agrawal @ 2021-09-21  9:59 UTC (permalink / raw)
  To: Akhil Goyal, Ananyev, Konstantin, Hemant Agrawal, dev
  Cc: Zhang, Roy Fan, Gagandeep Singh

Hi Konstantin

On 9/21/2021 12:58 AM, Akhil Goyal wrote:
>>> From: Gagandeep Singh <g.singh@nxp.com>
>>>
>>> The current crypto raw data vector is extended to support
>>> rte_security use cases, where we need the total data length to know
>>> how much additional memory space is available in the buffer beyond
>>> the data length, so that the driver/HW can write expanded-size
>>> data after encryption.
>>>
>>> Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
>>> Acked-by: Akhil Goyal <gakhil@marvell.com>
>>> ---
>>>   lib/cryptodev/rte_crypto_sym.h | 6 ++++++
>>>   1 file changed, 6 insertions(+)
>>>
>>> diff --git a/lib/cryptodev/rte_crypto_sym.h
>> b/lib/cryptodev/rte_crypto_sym.h
>>> index dcc0bd5933..e5cef1fb72 100644
>>> --- a/lib/cryptodev/rte_crypto_sym.h
>>> +++ b/lib/cryptodev/rte_crypto_sym.h
>>> @@ -37,6 +37,8 @@ struct rte_crypto_vec {
>>>   	rte_iova_t iova;
>>>   	/** length of the data buffer */
>>>   	uint32_t len;
>>> +	/** total buffer length*/
>>> +	uint32_t tot_len;
>>>   };
>>>
>>>   /**
>>> @@ -980,12 +982,14 @@ rte_crypto_mbuf_to_vec(const struct rte_mbuf
>> *mb, uint32_t ofs, uint32_t len,
>>>   	seglen = mb->data_len - ofs;
>>>   	if (len <= seglen) {
>>>   		vec[0].len = len;
>>> +		vec[0].tot_len = mb->buf_len;
>> That doesn't look right.
>> We should take into a count mbuf headroom and input offset.
>> Something like:
>> vec[0].tot_len = mb->buf_len - rte_pktmbuf_headroom(m) - ofs;
>> Same in other places below.
>>
> I believe the packet can expand into headroom based on the protocol support.
Yes, total length is representing the total buffer length available. The 
security protocol shall take care of the headroom and offsets.

^ permalink raw reply	[flat|nested] 115+ messages in thread

* Re: [dpdk-dev] [PATCH v2 02/15] crypto: add total raw buffer length
  2021-09-21  9:59           ` Hemant Agrawal
@ 2021-09-21 10:04             ` Ananyev, Konstantin
  2021-09-24 18:06               ` Akhil Goyal
  0 siblings, 1 reply; 115+ messages in thread
From: Ananyev, Konstantin @ 2021-09-21 10:04 UTC (permalink / raw)
  To: hemant.agrawal, Akhil Goyal, dev; +Cc: Zhang, Roy Fan, Gagandeep Singh


> Hi Konstantin
> 
> On 9/21/2021 12:58 AM, Akhil Goyal wrote:
> >>> From: Gagandeep Singh <g.singh@nxp.com>
> >>>
> >>> The current crypto raw data vector is extended to support
> >>> rte_security use cases, where we need the total data length to know
> >>> how much additional memory space is available in the buffer beyond
> >>> the data length, so that the driver/HW can write expanded-size
> >>> data after encryption.
> >>>
> >>> Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
> >>> Acked-by: Akhil Goyal <gakhil@marvell.com>
> >>> ---
> >>>   lib/cryptodev/rte_crypto_sym.h | 6 ++++++
> >>>   1 file changed, 6 insertions(+)
> >>>
> >>> diff --git a/lib/cryptodev/rte_crypto_sym.h
> >> b/lib/cryptodev/rte_crypto_sym.h
> >>> index dcc0bd5933..e5cef1fb72 100644
> >>> --- a/lib/cryptodev/rte_crypto_sym.h
> >>> +++ b/lib/cryptodev/rte_crypto_sym.h
> >>> @@ -37,6 +37,8 @@ struct rte_crypto_vec {
> >>>   	rte_iova_t iova;
> >>>   	/** length of the data buffer */
> >>>   	uint32_t len;
> >>> +	/** total buffer length*/
> >>> +	uint32_t tot_len;
> >>>   };
> >>>
> >>>   /**
> >>> @@ -980,12 +982,14 @@ rte_crypto_mbuf_to_vec(const struct rte_mbuf
> >> *mb, uint32_t ofs, uint32_t len,
> >>>   	seglen = mb->data_len - ofs;
> >>>   	if (len <= seglen) {
> >>>   		vec[0].len = len;
> >>> +		vec[0].tot_len = mb->buf_len;
> >> That doesn't look right.
> >> We should take into account the mbuf headroom and the input offset.
> >> Something like:
> >> vec[0].tot_len = mb->buf_len - rte_pktmbuf_headroom(m) - ofs;
> >> Same in other places below.
> >>
> > I believe the packet can expand into headroom based on the protocol support.
> Yes, total length is representing the total buffer length available. The
> security protocol shall take care of the headroom and offsets.

Hmm, and how will it know how many bytes are in head-room, and how many are in tail-room?
We either need to provide values for both, or assume that only tail-room is available for the driver.


^ permalink raw reply	[flat|nested] 115+ messages in thread

* Re: [dpdk-dev] [PATCH v2 02/15] crypto: add total raw buffer length
  2021-09-21 10:04             ` Ananyev, Konstantin
@ 2021-09-24 18:06               ` Akhil Goyal
  2021-09-27  9:23                 ` Ananyev, Konstantin
  0 siblings, 1 reply; 115+ messages in thread
From: Akhil Goyal @ 2021-09-24 18:06 UTC (permalink / raw)
  To: Ananyev, Konstantin, hemant.agrawal, dev; +Cc: Zhang, Roy Fan, Gagandeep Singh

> > Hi Konstantin
> >
> > On 9/21/2021 12:58 AM, Akhil Goyal wrote:
> > >>> From: Gagandeep Singh <g.singh@nxp.com>
> > >>>
> > >>> The current crypto raw data vector is extended to support
> > >>> rte_security use cases, where we need the total data length to know
> > >>> how much additional memory space is available in the buffer beyond
> > >>> the data length, so that the driver/HW can write expanded-size
> > >>> data after encryption.
> > >>>
> > >>> Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
> > >>> Acked-by: Akhil Goyal <gakhil@marvell.com>
> > >>> ---
> > >>>   lib/cryptodev/rte_crypto_sym.h | 6 ++++++
> > >>>   1 file changed, 6 insertions(+)
> > >>>
> > >>> diff --git a/lib/cryptodev/rte_crypto_sym.h
> > >> b/lib/cryptodev/rte_crypto_sym.h
> > >>> index dcc0bd5933..e5cef1fb72 100644
> > >>> --- a/lib/cryptodev/rte_crypto_sym.h
> > >>> +++ b/lib/cryptodev/rte_crypto_sym.h
> > >>> @@ -37,6 +37,8 @@ struct rte_crypto_vec {
> > >>>   	rte_iova_t iova;
> > >>>   	/** length of the data buffer */
> > >>>   	uint32_t len;
> > >>> +	/** total buffer length*/
> > >>> +	uint32_t tot_len;
> > >>>   };
> > >>>
> > >>>   /**
> > >>> @@ -980,12 +982,14 @@ rte_crypto_mbuf_to_vec(const struct
> rte_mbuf
> > >> *mb, uint32_t ofs, uint32_t len,
> > >>>   	seglen = mb->data_len - ofs;
> > >>>   	if (len <= seglen) {
> > >>>   		vec[0].len = len;
> > >>> +		vec[0].tot_len = mb->buf_len;
> > >> That doesn't look right.
> > >> We should take into account the mbuf headroom and the input offset.
> > >> Something like:
> > >> vec[0].tot_len = mb->buf_len - rte_pktmbuf_headroom(m) - ofs;
> > >> Same in other places below.
> > >>
> > > I believe the packet can expand into headroom based on the protocol
> support.
> > Yes, total length is representing the total buffer length available. The
> > security protocol shall take care of the headroom and offsets.
> 
> Hmm, and how will it know how many bytes are in head-room, and how
> many are in tail-room?
> We either need to provide values for both, or assume that only tail-room is
> available for the driver.
I believe it should be the starting point from where output can be written till the end of the buffer.
There should not be any headroom and tailroom for raw buffers.
It should be mbuf->buf_len - ofs.


^ permalink raw reply	[flat|nested] 115+ messages in thread

* Re: [dpdk-dev] [PATCH v2 02/15] crypto: add total raw buffer length
  2021-09-24 18:06               ` Akhil Goyal
@ 2021-09-27  9:23                 ` Ananyev, Konstantin
  2021-09-28  7:05                   ` Akhil Goyal
  0 siblings, 1 reply; 115+ messages in thread
From: Ananyev, Konstantin @ 2021-09-27  9:23 UTC (permalink / raw)
  To: Akhil Goyal, hemant.agrawal, dev; +Cc: Zhang, Roy Fan, Gagandeep Singh


Hi Akhil,

> > > On 9/21/2021 12:58 AM, Akhil Goyal wrote:
> > > >>> From: Gagandeep Singh <g.singh@nxp.com>
> > > >>>
> > > >>> The current crypto raw data vector is extended to support
> > > >>> rte_security use cases, where we need the total data length to know
> > > >>> how much additional memory space is available in the buffer beyond
> > > >>> the data length, so that the driver/HW can write expanded-size
> > > >>> data after encryption.
> > > >>>
> > > >>> Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
> > > >>> Acked-by: Akhil Goyal <gakhil@marvell.com>
> > > >>> ---
> > > >>>   lib/cryptodev/rte_crypto_sym.h | 6 ++++++
> > > >>>   1 file changed, 6 insertions(+)
> > > >>>
> > > >>> diff --git a/lib/cryptodev/rte_crypto_sym.h
> > > >> b/lib/cryptodev/rte_crypto_sym.h
> > > >>> index dcc0bd5933..e5cef1fb72 100644
> > > >>> --- a/lib/cryptodev/rte_crypto_sym.h
> > > >>> +++ b/lib/cryptodev/rte_crypto_sym.h
> > > >>> @@ -37,6 +37,8 @@ struct rte_crypto_vec {
> > > >>>   	rte_iova_t iova;
> > > >>>   	/** length of the data buffer */
> > > >>>   	uint32_t len;
> > > >>> +	/** total buffer length*/
> > > >>> +	uint32_t tot_len;
> > > >>>   };
> > > >>>
> > > >>>   /**
> > > >>> @@ -980,12 +982,14 @@ rte_crypto_mbuf_to_vec(const struct
> > rte_mbuf
> > > >> *mb, uint32_t ofs, uint32_t len,
> > > >>>   	seglen = mb->data_len - ofs;
> > > >>>   	if (len <= seglen) {
> > > >>>   		vec[0].len = len;
> > > >>> +		vec[0].tot_len = mb->buf_len;
> > > >> That doesn't look right.
> > > >> We should take into account the mbuf headroom and the input offset.
> > > >> Something like:
> > > >> vec[0].tot_len = mb->buf_len - rte_pktmbuf_headroom(m) - ofs;
> > > >> Same in other places below.
> > > >>
> > > > I believe the packet can expand into headroom based on the protocol
> > support.
> > > Yes, total length is representing the total buffer length available. The
> > > security protocol shall take care of the headroom and offsets.
> >
> > Hmm, and how will it know how many bytes are in head-room, and how
> > many are in tail-room?
> > We either need to provide values for both, or assume that only tail-room is
> > available for the driver.
> I believe it should be the starting point from where output can be written till the end of the buffer.

Right, that's:
base = rte_pktmbuf_mtod_offset(mb, void *, ofs); 

> There should not be any headroom and tailroom for raw buffers.

I am not talking about raw buffers; what I am saying is that some space in the mbuf
might already be occupied, which is why we have data_off inside rte_mbuf, etc.

> It should be mbuf->buf_len - ofs.

No, it should be:
len = mb->buf_len - rte_pktmbuf_headroom(m) - ofs;
Otherwise PMD can overwrite memory beyond its buf_len.



^ permalink raw reply	[flat|nested] 115+ messages in thread

* Re: [dpdk-dev] [PATCH v2 02/15] crypto: add total raw buffer length
  2021-09-27  9:23                 ` Ananyev, Konstantin
@ 2021-09-28  7:05                   ` Akhil Goyal
  2021-09-28  8:20                     ` Hemant Agrawal
  0 siblings, 1 reply; 115+ messages in thread
From: Akhil Goyal @ 2021-09-28  7:05 UTC (permalink / raw)
  To: Ananyev, Konstantin, hemant.agrawal, dev; +Cc: Zhang, Roy Fan, Gagandeep Singh

> Hi Akhil,
> 
> > > > On 9/21/2021 12:58 AM, Akhil Goyal wrote:
> > > > >>> From: Gagandeep Singh <g.singh@nxp.com>
> > > > >>>
> > > > >>> The current crypto raw data vector is extended to support
> > > > >>> rte_security use cases, where we need the total data length to know
> > > > >>> how much additional memory space is available in the buffer beyond
> > > > >>> the data length, so that the driver/HW can write expanded-size
> > > > >>> data after encryption.
> > > > >>>
> > > > >>> Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
> > > > >>> Acked-by: Akhil Goyal <gakhil@marvell.com>
> > > > >>> ---
> > > > >>>   lib/cryptodev/rte_crypto_sym.h | 6 ++++++
> > > > >>>   1 file changed, 6 insertions(+)
> > > > >>>
> > > > >>> diff --git a/lib/cryptodev/rte_crypto_sym.h
> > > > >> b/lib/cryptodev/rte_crypto_sym.h
> > > > >>> index dcc0bd5933..e5cef1fb72 100644
> > > > >>> --- a/lib/cryptodev/rte_crypto_sym.h
> > > > >>> +++ b/lib/cryptodev/rte_crypto_sym.h
> > > > >>> @@ -37,6 +37,8 @@ struct rte_crypto_vec {
> > > > >>>   	rte_iova_t iova;
> > > > >>>   	/** length of the data buffer */
> > > > >>>   	uint32_t len;
> > > > >>> +	/** total buffer length*/
> > > > >>> +	uint32_t tot_len;
> > > > >>>   };
> > > > >>>
> > > > >>>   /**
> > > > >>> @@ -980,12 +982,14 @@ rte_crypto_mbuf_to_vec(const struct
> > > rte_mbuf
> > > > >> *mb, uint32_t ofs, uint32_t len,
> > > > >>>   	seglen = mb->data_len - ofs;
> > > > >>>   	if (len <= seglen) {
> > > > >>>   		vec[0].len = len;
> > > > >>> +		vec[0].tot_len = mb->buf_len;
> > > > >> That doesn't look right.
> > > > >> We should take into account the mbuf headroom and the input offset.
> > > > >> Something like:
> > > > >> vec[0].tot_len = mb->buf_len - rte_pktmbuf_headroom(m) - ofs;
> > > > >> Same in other places below.
> > > > >>
> > > > > I believe the packet can expand into headroom based on the protocol
> > > support.
> > > > Yes, total length is representing the total buffer length available. The
> > > > security protocol shall take care of the headroom and offsets.
> > >
> > > Hmm, and how will it know how many bytes are in head-room, and how
> > > many are in tail-room?
> > > We either need to provide values for both, or assume that only tail-room
> is
> > > available for the driver.
> > I believe it should be the starting point from where output can be written
> > till the end of the buffer.
> 
> Right, that's:
> base = rte_pktmbuf_mtod_offset(mb, void *, ofs);
> 
> > There should not be any headroom and tailroom for raw buffers.
> 
> I am not talking about raw buffers; what I am saying is that some space in the
> mbuf might already be occupied, which is why we have data_off inside
> rte_mbuf, etc.
> 
> > It should be mbuf->buf_len - ofs.
> 
> No, it should be:
> len = mb->buf_len - rte_pktmbuf_headroom(m) - ofs;
> Otherwise PMD can overwrite memory beyond its buf_len.
> 
@Hemant: Do you agree? Please send the next version.

^ permalink raw reply	[flat|nested] 115+ messages in thread

* Re: [dpdk-dev] [PATCH v2 02/15] crypto: add total raw buffer length
  2021-09-28  7:05                   ` Akhil Goyal
@ 2021-09-28  8:20                     ` Hemant Agrawal
  0 siblings, 0 replies; 115+ messages in thread
From: Hemant Agrawal @ 2021-09-28  8:20 UTC (permalink / raw)
  To: Akhil Goyal, Ananyev, Konstantin, dev; +Cc: Zhang, Roy Fan, Gagandeep Singh

Hi Akhil/Konstantin
> > Hi Akhil,
> >
> > > > > On 9/21/2021 12:58 AM, Akhil Goyal wrote:
> > > > > >>> From: Gagandeep Singh <g.singh@nxp.com>
> > > > > >>>
> > > > > >>> The current crypto raw data vector is extended to support
> > > > > >>> rte_security use cases, where we need the total data length to
> > > > > >>> know how much additional memory space is available in the buffer
> > > > > >>> beyond the data length, so that the driver/HW can write
> > > > > >>> expanded-size data after encryption.
> > > > > >>>
> > > > > >>> Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
> > > > > >>> Acked-by: Akhil Goyal <gakhil@marvell.com>
> > > > > >>> ---
> > > > > >>>   lib/cryptodev/rte_crypto_sym.h | 6 ++++++
> > > > > >>>   1 file changed, 6 insertions(+)
> > > > > >>>
> > > > > >>> diff --git a/lib/cryptodev/rte_crypto_sym.h
> > > > > >> b/lib/cryptodev/rte_crypto_sym.h
> > > > > >>> index dcc0bd5933..e5cef1fb72 100644
> > > > > >>> --- a/lib/cryptodev/rte_crypto_sym.h
> > > > > >>> +++ b/lib/cryptodev/rte_crypto_sym.h
> > > > > >>> @@ -37,6 +37,8 @@ struct rte_crypto_vec {
> > > > > >>>   	rte_iova_t iova;
> > > > > >>>   	/** length of the data buffer */
> > > > > >>>   	uint32_t len;
> > > > > >>> +	/** total buffer length*/
> > > > > >>> +	uint32_t tot_len;
> > > > > >>>   };
> > > > > >>>
> > > > > >>>   /**
> > > > > >>> @@ -980,12 +982,14 @@ rte_crypto_mbuf_to_vec(const struct
> > > > rte_mbuf
> > > > > >> *mb, uint32_t ofs, uint32_t len,
> > > > > >>>   	seglen = mb->data_len - ofs;
> > > > > >>>   	if (len <= seglen) {
> > > > > >>>   		vec[0].len = len;
> > > > > >>> +		vec[0].tot_len = mb->buf_len;
> > > > > >> That doesn't look right.
> > > > > >> We should take into account the mbuf headroom and the input offset.
> > > > > >> Something like:
> > > > > >> vec[0].tot_len = mb->buf_len - rte_pktmbuf_headroom(m) - ofs;
> > > > > >> Same in other places below.
> > > > > >>
> > > > > > I believe the packet can expand into headroom based on the
> > > > > > protocol
> > > > support.
> > > > > Yes, total length is representing the total buffer length
> > > > > available. The security protocol shall take care of the headroom and
> offsets.
> > > >
> > > > Hmm, and how will it know how many bytes are in head-room, and how
> > > > many are in tail-room?
> > > > We either need to provide values for both, or assume that only
> > > > tail-room
> > is
> > > > available for the driver.
> > > I believe it should be the starting point from where output can be
> > > written till the end of the buffer.
> >
> > Right, that's:
> > base = rte_pktmbuf_mtod_offset(mb, void *, ofs);
> >
> > > There should not be any headroom and tailroom for raw buffers.
> >
> > I am not talking about raw buffers; what I am saying is that some space
> > in the mbuf might already be occupied, which is why we have data_off
> > inside rte_mbuf, etc.
> >
> > > It should be mbuf->buf_len - ofs.
> >
> > No, it should be:
> > len = mb->buf_len - rte_pktmbuf_headroom(m) - ofs; Otherwise PMD can
> > overwrite memory beyond its buf_len.
> >
> @Hemant: Do you agree? Please send the next version.
[Hemant] Yes, I will send the new version.

^ permalink raw reply	[flat|nested] 115+ messages in thread

* [dpdk-dev] [PATCH v4 00/15] crypto: add raw vector support in DPAAx
  2021-09-07  7:59   ` [dpdk-dev] [PATCH v2 " Hemant Agrawal
                       ` (15 preceding siblings ...)
  2021-09-15 16:01     ` [dpdk-dev] [PATCH v2 00/15] crypto: add raw vector support in DPAAx Troy, Rebecca
@ 2021-10-13 19:00     ` Hemant Agrawal
  2021-10-13 19:00       ` [dpdk-dev] [PATCH v4 01/15] crypto: change sgl to src_sgl in vector Hemant Agrawal
                         ` (15 more replies)
  16 siblings, 16 replies; 115+ messages in thread
From: Hemant Agrawal @ 2021-10-13 19:00 UTC (permalink / raw)
  To: dev, gakhil; +Cc: konstantin.ananyev, roy.fan.zhang

This patch series adds support for the raw vector API in the dpaax_sec drivers. It also enhances the raw vector APIs to support OOP and security protocol offload.

v2: fix aesni compilation and add release notes.
v3: ignore it
v4: fix the tot_length patch as per Konstantin's comments


Gagandeep Singh (11):
  crypto: add total raw buffer length
  crypto: fix raw process for multi-seg case
  crypto/dpaa2_sec: support raw datapath APIs
  crypto/dpaa2_sec: support AUTH only with raw buffer APIs
  crypto/dpaa2_sec: support AUTHENC with raw buffer APIs
  crypto/dpaa2_sec: support AEAD with raw buffer APIs
  crypto/dpaa2_sec: support OOP with raw buffer API
  crypto/dpaa2_sec: enhance error checks with raw buffer APIs
  crypto/dpaa_sec: support raw datapath APIs
  crypto/dpaa_sec: support authonly and chain with raw APIs
  crypto/dpaa_sec: support AEAD and proto with raw APIs

Hemant Agrawal (4):
  crypto: change sgl to src_sgl in vector
  crypto: add dest_sgl in raw vector APIs
  test/crypto: add raw API test for dpaax
  test/crypto: add raw API support in 5G algos

 app/test/test_cryptodev.c                   |  179 +++-
 doc/guides/rel_notes/deprecation.rst        |   12 -
 doc/guides/rel_notes/release_21_11.rst      |    2 +
 drivers/crypto/aesni_gcm/aesni_gcm_pmd.c    |   12 +-
 drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c  |    6 +-
 drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c |   13 +-
 drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h   |   82 +-
 drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c | 1045 ++++++++++++++++++
 drivers/crypto/dpaa2_sec/meson.build        |    3 +-
 drivers/crypto/dpaa_sec/dpaa_sec.c          |   23 +-
 drivers/crypto/dpaa_sec/dpaa_sec.h          |   40 +-
 drivers/crypto/dpaa_sec/dpaa_sec_raw_dp.c   | 1052 +++++++++++++++++++
 drivers/crypto/dpaa_sec/meson.build         |    4 +-
 drivers/crypto/qat/qat_sym_hw_dp.c          |   27 +-
 lib/cryptodev/rte_crypto_sym.h              |   14 +-
 lib/ipsec/misc.h                            |    4 +-
 16 files changed, 2402 insertions(+), 116 deletions(-)
 create mode 100644 drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c
 create mode 100644 drivers/crypto/dpaa_sec/dpaa_sec_raw_dp.c

-- 
2.17.1


^ permalink raw reply	[flat|nested] 115+ messages in thread
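
As background, a rough sketch of the raw data-path call flow this series plugs into. The rte_cryptodev_* raw DP calls below follow the cryptodev API of this period from memory and should be checked against rte_cryptodev.h; context allocation and all error/completion handling are trimmed:

#include <rte_cryptodev.h>

/* Sketch: bind a pre-allocated raw DP context (sized via
 * rte_cryptodev_get_raw_dp_ctx_size()) to a session, then push one
 * prepared rte_crypto_sym_vec. Dequeue is omitted, and a zero
 * enq_status would require rte_cryptodev_raw_enqueue_done(),
 * also omitted here. */
static uint32_t
raw_dp_submit(uint8_t dev_id, uint16_t qp_id,
	      struct rte_crypto_raw_dp_ctx *ctx,
	      struct rte_cryptodev_sym_session *sess,
	      struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
	      void **user_data)
{
	union rte_cryptodev_session_ctx sess_ctx = { .crypto_sess = sess };
	int enq_status;

	if (rte_cryptodev_configure_raw_dp_ctx(dev_id, qp_id, ctx,
			RTE_CRYPTO_OP_WITH_SESSION, sess_ctx, 0) < 0)
		return 0;

	return rte_cryptodev_raw_enqueue_burst(ctx, vec, ofs, user_data,
			&enq_status);
}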

* [dpdk-dev] [PATCH v4 01/15] crypto: change sgl to src_sgl in vector
  2021-10-13 19:00     ` [dpdk-dev] [PATCH v4 " Hemant Agrawal
@ 2021-10-13 19:00       ` Hemant Agrawal
  2021-10-13 19:00       ` [dpdk-dev] [PATCH v4 02/15] crypto: add total raw buffer length Hemant Agrawal
                         ` (14 subsequent siblings)
  15 siblings, 0 replies; 115+ messages in thread
From: Hemant Agrawal @ 2021-10-13 19:00 UTC (permalink / raw)
  To: dev, gakhil; +Cc: konstantin.ananyev, roy.fan.zhang

This patch renames sgl to src_sgl to help differentiate
between the source and destination SGLs.

Signed-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>
Acked-by: Akhil Goyal <gakhil@marvell.com>
Acked-by: Konstantin Ananyev <konstantin.ananyev@intel.com>
---
 app/test/test_cryptodev.c                  |  6 ++---
 drivers/crypto/aesni_gcm/aesni_gcm_pmd.c   | 12 +++++-----
 drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c |  6 ++---
 drivers/crypto/qat/qat_sym_hw_dp.c         | 27 +++++++++++++---------
 lib/cryptodev/rte_crypto_sym.h             |  2 +-
 lib/ipsec/misc.h                           |  4 ++--
 6 files changed, 31 insertions(+), 26 deletions(-)

diff --git a/app/test/test_cryptodev.c b/app/test/test_cryptodev.c
index 65b64e1af0..4778dafbe5 100644
--- a/app/test/test_cryptodev.c
+++ b/app/test/test_cryptodev.c
@@ -232,7 +232,7 @@ process_sym_raw_dp_op(uint8_t dev_id, uint16_t qp_id,
 	digest.va = NULL;
 	sgl.vec = data_vec;
 	vec.num = 1;
-	vec.sgl = &sgl;
+	vec.src_sgl = &sgl;
 	vec.iv = &cipher_iv;
 	vec.digest = &digest;
 	vec.aad = &aad_auth_iv;
@@ -396,7 +396,7 @@ process_cpu_aead_op(uint8_t dev_id, struct rte_crypto_op *op)
 
 	sgl.vec = vec;
 	sgl.num = n;
-	symvec.sgl = &sgl;
+	symvec.src_sgl = &sgl;
 	symvec.iv = &iv_ptr;
 	symvec.digest = &digest_ptr;
 	symvec.aad = &aad_ptr;
@@ -442,7 +442,7 @@ process_cpu_crypt_auth_op(uint8_t dev_id, struct rte_crypto_op *op)
 
 	sgl.vec = vec;
 	sgl.num = n;
-	symvec.sgl = &sgl;
+	symvec.src_sgl = &sgl;
 	symvec.iv = &iv_ptr;
 	symvec.digest = &digest_ptr;
 	symvec.status = &st;
diff --git a/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c b/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c
index 330aad8157..d0368828e9 100644
--- a/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c
+++ b/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c
@@ -535,7 +535,7 @@ aesni_gcm_sgl_encrypt(struct aesni_gcm_session *s,
 	processed = 0;
 	for (i = 0; i < vec->num; ++i) {
 		aesni_gcm_process_gcm_sgl_op(s, gdata_ctx,
-			&vec->sgl[i], vec->iv[i].va,
+			&vec->src_sgl[i], vec->iv[i].va,
 			vec->aad[i].va);
 		vec->status[i] = aesni_gcm_sgl_op_finalize_encryption(s,
 			gdata_ctx, vec->digest[i].va);
@@ -554,7 +554,7 @@ aesni_gcm_sgl_decrypt(struct aesni_gcm_session *s,
 	processed = 0;
 	for (i = 0; i < vec->num; ++i) {
 		aesni_gcm_process_gcm_sgl_op(s, gdata_ctx,
-			&vec->sgl[i], vec->iv[i].va,
+			&vec->src_sgl[i], vec->iv[i].va,
 			vec->aad[i].va);
 		 vec->status[i] = aesni_gcm_sgl_op_finalize_decryption(s,
 			gdata_ctx, vec->digest[i].va);
@@ -572,13 +572,13 @@ aesni_gmac_sgl_generate(struct aesni_gcm_session *s,
 
 	processed = 0;
 	for (i = 0; i < vec->num; ++i) {
-		if (vec->sgl[i].num != 1) {
+		if (vec->src_sgl[i].num != 1) {
 			vec->status[i] = ENOTSUP;
 			continue;
 		}
 
 		aesni_gcm_process_gmac_sgl_op(s, gdata_ctx,
-			&vec->sgl[i], vec->iv[i].va);
+			&vec->src_sgl[i], vec->iv[i].va);
 		vec->status[i] = aesni_gcm_sgl_op_finalize_encryption(s,
 			gdata_ctx, vec->digest[i].va);
 		processed += (vec->status[i] == 0);
@@ -595,13 +595,13 @@ aesni_gmac_sgl_verify(struct aesni_gcm_session *s,
 
 	processed = 0;
 	for (i = 0; i < vec->num; ++i) {
-		if (vec->sgl[i].num != 1) {
+		if (vec->src_sgl[i].num != 1) {
 			vec->status[i] = ENOTSUP;
 			continue;
 		}
 
 		aesni_gcm_process_gmac_sgl_op(s, gdata_ctx,
-			&vec->sgl[i], vec->iv[i].va);
+			&vec->src_sgl[i], vec->iv[i].va);
 		vec->status[i] = aesni_gcm_sgl_op_finalize_decryption(s,
 			gdata_ctx, vec->digest[i].va);
 		processed += (vec->status[i] == 0);
diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
index 60963a8208..2419adc699 100644
--- a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
+++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
@@ -2002,14 +2002,14 @@ aesni_mb_cpu_crypto_process_bulk(struct rte_cryptodev *dev,
 	for (i = 0, j = 0, k = 0; i != vec->num; i++) {
 
 
-		ret = check_crypto_sgl(sofs, vec->sgl + i);
+		ret = check_crypto_sgl(sofs, vec->src_sgl + i);
 		if (ret != 0) {
 			vec->status[i] = ret;
 			continue;
 		}
 
-		buf = vec->sgl[i].vec[0].base;
-		len = vec->sgl[i].vec[0].len;
+		buf = vec->src_sgl[i].vec[0].base;
+		len = vec->src_sgl[i].vec[0].len;
 
 		job = IMB_GET_NEXT_JOB(mb_mgr);
 		if (job == NULL) {
diff --git a/drivers/crypto/qat/qat_sym_hw_dp.c b/drivers/crypto/qat/qat_sym_hw_dp.c
index 36d11e0dc9..12825e448b 100644
--- a/drivers/crypto/qat/qat_sym_hw_dp.c
+++ b/drivers/crypto/qat/qat_sym_hw_dp.c
@@ -181,8 +181,9 @@ qat_sym_dp_enqueue_cipher_jobs(void *qp_data, uint8_t *drv_ctx,
 			(uint8_t *)tx_queue->base_addr + tail);
 		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
 
-		data_len = qat_sym_dp_parse_data_vec(qp, req, vec->sgl[i].vec,
-			vec->sgl[i].num);
+		data_len = qat_sym_dp_parse_data_vec(qp, req,
+			vec->src_sgl[i].vec,
+			vec->src_sgl[i].num);
 		if (unlikely(data_len < 0))
 			break;
 		req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data[i];
@@ -302,8 +303,9 @@ qat_sym_dp_enqueue_auth_jobs(void *qp_data, uint8_t *drv_ctx,
 			(uint8_t *)tx_queue->base_addr + tail);
 		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
 
-		data_len = qat_sym_dp_parse_data_vec(qp, req, vec->sgl[i].vec,
-			vec->sgl[i].num);
+		data_len = qat_sym_dp_parse_data_vec(qp, req,
+			vec->src_sgl[i].vec,
+			vec->src_sgl[i].num);
 		if (unlikely(data_len < 0))
 			break;
 		req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data[i];
@@ -484,14 +486,16 @@ qat_sym_dp_enqueue_chain_jobs(void *qp_data, uint8_t *drv_ctx,
 			(uint8_t *)tx_queue->base_addr + tail);
 		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
 
-		data_len = qat_sym_dp_parse_data_vec(qp, req, vec->sgl[i].vec,
-			vec->sgl[i].num);
+		data_len = qat_sym_dp_parse_data_vec(qp, req,
+			vec->src_sgl[i].vec,
+			vec->src_sgl[i].num);
 		if (unlikely(data_len < 0))
 			break;
 		req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data[i];
-		if (unlikely(enqueue_one_chain_job(ctx, req, vec->sgl[i].vec,
-			vec->sgl[i].num, &vec->iv[i], &vec->digest[i],
-				&vec->auth_iv[i], ofs, (uint32_t)data_len)))
+		if (unlikely(enqueue_one_chain_job(ctx, req,
+			vec->src_sgl[i].vec, vec->src_sgl[i].num,
+			&vec->iv[i], &vec->digest[i],
+			&vec->auth_iv[i], ofs, (uint32_t)data_len)))
 			break;
 
 		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
@@ -688,8 +692,9 @@ qat_sym_dp_enqueue_aead_jobs(void *qp_data, uint8_t *drv_ctx,
 			(uint8_t *)tx_queue->base_addr + tail);
 		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
 
-		data_len = qat_sym_dp_parse_data_vec(qp, req, vec->sgl[i].vec,
-			vec->sgl[i].num);
+		data_len = qat_sym_dp_parse_data_vec(qp, req,
+			vec->src_sgl[i].vec,
+			vec->src_sgl[i].num);
 		if (unlikely(data_len < 0))
 			break;
 		req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data[i];
diff --git a/lib/cryptodev/rte_crypto_sym.h b/lib/cryptodev/rte_crypto_sym.h
index 58c0724743..dcc0bd5933 100644
--- a/lib/cryptodev/rte_crypto_sym.h
+++ b/lib/cryptodev/rte_crypto_sym.h
@@ -69,7 +69,7 @@ struct rte_crypto_sym_vec {
 	/** number of operations to perform */
 	uint32_t num;
 	/** array of SGL vectors */
-	struct rte_crypto_sgl *sgl;
+	struct rte_crypto_sgl *src_sgl;
 	/** array of pointers to cipher IV */
 	struct rte_crypto_va_iova_ptr *iv;
 	/** array of pointers to digest */
diff --git a/lib/ipsec/misc.h b/lib/ipsec/misc.h
index 79b9a20762..58ff538141 100644
--- a/lib/ipsec/misc.h
+++ b/lib/ipsec/misc.h
@@ -136,7 +136,7 @@ cpu_crypto_bulk(const struct rte_ipsec_session *ss,
 		/* not enough space in vec[] to hold all segments */
 		if (vcnt < 0) {
 			/* fill the request structure */
-			symvec.sgl = &vecpkt[j];
+			symvec.src_sgl = &vecpkt[j];
 			symvec.iv = &iv[j];
 			symvec.digest = &dgst[j];
 			symvec.aad = &aad[j];
@@ -160,7 +160,7 @@ cpu_crypto_bulk(const struct rte_ipsec_session *ss,
 	}
 
 	/* fill the request structure */
-	symvec.sgl = &vecpkt[j];
+	symvec.src_sgl = &vecpkt[j];
 	symvec.iv = &iv[j];
 	symvec.aad = &aad[j];
 	symvec.digest = &dgst[j];
-- 
2.17.1


^ permalink raw reply	[flat|nested] 115+ messages in thread
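
The per-operation pattern this rename touches in each PMD, reduced to a sketch; build_desc_t is a hypothetical stand-in for the driver-specific descriptor code:

#include <rte_crypto_sym.h>

typedef int (*build_desc_t)(struct rte_crypto_vec *sg, uint32_t n_sg,
			    struct rte_crypto_va_iova_ptr *iv,
			    struct rte_crypto_va_iova_ptr *digest);

/* Sketch: every enqueue path above walks the burst the same way,
 * taking the i-th source SGL from vec->src_sgl (formerly vec->sgl). */
static void
walk_burst(struct rte_crypto_sym_vec *vec, build_desc_t build)
{
	uint32_t i;

	for (i = 0; i < vec->num; i++)
		vec->status[i] = build(vec->src_sgl[i].vec,
				       vec->src_sgl[i].num,
				       &vec->iv[i], &vec->digest[i]);
}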

* [dpdk-dev] [PATCH v4 02/15] crypto: add total raw buffer length
  2021-10-13 19:00     ` [dpdk-dev] [PATCH v4 " Hemant Agrawal
  2021-10-13 19:00       ` [dpdk-dev] [PATCH v4 01/15] crypto: change sgl to src_sgl in vector Hemant Agrawal
@ 2021-10-13 19:00       ` Hemant Agrawal
  2021-10-14 12:39         ` Zhang, Roy Fan
  2021-10-15 17:45         ` Ananyev, Konstantin
  2021-10-13 19:00       ` [dpdk-dev] [PATCH v4 03/15] crypto: add dest_sgl in raw vector APIs Hemant Agrawal
                         ` (13 subsequent siblings)
  15 siblings, 2 replies; 115+ messages in thread
From: Hemant Agrawal @ 2021-10-13 19:00 UTC (permalink / raw)
  To: dev, gakhil; +Cc: konstantin.ananyev, roy.fan.zhang, Gagandeep Singh

From: Gagandeep Singh <g.singh@nxp.com>

The current crypto raw data vector is extended to support
rte_security use cases, where we need the total data length to know
how much additional memory space is available in the buffer beyond
the data length, so that the driver/HW can write expanded-size
data after encryption.

Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
Acked-by: Akhil Goyal <gakhil@marvell.com>
---
 doc/guides/rel_notes/deprecation.rst | 7 -------
 lib/cryptodev/rte_crypto_sym.h       | 7 +++++++
 2 files changed, 7 insertions(+), 7 deletions(-)

diff --git a/doc/guides/rel_notes/deprecation.rst b/doc/guides/rel_notes/deprecation.rst
index a4e86b31f5..53155459a0 100644
--- a/doc/guides/rel_notes/deprecation.rst
+++ b/doc/guides/rel_notes/deprecation.rst
@@ -188,13 +188,6 @@ Deprecation Notices
   This field will be null for inplace processing.
   This change is targeted for DPDK 21.11.
 
-* cryptodev: The structure ``rte_crypto_vec`` would be updated to add
-  ``tot_len`` to support total buffer length.
-  This is required for security cases like IPsec and PDCP encryption offload
-  to know how much additional memory space is available in buffer other than
-  data length so that driver/HW can write expanded size data after encryption.
-  This change is targeted for DPDK 21.11.
-
 * cryptodev: Hide structures ``rte_cryptodev_sym_session`` and
   ``rte_cryptodev_asym_session`` to remove unnecessary indirection between
   session and the private data of session. An opaque pointer can be exposed
diff --git a/lib/cryptodev/rte_crypto_sym.h b/lib/cryptodev/rte_crypto_sym.h
index dcc0bd5933..6be283e83c 100644
--- a/lib/cryptodev/rte_crypto_sym.h
+++ b/lib/cryptodev/rte_crypto_sym.h
@@ -37,6 +37,8 @@ struct rte_crypto_vec {
 	rte_iova_t iova;
 	/** length of the data buffer */
 	uint32_t len;
+	/** total buffer length */
+	uint32_t tot_len;
 };
 
 /**
@@ -980,12 +982,14 @@ rte_crypto_mbuf_to_vec(const struct rte_mbuf *mb, uint32_t ofs, uint32_t len,
 	seglen = mb->data_len - ofs;
 	if (len <= seglen) {
 		vec[0].len = len;
+		vec[0].tot_len = mb->buf_len - rte_pktmbuf_headroom(mb) - ofs;
 		return 1;
 	}
 
 	/* data spread across segments */
 	vec[0].len = seglen;
 	left = len - seglen;
+	vec[0].tot_len = mb->buf_len - rte_pktmbuf_headroom(mb) - ofs;
 	for (i = 1, nseg = mb->next; nseg != NULL; nseg = nseg->next, i++) {
 
 		vec[i].base = rte_pktmbuf_mtod(nseg, void *);
@@ -995,6 +999,8 @@ rte_crypto_mbuf_to_vec(const struct rte_mbuf *mb, uint32_t ofs, uint32_t len,
 		if (left <= seglen) {
 			/* whole requested data is completed */
 			vec[i].len = left;
+			vec[i].tot_len = mb->buf_len - rte_pktmbuf_headroom(mb)
+					- ofs;
 			left = 0;
 			break;
 		}
@@ -1002,6 +1008,7 @@ rte_crypto_mbuf_to_vec(const struct rte_mbuf *mb, uint32_t ofs, uint32_t len,
 		/* use whole segment */
 		vec[i].len = seglen;
 		left -= seglen;
+		vec[i].tot_len = mb->buf_len - rte_pktmbuf_headroom(mb) - ofs;
 	}
 
 	RTE_ASSERT(left == 0);
-- 
2.17.1


^ permalink raw reply	[flat|nested] 115+ messages in thread
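
A hedged sketch of what the new field enables on the driver side: a bounds check for post-encryption expansion without reaching back into the mbuf. The growth value is protocol specific and nothing below comes from the patch itself:

#include <errno.h>
#include <rte_crypto_sym.h>

/* tot_len - len is the spare room after the input data within one
 * segment; a PMD can reject the op if the protocol's expansion (ICV,
 * padding, trailer) would not fit. */
static int
check_expansion_room(const struct rte_crypto_vec *v, uint32_t growth)
{
	uint32_t spare = v->tot_len - v->len;

	return (spare >= growth) ? 0 : -ENOSPC;
}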

* [dpdk-dev] [PATCH v4 03/15] crypto: add dest_sgl in raw vector APIs
  2021-10-13 19:00     ` [dpdk-dev] [PATCH v4 " Hemant Agrawal
  2021-10-13 19:00       ` [dpdk-dev] [PATCH v4 01/15] crypto: change sgl to src_sgl in vector Hemant Agrawal
  2021-10-13 19:00       ` [dpdk-dev] [PATCH v4 02/15] crypto: add total raw buffer length Hemant Agrawal
@ 2021-10-13 19:00       ` Hemant Agrawal
  2021-10-17 12:21         ` [dpdk-dev] [EXT] " Akhil Goyal
  2021-10-13 19:00       ` [dpdk-dev] [PATCH v4 04/15] crypto: fix raw process for multi-seg case Hemant Agrawal
                         ` (12 subsequent siblings)
  15 siblings, 1 reply; 115+ messages in thread
From: Hemant Agrawal @ 2021-10-13 19:00 UTC (permalink / raw)
  To: dev, gakhil; +Cc: konstantin.ananyev, roy.fan.zhang

The structure rte_crypto_sym_vec is updated to
add dest_sgl to support out of place processing.

Signed-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>
Acked-by: Akhil Goyal <gakhil@marvell.com>
Acked-by: Konstantin Ananyev <konstantin.ananyev@intel.com>
---
 doc/guides/rel_notes/deprecation.rst | 5 -----
 lib/cryptodev/rte_crypto_sym.h       | 2 ++
 2 files changed, 2 insertions(+), 5 deletions(-)

diff --git a/doc/guides/rel_notes/deprecation.rst b/doc/guides/rel_notes/deprecation.rst
index 53155459a0..581ee21449 100644
--- a/doc/guides/rel_notes/deprecation.rst
+++ b/doc/guides/rel_notes/deprecation.rst
@@ -183,11 +183,6 @@ Deprecation Notices
   has a limited size ``uint16_t``.
   It will be moved and extended as ``uint32_t`` in DPDK 21.11.
 
-* cryptodev: The structure ``rte_crypto_sym_vec`` would be updated to add
-  ``dest_sgl`` to support out of place processing.
-  This field will be null for inplace processing.
-  This change is targeted for DPDK 21.11.
-
 * cryptodev: Hide structures ``rte_cryptodev_sym_session`` and
   ``rte_cryptodev_asym_session`` to remove unnecessary indirection between
   session and the private data of session. An opaque pointer can be exposed
diff --git a/lib/cryptodev/rte_crypto_sym.h b/lib/cryptodev/rte_crypto_sym.h
index 6be283e83c..b6a229e263 100644
--- a/lib/cryptodev/rte_crypto_sym.h
+++ b/lib/cryptodev/rte_crypto_sym.h
@@ -72,6 +72,8 @@ struct rte_crypto_sym_vec {
 	uint32_t num;
 	/** array of SGL vectors */
 	struct rte_crypto_sgl *src_sgl;
+	/** array of SGL vectors for OOP, keep it NULL for inplace*/
+	struct rte_crypto_sgl *dest_sgl;
 	/** array of pointers to cipher IV */
 	struct rte_crypto_va_iova_ptr *iv;
 	/** array of pointers to digest */
-- 
2.17.1


^ permalink raw reply	[flat|nested] 115+ messages in thread
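
A small sketch of the convention the new field implies on the driver side (a NULL dest_sgl keeps the pre-patch in-place behaviour):

#include <stddef.h>
#include <rte_crypto_sym.h>

/* Pick the SGL that output entries are built from: out of place only
 * when the application supplied dest_sgl, otherwise reuse the source. */
static const struct rte_crypto_sgl *
output_sgl(const struct rte_crypto_sym_vec *vec, uint32_t i)
{
	return (vec->dest_sgl != NULL) ? &vec->dest_sgl[i]
				       : &vec->src_sgl[i];
}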

* [dpdk-dev] [PATCH v4 04/15] crypto: fix raw process for multi-seg case
  2021-10-13 19:00     ` [dpdk-dev] [PATCH v4 " Hemant Agrawal
                         ` (2 preceding siblings ...)
  2021-10-13 19:00       ` [dpdk-dev] [PATCH v4 03/15] crypto: add dest_sgl in raw vector APIs Hemant Agrawal
@ 2021-10-13 19:00       ` Hemant Agrawal
  2021-10-15 17:39         ` Ananyev, Konstantin
  2021-10-13 19:00       ` [dpdk-dev] [PATCH v4 05/15] crypto/dpaa2_sec: support raw datapath APIs Hemant Agrawal
                         ` (11 subsequent siblings)
  15 siblings, 1 reply; 115+ messages in thread
From: Hemant Agrawal @ 2021-10-13 19:00 UTC (permalink / raw)
  To: dev, gakhil
  Cc: konstantin.ananyev, roy.fan.zhang, Gagandeep Singh,
	marcinx.smoczynski, stable

From: Gagandeep Singh <g.singh@nxp.com>

If no next segment is available, the “for” loop terminates and the function
still returns i+1, i.e. 2, even though it has filled only one buffer.

Fixes: 7adf992fb9bf ("cryptodev: introduce CPU crypto API")
Cc: marcinx.smoczynski@intel.com
Cc: stable@dpdk.org

Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
---
 lib/cryptodev/rte_crypto_sym.h | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/lib/cryptodev/rte_crypto_sym.h b/lib/cryptodev/rte_crypto_sym.h
index b6a229e263..dc88ad6dcc 100644
--- a/lib/cryptodev/rte_crypto_sym.h
+++ b/lib/cryptodev/rte_crypto_sym.h
@@ -1004,6 +1004,7 @@ rte_crypto_mbuf_to_vec(const struct rte_mbuf *mb, uint32_t ofs, uint32_t len,
 			vec[i].tot_len = mb->buf_len - rte_pktmbuf_headroom(mb)
 					- ofs;
 			left = 0;
+			i++;
 			break;
 		}
 
@@ -1014,7 +1015,7 @@ rte_crypto_mbuf_to_vec(const struct rte_mbuf *mb, uint32_t ofs, uint32_t len,
 	}
 
 	RTE_ASSERT(left == 0);
-	return i + 1;
+	return i;
 }
 
 
-- 
2.17.1


^ permalink raw reply	[flat|nested] 115+ messages in thread
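
A standalone model of the counting bug, for anyone reading the one-line diff in isolation; it mirrors the loop structure of rte_crypto_mbuf_to_vec() but is not the DPDK code:

/* Slot 0 is filled before the loop, as in the fixed function; the loop
 * then fills further slots until the data or the segments run out. */
static unsigned int
fill_slots(const unsigned int *seg_len, unsigned int n_seg,
	   unsigned int len, unsigned int *filled)
{
	unsigned int left, i;

	filled[0] = (len <= seg_len[0]) ? len : seg_len[0];
	left = len - filled[0];
	if (left == 0)
		return 1;

	for (i = 1; i < n_seg; i++) {
		if (left <= seg_len[i]) {
			filled[i] = left;
			left = 0;
			i++;	/* the fix: count this slot before break */
			break;
		}
		filled[i] = seg_len[i];
		left -= seg_len[i];
	}
	/*
	 * Here `i` equals the number of filled slots on both exit paths.
	 * The pre-fix `return i + 1` was correct only after the break;
	 * when the loop ended because the segments ran out, it
	 * overcounted by one.
	 */
	return i;
}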

* [dpdk-dev] [PATCH v4 05/15] crypto/dpaa2_sec: support raw datapath APIs
  2021-10-13 19:00     ` [dpdk-dev] [PATCH v4 " Hemant Agrawal
                         ` (3 preceding siblings ...)
  2021-10-13 19:00       ` [dpdk-dev] [PATCH v4 04/15] crypto: fix raw process for multi-seg case Hemant Agrawal
@ 2021-10-13 19:00       ` Hemant Agrawal
  2021-10-13 19:00       ` [dpdk-dev] [PATCH v4 06/15] crypto/dpaa2_sec: support AUTH only with raw buffer APIs Hemant Agrawal
                         ` (10 subsequent siblings)
  15 siblings, 0 replies; 115+ messages in thread
From: Hemant Agrawal @ 2021-10-13 19:00 UTC (permalink / raw)
  To: dev, gakhil; +Cc: konstantin.ananyev, roy.fan.zhang, Gagandeep Singh

From: Gagandeep Singh <g.singh@nxp.com>

This patch adds the framework for raw API support.
The initial patch covers only the cipher-only part.

Signed-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>
Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
---
 doc/guides/rel_notes/release_21_11.rst      |   1 +
 drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c |  13 +-
 drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h   |  60 +-
 drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c | 595 ++++++++++++++++++++
 drivers/crypto/dpaa2_sec/meson.build        |   3 +-
 5 files changed, 643 insertions(+), 29 deletions(-)
 create mode 100644 drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c

diff --git a/doc/guides/rel_notes/release_21_11.rst b/doc/guides/rel_notes/release_21_11.rst
index 54718ff367..279f0adf62 100644
--- a/doc/guides/rel_notes/release_21_11.rst
+++ b/doc/guides/rel_notes/release_21_11.rst
@@ -123,6 +123,7 @@ New Features
 * **Updated NXP dpaa2_sec crypto PMD.**
 
   * Added PDCP short MAC-I support.
+  * Added raw vector datapath API support
 
 * **Updated the ACC100 bbdev PMD.**
 
diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c b/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
index dfa72f3f93..4eb3615250 100644
--- a/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
@@ -49,15 +49,8 @@
 #define FSL_MC_DPSECI_DEVID     3
 
 #define NO_PREFETCH 0
-/* FLE_POOL_NUM_BUFS is set as per the ipsec-secgw application */
-#define FLE_POOL_NUM_BUFS	32000
-#define FLE_POOL_BUF_SIZE	256
-#define FLE_POOL_CACHE_SIZE	512
-#define FLE_SG_MEM_SIZE(num)	(FLE_POOL_BUF_SIZE + ((num) * 32))
-#define SEC_FLC_DHR_OUTBOUND	-114
-#define SEC_FLC_DHR_INBOUND	0
 
-static uint8_t cryptodev_driver_id;
+uint8_t cryptodev_driver_id;
 
 #ifdef RTE_LIB_SECURITY
 static inline int
@@ -3828,6 +3821,9 @@ static struct rte_cryptodev_ops crypto_ops = {
 	.sym_session_get_size     = dpaa2_sec_sym_session_get_size,
 	.sym_session_configure    = dpaa2_sec_sym_session_configure,
 	.sym_session_clear        = dpaa2_sec_sym_session_clear,
+	/* Raw data-path API related operations */
+	.sym_get_raw_dp_ctx_size = dpaa2_sec_get_dp_ctx_size,
+	.sym_configure_raw_dp_ctx = dpaa2_sec_configure_raw_dp_ctx,
 };
 
 #ifdef RTE_LIB_SECURITY
@@ -3910,6 +3906,7 @@ dpaa2_sec_dev_init(struct rte_cryptodev *cryptodev)
 			RTE_CRYPTODEV_FF_HW_ACCELERATED |
 			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
 			RTE_CRYPTODEV_FF_SECURITY |
+			RTE_CRYPTODEV_FF_SYM_RAW_DP |
 			RTE_CRYPTODEV_FF_IN_PLACE_SGL |
 			RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT |
 			RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h b/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h
index 8dee0a4bda..e9b888186e 100644
--- a/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h
@@ -15,6 +15,16 @@
 #define CRYPTODEV_NAME_DPAA2_SEC_PMD	crypto_dpaa2_sec
 /**< NXP DPAA2 - SEC PMD device name */
 
+extern uint8_t cryptodev_driver_id;
+
+/* FLE_POOL_NUM_BUFS is set as per the ipsec-secgw application */
+#define FLE_POOL_NUM_BUFS	32000
+#define FLE_POOL_BUF_SIZE	256
+#define FLE_POOL_CACHE_SIZE	512
+#define FLE_SG_MEM_SIZE(num)	(FLE_POOL_BUF_SIZE + ((num) * 32))
+#define SEC_FLC_DHR_OUTBOUND	-114
+#define SEC_FLC_DHR_INBOUND	0
+
 #define MAX_QUEUES		64
 #define MAX_DESC_SIZE		64
 /** private data structure for each DPAA2_SEC device */
@@ -158,6 +168,24 @@ struct dpaa2_pdcp_ctxt {
 	uint32_t hfn_threshold;	/*!< HFN Threashold for key renegotiation */
 };
 #endif
+
+typedef int (*dpaa2_sec_build_fd_t)(
+	void *qp, uint8_t *drv_ctx, struct rte_crypto_vec *data_vec,
+	uint16_t n_data_vecs, union rte_crypto_sym_ofs ofs,
+	struct rte_crypto_va_iova_ptr *iv,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *aad_or_auth_iv,
+	void *user_data);
+
+typedef int (*dpaa2_sec_build_raw_dp_fd_t)(uint8_t *drv_ctx,
+		       struct rte_crypto_sgl *sgl,
+		       struct rte_crypto_va_iova_ptr *iv,
+		       struct rte_crypto_va_iova_ptr *digest,
+		       struct rte_crypto_va_iova_ptr *auth_iv,
+		       union rte_crypto_sym_ofs ofs,
+		       void *userdata,
+		       struct qbman_fd *fd);
+
 typedef struct dpaa2_sec_session_entry {
 	void *ctxt;
 	uint8_t ctxt_type;
@@ -165,6 +193,8 @@ typedef struct dpaa2_sec_session_entry {
 	enum rte_crypto_cipher_algorithm cipher_alg; /*!< Cipher Algorithm*/
 	enum rte_crypto_auth_algorithm auth_alg; /*!< Authentication Algorithm*/
 	enum rte_crypto_aead_algorithm aead_alg; /*!< AEAD Algorithm*/
+	dpaa2_sec_build_fd_t build_fd;
+	dpaa2_sec_build_raw_dp_fd_t build_raw_dp_fd;
 	union {
 		struct {
 			uint8_t *data;	/**< pointer to key data */
@@ -547,26 +577,6 @@ static const struct rte_cryptodev_capabilities dpaa2_sec_capabilities[] = {
 			}, }
 		}, }
 	},
-	{	/* NULL (CIPHER) */
-		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
-		{.sym = {
-			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
-			{.cipher = {
-				.algo = RTE_CRYPTO_CIPHER_NULL,
-				.block_size = 1,
-				.key_size = {
-					.min = 0,
-					.max = 0,
-					.increment = 0
-				},
-				.iv_size = {
-					.min = 0,
-					.max = 0,
-					.increment = 0
-				}
-			}, },
-		}, }
-	},
 	{	/* AES CBC */
 		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
 		{.sym = {
@@ -983,4 +993,14 @@ calc_chksum(void *buffer, int len)
 	return  result;
 }
 
+int
+dpaa2_sec_configure_raw_dp_ctx(struct rte_cryptodev *dev, uint16_t qp_id,
+	struct rte_crypto_raw_dp_ctx *raw_dp_ctx,
+	enum rte_crypto_op_sess_type sess_type,
+	union rte_cryptodev_session_ctx session_ctx, uint8_t is_update);
+
+int
+dpaa2_sec_get_dp_ctx_size(struct rte_cryptodev *dev);
+
+
 #endif /* _DPAA2_SEC_PMD_PRIVATE_H_ */
diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c b/drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c
new file mode 100644
index 0000000000..8925c8e938
--- /dev/null
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c
@@ -0,0 +1,595 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2021 NXP
+ */
+
+#include <cryptodev_pmd.h>
+#include <rte_fslmc.h>
+#include <fslmc_vfio.h>
+#include <dpaa2_hw_pvt.h>
+#include <dpaa2_hw_dpio.h>
+
+#include "dpaa2_sec_priv.h"
+#include "dpaa2_sec_logs.h"
+
+struct dpaa2_sec_raw_dp_ctx {
+	dpaa2_sec_session *session;
+	uint32_t tail;
+	uint32_t head;
+	uint16_t cached_enqueue;
+	uint16_t cached_dequeue;
+};
+
+static int
+build_raw_dp_chain_fd(uint8_t *drv_ctx,
+		       struct rte_crypto_sgl *sgl,
+		       struct rte_crypto_va_iova_ptr *iv,
+		       struct rte_crypto_va_iova_ptr *digest,
+		       struct rte_crypto_va_iova_ptr *auth_iv,
+		       union rte_crypto_sym_ofs ofs,
+		       void *userdata,
+		       struct qbman_fd *fd)
+{
+	RTE_SET_USED(drv_ctx);
+	RTE_SET_USED(sgl);
+	RTE_SET_USED(iv);
+	RTE_SET_USED(digest);
+	RTE_SET_USED(auth_iv);
+	RTE_SET_USED(ofs);
+	RTE_SET_USED(userdata);
+	RTE_SET_USED(fd);
+
+	return 0;
+}
+
+static int
+build_raw_dp_aead_fd(uint8_t *drv_ctx,
+		       struct rte_crypto_sgl *sgl,
+		       struct rte_crypto_va_iova_ptr *iv,
+		       struct rte_crypto_va_iova_ptr *digest,
+		       struct rte_crypto_va_iova_ptr *auth_iv,
+		       union rte_crypto_sym_ofs ofs,
+		       void *userdata,
+		       struct qbman_fd *fd)
+{
+	RTE_SET_USED(drv_ctx);
+	RTE_SET_USED(sgl);
+	RTE_SET_USED(iv);
+	RTE_SET_USED(digest);
+	RTE_SET_USED(auth_iv);
+	RTE_SET_USED(ofs);
+	RTE_SET_USED(userdata);
+	RTE_SET_USED(fd);
+
+	return 0;
+}
+
+static int
+build_raw_dp_auth_fd(uint8_t *drv_ctx,
+		       struct rte_crypto_sgl *sgl,
+		       struct rte_crypto_va_iova_ptr *iv,
+		       struct rte_crypto_va_iova_ptr *digest,
+		       struct rte_crypto_va_iova_ptr *auth_iv,
+		       union rte_crypto_sym_ofs ofs,
+		       void *userdata,
+		       struct qbman_fd *fd)
+{
+	RTE_SET_USED(drv_ctx);
+	RTE_SET_USED(sgl);
+	RTE_SET_USED(iv);
+	RTE_SET_USED(digest);
+	RTE_SET_USED(auth_iv);
+	RTE_SET_USED(ofs);
+	RTE_SET_USED(userdata);
+	RTE_SET_USED(fd);
+
+	return 0;
+}
+
+static int
+build_raw_dp_proto_fd(uint8_t *drv_ctx,
+		       struct rte_crypto_sgl *sgl,
+		       struct rte_crypto_va_iova_ptr *iv,
+		       struct rte_crypto_va_iova_ptr *digest,
+		       struct rte_crypto_va_iova_ptr *auth_iv,
+		       union rte_crypto_sym_ofs ofs,
+		       void *userdata,
+		       struct qbman_fd *fd)
+{
+	RTE_SET_USED(drv_ctx);
+	RTE_SET_USED(sgl);
+	RTE_SET_USED(iv);
+	RTE_SET_USED(digest);
+	RTE_SET_USED(auth_iv);
+	RTE_SET_USED(ofs);
+	RTE_SET_USED(userdata);
+	RTE_SET_USED(fd);
+
+	return 0;
+}
+
+static int
+build_raw_dp_proto_compound_fd(uint8_t *drv_ctx,
+		       struct rte_crypto_sgl *sgl,
+		       struct rte_crypto_va_iova_ptr *iv,
+		       struct rte_crypto_va_iova_ptr *digest,
+		       struct rte_crypto_va_iova_ptr *auth_iv,
+		       union rte_crypto_sym_ofs ofs,
+		       void *userdata,
+		       struct qbman_fd *fd)
+{
+	RTE_SET_USED(drv_ctx);
+	RTE_SET_USED(sgl);
+	RTE_SET_USED(iv);
+	RTE_SET_USED(digest);
+	RTE_SET_USED(auth_iv);
+	RTE_SET_USED(ofs);
+	RTE_SET_USED(userdata);
+	RTE_SET_USED(fd);
+
+	return 0;
+}
+
+static int
+build_raw_dp_cipher_fd(uint8_t *drv_ctx,
+		       struct rte_crypto_sgl *sgl,
+		       struct rte_crypto_va_iova_ptr *iv,
+		       struct rte_crypto_va_iova_ptr *digest,
+		       struct rte_crypto_va_iova_ptr *auth_iv,
+		       union rte_crypto_sym_ofs ofs,
+		       void *userdata,
+		       struct qbman_fd *fd)
+{
+	RTE_SET_USED(digest);
+	RTE_SET_USED(auth_iv);
+
+	dpaa2_sec_session *sess =
+		((struct dpaa2_sec_raw_dp_ctx *)drv_ctx)->session;
+	struct qbman_fle *ip_fle, *op_fle, *sge, *fle;
+	int total_len = 0, data_len = 0, data_offset;
+	struct sec_flow_context *flc;
+	struct ctxt_priv *priv = sess->ctxt;
+	unsigned int i;
+
+	for (i = 0; i < sgl->num; i++)
+		total_len += sgl->vec[i].len;
+
+	data_len = total_len - ofs.ofs.cipher.head - ofs.ofs.cipher.tail;
+	data_offset = ofs.ofs.cipher.head;
+
+	if (sess->cipher_alg == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
+		sess->cipher_alg == RTE_CRYPTO_CIPHER_ZUC_EEA3) {
+		if ((data_len & 7) || (data_offset & 7)) {
+			DPAA2_SEC_ERR("CIPHER: len/offset must be full bytes");
+			return -ENOTSUP;
+		}
+
+		data_len = data_len >> 3;
+		data_offset = data_offset >> 3;
+	}
+
+	/* first FLE entry used to store mbuf and session ctxt */
+	fle = (struct qbman_fle *)rte_malloc(NULL,
+			FLE_SG_MEM_SIZE(2*sgl->num),
+			RTE_CACHE_LINE_SIZE);
+	if (!fle) {
+		DPAA2_SEC_ERR("RAW CIPHER SG: Memory alloc failed for SGE");
+		return -ENOMEM;
+	}
+	memset(fle, 0, FLE_SG_MEM_SIZE(2*sgl->num));
+	/* first FLE entry used to store userdata and session ctxt */
+	DPAA2_SET_FLE_ADDR(fle, (size_t)userdata);
+	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
+
+	op_fle = fle + 1;
+	ip_fle = fle + 2;
+	sge = fle + 3;
+
+	flc = &priv->flc_desc[0].flc;
+
+	DPAA2_SEC_DP_DEBUG(
+		"RAW CIPHER SG: cipher_off: 0x%x/length %d, ivlen=%d\n",
+		data_offset,
+		data_len,
+		sess->iv.length);
+
+	/* o/p fle */
+	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));
+	op_fle->length = data_len;
+	DPAA2_SET_FLE_SG_EXT(op_fle);
+
+	/* o/p 1st seg */
+	DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
+	DPAA2_SET_FLE_OFFSET(sge, data_offset);
+	sge->length = sgl->vec[0].len - data_offset;
+
+	/* o/p segs */
+	for (i = 1; i < sgl->num; i++) {
+		sge++;
+		DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
+		DPAA2_SET_FLE_OFFSET(sge, 0);
+		sge->length = sgl->vec[i].len;
+	}
+	DPAA2_SET_FLE_FIN(sge);
+
+	DPAA2_SEC_DP_DEBUG(
+		"RAW CIPHER SG: 1 - flc = %p, fle = %p FLEaddr = %x-%x, len %d\n",
+		flc, fle, fle->addr_hi, fle->addr_lo,
+		fle->length);
+
+	/* i/p fle */
+	sge++;
+	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
+	ip_fle->length = sess->iv.length + data_len;
+	DPAA2_SET_FLE_SG_EXT(ip_fle);
+
+	/* i/p IV */
+	DPAA2_SET_FLE_ADDR(sge, iv->iova);
+	DPAA2_SET_FLE_OFFSET(sge, 0);
+	sge->length = sess->iv.length;
+
+	sge++;
+
+	/* i/p 1st seg */
+	DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
+	DPAA2_SET_FLE_OFFSET(sge, data_offset);
+	sge->length = sgl->vec[0].len - data_offset;
+
+	/* i/p segs */
+	for (i = 1; i < sgl->num; i++) {
+		sge++;
+		DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
+		DPAA2_SET_FLE_OFFSET(sge, 0);
+		sge->length = sgl->vec[i].len;
+	}
+	DPAA2_SET_FLE_FIN(sge);
+	DPAA2_SET_FLE_FIN(ip_fle);
+
+	/* sg fd */
+	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
+	DPAA2_SET_FD_LEN(fd, ip_fle->length);
+	DPAA2_SET_FD_COMPOUND_FMT(fd);
+	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
+
+	DPAA2_SEC_DP_DEBUG(
+		"RAW CIPHER SG: fdaddr =%" PRIx64 " off =%d, len =%d\n",
+		DPAA2_GET_FD_ADDR(fd),
+		DPAA2_GET_FD_OFFSET(fd),
+		DPAA2_GET_FD_LEN(fd));
+
+	return 0;
+}
+
+static __rte_always_inline uint32_t
+dpaa2_sec_raw_enqueue_burst(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
+	void *user_data[], int *status)
+{
+	RTE_SET_USED(user_data);
+	uint32_t loop;
+	int32_t ret;
+	struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
+	uint32_t frames_to_send, retry_count;
+	struct qbman_eq_desc eqdesc;
+	struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp_data;
+	dpaa2_sec_session *sess =
+		((struct dpaa2_sec_raw_dp_ctx *)drv_ctx)->session;
+	struct qbman_swp *swp;
+	uint16_t num_tx = 0;
+	uint32_t flags[MAX_TX_RING_SLOTS] = {0};
+
+	if (unlikely(vec->num == 0))
+		return 0;
+
+	if (sess == NULL) {
+		DPAA2_SEC_ERR("sessionless raw crypto not supported");
+		return 0;
+	}
+	/*Prepare enqueue descriptor*/
+	qbman_eq_desc_clear(&eqdesc);
+	qbman_eq_desc_set_no_orp(&eqdesc, DPAA2_EQ_RESP_ERR_FQ);
+	qbman_eq_desc_set_response(&eqdesc, 0, 0);
+	qbman_eq_desc_set_fq(&eqdesc, dpaa2_qp->tx_vq.fqid);
+
+	if (!DPAA2_PER_LCORE_DPIO) {
+		ret = dpaa2_affine_qbman_swp();
+		if (ret) {
+			DPAA2_SEC_ERR(
+				"Failed to allocate IO portal, tid: %d\n",
+				rte_gettid());
+			return 0;
+		}
+	}
+	swp = DPAA2_PER_LCORE_PORTAL;
+
+	while (vec->num) {
+		frames_to_send = (vec->num > dpaa2_eqcr_size) ?
+			dpaa2_eqcr_size : vec->num;
+
+		for (loop = 0; loop < frames_to_send; loop++) {
+			/*Clear the unused FD fields before sending*/
+			memset(&fd_arr[loop], 0, sizeof(struct qbman_fd));
+			ret = sess->build_raw_dp_fd(drv_ctx,
+						    &vec->src_sgl[loop],
+						    &vec->iv[loop],
+						    &vec->digest[loop],
+						    &vec->auth_iv[loop],
+						    ofs,
+						    user_data[loop],
+						    &fd_arr[loop]);
+			if (ret) {
+				DPAA2_SEC_ERR("error: Improper packet contents"
+					      " for crypto operation");
+				goto skip_tx;
+			}
+			status[loop] = 1;
+		}
+
+		loop = 0;
+		retry_count = 0;
+		while (loop < frames_to_send) {
+			ret = qbman_swp_enqueue_multiple(swp, &eqdesc,
+							 &fd_arr[loop],
+							 &flags[loop],
+							 frames_to_send - loop);
+			if (unlikely(ret < 0)) {
+				retry_count++;
+				if (retry_count > DPAA2_MAX_TX_RETRY_COUNT) {
+					num_tx += loop;
+					vec->num -= loop;
+					goto skip_tx;
+				}
+			} else {
+				loop += ret;
+				retry_count = 0;
+			}
+		}
+
+		num_tx += loop;
+		vec->num -= loop;
+	}
+skip_tx:
+	dpaa2_qp->tx_vq.tx_pkts += num_tx;
+	dpaa2_qp->tx_vq.err_pkts += vec->num;
+
+	return num_tx;
+}
+
+static __rte_always_inline int
+dpaa2_sec_raw_enqueue(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_vec *data_vec,
+	uint16_t n_data_vecs, union rte_crypto_sym_ofs ofs,
+	struct rte_crypto_va_iova_ptr *iv,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *aad_or_auth_iv,
+	void *user_data)
+{
+	RTE_SET_USED(qp_data);
+	RTE_SET_USED(drv_ctx);
+	RTE_SET_USED(data_vec);
+	RTE_SET_USED(n_data_vecs);
+	RTE_SET_USED(ofs);
+	RTE_SET_USED(iv);
+	RTE_SET_USED(digest);
+	RTE_SET_USED(aad_or_auth_iv);
+	RTE_SET_USED(user_data);
+
+	return 0;
+}
+
+static inline void *
+sec_fd_to_userdata(const struct qbman_fd *fd)
+{
+	struct qbman_fle *fle;
+	void *userdata;
+	fle = (struct qbman_fle *)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));
+
+	DPAA2_SEC_DP_DEBUG("FLE addr = %x - %x, offset = %x\n",
+			   fle->addr_hi, fle->addr_lo, fle->fin_bpid_offset);
+	userdata = (struct rte_crypto_op *)DPAA2_GET_FLE_ADDR((fle - 1));
+	/* free the fle memory */
+	rte_free((void *)(fle-1));
+
+	return userdata;
+}
+
+static __rte_always_inline uint32_t
+dpaa2_sec_raw_dequeue_burst(void *qp_data, uint8_t *drv_ctx,
+	rte_cryptodev_raw_get_dequeue_count_t get_dequeue_count,
+	uint32_t max_nb_to_dequeue,
+	rte_cryptodev_raw_post_dequeue_t post_dequeue,
+	void **out_user_data, uint8_t is_user_data_array,
+	uint32_t *n_success, int *dequeue_status)
+{
+	RTE_SET_USED(drv_ctx);
+	RTE_SET_USED(get_dequeue_count);
+
+	/* Function is responsible for receiving frames for a given device and VQ */
+	struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp_data;
+	struct qbman_result *dq_storage;
+	uint32_t fqid = dpaa2_qp->rx_vq.fqid;
+	int ret, num_rx = 0;
+	uint8_t is_last = 0, status;
+	struct qbman_swp *swp;
+	const struct qbman_fd *fd;
+	struct qbman_pull_desc pulldesc;
+	void *user_data;
+	uint32_t nb_ops = max_nb_to_dequeue;
+
+	if (!DPAA2_PER_LCORE_DPIO) {
+		ret = dpaa2_affine_qbman_swp();
+		if (ret) {
+			DPAA2_SEC_ERR(
+				"Failed to allocate IO portal, tid: %d\n",
+				rte_gettid());
+			return 0;
+		}
+	}
+	swp = DPAA2_PER_LCORE_PORTAL;
+	dq_storage = dpaa2_qp->rx_vq.q_storage->dq_storage[0];
+
+	qbman_pull_desc_clear(&pulldesc);
+	qbman_pull_desc_set_numframes(&pulldesc,
+				      (nb_ops > dpaa2_dqrr_size) ?
+				      dpaa2_dqrr_size : nb_ops);
+	qbman_pull_desc_set_fq(&pulldesc, fqid);
+	qbman_pull_desc_set_storage(&pulldesc, dq_storage,
+				    (uint64_t)DPAA2_VADDR_TO_IOVA(dq_storage),
+				    1);
+
+	/*Issue a volatile dequeue command. */
+	while (1) {
+		if (qbman_swp_pull(swp, &pulldesc)) {
+			DPAA2_SEC_WARN(
+				"SEC VDQ command is not issued : QBMAN busy");
+			/* Portal was busy, try again */
+			continue;
+		}
+		break;
+	}
+
+	/* Receive the packets till Last Dequeue entry is found with
+	 * respect to the above issued PULL command.
+	 */
+	while (!is_last) {
+		/* Check if the previously issued command is completed.
+		 * Also seems like the SWP is shared between the Ethernet Driver
+		 * and the SEC driver.
+		 */
+		while (!qbman_check_command_complete(dq_storage))
+			;
+
+		/* Loop until the dq_storage is updated with
+		 * new token by QBMAN
+		 */
+		while (!qbman_check_new_result(dq_storage))
+			;
+		/* Check whether the last pull command has expired and
+		 * set the condition for loop termination
+		 */
+		if (qbman_result_DQ_is_pull_complete(dq_storage)) {
+			is_last = 1;
+			/* Check for valid frame. */
+			status = (uint8_t)qbman_result_DQ_flags(dq_storage);
+			if (unlikely(
+				(status & QBMAN_DQ_STAT_VALIDFRAME) == 0)) {
+				DPAA2_SEC_DP_DEBUG("No frame is delivered\n");
+				continue;
+			}
+		}
+
+		fd = qbman_result_DQ_fd(dq_storage);
+		user_data = sec_fd_to_userdata(fd);
+		if (is_user_data_array)
+			out_user_data[num_rx] = user_data;
+		else
+			out_user_data[0] = user_data;
+		if (unlikely(fd->simple.frc)) {
+			/* TODO Parse SEC errors */
+			DPAA2_SEC_ERR("SEC returned Error - %x",
+				      fd->simple.frc);
+			status = RTE_CRYPTO_OP_STATUS_ERROR;
+		} else {
+			status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+		}
+		post_dequeue(user_data, num_rx, status);
+
+		num_rx++;
+		dq_storage++;
+	} /* End of Packet Rx loop */
+
+	dpaa2_qp->rx_vq.rx_pkts += num_rx;
+	*dequeue_status = 1;
+	*n_success = num_rx;
+
+	DPAA2_SEC_DP_DEBUG("SEC Received %d Packets\n", num_rx);
+	/*Return the total number of packets received to DPAA2 app*/
+	return num_rx;
+}
+
+static __rte_always_inline void *
+dpaa2_sec_raw_dequeue(void *qp_data, uint8_t *drv_ctx, int *dequeue_status,
+		enum rte_crypto_op_status *op_status)
+{
+	RTE_SET_USED(qp_data);
+	RTE_SET_USED(drv_ctx);
+	RTE_SET_USED(dequeue_status);
+	RTE_SET_USED(op_status);
+
+	return NULL;
+}
+
+static __rte_always_inline int
+dpaa2_sec_raw_enqueue_done(void *qp_data, uint8_t *drv_ctx, uint32_t n)
+{
+	RTE_SET_USED(qp_data);
+	RTE_SET_USED(drv_ctx);
+	RTE_SET_USED(n);
+
+	return 0;
+}
+
+static __rte_always_inline int
+dpaa2_sec_raw_dequeue_done(void *qp_data, uint8_t *drv_ctx, uint32_t n)
+{
+	RTE_SET_USED(qp_data);
+	RTE_SET_USED(drv_ctx);
+	RTE_SET_USED(n);
+
+	return 0;
+}
+
+int
+dpaa2_sec_configure_raw_dp_ctx(struct rte_cryptodev *dev, uint16_t qp_id,
+	struct rte_crypto_raw_dp_ctx *raw_dp_ctx,
+	enum rte_crypto_op_sess_type sess_type,
+	union rte_cryptodev_session_ctx session_ctx, uint8_t is_update)
+{
+	dpaa2_sec_session *sess;
+	struct dpaa2_sec_raw_dp_ctx *dp_ctx;
+	RTE_SET_USED(qp_id);
+
+	if (!is_update) {
+		memset(raw_dp_ctx, 0, sizeof(*raw_dp_ctx));
+		raw_dp_ctx->qp_data = dev->data->queue_pairs[qp_id];
+	}
+
+	if (sess_type == RTE_CRYPTO_OP_SECURITY_SESSION)
+		sess = (dpaa2_sec_session *)get_sec_session_private_data(
+				session_ctx.sec_sess);
+	else if (sess_type == RTE_CRYPTO_OP_WITH_SESSION)
+		sess = (dpaa2_sec_session *)get_sym_session_private_data(
+			session_ctx.crypto_sess, cryptodev_driver_id);
+	else
+		return -ENOTSUP;
+	raw_dp_ctx->dequeue_burst = dpaa2_sec_raw_dequeue_burst;
+	raw_dp_ctx->dequeue = dpaa2_sec_raw_dequeue;
+	raw_dp_ctx->dequeue_done = dpaa2_sec_raw_dequeue_done;
+	raw_dp_ctx->enqueue_burst = dpaa2_sec_raw_enqueue_burst;
+	raw_dp_ctx->enqueue = dpaa2_sec_raw_enqueue;
+	raw_dp_ctx->enqueue_done = dpaa2_sec_raw_enqueue_done;
+
+	if (sess->ctxt_type == DPAA2_SEC_CIPHER_HASH)
+		sess->build_raw_dp_fd = build_raw_dp_chain_fd;
+	else if (sess->ctxt_type == DPAA2_SEC_AEAD)
+		sess->build_raw_dp_fd = build_raw_dp_aead_fd;
+	else if (sess->ctxt_type == DPAA2_SEC_AUTH)
+		sess->build_raw_dp_fd = build_raw_dp_auth_fd;
+	else if (sess->ctxt_type == DPAA2_SEC_CIPHER)
+		sess->build_raw_dp_fd = build_raw_dp_cipher_fd;
+	else if (sess->ctxt_type == DPAA2_SEC_IPSEC)
+		sess->build_raw_dp_fd = build_raw_dp_proto_fd;
+	else if (sess->ctxt_type == DPAA2_SEC_PDCP)
+		sess->build_raw_dp_fd = build_raw_dp_proto_compound_fd;
+	else
+		return -ENOTSUP;
+	dp_ctx = (struct dpaa2_sec_raw_dp_ctx *)raw_dp_ctx->drv_ctx_data;
+	dp_ctx->session = sess;
+
+	return 0;
+}
+
+int
+dpaa2_sec_get_dp_ctx_size(__rte_unused struct rte_cryptodev *dev)
+{
+	return sizeof(struct dpaa2_sec_raw_dp_ctx);
+}
diff --git a/drivers/crypto/dpaa2_sec/meson.build b/drivers/crypto/dpaa2_sec/meson.build
index ea1d73a13d..e6e5abb3c1 100644
--- a/drivers/crypto/dpaa2_sec/meson.build
+++ b/drivers/crypto/dpaa2_sec/meson.build
@@ -1,5 +1,5 @@
 # SPDX-License-Identifier: BSD-3-Clause
-# Copyright 2018 NXP
+# Copyright 2018,2021 NXP
 
 if not is_linux
     build = false
@@ -9,6 +9,7 @@ endif
 deps += ['security', 'mempool_dpaa2']
 sources = files(
         'dpaa2_sec_dpseci.c',
+	'dpaa2_sec_raw_dp.c',
         'mc/dpseci.c',
 )
 
-- 
2.17.1


^ permalink raw reply	[flat|nested] 115+ messages in thread

* [dpdk-dev] [PATCH v4 06/15] crypto/dpaa2_sec: support AUTH only with raw buffer APIs
  2021-10-13 19:00     ` [dpdk-dev] [PATCH v4 " Hemant Agrawal
                         ` (4 preceding siblings ...)
  2021-10-13 19:00       ` [dpdk-dev] [PATCH v4 05/15] crypto/dpaa2_sec: support raw datapath APIs Hemant Agrawal
@ 2021-10-13 19:00       ` Hemant Agrawal
  2021-10-13 19:00       ` [dpdk-dev] [PATCH v4 07/15] crypto/dpaa2_sec: support AUTHENC " Hemant Agrawal
                         ` (9 subsequent siblings)
  15 siblings, 0 replies; 115+ messages in thread
From: Hemant Agrawal @ 2021-10-13 19:00 UTC (permalink / raw)
  To: dev, gakhil; +Cc: konstantin.ananyev, roy.fan.zhang, Gagandeep Singh

From: Gagandeep Singh <g.singh@nxp.com>

This patch adds support for auth-only operations with raw buffer APIs.

Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
---
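
Illustration (not part of the patch): a minimal caller-side sketch of how an
auth-only operation could be described through the generic raw datapath API
that this builder serves. Session/queue setup, IOVA mapping and error
handling are omitted, and digest_va/digest_iova/user_data are placeholders
supplied by the application.

#include <string.h>
#include <rte_cryptodev.h>
#include <rte_crypto_sym.h>

static uint32_t
enqueue_auth_only(struct rte_crypto_raw_dp_ctx *ctx,
		  struct rte_crypto_vec *data, uint32_t n_segs,
		  void *digest_va, rte_iova_t digest_iova, void *user_data)
{
	struct rte_crypto_sgl sgl = { .vec = data, .num = n_segs };
	struct rte_crypto_va_iova_ptr digest = {
		.va = digest_va, .iova = digest_iova };
	struct rte_crypto_va_iova_ptr iv = { 0 }; /* unused for a plain hash */
	struct rte_crypto_sym_vec vec;
	union rte_crypto_sym_ofs ofs;
	void *ud[1] = { user_data };
	int status;

	memset(&vec, 0, sizeof(vec));
	vec.num = 1;
	vec.src_sgl = &sgl;	/* field renamed from sgl earlier in the series */
	vec.iv = &iv;
	vec.auth_iv = &iv;
	vec.digest = &digest;
	ofs.raw = 0;		/* authenticate the whole buffer, no head/tail */

	/* returns the number of descriptors accepted (0 or 1 here) */
	return rte_cryptodev_raw_enqueue_burst(ctx, &vec, ofs, ud, &status);
}
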
 drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h   |  21 ----
 drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c | 114 ++++++++++++++++++--
 2 files changed, 108 insertions(+), 27 deletions(-)

diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h b/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h
index e9b888186e..f397b756e8 100644
--- a/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h
@@ -231,27 +231,6 @@ typedef struct dpaa2_sec_session_entry {
 
 static const struct rte_cryptodev_capabilities dpaa2_sec_capabilities[] = {
 	/* Symmetric capabilities */
-	{	/* NULL (AUTH) */
-		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
-		{.sym = {
-			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
-			{.auth = {
-				.algo = RTE_CRYPTO_AUTH_NULL,
-				.block_size = 1,
-				.key_size = {
-					.min = 0,
-					.max = 0,
-					.increment = 0
-				},
-				.digest_size = {
-					.min = 0,
-					.max = 0,
-					.increment = 0
-				},
-				.iv_size = { 0 }
-			}, },
-		}, },
-	},
 	{	/* MD5 */
 		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
 		{.sym = {
diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c b/drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c
index 8925c8e938..471c81b9e7 100644
--- a/drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c
@@ -11,6 +11,8 @@
 #include "dpaa2_sec_priv.h"
 #include "dpaa2_sec_logs.h"
 
+#include <desc/algo.h>
+
 struct dpaa2_sec_raw_dp_ctx {
 	dpaa2_sec_session *session;
 	uint32_t tail;
@@ -73,14 +75,114 @@ build_raw_dp_auth_fd(uint8_t *drv_ctx,
 		       void *userdata,
 		       struct qbman_fd *fd)
 {
-	RTE_SET_USED(drv_ctx);
-	RTE_SET_USED(sgl);
 	RTE_SET_USED(iv);
-	RTE_SET_USED(digest);
 	RTE_SET_USED(auth_iv);
-	RTE_SET_USED(ofs);
-	RTE_SET_USED(userdata);
-	RTE_SET_USED(fd);
+
+	dpaa2_sec_session *sess =
+		((struct dpaa2_sec_raw_dp_ctx *)drv_ctx)->session;
+	struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
+	struct sec_flow_context *flc;
+	int total_len = 0, data_len = 0, data_offset;
+	uint8_t *old_digest;
+	struct ctxt_priv *priv = sess->ctxt;
+	unsigned int i;
+
+	for (i = 0; i < sgl->num; i++)
+		total_len += sgl->vec[i].len;
+
+	data_len = total_len - ofs.ofs.auth.head - ofs.ofs.auth.tail;
+	data_offset = ofs.ofs.auth.head;
+
+	if (sess->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
+		sess->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
+		if ((data_len & 7) || (data_offset & 7)) {
+			DPAA2_SEC_ERR("AUTH: len/offset must be full bytes");
+			return -ENOTSUP;
+		}
+
+		data_len = data_len >> 3;
+		data_offset = data_offset >> 3;
+	}
+	fle = (struct qbman_fle *)rte_malloc(NULL,
+		FLE_SG_MEM_SIZE(2 * sgl->num),
+			RTE_CACHE_LINE_SIZE);
+	if (unlikely(!fle)) {
+		DPAA2_SEC_ERR("AUTH SG: Memory alloc failed for SGE");
+		return -ENOMEM;
+	}
+	memset(fle, 0, FLE_SG_MEM_SIZE(2*sgl->num));
+	/* first FLE entry used to store mbuf and session ctxt */
+	DPAA2_SET_FLE_ADDR(fle, (size_t)userdata);
+	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
+	op_fle = fle + 1;
+	ip_fle = fle + 2;
+	sge = fle + 3;
+
+	flc = &priv->flc_desc[DESC_INITFINAL].flc;
+
+	/* sg FD */
+	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
+	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
+	DPAA2_SET_FD_COMPOUND_FMT(fd);
+
+	/* o/p fle */
+	DPAA2_SET_FLE_ADDR(op_fle,
+			DPAA2_VADDR_TO_IOVA(digest->va));
+	op_fle->length = sess->digest_length;
+
+	/* i/p fle */
+	DPAA2_SET_FLE_SG_EXT(ip_fle);
+	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
+	ip_fle->length = data_len;
+
+	if (sess->iv.length) {
+		uint8_t *iv_ptr;
+
+		iv_ptr = rte_crypto_op_ctod_offset(userdata, uint8_t *,
+						sess->iv.offset);
+
+		if (sess->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
+			iv_ptr = conv_to_snow_f9_iv(iv_ptr);
+			sge->length = 12;
+		} else if (sess->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
+			iv_ptr = conv_to_zuc_eia_iv(iv_ptr);
+			sge->length = 8;
+		} else {
+			sge->length = sess->iv.length;
+		}
+		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
+		ip_fle->length += sge->length;
+		sge++;
+	}
+	/* i/p 1st seg */
+	DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
+	DPAA2_SET_FLE_OFFSET(sge, data_offset);
+
+	if (data_len <= (int)(sgl->vec[0].len - data_offset)) {
+		sge->length = data_len;
+		data_len = 0;
+	} else {
+		sge->length = sgl->vec[0].len - data_offset;
+		for (i = 1; i < sgl->num; i++) {
+			sge++;
+			DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
+			DPAA2_SET_FLE_OFFSET(sge, 0);
+			sge->length = sgl->vec[i].len;
+		}
+	}
+	if (sess->dir == DIR_DEC) {
+		/* Digest verification case */
+		sge++;
+		old_digest = (uint8_t *)(sge + 1);
+		rte_memcpy(old_digest, digest->va,
+			sess->digest_length);
+		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_digest));
+		sge->length = sess->digest_length;
+		ip_fle->length += sess->digest_length;
+	}
+	DPAA2_SET_FLE_FIN(sge);
+	DPAA2_SET_FLE_FIN(ip_fle);
+	DPAA2_SET_FD_LEN(fd, ip_fle->length);
 
 	return 0;
 }
-- 
2.17.1


^ permalink raw reply	[flat|nested] 115+ messages in thread

* [dpdk-dev] [PATCH v4 07/15] crypto/dpaa2_sec: support AUTHENC with raw buffer APIs
  2021-10-13 19:00     ` [dpdk-dev] [PATCH v4 " Hemant Agrawal
                         ` (5 preceding siblings ...)
  2021-10-13 19:00       ` [dpdk-dev] [PATCH v4 06/15] crypto/dpaa2_sec: support AUTH only with raw buffer APIs Hemant Agrawal
@ 2021-10-13 19:00       ` Hemant Agrawal
  2021-10-13 19:00       ` [dpdk-dev] [PATCH v4 08/15] crypto/dpaa2_sec: support AEAD " Hemant Agrawal
                         ` (8 subsequent siblings)
  15 siblings, 0 replies; 115+ messages in thread
From: Hemant Agrawal @ 2021-10-13 19:00 UTC (permalink / raw)
  To: dev, gakhil; +Cc: konstantin.ananyev, roy.fan.zhang, Gagandeep Singh

From: Gagandeep Singh <g.singh@nxp.com>

This patch supports AUTHENC with raw buffer APIs.

Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
---
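
Illustration (not part of the patch): the offset layout the chain FD builder
below consumes. The concrete values are made up for the example.

#include <rte_crypto_sym.h>

/* Chained op: authenticate the whole payload but leave a 16-byte
 * header in clear text (encrypt only past it). */
static union rte_crypto_sym_ofs
chain_offsets(void)
{
	union rte_crypto_sym_ofs ofs;

	ofs.raw = 0;
	ofs.ofs.auth.head = 0;
	ofs.ofs.auth.tail = 0;
	ofs.ofs.cipher.head = 16;
	ofs.ofs.cipher.tail = 0;
	/*
	 * The builder derives the authenticate-only region from these:
	 *   auth_hdr_len  = cipher.head - auth.head	(16 here)
	 *   auth_tail_len = auth.tail
	 *   auth_only_len = (auth_tail_len << 16) | auth_hdr_len
	 */
	return ofs;
}
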
 drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c | 128 ++++++++++++++++++--
 1 file changed, 121 insertions(+), 7 deletions(-)

diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c b/drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c
index 471c81b9e7..565af6dcba 100644
--- a/drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c
@@ -31,14 +31,128 @@ build_raw_dp_chain_fd(uint8_t *drv_ctx,
 		       void *userdata,
 		       struct qbman_fd *fd)
 {
-	RTE_SET_USED(drv_ctx);
-	RTE_SET_USED(sgl);
-	RTE_SET_USED(iv);
-	RTE_SET_USED(digest);
 	RTE_SET_USED(auth_iv);
-	RTE_SET_USED(ofs);
-	RTE_SET_USED(userdata);
-	RTE_SET_USED(fd);
+
+	dpaa2_sec_session *sess =
+		((struct dpaa2_sec_raw_dp_ctx *)drv_ctx)->session;
+	struct ctxt_priv *priv = sess->ctxt;
+	struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
+	struct sec_flow_context *flc;
+	int data_len = 0, auth_len = 0, cipher_len = 0;
+	unsigned int i = 0;
+	uint16_t auth_hdr_len = ofs.ofs.cipher.head -
+				ofs.ofs.auth.head;
+
+	uint16_t auth_tail_len = ofs.ofs.auth.tail;
+	uint32_t auth_only_len = (auth_tail_len << 16) | auth_hdr_len;
+	int icv_len = sess->digest_length;
+	uint8_t *old_icv;
+	uint8_t *iv_ptr = iv->va;
+
+	for (i = 0; i < sgl->num; i++)
+		data_len += sgl->vec[i].len;
+
+	cipher_len = data_len - ofs.ofs.cipher.head - ofs.ofs.cipher.tail;
+	auth_len = data_len - ofs.ofs.auth.head - ofs.ofs.auth.tail;
+	/* first FLE entry used to store session ctxt */
+	fle = (struct qbman_fle *)rte_malloc(NULL,
+			FLE_SG_MEM_SIZE(2 * sgl->num),
+			RTE_CACHE_LINE_SIZE);
+	if (unlikely(!fle)) {
+		DPAA2_SEC_ERR("AUTHENC SG: Memory alloc failed for SGE");
+		return -ENOMEM;
+	}
+	memset(fle, 0, FLE_SG_MEM_SIZE(2 * sgl->num));
+	DPAA2_SET_FLE_ADDR(fle, (size_t)userdata);
+	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
+
+	op_fle = fle + 1;
+	ip_fle = fle + 2;
+	sge = fle + 3;
+
+	/* Save the shared descriptor */
+	flc = &priv->flc_desc[0].flc;
+
+	/* Configure FD as a FRAME LIST */
+	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
+	DPAA2_SET_FD_COMPOUND_FMT(fd);
+	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
+
+	/* Configure Output FLE with Scatter/Gather Entry */
+	DPAA2_SET_FLE_SG_EXT(op_fle);
+	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));
+
+	if (auth_only_len)
+		DPAA2_SET_FLE_INTERNAL_JD(op_fle, auth_only_len);
+
+	op_fle->length = (sess->dir == DIR_ENC) ?
+			(cipher_len + icv_len) :
+			cipher_len;
+
+	/* Configure Output SGE for Encap/Decap */
+	DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
+	DPAA2_SET_FLE_OFFSET(sge, ofs.ofs.auth.head);
+	sge->length = sgl->vec[0].len - ofs.ofs.auth.head;
+
+	/* o/p segs */
+	for (i = 1; i < sgl->num; i++) {
+		sge++;
+		DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
+		DPAA2_SET_FLE_OFFSET(sge, 0);
+		sge->length = sgl->vec[i].len;
+	}
+
+	if (sess->dir == DIR_ENC) {
+		sge++;
+		DPAA2_SET_FLE_ADDR(sge,
+			digest->iova);
+		sge->length = icv_len;
+	}
+	DPAA2_SET_FLE_FIN(sge);
+
+	sge++;
+
+	/* Configure Input FLE with Scatter/Gather Entry */
+	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
+	DPAA2_SET_FLE_SG_EXT(ip_fle);
+	DPAA2_SET_FLE_FIN(ip_fle);
+
+	ip_fle->length = (sess->dir == DIR_ENC) ?
+			(auth_len + sess->iv.length) :
+			(auth_len + sess->iv.length +
+			icv_len);
+
+	/* Configure Input SGE for Encap/Decap */
+	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
+	sge->length = sess->iv.length;
+
+	sge++;
+	DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
+	DPAA2_SET_FLE_OFFSET(sge, ofs.ofs.auth.head);
+	sge->length = sgl->vec[0].len - ofs.ofs.auth.head;
+
+	for (i = 1; i < sgl->num; i++) {
+		sge++;
+		DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
+		DPAA2_SET_FLE_OFFSET(sge, 0);
+		sge->length = sgl->vec[i].len;
+	}
+
+	if (sess->dir == DIR_DEC) {
+		sge++;
+		old_icv = (uint8_t *)(sge + 1);
+		memcpy(old_icv, digest->va,
+			icv_len);
+		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
+		sge->length = icv_len;
+	}
+
+	DPAA2_SET_FLE_FIN(sge);
+	if (auth_only_len) {
+		DPAA2_SET_FLE_INTERNAL_JD(ip_fle, auth_only_len);
+		DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
+	}
+	DPAA2_SET_FD_LEN(fd, ip_fle->length);
 
 	return 0;
 }
-- 
2.17.1


^ permalink raw reply	[flat|nested] 115+ messages in thread

* [dpdk-dev] [PATCH v4 08/15] crypto/dpaa2_sec: support AEAD with raw buffer APIs
  2021-10-13 19:00     ` [dpdk-dev] [PATCH v4 " Hemant Agrawal
                         ` (6 preceding siblings ...)
  2021-10-13 19:00       ` [dpdk-dev] [PATCH v4 07/15] crypto/dpaa2_sec: support AUTHENC " Hemant Agrawal
@ 2021-10-13 19:00       ` Hemant Agrawal
  2021-10-13 19:00       ` [dpdk-dev] [PATCH v4 09/15] crypto/dpaa2_sec: support OOP with raw buffer API Hemant Agrawal
                         ` (7 subsequent siblings)
  15 siblings, 0 replies; 115+ messages in thread
From: Hemant Agrawal @ 2021-10-13 19:00 UTC (permalink / raw)
  To: dev, gakhil; +Cc: konstantin.ananyev, roy.fan.zhang, Gagandeep Singh

From: Gagandeep Singh <g.singh@nxp.com>

Add raw vector API support for AEAD algos.

Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
---
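
Illustration (not part of the patch): the per-op pointers an AEAD element of
a raw vector carries on the caller side. rte_crypto_sym_vec keeps aad and
auth_iv in a union, so the AAD set here is what the builder below reads
through its auth_iv argument when auth_only_len is non-zero. All arguments
are placeholders supplied by the application.

#include <rte_crypto_sym.h>

static void
fill_aead_elem(struct rte_crypto_sym_vec *vec, uint32_t i,
	       struct rte_crypto_va_iova_ptr *iv,
	       struct rte_crypto_va_iova_ptr *aad,
	       struct rte_crypto_va_iova_ptr *digest)
{
	vec->iv[i] = *iv;	  /* e.g. 12-byte GCM IV (sess->iv.length) */
	vec->aad[i] = *aad;	  /* consumed when auth_only_len != 0 */
	vec->digest[i] = *digest; /* written on ENC, verified on DEC */
}
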
 drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c | 249 +++++++++++++++++---
 1 file changed, 214 insertions(+), 35 deletions(-)

diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c b/drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c
index 565af6dcba..5c29c61f9d 100644
--- a/drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c
@@ -167,14 +167,126 @@ build_raw_dp_aead_fd(uint8_t *drv_ctx,
 		       void *userdata,
 		       struct qbman_fd *fd)
 {
-	RTE_SET_USED(drv_ctx);
-	RTE_SET_USED(sgl);
-	RTE_SET_USED(iv);
-	RTE_SET_USED(digest);
-	RTE_SET_USED(auth_iv);
-	RTE_SET_USED(ofs);
-	RTE_SET_USED(userdata);
-	RTE_SET_USED(fd);
+	dpaa2_sec_session *sess =
+		((struct dpaa2_sec_raw_dp_ctx *)drv_ctx)->session;
+	struct ctxt_priv *priv = sess->ctxt;
+	struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
+	struct sec_flow_context *flc;
+	uint32_t auth_only_len = sess->ext_params.aead_ctxt.auth_only_len;
+	int icv_len = sess->digest_length;
+	uint8_t *old_icv;
+	uint8_t *IV_ptr = iv->va;
+	unsigned int i = 0;
+	int data_len = 0, aead_len = 0;
+
+	for (i = 0; i < sgl->num; i++)
+		data_len += sgl->vec[i].len;
+
+	aead_len = data_len - ofs.ofs.cipher.head - ofs.ofs.cipher.tail;
+
+	/* first FLE entry used to store mbuf and session ctxt */
+	fle = (struct qbman_fle *)rte_malloc(NULL,
+			FLE_SG_MEM_SIZE(2 * sgl->num),
+			RTE_CACHE_LINE_SIZE);
+	if (unlikely(!fle)) {
+		DPAA2_SEC_ERR("GCM SG: Memory alloc failed for SGE");
+		return -ENOMEM;
+	}
+	memset(fle, 0, FLE_SG_MEM_SIZE(2 * sgl->num));
+	DPAA2_SET_FLE_ADDR(fle, (size_t)userdata);
+	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
+
+	op_fle = fle + 1;
+	ip_fle = fle + 2;
+	sge = fle + 3;
+
+	/* Save the shared descriptor */
+	flc = &priv->flc_desc[0].flc;
+
+	/* Configure FD as a FRAME LIST */
+	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
+	DPAA2_SET_FD_COMPOUND_FMT(fd);
+	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
+
+	/* Configure Output FLE with Scatter/Gather Entry */
+	DPAA2_SET_FLE_SG_EXT(op_fle);
+	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));
+
+	if (auth_only_len)
+		DPAA2_SET_FLE_INTERNAL_JD(op_fle, auth_only_len);
+
+	op_fle->length = (sess->dir == DIR_ENC) ?
+			(aead_len + icv_len) :
+			aead_len;
+
+	/* Configure Output SGE for Encap/Decap */
+	DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
+	DPAA2_SET_FLE_OFFSET(sge, ofs.ofs.cipher.head);
+	sge->length = sgl->vec[0].len - ofs.ofs.cipher.head;
+
+	/* o/p segs */
+	for (i = 1; i < sgl->num; i++) {
+		sge++;
+		DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
+		DPAA2_SET_FLE_OFFSET(sge, 0);
+		sge->length = sgl->vec[i].len;
+	}
+
+	if (sess->dir == DIR_ENC) {
+		sge++;
+		DPAA2_SET_FLE_ADDR(sge, digest->iova);
+		sge->length = icv_len;
+	}
+	DPAA2_SET_FLE_FIN(sge);
+
+	sge++;
+
+	/* Configure Input FLE with Scatter/Gather Entry */
+	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
+	DPAA2_SET_FLE_SG_EXT(ip_fle);
+	DPAA2_SET_FLE_FIN(ip_fle);
+	ip_fle->length = (sess->dir == DIR_ENC) ?
+		(aead_len + sess->iv.length + auth_only_len) :
+		(aead_len + sess->iv.length + auth_only_len +
+		icv_len);
+
+	/* Configure Input SGE for Encap/Decap */
+	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(IV_ptr));
+	sge->length = sess->iv.length;
+
+	sge++;
+	if (auth_only_len) {
+		DPAA2_SET_FLE_ADDR(sge, auth_iv->iova);
+		sge->length = auth_only_len;
+		sge++;
+	}
+
+	DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
+	DPAA2_SET_FLE_OFFSET(sge, ofs.ofs.cipher.head);
+	sge->length = sgl->vec[0].len - ofs.ofs.cipher.head;
+
+	/* i/p segs */
+	for (i = 1; i < sgl->num; i++) {
+		sge++;
+		DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
+		DPAA2_SET_FLE_OFFSET(sge, 0);
+		sge->length = sgl->vec[i].len;
+	}
+
+	if (sess->dir == DIR_DEC) {
+		sge++;
+		old_icv = (uint8_t *)(sge + 1);
+		memcpy(old_icv,  digest->va, icv_len);
+		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
+		sge->length = icv_len;
+	}
+
+	DPAA2_SET_FLE_FIN(sge);
+	if (auth_only_len) {
+		DPAA2_SET_FLE_INTERNAL_JD(ip_fle, auth_only_len);
+		DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
+	}
+	DPAA2_SET_FD_LEN(fd, ip_fle->length);
 
 	return 0;
 }
@@ -311,36 +423,104 @@ build_raw_dp_proto_fd(uint8_t *drv_ctx,
 		       void *userdata,
 		       struct qbman_fd *fd)
 {
-	RTE_SET_USED(drv_ctx);
-	RTE_SET_USED(sgl);
 	RTE_SET_USED(iv);
 	RTE_SET_USED(digest);
 	RTE_SET_USED(auth_iv);
 	RTE_SET_USED(ofs);
-	RTE_SET_USED(userdata);
-	RTE_SET_USED(fd);
 
-	return 0;
-}
+	dpaa2_sec_session *sess =
+		((struct dpaa2_sec_raw_dp_ctx *)drv_ctx)->session;
+	struct ctxt_priv *priv = sess->ctxt;
+	struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
+	struct sec_flow_context *flc;
+	uint32_t in_len = 0, out_len = 0, i;
 
-static int
-build_raw_dp_proto_compound_fd(uint8_t *drv_ctx,
-		       struct rte_crypto_sgl *sgl,
-		       struct rte_crypto_va_iova_ptr *iv,
-		       struct rte_crypto_va_iova_ptr *digest,
-		       struct rte_crypto_va_iova_ptr *auth_iv,
-		       union rte_crypto_sym_ofs ofs,
-		       void *userdata,
-		       struct qbman_fd *fd)
-{
-	RTE_SET_USED(drv_ctx);
-	RTE_SET_USED(sgl);
-	RTE_SET_USED(iv);
-	RTE_SET_USED(digest);
-	RTE_SET_USED(auth_iv);
-	RTE_SET_USED(ofs);
-	RTE_SET_USED(userdata);
-	RTE_SET_USED(fd);
+	/* first FLE entry used to store mbuf and session ctxt */
+	fle = (struct qbman_fle *)rte_malloc(NULL,
+			FLE_SG_MEM_SIZE(2 * sgl->num),
+			RTE_CACHE_LINE_SIZE);
+	if (unlikely(!fle)) {
+		DPAA2_SEC_DP_ERR("Proto:SG: Memory alloc failed for SGE");
+		return -ENOMEM;
+	}
+	memset(fle, 0, FLE_SG_MEM_SIZE(2 * sgl->num));
+	DPAA2_SET_FLE_ADDR(fle, (size_t)userdata);
+	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
+
+	/* Save the shared descriptor */
+	flc = &priv->flc_desc[0].flc;
+	op_fle = fle + 1;
+	ip_fle = fle + 2;
+	sge = fle + 3;
+
+	DPAA2_SET_FD_IVP(fd);
+	DPAA2_SET_FLE_IVP(op_fle);
+	DPAA2_SET_FLE_IVP(ip_fle);
+
+	/* Configure FD as a FRAME LIST */
+	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
+	DPAA2_SET_FD_COMPOUND_FMT(fd);
+	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
+
+	/* Configure Output FLE with Scatter/Gather Entry */
+	DPAA2_SET_FLE_SG_EXT(op_fle);
+	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));
+
+	/* Configure Output SGE for Encap/Decap */
+	DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
+	DPAA2_SET_FLE_OFFSET(sge, 0);
+	sge->length = sgl->vec[0].len;
+	out_len += sge->length;
+	/* o/p segs */
+	for (i = 1; i < sgl->num; i++) {
+		sge++;
+		DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
+		DPAA2_SET_FLE_OFFSET(sge, 0);
+		sge->length = sgl->vec[i].len;
+		out_len += sge->length;
+	}
+	sge->length = sgl->vec[i - 1].tot_len;
+	out_len += sge->length;
+
+	DPAA2_SET_FLE_FIN(sge);
+	op_fle->length = out_len;
+
+	sge++;
+
+	/* Configure Input FLE with Scatter/Gather Entry */
+	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
+	DPAA2_SET_FLE_SG_EXT(ip_fle);
+	DPAA2_SET_FLE_FIN(ip_fle);
+
+	/* Configure input SGE for Encap/Decap */
+	DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
+	DPAA2_SET_FLE_OFFSET(sge, 0);
+	sge->length = sgl->vec[0].len;
+	in_len += sge->length;
+	/* i/p segs */
+	for (i = 1; i < sgl->num; i++) {
+		sge++;
+		DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
+		DPAA2_SET_FLE_OFFSET(sge, 0);
+		sge->length = sgl->vec[i].len;
+		in_len += sge->length;
+	}
+
+	ip_fle->length = in_len;
+	DPAA2_SET_FLE_FIN(sge);
+
+	/* In case of PDCP, per packet HFN is stored in
+	 * mbuf priv after sym_op.
+	 */
+	if (sess->ctxt_type == DPAA2_SEC_PDCP && sess->pdcp.hfn_ovd) {
+		uint32_t hfn_ovd = *(uint32_t *)((uint8_t *)userdata +
+				sess->pdcp.hfn_ovd_offset);
+		/* enable HFN override */
+		DPAA2_SET_FLE_INTERNAL_JD(ip_fle, hfn_ovd);
+		DPAA2_SET_FLE_INTERNAL_JD(op_fle, hfn_ovd);
+		DPAA2_SET_FD_INTERNAL_JD(fd, hfn_ovd);
+	}
+	DPAA2_SET_FD_LEN(fd, ip_fle->length);
 
 	return 0;
 }
@@ -792,10 +972,9 @@ dpaa2_sec_configure_raw_dp_ctx(struct rte_cryptodev *dev, uint16_t qp_id,
 		sess->build_raw_dp_fd = build_raw_dp_auth_fd;
 	else if (sess->ctxt_type == DPAA2_SEC_CIPHER)
 		sess->build_raw_dp_fd = build_raw_dp_cipher_fd;
-	else if (sess->ctxt_type == DPAA2_SEC_IPSEC)
+	else if (sess->ctxt_type == DPAA2_SEC_IPSEC ||
+		sess->ctxt_type == DPAA2_SEC_PDCP)
 		sess->build_raw_dp_fd = build_raw_dp_proto_fd;
-	else if (sess->ctxt_type == DPAA2_SEC_PDCP)
-		sess->build_raw_dp_fd = build_raw_dp_proto_compound_fd;
 	else
 		return -ENOTSUP;
 	dp_ctx = (struct dpaa2_sec_raw_dp_ctx *)raw_dp_ctx->drv_ctx_data;
-- 
2.17.1


^ permalink raw reply	[flat|nested] 115+ messages in thread

* [dpdk-dev] [PATCH v4 09/15] crypto/dpaa2_sec: support OOP with raw buffer API
  2021-10-13 19:00     ` [dpdk-dev] [PATCH v4 " Hemant Agrawal
                         ` (7 preceding siblings ...)
  2021-10-13 19:00       ` [dpdk-dev] [PATCH v4 08/15] crypto/dpaa2_sec: support AEAD " Hemant Agrawal
@ 2021-10-13 19:00       ` Hemant Agrawal
  2021-10-13 19:00       ` [dpdk-dev] [PATCH v4 10/15] crypto/dpaa2_sec: enhance error checks with raw buffer APIs Hemant Agrawal
                         ` (6 subsequent siblings)
  15 siblings, 0 replies; 115+ messages in thread
From: Hemant Agrawal @ 2021-10-13 19:00 UTC (permalink / raw)
  To: dev, gakhil; +Cc: konstantin.ananyev, roy.fan.zhang, Gagandeep Singh

From: Gagandeep Singh <g.singh@nxp.com>

Add support for out-of-place (OOP) processing with raw vector APIs.

Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
---
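
Illustration (not part of the patch): selecting out-of-place processing from
the caller side. Both SGL arrays are placeholders supplied by the
application; when no destination SGL is supplied, the builders below reuse
src_sgl for the output (in-place).

#include <rte_crypto_sym.h>

static void
select_oop(struct rte_crypto_sym_vec *vec,
	   struct rte_crypto_sgl *src, struct rte_crypto_sgl *dst)
{
	vec->src_sgl = src;
	/*
	 * Pointing dest_sgl at a separate SGL array makes the builders
	 * emit the output SGEs from dest_sgl while still reading the
	 * input from src_sgl.
	 */
	vec->dest_sgl = dst;
}
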
 drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h   |   1 +
 drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c | 156 +++++++++++++++-----
 2 files changed, 116 insertions(+), 41 deletions(-)

diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h b/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h
index f397b756e8..05bd7c0736 100644
--- a/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h
@@ -179,6 +179,7 @@ typedef int (*dpaa2_sec_build_fd_t)(
 
 typedef int (*dpaa2_sec_build_raw_dp_fd_t)(uint8_t *drv_ctx,
 		       struct rte_crypto_sgl *sgl,
+		       struct rte_crypto_sgl *dest_sgl,
 		       struct rte_crypto_va_iova_ptr *iv,
 		       struct rte_crypto_va_iova_ptr *digest,
 		       struct rte_crypto_va_iova_ptr *auth_iv,
diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c b/drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c
index 5c29c61f9d..4f78cef9c0 100644
--- a/drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c
@@ -24,6 +24,7 @@ struct dpaa2_sec_raw_dp_ctx {
 static int
 build_raw_dp_chain_fd(uint8_t *drv_ctx,
 		       struct rte_crypto_sgl *sgl,
+		       struct rte_crypto_sgl *dest_sgl,
 		       struct rte_crypto_va_iova_ptr *iv,
 		       struct rte_crypto_va_iova_ptr *digest,
 		       struct rte_crypto_va_iova_ptr *auth_iv,
@@ -89,17 +90,33 @@ build_raw_dp_chain_fd(uint8_t *drv_ctx,
 			(cipher_len + icv_len) :
 			cipher_len;
 
-	/* Configure Output SGE for Encap/Decap */
-	DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
-	DPAA2_SET_FLE_OFFSET(sge, ofs.ofs.auth.head);
-	sge->length = sgl->vec[0].len - ofs.ofs.auth.head;
+	/* OOP */
+	if (dest_sgl) {
+		/* Configure Output SGE for Encap/Decap */
+		DPAA2_SET_FLE_ADDR(sge, dest_sgl->vec[0].iova);
+		DPAA2_SET_FLE_OFFSET(sge, ofs.ofs.cipher.head);
+		sge->length = dest_sgl->vec[0].len - ofs.ofs.cipher.head;
 
-	/* o/p segs */
-	for (i = 1; i < sgl->num; i++) {
-		sge++;
-		DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
-		DPAA2_SET_FLE_OFFSET(sge, 0);
-		sge->length = sgl->vec[i].len;
+		/* o/p segs */
+		for (i = 1; i < dest_sgl->num; i++) {
+			sge++;
+			DPAA2_SET_FLE_ADDR(sge, dest_sgl->vec[i].iova);
+			DPAA2_SET_FLE_OFFSET(sge, 0);
+			sge->length = dest_sgl->vec[i].len;
+		}
+	} else {
+		/* Configure Output SGE for Encap/Decap */
+		DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
+		DPAA2_SET_FLE_OFFSET(sge, ofs.ofs.cipher.head);
+		sge->length = sgl->vec[0].len - ofs.ofs.cipher.head;
+
+		/* o/p segs */
+		for (i = 1; i < sgl->num; i++) {
+			sge++;
+			DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
+			DPAA2_SET_FLE_OFFSET(sge, 0);
+			sge->length = sgl->vec[i].len;
+		}
 	}
 
 	if (sess->dir == DIR_ENC) {
@@ -160,6 +177,7 @@ build_raw_dp_chain_fd(uint8_t *drv_ctx,
 static int
 build_raw_dp_aead_fd(uint8_t *drv_ctx,
 		       struct rte_crypto_sgl *sgl,
+		       struct rte_crypto_sgl *dest_sgl,
 		       struct rte_crypto_va_iova_ptr *iv,
 		       struct rte_crypto_va_iova_ptr *digest,
 		       struct rte_crypto_va_iova_ptr *auth_iv,
@@ -219,17 +237,33 @@ build_raw_dp_aead_fd(uint8_t *drv_ctx,
 			(aead_len + icv_len) :
 			aead_len;
 
-	/* Configure Output SGE for Encap/Decap */
-	DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
-	DPAA2_SET_FLE_OFFSET(sge, ofs.ofs.cipher.head);
-	sge->length = sgl->vec[0].len - ofs.ofs.cipher.head;
+	/* OOP */
+	if (dest_sgl) {
+		/* Configure Output SGE for Encap/Decap */
+		DPAA2_SET_FLE_ADDR(sge, dest_sgl->vec[0].iova);
+		DPAA2_SET_FLE_OFFSET(sge, ofs.ofs.cipher.head);
+		sge->length = dest_sgl->vec[0].len - ofs.ofs.cipher.head;
 
-	/* o/p segs */
-	for (i = 1; i < sgl->num; i++) {
-		sge++;
-		DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
-		DPAA2_SET_FLE_OFFSET(sge, 0);
-		sge->length = sgl->vec[i].len;
+		/* o/p segs */
+		for (i = 1; i < dest_sgl->num; i++) {
+			sge++;
+			DPAA2_SET_FLE_ADDR(sge, dest_sgl->vec[i].iova);
+			DPAA2_SET_FLE_OFFSET(sge, 0);
+			sge->length = dest_sgl->vec[i].len;
+		}
+	} else {
+		/* Configure Output SGE for Encap/Decap */
+		DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
+		DPAA2_SET_FLE_OFFSET(sge, ofs.ofs.cipher.head);
+		sge->length = sgl->vec[0].len - ofs.ofs.cipher.head;
+
+		/* o/p segs */
+		for (i = 1; i < sgl->num; i++) {
+			sge++;
+			DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
+			DPAA2_SET_FLE_OFFSET(sge, 0);
+			sge->length = sgl->vec[i].len;
+		}
 	}
 
 	if (sess->dir == DIR_ENC) {
@@ -294,6 +328,7 @@ build_raw_dp_aead_fd(uint8_t *drv_ctx,
 static int
 build_raw_dp_auth_fd(uint8_t *drv_ctx,
 		       struct rte_crypto_sgl *sgl,
+		       struct rte_crypto_sgl *dest_sgl,
 		       struct rte_crypto_va_iova_ptr *iv,
 		       struct rte_crypto_va_iova_ptr *digest,
 		       struct rte_crypto_va_iova_ptr *auth_iv,
@@ -303,6 +338,7 @@ build_raw_dp_auth_fd(uint8_t *drv_ctx,
 {
 	RTE_SET_USED(iv);
 	RTE_SET_USED(auth_iv);
+	RTE_SET_USED(dest_sgl);
 
 	dpaa2_sec_session *sess =
 		((struct dpaa2_sec_raw_dp_ctx *)drv_ctx)->session;
@@ -416,6 +452,7 @@ build_raw_dp_auth_fd(uint8_t *drv_ctx,
 static int
 build_raw_dp_proto_fd(uint8_t *drv_ctx,
 		       struct rte_crypto_sgl *sgl,
+		       struct rte_crypto_sgl *dest_sgl,
 		       struct rte_crypto_va_iova_ptr *iv,
 		       struct rte_crypto_va_iova_ptr *digest,
 		       struct rte_crypto_va_iova_ptr *auth_iv,
@@ -466,20 +503,39 @@ build_raw_dp_proto_fd(uint8_t *drv_ctx,
 	DPAA2_SET_FLE_SG_EXT(op_fle);
 	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));
 
-	/* Configure Output SGE for Encap/Decap */
-	DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
-	DPAA2_SET_FLE_OFFSET(sge, 0);
-	sge->length = sgl->vec[0].len;
-	out_len += sge->length;
-	/* o/p segs */
-	for (i = 1; i < sgl->num; i++) {
-		sge++;
-		DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
+	/* OOP */
+	if (dest_sgl) {
+		/* Configure Output SGE for Encap/Decap */
+		DPAA2_SET_FLE_ADDR(sge, dest_sgl->vec[0].iova);
 		DPAA2_SET_FLE_OFFSET(sge, 0);
-		sge->length = sgl->vec[i].len;
+		sge->length = dest_sgl->vec[0].len;
+		out_len += sge->length;
+		/* o/p segs */
+		for (i = 1; i < dest_sgl->num; i++) {
+			sge++;
+			DPAA2_SET_FLE_ADDR(sge, dest_sgl->vec[i].iova);
+			DPAA2_SET_FLE_OFFSET(sge, 0);
+			sge->length = dest_sgl->vec[i].len;
+			out_len += sge->length;
+		}
+		sge->length = dest_sgl->vec[i - 1].tot_len;
+
+	} else {
+		/* Configure Output SGE for Encap/Decap */
+		DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
+		DPAA2_SET_FLE_OFFSET(sge, 0);
+		sge->length = sgl->vec[0].len;
 		out_len += sge->length;
+		/* o/p segs */
+		for (i = 1; i < sgl->num; i++) {
+			sge++;
+			DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
+			DPAA2_SET_FLE_OFFSET(sge, 0);
+			sge->length = sgl->vec[i].len;
+			out_len += sge->length;
+		}
+		sge->length = sgl->vec[i - 1].tot_len;
 	}
-	sge->length = sgl->vec[i - 1].tot_len;
 	out_len += sge->length;
 
 	DPAA2_SET_FLE_FIN(sge);
@@ -528,6 +584,7 @@ build_raw_dp_proto_fd(uint8_t *drv_ctx,
 static int
 build_raw_dp_cipher_fd(uint8_t *drv_ctx,
 		       struct rte_crypto_sgl *sgl,
+		       struct rte_crypto_sgl *dest_sgl,
 		       struct rte_crypto_va_iova_ptr *iv,
 		       struct rte_crypto_va_iova_ptr *digest,
 		       struct rte_crypto_va_iova_ptr *auth_iv,
@@ -593,17 +650,33 @@ build_raw_dp_cipher_fd(uint8_t *drv_ctx,
 	op_fle->length = data_len;
 	DPAA2_SET_FLE_SG_EXT(op_fle);
 
-	/* o/p 1st seg */
-	DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
-	DPAA2_SET_FLE_OFFSET(sge, data_offset);
-	sge->length = sgl->vec[0].len - data_offset;
+	/* OOP */
+	if (dest_sgl) {
+		/* o/p 1st seg */
+		DPAA2_SET_FLE_ADDR(sge, dest_sgl->vec[0].iova);
+		DPAA2_SET_FLE_OFFSET(sge, data_offset);
+		sge->length = dest_sgl->vec[0].len - data_offset;
 
-	/* o/p segs */
-	for (i = 1; i < sgl->num; i++) {
-		sge++;
-		DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
-		DPAA2_SET_FLE_OFFSET(sge, 0);
-		sge->length = sgl->vec[i].len;
+		/* o/p segs */
+		for (i = 1; i < dest_sgl->num; i++) {
+			sge++;
+			DPAA2_SET_FLE_ADDR(sge, dest_sgl->vec[i].iova);
+			DPAA2_SET_FLE_OFFSET(sge, 0);
+			sge->length = dest_sgl->vec[i].len;
+		}
+	} else {
+		/* o/p 1st seg */
+		DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
+		DPAA2_SET_FLE_OFFSET(sge, data_offset);
+		sge->length = sgl->vec[0].len - data_offset;
+
+		/* o/p segs */
+		for (i = 1; i < sgl->num; i++) {
+			sge++;
+			DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
+			DPAA2_SET_FLE_OFFSET(sge, 0);
+			sge->length = sgl->vec[i].len;
+		}
 	}
 	DPAA2_SET_FLE_FIN(sge);
 
@@ -706,6 +779,7 @@ dpaa2_sec_raw_enqueue_burst(void *qp_data, uint8_t *drv_ctx,
 			memset(&fd_arr[loop], 0, sizeof(struct qbman_fd));
 			ret = sess->build_raw_dp_fd(drv_ctx,
 						    &vec->src_sgl[loop],
+						    &vec->dest_sgl[loop],
 						    &vec->iv[loop],
 						    &vec->digest[loop],
 						    &vec->auth_iv[loop],
-- 
2.17.1


^ permalink raw reply	[flat|nested] 115+ messages in thread

* [dpdk-dev] [PATCH v4 10/15] crypto/dpaa2_sec: enhance error checks with raw buffer APIs
  2021-10-13 19:00     ` [dpdk-dev] [PATCH v4 " Hemant Agrawal
                         ` (8 preceding siblings ...)
  2021-10-13 19:00       ` [dpdk-dev] [PATCH v4 09/15] crypto/dpaa2_sec: support OOP with raw buffer API Hemant Agrawal
@ 2021-10-13 19:00       ` Hemant Agrawal
  2021-10-13 19:00       ` [dpdk-dev] [PATCH v4 11/15] crypto/dpaa_sec: support raw datapath APIs Hemant Agrawal
                         ` (5 subsequent siblings)
  15 siblings, 0 replies; 115+ messages in thread
From: Hemant Agrawal @ 2021-10-13 19:00 UTC (permalink / raw)
  To: dev, gakhil; +Cc: konstantin.ananyev, roy.fan.zhang, Gagandeep Singh

From: Gagandeep Singh <g.singh@nxp.com>

This patch improves the error checks and the support of wireless algos with raw buffers.

Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
---
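
Illustration (not part of the patch): a post-dequeue callback matching
rte_cryptodev_raw_post_dequeue_t, which receives the plain success flag this
patch switches to. Treating user_data as an rte_crypto_op is the
application's assumption, not the PMD's.

#include <rte_crypto.h>
#include <rte_cryptodev.h>

/* Called by the PMD once per dequeued descriptor; user_data is whatever
 * was supplied at enqueue time. */
static void
post_deq_cb(void *user_data, uint32_t index __rte_unused,
	    uint8_t is_op_success)
{
	struct rte_crypto_op *op = user_data;

	op->status = is_op_success ? RTE_CRYPTO_OP_STATUS_SUCCESS :
				     RTE_CRYPTO_OP_STATUS_ERROR;
}
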
 drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c | 31 ++++-----------------
 1 file changed, 6 insertions(+), 25 deletions(-)

diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c b/drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c
index 4f78cef9c0..a2ffc6c02f 100644
--- a/drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c
@@ -355,16 +355,7 @@ build_raw_dp_auth_fd(uint8_t *drv_ctx,
 	data_len = total_len - ofs.ofs.auth.head - ofs.ofs.auth.tail;
 	data_offset = ofs.ofs.auth.head;
 
-	if (sess->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
-		sess->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
-		if ((data_len & 7) || (data_offset & 7)) {
-			DPAA2_SEC_ERR("AUTH: len/offset must be full bytes");
-			return -ENOTSUP;
-		}
-
-		data_len = data_len >> 3;
-		data_offset = data_offset >> 3;
-	}
+	/* For SNOW3G and ZUC, lengths in bits only supported */
 	fle = (struct qbman_fle *)rte_malloc(NULL,
 		FLE_SG_MEM_SIZE(2 * sgl->num),
 			RTE_CACHE_LINE_SIZE);
@@ -609,17 +600,7 @@ build_raw_dp_cipher_fd(uint8_t *drv_ctx,
 	data_len = total_len - ofs.ofs.cipher.head - ofs.ofs.cipher.tail;
 	data_offset = ofs.ofs.cipher.head;
 
-	if (sess->cipher_alg == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
-		sess->cipher_alg == RTE_CRYPTO_CIPHER_ZUC_EEA3) {
-		if ((data_len & 7) || (data_offset & 7)) {
-			DPAA2_SEC_ERR("CIPHER: len/offset must be full bytes");
-			return -ENOTSUP;
-		}
-
-		data_len = data_len >> 3;
-		data_offset = data_offset >> 3;
-	}
-
+	/* For SNOW3G and ZUC, lengths in bits only supported */
 	/* first FLE entry used to store mbuf and session ctxt */
 	fle = (struct qbman_fle *)rte_malloc(NULL,
 			FLE_SG_MEM_SIZE(2*sgl->num),
@@ -878,7 +859,7 @@ dpaa2_sec_raw_dequeue_burst(void *qp_data, uint8_t *drv_ctx,
 	struct qbman_result *dq_storage;
 	uint32_t fqid = dpaa2_qp->rx_vq.fqid;
 	int ret, num_rx = 0;
-	uint8_t is_last = 0, status;
+	uint8_t is_last = 0, status, is_success = 0;
 	struct qbman_swp *swp;
 	const struct qbman_fd *fd;
 	struct qbman_pull_desc pulldesc;
@@ -957,11 +938,11 @@ dpaa2_sec_raw_dequeue_burst(void *qp_data, uint8_t *drv_ctx,
 			/* TODO Parse SEC errors */
 			DPAA2_SEC_ERR("SEC returned Error - %x",
 				      fd->simple.frc);
-			status = RTE_CRYPTO_OP_STATUS_ERROR;
+			is_success = false;
 		} else {
-			status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+			is_success = true;
 		}
-		post_dequeue(user_data, num_rx, status);
+		post_dequeue(user_data, num_rx, is_success);
 
 		num_rx++;
 		dq_storage++;
-- 
2.17.1


^ permalink raw reply	[flat|nested] 115+ messages in thread

* [dpdk-dev] [PATCH v4 11/15] crypto/dpaa_sec: support raw datapath APIs
  2021-10-13 19:00     ` [dpdk-dev] [PATCH v4 " Hemant Agrawal
                         ` (9 preceding siblings ...)
  2021-10-13 19:00       ` [dpdk-dev] [PATCH v4 10/15] crypto/dpaa2_sec: enhance error checks with raw buffer APIs Hemant Agrawal
@ 2021-10-13 19:00       ` Hemant Agrawal
  2021-10-13 19:00       ` [dpdk-dev] [PATCH v4 12/15] crypto/dpaa_sec: support authonly and chain with raw APIs Hemant Agrawal
                         ` (4 subsequent siblings)
  15 siblings, 0 replies; 115+ messages in thread
From: Hemant Agrawal @ 2021-10-13 19:00 UTC (permalink / raw)
  To: dev, gakhil; +Cc: konstantin.ananyev, roy.fan.zhang, Gagandeep Singh

From: Gagandeep Singh <g.singh@nxp.com>

This patch adds a raw vector API framework for the dpaa_sec driver.

Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
---
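
Illustration (not part of the patch): a minimal sketch of how an application
binds to the PMD hooks this framework registers, via the generic raw
datapath API. Device, queue-pair and session setup are omitted, and plain
calloc() stands in for a NUMA-aware allocation.

#include <stdlib.h>
#include <rte_cryptodev.h>

static struct rte_crypto_raw_dp_ctx *
setup_raw_dp(uint8_t dev_id, uint16_t qp_id,
	     struct rte_cryptodev_sym_session *sess)
{
	union rte_cryptodev_session_ctx sess_ctx = { .crypto_sess = sess };
	struct rte_crypto_raw_dp_ctx *ctx;
	int drv_size = rte_cryptodev_get_raw_dp_ctx_size(dev_id);

	if (drv_size < 0)	/* PMD lacks RTE_CRYPTODEV_FF_SYM_RAW_DP */
		return NULL;

	ctx = calloc(1, sizeof(*ctx) + drv_size);
	if (ctx == NULL)
		return NULL;

	/* is_update = 0: fill both the hooks and the driver context data */
	if (rte_cryptodev_configure_raw_dp_ctx(dev_id, qp_id, ctx,
			RTE_CRYPTO_OP_WITH_SESSION, sess_ctx, 0) < 0) {
		free(ctx);
		return NULL;
	}
	return ctx;
}
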
 doc/guides/rel_notes/release_21_11.rst    |   1 +
 drivers/crypto/dpaa_sec/dpaa_sec.c        |  23 +-
 drivers/crypto/dpaa_sec/dpaa_sec.h        |  39 +-
 drivers/crypto/dpaa_sec/dpaa_sec_raw_dp.c | 485 ++++++++++++++++++++++
 drivers/crypto/dpaa_sec/meson.build       |   4 +-
 5 files changed, 538 insertions(+), 14 deletions(-)
 create mode 100644 drivers/crypto/dpaa_sec/dpaa_sec_raw_dp.c

diff --git a/doc/guides/rel_notes/release_21_11.rst b/doc/guides/rel_notes/release_21_11.rst
index 279f0adf62..584c420bd8 100644
--- a/doc/guides/rel_notes/release_21_11.rst
+++ b/doc/guides/rel_notes/release_21_11.rst
@@ -119,6 +119,7 @@ New Features
 
   * Added DES-CBC, AES-XCBC-MAC, AES-CMAC and non-HMAC algo support.
   * Added PDCP short MAC-I support.
+  * Added raw vector datapath API support.
 
 * **Updated NXP dpaa2_sec crypto PMD.**
 
diff --git a/drivers/crypto/dpaa_sec/dpaa_sec.c b/drivers/crypto/dpaa_sec/dpaa_sec.c
index d5aa2748d6..c7ef1c7b0f 100644
--- a/drivers/crypto/dpaa_sec/dpaa_sec.c
+++ b/drivers/crypto/dpaa_sec/dpaa_sec.c
@@ -45,10 +45,7 @@
 #include <dpaa_sec_log.h>
 #include <dpaax_iova_table.h>
 
-static uint8_t cryptodev_driver_id;
-
-static int
-dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess);
+uint8_t dpaa_cryptodev_driver_id;
 
 static inline void
 dpaa_sec_op_ending(struct dpaa_sec_op_ctx *ctx)
@@ -1787,8 +1784,8 @@ dpaa_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
 			case RTE_CRYPTO_OP_WITH_SESSION:
 				ses = (dpaa_sec_session *)
 					get_sym_session_private_data(
-							op->sym->session,
-							cryptodev_driver_id);
+						op->sym->session,
+						dpaa_cryptodev_driver_id);
 				break;
 #ifdef RTE_LIB_SECURITY
 			case RTE_CRYPTO_OP_SECURITY_SESSION:
@@ -2400,7 +2397,7 @@ dpaa_sec_detach_rxq(struct dpaa_sec_dev_private *qi, struct qman_fq *fq)
 	return -1;
 }
 
-static int
+int
 dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess)
 {
 	int ret;
@@ -3216,7 +3213,7 @@ dpaa_sec_dev_infos_get(struct rte_cryptodev *dev,
 		info->feature_flags = dev->feature_flags;
 		info->capabilities = dpaa_sec_capabilities;
 		info->sym.max_nb_sessions = internals->max_nb_sessions;
-		info->driver_id = cryptodev_driver_id;
+		info->driver_id = dpaa_cryptodev_driver_id;
 	}
 }
 
@@ -3412,7 +3409,10 @@ static struct rte_cryptodev_ops crypto_ops = {
 	.queue_pair_release   = dpaa_sec_queue_pair_release,
 	.sym_session_get_size     = dpaa_sec_sym_session_get_size,
 	.sym_session_configure    = dpaa_sec_sym_session_configure,
-	.sym_session_clear        = dpaa_sec_sym_session_clear
+	.sym_session_clear        = dpaa_sec_sym_session_clear,
+	/* Raw data-path API related operations */
+	.sym_get_raw_dp_ctx_size = dpaa_sec_get_dp_ctx_size,
+	.sym_configure_raw_dp_ctx = dpaa_sec_configure_raw_dp_ctx,
 };
 
 #ifdef RTE_LIB_SECURITY
@@ -3463,7 +3463,7 @@ dpaa_sec_dev_init(struct rte_cryptodev *cryptodev)
 
 	PMD_INIT_FUNC_TRACE();
 
-	cryptodev->driver_id = cryptodev_driver_id;
+	cryptodev->driver_id = dpaa_cryptodev_driver_id;
 	cryptodev->dev_ops = &crypto_ops;
 
 	cryptodev->enqueue_burst = dpaa_sec_enqueue_burst;
@@ -3472,6 +3472,7 @@ dpaa_sec_dev_init(struct rte_cryptodev *cryptodev)
 			RTE_CRYPTODEV_FF_HW_ACCELERATED |
 			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
 			RTE_CRYPTODEV_FF_SECURITY |
+			RTE_CRYPTODEV_FF_SYM_RAW_DP |
 			RTE_CRYPTODEV_FF_IN_PLACE_SGL |
 			RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT |
 			RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
@@ -3637,5 +3638,5 @@ static struct cryptodev_driver dpaa_sec_crypto_drv;
 
 RTE_PMD_REGISTER_DPAA(CRYPTODEV_NAME_DPAA_SEC_PMD, rte_dpaa_sec_driver);
 RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa_sec_crypto_drv, rte_dpaa_sec_driver.driver,
-		cryptodev_driver_id);
+		dpaa_cryptodev_driver_id);
 RTE_LOG_REGISTER(dpaa_logtype_sec, pmd.crypto.dpaa, NOTICE);
diff --git a/drivers/crypto/dpaa_sec/dpaa_sec.h b/drivers/crypto/dpaa_sec/dpaa_sec.h
index 503047879e..77288cd1eb 100644
--- a/drivers/crypto/dpaa_sec/dpaa_sec.h
+++ b/drivers/crypto/dpaa_sec/dpaa_sec.h
@@ -19,6 +19,8 @@
 #define AES_CTR_IV_LEN		16
 #define AES_GCM_IV_LEN		12
 
+extern uint8_t dpaa_cryptodev_driver_id;
+
 #define DPAA_IPv6_DEFAULT_VTC_FLOW	0x60000000
 
 /* Minimum job descriptor consists of a oneword job descriptor HEADER and
@@ -117,6 +119,24 @@ struct sec_pdcp_ctxt {
 	uint32_t hfn_threshold;	/*!< HFN Threashold for key renegotiation */
 };
 #endif
+
+typedef int (*dpaa_sec_build_fd_t)(
+	void *qp, uint8_t *drv_ctx, struct rte_crypto_vec *data_vec,
+	uint16_t n_data_vecs, union rte_crypto_sym_ofs ofs,
+	struct rte_crypto_va_iova_ptr *iv,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *aad_or_auth_iv,
+	void *user_data);
+
+typedef struct dpaa_sec_job* (*dpaa_sec_build_raw_dp_fd_t)(uint8_t *drv_ctx,
+			struct rte_crypto_sgl *sgl,
+			struct rte_crypto_sgl *dest_sgl,
+			struct rte_crypto_va_iova_ptr *iv,
+			struct rte_crypto_va_iova_ptr *digest,
+			struct rte_crypto_va_iova_ptr *auth_iv,
+			union rte_crypto_sym_ofs ofs,
+			void *userdata);
+
 typedef struct dpaa_sec_session_entry {
 	struct sec_cdb cdb;	/**< cmd block associated with qp */
 	struct dpaa_sec_qp *qp[MAX_DPAA_CORES];
@@ -129,6 +149,8 @@ typedef struct dpaa_sec_session_entry {
 #ifdef RTE_LIB_SECURITY
 	enum rte_security_session_protocol proto_alg; /*!< Security Algorithm*/
 #endif
+	dpaa_sec_build_fd_t build_fd;
+	dpaa_sec_build_raw_dp_fd_t build_raw_dp_fd;
 	union {
 		struct {
 			uint8_t *data;	/**< pointer to key data */
@@ -211,7 +233,10 @@ struct dpaa_sec_job {
 #define DPAA_MAX_NB_MAX_DIGEST	32
 struct dpaa_sec_op_ctx {
 	struct dpaa_sec_job job;
-	struct rte_crypto_op *op;
+	union {
+		struct rte_crypto_op *op;
+		void *userdata;
+	};
 	struct rte_mempool *ctx_pool; /* mempool pointer for dpaa_sec_op_ctx */
 	uint32_t fd_status;
 	int64_t vtop_offset;
@@ -1001,4 +1026,16 @@ calc_chksum(void *buffer, int len)
 	return  result;
 }
 
+int
+dpaa_sec_configure_raw_dp_ctx(struct rte_cryptodev *dev, uint16_t qp_id,
+	struct rte_crypto_raw_dp_ctx *raw_dp_ctx,
+	enum rte_crypto_op_sess_type sess_type,
+	union rte_cryptodev_session_ctx session_ctx, uint8_t is_update);
+
+int
+dpaa_sec_get_dp_ctx_size(struct rte_cryptodev *dev);
+
+int
+dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess);
+
 #endif /* _DPAA_SEC_H_ */
diff --git a/drivers/crypto/dpaa_sec/dpaa_sec_raw_dp.c b/drivers/crypto/dpaa_sec/dpaa_sec_raw_dp.c
new file mode 100644
index 0000000000..7376da4cbc
--- /dev/null
+++ b/drivers/crypto/dpaa_sec/dpaa_sec_raw_dp.c
@@ -0,0 +1,485 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2021 NXP
+ */
+
+#include <rte_byteorder.h>
+#include <rte_common.h>
+#include <cryptodev_pmd.h>
+#include <rte_crypto.h>
+#include <rte_cryptodev.h>
+#ifdef RTE_LIB_SECURITY
+#include <rte_security_driver.h>
+#endif
+
+/* RTA header files */
+#include <desc/ipsec.h>
+
+#include <rte_dpaa_bus.h>
+#include <dpaa_sec.h>
+#include <dpaa_sec_log.h>
+
+struct dpaa_sec_raw_dp_ctx {
+	dpaa_sec_session *session;
+	uint32_t tail;
+	uint32_t head;
+	uint16_t cached_enqueue;
+	uint16_t cached_dequeue;
+};
+
+static __rte_always_inline int
+dpaa_sec_raw_enqueue_done(void *qp_data, uint8_t *drv_ctx, uint32_t n)
+{
+	RTE_SET_USED(qp_data);
+	RTE_SET_USED(drv_ctx);
+	RTE_SET_USED(n);
+
+	return 0;
+}
+
+static __rte_always_inline int
+dpaa_sec_raw_dequeue_done(void *qp_data, uint8_t *drv_ctx, uint32_t n)
+{
+	RTE_SET_USED(qp_data);
+	RTE_SET_USED(drv_ctx);
+	RTE_SET_USED(n);
+
+	return 0;
+}
+
+static inline struct dpaa_sec_op_ctx *
+dpaa_sec_alloc_raw_ctx(dpaa_sec_session *ses, int sg_count)
+{
+	struct dpaa_sec_op_ctx *ctx;
+	int i, retval;
+
+	retval = rte_mempool_get(
+			ses->qp[rte_lcore_id() % MAX_DPAA_CORES]->ctx_pool,
+			(void **)(&ctx));
+	if (!ctx || retval) {
+		DPAA_SEC_DP_WARN("Alloc sec descriptor failed!");
+		return NULL;
+	}
+	/*
+	 * Clear SG memory. There are 16 SG entries of 16 Bytes each.
+	 * One call to dcbz_64() clears 64 bytes, i.e. four entries, hence it
+	 * is called once per four entries to clear all the SG entries.
+	 * dpaa_sec_alloc_ctx() is called for each packet, and memset() is
+	 * costlier than dcbz_64().
+	 */
+	for (i = 0; i < sg_count && i < MAX_JOB_SG_ENTRIES; i += 4)
+		dcbz_64(&ctx->job.sg[i]);
+
+	ctx->ctx_pool = ses->qp[rte_lcore_id() % MAX_DPAA_CORES]->ctx_pool;
+	ctx->vtop_offset = (size_t) ctx - rte_mempool_virt2iova(ctx);
+
+	return ctx;
+}
+
+static struct dpaa_sec_job *
+build_dpaa_raw_dp_auth_fd(uint8_t *drv_ctx,
+			struct rte_crypto_sgl *sgl,
+			struct rte_crypto_sgl *dest_sgl,
+			struct rte_crypto_va_iova_ptr *iv,
+			struct rte_crypto_va_iova_ptr *digest,
+			struct rte_crypto_va_iova_ptr *auth_iv,
+			union rte_crypto_sym_ofs ofs,
+			void *userdata)
+{
+	RTE_SET_USED(drv_ctx);
+	RTE_SET_USED(sgl);
+	RTE_SET_USED(dest_sgl);
+	RTE_SET_USED(iv);
+	RTE_SET_USED(digest);
+	RTE_SET_USED(auth_iv);
+	RTE_SET_USED(ofs);
+	RTE_SET_USED(userdata);
+
+	return NULL;
+}
+
+static struct dpaa_sec_job *
+build_dpaa_raw_dp_cipher_fd(uint8_t *drv_ctx,
+			struct rte_crypto_sgl *sgl,
+			struct rte_crypto_sgl *dest_sgl,
+			struct rte_crypto_va_iova_ptr *iv,
+			struct rte_crypto_va_iova_ptr *digest,
+			struct rte_crypto_va_iova_ptr *auth_iv,
+			union rte_crypto_sym_ofs ofs,
+			void *userdata)
+{
+	RTE_SET_USED(digest);
+	RTE_SET_USED(auth_iv);
+	dpaa_sec_session *ses =
+		((struct dpaa_sec_raw_dp_ctx *)drv_ctx)->session;
+	struct dpaa_sec_job *cf;
+	struct dpaa_sec_op_ctx *ctx;
+	struct qm_sg_entry *sg, *out_sg, *in_sg;
+	unsigned int i;
+	uint8_t *IV_ptr = iv->va;
+	int data_len, total_len = 0, data_offset;
+
+	for (i = 0; i < sgl->num; i++)
+		total_len += sgl->vec[i].len;
+
+	data_len = total_len - ofs.ofs.cipher.head - ofs.ofs.cipher.tail;
+	data_offset = ofs.ofs.cipher.head;
+
+	/* Support lengths in bits only for SNOW3G and ZUC */
+	if (sgl->num > MAX_SG_ENTRIES) {
+		DPAA_SEC_DP_ERR("Cipher: Max sec segs supported is %d",
+				MAX_SG_ENTRIES);
+		return NULL;
+	}
+
+	ctx = dpaa_sec_alloc_raw_ctx(ses, sgl->num * 2 + 3);
+	if (!ctx)
+		return NULL;
+
+	cf = &ctx->job;
+	ctx->userdata = (void *)userdata;
+
+	/* output */
+	out_sg = &cf->sg[0];
+	out_sg->extension = 1;
+	out_sg->length = data_len;
+	qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(&cf->sg[2]));
+	cpu_to_hw_sg(out_sg);
+
+	if (dest_sgl) {
+		/* 1st seg */
+		sg = &cf->sg[2];
+		qm_sg_entry_set64(sg, dest_sgl->vec[0].iova);
+		sg->length = dest_sgl->vec[0].len - data_offset;
+		sg->offset = data_offset;
+
+		/* Successive segs */
+		for (i = 1; i < dest_sgl->num; i++) {
+			cpu_to_hw_sg(sg);
+			sg++;
+			qm_sg_entry_set64(sg, dest_sgl->vec[i].iova);
+			sg->length = dest_sgl->vec[i].len;
+		}
+	} else {
+		/* 1st seg */
+		sg = &cf->sg[2];
+		qm_sg_entry_set64(sg, sgl->vec[0].iova);
+		sg->length = sgl->vec[0].len - data_offset;
+		sg->offset = data_offset;
+
+		/* Successive segs */
+		for (i = 1; i < sgl->num; i++) {
+			cpu_to_hw_sg(sg);
+			sg++;
+			qm_sg_entry_set64(sg, sgl->vec[i].iova);
+			sg->length = sgl->vec[i].len;
+		}
+
+	}
+	sg->final = 1;
+	cpu_to_hw_sg(sg);
+
+	/* input */
+	in_sg = &cf->sg[1];
+	in_sg->extension = 1;
+	in_sg->final = 1;
+	in_sg->length = data_len + ses->iv.length;
+
+	sg++;
+	qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));
+	cpu_to_hw_sg(in_sg);
+
+	/* IV */
+	qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
+	sg->length = ses->iv.length;
+	cpu_to_hw_sg(sg);
+
+	/* 1st seg */
+	sg++;
+	qm_sg_entry_set64(sg, sgl->vec[0].iova);
+	sg->length = sgl->vec[0].len - data_offset;
+	sg->offset = data_offset;
+
+	/* Successive segs */
+	for (i = 1; i < sgl->num; i++) {
+		cpu_to_hw_sg(sg);
+		sg++;
+		qm_sg_entry_set64(sg, sgl->vec[i].iova);
+		sg->length = sgl->vec[i].len;
+	}
+	sg->final = 1;
+	cpu_to_hw_sg(sg);
+
+	return cf;
+}
+
+static uint32_t
+dpaa_sec_raw_enqueue_burst(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
+	void *user_data[], int *status)
+{
+	/* Function to transmit the frames to the given device and queue pair */
+	uint32_t loop;
+	struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp_data;
+	uint16_t num_tx = 0;
+	struct qm_fd fds[DPAA_SEC_BURST], *fd;
+	uint32_t frames_to_send;
+	struct dpaa_sec_job *cf;
+	dpaa_sec_session *ses =
+			((struct dpaa_sec_raw_dp_ctx *)drv_ctx)->session;
+	uint32_t flags[DPAA_SEC_BURST] = {0};
+	struct qman_fq *inq[DPAA_SEC_BURST];
+
+	if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
+		if (rte_dpaa_portal_init((void *)0)) {
+			DPAA_SEC_ERR("Failure in affining portal");
+			return 0;
+		}
+	}
+
+	while (vec->num) {
+		frames_to_send = (vec->num > DPAA_SEC_BURST) ?
+				DPAA_SEC_BURST : vec->num;
+		for (loop = 0; loop < frames_to_send; loop++) {
+			if (unlikely(!ses->qp[rte_lcore_id() % MAX_DPAA_CORES])) {
+				if (dpaa_sec_attach_sess_q(dpaa_qp, ses)) {
+					frames_to_send = loop;
+					goto send_pkts;
+				}
+			} else if (unlikely(ses->qp[rte_lcore_id() %
+						MAX_DPAA_CORES] != dpaa_qp)) {
+				DPAA_SEC_DP_ERR("Old:sess->qp = %p"
+					" New qp = %p\n",
+					ses->qp[rte_lcore_id() %
+					MAX_DPAA_CORES], dpaa_qp);
+				frames_to_send = loop;
+				goto send_pkts;
+			}
+
+			/* Clear the unused FD fields before sending */
+			fd = &fds[loop];
+			memset(fd, 0, sizeof(struct qm_fd));
+			cf = ses->build_raw_dp_fd(drv_ctx,
+						&vec->src_sgl[loop],
+						&vec->dest_sgl[loop],
+						&vec->iv[loop],
+						&vec->digest[loop],
+						&vec->auth_iv[loop],
+						ofs,
+						user_data[loop]);
+			if (!cf) {
+				DPAA_SEC_ERR("error: Improper packet contents"
+					" for crypto operation");
+				goto skip_tx;
+			}
+			inq[loop] = ses->inq[rte_lcore_id() % MAX_DPAA_CORES];
+			fd->opaque_addr = 0;
+			fd->cmd = 0;
+			qm_fd_addr_set64(fd, rte_dpaa_mem_vtop(cf->sg));
+			fd->_format1 = qm_fd_compound;
+			fd->length29 = 2 * sizeof(struct qm_sg_entry);
+
+			status[loop] = 1;
+		}
+send_pkts:
+		loop = 0;
+		while (loop < frames_to_send) {
+			loop += qman_enqueue_multi_fq(&inq[loop], &fds[loop],
+					&flags[loop], frames_to_send - loop);
+		}
+		vec->num -= frames_to_send;
+		num_tx += frames_to_send;
+	}
+
+skip_tx:
+	dpaa_qp->tx_pkts += num_tx;
+	dpaa_qp->tx_errs += vec->num - num_tx;
+
+	return num_tx;
+}
+
+static int
+dpaa_sec_deq_raw(struct dpaa_sec_qp *qp, void **out_user_data,
+		uint8_t is_user_data_array,
+		rte_cryptodev_raw_post_dequeue_t post_dequeue,
+		int nb_ops)
+{
+	struct qman_fq *fq;
+	unsigned int pkts = 0;
+	int num_rx_bufs, ret;
+	struct qm_dqrr_entry *dq;
+	uint32_t vdqcr_flags = 0;
+	uint8_t is_success = 0;
+
+	fq = &qp->outq;
+	/*
+	 * For requests of fewer than four buffers we set the QM_VDQCR_EXACT
+	 * flag and ask for the exact count. Otherwise the flag is left unset,
+	 * in which case the portal may return up to two buffers more than
+	 * requested, so we ask for two fewer to stay within nb_ops.
+	 */
+	if (nb_ops < 4) {
+		vdqcr_flags = QM_VDQCR_EXACT;
+		num_rx_bufs = nb_ops;
+	} else {
+		num_rx_bufs = nb_ops > DPAA_MAX_DEQUEUE_NUM_FRAMES ?
+			(DPAA_MAX_DEQUEUE_NUM_FRAMES - 2) : (nb_ops - 2);
+	}
+	ret = qman_set_vdq(fq, num_rx_bufs, vdqcr_flags);
+	if (ret)
+		return 0;
+
+	do {
+		const struct qm_fd *fd;
+		struct dpaa_sec_job *job;
+		struct dpaa_sec_op_ctx *ctx;
+
+		dq = qman_dequeue(fq);
+		if (!dq)
+			continue;
+
+		fd = &dq->fd;
+		/* sg is embedded in an op ctx,
+		 * sg[0] is for output
+		 * sg[1] for input
+		 */
+		job = rte_dpaa_mem_ptov(qm_fd_addr_get64(fd));
+
+		ctx = container_of(job, struct dpaa_sec_op_ctx, job);
+		ctx->fd_status = fd->status;
+		if (is_user_data_array)
+			out_user_data[pkts] = ctx->userdata;
+		else
+			out_user_data[0] = ctx->userdata;
+
+		if (!ctx->fd_status) {
+			is_success = true;
+		} else {
+			is_success = false;
+			DPAA_SEC_DP_WARN("SEC return err:0x%x", ctx->fd_status);
+		}
+		post_dequeue(ctx->op, pkts, is_success);
+		pkts++;
+
+		/* report op status to sym->op and then free the ctx memory */
+		rte_mempool_put(ctx->ctx_pool, (void *)ctx);
+
+		qman_dqrr_consume(fq, dq);
+	} while (fq->flags & QMAN_FQ_STATE_VDQCR);
+
+	return pkts;
+}
+
+
+static __rte_always_inline uint32_t
+dpaa_sec_raw_dequeue_burst(void *qp_data, uint8_t *drv_ctx,
+	rte_cryptodev_raw_get_dequeue_count_t get_dequeue_count,
+	uint32_t max_nb_to_dequeue,
+	rte_cryptodev_raw_post_dequeue_t post_dequeue,
+	void **out_user_data, uint8_t is_user_data_array,
+	uint32_t *n_success, int *dequeue_status)
+{
+	RTE_SET_USED(drv_ctx);
+	RTE_SET_USED(get_dequeue_count);
+	uint16_t num_rx;
+	struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp_data;
+	uint32_t nb_ops = max_nb_to_dequeue;
+
+	if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
+		if (rte_dpaa_portal_init((void *)0)) {
+			DPAA_SEC_ERR("Failure in affining portal");
+			return 0;
+		}
+	}
+
+	num_rx = dpaa_sec_deq_raw(dpaa_qp, out_user_data,
+			is_user_data_array, post_dequeue, nb_ops);
+
+	dpaa_qp->rx_pkts += num_rx;
+	*dequeue_status = 1;
+	*n_success = num_rx;
+
+	DPAA_SEC_DP_DEBUG("SEC Received %d Packets\n", num_rx);
+
+	return num_rx;
+}
+
+static __rte_always_inline int
+dpaa_sec_raw_enqueue(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_vec *data_vec,
+	uint16_t n_data_vecs, union rte_crypto_sym_ofs ofs,
+	struct rte_crypto_va_iova_ptr *iv,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *aad_or_auth_iv,
+	void *user_data)
+{
+	RTE_SET_USED(qp_data);
+	RTE_SET_USED(drv_ctx);
+	RTE_SET_USED(data_vec);
+	RTE_SET_USED(n_data_vecs);
+	RTE_SET_USED(ofs);
+	RTE_SET_USED(iv);
+	RTE_SET_USED(digest);
+	RTE_SET_USED(aad_or_auth_iv);
+	RTE_SET_USED(user_data);
+
+	return 0;
+}
+
+static __rte_always_inline void *
+dpaa_sec_raw_dequeue(void *qp_data, uint8_t *drv_ctx, int *dequeue_status,
+	enum rte_crypto_op_status *op_status)
+{
+	RTE_SET_USED(qp_data);
+	RTE_SET_USED(drv_ctx);
+	RTE_SET_USED(dequeue_status);
+	RTE_SET_USED(op_status);
+
+	return NULL;
+}
+
+int
+dpaa_sec_configure_raw_dp_ctx(struct rte_cryptodev *dev, uint16_t qp_id,
+	struct rte_crypto_raw_dp_ctx *raw_dp_ctx,
+	enum rte_crypto_op_sess_type sess_type,
+	union rte_cryptodev_session_ctx session_ctx, uint8_t is_update)
+{
+	dpaa_sec_session *sess;
+	struct dpaa_sec_raw_dp_ctx *dp_ctx;
+	RTE_SET_USED(qp_id);
+
+	if (!is_update) {
+		memset(raw_dp_ctx, 0, sizeof(*raw_dp_ctx));
+		raw_dp_ctx->qp_data = dev->data->queue_pairs[qp_id];
+	}
+
+	if (sess_type == RTE_CRYPTO_OP_SECURITY_SESSION)
+		sess = (dpaa_sec_session *)get_sec_session_private_data(
+				session_ctx.sec_sess);
+	else if (sess_type == RTE_CRYPTO_OP_WITH_SESSION)
+		sess = (dpaa_sec_session *)get_sym_session_private_data(
+			session_ctx.crypto_sess, dpaa_cryptodev_driver_id);
+	else
+		return -ENOTSUP;
+	raw_dp_ctx->dequeue_burst = dpaa_sec_raw_dequeue_burst;
+	raw_dp_ctx->dequeue = dpaa_sec_raw_dequeue;
+	raw_dp_ctx->dequeue_done = dpaa_sec_raw_dequeue_done;
+	raw_dp_ctx->enqueue_burst = dpaa_sec_raw_enqueue_burst;
+	raw_dp_ctx->enqueue = dpaa_sec_raw_enqueue;
+	raw_dp_ctx->enqueue_done = dpaa_sec_raw_enqueue_done;
+
+	if (sess->ctxt == DPAA_SEC_CIPHER)
+		sess->build_raw_dp_fd = build_dpaa_raw_dp_cipher_fd;
+	else if (sess->ctxt == DPAA_SEC_AUTH)
+		sess->build_raw_dp_fd = build_dpaa_raw_dp_auth_fd;
+	else
+		return -ENOTSUP;
+	dp_ctx = (struct dpaa_sec_raw_dp_ctx *)raw_dp_ctx->drv_ctx_data;
+	dp_ctx->session = sess;
+
+	return 0;
+}
+
+int
+dpaa_sec_get_dp_ctx_size(__rte_unused struct rte_cryptodev *dev)
+{
+	return sizeof(struct dpaa_sec_raw_dp_ctx);
+}
diff --git a/drivers/crypto/dpaa_sec/meson.build b/drivers/crypto/dpaa_sec/meson.build
index 44fd60e5ae..f87ad6c7e7 100644
--- a/drivers/crypto/dpaa_sec/meson.build
+++ b/drivers/crypto/dpaa_sec/meson.build
@@ -1,5 +1,5 @@
 # SPDX-License-Identifier: BSD-3-Clause
-# Copyright 2018 NXP
+# Copyright 2018-2021 NXP
 
 if not is_linux
     build = false
@@ -7,7 +7,7 @@ if not is_linux
 endif
 
 deps += ['bus_dpaa', 'mempool_dpaa', 'security']
-sources = files('dpaa_sec.c')
+sources = files('dpaa_sec.c', 'dpaa_sec_raw_dp.c')
 
 includes += include_directories('../../bus/dpaa/include')
 includes += include_directories('../../common/dpaax')
-- 
2.17.1


^ permalink raw reply	[flat|nested] 115+ messages in thread
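
A note on how the callbacks registered above are reached: applications drive
them through the generic cryptodev raw data-path API. Below is a minimal
sketch (session creation, vector filling and error handling are elided; the
helper raw_dp_burst() and its names are illustrative, not part of this series):

#include <stdlib.h>
#include <rte_cryptodev.h>

static void
post_deq(void *user_data, uint32_t index, uint8_t is_op_success)
{
	/* per-op completion hook; record status against user_data here */
	RTE_SET_USED(user_data);
	RTE_SET_USED(index);
	RTE_SET_USED(is_op_success);
}

static int
raw_dp_burst(uint8_t dev_id, uint16_t qp_id,
	struct rte_cryptodev_sym_session *sess,
	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
	void *user_data[])
{
	union rte_cryptodev_session_ctx sess_ctx = { .crypto_sess = sess };
	struct rte_crypto_raw_dp_ctx *ctx;
	uint32_t nb = vec->num, n_success = 0;
	int enq_status, deq_status;

	/* driver private data follows the context, hence the extra size */
	ctx = calloc(1, sizeof(*ctx) +
			rte_cryptodev_get_raw_dp_ctx_size(dev_id));
	if (ctx == NULL)
		return -1;

	/* binds dpaa_sec_raw_enqueue_burst()/dpaa_sec_raw_dequeue_burst() */
	if (rte_cryptodev_configure_raw_dp_ctx(dev_id, qp_id, ctx,
			RTE_CRYPTO_OP_WITH_SESSION, sess_ctx, 0) < 0) {
		free(ctx);
		return -1;
	}

	/* assume the full burst is accepted in this sketch */
	rte_cryptodev_raw_enqueue_burst(ctx, vec, ofs, user_data,
			&enq_status);
	rte_cryptodev_raw_enqueue_done(ctx, nb);

	rte_cryptodev_raw_dequeue_burst(ctx, NULL, nb, post_deq,
			user_data, 1, &n_success, &deq_status);
	rte_cryptodev_raw_dequeue_done(ctx, n_success);

	free(ctx);
	return (int)n_success;
}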

* [dpdk-dev] [PATCH v4 12/15] crypto/dpaa_sec: support authonly and chain with raw APIs
  2021-10-13 19:00     ` [dpdk-dev] [PATCH v4 " Hemant Agrawal
                         ` (10 preceding siblings ...)
  2021-10-13 19:00       ` [dpdk-dev] [PATCH v4 11/15] crypto/dpaa_sec: support raw datapath APIs Hemant Agrawal
@ 2021-10-13 19:00       ` Hemant Agrawal
  2021-10-13 19:00       ` [dpdk-dev] [PATCH v4 13/15] crypto/dpaa_sec: support AEAD and proto " Hemant Agrawal
                         ` (3 subsequent siblings)
  15 siblings, 0 replies; 115+ messages in thread
From: Hemant Agrawal @ 2021-10-13 19:00 UTC (permalink / raw)
  To: dev, gakhil; +Cc: konstantin.ananyev, roy.fan.zhang, Gagandeep Singh

From: Gagandeep Singh <g.singh@nxp.com>

This patch improves the raw vector support in the dpaa_sec driver
for the auth-only and cipher-auth chain use cases.

Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
---
 drivers/crypto/dpaa_sec/dpaa_sec.h        |   3 +-
 drivers/crypto/dpaa_sec/dpaa_sec_raw_dp.c | 296 +++++++++++++++++++++-
 2 files changed, 287 insertions(+), 12 deletions(-)
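
The reason dpaa_sec_build_raw_dp_fd_t grows a qm_fd argument is visible in
the chain path below: the auth-only region (bytes authenticated but not
ciphered) is packed into the FD command word. A small worked sketch of just
that packing, mirroring the code in this patch (the 24-byte header offset is
an invented example):

#include <rte_crypto_sym.h>

static uint32_t
chain_auth_only_word(void)
{
	/* e.g. authenticate a 24-byte header, cipher everything after it */
	union rte_crypto_sym_ofs ofs = {
		.ofs.auth = { .head = 0, .tail = 0 },
		.ofs.cipher = { .head = 24, .tail = 0 },
	};
	uint16_t auth_hdr_len = ofs.ofs.cipher.head - ofs.ofs.auth.head;
	uint16_t auth_tail_len = ofs.ofs.auth.tail;
	uint32_t auth_only_len =
		((uint32_t)auth_tail_len << 16) | auth_hdr_len;

	/* when non-zero, the driver stores it as:
	 *   fd->cmd = 0x80000000 | auth_only_len;
	 */
	return auth_only_len;	/* 24 in this example */
}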

diff --git a/drivers/crypto/dpaa_sec/dpaa_sec.h b/drivers/crypto/dpaa_sec/dpaa_sec.h
index 77288cd1eb..7890687828 100644
--- a/drivers/crypto/dpaa_sec/dpaa_sec.h
+++ b/drivers/crypto/dpaa_sec/dpaa_sec.h
@@ -135,7 +135,8 @@ typedef struct dpaa_sec_job* (*dpaa_sec_build_raw_dp_fd_t)(uint8_t *drv_ctx,
 			struct rte_crypto_va_iova_ptr *digest,
 			struct rte_crypto_va_iova_ptr *auth_iv,
 			union rte_crypto_sym_ofs ofs,
-			void *userdata);
+			void *userdata,
+			struct qm_fd *fd);
 
 typedef struct dpaa_sec_session_entry {
 	struct sec_cdb cdb;	/**< cmd block associated with qp */
diff --git a/drivers/crypto/dpaa_sec/dpaa_sec_raw_dp.c b/drivers/crypto/dpaa_sec/dpaa_sec_raw_dp.c
index 7376da4cbc..03ce21e53f 100644
--- a/drivers/crypto/dpaa_sec/dpaa_sec_raw_dp.c
+++ b/drivers/crypto/dpaa_sec/dpaa_sec_raw_dp.c
@@ -12,6 +12,7 @@
 #endif
 
 /* RTA header files */
+#include <desc/algo.h>
 #include <desc/ipsec.h>
 
 #include <rte_dpaa_bus.h>
@@ -26,6 +27,17 @@ struct dpaa_sec_raw_dp_ctx {
 	uint16_t cached_dequeue;
 };
 
+static inline int
+is_encode(dpaa_sec_session *ses)
+{
+	return ses->dir == DIR_ENC;
+}
+
+static inline int is_decode(dpaa_sec_session *ses)
+{
+	return ses->dir == DIR_DEC;
+}
+
 static __rte_always_inline int
 dpaa_sec_raw_enqueue_done(void *qp_data, uint8_t *drv_ctx, uint32_t n)
 {
@@ -82,18 +94,276 @@ build_dpaa_raw_dp_auth_fd(uint8_t *drv_ctx,
 			struct rte_crypto_va_iova_ptr *digest,
 			struct rte_crypto_va_iova_ptr *auth_iv,
 			union rte_crypto_sym_ofs ofs,
-			void *userdata)
+			void *userdata,
+			struct qm_fd *fd)
 {
-	RTE_SET_USED(drv_ctx);
-	RTE_SET_USED(sgl);
 	RTE_SET_USED(dest_sgl);
 	RTE_SET_USED(iv);
-	RTE_SET_USED(digest);
 	RTE_SET_USED(auth_iv);
-	RTE_SET_USED(ofs);
-	RTE_SET_USED(userdata);
+	RTE_SET_USED(fd);
 
-	return NULL;
+	dpaa_sec_session *ses =
+		((struct dpaa_sec_raw_dp_ctx *)drv_ctx)->session;
+	struct dpaa_sec_job *cf;
+	struct dpaa_sec_op_ctx *ctx;
+	struct qm_sg_entry *sg, *out_sg, *in_sg;
+	phys_addr_t start_addr;
+	uint8_t *old_digest, extra_segs;
+	int data_len, data_offset, total_len = 0;
+	unsigned int i;
+
+	for (i = 0; i < sgl->num; i++)
+		total_len += sgl->vec[i].len;
+
+	data_len = total_len - ofs.ofs.auth.head - ofs.ofs.auth.tail;
+	data_offset =  ofs.ofs.auth.head;
+
+	/* Support only length in bits for SNOW3G and ZUC */
+
+	if (is_decode(ses))
+		extra_segs = 3;
+	else
+		extra_segs = 2;
+
+	if (sgl->num > MAX_SG_ENTRIES) {
+		DPAA_SEC_DP_ERR("Auth: Max sec segs supported is %d",
+				MAX_SG_ENTRIES);
+		return NULL;
+	}
+	ctx = dpaa_sec_alloc_raw_ctx(ses, sgl->num * 2 + extra_segs);
+	if (!ctx)
+		return NULL;
+
+	cf = &ctx->job;
+	ctx->userdata = (void *)userdata;
+	old_digest = ctx->digest;
+
+	/* output */
+	out_sg = &cf->sg[0];
+	qm_sg_entry_set64(out_sg, digest->iova);
+	out_sg->length = ses->digest_length;
+	cpu_to_hw_sg(out_sg);
+
+	/* input */
+	in_sg = &cf->sg[1];
+	/* need to extend the input to a compound frame */
+	in_sg->extension = 1;
+	in_sg->final = 1;
+	in_sg->length = data_len;
+	qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(&cf->sg[2]));
+
+	/* 1st seg */
+	sg = in_sg + 1;
+
+	if (ses->iv.length) {
+		uint8_t *iv_ptr;
+
+		iv_ptr = rte_crypto_op_ctod_offset(userdata, uint8_t *,
+						   ses->iv.offset);
+
+		if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
+			iv_ptr = conv_to_snow_f9_iv(iv_ptr);
+			sg->length = 12;
+		} else if (ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
+			iv_ptr = conv_to_zuc_eia_iv(iv_ptr);
+			sg->length = 8;
+		} else {
+			sg->length = ses->iv.length;
+		}
+		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(iv_ptr));
+		in_sg->length += sg->length;
+		cpu_to_hw_sg(sg);
+		sg++;
+	}
+
+	qm_sg_entry_set64(sg, sgl->vec[0].iova);
+	sg->offset = data_offset;
+
+	if (data_len <= (int)(sgl->vec[0].len - data_offset)) {
+		sg->length = data_len;
+	} else {
+		sg->length = sgl->vec[0].len - data_offset;
+
+		/* remaining i/p segs */
+		for (i = 1; i < sgl->num; i++) {
+			cpu_to_hw_sg(sg);
+			sg++;
+			qm_sg_entry_set64(sg, sgl->vec[i].iova);
+			if (data_len > (int)sgl->vec[i].len)
+				sg->length = sgl->vec[i].len;
+			else
+				sg->length = data_len;
+
+			data_len = data_len - sg->length;
+			if (data_len < 1)
+				break;
+		}
+	}
+
+	if (is_decode(ses)) {
+		/* Digest verification case */
+		cpu_to_hw_sg(sg);
+		sg++;
+		rte_memcpy(old_digest, digest->va,
+				ses->digest_length);
+		start_addr = rte_dpaa_mem_vtop(old_digest);
+		qm_sg_entry_set64(sg, start_addr);
+		sg->length = ses->digest_length;
+		in_sg->length += ses->digest_length;
+	}
+	sg->final = 1;
+	cpu_to_hw_sg(sg);
+	cpu_to_hw_sg(in_sg);
+
+	return cf;
+}
+
+static inline struct dpaa_sec_job *
+build_dpaa_raw_dp_chain_fd(uint8_t *drv_ctx,
+			struct rte_crypto_sgl *sgl,
+			struct rte_crypto_sgl *dest_sgl,
+			struct rte_crypto_va_iova_ptr *iv,
+			struct rte_crypto_va_iova_ptr *digest,
+			struct rte_crypto_va_iova_ptr *auth_iv,
+			union rte_crypto_sym_ofs ofs,
+			void *userdata,
+			struct qm_fd *fd)
+{
+	RTE_SET_USED(auth_iv);
+
+	dpaa_sec_session *ses =
+		((struct dpaa_sec_raw_dp_ctx *)drv_ctx)->session;
+	struct dpaa_sec_job *cf;
+	struct dpaa_sec_op_ctx *ctx;
+	struct qm_sg_entry *sg, *out_sg, *in_sg;
+	uint8_t *IV_ptr = iv->va;
+	unsigned int i;
+	uint16_t auth_hdr_len = ofs.ofs.cipher.head -
+				ofs.ofs.auth.head;
+	uint16_t auth_tail_len = ofs.ofs.auth.tail;
+	uint32_t auth_only_len = (auth_tail_len << 16) | auth_hdr_len;
+	int data_len = 0, auth_len = 0, cipher_len = 0;
+
+	for (i = 0; i < sgl->num; i++)
+		data_len += sgl->vec[i].len;
+
+	cipher_len = data_len - ofs.ofs.cipher.head - ofs.ofs.cipher.tail;
+	auth_len = data_len - ofs.ofs.auth.head - ofs.ofs.auth.tail;
+
+	if (sgl->num > MAX_SG_ENTRIES) {
+		DPAA_SEC_DP_ERR("Cipher-Auth: Max sec segs supported is %d",
+				MAX_SG_ENTRIES);
+		return NULL;
+	}
+
+	ctx = dpaa_sec_alloc_raw_ctx(ses, sgl->num * 2 + 4);
+	if (!ctx)
+		return NULL;
+
+	cf = &ctx->job;
+	ctx->userdata = (void *)userdata;
+
+	rte_prefetch0(cf->sg);
+
+	/* output */
+	out_sg = &cf->sg[0];
+	out_sg->extension = 1;
+	if (is_encode(ses))
+		out_sg->length = cipher_len + ses->digest_length;
+	else
+		out_sg->length = cipher_len;
+
+	/* output sg entries */
+	sg = &cf->sg[2];
+	qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(sg));
+	cpu_to_hw_sg(out_sg);
+
+	/* 1st seg */
+	if (dest_sgl) {
+		qm_sg_entry_set64(sg, dest_sgl->vec[0].iova);
+		sg->length = dest_sgl->vec[0].len - ofs.ofs.cipher.head;
+		sg->offset = ofs.ofs.cipher.head;
+
+		/* Successive segs */
+		for (i = 1; i < dest_sgl->num; i++) {
+			cpu_to_hw_sg(sg);
+			sg++;
+			qm_sg_entry_set64(sg, dest_sgl->vec[i].iova);
+			sg->length = dest_sgl->vec[i].len;
+		}
+	} else {
+		qm_sg_entry_set64(sg, sgl->vec[0].iova);
+		sg->length = sgl->vec[0].len - ofs.ofs.cipher.head;
+		sg->offset = ofs.ofs.cipher.head;
+
+		/* Successive segs */
+		for (i = 1; i < sgl->num; i++) {
+			cpu_to_hw_sg(sg);
+			sg++;
+			qm_sg_entry_set64(sg, sgl->vec[i].iova);
+			sg->length = sgl->vec[i].len;
+		}
+	}
+
+	if (is_encode(ses)) {
+		cpu_to_hw_sg(sg);
+		/* set auth output */
+		sg++;
+		qm_sg_entry_set64(sg, digest->iova);
+		sg->length = ses->digest_length;
+	}
+	sg->final = 1;
+	cpu_to_hw_sg(sg);
+
+	/* input */
+	in_sg = &cf->sg[1];
+	in_sg->extension = 1;
+	in_sg->final = 1;
+	if (is_encode(ses))
+		in_sg->length = ses->iv.length + auth_len;
+	else
+		in_sg->length = ses->iv.length + auth_len
+						+ ses->digest_length;
+
+	/* input sg entries */
+	sg++;
+	qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));
+	cpu_to_hw_sg(in_sg);
+
+	/* 1st seg IV */
+	qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
+	sg->length = ses->iv.length;
+	cpu_to_hw_sg(sg);
+
+	/* 2 seg */
+	sg++;
+	qm_sg_entry_set64(sg, sgl->vec[0].iova);
+	sg->length = sgl->vec[0].len - ofs.ofs.auth.head;
+	sg->offset = ofs.ofs.auth.head;
+
+	/* Successive segs */
+	for (i = 1; i < sgl->num; i++) {
+		cpu_to_hw_sg(sg);
+		sg++;
+		qm_sg_entry_set64(sg, sgl->vec[i].iova);
+		sg->length = sgl->vec[i].len;
+	}
+
+	if (is_decode(ses)) {
+		cpu_to_hw_sg(sg);
+		sg++;
+		memcpy(ctx->digest, digest->va,
+			ses->digest_length);
+		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(ctx->digest));
+		sg->length = ses->digest_length;
+	}
+	sg->final = 1;
+	cpu_to_hw_sg(sg);
+
+	if (auth_only_len)
+		fd->cmd = 0x80000000 | auth_only_len;
+
+	return cf;
 }
 
 static struct dpaa_sec_job *
@@ -104,10 +374,13 @@ build_dpaa_raw_dp_cipher_fd(uint8_t *drv_ctx,
 			struct rte_crypto_va_iova_ptr *digest,
 			struct rte_crypto_va_iova_ptr *auth_iv,
 			union rte_crypto_sym_ofs ofs,
-			void *userdata)
+			void *userdata,
+			struct qm_fd *fd)
 {
 	RTE_SET_USED(digest);
 	RTE_SET_USED(auth_iv);
+	RTE_SET_USED(fd);
+
 	dpaa_sec_session *ses =
 		((struct dpaa_sec_raw_dp_ctx *)drv_ctx)->session;
 	struct dpaa_sec_job *cf;
@@ -264,15 +537,14 @@ dpaa_sec_raw_enqueue_burst(void *qp_data, uint8_t *drv_ctx,
 						&vec->digest[loop],
 						&vec->auth_iv[loop],
 						ofs,
-						user_data[loop]);
+						user_data[loop],
+						fd);
 			if (!cf) {
 				DPAA_SEC_ERR("error: Improper packet contents"
 					" for crypto operation");
 				goto skip_tx;
 			}
 			inq[loop] = ses->inq[rte_lcore_id() % MAX_DPAA_CORES];
-			fd->opaque_addr = 0;
-			fd->cmd = 0;
 			qm_fd_addr_set64(fd, rte_dpaa_mem_vtop(cf->sg));
 			fd->_format1 = qm_fd_compound;
 			fd->length29 = 2 * sizeof(struct qm_sg_entry);
@@ -470,6 +742,8 @@ dpaa_sec_configure_raw_dp_ctx(struct rte_cryptodev *dev, uint16_t qp_id,
 		sess->build_raw_dp_fd = build_dpaa_raw_dp_cipher_fd;
 	else if (sess->ctxt == DPAA_SEC_AUTH)
 		sess->build_raw_dp_fd = build_dpaa_raw_dp_auth_fd;
+	else if (sess->ctxt == DPAA_SEC_CIPHER_HASH)
+		sess->build_raw_dp_fd = build_dpaa_raw_dp_chain_fd;
 	else
 		return -ENOTSUP;
 	dp_ctx = (struct dpaa_sec_raw_dp_ctx *)raw_dp_ctx->drv_ctx_data;
-- 
2.17.1


^ permalink raw reply	[flat|nested] 115+ messages in thread

* [dpdk-dev] [PATCH v4 13/15] crypto/dpaa_sec: support AEAD and proto with raw APIs
  2021-10-13 19:00     ` [dpdk-dev] [PATCH v4 " Hemant Agrawal
                         ` (11 preceding siblings ...)
  2021-10-13 19:00       ` [dpdk-dev] [PATCH v4 12/15] crypto/dpaa_sec: support authonly and chain with raw APIs Hemant Agrawal
@ 2021-10-13 19:00       ` Hemant Agrawal
  2021-10-13 19:00       ` [dpdk-dev] [PATCH v4 14/15] test/crypto: add raw API test for dpaax Hemant Agrawal
                         ` (2 subsequent siblings)
  15 siblings, 0 replies; 115+ messages in thread
From: Hemant Agrawal @ 2021-10-13 19:00 UTC (permalink / raw)
  To: dev, gakhil; +Cc: konstantin.ananyev, roy.fan.zhang, Gagandeep Singh

From: Gagandeep Singh <g.singh@nxp.com>

This adds support for AEAD and protocol offload with raw APIs
in the dpaa_sec driver.

Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
---
 drivers/crypto/dpaa_sec/dpaa_sec_raw_dp.c | 293 ++++++++++++++++++++++
 1 file changed, 293 insertions(+)
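
One detail worth calling out in the proto path below is the per-packet HFN
override for PDCP: when the session has hfn_ovd set, the driver reads a
32-bit HFN from a fixed offset inside the caller's per-op userdata and ORs it
into the FD command word. A sketch of the application-side layout this
implies (the struct and names are hypothetical; only the offset convention
comes from this patch):

#include <stddef.h>
#include <stdint.h>

/* hypothetical per-op userdata; the session's pdcp.hfn_ovd_offset would be
 * configured as offsetof(struct app_op_ud, hfn)
 */
struct app_op_ud {
	void *cookie;	/* application bookkeeping */
	uint32_t hfn;	/* consumed by build_dpaa_raw_proto_sg() as:
			 * fd->cmd = 0x80000000 | *(uint32_t *)
			 *   ((uint8_t *)userdata + ses->pdcp.hfn_ovd_offset);
			 */
};

static void
set_pkt_hfn(struct app_op_ud *ud, uint32_t hfn)
{
	ud->hfn = hfn;	/* per-packet hyper frame number override */
}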

diff --git a/drivers/crypto/dpaa_sec/dpaa_sec_raw_dp.c b/drivers/crypto/dpaa_sec/dpaa_sec_raw_dp.c
index 03ce21e53f..522685f8cf 100644
--- a/drivers/crypto/dpaa_sec/dpaa_sec_raw_dp.c
+++ b/drivers/crypto/dpaa_sec/dpaa_sec_raw_dp.c
@@ -218,6 +218,163 @@ build_dpaa_raw_dp_auth_fd(uint8_t *drv_ctx,
 	return cf;
 }
 
+static inline struct dpaa_sec_job *
+build_raw_cipher_auth_gcm_sg(uint8_t *drv_ctx,
+			struct rte_crypto_sgl *sgl,
+			struct rte_crypto_sgl *dest_sgl,
+			struct rte_crypto_va_iova_ptr *iv,
+			struct rte_crypto_va_iova_ptr *digest,
+			struct rte_crypto_va_iova_ptr *auth_iv,
+			union rte_crypto_sym_ofs ofs,
+			void *userdata,
+			struct qm_fd *fd)
+{
+	dpaa_sec_session *ses =
+		((struct dpaa_sec_raw_dp_ctx *)drv_ctx)->session;
+	struct dpaa_sec_job *cf;
+	struct dpaa_sec_op_ctx *ctx;
+	struct qm_sg_entry *sg, *out_sg, *in_sg;
+	uint8_t extra_req_segs;
+	uint8_t *IV_ptr = iv->va;
+	int data_len = 0, aead_len = 0;
+	unsigned int i;
+
+	for (i = 0; i < sgl->num; i++)
+		data_len += sgl->vec[i].len;
+
+	extra_req_segs = 4;
+	aead_len = data_len - ofs.ofs.cipher.head - ofs.ofs.cipher.tail;
+
+	if (ses->auth_only_len)
+		extra_req_segs++;
+
+	if (sgl->num > MAX_SG_ENTRIES) {
+		DPAA_SEC_DP_ERR("AEAD: Max sec segs supported is %d",
+				MAX_SG_ENTRIES);
+		return NULL;
+	}
+
+	ctx = dpaa_sec_alloc_raw_ctx(ses,  sgl->num * 2 + extra_req_segs);
+	if (!ctx)
+		return NULL;
+
+	cf = &ctx->job;
+	ctx->userdata = (void *)userdata;
+
+	rte_prefetch0(cf->sg);
+
+	/* output */
+	out_sg = &cf->sg[0];
+	out_sg->extension = 1;
+	if (is_encode(ses))
+		out_sg->length = aead_len + ses->digest_length;
+	else
+		out_sg->length = aead_len;
+
+	/* output sg entries */
+	sg = &cf->sg[2];
+	qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(sg));
+	cpu_to_hw_sg(out_sg);
+
+	if (dest_sgl) {
+		/* 1st seg */
+		qm_sg_entry_set64(sg, dest_sgl->vec[0].iova);
+		sg->length = dest_sgl->vec[0].len - ofs.ofs.cipher.head;
+		sg->offset = ofs.ofs.cipher.head;
+
+		/* Successive segs */
+		for (i = 1; i < dest_sgl->num; i++) {
+			cpu_to_hw_sg(sg);
+			sg++;
+			qm_sg_entry_set64(sg, dest_sgl->vec[i].iova);
+			sg->length = dest_sgl->vec[i].len;
+		}
+	} else {
+		/* 1st seg */
+		qm_sg_entry_set64(sg, sgl->vec[0].iova);
+		sg->length = sgl->vec[0].len - ofs.ofs.cipher.head;
+		sg->offset = ofs.ofs.cipher.head;
+
+		/* Successive segs */
+		for (i = 1; i < sgl->num; i++) {
+			cpu_to_hw_sg(sg);
+			sg++;
+			qm_sg_entry_set64(sg, sgl->vec[i].iova);
+			sg->length = sgl->vec[i].len;
+		}
+
+	}
+
+	if (is_encode(ses)) {
+		cpu_to_hw_sg(sg);
+		/* set auth output */
+		sg++;
+		qm_sg_entry_set64(sg, digest->iova);
+		sg->length = ses->digest_length;
+	}
+	sg->final = 1;
+	cpu_to_hw_sg(sg);
+
+	/* input */
+	in_sg = &cf->sg[1];
+	in_sg->extension = 1;
+	in_sg->final = 1;
+	if (is_encode(ses))
+		in_sg->length = ses->iv.length + aead_len
+						+ ses->auth_only_len;
+	else
+		in_sg->length = ses->iv.length + aead_len
+				+ ses->auth_only_len + ses->digest_length;
+
+	/* input sg entries */
+	sg++;
+	qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));
+	cpu_to_hw_sg(in_sg);
+
+	/* 1st seg IV */
+	qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
+	sg->length = ses->iv.length;
+	cpu_to_hw_sg(sg);
+
+	/* 2 seg auth only */
+	if (ses->auth_only_len) {
+		sg++;
+		qm_sg_entry_set64(sg, auth_iv->iova);
+		sg->length = ses->auth_only_len;
+		cpu_to_hw_sg(sg);
+	}
+
+	/* 3rd seg */
+	sg++;
+	qm_sg_entry_set64(sg, sgl->vec[0].iova);
+	sg->length = sgl->vec[0].len - ofs.ofs.cipher.head;
+	sg->offset = ofs.ofs.cipher.head;
+
+	/* Successive segs */
+	for (i = 1; i < sgl->num; i++) {
+		cpu_to_hw_sg(sg);
+		sg++;
+		qm_sg_entry_set64(sg, sgl->vec[i].iova);
+		sg->length =  sgl->vec[i].len;
+	}
+
+	if (is_decode(ses)) {
+		cpu_to_hw_sg(sg);
+		sg++;
+		memcpy(ctx->digest, digest->va,
+			ses->digest_length);
+		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(ctx->digest));
+		sg->length = ses->digest_length;
+	}
+	sg->final = 1;
+	cpu_to_hw_sg(sg);
+
+	if (ses->auth_only_len)
+		fd->cmd = 0x80000000 | ses->auth_only_len;
+
+	return cf;
+}
+
 static inline struct dpaa_sec_job *
 build_dpaa_raw_dp_chain_fd(uint8_t *drv_ctx,
 			struct rte_crypto_sgl *sgl,
@@ -484,6 +641,135 @@ build_dpaa_raw_dp_cipher_fd(uint8_t *drv_ctx,
 	return cf;
 }
 
+#ifdef RTE_LIBRTE_SECURITY
+static inline struct dpaa_sec_job *
+build_dpaa_raw_proto_sg(uint8_t *drv_ctx,
+			struct rte_crypto_sgl *sgl,
+			struct rte_crypto_sgl *dest_sgl,
+			struct rte_crypto_va_iova_ptr *iv,
+			struct rte_crypto_va_iova_ptr *digest,
+			struct rte_crypto_va_iova_ptr *auth_iv,
+			union rte_crypto_sym_ofs ofs,
+			void *userdata,
+			struct qm_fd *fd)
+{
+	RTE_SET_USED(iv);
+	RTE_SET_USED(digest);
+	RTE_SET_USED(auth_iv);
+	RTE_SET_USED(ofs);
+
+	dpaa_sec_session *ses =
+		((struct dpaa_sec_raw_dp_ctx *)drv_ctx)->session;
+	struct dpaa_sec_job *cf;
+	struct dpaa_sec_op_ctx *ctx;
+	struct qm_sg_entry *sg, *out_sg, *in_sg;
+	uint32_t in_len = 0, out_len = 0;
+	unsigned int i;
+
+	if (sgl->num > MAX_SG_ENTRIES) {
+		DPAA_SEC_DP_ERR("Proto: Max sec segs supported is %d",
+				MAX_SG_ENTRIES);
+		return NULL;
+	}
+
+	ctx = dpaa_sec_alloc_raw_ctx(ses, sgl->num * 2 + 4);
+	if (!ctx)
+		return NULL;
+	cf = &ctx->job;
+	ctx->userdata = (void *)userdata;
+	/* output */
+	out_sg = &cf->sg[0];
+	out_sg->extension = 1;
+	qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(&cf->sg[2]));
+
+	if (dest_sgl) {
+		/* 1st seg */
+		sg = &cf->sg[2];
+		qm_sg_entry_set64(sg, dest_sgl->vec[0].iova);
+		sg->offset = 0;
+		sg->length = dest_sgl->vec[0].len;
+		out_len += sg->length;
+
+		for (i = 1; i < dest_sgl->num; i++) {
+		/* Successive segs */
+			cpu_to_hw_sg(sg);
+			sg++;
+			qm_sg_entry_set64(sg, dest_sgl->vec[i].iova);
+			sg->offset = 0;
+			sg->length = dest_sgl->vec[i].len;
+			out_len += sg->length;
+		}
+		sg->length = dest_sgl->vec[i - 1].tot_len;
+	} else {
+		/* 1st seg */
+		sg = &cf->sg[2];
+		qm_sg_entry_set64(sg, sgl->vec[0].iova);
+		sg->offset = 0;
+		sg->length = sgl->vec[0].len;
+		out_len += sg->length;
+
+		for (i = 1; i < sgl->num; i++) {
+		/* Successive segs */
+			cpu_to_hw_sg(sg);
+			sg++;
+			qm_sg_entry_set64(sg, sgl->vec[i].iova);
+			sg->offset = 0;
+			sg->length = sgl->vec[i].len;
+			out_len += sg->length;
+		}
+		sg->length = sgl->vec[i - 1].tot_len;
+
+	}
+	out_len += sg->length;
+	sg->final = 1;
+	cpu_to_hw_sg(sg);
+
+	out_sg->length = out_len;
+	cpu_to_hw_sg(out_sg);
+
+	/* input */
+	in_sg = &cf->sg[1];
+	in_sg->extension = 1;
+	in_sg->final = 1;
+	in_len = sgl->vec[0].len;
+
+	sg++;
+	qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));
+
+	/* 1st seg */
+	qm_sg_entry_set64(sg, sgl->vec[0].iova);
+	sg->length = sgl->vec[0].len;
+	sg->offset = 0;
+
+	/* Successive segs */
+	for (i = 1; i < sgl->num; i++) {
+		cpu_to_hw_sg(sg);
+		sg++;
+		qm_sg_entry_set64(sg, sgl->vec[i].iova);
+		sg->length = sgl->vec[i].len;
+		sg->offset = 0;
+		in_len += sg->length;
+	}
+	sg->final = 1;
+	cpu_to_hw_sg(sg);
+
+	in_sg->length = in_len;
+	cpu_to_hw_sg(in_sg);
+
+	if ((ses->ctxt == DPAA_SEC_PDCP) && ses->pdcp.hfn_ovd) {
+		fd->cmd = 0x80000000 |
+			*((uint32_t *)((uint8_t *)userdata +
+			ses->pdcp.hfn_ovd_offset));
+		DPAA_SEC_DP_DEBUG("Per packet HFN: %x, ovd:%u\n",
+			*((uint32_t *)((uint8_t *)userdata +
+			ses->pdcp.hfn_ovd_offset)),
+			ses->pdcp.hfn_ovd);
+	}
+
+	return cf;
+}
+#endif
+
 static uint32_t
 dpaa_sec_raw_enqueue_burst(void *qp_data, uint8_t *drv_ctx,
 	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
@@ -744,6 +1030,13 @@ dpaa_sec_configure_raw_dp_ctx(struct rte_cryptodev *dev, uint16_t qp_id,
 		sess->build_raw_dp_fd = build_dpaa_raw_dp_auth_fd;
 	else if (sess->ctxt == DPAA_SEC_CIPHER_HASH)
 		sess->build_raw_dp_fd = build_dpaa_raw_dp_chain_fd;
+	else if (sess->ctxt == DPAA_SEC_AEAD)
+		sess->build_raw_dp_fd = build_raw_cipher_auth_gcm_sg;
+#ifdef RTE_LIBRTE_SECURITY
+	else if (sess->ctxt == DPAA_SEC_IPSEC ||
+			sess->ctxt == DPAA_SEC_PDCP)
+		sess->build_raw_dp_fd = build_dpaa_raw_proto_sg;
+#endif
 	else
 		return -ENOTSUP;
 	dp_ctx = (struct dpaa_sec_raw_dp_ctx *)raw_dp_ctx->drv_ctx_data;
-- 
2.17.1


^ permalink raw reply	[flat|nested] 115+ messages in thread

* [dpdk-dev] [PATCH v4 14/15] test/crypto: add raw API test for dpaax
  2021-10-13 19:00     ` [dpdk-dev] [PATCH v4 " Hemant Agrawal
                         ` (12 preceding siblings ...)
  2021-10-13 19:00       ` [dpdk-dev] [PATCH v4 13/15] crypto/dpaa_sec: support AEAD and proto " Hemant Agrawal
@ 2021-10-13 19:00       ` Hemant Agrawal
  2021-10-13 19:00       ` [dpdk-dev] [PATCH v4 15/15] test/crypto: add raw API support in 5G algos Hemant Agrawal
  2021-10-17 16:16       ` [dpdk-dev] [PATCH v5 00/15] crypto: add raw vector support in DPAAx Hemant Agrawal
  15 siblings, 0 replies; 115+ messages in thread
From: Hemant Agrawal @ 2021-10-13 19:00 UTC (permalink / raw)
  To: dev, gakhil; +Cc: konstantin.ananyev, roy.fan.zhang, Gagandeep Singh

This patch adds support for raw API tests for
the dpaa_sec and dpaa2_sec platforms.

Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
Signed-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>
---
 app/test/test_cryptodev.c | 116 +++++++++++++++++++++++++++++++++++---
 1 file changed, 109 insertions(+), 7 deletions(-)
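
Two conventions in the test changes below are worth keeping in mind: raw
tests only run when the device advertises RTE_CRYPTODEV_FF_SYM_RAW_DP, and
out-of-place processing is requested by making vec.dest_sgl non-NULL (NULL
means in-place). A condensed sketch of the dest_sgl setup (a helper written
for illustration, not test code from this patch):

#include <rte_cryptodev.h>
#include <rte_mbuf.h>

static int
setup_dest_sgl(struct rte_crypto_sym_vec *vec,
	struct rte_crypto_sgl *dest_sgl, struct rte_crypto_vec *dest_vec,
	uint32_t n_vec, struct rte_mbuf *m_dst, uint32_t max_len)
{
	int n;

	if (m_dst == NULL) {
		vec->dest_sgl = NULL;	/* in-place processing */
		return 0;
	}
	n = rte_crypto_mbuf_to_vec(m_dst, 0, max_len, dest_vec, n_vec);
	if (n < 0 || n > m_dst->nb_segs)
		return -1;
	dest_sgl->vec = dest_vec;
	dest_sgl->num = n;
	vec->dest_sgl = dest_sgl;	/* non-NULL => out of place */
	return 0;
}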

diff --git a/app/test/test_cryptodev.c b/app/test/test_cryptodev.c
index 4778dafbe5..73c64301f7 100644
--- a/app/test/test_cryptodev.c
+++ b/app/test/test_cryptodev.c
@@ -186,11 +186,11 @@ process_sym_raw_dp_op(uint8_t dev_id, uint16_t qp_id,
 {
 	struct rte_crypto_sym_op *sop = op->sym;
 	struct rte_crypto_op *ret_op = NULL;
-	struct rte_crypto_vec data_vec[UINT8_MAX];
+	struct rte_crypto_vec data_vec[UINT8_MAX], dest_data_vec[UINT8_MAX];
 	struct rte_crypto_va_iova_ptr cipher_iv, digest, aad_auth_iv;
 	union rte_crypto_sym_ofs ofs;
 	struct rte_crypto_sym_vec vec;
-	struct rte_crypto_sgl sgl;
+	struct rte_crypto_sgl sgl, dest_sgl;
 	uint32_t max_len;
 	union rte_cryptodev_session_ctx sess;
 	uint32_t count = 0;
@@ -326,6 +326,19 @@ process_sym_raw_dp_op(uint8_t dev_id, uint16_t qp_id,
 	}
 
 	sgl.num = n;
+	/* Out of place */
+	if (sop->m_dst != NULL) {
+		dest_sgl.vec = dest_data_vec;
+		vec.dest_sgl = &dest_sgl;
+		n = rte_crypto_mbuf_to_vec(sop->m_dst, 0, max_len,
+				dest_data_vec, RTE_DIM(dest_data_vec));
+		if (n < 0 || n > sop->m_dst->nb_segs) {
+			op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+			goto exit;
+		}
+		dest_sgl.num = n;
+	} else
+		vec.dest_sgl = NULL;
 
 	if (rte_cryptodev_raw_enqueue_burst(ctx, &vec, ofs, (void **)&op,
 			&enqueue_status) < 1) {
@@ -8381,10 +8394,21 @@ test_pdcp_proto_SGL(int i, int oop,
 	int to_trn_tbl[16];
 	int segs = 1;
 	unsigned int trn_data = 0;
+	struct rte_cryptodev_info dev_info;
+	uint64_t feat_flags;
 	struct rte_security_ctx *ctx = (struct rte_security_ctx *)
 				rte_cryptodev_get_sec_ctx(
 				ts_params->valid_devs[0]);
+	struct rte_mbuf *temp_mbuf;
+
+	rte_cryptodev_info_get(ts_params->valid_devs[0], &dev_info);
+	feat_flags = dev_info.feature_flags;
 
+	if ((global_api_test_type == CRYPTODEV_RAW_API_TEST) &&
+			(!(feat_flags & RTE_CRYPTODEV_FF_SYM_RAW_DP))) {
+		printf("Device does not support RAW data-path APIs.\n");
+		return -ENOTSUP;
+	}
 	/* Verify the capabilities */
 	struct rte_security_capability_idx sec_cap_idx;
 
@@ -8568,8 +8592,23 @@ test_pdcp_proto_SGL(int i, int oop,
 		ut_params->op->sym->m_dst = ut_params->obuf;
 
 	/* Process crypto operation */
-	if (process_crypto_request(ts_params->valid_devs[0], ut_params->op)
-		== NULL) {
+	temp_mbuf = ut_params->op->sym->m_src;
+	if (global_api_test_type == CRYPTODEV_RAW_API_TEST) {
+		/* filling lengths */
+		while (temp_mbuf) {
+			ut_params->op->sym->cipher.data.length
+				+= temp_mbuf->pkt_len;
+			ut_params->op->sym->auth.data.length
+				+= temp_mbuf->pkt_len;
+			temp_mbuf = temp_mbuf->next;
+		}
+		process_sym_raw_dp_op(ts_params->valid_devs[0], 0,
+			ut_params->op, 1, 1, 0, 0);
+	} else {
+		ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+							ut_params->op);
+	}
+	if (ut_params->op == NULL) {
 		printf("TestCase %s()-%d line %d failed %s: ",
 			__func__, i, __LINE__,
 			"failed to process sym crypto op");
@@ -10450,6 +10489,7 @@ test_authenticated_encryption_oop(const struct aead_test_data *tdata)
 	int retval;
 	uint8_t *ciphertext, *auth_tag;
 	uint16_t plaintext_pad_len;
+	struct rte_cryptodev_info dev_info;
 
 	/* Verify the capabilities */
 	struct rte_cryptodev_sym_capability_idx cap_idx;
@@ -10459,7 +10499,11 @@ test_authenticated_encryption_oop(const struct aead_test_data *tdata)
 			&cap_idx) == NULL)
 		return TEST_SKIPPED;
 
-	if (global_api_test_type == CRYPTODEV_RAW_API_TEST)
+	rte_cryptodev_info_get(ts_params->valid_devs[0], &dev_info);
+	uint64_t feat_flags = dev_info.feature_flags;
+
+	if ((global_api_test_type == CRYPTODEV_RAW_API_TEST) &&
+			(!(feat_flags & RTE_CRYPTODEV_FF_SYM_RAW_DP)))
 		return TEST_SKIPPED;
 
 	/* not supported with CPU crypto */
@@ -10496,7 +10540,11 @@ test_authenticated_encryption_oop(const struct aead_test_data *tdata)
 	ut_params->op->sym->m_dst = ut_params->obuf;
 
 	/* Process crypto operation */
-	TEST_ASSERT_NOT_NULL(process_crypto_request(ts_params->valid_devs[0],
+	if (global_api_test_type == CRYPTODEV_RAW_API_TEST)
+		process_sym_raw_dp_op(ts_params->valid_devs[0], 0,
+			ut_params->op, 0, 0, 0, 0);
+	else
+		TEST_ASSERT_NOT_NULL(process_crypto_request(ts_params->valid_devs[0],
 			ut_params->op), "failed to process sym crypto op");
 
 	TEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,
@@ -10542,6 +10590,10 @@ test_authenticated_decryption_oop(const struct aead_test_data *tdata)
 
 	int retval;
 	uint8_t *plaintext;
+	struct rte_cryptodev_info dev_info;
+
+	rte_cryptodev_info_get(ts_params->valid_devs[0], &dev_info);
+	uint64_t feat_flags = dev_info.feature_flags;
 
 	/* Verify the capabilities */
 	struct rte_cryptodev_sym_capability_idx cap_idx;
@@ -10556,6 +10608,12 @@ test_authenticated_decryption_oop(const struct aead_test_data *tdata)
 			global_api_test_type == CRYPTODEV_RAW_API_TEST)
 		return TEST_SKIPPED;
 
+	if ((global_api_test_type == CRYPTODEV_RAW_API_TEST) &&
+			(!(feat_flags & RTE_CRYPTODEV_FF_SYM_RAW_DP))) {
+		printf("Device does not support RAW data-path APIs.\n");
+		return TEST_SKIPPED;
+	}
+
 	/* Create AEAD session */
 	retval = create_aead_session(ts_params->valid_devs[0],
 			tdata->algo,
@@ -10586,7 +10644,11 @@ test_authenticated_decryption_oop(const struct aead_test_data *tdata)
 	ut_params->op->sym->m_dst = ut_params->obuf;
 
 	/* Process crypto operation */
-	TEST_ASSERT_NOT_NULL(process_crypto_request(ts_params->valid_devs[0],
+	if (global_api_test_type == CRYPTODEV_RAW_API_TEST)
+		process_sym_raw_dp_op(ts_params->valid_devs[0], 0,
+				ut_params->op, 0, 0, 0, 0);
+	else
+		TEST_ASSERT_NOT_NULL(process_crypto_request(ts_params->valid_devs[0],
 			ut_params->op), "failed to process sym crypto op");
 
 	TEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,
@@ -15434,6 +15496,46 @@ test_cryptodev_cn10k(void)
 	return run_cryptodev_testsuite(RTE_STR(CRYPTODEV_NAME_CN10K_PMD));
 }
 
+static int
+test_cryptodev_dpaa2_sec_raw_api(void)
+{
+	static const char *pmd_name = RTE_STR(CRYPTODEV_NAME_DPAA2_SEC_PMD);
+	int ret;
+
+	ret = require_feature_flag(pmd_name, RTE_CRYPTODEV_FF_SYM_RAW_DP,
+			"RAW API");
+	if (ret)
+		return ret;
+
+	global_api_test_type = CRYPTODEV_RAW_API_TEST;
+	ret = run_cryptodev_testsuite(pmd_name);
+	global_api_test_type = CRYPTODEV_API_TEST;
+
+	return ret;
+}
+
+static int
+test_cryptodev_dpaa_sec_raw_api(void)
+{
+	static const char *pmd_name = RTE_STR(CRYPTODEV_NAME_DPAA_SEC_PMD);
+	int ret;
+
+	ret = require_feature_flag(pmd_name, RTE_CRYPTODEV_FF_SYM_RAW_DP,
+			"RAW API");
+	if (ret)
+		return ret;
+
+	global_api_test_type = CRYPTODEV_RAW_API_TEST;
+	ret = run_cryptodev_testsuite(pmd_name);
+	global_api_test_type = CRYPTODEV_API_TEST;
+
+	return ret;
+}
+
+REGISTER_TEST_COMMAND(cryptodev_dpaa2_sec_raw_api_autotest,
+		test_cryptodev_dpaa2_sec_raw_api);
+REGISTER_TEST_COMMAND(cryptodev_dpaa_sec_raw_api_autotest,
+		test_cryptodev_dpaa_sec_raw_api);
 REGISTER_TEST_COMMAND(cryptodev_qat_raw_api_autotest,
 		test_cryptodev_qat_raw_api);
 REGISTER_TEST_COMMAND(cryptodev_qat_autotest, test_cryptodev_qat);
-- 
2.17.1


^ permalink raw reply	[flat|nested] 115+ messages in thread

* [dpdk-dev] [PATCH v4 15/15] test/crypto: add raw API support in 5G algos
  2021-10-13 19:00     ` [dpdk-dev] [PATCH v4 " Hemant Agrawal
                         ` (13 preceding siblings ...)
  2021-10-13 19:00       ` [dpdk-dev] [PATCH v4 14/15] test/crypto: add raw API test for dpaax Hemant Agrawal
@ 2021-10-13 19:00       ` Hemant Agrawal
  2021-10-17 16:16       ` [dpdk-dev] [PATCH v5 00/15] crypto: add raw vector support in DPAAx Hemant Agrawal
  15 siblings, 0 replies; 115+ messages in thread
From: Hemant Agrawal @ 2021-10-13 19:00 UTC (permalink / raw)
  To: dev, gakhil; +Cc: konstantin.ananyev, roy.fan.zhang, Gagandeep Singh

This patch adds support for RAW API testing with the ZUC
and SNOW 3G test cases.

Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
Signed-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>
---
 app/test/test_cryptodev.c | 57 ++++++++++++++++++++++++++++++++++-----
 1 file changed, 51 insertions(+), 6 deletions(-)
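
For orientation in the hunks below: the trailing arguments of the test
helper process_sym_raw_dp_op() are is_cipher, is_auth, len_in_bits and
cipher_iv_len (names inferred from how the helper is used here), so the
SNOW 3G/ZUC calls read as:

/* cipher-only op, lengths passed in bits, IV length from the vector */
process_sym_raw_dp_op(ts_params->valid_devs[0], 0 /* qp */, ut_params->op,
		1 /* is_cipher */, 0 /* is_auth */,
		1 /* len_in_bits */, tdata->cipher_iv.len);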

diff --git a/app/test/test_cryptodev.c b/app/test/test_cryptodev.c
index 73c64301f7..01af4cfbd2 100644
--- a/app/test/test_cryptodev.c
+++ b/app/test/test_cryptodev.c
@@ -379,6 +379,7 @@ process_sym_raw_dp_op(uint8_t dev_id, uint16_t qp_id,
 	}
 
 	op->status = (count == MAX_RAW_DEQUEUE_COUNT + 1 || ret_op != op ||
+			ret_op->status == RTE_CRYPTO_OP_STATUS_ERROR ||
 			n_success < 1) ? RTE_CRYPTO_OP_STATUS_ERROR :
 					RTE_CRYPTO_OP_STATUS_SUCCESS;
 
@@ -4210,6 +4211,16 @@ test_snow3g_encryption_oop(const struct snow3g_test_data *tdata)
 	int retval;
 	unsigned plaintext_pad_len;
 	unsigned plaintext_len;
+	struct rte_cryptodev_info dev_info;
+
+	rte_cryptodev_info_get(ts_params->valid_devs[0], &dev_info);
+	uint64_t feat_flags = dev_info.feature_flags;
+
+	if ((global_api_test_type == CRYPTODEV_RAW_API_TEST) &&
+			(!(feat_flags & RTE_CRYPTODEV_FF_SYM_RAW_DP))) {
+		printf("Device does not support RAW data-path APIs.\n");
+		return -ENOTSUP;
+	}
 
 	/* Verify the capabilities */
 	struct rte_cryptodev_sym_capability_idx cap_idx;
@@ -4265,7 +4276,11 @@ test_snow3g_encryption_oop(const struct snow3g_test_data *tdata)
 	if (retval < 0)
 		return retval;
 
-	ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+	if (global_api_test_type == CRYPTODEV_RAW_API_TEST)
+		process_sym_raw_dp_op(ts_params->valid_devs[0], 0,
+			ut_params->op, 1, 0, 1, tdata->cipher_iv.len);
+	else
+		ut_params->op = process_crypto_request(ts_params->valid_devs[0],
 						ut_params->op);
 	TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
 
@@ -4325,6 +4340,12 @@ test_snow3g_encryption_oop_sgl(const struct snow3g_test_data *tdata)
 		return TEST_SKIPPED;
 	}
 
+	if ((global_api_test_type == CRYPTODEV_RAW_API_TEST) &&
+			(!(feat_flags & RTE_CRYPTODEV_FF_SYM_RAW_DP))) {
+		printf("Device does not support RAW data-path APIs.\n");
+		return -ENOTSUP;
+	}
+
 	/* Create SNOW 3G session */
 	retval = create_wireless_algo_cipher_session(ts_params->valid_devs[0],
 					RTE_CRYPTO_CIPHER_OP_ENCRYPT,
@@ -4359,7 +4380,11 @@ test_snow3g_encryption_oop_sgl(const struct snow3g_test_data *tdata)
 	if (retval < 0)
 		return retval;
 
-	ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+	if (global_api_test_type == CRYPTODEV_RAW_API_TEST)
+		process_sym_raw_dp_op(ts_params->valid_devs[0], 0,
+			ut_params->op, 1, 0, 1, tdata->cipher_iv.len);
+	else
+		ut_params->op = process_crypto_request(ts_params->valid_devs[0],
 						ut_params->op);
 	TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
 
@@ -4486,7 +4511,11 @@ test_snow3g_encryption_offset_oop(const struct snow3g_test_data *tdata)
 	if (retval < 0)
 		return retval;
 
-	ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+	if (global_api_test_type == CRYPTODEV_RAW_API_TEST)
+		process_sym_raw_dp_op(ts_params->valid_devs[0], 0,
+			ut_params->op, 1, 0, 1, tdata->cipher_iv.len);
+	else
+		ut_params->op = process_crypto_request(ts_params->valid_devs[0],
 						ut_params->op);
 	TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
 
@@ -4617,7 +4646,16 @@ static int test_snow3g_decryption_oop(const struct snow3g_test_data *tdata)
 	uint8_t *plaintext, *ciphertext;
 	unsigned ciphertext_pad_len;
 	unsigned ciphertext_len;
+	struct rte_cryptodev_info dev_info;
+
+	rte_cryptodev_info_get(ts_params->valid_devs[0], &dev_info);
+	uint64_t feat_flags = dev_info.feature_flags;
 
+	if ((global_api_test_type == CRYPTODEV_RAW_API_TEST) &&
+			(!(feat_flags & RTE_CRYPTODEV_FF_SYM_RAW_DP))) {
+		printf("Device does not support RAW data-path APIs.\n");
+		return -ENOTSUP;
+	}
 	/* Verify the capabilities */
 	struct rte_cryptodev_sym_capability_idx cap_idx;
 	cap_idx.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
@@ -4675,7 +4713,11 @@ static int test_snow3g_decryption_oop(const struct snow3g_test_data *tdata)
 	if (retval < 0)
 		return retval;
 
-	ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+	if (global_api_test_type == CRYPTODEV_RAW_API_TEST)
+		process_sym_raw_dp_op(ts_params->valid_devs[0], 0,
+			ut_params->op, 1, 0, 1, tdata->cipher_iv.len);
+	else
+		ut_params->op = process_crypto_request(ts_params->valid_devs[0],
 						ut_params->op);
 	TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
 	ut_params->obuf = ut_params->op->sym->m_dst;
@@ -12997,10 +13039,13 @@ test_authentication_verify_fail_when_data_corruption(
 	else {
 		ut_params->op = process_crypto_request(ts_params->valid_devs[0],
 			ut_params->op);
-		TEST_ASSERT_NULL(ut_params->op, "authentication not failed");
 	}
+	if (ut_params->op == NULL)
+		return 0;
+	else if (ut_params->op->status != RTE_CRYPTO_OP_STATUS_SUCCESS)
+		return 0;
 
-	return 0;
+	return -1;
 }
 
 static int
-- 
2.17.1


^ permalink raw reply	[flat|nested] 115+ messages in thread

* Re: [dpdk-dev] [PATCH v4 02/15] crypto: add total raw buffer length
  2021-10-13 19:00       ` [dpdk-dev] [PATCH v4 02/15] crypto: add total raw buffer length Hemant Agrawal
@ 2021-10-14 12:39         ` Zhang, Roy Fan
  2021-10-17  8:30           ` Hemant Agrawal
  2021-10-15 17:45         ` Ananyev, Konstantin
  1 sibling, 1 reply; 115+ messages in thread
From: Zhang, Roy Fan @ 2021-10-14 12:39 UTC (permalink / raw)
  To: Hemant Agrawal, dev, gakhil; +Cc: Ananyev, Konstantin, Gagandeep Singh

Hi Hemant,

I still think rte_crypto_vec together with rte_crypto_sym_ofs is enough to
describe the data, including both the data and the aad.

Imagine a case with two 10-byte segments, an encryption/hash size of 12 and
an aad of 8. We will have
struct rte_crypto_vec sgl[2] = {{.base = x, .len = 10}, {.base = y, .len = 10}};
union rte_crypto_sym_ofs ofs = {.ofs.cipher.head = 0, .ofs.cipher.tail = 8, .ofs.auth.head = 0, .ofs.auth.tail = 8};

The driver shall understand that the trailing 8 bytes are not included for cipher/auth.

Regards,
Fan

> -----Original Message-----
> From: Hemant Agrawal <hemant.agrawal@nxp.com>
> Sent: Wednesday, October 13, 2021 8:00 PM
> To: dev@dpdk.org; gakhil@marvell.com
> Cc: Ananyev, Konstantin <konstantin.ananyev@intel.com>; Zhang, Roy Fan
> <roy.fan.zhang@intel.com>; Gagandeep Singh <g.singh@nxp.com>
> Subject: [PATCH v4 02/15] crypto: add total raw buffer length
> 
> From: Gagandeep Singh <g.singh@nxp.com>
> 
> The current crypto raw data vectors is extended to support
> rte_security usecases, where we need total data length to know
> how much additional memory space is available in buffer other
> than data length so that driver/HW can write expanded size
> data after encryption.
> 
> Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
> Acked-by: Akhil Goyal <gakhil@marvell.com>
> ---
>  doc/guides/rel_notes/deprecation.rst | 7 -------
>  lib/cryptodev/rte_crypto_sym.h       | 7 +++++++
>  2 files changed, 7 insertions(+), 7 deletions(-)
> 
> diff --git a/doc/guides/rel_notes/deprecation.rst
> b/doc/guides/rel_notes/deprecation.rst
> index a4e86b31f5..53155459a0 100644
> --- a/doc/guides/rel_notes/deprecation.rst
> +++ b/doc/guides/rel_notes/deprecation.rst
> @@ -188,13 +188,6 @@ Deprecation Notices
>    This field will be null for inplace processing.
>    This change is targeted for DPDK 21.11.
> 
> -* cryptodev: The structure ``rte_crypto_vec`` would be updated to add
> -  ``tot_len`` to support total buffer length.
> -  This is required for security cases like IPsec and PDCP encryption offload
> -  to know how much additional memory space is available in buffer other
> than
> -  data length so that driver/HW can write expanded size data after
> encryption.
> -  This change is targeted for DPDK 21.11.
> -
>  * cryptodev: Hide structures ``rte_cryptodev_sym_session`` and
>    ``rte_cryptodev_asym_session`` to remove unnecessary indirection
> between
>    session and the private data of session. An opaque pointer can be exposed
> diff --git a/lib/cryptodev/rte_crypto_sym.h
> b/lib/cryptodev/rte_crypto_sym.h
> index dcc0bd5933..6be283e83c 100644
> --- a/lib/cryptodev/rte_crypto_sym.h
> +++ b/lib/cryptodev/rte_crypto_sym.h
> @@ -37,6 +37,8 @@ struct rte_crypto_vec {
>  	rte_iova_t iova;
>  	/** length of the data buffer */
>  	uint32_t len;
> +	/** total buffer length */
> +	uint32_t tot_len;
>  };
> 
>  /**
> @@ -980,12 +982,14 @@ rte_crypto_mbuf_to_vec(const struct rte_mbuf
> *mb, uint32_t ofs, uint32_t len,
>  	seglen = mb->data_len - ofs;
>  	if (len <= seglen) {
>  		vec[0].len = len;
> +		vec[0].tot_len = mb->buf_len - rte_pktmbuf_headroom(mb)
> - ofs;
>  		return 1;
>  	}
> 
>  	/* data spread across segments */
>  	vec[0].len = seglen;
>  	left = len - seglen;
> +	vec[0].tot_len = mb->buf_len - rte_pktmbuf_headroom(mb) - ofs;
>  	for (i = 1, nseg = mb->next; nseg != NULL; nseg = nseg->next, i++) {
> 
>  		vec[i].base = rte_pktmbuf_mtod(nseg, void *);
> @@ -995,6 +999,8 @@ rte_crypto_mbuf_to_vec(const struct rte_mbuf *mb,
> uint32_t ofs, uint32_t len,
>  		if (left <= seglen) {
>  			/* whole requested data is completed */
>  			vec[i].len = left;
> +			vec[i].tot_len = mb->buf_len -
> rte_pktmbuf_headroom(mb)
> +					- ofs;
>  			left = 0;
>  			break;
>  		}
> @@ -1002,6 +1008,7 @@ rte_crypto_mbuf_to_vec(const struct rte_mbuf
> *mb, uint32_t ofs, uint32_t len,
>  		/* use whole segment */
>  		vec[i].len = seglen;
>  		left -= seglen;
> +		vec[i].tot_len = mb->buf_len - rte_pktmbuf_headroom(mb)
> - ofs;
>  	}
> 
>  	RTE_ASSERT(left == 0);
> --
> 2.17.1


^ permalink raw reply	[flat|nested] 115+ messages in thread

* Re: [dpdk-dev] [PATCH v4 04/15] crypto: fix raw process for multi-seg case
  2021-10-13 19:00       ` [dpdk-dev] [PATCH v4 04/15] crypto: fix raw process for multi-seg case Hemant Agrawal
@ 2021-10-15 17:39         ` Ananyev, Konstantin
  0 siblings, 0 replies; 115+ messages in thread
From: Ananyev, Konstantin @ 2021-10-15 17:39 UTC (permalink / raw)
  To: Hemant Agrawal, dev, gakhil
  Cc: Zhang, Roy Fan, Gagandeep Singh, marcinx.smoczynski, stable


> 
> From: Gagandeep Singh <g.singh@nxp.com>
> 
> If no next segment available the “for” loop will fail and it still
> returns i+1 i.e. 2, which is wrong as it has filled only 1 buffer.
> 
> Fixes: 7adf992fb9bf ("cryptodev: introduce CPU crypto API")
> Cc: marcinx.smoczynski@intel.com
> Cc: stable@dpdk.org
> 
> Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
> ---
>  lib/cryptodev/rte_crypto_sym.h | 3 ++-
>  1 file changed, 2 insertions(+), 1 deletion(-)
> 
> diff --git a/lib/cryptodev/rte_crypto_sym.h b/lib/cryptodev/rte_crypto_sym.h
> index b6a229e263..dc88ad6dcc 100644
> --- a/lib/cryptodev/rte_crypto_sym.h
> +++ b/lib/cryptodev/rte_crypto_sym.h
> @@ -1004,6 +1004,7 @@ rte_crypto_mbuf_to_vec(const struct rte_mbuf *mb, uint32_t ofs, uint32_t len,
>  			vec[i].tot_len = mb->buf_len - rte_pktmbuf_headroom(mb)
>  					- ofs;
>  			left = 0;
> +			i++;
>  			break;
>  		}
> 
> @@ -1014,7 +1015,7 @@ rte_crypto_mbuf_to_vec(const struct rte_mbuf *mb, uint32_t ofs, uint32_t len,
>  	}
> 
>  	RTE_ASSERT(left == 0);
> -	return i + 1;
> +	return i;
>  }
> 
> 
> --

Acked-by: Konstantin Ananyev <konstantin.ananyev@intel.com>

> 2.17.1


^ permalink raw reply	[flat|nested] 115+ messages in thread

* Re: [dpdk-dev] [PATCH v4 02/15] crypto: add total raw buffer length
  2021-10-13 19:00       ` [dpdk-dev] [PATCH v4 02/15] crypto: add total raw buffer length Hemant Agrawal
  2021-10-14 12:39         ` Zhang, Roy Fan
@ 2021-10-15 17:45         ` Ananyev, Konstantin
  1 sibling, 0 replies; 115+ messages in thread
From: Ananyev, Konstantin @ 2021-10-15 17:45 UTC (permalink / raw)
  To: Hemant Agrawal, dev, gakhil; +Cc: Zhang, Roy Fan, Gagandeep Singh

> The current crypto raw data vectors is extended to support
> rte_security usecases, where we need total data length to know
> how much additional memory space is available in buffer other
> than data length so that driver/HW can write expanded size
> data after encryption.
> 
> Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
> Acked-by: Akhil Goyal <gakhil@marvell.com>
> ---
>  doc/guides/rel_notes/deprecation.rst | 7 -------
>  lib/cryptodev/rte_crypto_sym.h       | 7 +++++++
>  2 files changed, 7 insertions(+), 7 deletions(-)
> 
> diff --git a/doc/guides/rel_notes/deprecation.rst b/doc/guides/rel_notes/deprecation.rst
> index a4e86b31f5..53155459a0 100644
> --- a/doc/guides/rel_notes/deprecation.rst
> +++ b/doc/guides/rel_notes/deprecation.rst
> @@ -188,13 +188,6 @@ Deprecation Notices
>    This field will be null for inplace processing.
>    This change is targeted for DPDK 21.11.
> 
> -* cryptodev: The structure ``rte_crypto_vec`` would be updated to add
> -  ``tot_len`` to support total buffer length.
> -  This is required for security cases like IPsec and PDCP encryption offload
> -  to know how much additional memory space is available in buffer other than
> -  data length so that driver/HW can write expanded size data after encryption.
> -  This change is targeted for DPDK 21.11.
> -
>  * cryptodev: Hide structures ``rte_cryptodev_sym_session`` and
>    ``rte_cryptodev_asym_session`` to remove unnecessary indirection between
>    session and the private data of session. An opaque pointer can be exposed
> diff --git a/lib/cryptodev/rte_crypto_sym.h b/lib/cryptodev/rte_crypto_sym.h
> index dcc0bd5933..6be283e83c 100644
> --- a/lib/cryptodev/rte_crypto_sym.h
> +++ b/lib/cryptodev/rte_crypto_sym.h
> @@ -37,6 +37,8 @@ struct rte_crypto_vec {
>  	rte_iova_t iova;
>  	/** length of the data buffer */
>  	uint32_t len;
> +	/** total buffer length */
> +	uint32_t tot_len;
>  };
> 
>  /**
> @@ -980,12 +982,14 @@ rte_crypto_mbuf_to_vec(const struct rte_mbuf *mb, uint32_t ofs, uint32_t len,
>  	seglen = mb->data_len - ofs;
>  	if (len <= seglen) {
>  		vec[0].len = len;
> +		vec[0].tot_len = mb->buf_len - rte_pktmbuf_headroom(mb) - ofs;
>  		return 1;
>  	}
> 
>  	/* data spread across segments */
>  	vec[0].len = seglen;
>  	left = len - seglen;
> +	vec[0].tot_len = mb->buf_len - rte_pktmbuf_headroom(mb) - ofs;

As a nit, this line and the similar line above can be merged into one and put
before the 'if' statement above, i.e.:

vec[0].base = rte_pktmbuf_mtod_offset(mb, void *, ofs);
vec[0].iova = rte_pktmbuf_iova_offset(mb, ofs);
vec[0].tot_len = mb->buf_len - rte_pktmbuf_headroom(mb) - ofs;

>  	for (i = 1, nseg = mb->next; nseg != NULL; nseg = nseg->next, i++) {
> 
>  		vec[i].base = rte_pktmbuf_mtod(nseg, void *);
> @@ -995,6 +999,8 @@ rte_crypto_mbuf_to_vec(const struct rte_mbuf *mb, uint32_t ofs, uint32_t len,
>  		if (left <= seglen) {
>  			/* whole requested data is completed */
>  			vec[i].len = left;
> +			vec[i].tot_len = mb->buf_len - rte_pktmbuf_headroom(mb)
> +					- ofs;

Same as above - these two lines can be put as one before 'if'.


>  			left = 0;
>  			break;
>  		}
> @@ -1002,6 +1008,7 @@ rte_crypto_mbuf_to_vec(const struct rte_mbuf *mb, uint32_t ofs, uint32_t len,
>  		/* use whole segment */
>  		vec[i].len = seglen;
>  		left -= seglen;
> +		vec[i].tot_len = mb->buf_len - rte_pktmbuf_headroom(mb) - ofs;
>  	}
> 
>  	RTE_ASSERT(left == 0);
> --
> 2.17.1

With nits above applied:
Acked-by: Konstantin Ananyev <konstantin.ananyev@intel.com>


^ permalink raw reply	[flat|nested] 115+ messages in thread

* Re: [dpdk-dev] [PATCH v4 02/15] crypto: add total raw buffer length
  2021-10-14 12:39         ` Zhang, Roy Fan
@ 2021-10-17  8:30           ` Hemant Agrawal
  0 siblings, 0 replies; 115+ messages in thread
From: Hemant Agrawal @ 2021-10-17  8:30 UTC (permalink / raw)
  To: Zhang, Roy Fan, Hemant Agrawal, dev, gakhil
  Cc: Ananyev, Konstantin, Gagandeep Singh

Hi Fan

On 10/14/2021 6:09 PM, Zhang, Roy Fan wrote:
> Hi Hemant,
>
> I still think it is enough to use rte_crypto_vec as well as
> rte_crypto_sym_ofs is enough to describe the data including both the data
> and aad.
>
> Imagine 2 10 bytes segments case, encryption/hash size of 12 and aad of 8.
> We will have
> struct rte_crypto_vec sgl[2] = {{.base = x, .len = 10}, {.base = y, .len = 10}};
> union rte_crypto_sym_ofs ofs  = {.cipher.head = 0, .cipher.tail = 8, .auth.head = 0, .auth.tail = 8};
>
> The driver shall understand there are 8 bytes not included for cipher/auth.

This is the protocol offload case; that won't work there, since after
encryption the device writes back more data than the input, and the driver
needs to know how much room the buffer actually has.
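
To illustrate with the tot_len computation this patch adds (a sketch; the
helper name is made up):

#include <rte_mbuf.h>

static uint32_t
seg_room(const struct rte_mbuf *mb, uint32_t ofs)
{
	/* what rte_crypto_mbuf_to_vec() now stores in vec[].tot_len:
	 * the writable capacity of the segment, not just the data length
	 */
	return mb->buf_len - rte_pktmbuf_headroom(mb) - ofs;
}

The expanded output (added headers, padding, ICV) must fit within tot_len;
the cipher/auth offsets alone cannot express that room.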

>
> Regards,
> Fan
>
>> -----Original Message-----
>> From: Hemant Agrawal <hemant.agrawal@nxp.com>
>> Sent: Wednesday, October 13, 2021 8:00 PM
>> To: dev@dpdk.org; gakhil@marvell.com
>> Cc: Ananyev, Konstantin <konstantin.ananyev@intel.com>; Zhang, Roy Fan
>> <roy.fan.zhang@intel.com>; Gagandeep Singh <g.singh@nxp.com>
>> Subject: [PATCH v4 02/15] crypto: add total raw buffer length
>>
>> From: Gagandeep Singh <g.singh@nxp.com>
>>
>> The current crypto raw data vectors is extended to support
>> rte_security usecases, where we need total data length to know
>> how much additional memory space is available in buffer other
>> than data length so that driver/HW can write expanded size
>> data after encryption.
>>
>> Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
>> Acked-by: Akhil Goyal <gakhil@marvell.com>
>> ---
>>   doc/guides/rel_notes/deprecation.rst | 7 -------
>>   lib/cryptodev/rte_crypto_sym.h       | 7 +++++++
>>   2 files changed, 7 insertions(+), 7 deletions(-)
>>
>> diff --git a/doc/guides/rel_notes/deprecation.rst
>> b/doc/guides/rel_notes/deprecation.rst
>> index a4e86b31f5..53155459a0 100644
>> --- a/doc/guides/rel_notes/deprecation.rst
>> +++ b/doc/guides/rel_notes/deprecation.rst
>> @@ -188,13 +188,6 @@ Deprecation Notices
>>     This field will be null for inplace processing.
>>     This change is targeted for DPDK 21.11.
>>
>> -* cryptodev: The structure ``rte_crypto_vec`` would be updated to add
>> -  ``tot_len`` to support total buffer length.
>> -  This is required for security cases like IPsec and PDCP encryption offload
>> -  to know how much additional memory space is available in buffer other
>> than
>> -  data length so that driver/HW can write expanded size data after
>> encryption.
>> -  This change is targeted for DPDK 21.11.
>> -
>>   * cryptodev: Hide structures ``rte_cryptodev_sym_session`` and
>>     ``rte_cryptodev_asym_session`` to remove unnecessary indirection
>> between
>>     session and the private data of session. An opaque pointer can be exposed
>> diff --git a/lib/cryptodev/rte_crypto_sym.h
>> b/lib/cryptodev/rte_crypto_sym.h
>> index dcc0bd5933..6be283e83c 100644
>> --- a/lib/cryptodev/rte_crypto_sym.h
>> +++ b/lib/cryptodev/rte_crypto_sym.h
>> @@ -37,6 +37,8 @@ struct rte_crypto_vec {
>>   	rte_iova_t iova;
>>   	/** length of the data buffer */
>>   	uint32_t len;
>> +	/** total buffer length */
>> +	uint32_t tot_len;
>>   };
>>
>>   /**
>> @@ -980,12 +982,14 @@ rte_crypto_mbuf_to_vec(const struct rte_mbuf
>> *mb, uint32_t ofs, uint32_t len,
>>   	seglen = mb->data_len - ofs;
>>   	if (len <= seglen) {
>>   		vec[0].len = len;
>> +		vec[0].tot_len = mb->buf_len - rte_pktmbuf_headroom(mb)
>> - ofs;
>>   		return 1;
>>   	}
>>
>>   	/* data spread across segments */
>>   	vec[0].len = seglen;
>>   	left = len - seglen;
>> +	vec[0].tot_len = mb->buf_len - rte_pktmbuf_headroom(mb) - ofs;
>>   	for (i = 1, nseg = mb->next; nseg != NULL; nseg = nseg->next, i++) {
>>
>>   		vec[i].base = rte_pktmbuf_mtod(nseg, void *);
>> @@ -995,6 +999,8 @@ rte_crypto_mbuf_to_vec(const struct rte_mbuf *mb,
>> uint32_t ofs, uint32_t len,
>>   		if (left <= seglen) {
>>   			/* whole requested data is completed */
>>   			vec[i].len = left;
>> +			vec[i].tot_len = mb->buf_len -
>> rte_pktmbuf_headroom(mb)
>> +					- ofs;
>>   			left = 0;
>>   			break;
>>   		}
>> @@ -1002,6 +1008,7 @@ rte_crypto_mbuf_to_vec(const struct rte_mbuf
>> *mb, uint32_t ofs, uint32_t len,
>>   		/* use whole segment */
>>   		vec[i].len = seglen;
>>   		left -= seglen;
>> +		vec[i].tot_len = mb->buf_len - rte_pktmbuf_headroom(mb)
>> - ofs;
>>   	}
>>
>>   	RTE_ASSERT(left == 0);
>> --
>> 2.17.1

^ permalink raw reply	[flat|nested] 115+ messages in thread

* Re: [dpdk-dev] [EXT] [PATCH v4 03/15] crypto: add dest_sgl in raw vector APIs
  2021-10-13 19:00       ` [dpdk-dev] [PATCH v4 03/15] crypto: add dest_sgl in raw vector APIs Hemant Agrawal
@ 2021-10-17 12:21         ` Akhil Goyal
  0 siblings, 0 replies; 115+ messages in thread
From: Akhil Goyal @ 2021-10-17 12:21 UTC (permalink / raw)
  To: Hemant Agrawal, dev; +Cc: konstantin.ananyev, roy.fan.zhang

> The structure rte_crypto_sym_vec is updated to
> add dest_sgl to support out of place processing.
> 
> Signed-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>
> Acked-by: Akhil Goyal <gakhil@marvell.com>
> Acked-by: Konstantin Ananyev <konstantin.ananyev@intel.com>
> ---
>  doc/guides/rel_notes/deprecation.rst | 5 -----
>  lib/cryptodev/rte_crypto_sym.h       | 2 ++
>  2 files changed, 2 insertions(+), 5 deletions(-)
> 
> diff --git a/doc/guides/rel_notes/deprecation.rst
> b/doc/guides/rel_notes/deprecation.rst
> index 53155459a0..581ee21449 100644
> --- a/doc/guides/rel_notes/deprecation.rst
> +++ b/doc/guides/rel_notes/deprecation.rst
> @@ -183,11 +183,6 @@ Deprecation Notices
>    has a limited size ``uint16_t``.
>    It will be moved and extended as ``uint32_t`` in DPDK 21.11.
> 
> -* cryptodev: The structure ``rte_crypto_sym_vec`` would be updated to add
> -  ``dest_sgl`` to support out of place processing.
> -  This field will be null for inplace processing.
> -  This change is targeted for DPDK 21.11.
> -

Corresponding release notes update missing.


>  * cryptodev: Hide structures ``rte_cryptodev_sym_session`` and
>    ``rte_cryptodev_asym_session`` to remove unnecessary indirection
> between
>    session and the private data of session. An opaque pointer can be exposed
> diff --git a/lib/cryptodev/rte_crypto_sym.h
> b/lib/cryptodev/rte_crypto_sym.h
> index 6be283e83c..b6a229e263 100644
> --- a/lib/cryptodev/rte_crypto_sym.h
> +++ b/lib/cryptodev/rte_crypto_sym.h
> @@ -72,6 +72,8 @@ struct rte_crypto_sym_vec {
>  	uint32_t num;
>  	/** array of SGL vectors */
>  	struct rte_crypto_sgl *src_sgl;
> +	/** array of SGL vectors for OOP, keep it NULL for inplace*/
> +	struct rte_crypto_sgl *dest_sgl;
>  	/** array of pointers to cipher IV */
>  	struct rte_crypto_va_iova_ptr *iv;
>  	/** array of pointers to digest */
> --
> 2.17.1


^ permalink raw reply	[flat|nested] 115+ messages in thread

* [dpdk-dev] [PATCH v5 00/15] crypto: add raw vector support in DPAAx
  2021-10-13 19:00     ` [dpdk-dev] [PATCH v4 " Hemant Agrawal
                         ` (14 preceding siblings ...)
  2021-10-13 19:00       ` [dpdk-dev] [PATCH v4 15/15] test/crypto: add raw API support in 5G algos Hemant Agrawal
@ 2021-10-17 16:16       ` Hemant Agrawal
  2021-10-17 16:16         ` [dpdk-dev] [PATCH v5 01/15] crypto: change sgl to src_sgl in vector Hemant Agrawal
                           ` (15 more replies)
  15 siblings, 16 replies; 115+ messages in thread
From: Hemant Agrawal @ 2021-10-17 16:16 UTC (permalink / raw)
  To: dev, gakhil; +Cc: konstantin.ananyev, roy.fan.zhang

This patch series adds support for the raw vector API in the dpaax_sec drivers. It also enhances the raw vector APIs with OOP and security protocol support.

v2: fix aesni compilation and add release notes.
v3: ignore it
v4: fix the tot_length patch as per Konstantin's comments
v5: address comments from Akhil/Konstantin - update rel notes

Gagandeep Singh (11):
  crypto: add total raw buffer length
  crypto: fix raw process for multi-seg case
  crypto/dpaa2_sec: support raw datapath APIs
  crypto/dpaa2_sec: support AUTH only with raw buffer APIs
  crypto/dpaa2_sec: support AUTHENC with raw buffer APIs
  crypto/dpaa2_sec: support AEAD with raw buffer APIs
  crypto/dpaa2_sec: support OOP with raw buffer API
  crypto/dpaa2_sec: enhance error checks with raw buffer APIs
  crypto/dpaa_sec: support raw datapath APIs
  crypto/dpaa_sec: support authonly and chain with raw APIs
  crypto/dpaa_sec: support AEAD and proto with raw APIs

Hemant Agrawal (4):
  crypto: change sgl to src_sgl in vector
  crypto: add dest_sgl in raw vector APIs
  test/crypto: add raw API test for dpaax
  test/crypto: add raw API support in 5G algos

 app/test/test_cryptodev.c                   |  179 +++-
 doc/guides/rel_notes/deprecation.rst        |   12 -
 doc/guides/rel_notes/release_21_11.rst      |   11 +
 drivers/crypto/aesni_gcm/aesni_gcm_pmd.c    |   12 +-
 drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c  |    6 +-
 drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c |   13 +-
 drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h   |   82 +-
 drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c | 1045 ++++++++++++++++++
 drivers/crypto/dpaa2_sec/meson.build        |    3 +-
 drivers/crypto/dpaa_sec/dpaa_sec.c          |   23 +-
 drivers/crypto/dpaa_sec/dpaa_sec.h          |   40 +-
 drivers/crypto/dpaa_sec/dpaa_sec_raw_dp.c   | 1052 +++++++++++++++++++
 drivers/crypto/dpaa_sec/meson.build         |    4 +-
 drivers/crypto/qat/qat_sym_hw_dp.c          |   27 +-
 lib/cryptodev/rte_crypto_sym.h              |   11 +-
 lib/ipsec/misc.h                            |    4 +-
 16 files changed, 2408 insertions(+), 116 deletions(-)
 create mode 100644 drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c
 create mode 100644 drivers/crypto/dpaa_sec/dpaa_sec_raw_dp.c

-- 
2.17.1


^ permalink raw reply	[flat|nested] 115+ messages in thread

* [dpdk-dev] [PATCH v5 01/15] crypto: change sgl to src_sgl in vector
  2021-10-17 16:16       ` [dpdk-dev] [PATCH v5 00/15] crypto: add raw vector support in DPAAx Hemant Agrawal
@ 2021-10-17 16:16         ` Hemant Agrawal
  2021-10-17 16:16         ` [dpdk-dev] [PATCH v5 02/15] crypto: add total raw buffer length Hemant Agrawal
                           ` (14 subsequent siblings)
  15 siblings, 0 replies; 115+ messages in thread
From: Hemant Agrawal @ 2021-10-17 16:16 UTC (permalink / raw)
  To: dev, gakhil; +Cc: konstantin.ananyev, roy.fan.zhang

This patch renames sgl to src_sgl to help differentiate between the
source and destination SGLs.
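
For illustration only, a minimal sketch of how an application fills
the renamed field (the buffer setup is assumed):

    struct rte_crypto_vec data_vec[1];  /* single flat data buffer */
    struct rte_crypto_sgl sgl = { .vec = data_vec, .num = 1 };
    struct rte_crypto_sym_vec vec = { 0 };

    vec.num = 1;
    vec.src_sgl = &sgl;            /* was vec.sgl before this rename */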

Signed-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>
Acked-by: Akhil Goyal <gakhil@marvell.com>
Acked-by: Konstantin Ananyev <konstantin.ananyev@intel.com>
---
 app/test/test_cryptodev.c                  |  6 ++---
 drivers/crypto/aesni_gcm/aesni_gcm_pmd.c   | 12 +++++-----
 drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c |  6 ++---
 drivers/crypto/qat/qat_sym_hw_dp.c         | 27 +++++++++++++---------
 lib/cryptodev/rte_crypto_sym.h             |  2 +-
 lib/ipsec/misc.h                           |  4 ++--
 6 files changed, 31 insertions(+), 26 deletions(-)

diff --git a/app/test/test_cryptodev.c b/app/test/test_cryptodev.c
index 65b64e1af0..4778dafbe5 100644
--- a/app/test/test_cryptodev.c
+++ b/app/test/test_cryptodev.c
@@ -232,7 +232,7 @@ process_sym_raw_dp_op(uint8_t dev_id, uint16_t qp_id,
 	digest.va = NULL;
 	sgl.vec = data_vec;
 	vec.num = 1;
-	vec.sgl = &sgl;
+	vec.src_sgl = &sgl;
 	vec.iv = &cipher_iv;
 	vec.digest = &digest;
 	vec.aad = &aad_auth_iv;
@@ -396,7 +396,7 @@ process_cpu_aead_op(uint8_t dev_id, struct rte_crypto_op *op)
 
 	sgl.vec = vec;
 	sgl.num = n;
-	symvec.sgl = &sgl;
+	symvec.src_sgl = &sgl;
 	symvec.iv = &iv_ptr;
 	symvec.digest = &digest_ptr;
 	symvec.aad = &aad_ptr;
@@ -442,7 +442,7 @@ process_cpu_crypt_auth_op(uint8_t dev_id, struct rte_crypto_op *op)
 
 	sgl.vec = vec;
 	sgl.num = n;
-	symvec.sgl = &sgl;
+	symvec.src_sgl = &sgl;
 	symvec.iv = &iv_ptr;
 	symvec.digest = &digest_ptr;
 	symvec.status = &st;
diff --git a/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c b/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c
index 330aad8157..d0368828e9 100644
--- a/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c
+++ b/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c
@@ -535,7 +535,7 @@ aesni_gcm_sgl_encrypt(struct aesni_gcm_session *s,
 	processed = 0;
 	for (i = 0; i < vec->num; ++i) {
 		aesni_gcm_process_gcm_sgl_op(s, gdata_ctx,
-			&vec->sgl[i], vec->iv[i].va,
+			&vec->src_sgl[i], vec->iv[i].va,
 			vec->aad[i].va);
 		vec->status[i] = aesni_gcm_sgl_op_finalize_encryption(s,
 			gdata_ctx, vec->digest[i].va);
@@ -554,7 +554,7 @@ aesni_gcm_sgl_decrypt(struct aesni_gcm_session *s,
 	processed = 0;
 	for (i = 0; i < vec->num; ++i) {
 		aesni_gcm_process_gcm_sgl_op(s, gdata_ctx,
-			&vec->sgl[i], vec->iv[i].va,
+			&vec->src_sgl[i], vec->iv[i].va,
 			vec->aad[i].va);
 		 vec->status[i] = aesni_gcm_sgl_op_finalize_decryption(s,
 			gdata_ctx, vec->digest[i].va);
@@ -572,13 +572,13 @@ aesni_gmac_sgl_generate(struct aesni_gcm_session *s,
 
 	processed = 0;
 	for (i = 0; i < vec->num; ++i) {
-		if (vec->sgl[i].num != 1) {
+		if (vec->src_sgl[i].num != 1) {
 			vec->status[i] = ENOTSUP;
 			continue;
 		}
 
 		aesni_gcm_process_gmac_sgl_op(s, gdata_ctx,
-			&vec->sgl[i], vec->iv[i].va);
+			&vec->src_sgl[i], vec->iv[i].va);
 		vec->status[i] = aesni_gcm_sgl_op_finalize_encryption(s,
 			gdata_ctx, vec->digest[i].va);
 		processed += (vec->status[i] == 0);
@@ -595,13 +595,13 @@ aesni_gmac_sgl_verify(struct aesni_gcm_session *s,
 
 	processed = 0;
 	for (i = 0; i < vec->num; ++i) {
-		if (vec->sgl[i].num != 1) {
+		if (vec->src_sgl[i].num != 1) {
 			vec->status[i] = ENOTSUP;
 			continue;
 		}
 
 		aesni_gcm_process_gmac_sgl_op(s, gdata_ctx,
-			&vec->sgl[i], vec->iv[i].va);
+			&vec->src_sgl[i], vec->iv[i].va);
 		vec->status[i] = aesni_gcm_sgl_op_finalize_decryption(s,
 			gdata_ctx, vec->digest[i].va);
 		processed += (vec->status[i] == 0);
diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
index 60963a8208..2419adc699 100644
--- a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
+++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
@@ -2002,14 +2002,14 @@ aesni_mb_cpu_crypto_process_bulk(struct rte_cryptodev *dev,
 	for (i = 0, j = 0, k = 0; i != vec->num; i++) {
 
 
-		ret = check_crypto_sgl(sofs, vec->sgl + i);
+		ret = check_crypto_sgl(sofs, vec->src_sgl + i);
 		if (ret != 0) {
 			vec->status[i] = ret;
 			continue;
 		}
 
-		buf = vec->sgl[i].vec[0].base;
-		len = vec->sgl[i].vec[0].len;
+		buf = vec->src_sgl[i].vec[0].base;
+		len = vec->src_sgl[i].vec[0].len;
 
 		job = IMB_GET_NEXT_JOB(mb_mgr);
 		if (job == NULL) {
diff --git a/drivers/crypto/qat/qat_sym_hw_dp.c b/drivers/crypto/qat/qat_sym_hw_dp.c
index 36d11e0dc9..12825e448b 100644
--- a/drivers/crypto/qat/qat_sym_hw_dp.c
+++ b/drivers/crypto/qat/qat_sym_hw_dp.c
@@ -181,8 +181,9 @@ qat_sym_dp_enqueue_cipher_jobs(void *qp_data, uint8_t *drv_ctx,
 			(uint8_t *)tx_queue->base_addr + tail);
 		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
 
-		data_len = qat_sym_dp_parse_data_vec(qp, req, vec->sgl[i].vec,
-			vec->sgl[i].num);
+		data_len = qat_sym_dp_parse_data_vec(qp, req,
+			vec->src_sgl[i].vec,
+			vec->src_sgl[i].num);
 		if (unlikely(data_len < 0))
 			break;
 		req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data[i];
@@ -302,8 +303,9 @@ qat_sym_dp_enqueue_auth_jobs(void *qp_data, uint8_t *drv_ctx,
 			(uint8_t *)tx_queue->base_addr + tail);
 		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
 
-		data_len = qat_sym_dp_parse_data_vec(qp, req, vec->sgl[i].vec,
-			vec->sgl[i].num);
+		data_len = qat_sym_dp_parse_data_vec(qp, req,
+			vec->src_sgl[i].vec,
+			vec->src_sgl[i].num);
 		if (unlikely(data_len < 0))
 			break;
 		req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data[i];
@@ -484,14 +486,16 @@ qat_sym_dp_enqueue_chain_jobs(void *qp_data, uint8_t *drv_ctx,
 			(uint8_t *)tx_queue->base_addr + tail);
 		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
 
-		data_len = qat_sym_dp_parse_data_vec(qp, req, vec->sgl[i].vec,
-			vec->sgl[i].num);
+		data_len = qat_sym_dp_parse_data_vec(qp, req,
+			vec->src_sgl[i].vec,
+			vec->src_sgl[i].num);
 		if (unlikely(data_len < 0))
 			break;
 		req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data[i];
-		if (unlikely(enqueue_one_chain_job(ctx, req, vec->sgl[i].vec,
-			vec->sgl[i].num, &vec->iv[i], &vec->digest[i],
-				&vec->auth_iv[i], ofs, (uint32_t)data_len)))
+		if (unlikely(enqueue_one_chain_job(ctx, req,
+			vec->src_sgl[i].vec, vec->src_sgl[i].num,
+			&vec->iv[i], &vec->digest[i],
+			&vec->auth_iv[i], ofs, (uint32_t)data_len)))
 			break;
 
 		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
@@ -688,8 +692,9 @@ qat_sym_dp_enqueue_aead_jobs(void *qp_data, uint8_t *drv_ctx,
 			(uint8_t *)tx_queue->base_addr + tail);
 		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
 
-		data_len = qat_sym_dp_parse_data_vec(qp, req, vec->sgl[i].vec,
-			vec->sgl[i].num);
+		data_len = qat_sym_dp_parse_data_vec(qp, req,
+			vec->src_sgl[i].vec,
+			vec->src_sgl[i].num);
 		if (unlikely(data_len < 0))
 			break;
 		req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data[i];
diff --git a/lib/cryptodev/rte_crypto_sym.h b/lib/cryptodev/rte_crypto_sym.h
index 1106ad6201..f8cb2ccca0 100644
--- a/lib/cryptodev/rte_crypto_sym.h
+++ b/lib/cryptodev/rte_crypto_sym.h
@@ -69,7 +69,7 @@ struct rte_crypto_sym_vec {
 	/** number of operations to perform */
 	uint32_t num;
 	/** array of SGL vectors */
-	struct rte_crypto_sgl *sgl;
+	struct rte_crypto_sgl *src_sgl;
 	/** array of pointers to cipher IV */
 	struct rte_crypto_va_iova_ptr *iv;
 	/** array of pointers to digest */
diff --git a/lib/ipsec/misc.h b/lib/ipsec/misc.h
index 79b9a20762..58ff538141 100644
--- a/lib/ipsec/misc.h
+++ b/lib/ipsec/misc.h
@@ -136,7 +136,7 @@ cpu_crypto_bulk(const struct rte_ipsec_session *ss,
 		/* not enough space in vec[] to hold all segments */
 		if (vcnt < 0) {
 			/* fill the request structure */
-			symvec.sgl = &vecpkt[j];
+			symvec.src_sgl = &vecpkt[j];
 			symvec.iv = &iv[j];
 			symvec.digest = &dgst[j];
 			symvec.aad = &aad[j];
@@ -160,7 +160,7 @@ cpu_crypto_bulk(const struct rte_ipsec_session *ss,
 	}
 
 	/* fill the request structure */
-	symvec.sgl = &vecpkt[j];
+	symvec.src_sgl = &vecpkt[j];
 	symvec.iv = &iv[j];
 	symvec.aad = &aad[j];
 	symvec.digest = &dgst[j];
-- 
2.17.1


^ permalink raw reply	[flat|nested] 115+ messages in thread

* [dpdk-dev] [PATCH v5 02/15] crypto: add total raw buffer length
  2021-10-17 16:16       ` [dpdk-dev] [PATCH v5 00/15] crypto: add raw vector support in DPAAx Hemant Agrawal
  2021-10-17 16:16         ` [dpdk-dev] [PATCH v5 01/15] crypto: change sgl to src_sgl in vector Hemant Agrawal
@ 2021-10-17 16:16         ` Hemant Agrawal
  2021-10-17 16:16         ` [dpdk-dev] [PATCH v5 03/15] crypto: add dest_sgl in raw vector APIs Hemant Agrawal
                           ` (13 subsequent siblings)
  15 siblings, 0 replies; 115+ messages in thread
From: Hemant Agrawal @ 2021-10-17 16:16 UTC (permalink / raw)
  To: dev, gakhil; +Cc: konstantin.ananyev, roy.fan.zhang, Gagandeep Singh

From: Gagandeep Singh <g.singh@nxp.com>

The current crypto raw data vector is extended to support
rte_security use cases, where the total buffer length is needed to
know how much additional memory space is available in the buffer
beyond the data length, so that the driver/HW can write expanded-size
data after encryption.
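
For illustration only, a sketch of how a driver could use the new
field to check for tailroom before expanding the data (expansion_len
is a hypothetical name for the number of bytes to be appended):

    struct rte_crypto_vec *v = &vec->src_sgl[i].vec[0];
    uint32_t tailroom = v->tot_len - v->len;

    if (tailroom < expansion_len)   /* e.g. ICV or padding */
        return -ENOSPC;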

Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
Acked-by: Akhil Goyal <gakhil@marvell.com>
Acked-by: Konstantin Ananyev <konstantin.ananyev@intel.com>
---
 doc/guides/rel_notes/deprecation.rst   | 7 -------
 doc/guides/rel_notes/release_21_11.rst | 6 ++++++
 lib/cryptodev/rte_crypto_sym.h         | 4 ++++
 3 files changed, 10 insertions(+), 7 deletions(-)

diff --git a/doc/guides/rel_notes/deprecation.rst b/doc/guides/rel_notes/deprecation.rst
index 09b54fdef3..0e04ecf743 100644
--- a/doc/guides/rel_notes/deprecation.rst
+++ b/doc/guides/rel_notes/deprecation.rst
@@ -179,13 +179,6 @@ Deprecation Notices
   This field will be null for inplace processing.
   This change is targeted for DPDK 21.11.
 
-* cryptodev: The structure ``rte_crypto_vec`` would be updated to add
-  ``tot_len`` to support total buffer length.
-  This is required for security cases like IPsec and PDCP encryption offload
-  to know how much additional memory space is available in buffer other than
-  data length so that driver/HW can write expanded size data after encryption.
-  This change is targeted for DPDK 21.11.
-
 * cryptodev: Hide structures ``rte_cryptodev_sym_session`` and
   ``rte_cryptodev_asym_session`` to remove unnecessary indirection between
   session and the private data of session. An opaque pointer can be exposed
diff --git a/doc/guides/rel_notes/release_21_11.rst b/doc/guides/rel_notes/release_21_11.rst
index 256037b639..ba036c5b3f 100644
--- a/doc/guides/rel_notes/release_21_11.rst
+++ b/doc/guides/rel_notes/release_21_11.rst
@@ -232,6 +232,12 @@ API Changes
 * cryptodev: The field ``dataunit_len`` of the ``struct rte_crypto_cipher_xform``
   moved to the end of the structure and extended to ``uint32_t``.
 
+* cryptodev: The structure ``rte_crypto_vec`` updated to add ``tot_len`` to
+  support total buffer length. This is required for security cases like IPsec
+  and PDCP encryption offload to know how much additional memory space is
+  available in buffer other than data length so that driver/HW can write
+  expanded size data after encryption.
+
 
 ABI Changes
 -----------
diff --git a/lib/cryptodev/rte_crypto_sym.h b/lib/cryptodev/rte_crypto_sym.h
index f8cb2ccca0..1f2f0a572c 100644
--- a/lib/cryptodev/rte_crypto_sym.h
+++ b/lib/cryptodev/rte_crypto_sym.h
@@ -37,6 +37,8 @@ struct rte_crypto_vec {
 	rte_iova_t iova;
 	/** length of the data buffer */
 	uint32_t len;
+	/** total buffer length */
+	uint32_t tot_len;
 };
 
 /**
@@ -963,6 +965,7 @@ rte_crypto_mbuf_to_vec(const struct rte_mbuf *mb, uint32_t ofs, uint32_t len,
 
 	vec[0].base = rte_pktmbuf_mtod_offset(mb, void *, ofs);
 	vec[0].iova = rte_pktmbuf_iova_offset(mb, ofs);
+	vec[0].tot_len = mb->buf_len - rte_pktmbuf_headroom(mb) - ofs;
 
 	/* whole data lies in the first segment */
 	seglen = mb->data_len - ofs;
@@ -978,6 +981,7 @@ rte_crypto_mbuf_to_vec(const struct rte_mbuf *mb, uint32_t ofs, uint32_t len,
 
 		vec[i].base = rte_pktmbuf_mtod(nseg, void *);
 		vec[i].iova = rte_pktmbuf_iova(nseg);
+		vec[i].tot_len = mb->buf_len - rte_pktmbuf_headroom(mb) - ofs;
 
 		seglen = nseg->data_len;
 		if (left <= seglen) {
-- 
2.17.1


^ permalink raw reply	[flat|nested] 115+ messages in thread

* [dpdk-dev] [PATCH v5 03/15] crypto: add dest_sgl in raw vector APIs
  2021-10-17 16:16       ` [dpdk-dev] [PATCH v5 00/15] crypto: add raw vector support in DPAAx Hemant Agrawal
  2021-10-17 16:16         ` [dpdk-dev] [PATCH v5 01/15] crypto: change sgl to src_sgl in vector Hemant Agrawal
  2021-10-17 16:16         ` [dpdk-dev] [PATCH v5 02/15] crypto: add total raw buffer length Hemant Agrawal
@ 2021-10-17 16:16         ` Hemant Agrawal
  2021-10-17 16:16         ` [dpdk-dev] [PATCH v5 04/15] crypto: fix raw process for multi-seg case Hemant Agrawal
                           ` (12 subsequent siblings)
  15 siblings, 0 replies; 115+ messages in thread
From: Hemant Agrawal @ 2021-10-17 16:16 UTC (permalink / raw)
  To: dev, gakhil; +Cc: konstantin.ananyev, roy.fan.zhang

The structure rte_crypto_sym_vec is updated to
add dest_sgl to support out of place processing.
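
A minimal usage sketch, assuming in_sgl and out_sgl are populated
rte_crypto_sgl instances:

    vec.src_sgl  = &in_sgl;
    vec.dest_sgl = &out_sgl;   /* out of place: output lands here */
    /* or keep vec.dest_sgl = NULL for inplace processing */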

Signed-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>
Acked-by: Akhil Goyal <gakhil@marvell.com>
Acked-by: Konstantin Ananyev <konstantin.ananyev@intel.com>
---
 doc/guides/rel_notes/deprecation.rst   | 5 -----
 doc/guides/rel_notes/release_21_11.rst | 5 ++++-
 lib/cryptodev/rte_crypto_sym.h         | 2 ++
 3 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/doc/guides/rel_notes/deprecation.rst b/doc/guides/rel_notes/deprecation.rst
index 0e04ecf743..1d743c3a17 100644
--- a/doc/guides/rel_notes/deprecation.rst
+++ b/doc/guides/rel_notes/deprecation.rst
@@ -174,11 +174,6 @@ Deprecation Notices
   and ``rte_vhost_driver_set_protocol_features`` functions will be removed
   and the API functions will be made stable in DPDK 21.11.
 
-* cryptodev: The structure ``rte_crypto_sym_vec`` would be updated to add
-  ``dest_sgl`` to support out of place processing.
-  This field will be null for inplace processing.
-  This change is targeted for DPDK 21.11.
-
 * cryptodev: Hide structures ``rte_cryptodev_sym_session`` and
   ``rte_cryptodev_asym_session`` to remove unnecessary indirection between
   session and the private data of session. An opaque pointer can be exposed
diff --git a/doc/guides/rel_notes/release_21_11.rst b/doc/guides/rel_notes/release_21_11.rst
index ba036c5b3f..6e274a131f 100644
--- a/doc/guides/rel_notes/release_21_11.rst
+++ b/doc/guides/rel_notes/release_21_11.rst
@@ -232,12 +232,15 @@ API Changes
 * cryptodev: The field ``dataunit_len`` of the ``struct rte_crypto_cipher_xform``
   moved to the end of the structure and extended to ``uint32_t``.
 
-* cryptodev: The structure ``rte_crypto_vec`` updated to add ``tot_len`` to
+* cryptodev: The structure ``rte_crypto_vec`` is updated to add ``tot_len`` to
   support total buffer length. This is required for security cases like IPsec
   and PDCP encryption offload to know how much additional memory space is
   available in buffer other than data length so that driver/HW can write
   expanded size data after encryption.
 
+* cryptodev: The structure ``rte_crypto_sym_vec`` is updated to add ``dest_sgl``
+  to support out of place processing. This field will be null for inplace
+  processing.
 
 ABI Changes
 -----------
diff --git a/lib/cryptodev/rte_crypto_sym.h b/lib/cryptodev/rte_crypto_sym.h
index 1f2f0a572c..c907b1646d 100644
--- a/lib/cryptodev/rte_crypto_sym.h
+++ b/lib/cryptodev/rte_crypto_sym.h
@@ -72,6 +72,8 @@ struct rte_crypto_sym_vec {
 	uint32_t num;
 	/** array of SGL vectors */
 	struct rte_crypto_sgl *src_sgl;
+	/** array of SGL vectors for OOP, keep it NULL for inplace*/
+	struct rte_crypto_sgl *dest_sgl;
 	/** array of pointers to cipher IV */
 	struct rte_crypto_va_iova_ptr *iv;
 	/** array of pointers to digest */
-- 
2.17.1


^ permalink raw reply	[flat|nested] 115+ messages in thread

* [dpdk-dev] [PATCH v5 04/15] crypto: fix raw process for multi-seg case
  2021-10-17 16:16       ` [dpdk-dev] [PATCH v5 00/15] crypto: add raw vector support in DPAAx Hemant Agrawal
                           ` (2 preceding siblings ...)
  2021-10-17 16:16         ` [dpdk-dev] [PATCH v5 03/15] crypto: add dest_sgl in raw vector APIs Hemant Agrawal
@ 2021-10-17 16:16         ` Hemant Agrawal
  2021-10-17 16:16         ` [dpdk-dev] [PATCH v5 05/15] crypto/dpaa2_sec: support raw datapath APIs Hemant Agrawal
                           ` (11 subsequent siblings)
  15 siblings, 0 replies; 115+ messages in thread
From: Hemant Agrawal @ 2021-10-17 16:16 UTC (permalink / raw)
  To: dev, gakhil
  Cc: konstantin.ananyev, roy.fan.zhang, Gagandeep Singh,
	marcinx.smoczynski, stable

From: Gagandeep Singh <g.singh@nxp.com>

If no next segment is available, the "for" loop never executes, yet
the function still returns i + 1, i.e. 2, which is wrong as it has
filled only 1 buffer.
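
A short trace of the failure mode, assuming the requested len exceeds
the first segment's data while mb->next is NULL:

    /* i starts at 1; the loop body never runs because nseg == NULL */
    /* before: return i + 1 = 2, although only vec[0] was filled    */
    /* after:  return i = 1, matching the number of filled vectors  */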

Fixes: 7adf992fb9bf ("cryptodev: introduce CPU crypto API")
Cc: marcinx.smoczynski@intel.com
Cc: stable@dpdk.org

Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
Acked-by: Konstantin Ananyev <konstantin.ananyev@intel.com>
---
 lib/cryptodev/rte_crypto_sym.h | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/lib/cryptodev/rte_crypto_sym.h b/lib/cryptodev/rte_crypto_sym.h
index c907b1646d..daa090b978 100644
--- a/lib/cryptodev/rte_crypto_sym.h
+++ b/lib/cryptodev/rte_crypto_sym.h
@@ -990,6 +990,7 @@ rte_crypto_mbuf_to_vec(const struct rte_mbuf *mb, uint32_t ofs, uint32_t len,
 			/* whole requested data is completed */
 			vec[i].len = left;
 			left = 0;
+			i++;
 			break;
 		}
 
@@ -999,7 +1000,7 @@ rte_crypto_mbuf_to_vec(const struct rte_mbuf *mb, uint32_t ofs, uint32_t len,
 	}
 
 	RTE_ASSERT(left == 0);
-	return i + 1;
+	return i;
 }
 
 
-- 
2.17.1


^ permalink raw reply	[flat|nested] 115+ messages in thread

* [dpdk-dev] [PATCH v5 05/15] crypto/dpaa2_sec: support raw datapath APIs
  2021-10-17 16:16       ` [dpdk-dev] [PATCH v5 00/15] crypto: add raw vector support in DPAAx Hemant Agrawal
                           ` (3 preceding siblings ...)
  2021-10-17 16:16         ` [dpdk-dev] [PATCH v5 04/15] crypto: fix raw process for multi-seg case Hemant Agrawal
@ 2021-10-17 16:16         ` Hemant Agrawal
  2021-10-17 16:16         ` [dpdk-dev] [PATCH v5 06/15] crypto/dpaa2_sec: support AUTH only with raw buffer APIs Hemant Agrawal
                           ` (10 subsequent siblings)
  15 siblings, 0 replies; 115+ messages in thread
From: Hemant Agrawal @ 2021-10-17 16:16 UTC (permalink / raw)
  To: dev, gakhil; +Cc: konstantin.ananyev, roy.fan.zhang, Gagandeep Singh

From: Gagandeep Singh <g.singh@nxp.com>

This patch adds the framework for raw API support.
This initial patch covers only the cipher-only part.
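
For context, a condensed sketch of how an application drives a PMD
through the generic raw datapath API (error handling omitted; dev_id,
qp_id, sess, vec, ofs and user_data are assumed to be set up already):

    size_t sz = sizeof(struct rte_crypto_raw_dp_ctx) +
                rte_cryptodev_get_raw_dp_ctx_size(dev_id);
    struct rte_crypto_raw_dp_ctx *ctx = rte_zmalloc(NULL, sz, 0);
    union rte_cryptodev_session_ctx sess_ctx = { .crypto_sess = sess };
    int enq_status;
    uint32_t n;

    rte_cryptodev_configure_raw_dp_ctx(dev_id, qp_id, ctx,
            RTE_CRYPTO_OP_WITH_SESSION, sess_ctx, 0);
    n = rte_cryptodev_raw_enqueue_burst(ctx, &vec, ofs,
            user_data, &enq_status);
    rte_cryptodev_raw_enqueue_done(ctx, n);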

Signed-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>
Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
---
 doc/guides/rel_notes/release_21_11.rst      |   1 +
 drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c |  13 +-
 drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h   |  60 +-
 drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c | 595 ++++++++++++++++++++
 drivers/crypto/dpaa2_sec/meson.build        |   3 +-
 5 files changed, 643 insertions(+), 29 deletions(-)
 create mode 100644 drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c

diff --git a/doc/guides/rel_notes/release_21_11.rst b/doc/guides/rel_notes/release_21_11.rst
index 6e274a131f..0566aacf19 100644
--- a/doc/guides/rel_notes/release_21_11.rst
+++ b/doc/guides/rel_notes/release_21_11.rst
@@ -128,6 +128,7 @@ New Features
 * **Updated NXP dpaa2_sec crypto PMD.**
 
   * Added PDCP short MAC-I support.
+  * Added raw vector datapath API support
 
 * **Updated the ACC100 bbdev PMD.**
 
diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c b/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
index dfa72f3f93..4eb3615250 100644
--- a/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
@@ -49,15 +49,8 @@
 #define FSL_MC_DPSECI_DEVID     3
 
 #define NO_PREFETCH 0
-/* FLE_POOL_NUM_BUFS is set as per the ipsec-secgw application */
-#define FLE_POOL_NUM_BUFS	32000
-#define FLE_POOL_BUF_SIZE	256
-#define FLE_POOL_CACHE_SIZE	512
-#define FLE_SG_MEM_SIZE(num)	(FLE_POOL_BUF_SIZE + ((num) * 32))
-#define SEC_FLC_DHR_OUTBOUND	-114
-#define SEC_FLC_DHR_INBOUND	0
 
-static uint8_t cryptodev_driver_id;
+uint8_t cryptodev_driver_id;
 
 #ifdef RTE_LIB_SECURITY
 static inline int
@@ -3828,6 +3821,9 @@ static struct rte_cryptodev_ops crypto_ops = {
 	.sym_session_get_size     = dpaa2_sec_sym_session_get_size,
 	.sym_session_configure    = dpaa2_sec_sym_session_configure,
 	.sym_session_clear        = dpaa2_sec_sym_session_clear,
+	/* Raw data-path API related operations */
+	.sym_get_raw_dp_ctx_size = dpaa2_sec_get_dp_ctx_size,
+	.sym_configure_raw_dp_ctx = dpaa2_sec_configure_raw_dp_ctx,
 };
 
 #ifdef RTE_LIB_SECURITY
@@ -3910,6 +3906,7 @@ dpaa2_sec_dev_init(struct rte_cryptodev *cryptodev)
 			RTE_CRYPTODEV_FF_HW_ACCELERATED |
 			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
 			RTE_CRYPTODEV_FF_SECURITY |
+			RTE_CRYPTODEV_FF_SYM_RAW_DP |
 			RTE_CRYPTODEV_FF_IN_PLACE_SGL |
 			RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT |
 			RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h b/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h
index 8dee0a4bda..e9b888186e 100644
--- a/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h
@@ -15,6 +15,16 @@
 #define CRYPTODEV_NAME_DPAA2_SEC_PMD	crypto_dpaa2_sec
 /**< NXP DPAA2 - SEC PMD device name */
 
+extern uint8_t cryptodev_driver_id;
+
+/* FLE_POOL_NUM_BUFS is set as per the ipsec-secgw application */
+#define FLE_POOL_NUM_BUFS	32000
+#define FLE_POOL_BUF_SIZE	256
+#define FLE_POOL_CACHE_SIZE	512
+#define FLE_SG_MEM_SIZE(num)	(FLE_POOL_BUF_SIZE + ((num) * 32))
+#define SEC_FLC_DHR_OUTBOUND	-114
+#define SEC_FLC_DHR_INBOUND	0
+
 #define MAX_QUEUES		64
 #define MAX_DESC_SIZE		64
 /** private data structure for each DPAA2_SEC device */
@@ -158,6 +168,24 @@ struct dpaa2_pdcp_ctxt {
 	uint32_t hfn_threshold;	/*!< HFN Threashold for key renegotiation */
 };
 #endif
+
+typedef int (*dpaa2_sec_build_fd_t)(
+	void *qp, uint8_t *drv_ctx, struct rte_crypto_vec *data_vec,
+	uint16_t n_data_vecs, union rte_crypto_sym_ofs ofs,
+	struct rte_crypto_va_iova_ptr *iv,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *aad_or_auth_iv,
+	void *user_data);
+
+typedef int (*dpaa2_sec_build_raw_dp_fd_t)(uint8_t *drv_ctx,
+		       struct rte_crypto_sgl *sgl,
+		       struct rte_crypto_va_iova_ptr *iv,
+		       struct rte_crypto_va_iova_ptr *digest,
+		       struct rte_crypto_va_iova_ptr *auth_iv,
+		       union rte_crypto_sym_ofs ofs,
+		       void *userdata,
+		       struct qbman_fd *fd);
+
 typedef struct dpaa2_sec_session_entry {
 	void *ctxt;
 	uint8_t ctxt_type;
@@ -165,6 +193,8 @@ typedef struct dpaa2_sec_session_entry {
 	enum rte_crypto_cipher_algorithm cipher_alg; /*!< Cipher Algorithm*/
 	enum rte_crypto_auth_algorithm auth_alg; /*!< Authentication Algorithm*/
 	enum rte_crypto_aead_algorithm aead_alg; /*!< AEAD Algorithm*/
+	dpaa2_sec_build_fd_t build_fd;
+	dpaa2_sec_build_raw_dp_fd_t build_raw_dp_fd;
 	union {
 		struct {
 			uint8_t *data;	/**< pointer to key data */
@@ -547,26 +577,6 @@ static const struct rte_cryptodev_capabilities dpaa2_sec_capabilities[] = {
 			}, }
 		}, }
 	},
-	{	/* NULL (CIPHER) */
-		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
-		{.sym = {
-			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
-			{.cipher = {
-				.algo = RTE_CRYPTO_CIPHER_NULL,
-				.block_size = 1,
-				.key_size = {
-					.min = 0,
-					.max = 0,
-					.increment = 0
-				},
-				.iv_size = {
-					.min = 0,
-					.max = 0,
-					.increment = 0
-				}
-			}, },
-		}, }
-	},
 	{	/* AES CBC */
 		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
 		{.sym = {
@@ -983,4 +993,14 @@ calc_chksum(void *buffer, int len)
 	return  result;
 }
 
+int
+dpaa2_sec_configure_raw_dp_ctx(struct rte_cryptodev *dev, uint16_t qp_id,
+	struct rte_crypto_raw_dp_ctx *raw_dp_ctx,
+	enum rte_crypto_op_sess_type sess_type,
+	union rte_cryptodev_session_ctx session_ctx, uint8_t is_update);
+
+int
+dpaa2_sec_get_dp_ctx_size(struct rte_cryptodev *dev);
+
+
 #endif /* _DPAA2_SEC_PMD_PRIVATE_H_ */
diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c b/drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c
new file mode 100644
index 0000000000..8925c8e938
--- /dev/null
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c
@@ -0,0 +1,595 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2021 NXP
+ */
+
+#include <cryptodev_pmd.h>
+#include <rte_fslmc.h>
+#include <fslmc_vfio.h>
+#include <dpaa2_hw_pvt.h>
+#include <dpaa2_hw_dpio.h>
+
+#include "dpaa2_sec_priv.h"
+#include "dpaa2_sec_logs.h"
+
+struct dpaa2_sec_raw_dp_ctx {
+	dpaa2_sec_session *session;
+	uint32_t tail;
+	uint32_t head;
+	uint16_t cached_enqueue;
+	uint16_t cached_dequeue;
+};
+
+static int
+build_raw_dp_chain_fd(uint8_t *drv_ctx,
+		       struct rte_crypto_sgl *sgl,
+		       struct rte_crypto_va_iova_ptr *iv,
+		       struct rte_crypto_va_iova_ptr *digest,
+		       struct rte_crypto_va_iova_ptr *auth_iv,
+		       union rte_crypto_sym_ofs ofs,
+		       void *userdata,
+		       struct qbman_fd *fd)
+{
+	RTE_SET_USED(drv_ctx);
+	RTE_SET_USED(sgl);
+	RTE_SET_USED(iv);
+	RTE_SET_USED(digest);
+	RTE_SET_USED(auth_iv);
+	RTE_SET_USED(ofs);
+	RTE_SET_USED(userdata);
+	RTE_SET_USED(fd);
+
+	return 0;
+}
+
+static int
+build_raw_dp_aead_fd(uint8_t *drv_ctx,
+		       struct rte_crypto_sgl *sgl,
+		       struct rte_crypto_va_iova_ptr *iv,
+		       struct rte_crypto_va_iova_ptr *digest,
+		       struct rte_crypto_va_iova_ptr *auth_iv,
+		       union rte_crypto_sym_ofs ofs,
+		       void *userdata,
+		       struct qbman_fd *fd)
+{
+	RTE_SET_USED(drv_ctx);
+	RTE_SET_USED(sgl);
+	RTE_SET_USED(iv);
+	RTE_SET_USED(digest);
+	RTE_SET_USED(auth_iv);
+	RTE_SET_USED(ofs);
+	RTE_SET_USED(userdata);
+	RTE_SET_USED(fd);
+
+	return 0;
+}
+
+static int
+build_raw_dp_auth_fd(uint8_t *drv_ctx,
+		       struct rte_crypto_sgl *sgl,
+		       struct rte_crypto_va_iova_ptr *iv,
+		       struct rte_crypto_va_iova_ptr *digest,
+		       struct rte_crypto_va_iova_ptr *auth_iv,
+		       union rte_crypto_sym_ofs ofs,
+		       void *userdata,
+		       struct qbman_fd *fd)
+{
+	RTE_SET_USED(drv_ctx);
+	RTE_SET_USED(sgl);
+	RTE_SET_USED(iv);
+	RTE_SET_USED(digest);
+	RTE_SET_USED(auth_iv);
+	RTE_SET_USED(ofs);
+	RTE_SET_USED(userdata);
+	RTE_SET_USED(fd);
+
+	return 0;
+}
+
+static int
+build_raw_dp_proto_fd(uint8_t *drv_ctx,
+		       struct rte_crypto_sgl *sgl,
+		       struct rte_crypto_va_iova_ptr *iv,
+		       struct rte_crypto_va_iova_ptr *digest,
+		       struct rte_crypto_va_iova_ptr *auth_iv,
+		       union rte_crypto_sym_ofs ofs,
+		       void *userdata,
+		       struct qbman_fd *fd)
+{
+	RTE_SET_USED(drv_ctx);
+	RTE_SET_USED(sgl);
+	RTE_SET_USED(iv);
+	RTE_SET_USED(digest);
+	RTE_SET_USED(auth_iv);
+	RTE_SET_USED(ofs);
+	RTE_SET_USED(userdata);
+	RTE_SET_USED(fd);
+
+	return 0;
+}
+
+static int
+build_raw_dp_proto_compound_fd(uint8_t *drv_ctx,
+		       struct rte_crypto_sgl *sgl,
+		       struct rte_crypto_va_iova_ptr *iv,
+		       struct rte_crypto_va_iova_ptr *digest,
+		       struct rte_crypto_va_iova_ptr *auth_iv,
+		       union rte_crypto_sym_ofs ofs,
+		       void *userdata,
+		       struct qbman_fd *fd)
+{
+	RTE_SET_USED(drv_ctx);
+	RTE_SET_USED(sgl);
+	RTE_SET_USED(iv);
+	RTE_SET_USED(digest);
+	RTE_SET_USED(auth_iv);
+	RTE_SET_USED(ofs);
+	RTE_SET_USED(userdata);
+	RTE_SET_USED(fd);
+
+	return 0;
+}
+
+static int
+build_raw_dp_cipher_fd(uint8_t *drv_ctx,
+		       struct rte_crypto_sgl *sgl,
+		       struct rte_crypto_va_iova_ptr *iv,
+		       struct rte_crypto_va_iova_ptr *digest,
+		       struct rte_crypto_va_iova_ptr *auth_iv,
+		       union rte_crypto_sym_ofs ofs,
+		       void *userdata,
+		       struct qbman_fd *fd)
+{
+	RTE_SET_USED(digest);
+	RTE_SET_USED(auth_iv);
+
+	dpaa2_sec_session *sess =
+		((struct dpaa2_sec_raw_dp_ctx *)drv_ctx)->session;
+	struct qbman_fle *ip_fle, *op_fle, *sge, *fle;
+	int total_len = 0, data_len = 0, data_offset;
+	struct sec_flow_context *flc;
+	struct ctxt_priv *priv = sess->ctxt;
+	unsigned int i;
+
+	for (i = 0; i < sgl->num; i++)
+		total_len += sgl->vec[i].len;
+
+	data_len = total_len - ofs.ofs.cipher.head - ofs.ofs.cipher.tail;
+	data_offset = ofs.ofs.cipher.head;
+
+	if (sess->cipher_alg == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
+		sess->cipher_alg == RTE_CRYPTO_CIPHER_ZUC_EEA3) {
+		if ((data_len & 7) || (data_offset & 7)) {
+			DPAA2_SEC_ERR("CIPHER: len/offset must be full bytes");
+			return -ENOTSUP;
+		}
+
+		data_len = data_len >> 3;
+		data_offset = data_offset >> 3;
+	}
+
+	/* first FLE entry used to store mbuf and session ctxt */
+	fle = (struct qbman_fle *)rte_malloc(NULL,
+			FLE_SG_MEM_SIZE(2*sgl->num),
+			RTE_CACHE_LINE_SIZE);
+	if (!fle) {
+		DPAA2_SEC_ERR("RAW CIPHER SG: Memory alloc failed for SGE");
+		return -ENOMEM;
+	}
+	memset(fle, 0, FLE_SG_MEM_SIZE(2*sgl->num));
+	/* first FLE entry used to store userdata and session ctxt */
+	DPAA2_SET_FLE_ADDR(fle, (size_t)userdata);
+	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
+
+	op_fle = fle + 1;
+	ip_fle = fle + 2;
+	sge = fle + 3;
+
+	flc = &priv->flc_desc[0].flc;
+
+	DPAA2_SEC_DP_DEBUG(
+		"RAW CIPHER SG: cipher_off: 0x%x/length %d, ivlen=%d\n",
+		data_offset,
+		data_len,
+		sess->iv.length);
+
+	/* o/p fle */
+	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));
+	op_fle->length = data_len;
+	DPAA2_SET_FLE_SG_EXT(op_fle);
+
+	/* o/p 1st seg */
+	DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
+	DPAA2_SET_FLE_OFFSET(sge, data_offset);
+	sge->length = sgl->vec[0].len - data_offset;
+
+	/* o/p segs */
+	for (i = 1; i < sgl->num; i++) {
+		sge++;
+		DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
+		DPAA2_SET_FLE_OFFSET(sge, 0);
+		sge->length = sgl->vec[i].len;
+	}
+	DPAA2_SET_FLE_FIN(sge);
+
+	DPAA2_SEC_DP_DEBUG(
+		"RAW CIPHER SG: 1 - flc = %p, fle = %p FLEaddr = %x-%x, len %d\n",
+		flc, fle, fle->addr_hi, fle->addr_lo,
+		fle->length);
+
+	/* i/p fle */
+	sge++;
+	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
+	ip_fle->length = sess->iv.length + data_len;
+	DPAA2_SET_FLE_SG_EXT(ip_fle);
+
+	/* i/p IV */
+	DPAA2_SET_FLE_ADDR(sge, iv->iova);
+	DPAA2_SET_FLE_OFFSET(sge, 0);
+	sge->length = sess->iv.length;
+
+	sge++;
+
+	/* i/p 1st seg */
+	DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
+	DPAA2_SET_FLE_OFFSET(sge, data_offset);
+	sge->length = sgl->vec[0].len - data_offset;
+
+	/* i/p segs */
+	for (i = 1; i < sgl->num; i++) {
+		sge++;
+		DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
+		DPAA2_SET_FLE_OFFSET(sge, 0);
+		sge->length = sgl->vec[i].len;
+	}
+	DPAA2_SET_FLE_FIN(sge);
+	DPAA2_SET_FLE_FIN(ip_fle);
+
+	/* sg fd */
+	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
+	DPAA2_SET_FD_LEN(fd, ip_fle->length);
+	DPAA2_SET_FD_COMPOUND_FMT(fd);
+	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
+
+	DPAA2_SEC_DP_DEBUG(
+		"RAW CIPHER SG: fdaddr =%" PRIx64 " off =%d, len =%d\n",
+		DPAA2_GET_FD_ADDR(fd),
+		DPAA2_GET_FD_OFFSET(fd),
+		DPAA2_GET_FD_LEN(fd));
+
+	return 0;
+}
+
+static __rte_always_inline uint32_t
+dpaa2_sec_raw_enqueue_burst(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
+	void *user_data[], int *status)
+{
+	RTE_SET_USED(user_data);
+	uint32_t loop;
+	int32_t ret;
+	struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
+	uint32_t frames_to_send, retry_count;
+	struct qbman_eq_desc eqdesc;
+	struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp_data;
+	dpaa2_sec_session *sess =
+		((struct dpaa2_sec_raw_dp_ctx *)drv_ctx)->session;
+	struct qbman_swp *swp;
+	uint16_t num_tx = 0;
+	uint32_t flags[MAX_TX_RING_SLOTS] = {0};
+
+	if (unlikely(vec->num == 0))
+		return 0;
+
+	if (sess == NULL) {
+		DPAA2_SEC_ERR("sessionless raw crypto not supported");
+		return 0;
+	}
+	/*Prepare enqueue descriptor*/
+	qbman_eq_desc_clear(&eqdesc);
+	qbman_eq_desc_set_no_orp(&eqdesc, DPAA2_EQ_RESP_ERR_FQ);
+	qbman_eq_desc_set_response(&eqdesc, 0, 0);
+	qbman_eq_desc_set_fq(&eqdesc, dpaa2_qp->tx_vq.fqid);
+
+	if (!DPAA2_PER_LCORE_DPIO) {
+		ret = dpaa2_affine_qbman_swp();
+		if (ret) {
+			DPAA2_SEC_ERR(
+				"Failed to allocate IO portal, tid: %d\n",
+				rte_gettid());
+			return 0;
+		}
+	}
+	swp = DPAA2_PER_LCORE_PORTAL;
+
+	while (vec->num) {
+		frames_to_send = (vec->num > dpaa2_eqcr_size) ?
+			dpaa2_eqcr_size : vec->num;
+
+		for (loop = 0; loop < frames_to_send; loop++) {
+			/*Clear the unused FD fields before sending*/
+			memset(&fd_arr[loop], 0, sizeof(struct qbman_fd));
+			ret = sess->build_raw_dp_fd(drv_ctx,
+						    &vec->src_sgl[loop],
+						    &vec->iv[loop],
+						    &vec->digest[loop],
+						    &vec->auth_iv[loop],
+						    ofs,
+						    user_data[loop],
+						    &fd_arr[loop]);
+			if (ret) {
+				DPAA2_SEC_ERR("error: Improper packet contents"
+					      " for crypto operation");
+				goto skip_tx;
+			}
+			status[loop] = 1;
+		}
+
+		loop = 0;
+		retry_count = 0;
+		while (loop < frames_to_send) {
+			ret = qbman_swp_enqueue_multiple(swp, &eqdesc,
+							 &fd_arr[loop],
+							 &flags[loop],
+							 frames_to_send - loop);
+			if (unlikely(ret < 0)) {
+				retry_count++;
+				if (retry_count > DPAA2_MAX_TX_RETRY_COUNT) {
+					num_tx += loop;
+					vec->num -= loop;
+					goto skip_tx;
+				}
+			} else {
+				loop += ret;
+				retry_count = 0;
+			}
+		}
+
+		num_tx += loop;
+		vec->num -= loop;
+	}
+skip_tx:
+	dpaa2_qp->tx_vq.tx_pkts += num_tx;
+	dpaa2_qp->tx_vq.err_pkts += vec->num;
+
+	return num_tx;
+}
+
+static __rte_always_inline int
+dpaa2_sec_raw_enqueue(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_vec *data_vec,
+	uint16_t n_data_vecs, union rte_crypto_sym_ofs ofs,
+	struct rte_crypto_va_iova_ptr *iv,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *aad_or_auth_iv,
+	void *user_data)
+{
+	RTE_SET_USED(qp_data);
+	RTE_SET_USED(drv_ctx);
+	RTE_SET_USED(data_vec);
+	RTE_SET_USED(n_data_vecs);
+	RTE_SET_USED(ofs);
+	RTE_SET_USED(iv);
+	RTE_SET_USED(digest);
+	RTE_SET_USED(aad_or_auth_iv);
+	RTE_SET_USED(user_data);
+
+	return 0;
+}
+
+static inline void *
+sec_fd_to_userdata(const struct qbman_fd *fd)
+{
+	struct qbman_fle *fle;
+	void *userdata;
+	fle = (struct qbman_fle *)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));
+
+	DPAA2_SEC_DP_DEBUG("FLE addr = %x - %x, offset = %x\n",
+			   fle->addr_hi, fle->addr_lo, fle->fin_bpid_offset);
+	userdata = (struct rte_crypto_op *)DPAA2_GET_FLE_ADDR((fle - 1));
+	/* free the fle memory */
+	rte_free((void *)(fle-1));
+
+	return userdata;
+}
+
+static __rte_always_inline uint32_t
+dpaa2_sec_raw_dequeue_burst(void *qp_data, uint8_t *drv_ctx,
+	rte_cryptodev_raw_get_dequeue_count_t get_dequeue_count,
+	uint32_t max_nb_to_dequeue,
+	rte_cryptodev_raw_post_dequeue_t post_dequeue,
+	void **out_user_data, uint8_t is_user_data_array,
+	uint32_t *n_success, int *dequeue_status)
+{
+	RTE_SET_USED(drv_ctx);
+	RTE_SET_USED(get_dequeue_count);
+
+	/* Function is responsible to receive frames for a given device and VQ*/
+	struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp_data;
+	struct qbman_result *dq_storage;
+	uint32_t fqid = dpaa2_qp->rx_vq.fqid;
+	int ret, num_rx = 0;
+	uint8_t is_last = 0, status;
+	struct qbman_swp *swp;
+	const struct qbman_fd *fd;
+	struct qbman_pull_desc pulldesc;
+	void *user_data;
+	uint32_t nb_ops = max_nb_to_dequeue;
+
+	if (!DPAA2_PER_LCORE_DPIO) {
+		ret = dpaa2_affine_qbman_swp();
+		if (ret) {
+			DPAA2_SEC_ERR(
+				"Failed to allocate IO portal, tid: %d\n",
+				rte_gettid());
+			return 0;
+		}
+	}
+	swp = DPAA2_PER_LCORE_PORTAL;
+	dq_storage = dpaa2_qp->rx_vq.q_storage->dq_storage[0];
+
+	qbman_pull_desc_clear(&pulldesc);
+	qbman_pull_desc_set_numframes(&pulldesc,
+				      (nb_ops > dpaa2_dqrr_size) ?
+				      dpaa2_dqrr_size : nb_ops);
+	qbman_pull_desc_set_fq(&pulldesc, fqid);
+	qbman_pull_desc_set_storage(&pulldesc, dq_storage,
+				    (uint64_t)DPAA2_VADDR_TO_IOVA(dq_storage),
+				    1);
+
+	/*Issue a volatile dequeue command. */
+	while (1) {
+		if (qbman_swp_pull(swp, &pulldesc)) {
+			DPAA2_SEC_WARN(
+				"SEC VDQ command is not issued : QBMAN busy");
+			/* Portal was busy, try again */
+			continue;
+		}
+		break;
+	};
+
+	/* Receive the packets till Last Dequeue entry is found with
+	 * respect to the above issues PULL command.
+	 */
+	while (!is_last) {
+		/* Check if the previous issued command is completed.
+		 * Also seems like the SWP is shared between the Ethernet Driver
+		 * and the SEC driver.
+		 */
+		while (!qbman_check_command_complete(dq_storage))
+			;
+
+		/* Loop until the dq_storage is updated with
+		 * new token by QBMAN
+		 */
+		while (!qbman_check_new_result(dq_storage))
+			;
+		/* Check whether Last Pull command is Expired and
+		 * setting Condition for Loop termination
+		 */
+		if (qbman_result_DQ_is_pull_complete(dq_storage)) {
+			is_last = 1;
+			/* Check for valid frame. */
+			status = (uint8_t)qbman_result_DQ_flags(dq_storage);
+			if (unlikely(
+				(status & QBMAN_DQ_STAT_VALIDFRAME) == 0)) {
+				DPAA2_SEC_DP_DEBUG("No frame is delivered\n");
+				continue;
+			}
+		}
+
+		fd = qbman_result_DQ_fd(dq_storage);
+		user_data = sec_fd_to_userdata(fd);
+		if (is_user_data_array)
+			out_user_data[num_rx] = user_data;
+		else
+			out_user_data[0] = user_data;
+		if (unlikely(fd->simple.frc)) {
+			/* TODO Parse SEC errors */
+			DPAA2_SEC_ERR("SEC returned Error - %x",
+				      fd->simple.frc);
+			status = RTE_CRYPTO_OP_STATUS_ERROR;
+		} else {
+			status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+		}
+		post_dequeue(user_data, num_rx, status);
+
+		num_rx++;
+		dq_storage++;
+	} /* End of Packet Rx loop */
+
+	dpaa2_qp->rx_vq.rx_pkts += num_rx;
+	*dequeue_status = 1;
+	*n_success = num_rx;
+
+	DPAA2_SEC_DP_DEBUG("SEC Received %d Packets\n", num_rx);
+	/*Return the total number of packets received to DPAA2 app*/
+	return num_rx;
+}
+
+static __rte_always_inline void *
+dpaa2_sec_raw_dequeue(void *qp_data, uint8_t *drv_ctx, int *dequeue_status,
+		enum rte_crypto_op_status *op_status)
+{
+	RTE_SET_USED(qp_data);
+	RTE_SET_USED(drv_ctx);
+	RTE_SET_USED(dequeue_status);
+	RTE_SET_USED(op_status);
+
+	return NULL;
+}
+
+static __rte_always_inline int
+dpaa2_sec_raw_enqueue_done(void *qp_data, uint8_t *drv_ctx, uint32_t n)
+{
+	RTE_SET_USED(qp_data);
+	RTE_SET_USED(drv_ctx);
+	RTE_SET_USED(n);
+
+	return 0;
+}
+
+static __rte_always_inline int
+dpaa2_sec_raw_dequeue_done(void *qp_data, uint8_t *drv_ctx, uint32_t n)
+{
+	RTE_SET_USED(qp_data);
+	RTE_SET_USED(drv_ctx);
+	RTE_SET_USED(n);
+
+	return 0;
+}
+
+int
+dpaa2_sec_configure_raw_dp_ctx(struct rte_cryptodev *dev, uint16_t qp_id,
+	struct rte_crypto_raw_dp_ctx *raw_dp_ctx,
+	enum rte_crypto_op_sess_type sess_type,
+	union rte_cryptodev_session_ctx session_ctx, uint8_t is_update)
+{
+	dpaa2_sec_session *sess;
+	struct dpaa2_sec_raw_dp_ctx *dp_ctx;
+	RTE_SET_USED(qp_id);
+
+	if (!is_update) {
+		memset(raw_dp_ctx, 0, sizeof(*raw_dp_ctx));
+		raw_dp_ctx->qp_data = dev->data->queue_pairs[qp_id];
+	}
+
+	if (sess_type == RTE_CRYPTO_OP_SECURITY_SESSION)
+		sess = (dpaa2_sec_session *)get_sec_session_private_data(
+				session_ctx.sec_sess);
+	else if (sess_type == RTE_CRYPTO_OP_WITH_SESSION)
+		sess = (dpaa2_sec_session *)get_sym_session_private_data(
+			session_ctx.crypto_sess, cryptodev_driver_id);
+	else
+		return -ENOTSUP;
+	raw_dp_ctx->dequeue_burst = dpaa2_sec_raw_dequeue_burst;
+	raw_dp_ctx->dequeue = dpaa2_sec_raw_dequeue;
+	raw_dp_ctx->dequeue_done = dpaa2_sec_raw_dequeue_done;
+	raw_dp_ctx->enqueue_burst = dpaa2_sec_raw_enqueue_burst;
+	raw_dp_ctx->enqueue = dpaa2_sec_raw_enqueue;
+	raw_dp_ctx->enqueue_done = dpaa2_sec_raw_enqueue_done;
+
+	if (sess->ctxt_type == DPAA2_SEC_CIPHER_HASH)
+		sess->build_raw_dp_fd = build_raw_dp_chain_fd;
+	else if (sess->ctxt_type == DPAA2_SEC_AEAD)
+		sess->build_raw_dp_fd = build_raw_dp_aead_fd;
+	else if (sess->ctxt_type == DPAA2_SEC_AUTH)
+		sess->build_raw_dp_fd = build_raw_dp_auth_fd;
+	else if (sess->ctxt_type == DPAA2_SEC_CIPHER)
+		sess->build_raw_dp_fd = build_raw_dp_cipher_fd;
+	else if (sess->ctxt_type == DPAA2_SEC_IPSEC)
+		sess->build_raw_dp_fd = build_raw_dp_proto_fd;
+	else if (sess->ctxt_type == DPAA2_SEC_PDCP)
+		sess->build_raw_dp_fd = build_raw_dp_proto_compound_fd;
+	else
+		return -ENOTSUP;
+	dp_ctx = (struct dpaa2_sec_raw_dp_ctx *)raw_dp_ctx->drv_ctx_data;
+	dp_ctx->session = sess;
+
+	return 0;
+}
+
+int
+dpaa2_sec_get_dp_ctx_size(__rte_unused struct rte_cryptodev *dev)
+{
+	return sizeof(struct dpaa2_sec_raw_dp_ctx);
+}
diff --git a/drivers/crypto/dpaa2_sec/meson.build b/drivers/crypto/dpaa2_sec/meson.build
index ea1d73a13d..e6e5abb3c1 100644
--- a/drivers/crypto/dpaa2_sec/meson.build
+++ b/drivers/crypto/dpaa2_sec/meson.build
@@ -1,5 +1,5 @@
 # SPDX-License-Identifier: BSD-3-Clause
-# Copyright 2018 NXP
+# Copyright 2018,2021 NXP
 
 if not is_linux
     build = false
@@ -9,6 +9,7 @@ endif
 deps += ['security', 'mempool_dpaa2']
 sources = files(
         'dpaa2_sec_dpseci.c',
+	'dpaa2_sec_raw_dp.c',
         'mc/dpseci.c',
 )
 
-- 
2.17.1


^ permalink raw reply	[flat|nested] 115+ messages in thread

* [dpdk-dev] [PATCH v5 06/15] crypto/dpaa2_sec: support AUTH only with raw buffer APIs
  2021-10-17 16:16       ` [dpdk-dev] [PATCH v5 00/15] crypto: add raw vector support in DPAAx Hemant Agrawal
                           ` (4 preceding siblings ...)
  2021-10-17 16:16         ` [dpdk-dev] [PATCH v5 05/15] crypto/dpaa2_sec: support raw datapath APIs Hemant Agrawal
@ 2021-10-17 16:16         ` Hemant Agrawal
  2021-10-17 16:16         ` [dpdk-dev] [PATCH v5 07/15] crypto/dpaa2_sec: support AUTHENC " Hemant Agrawal
                           ` (9 subsequent siblings)
  15 siblings, 0 replies; 115+ messages in thread
From: Hemant Agrawal @ 2021-10-17 16:16 UTC (permalink / raw)
  To: dev, gakhil; +Cc: konstantin.ananyev, roy.fan.zhang, Gagandeep Singh

From: Gagandeep Singh <g.singh@nxp.com>

This patch adds support for auth-only operations with the raw buffer APIs.
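
For illustration, how a caller delimits the region to authenticate
for an auth-only operation (hdr_len is a hypothetical header size):

    union rte_crypto_sym_ofs ofs = { .raw = 0 };

    ofs.ofs.auth.head = hdr_len;   /* bytes skipped before hashing  */
    ofs.ofs.auth.tail = 0;         /* bytes ignored at the tail end */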

Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
---
 drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h   |  21 ----
 drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c | 114 ++++++++++++++++++--
 2 files changed, 108 insertions(+), 27 deletions(-)

diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h b/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h
index e9b888186e..f397b756e8 100644
--- a/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h
@@ -231,27 +231,6 @@ typedef struct dpaa2_sec_session_entry {
 
 static const struct rte_cryptodev_capabilities dpaa2_sec_capabilities[] = {
 	/* Symmetric capabilities */
-	{	/* NULL (AUTH) */
-		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
-		{.sym = {
-			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
-			{.auth = {
-				.algo = RTE_CRYPTO_AUTH_NULL,
-				.block_size = 1,
-				.key_size = {
-					.min = 0,
-					.max = 0,
-					.increment = 0
-				},
-				.digest_size = {
-					.min = 0,
-					.max = 0,
-					.increment = 0
-				},
-				.iv_size = { 0 }
-			}, },
-		}, },
-	},
 	{	/* MD5 */
 		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
 		{.sym = {
diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c b/drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c
index 8925c8e938..471c81b9e7 100644
--- a/drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c
@@ -11,6 +11,8 @@
 #include "dpaa2_sec_priv.h"
 #include "dpaa2_sec_logs.h"
 
+#include <desc/algo.h>
+
 struct dpaa2_sec_raw_dp_ctx {
 	dpaa2_sec_session *session;
 	uint32_t tail;
@@ -73,14 +75,114 @@ build_raw_dp_auth_fd(uint8_t *drv_ctx,
 		       void *userdata,
 		       struct qbman_fd *fd)
 {
-	RTE_SET_USED(drv_ctx);
-	RTE_SET_USED(sgl);
 	RTE_SET_USED(iv);
-	RTE_SET_USED(digest);
 	RTE_SET_USED(auth_iv);
-	RTE_SET_USED(ofs);
-	RTE_SET_USED(userdata);
-	RTE_SET_USED(fd);
+
+	dpaa2_sec_session *sess =
+		((struct dpaa2_sec_raw_dp_ctx *)drv_ctx)->session;
+	struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
+	struct sec_flow_context *flc;
+	int total_len = 0, data_len = 0, data_offset;
+	uint8_t *old_digest;
+	struct ctxt_priv *priv = sess->ctxt;
+	unsigned int i;
+
+	for (i = 0; i < sgl->num; i++)
+		total_len += sgl->vec[i].len;
+
+	data_len = total_len - ofs.ofs.auth.head - ofs.ofs.auth.tail;
+	data_offset = ofs.ofs.auth.head;
+
+	if (sess->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
+		sess->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
+		if ((data_len & 7) || (data_offset & 7)) {
+			DPAA2_SEC_ERR("AUTH: len/offset must be full bytes");
+			return -ENOTSUP;
+		}
+
+		data_len = data_len >> 3;
+		data_offset = data_offset >> 3;
+	}
+	fle = (struct qbman_fle *)rte_malloc(NULL,
+		FLE_SG_MEM_SIZE(2 * sgl->num),
+			RTE_CACHE_LINE_SIZE);
+	if (unlikely(!fle)) {
+		DPAA2_SEC_ERR("AUTH SG: Memory alloc failed for SGE");
+		return -ENOMEM;
+	}
+	memset(fle, 0, FLE_SG_MEM_SIZE(2*sgl->num));
+	/* first FLE entry used to store mbuf and session ctxt */
+	DPAA2_SET_FLE_ADDR(fle, (size_t)userdata);
+	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
+	op_fle = fle + 1;
+	ip_fle = fle + 2;
+	sge = fle + 3;
+
+	flc = &priv->flc_desc[DESC_INITFINAL].flc;
+
+	/* sg FD */
+	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
+	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
+	DPAA2_SET_FD_COMPOUND_FMT(fd);
+
+	/* o/p fle */
+	DPAA2_SET_FLE_ADDR(op_fle,
+			DPAA2_VADDR_TO_IOVA(digest->va));
+	op_fle->length = sess->digest_length;
+
+	/* i/p fle */
+	DPAA2_SET_FLE_SG_EXT(ip_fle);
+	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
+	ip_fle->length = data_len;
+
+	if (sess->iv.length) {
+		uint8_t *iv_ptr;
+
+		iv_ptr = rte_crypto_op_ctod_offset(userdata, uint8_t *,
+						sess->iv.offset);
+
+		if (sess->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
+			iv_ptr = conv_to_snow_f9_iv(iv_ptr);
+			sge->length = 12;
+		} else if (sess->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
+			iv_ptr = conv_to_zuc_eia_iv(iv_ptr);
+			sge->length = 8;
+		} else {
+			sge->length = sess->iv.length;
+		}
+		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
+		ip_fle->length += sge->length;
+		sge++;
+	}
+	/* i/p 1st seg */
+	DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
+	DPAA2_SET_FLE_OFFSET(sge, data_offset);
+
+	if (data_len <= (int)(sgl->vec[0].len - data_offset)) {
+		sge->length = data_len;
+		data_len = 0;
+	} else {
+		sge->length = sgl->vec[0].len - data_offset;
+		for (i = 1; i < sgl->num; i++) {
+			sge++;
+			DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
+			DPAA2_SET_FLE_OFFSET(sge, 0);
+			sge->length = sgl->vec[i].len;
+		}
+	}
+	if (sess->dir == DIR_DEC) {
+		/* Digest verification case */
+		sge++;
+		old_digest = (uint8_t *)(sge + 1);
+		rte_memcpy(old_digest, digest->va,
+			sess->digest_length);
+		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_digest));
+		sge->length = sess->digest_length;
+		ip_fle->length += sess->digest_length;
+	}
+	DPAA2_SET_FLE_FIN(sge);
+	DPAA2_SET_FLE_FIN(ip_fle);
+	DPAA2_SET_FD_LEN(fd, ip_fle->length);
 
 	return 0;
 }
-- 
2.17.1


^ permalink raw reply	[flat|nested] 115+ messages in thread

* [dpdk-dev] [PATCH v5 07/15] crypto/dpaa2_sec: support AUTHENC with raw buffer APIs
  2021-10-17 16:16       ` [dpdk-dev] [PATCH v5 00/15] crypto: add raw vector support in DPAAx Hemant Agrawal
                           ` (5 preceding siblings ...)
  2021-10-17 16:16         ` [dpdk-dev] [PATCH v5 06/15] crypto/dpaa2_sec: support AUTH only with raw buffer APIs Hemant Agrawal
@ 2021-10-17 16:16         ` Hemant Agrawal
  2021-10-17 16:16         ` [dpdk-dev] [PATCH v5 08/15] crypto/dpaa2_sec: support AEAD " Hemant Agrawal
                           ` (8 subsequent siblings)
  15 siblings, 0 replies; 115+ messages in thread
From: Hemant Agrawal @ 2021-10-17 16:16 UTC (permalink / raw)
  To: dev, gakhil; +Cc: konstantin.ananyev, roy.fan.zhang, Gagandeep Singh

From: Gagandeep Singh <g.singh@nxp.com>

This patch adds AUTHENC support with the raw buffer APIs.
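
For illustration, the offsets a caller passes for a chained
cipher+auth operation; as in the code below, the driver derives the
auth-only header length as ofs.ofs.cipher.head - ofs.ofs.auth.head
(hdr_len is a hypothetical header size):

    union rte_crypto_sym_ofs ofs = { .raw = 0 };

    ofs.ofs.auth.head   = 0;       /* authenticate from the start  */
    ofs.ofs.cipher.head = hdr_len; /* encrypt only past the header */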

Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
---
 drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c | 128 ++++++++++++++++++--
 1 file changed, 121 insertions(+), 7 deletions(-)

diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c b/drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c
index 471c81b9e7..565af6dcba 100644
--- a/drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c
@@ -31,14 +31,128 @@ build_raw_dp_chain_fd(uint8_t *drv_ctx,
 		       void *userdata,
 		       struct qbman_fd *fd)
 {
-	RTE_SET_USED(drv_ctx);
-	RTE_SET_USED(sgl);
-	RTE_SET_USED(iv);
-	RTE_SET_USED(digest);
 	RTE_SET_USED(auth_iv);
-	RTE_SET_USED(ofs);
-	RTE_SET_USED(userdata);
-	RTE_SET_USED(fd);
+
+	dpaa2_sec_session *sess =
+		((struct dpaa2_sec_raw_dp_ctx *)drv_ctx)->session;
+	struct ctxt_priv *priv = sess->ctxt;
+	struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
+	struct sec_flow_context *flc;
+	int data_len = 0, auth_len = 0, cipher_len = 0;
+	unsigned int i = 0;
+	uint16_t auth_hdr_len = ofs.ofs.cipher.head -
+				ofs.ofs.auth.head;
+
+	uint16_t auth_tail_len = ofs.ofs.auth.tail;
+	uint32_t auth_only_len = (auth_tail_len << 16) | auth_hdr_len;
+	int icv_len = sess->digest_length;
+	uint8_t *old_icv;
+	uint8_t *iv_ptr = iv->va;
+
+	for (i = 0; i < sgl->num; i++)
+		data_len += sgl->vec[i].len;
+
+	cipher_len = data_len - ofs.ofs.cipher.head - ofs.ofs.cipher.tail;
+	auth_len = data_len - ofs.ofs.auth.head - ofs.ofs.auth.tail;
+	/* first FLE entry used to store session ctxt */
+	fle = (struct qbman_fle *)rte_malloc(NULL,
+			FLE_SG_MEM_SIZE(2 * sgl->num),
+			RTE_CACHE_LINE_SIZE);
+	if (unlikely(!fle)) {
+		DPAA2_SEC_ERR("AUTHENC SG: Memory alloc failed for SGE");
+		return -ENOMEM;
+	}
+	memset(fle, 0, FLE_SG_MEM_SIZE(2 * sgl->num));
+	DPAA2_SET_FLE_ADDR(fle, (size_t)userdata);
+	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
+
+	op_fle = fle + 1;
+	ip_fle = fle + 2;
+	sge = fle + 3;
+
+	/* Save the shared descriptor */
+	flc = &priv->flc_desc[0].flc;
+
+	/* Configure FD as a FRAME LIST */
+	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
+	DPAA2_SET_FD_COMPOUND_FMT(fd);
+	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
+
+	/* Configure Output FLE with Scatter/Gather Entry */
+	DPAA2_SET_FLE_SG_EXT(op_fle);
+	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));
+
+	if (auth_only_len)
+		DPAA2_SET_FLE_INTERNAL_JD(op_fle, auth_only_len);
+
+	op_fle->length = (sess->dir == DIR_ENC) ?
+			(cipher_len + icv_len) :
+			cipher_len;
+
+	/* Configure Output SGE for Encap/Decap */
+	DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
+	DPAA2_SET_FLE_OFFSET(sge, ofs.ofs.auth.head);
+	sge->length = sgl->vec[0].len - ofs.ofs.auth.head;
+
+	/* o/p segs */
+	for (i = 1; i < sgl->num; i++) {
+		sge++;
+		DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
+		DPAA2_SET_FLE_OFFSET(sge, 0);
+		sge->length = sgl->vec[i].len;
+	}
+
+	if (sess->dir == DIR_ENC) {
+		sge++;
+		DPAA2_SET_FLE_ADDR(sge,
+			digest->iova);
+		sge->length = icv_len;
+	}
+	DPAA2_SET_FLE_FIN(sge);
+
+	sge++;
+
+	/* Configure Input FLE with Scatter/Gather Entry */
+	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
+	DPAA2_SET_FLE_SG_EXT(ip_fle);
+	DPAA2_SET_FLE_FIN(ip_fle);
+
+	ip_fle->length = (sess->dir == DIR_ENC) ?
+			(auth_len + sess->iv.length) :
+			(auth_len + sess->iv.length +
+			icv_len);
+
+	/* Configure Input SGE for Encap/Decap */
+	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
+	sge->length = sess->iv.length;
+
+	sge++;
+	DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
+	DPAA2_SET_FLE_OFFSET(sge, ofs.ofs.auth.head);
+	sge->length = sgl->vec[0].len - ofs.ofs.auth.head;
+
+	for (i = 1; i < sgl->num; i++) {
+		sge++;
+		DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
+		DPAA2_SET_FLE_OFFSET(sge, 0);
+		sge->length = sgl->vec[i].len;
+	}
+
+	if (sess->dir == DIR_DEC) {
+		sge++;
+		old_icv = (uint8_t *)(sge + 1);
+		memcpy(old_icv, digest->va,
+			icv_len);
+		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
+		sge->length = icv_len;
+	}
+
+	DPAA2_SET_FLE_FIN(sge);
+	if (auth_only_len) {
+		DPAA2_SET_FLE_INTERNAL_JD(ip_fle, auth_only_len);
+		DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
+	}
+	DPAA2_SET_FD_LEN(fd, ip_fle->length);
 
 	return 0;
 }
-- 
2.17.1


^ permalink raw reply	[flat|nested] 115+ messages in thread

* [dpdk-dev] [PATCH v5 08/15] crypto/dpaa2_sec: support AEAD with raw buffer APIs
  2021-10-17 16:16       ` [dpdk-dev] [PATCH v5 00/15] crypto: add raw vector support in DPAAx Hemant Agrawal
                           ` (6 preceding siblings ...)
  2021-10-17 16:16         ` [dpdk-dev] [PATCH v5 07/15] crypto/dpaa2_sec: support AUTHENC " Hemant Agrawal
@ 2021-10-17 16:16         ` Hemant Agrawal
  2021-10-17 16:16         ` [dpdk-dev] [PATCH v5 09/15] crypto/dpaa2_sec: support OOP with raw buffer API Hemant Agrawal
                           ` (7 subsequent siblings)
  15 siblings, 0 replies; 115+ messages in thread
From: Hemant Agrawal @ 2021-10-17 16:16 UTC (permalink / raw)
  To: dev, gakhil; +Cc: konstantin.ananyev, roy.fan.zhang, Gagandeep Singh

From: Gagandeep Singh <g.singh@nxp.com>

add raw vector API support for AEAD algos.
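
As a usage note, the input frame list carries IV || AAD || payload and
the output carries the payload (plus the ICV when encrypting). A
hypothetical helper (a sketch only, mirroring the FLE lengths the code
below programs):

    #include <stdint.h>

    /* in/out compound-frame lengths as build_raw_dp_aead_fd() sets them */
    static inline void
    aead_fle_lengths(int aead_len, int icv_len, int iv_len, uint32_t aad_len,
                     int is_enc, uint32_t *in_len, uint32_t *out_len)
    {
            *out_len = is_enc ? (uint32_t)(aead_len + icv_len)
                              : (uint32_t)aead_len;
            *in_len = (uint32_t)(aead_len + iv_len + aad_len) +
                      (is_enc ? 0 : (uint32_t)icv_len);
    }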

Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
---
 drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c | 249 +++++++++++++++++---
 1 file changed, 214 insertions(+), 35 deletions(-)

diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c b/drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c
index 565af6dcba..5c29c61f9d 100644
--- a/drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c
@@ -167,14 +167,126 @@ build_raw_dp_aead_fd(uint8_t *drv_ctx,
 		       void *userdata,
 		       struct qbman_fd *fd)
 {
-	RTE_SET_USED(drv_ctx);
-	RTE_SET_USED(sgl);
-	RTE_SET_USED(iv);
-	RTE_SET_USED(digest);
-	RTE_SET_USED(auth_iv);
-	RTE_SET_USED(ofs);
-	RTE_SET_USED(userdata);
-	RTE_SET_USED(fd);
+	dpaa2_sec_session *sess =
+		((struct dpaa2_sec_raw_dp_ctx *)drv_ctx)->session;
+	struct ctxt_priv *priv = sess->ctxt;
+	struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
+	struct sec_flow_context *flc;
+	uint32_t auth_only_len = sess->ext_params.aead_ctxt.auth_only_len;
+	int icv_len = sess->digest_length;
+	uint8_t *old_icv;
+	uint8_t *IV_ptr = iv->va;
+	unsigned int i = 0;
+	int data_len = 0, aead_len = 0;
+
+	for (i = 0; i < sgl->num; i++)
+		data_len += sgl->vec[i].len;
+
+	aead_len = data_len - ofs.ofs.cipher.head - ofs.ofs.cipher.tail;
+
+	/* first FLE entry used to store mbuf and session ctxt */
+	fle = (struct qbman_fle *)rte_malloc(NULL,
+			FLE_SG_MEM_SIZE(2 * sgl->num),
+			RTE_CACHE_LINE_SIZE);
+	if (unlikely(!fle)) {
+		DPAA2_SEC_ERR("GCM SG: Memory alloc failed for SGE");
+		return -ENOMEM;
+	}
+	memset(fle, 0, FLE_SG_MEM_SIZE(2 * sgl->num));
+	DPAA2_SET_FLE_ADDR(fle, (size_t)userdata);
+	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
+
+	op_fle = fle + 1;
+	ip_fle = fle + 2;
+	sge = fle + 3;
+
+	/* Save the shared descriptor */
+	flc = &priv->flc_desc[0].flc;
+
+	/* Configure FD as a FRAME LIST */
+	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
+	DPAA2_SET_FD_COMPOUND_FMT(fd);
+	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
+
+	/* Configure Output FLE with Scatter/Gather Entry */
+	DPAA2_SET_FLE_SG_EXT(op_fle);
+	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));
+
+	if (auth_only_len)
+		DPAA2_SET_FLE_INTERNAL_JD(op_fle, auth_only_len);
+
+	op_fle->length = (sess->dir == DIR_ENC) ?
+			(aead_len + icv_len) :
+			aead_len;
+
+	/* Configure Output SGE for Encap/Decap */
+	DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
+	DPAA2_SET_FLE_OFFSET(sge, ofs.ofs.cipher.head);
+	sge->length = sgl->vec[0].len - ofs.ofs.cipher.head;
+
+	/* o/p segs */
+	for (i = 1; i < sgl->num; i++) {
+		sge++;
+		DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
+		DPAA2_SET_FLE_OFFSET(sge, 0);
+		sge->length = sgl->vec[i].len;
+	}
+
+	if (sess->dir == DIR_ENC) {
+		sge++;
+		DPAA2_SET_FLE_ADDR(sge, digest->iova);
+		sge->length = icv_len;
+	}
+	DPAA2_SET_FLE_FIN(sge);
+
+	sge++;
+
+	/* Configure Input FLE with Scatter/Gather Entry */
+	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
+	DPAA2_SET_FLE_SG_EXT(ip_fle);
+	DPAA2_SET_FLE_FIN(ip_fle);
+	ip_fle->length = (sess->dir == DIR_ENC) ?
+		(aead_len + sess->iv.length + auth_only_len) :
+		(aead_len + sess->iv.length + auth_only_len +
+		icv_len);
+
+	/* Configure Input SGE for Encap/Decap */
+	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(IV_ptr));
+	sge->length = sess->iv.length;
+
+	sge++;
+	if (auth_only_len) {
+		DPAA2_SET_FLE_ADDR(sge, auth_iv->iova);
+		sge->length = auth_only_len;
+		sge++;
+	}
+
+	DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
+	DPAA2_SET_FLE_OFFSET(sge, ofs.ofs.cipher.head);
+	sge->length = sgl->vec[0].len - ofs.ofs.cipher.head;
+
+	/* i/p segs */
+	for (i = 1; i < sgl->num; i++) {
+		sge++;
+		DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
+		DPAA2_SET_FLE_OFFSET(sge, 0);
+		sge->length = sgl->vec[i].len;
+	}
+
+	if (sess->dir == DIR_DEC) {
+		sge++;
+		old_icv = (uint8_t *)(sge + 1);
+		memcpy(old_icv,  digest->va, icv_len);
+		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
+		sge->length = icv_len;
+	}
+
+	DPAA2_SET_FLE_FIN(sge);
+	if (auth_only_len) {
+		DPAA2_SET_FLE_INTERNAL_JD(ip_fle, auth_only_len);
+		DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
+	}
+	DPAA2_SET_FD_LEN(fd, ip_fle->length);
 
 	return 0;
 }
@@ -311,36 +423,104 @@ build_raw_dp_proto_fd(uint8_t *drv_ctx,
 		       void *userdata,
 		       struct qbman_fd *fd)
 {
-	RTE_SET_USED(drv_ctx);
-	RTE_SET_USED(sgl);
 	RTE_SET_USED(iv);
 	RTE_SET_USED(digest);
 	RTE_SET_USED(auth_iv);
 	RTE_SET_USED(ofs);
-	RTE_SET_USED(userdata);
-	RTE_SET_USED(fd);
 
-	return 0;
-}
+	dpaa2_sec_session *sess =
+		((struct dpaa2_sec_raw_dp_ctx *)drv_ctx)->session;
+	struct ctxt_priv *priv = sess->ctxt;
+	struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
+	struct sec_flow_context *flc;
+	uint32_t in_len = 0, out_len = 0, i;
 
-static int
-build_raw_dp_proto_compound_fd(uint8_t *drv_ctx,
-		       struct rte_crypto_sgl *sgl,
-		       struct rte_crypto_va_iova_ptr *iv,
-		       struct rte_crypto_va_iova_ptr *digest,
-		       struct rte_crypto_va_iova_ptr *auth_iv,
-		       union rte_crypto_sym_ofs ofs,
-		       void *userdata,
-		       struct qbman_fd *fd)
-{
-	RTE_SET_USED(drv_ctx);
-	RTE_SET_USED(sgl);
-	RTE_SET_USED(iv);
-	RTE_SET_USED(digest);
-	RTE_SET_USED(auth_iv);
-	RTE_SET_USED(ofs);
-	RTE_SET_USED(userdata);
-	RTE_SET_USED(fd);
+	/* first FLE entry used to store mbuf and session ctxt */
+	fle = (struct qbman_fle *)rte_malloc(NULL,
+			FLE_SG_MEM_SIZE(2 * sgl->num),
+			RTE_CACHE_LINE_SIZE);
+	if (unlikely(!fle)) {
+		DPAA2_SEC_DP_ERR("Proto:SG: Memory alloc failed for SGE");
+		return -ENOMEM;
+	}
+	memset(fle, 0, FLE_SG_MEM_SIZE(2 * sgl->num));
+	DPAA2_SET_FLE_ADDR(fle, (size_t)userdata);
+	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
+
+	/* Save the shared descriptor */
+	flc = &priv->flc_desc[0].flc;
+	op_fle = fle + 1;
+	ip_fle = fle + 2;
+	sge = fle + 3;
+
+	DPAA2_SET_FD_IVP(fd);
+	DPAA2_SET_FLE_IVP(op_fle);
+	DPAA2_SET_FLE_IVP(ip_fle);
+
+	/* Configure FD as a FRAME LIST */
+	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
+	DPAA2_SET_FD_COMPOUND_FMT(fd);
+	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
+
+	/* Configure Output FLE with Scatter/Gather Entry */
+	DPAA2_SET_FLE_SG_EXT(op_fle);
+	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));
+
+	/* Configure Output SGE for Encap/Decap */
+	DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
+	DPAA2_SET_FLE_OFFSET(sge, 0);
+	sge->length = sgl->vec[0].len;
+	out_len += sge->length;
+	/* o/p segs */
+	for (i = 1; i < sgl->num; i++) {
+		sge++;
+		DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
+		DPAA2_SET_FLE_OFFSET(sge, 0);
+		sge->length = sgl->vec[i].len;
+		out_len += sge->length;
+	}
+	sge->length = sgl->vec[i - 1].tot_len;
+	out_len += sge->length;
+
+	DPAA2_SET_FLE_FIN(sge);
+	op_fle->length = out_len;
+
+	sge++;
+
+	/* Configure Input FLE with Scatter/Gather Entry */
+	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
+	DPAA2_SET_FLE_SG_EXT(ip_fle);
+	DPAA2_SET_FLE_FIN(ip_fle);
+
+	/* Configure input SGE for Encap/Decap */
+	DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
+	DPAA2_SET_FLE_OFFSET(sge, 0);
+	sge->length = sgl->vec[0].len;
+	in_len += sge->length;
+	/* i/p segs */
+	for (i = 1; i < sgl->num; i++) {
+		sge++;
+		DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
+		DPAA2_SET_FLE_OFFSET(sge, 0);
+		sge->length = sgl->vec[i].len;
+		in_len += sge->length;
+	}
+
+	ip_fle->length = in_len;
+	DPAA2_SET_FLE_FIN(sge);
+
+	/* In case of PDCP, per packet HFN is stored in
+	 * mbuf priv after sym_op.
+	 */
+	if (sess->ctxt_type == DPAA2_SEC_PDCP && sess->pdcp.hfn_ovd) {
+		uint32_t hfn_ovd = *(uint32_t *)((uint8_t *)userdata +
+				sess->pdcp.hfn_ovd_offset);
+		/* enable HFN override */
+		DPAA2_SET_FLE_INTERNAL_JD(ip_fle, hfn_ovd);
+		DPAA2_SET_FLE_INTERNAL_JD(op_fle, hfn_ovd);
+		DPAA2_SET_FD_INTERNAL_JD(fd, hfn_ovd);
+	}
+	DPAA2_SET_FD_LEN(fd, ip_fle->length);
 
 	return 0;
 }
@@ -792,10 +972,9 @@ dpaa2_sec_configure_raw_dp_ctx(struct rte_cryptodev *dev, uint16_t qp_id,
 		sess->build_raw_dp_fd = build_raw_dp_auth_fd;
 	else if (sess->ctxt_type == DPAA2_SEC_CIPHER)
 		sess->build_raw_dp_fd = build_raw_dp_cipher_fd;
-	else if (sess->ctxt_type == DPAA2_SEC_IPSEC)
+	else if (sess->ctxt_type == DPAA2_SEC_IPSEC ||
+		sess->ctxt_type == DPAA2_SEC_PDCP)
 		sess->build_raw_dp_fd = build_raw_dp_proto_fd;
-	else if (sess->ctxt_type == DPAA2_SEC_PDCP)
-		sess->build_raw_dp_fd = build_raw_dp_proto_compound_fd;
 	else
 		return -ENOTSUP;
 	dp_ctx = (struct dpaa2_sec_raw_dp_ctx *)raw_dp_ctx->drv_ctx_data;
-- 
2.17.1


^ permalink raw reply	[flat|nested] 115+ messages in thread

* [dpdk-dev] [PATCH v5 09/15] crypto/dpaa2_sec: support OOP with raw buffer API
  2021-10-17 16:16       ` [dpdk-dev] [PATCH v5 00/15] crypto: add raw vector support in DPAAx Hemant Agrawal
                           ` (7 preceding siblings ...)
  2021-10-17 16:16         ` [dpdk-dev] [PATCH v5 08/15] crypto/dpaa2_sec: support AEAD " Hemant Agrawal
@ 2021-10-17 16:16         ` Hemant Agrawal
  2021-10-17 16:16         ` [dpdk-dev] [PATCH v5 10/15] crypto/dpaa2_sec: enhance error checks with raw buffer APIs Hemant Agrawal
                           ` (6 subsequent siblings)
  15 siblings, 0 replies; 115+ messages in thread
From: Hemant Agrawal @ 2021-10-17 16:16 UTC (permalink / raw)
  To: dev, gakhil; +Cc: konstantin.ananyev, roy.fan.zhang, Gagandeep Singh

From: Gagandeep Singh <g.singh@nxp.com>

add support for out-of-place (OOP) processing with raw vector APIs.
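
A hypothetical caller-side sketch (src_vecs, dst_vecs, the iova/va
pointers, ofs, raw_ctx and user_data are all assumed to exist):
out-of-place is requested simply by populating dest_sgl, while a NULL
dest_sgl keeps the in-place path:

    struct rte_crypto_sgl src = { .vec = src_vecs, .num = nb_src };
    struct rte_crypto_sgl dst = { .vec = dst_vecs, .num = nb_dst };
    struct rte_crypto_sym_vec vec = {
            .num = 1,
            .src_sgl = &src,
            .dest_sgl = &dst,       /* NULL would mean in-place */
            .iv = &iv_ptr,
            .digest = &digest_ptr,
            .auth_iv = &auth_iv_ptr,
    };
    int enq_status;

    rte_cryptodev_raw_enqueue_burst(raw_ctx, &vec, ofs,
                                    user_data, &enq_status);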

Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
---
 drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h   |   1 +
 drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c | 156 +++++++++++++++-----
 2 files changed, 116 insertions(+), 41 deletions(-)

diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h b/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h
index f397b756e8..05bd7c0736 100644
--- a/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h
@@ -179,6 +179,7 @@ typedef int (*dpaa2_sec_build_fd_t)(
 
 typedef int (*dpaa2_sec_build_raw_dp_fd_t)(uint8_t *drv_ctx,
 		       struct rte_crypto_sgl *sgl,
+		       struct rte_crypto_sgl *dest_sgl,
 		       struct rte_crypto_va_iova_ptr *iv,
 		       struct rte_crypto_va_iova_ptr *digest,
 		       struct rte_crypto_va_iova_ptr *auth_iv,
diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c b/drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c
index 5c29c61f9d..4f78cef9c0 100644
--- a/drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c
@@ -24,6 +24,7 @@ struct dpaa2_sec_raw_dp_ctx {
 static int
 build_raw_dp_chain_fd(uint8_t *drv_ctx,
 		       struct rte_crypto_sgl *sgl,
+		       struct rte_crypto_sgl *dest_sgl,
 		       struct rte_crypto_va_iova_ptr *iv,
 		       struct rte_crypto_va_iova_ptr *digest,
 		       struct rte_crypto_va_iova_ptr *auth_iv,
@@ -89,17 +90,33 @@ build_raw_dp_chain_fd(uint8_t *drv_ctx,
 			(cipher_len + icv_len) :
 			cipher_len;
 
-	/* Configure Output SGE for Encap/Decap */
-	DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
-	DPAA2_SET_FLE_OFFSET(sge, ofs.ofs.auth.head);
-	sge->length = sgl->vec[0].len - ofs.ofs.auth.head;
+	/* OOP */
+	if (dest_sgl) {
+		/* Configure Output SGE for Encap/Decap */
+		DPAA2_SET_FLE_ADDR(sge, dest_sgl->vec[0].iova);
+		DPAA2_SET_FLE_OFFSET(sge, ofs.ofs.cipher.head);
+		sge->length = dest_sgl->vec[0].len - ofs.ofs.cipher.head;
 
-	/* o/p segs */
-	for (i = 1; i < sgl->num; i++) {
-		sge++;
-		DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
-		DPAA2_SET_FLE_OFFSET(sge, 0);
-		sge->length = sgl->vec[i].len;
+		/* o/p segs */
+		for (i = 1; i < dest_sgl->num; i++) {
+			sge++;
+			DPAA2_SET_FLE_ADDR(sge, dest_sgl->vec[i].iova);
+			DPAA2_SET_FLE_OFFSET(sge, 0);
+			sge->length = dest_sgl->vec[i].len;
+		}
+	} else {
+		/* Configure Output SGE for Encap/Decap */
+		DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
+		DPAA2_SET_FLE_OFFSET(sge, ofs.ofs.cipher.head);
+		sge->length = sgl->vec[0].len - ofs.ofs.cipher.head;
+
+		/* o/p segs */
+		for (i = 1; i < sgl->num; i++) {
+			sge++;
+			DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
+			DPAA2_SET_FLE_OFFSET(sge, 0);
+			sge->length = sgl->vec[i].len;
+		}
 	}
 
 	if (sess->dir == DIR_ENC) {
@@ -160,6 +177,7 @@ build_raw_dp_chain_fd(uint8_t *drv_ctx,
 static int
 build_raw_dp_aead_fd(uint8_t *drv_ctx,
 		       struct rte_crypto_sgl *sgl,
+		       struct rte_crypto_sgl *dest_sgl,
 		       struct rte_crypto_va_iova_ptr *iv,
 		       struct rte_crypto_va_iova_ptr *digest,
 		       struct rte_crypto_va_iova_ptr *auth_iv,
@@ -219,17 +237,33 @@ build_raw_dp_aead_fd(uint8_t *drv_ctx,
 			(aead_len + icv_len) :
 			aead_len;
 
-	/* Configure Output SGE for Encap/Decap */
-	DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
-	DPAA2_SET_FLE_OFFSET(sge, ofs.ofs.cipher.head);
-	sge->length = sgl->vec[0].len - ofs.ofs.cipher.head;
+	/* OOP */
+	if (dest_sgl) {
+		/* Configure Output SGE for Encap/Decap */
+		DPAA2_SET_FLE_ADDR(sge, dest_sgl->vec[0].iova);
+		DPAA2_SET_FLE_OFFSET(sge, ofs.ofs.cipher.head);
+		sge->length = dest_sgl->vec[0].len - ofs.ofs.cipher.head;
 
-	/* o/p segs */
-	for (i = 1; i < sgl->num; i++) {
-		sge++;
-		DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
-		DPAA2_SET_FLE_OFFSET(sge, 0);
-		sge->length = sgl->vec[i].len;
+		/* o/p segs */
+		for (i = 1; i < dest_sgl->num; i++) {
+			sge++;
+			DPAA2_SET_FLE_ADDR(sge, dest_sgl->vec[i].iova);
+			DPAA2_SET_FLE_OFFSET(sge, 0);
+			sge->length = dest_sgl->vec[i].len;
+		}
+	} else {
+		/* Configure Output SGE for Encap/Decap */
+		DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
+		DPAA2_SET_FLE_OFFSET(sge, ofs.ofs.cipher.head);
+		sge->length = sgl->vec[0].len - ofs.ofs.cipher.head;
+
+		/* o/p segs */
+		for (i = 1; i < sgl->num; i++) {
+			sge++;
+			DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
+			DPAA2_SET_FLE_OFFSET(sge, 0);
+			sge->length = sgl->vec[i].len;
+		}
 	}
 
 	if (sess->dir == DIR_ENC) {
@@ -294,6 +328,7 @@ build_raw_dp_aead_fd(uint8_t *drv_ctx,
 static int
 build_raw_dp_auth_fd(uint8_t *drv_ctx,
 		       struct rte_crypto_sgl *sgl,
+		       struct rte_crypto_sgl *dest_sgl,
 		       struct rte_crypto_va_iova_ptr *iv,
 		       struct rte_crypto_va_iova_ptr *digest,
 		       struct rte_crypto_va_iova_ptr *auth_iv,
@@ -303,6 +338,7 @@ build_raw_dp_auth_fd(uint8_t *drv_ctx,
 {
 	RTE_SET_USED(iv);
 	RTE_SET_USED(auth_iv);
+	RTE_SET_USED(dest_sgl);
 
 	dpaa2_sec_session *sess =
 		((struct dpaa2_sec_raw_dp_ctx *)drv_ctx)->session;
@@ -416,6 +452,7 @@ build_raw_dp_auth_fd(uint8_t *drv_ctx,
 static int
 build_raw_dp_proto_fd(uint8_t *drv_ctx,
 		       struct rte_crypto_sgl *sgl,
+		       struct rte_crypto_sgl *dest_sgl,
 		       struct rte_crypto_va_iova_ptr *iv,
 		       struct rte_crypto_va_iova_ptr *digest,
 		       struct rte_crypto_va_iova_ptr *auth_iv,
@@ -466,20 +503,39 @@ build_raw_dp_proto_fd(uint8_t *drv_ctx,
 	DPAA2_SET_FLE_SG_EXT(op_fle);
 	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));
 
-	/* Configure Output SGE for Encap/Decap */
-	DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
-	DPAA2_SET_FLE_OFFSET(sge, 0);
-	sge->length = sgl->vec[0].len;
-	out_len += sge->length;
-	/* o/p segs */
-	for (i = 1; i < sgl->num; i++) {
-		sge++;
-		DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
+	/* OOP */
+	if (dest_sgl) {
+		/* Configure Output SGE for Encap/Decap */
+		DPAA2_SET_FLE_ADDR(sge, dest_sgl->vec[0].iova);
 		DPAA2_SET_FLE_OFFSET(sge, 0);
-		sge->length = sgl->vec[i].len;
+		sge->length = dest_sgl->vec[0].len;
+		out_len += sge->length;
+		/* o/p segs */
+		for (i = 1; i < dest_sgl->num; i++) {
+			sge++;
+			DPAA2_SET_FLE_ADDR(sge, dest_sgl->vec[i].iova);
+			DPAA2_SET_FLE_OFFSET(sge, 0);
+			sge->length = dest_sgl->vec[i].len;
+			out_len += sge->length;
+		}
+		sge->length = dest_sgl->vec[i - 1].tot_len;
+
+	} else {
+		/* Configure Output SGE for Encap/Decap */
+		DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
+		DPAA2_SET_FLE_OFFSET(sge, 0);
+		sge->length = sgl->vec[0].len;
 		out_len += sge->length;
+		/* o/p segs */
+		for (i = 1; i < sgl->num; i++) {
+			sge++;
+			DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
+			DPAA2_SET_FLE_OFFSET(sge, 0);
+			sge->length = sgl->vec[i].len;
+			out_len += sge->length;
+		}
+		sge->length = sgl->vec[i - 1].tot_len;
 	}
-	sge->length = sgl->vec[i - 1].tot_len;
 	out_len += sge->length;
 
 	DPAA2_SET_FLE_FIN(sge);
@@ -528,6 +584,7 @@ build_raw_dp_proto_fd(uint8_t *drv_ctx,
 static int
 build_raw_dp_cipher_fd(uint8_t *drv_ctx,
 		       struct rte_crypto_sgl *sgl,
+		       struct rte_crypto_sgl *dest_sgl,
 		       struct rte_crypto_va_iova_ptr *iv,
 		       struct rte_crypto_va_iova_ptr *digest,
 		       struct rte_crypto_va_iova_ptr *auth_iv,
@@ -593,17 +650,33 @@ build_raw_dp_cipher_fd(uint8_t *drv_ctx,
 	op_fle->length = data_len;
 	DPAA2_SET_FLE_SG_EXT(op_fle);
 
-	/* o/p 1st seg */
-	DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
-	DPAA2_SET_FLE_OFFSET(sge, data_offset);
-	sge->length = sgl->vec[0].len - data_offset;
+	/* OOP */
+	if (dest_sgl) {
+		/* o/p 1st seg */
+		DPAA2_SET_FLE_ADDR(sge, dest_sgl->vec[0].iova);
+		DPAA2_SET_FLE_OFFSET(sge, data_offset);
+		sge->length = dest_sgl->vec[0].len - data_offset;
 
-	/* o/p segs */
-	for (i = 1; i < sgl->num; i++) {
-		sge++;
-		DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
-		DPAA2_SET_FLE_OFFSET(sge, 0);
-		sge->length = sgl->vec[i].len;
+		/* o/p segs */
+		for (i = 1; i < dest_sgl->num; i++) {
+			sge++;
+			DPAA2_SET_FLE_ADDR(sge, dest_sgl->vec[i].iova);
+			DPAA2_SET_FLE_OFFSET(sge, 0);
+			sge->length = dest_sgl->vec[i].len;
+		}
+	} else {
+		/* o/p 1st seg */
+		DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
+		DPAA2_SET_FLE_OFFSET(sge, data_offset);
+		sge->length = sgl->vec[0].len - data_offset;
+
+		/* o/p segs */
+		for (i = 1; i < sgl->num; i++) {
+			sge++;
+			DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
+			DPAA2_SET_FLE_OFFSET(sge, 0);
+			sge->length = sgl->vec[i].len;
+		}
 	}
 	DPAA2_SET_FLE_FIN(sge);
 
@@ -706,6 +779,7 @@ dpaa2_sec_raw_enqueue_burst(void *qp_data, uint8_t *drv_ctx,
 			memset(&fd_arr[loop], 0, sizeof(struct qbman_fd));
 			ret = sess->build_raw_dp_fd(drv_ctx,
 						    &vec->src_sgl[loop],
+						    &vec->dest_sgl[loop],
 						    &vec->iv[loop],
 						    &vec->digest[loop],
 						    &vec->auth_iv[loop],
-- 
2.17.1


^ permalink raw reply	[flat|nested] 115+ messages in thread

* [dpdk-dev] [PATCH v5 10/15] crypto/dpaa2_sec: enhance error checks with raw buffer APIs
  2021-10-17 16:16       ` [dpdk-dev] [PATCH v5 00/15] crypto: add raw vector support in DPAAx Hemant Agrawal
                           ` (8 preceding siblings ...)
  2021-10-17 16:16         ` [dpdk-dev] [PATCH v5 09/15] crypto/dpaa2_sec: support OOP with raw buffer API Hemant Agrawal
@ 2021-10-17 16:16         ` Hemant Agrawal
  2021-10-17 16:16         ` [dpdk-dev] [PATCH v5 11/15] crypto/dpaa_sec: support raw datapath APIs Hemant Agrawal
                           ` (5 subsequent siblings)
  15 siblings, 0 replies; 115+ messages in thread
From: Hemant Agrawal @ 2021-10-17 16:16 UTC (permalink / raw)
  To: dev, gakhil; +Cc: konstantin.ananyev, roy.fan.zhang, Gagandeep Singh

From: Gagandeep Singh <g.singh@nxp.com>

This patch improves the error checks and the support of
wireless algos with raw buffers.
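
One user-visible effect, sketched below with a hypothetical callback:
the dequeue path now reports a plain success flag through the raw API's
post-dequeue hook rather than an rte_crypto_op status value:

    #include <rte_crypto.h>

    /* matches rte_cryptodev_raw_post_dequeue_t */
    static void
    post_deq_cb(void *user_data, uint32_t index, uint8_t is_success)
    {
            struct rte_crypto_op *op = user_data;

            (void)index;
            op->status = is_success ? RTE_CRYPTO_OP_STATUS_SUCCESS :
                                      RTE_CRYPTO_OP_STATUS_ERROR;
    }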

Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
---
 drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c | 31 ++++-----------------
 1 file changed, 6 insertions(+), 25 deletions(-)

diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c b/drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c
index 4f78cef9c0..a2ffc6c02f 100644
--- a/drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c
@@ -355,16 +355,7 @@ build_raw_dp_auth_fd(uint8_t *drv_ctx,
 	data_len = total_len - ofs.ofs.auth.head - ofs.ofs.auth.tail;
 	data_offset = ofs.ofs.auth.head;
 
-	if (sess->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
-		sess->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
-		if ((data_len & 7) || (data_offset & 7)) {
-			DPAA2_SEC_ERR("AUTH: len/offset must be full bytes");
-			return -ENOTSUP;
-		}
-
-		data_len = data_len >> 3;
-		data_offset = data_offset >> 3;
-	}
+	/* For SNOW3G and ZUC, only lengths in bits are supported */
 	fle = (struct qbman_fle *)rte_malloc(NULL,
 		FLE_SG_MEM_SIZE(2 * sgl->num),
 			RTE_CACHE_LINE_SIZE);
@@ -609,17 +600,7 @@ build_raw_dp_cipher_fd(uint8_t *drv_ctx,
 	data_len = total_len - ofs.ofs.cipher.head - ofs.ofs.cipher.tail;
 	data_offset = ofs.ofs.cipher.head;
 
-	if (sess->cipher_alg == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
-		sess->cipher_alg == RTE_CRYPTO_CIPHER_ZUC_EEA3) {
-		if ((data_len & 7) || (data_offset & 7)) {
-			DPAA2_SEC_ERR("CIPHER: len/offset must be full bytes");
-			return -ENOTSUP;
-		}
-
-		data_len = data_len >> 3;
-		data_offset = data_offset >> 3;
-	}
-
+	/* For SNOW3G and ZUC, only lengths in bits are supported */
 	/* first FLE entry used to store mbuf and session ctxt */
 	fle = (struct qbman_fle *)rte_malloc(NULL,
 			FLE_SG_MEM_SIZE(2*sgl->num),
@@ -878,7 +859,7 @@ dpaa2_sec_raw_dequeue_burst(void *qp_data, uint8_t *drv_ctx,
 	struct qbman_result *dq_storage;
 	uint32_t fqid = dpaa2_qp->rx_vq.fqid;
 	int ret, num_rx = 0;
-	uint8_t is_last = 0, status;
+	uint8_t is_last = 0, status, is_success = 0;
 	struct qbman_swp *swp;
 	const struct qbman_fd *fd;
 	struct qbman_pull_desc pulldesc;
@@ -957,11 +938,11 @@ dpaa2_sec_raw_dequeue_burst(void *qp_data, uint8_t *drv_ctx,
 			/* TODO Parse SEC errors */
 			DPAA2_SEC_ERR("SEC returned Error - %x",
 				      fd->simple.frc);
-			status = RTE_CRYPTO_OP_STATUS_ERROR;
+			is_success = false;
 		} else {
-			status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+			is_success = true;
 		}
-		post_dequeue(user_data, num_rx, status);
+		post_dequeue(user_data, num_rx, is_success);
 
 		num_rx++;
 		dq_storage++;
-- 
2.17.1


^ permalink raw reply	[flat|nested] 115+ messages in thread

* [dpdk-dev] [PATCH v5 11/15] crypto/dpaa_sec: support raw datapath APIs
  2021-10-17 16:16       ` [dpdk-dev] [PATCH v5 00/15] crypto: add raw vector support in DPAAx Hemant Agrawal
                           ` (9 preceding siblings ...)
  2021-10-17 16:16         ` [dpdk-dev] [PATCH v5 10/15] crypto/dpaa2_sec: enhance error checks with raw buffer APIs Hemant Agrawal
@ 2021-10-17 16:16         ` Hemant Agrawal
  2021-10-17 16:16         ` [dpdk-dev] [PATCH v5 12/15] crypto/dpaa_sec: support authonly and chain with raw APIs Hemant Agrawal
                           ` (4 subsequent siblings)
  15 siblings, 0 replies; 115+ messages in thread
From: Hemant Agrawal @ 2021-10-17 16:16 UTC (permalink / raw)
  To: dev, gakhil; +Cc: konstantin.ananyev, roy.fan.zhang, Gagandeep Singh

From: Gagandeep Singh <g.singh@nxp.com>

This patch adds a raw vector API framework for the dpaa_sec driver.
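
A rough setup sketch using the generic raw datapath calls this framework
plugs into (dev_id, qp_id and sess are assumed to exist; error handling
omitted):

    #include <rte_cryptodev.h>
    #include <rte_malloc.h>

    int size = rte_cryptodev_get_raw_dp_ctx_size(dev_id);
    struct rte_crypto_raw_dp_ctx *raw_ctx = rte_zmalloc(NULL, size, 0);
    union rte_cryptodev_session_ctx sess_ctx = { .crypto_sess = sess };

    rte_cryptodev_configure_raw_dp_ctx(dev_id, qp_id, raw_ctx,
                    RTE_CRYPTO_OP_WITH_SESSION, sess_ctx, 0);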

Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
---
 doc/guides/rel_notes/release_21_11.rst    |   1 +
 drivers/crypto/dpaa_sec/dpaa_sec.c        |  23 +-
 drivers/crypto/dpaa_sec/dpaa_sec.h        |  39 +-
 drivers/crypto/dpaa_sec/dpaa_sec_raw_dp.c | 485 ++++++++++++++++++++++
 drivers/crypto/dpaa_sec/meson.build       |   4 +-
 5 files changed, 538 insertions(+), 14 deletions(-)
 create mode 100644 drivers/crypto/dpaa_sec/dpaa_sec_raw_dp.c

diff --git a/doc/guides/rel_notes/release_21_11.rst b/doc/guides/rel_notes/release_21_11.rst
index 0566aacf19..885630eae9 100644
--- a/doc/guides/rel_notes/release_21_11.rst
+++ b/doc/guides/rel_notes/release_21_11.rst
@@ -124,6 +124,7 @@ New Features
 
   * Added DES-CBC, AES-XCBC-MAC, AES-CMAC and non-HMAC algo support.
   * Added PDCP short MAC-I support.
+  * Added raw vector datapath API support
 
 * **Updated NXP dpaa2_sec crypto PMD.**
 
diff --git a/drivers/crypto/dpaa_sec/dpaa_sec.c b/drivers/crypto/dpaa_sec/dpaa_sec.c
index d5aa2748d6..c7ef1c7b0f 100644
--- a/drivers/crypto/dpaa_sec/dpaa_sec.c
+++ b/drivers/crypto/dpaa_sec/dpaa_sec.c
@@ -45,10 +45,7 @@
 #include <dpaa_sec_log.h>
 #include <dpaax_iova_table.h>
 
-static uint8_t cryptodev_driver_id;
-
-static int
-dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess);
+uint8_t dpaa_cryptodev_driver_id;
 
 static inline void
 dpaa_sec_op_ending(struct dpaa_sec_op_ctx *ctx)
@@ -1787,8 +1784,8 @@ dpaa_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
 			case RTE_CRYPTO_OP_WITH_SESSION:
 				ses = (dpaa_sec_session *)
 					get_sym_session_private_data(
-							op->sym->session,
-							cryptodev_driver_id);
+						op->sym->session,
+						dpaa_cryptodev_driver_id);
 				break;
 #ifdef RTE_LIB_SECURITY
 			case RTE_CRYPTO_OP_SECURITY_SESSION:
@@ -2400,7 +2397,7 @@ dpaa_sec_detach_rxq(struct dpaa_sec_dev_private *qi, struct qman_fq *fq)
 	return -1;
 }
 
-static int
+int
 dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess)
 {
 	int ret;
@@ -3216,7 +3213,7 @@ dpaa_sec_dev_infos_get(struct rte_cryptodev *dev,
 		info->feature_flags = dev->feature_flags;
 		info->capabilities = dpaa_sec_capabilities;
 		info->sym.max_nb_sessions = internals->max_nb_sessions;
-		info->driver_id = cryptodev_driver_id;
+		info->driver_id = dpaa_cryptodev_driver_id;
 	}
 }
 
@@ -3412,7 +3409,10 @@ static struct rte_cryptodev_ops crypto_ops = {
 	.queue_pair_release   = dpaa_sec_queue_pair_release,
 	.sym_session_get_size     = dpaa_sec_sym_session_get_size,
 	.sym_session_configure    = dpaa_sec_sym_session_configure,
-	.sym_session_clear        = dpaa_sec_sym_session_clear
+	.sym_session_clear        = dpaa_sec_sym_session_clear,
+	/* Raw data-path API related operations */
+	.sym_get_raw_dp_ctx_size = dpaa_sec_get_dp_ctx_size,
+	.sym_configure_raw_dp_ctx = dpaa_sec_configure_raw_dp_ctx,
 };
 
 #ifdef RTE_LIB_SECURITY
@@ -3463,7 +3463,7 @@ dpaa_sec_dev_init(struct rte_cryptodev *cryptodev)
 
 	PMD_INIT_FUNC_TRACE();
 
-	cryptodev->driver_id = cryptodev_driver_id;
+	cryptodev->driver_id = dpaa_cryptodev_driver_id;
 	cryptodev->dev_ops = &crypto_ops;
 
 	cryptodev->enqueue_burst = dpaa_sec_enqueue_burst;
@@ -3472,6 +3472,7 @@ dpaa_sec_dev_init(struct rte_cryptodev *cryptodev)
 			RTE_CRYPTODEV_FF_HW_ACCELERATED |
 			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
 			RTE_CRYPTODEV_FF_SECURITY |
+			RTE_CRYPTODEV_FF_SYM_RAW_DP |
 			RTE_CRYPTODEV_FF_IN_PLACE_SGL |
 			RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT |
 			RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
@@ -3637,5 +3638,5 @@ static struct cryptodev_driver dpaa_sec_crypto_drv;
 
 RTE_PMD_REGISTER_DPAA(CRYPTODEV_NAME_DPAA_SEC_PMD, rte_dpaa_sec_driver);
 RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa_sec_crypto_drv, rte_dpaa_sec_driver.driver,
-		cryptodev_driver_id);
+		dpaa_cryptodev_driver_id);
 RTE_LOG_REGISTER(dpaa_logtype_sec, pmd.crypto.dpaa, NOTICE);
diff --git a/drivers/crypto/dpaa_sec/dpaa_sec.h b/drivers/crypto/dpaa_sec/dpaa_sec.h
index 503047879e..77288cd1eb 100644
--- a/drivers/crypto/dpaa_sec/dpaa_sec.h
+++ b/drivers/crypto/dpaa_sec/dpaa_sec.h
@@ -19,6 +19,8 @@
 #define AES_CTR_IV_LEN		16
 #define AES_GCM_IV_LEN		12
 
+extern uint8_t dpaa_cryptodev_driver_id;
+
 #define DPAA_IPv6_DEFAULT_VTC_FLOW	0x60000000
 
 /* Minimum job descriptor consists of a oneword job descriptor HEADER and
@@ -117,6 +119,24 @@ struct sec_pdcp_ctxt {
 	uint32_t hfn_threshold;	/*!< HFN Threashold for key renegotiation */
 };
 #endif
+
+typedef int (*dpaa_sec_build_fd_t)(
+	void *qp, uint8_t *drv_ctx, struct rte_crypto_vec *data_vec,
+	uint16_t n_data_vecs, union rte_crypto_sym_ofs ofs,
+	struct rte_crypto_va_iova_ptr *iv,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *aad_or_auth_iv,
+	void *user_data);
+
+typedef struct dpaa_sec_job* (*dpaa_sec_build_raw_dp_fd_t)(uint8_t *drv_ctx,
+			struct rte_crypto_sgl *sgl,
+			struct rte_crypto_sgl *dest_sgl,
+			struct rte_crypto_va_iova_ptr *iv,
+			struct rte_crypto_va_iova_ptr *digest,
+			struct rte_crypto_va_iova_ptr *auth_iv,
+			union rte_crypto_sym_ofs ofs,
+			void *userdata);
+
 typedef struct dpaa_sec_session_entry {
 	struct sec_cdb cdb;	/**< cmd block associated with qp */
 	struct dpaa_sec_qp *qp[MAX_DPAA_CORES];
@@ -129,6 +149,8 @@ typedef struct dpaa_sec_session_entry {
 #ifdef RTE_LIB_SECURITY
 	enum rte_security_session_protocol proto_alg; /*!< Security Algorithm*/
 #endif
+	dpaa_sec_build_fd_t build_fd;
+	dpaa_sec_build_raw_dp_fd_t build_raw_dp_fd;
 	union {
 		struct {
 			uint8_t *data;	/**< pointer to key data */
@@ -211,7 +233,10 @@ struct dpaa_sec_job {
 #define DPAA_MAX_NB_MAX_DIGEST	32
 struct dpaa_sec_op_ctx {
 	struct dpaa_sec_job job;
-	struct rte_crypto_op *op;
+	union {
+		struct rte_crypto_op *op;
+		void *userdata;
+	};
 	struct rte_mempool *ctx_pool; /* mempool pointer for dpaa_sec_op_ctx */
 	uint32_t fd_status;
 	int64_t vtop_offset;
@@ -1001,4 +1026,16 @@ calc_chksum(void *buffer, int len)
 	return  result;
 }
 
+int
+dpaa_sec_configure_raw_dp_ctx(struct rte_cryptodev *dev, uint16_t qp_id,
+	struct rte_crypto_raw_dp_ctx *raw_dp_ctx,
+	enum rte_crypto_op_sess_type sess_type,
+	union rte_cryptodev_session_ctx session_ctx, uint8_t is_update);
+
+int
+dpaa_sec_get_dp_ctx_size(struct rte_cryptodev *dev);
+
+int
+dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess);
+
 #endif /* _DPAA_SEC_H_ */
diff --git a/drivers/crypto/dpaa_sec/dpaa_sec_raw_dp.c b/drivers/crypto/dpaa_sec/dpaa_sec_raw_dp.c
new file mode 100644
index 0000000000..7376da4cbc
--- /dev/null
+++ b/drivers/crypto/dpaa_sec/dpaa_sec_raw_dp.c
@@ -0,0 +1,485 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2021 NXP
+ */
+
+#include <rte_byteorder.h>
+#include <rte_common.h>
+#include <cryptodev_pmd.h>
+#include <rte_crypto.h>
+#include <rte_cryptodev.h>
+#ifdef RTE_LIB_SECURITY
+#include <rte_security_driver.h>
+#endif
+
+/* RTA header files */
+#include <desc/ipsec.h>
+
+#include <rte_dpaa_bus.h>
+#include <dpaa_sec.h>
+#include <dpaa_sec_log.h>
+
+struct dpaa_sec_raw_dp_ctx {
+	dpaa_sec_session *session;
+	uint32_t tail;
+	uint32_t head;
+	uint16_t cached_enqueue;
+	uint16_t cached_dequeue;
+};
+
+static __rte_always_inline int
+dpaa_sec_raw_enqueue_done(void *qp_data, uint8_t *drv_ctx, uint32_t n)
+{
+	RTE_SET_USED(qp_data);
+	RTE_SET_USED(drv_ctx);
+	RTE_SET_USED(n);
+
+	return 0;
+}
+
+static __rte_always_inline int
+dpaa_sec_raw_dequeue_done(void *qp_data, uint8_t *drv_ctx, uint32_t n)
+{
+	RTE_SET_USED(qp_data);
+	RTE_SET_USED(drv_ctx);
+	RTE_SET_USED(n);
+
+	return 0;
+}
+
+static inline struct dpaa_sec_op_ctx *
+dpaa_sec_alloc_raw_ctx(dpaa_sec_session *ses, int sg_count)
+{
+	struct dpaa_sec_op_ctx *ctx;
+	int i, retval;
+
+	retval = rte_mempool_get(
+			ses->qp[rte_lcore_id() % MAX_DPAA_CORES]->ctx_pool,
+			(void **)(&ctx));
+	if (!ctx || retval) {
+		DPAA_SEC_DP_WARN("Alloc sec descriptor failed!");
+		return NULL;
+	}
+	/*
+	 * Clear SG memory. There are 16 SG entries of 16 bytes each.
+	 * One call to dcbz_64() clears 64 bytes, so it is called 4 times
+	 * to clear all the SG entries. dpaa_sec_alloc_ctx() runs for
+	 * each packet, and memset() is costlier than dcbz_64().
+	 */
+	for (i = 0; i < sg_count && i < MAX_JOB_SG_ENTRIES; i += 4)
+		dcbz_64(&ctx->job.sg[i]);
+
+	ctx->ctx_pool = ses->qp[rte_lcore_id() % MAX_DPAA_CORES]->ctx_pool;
+	ctx->vtop_offset = (size_t) ctx - rte_mempool_virt2iova(ctx);
+
+	return ctx;
+}
+
+static struct dpaa_sec_job *
+build_dpaa_raw_dp_auth_fd(uint8_t *drv_ctx,
+			struct rte_crypto_sgl *sgl,
+			struct rte_crypto_sgl *dest_sgl,
+			struct rte_crypto_va_iova_ptr *iv,
+			struct rte_crypto_va_iova_ptr *digest,
+			struct rte_crypto_va_iova_ptr *auth_iv,
+			union rte_crypto_sym_ofs ofs,
+			void *userdata)
+{
+	RTE_SET_USED(drv_ctx);
+	RTE_SET_USED(sgl);
+	RTE_SET_USED(dest_sgl);
+	RTE_SET_USED(iv);
+	RTE_SET_USED(digest);
+	RTE_SET_USED(auth_iv);
+	RTE_SET_USED(ofs);
+	RTE_SET_USED(userdata);
+
+	return NULL;
+}
+
+static struct dpaa_sec_job *
+build_dpaa_raw_dp_cipher_fd(uint8_t *drv_ctx,
+			struct rte_crypto_sgl *sgl,
+			struct rte_crypto_sgl *dest_sgl,
+			struct rte_crypto_va_iova_ptr *iv,
+			struct rte_crypto_va_iova_ptr *digest,
+			struct rte_crypto_va_iova_ptr *auth_iv,
+			union rte_crypto_sym_ofs ofs,
+			void *userdata)
+{
+	RTE_SET_USED(digest);
+	RTE_SET_USED(auth_iv);
+	dpaa_sec_session *ses =
+		((struct dpaa_sec_raw_dp_ctx *)drv_ctx)->session;
+	struct dpaa_sec_job *cf;
+	struct dpaa_sec_op_ctx *ctx;
+	struct qm_sg_entry *sg, *out_sg, *in_sg;
+	unsigned int i;
+	uint8_t *IV_ptr = iv->va;
+	int data_len, total_len = 0, data_offset;
+
+	for (i = 0; i < sgl->num; i++)
+		total_len += sgl->vec[i].len;
+
+	data_len = total_len - ofs.ofs.cipher.head - ofs.ofs.cipher.tail;
+	data_offset = ofs.ofs.cipher.head;
+
+	/* Support lengths in bits only for SNOW3G and ZUC */
+	if (sgl->num > MAX_SG_ENTRIES) {
+		DPAA_SEC_DP_ERR("Cipher: Max sec segs supported is %d",
+				MAX_SG_ENTRIES);
+		return NULL;
+	}
+
+	ctx = dpaa_sec_alloc_raw_ctx(ses, sgl->num * 2 + 3);
+	if (!ctx)
+		return NULL;
+
+	cf = &ctx->job;
+	ctx->userdata = (void *)userdata;
+
+	/* output */
+	out_sg = &cf->sg[0];
+	out_sg->extension = 1;
+	out_sg->length = data_len;
+	qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(&cf->sg[2]));
+	cpu_to_hw_sg(out_sg);
+
+	if (dest_sgl) {
+		/* 1st seg */
+		sg = &cf->sg[2];
+		qm_sg_entry_set64(sg, dest_sgl->vec[0].iova);
+		sg->length = dest_sgl->vec[0].len - data_offset;
+		sg->offset = data_offset;
+
+		/* Successive segs */
+		for (i = 1; i < dest_sgl->num; i++) {
+			cpu_to_hw_sg(sg);
+			sg++;
+			qm_sg_entry_set64(sg, dest_sgl->vec[i].iova);
+			sg->length = dest_sgl->vec[i].len;
+		}
+	} else {
+		/* 1st seg */
+		sg = &cf->sg[2];
+		qm_sg_entry_set64(sg, sgl->vec[0].iova);
+		sg->length = sgl->vec[0].len - data_offset;
+		sg->offset = data_offset;
+
+		/* Successive segs */
+		for (i = 1; i < sgl->num; i++) {
+			cpu_to_hw_sg(sg);
+			sg++;
+			qm_sg_entry_set64(sg, sgl->vec[i].iova);
+			sg->length = sgl->vec[i].len;
+		}
+
+	}
+	sg->final = 1;
+	cpu_to_hw_sg(sg);
+
+	/* input */
+	in_sg = &cf->sg[1];
+	in_sg->extension = 1;
+	in_sg->final = 1;
+	in_sg->length = data_len + ses->iv.length;
+
+	sg++;
+	qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));
+	cpu_to_hw_sg(in_sg);
+
+	/* IV */
+	qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
+	sg->length = ses->iv.length;
+	cpu_to_hw_sg(sg);
+
+	/* 1st seg */
+	sg++;
+	qm_sg_entry_set64(sg, sgl->vec[0].iova);
+	sg->length = sgl->vec[0].len - data_offset;
+	sg->offset = data_offset;
+
+	/* Successive segs */
+	for (i = 1; i < sgl->num; i++) {
+		cpu_to_hw_sg(sg);
+		sg++;
+		qm_sg_entry_set64(sg, sgl->vec[i].iova);
+		sg->length = sgl->vec[i].len;
+	}
+	sg->final = 1;
+	cpu_to_hw_sg(sg);
+
+	return cf;
+}
+
+static uint32_t
+dpaa_sec_raw_enqueue_burst(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
+	void *user_data[], int *status)
+{
+	/* Function to transmit the frames to given device and queuepair */
+	uint32_t loop;
+	struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp_data;
+	uint16_t num_tx = 0;
+	struct qm_fd fds[DPAA_SEC_BURST], *fd;
+	uint32_t frames_to_send;
+	struct dpaa_sec_job *cf;
+	dpaa_sec_session *ses =
+			((struct dpaa_sec_raw_dp_ctx *)drv_ctx)->session;
+	uint32_t flags[DPAA_SEC_BURST] = {0};
+	struct qman_fq *inq[DPAA_SEC_BURST];
+
+	if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
+		if (rte_dpaa_portal_init((void *)0)) {
+			DPAA_SEC_ERR("Failure in affining portal");
+			return 0;
+		}
+	}
+
+	while (vec->num) {
+		frames_to_send = (vec->num > DPAA_SEC_BURST) ?
+				DPAA_SEC_BURST : vec->num;
+		for (loop = 0; loop < frames_to_send; loop++) {
+			if (unlikely(!ses->qp[rte_lcore_id() % MAX_DPAA_CORES])) {
+				if (dpaa_sec_attach_sess_q(dpaa_qp, ses)) {
+					frames_to_send = loop;
+					goto send_pkts;
+				}
+			} else if (unlikely(ses->qp[rte_lcore_id() %
+						MAX_DPAA_CORES] != dpaa_qp)) {
+				DPAA_SEC_DP_ERR("Old:sess->qp = %p"
+					" New qp = %p\n",
+					ses->qp[rte_lcore_id() %
+					MAX_DPAA_CORES], dpaa_qp);
+				frames_to_send = loop;
+				goto send_pkts;
+			}
+
+			/* Clear the unused FD fields before sending */
+			fd = &fds[loop];
+			memset(fd, 0, sizeof(struct qm_fd));
+			cf = ses->build_raw_dp_fd(drv_ctx,
+						&vec->src_sgl[loop],
+						&vec->dest_sgl[loop],
+						&vec->iv[loop],
+						&vec->digest[loop],
+						&vec->auth_iv[loop],
+						ofs,
+						user_data[loop]);
+			if (!cf) {
+				DPAA_SEC_ERR("error: Improper packet contents"
+					" for crypto operation");
+				goto skip_tx;
+			}
+			inq[loop] = ses->inq[rte_lcore_id() % MAX_DPAA_CORES];
+			fd->opaque_addr = 0;
+			fd->cmd = 0;
+			qm_fd_addr_set64(fd, rte_dpaa_mem_vtop(cf->sg));
+			fd->_format1 = qm_fd_compound;
+			fd->length29 = 2 * sizeof(struct qm_sg_entry);
+
+			status[loop] = 1;
+		}
+send_pkts:
+		loop = 0;
+		while (loop < frames_to_send) {
+			loop += qman_enqueue_multi_fq(&inq[loop], &fds[loop],
+					&flags[loop], frames_to_send - loop);
+		}
+		vec->num -= frames_to_send;
+		num_tx += frames_to_send;
+	}
+
+skip_tx:
+	dpaa_qp->tx_pkts += num_tx;
+	dpaa_qp->tx_errs += vec->num - num_tx;
+
+	return num_tx;
+}
+
+static int
+dpaa_sec_deq_raw(struct dpaa_sec_qp *qp, void **out_user_data,
+		uint8_t is_user_data_array,
+		rte_cryptodev_raw_post_dequeue_t post_dequeue,
+		int nb_ops)
+{
+	struct qman_fq *fq;
+	unsigned int pkts = 0;
+	int num_rx_bufs, ret;
+	struct qm_dqrr_entry *dq;
+	uint32_t vdqcr_flags = 0;
+	uint8_t is_success = 0;
+
+	fq = &qp->outq;
+	/*
+	 * For requests of fewer than four buffers, we provide the exact
+	 * number of buffers. Otherwise we do not set the QM_VDQCR_EXACT
+	 * flag: without it the portal can return up to two more buffers
+	 * than requested, so we request two fewer in that case.
+	 */
+	if (nb_ops < 4) {
+		vdqcr_flags = QM_VDQCR_EXACT;
+		num_rx_bufs = nb_ops;
+	} else {
+		num_rx_bufs = nb_ops > DPAA_MAX_DEQUEUE_NUM_FRAMES ?
+			(DPAA_MAX_DEQUEUE_NUM_FRAMES - 2) : (nb_ops - 2);
+	}
+	ret = qman_set_vdq(fq, num_rx_bufs, vdqcr_flags);
+	if (ret)
+		return 0;
+
+	do {
+		const struct qm_fd *fd;
+		struct dpaa_sec_job *job;
+		struct dpaa_sec_op_ctx *ctx;
+
+		dq = qman_dequeue(fq);
+		if (!dq)
+			continue;
+
+		fd = &dq->fd;
+		/* sg is embedded in an op ctx,
+		 * sg[0] is for output
+		 * sg[1] for input
+		 */
+		job = rte_dpaa_mem_ptov(qm_fd_addr_get64(fd));
+
+		ctx = container_of(job, struct dpaa_sec_op_ctx, job);
+		ctx->fd_status = fd->status;
+		if (is_user_data_array)
+			out_user_data[pkts] = ctx->userdata;
+		else
+			out_user_data[0] = ctx->userdata;
+
+		if (!ctx->fd_status) {
+			is_success = true;
+		} else {
+			is_success = false;
+			DPAA_SEC_DP_WARN("SEC return err:0x%x", ctx->fd_status);
+		}
+		post_dequeue(ctx->op, pkts, is_success);
+		pkts++;
+
+		/* report op status to sym->op and then free the ctx memory */
+		rte_mempool_put(ctx->ctx_pool, (void *)ctx);
+
+		qman_dqrr_consume(fq, dq);
+	} while (fq->flags & QMAN_FQ_STATE_VDQCR);
+
+	return pkts;
+}
+
+
+static __rte_always_inline uint32_t
+dpaa_sec_raw_dequeue_burst(void *qp_data, uint8_t *drv_ctx,
+	rte_cryptodev_raw_get_dequeue_count_t get_dequeue_count,
+	uint32_t max_nb_to_dequeue,
+	rte_cryptodev_raw_post_dequeue_t post_dequeue,
+	void **out_user_data, uint8_t is_user_data_array,
+	uint32_t *n_success, int *dequeue_status)
+{
+	RTE_SET_USED(drv_ctx);
+	RTE_SET_USED(get_dequeue_count);
+	uint16_t num_rx;
+	struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp_data;
+	uint32_t nb_ops = max_nb_to_dequeue;
+
+	if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
+		if (rte_dpaa_portal_init((void *)0)) {
+			DPAA_SEC_ERR("Failure in affining portal");
+			return 0;
+		}
+	}
+
+	num_rx = dpaa_sec_deq_raw(dpaa_qp, out_user_data,
+			is_user_data_array, post_dequeue, nb_ops);
+
+	dpaa_qp->rx_pkts += num_rx;
+	*dequeue_status = 1;
+	*n_success = num_rx;
+
+	DPAA_SEC_DP_DEBUG("SEC Received %d Packets\n", num_rx);
+
+	return num_rx;
+}
+
+static __rte_always_inline int
+dpaa_sec_raw_enqueue(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_vec *data_vec,
+	uint16_t n_data_vecs, union rte_crypto_sym_ofs ofs,
+	struct rte_crypto_va_iova_ptr *iv,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *aad_or_auth_iv,
+	void *user_data)
+{
+	RTE_SET_USED(qp_data);
+	RTE_SET_USED(drv_ctx);
+	RTE_SET_USED(data_vec);
+	RTE_SET_USED(n_data_vecs);
+	RTE_SET_USED(ofs);
+	RTE_SET_USED(iv);
+	RTE_SET_USED(digest);
+	RTE_SET_USED(aad_or_auth_iv);
+	RTE_SET_USED(user_data);
+
+	return 0;
+}
+
+static __rte_always_inline void *
+dpaa_sec_raw_dequeue(void *qp_data, uint8_t *drv_ctx, int *dequeue_status,
+	enum rte_crypto_op_status *op_status)
+{
+	RTE_SET_USED(qp_data);
+	RTE_SET_USED(drv_ctx);
+	RTE_SET_USED(dequeue_status);
+	RTE_SET_USED(op_status);
+
+	return NULL;
+}
+
+int
+dpaa_sec_configure_raw_dp_ctx(struct rte_cryptodev *dev, uint16_t qp_id,
+	struct rte_crypto_raw_dp_ctx *raw_dp_ctx,
+	enum rte_crypto_op_sess_type sess_type,
+	union rte_cryptodev_session_ctx session_ctx, uint8_t is_update)
+{
+	dpaa_sec_session *sess;
+	struct dpaa_sec_raw_dp_ctx *dp_ctx;
+	RTE_SET_USED(qp_id);
+
+	if (!is_update) {
+		memset(raw_dp_ctx, 0, sizeof(*raw_dp_ctx));
+		raw_dp_ctx->qp_data = dev->data->queue_pairs[qp_id];
+	}
+
+	if (sess_type == RTE_CRYPTO_OP_SECURITY_SESSION)
+		sess = (dpaa_sec_session *)get_sec_session_private_data(
+				session_ctx.sec_sess);
+	else if (sess_type == RTE_CRYPTO_OP_WITH_SESSION)
+		sess = (dpaa_sec_session *)get_sym_session_private_data(
+			session_ctx.crypto_sess, dpaa_cryptodev_driver_id);
+	else
+		return -ENOTSUP;
+	raw_dp_ctx->dequeue_burst = dpaa_sec_raw_dequeue_burst;
+	raw_dp_ctx->dequeue = dpaa_sec_raw_dequeue;
+	raw_dp_ctx->dequeue_done = dpaa_sec_raw_dequeue_done;
+	raw_dp_ctx->enqueue_burst = dpaa_sec_raw_enqueue_burst;
+	raw_dp_ctx->enqueue = dpaa_sec_raw_enqueue;
+	raw_dp_ctx->enqueue_done = dpaa_sec_raw_enqueue_done;
+
+	if (sess->ctxt == DPAA_SEC_CIPHER)
+		sess->build_raw_dp_fd = build_dpaa_raw_dp_cipher_fd;
+	else if (sess->ctxt == DPAA_SEC_AUTH)
+		sess->build_raw_dp_fd = build_dpaa_raw_dp_auth_fd;
+	else
+		return -ENOTSUP;
+	dp_ctx = (struct dpaa_sec_raw_dp_ctx *)raw_dp_ctx->drv_ctx_data;
+	dp_ctx->session = sess;
+
+	return 0;
+}
+
+int
+dpaa_sec_get_dp_ctx_size(__rte_unused struct rte_cryptodev *dev)
+{
+	return sizeof(struct dpaa_sec_raw_dp_ctx);
+}
diff --git a/drivers/crypto/dpaa_sec/meson.build b/drivers/crypto/dpaa_sec/meson.build
index 44fd60e5ae..f87ad6c7e7 100644
--- a/drivers/crypto/dpaa_sec/meson.build
+++ b/drivers/crypto/dpaa_sec/meson.build
@@ -1,5 +1,5 @@
 # SPDX-License-Identifier: BSD-3-Clause
-# Copyright 2018 NXP
+# Copyright 2018-2021 NXP
 
 if not is_linux
     build = false
@@ -7,7 +7,7 @@ if not is_linux
 endif
 
 deps += ['bus_dpaa', 'mempool_dpaa', 'security']
-sources = files('dpaa_sec.c')
+sources = files('dpaa_sec.c', 'dpaa_sec_raw_dp.c')
 
 includes += include_directories('../../bus/dpaa/include')
 includes += include_directories('../../common/dpaax')
-- 
2.17.1


^ permalink raw reply	[flat|nested] 115+ messages in thread

* [dpdk-dev] [PATCH v5 12/15] crypto/dpaa_sec: support authonly and chain with raw APIs
  2021-10-17 16:16       ` [dpdk-dev] [PATCH v5 00/15] crypto: add raw vector support in DPAAx Hemant Agrawal
                           ` (10 preceding siblings ...)
  2021-10-17 16:16         ` [dpdk-dev] [PATCH v5 11/15] crypto/dpaa_sec: support raw datapath APIs Hemant Agrawal
@ 2021-10-17 16:16         ` Hemant Agrawal
  2021-10-17 16:16         ` [dpdk-dev] [PATCH v5 13/15] crypto/dpaa_sec: support AEAD and proto " Hemant Agrawal
                           ` (3 subsequent siblings)
  15 siblings, 0 replies; 115+ messages in thread
From: Hemant Agrawal @ 2021-10-17 16:16 UTC (permalink / raw)
  To: dev, gakhil; +Cc: konstantin.ananyev, roy.fan.zhang, Gagandeep Singh

From: Gagandeep Singh <g.singh@nxp.com>

This patch improves the raw vector support in the dpaa_sec driver
for the auth-only and chain use cases.
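
For the chain case, a non-zero auth-only region (data authenticated but
not ciphered) is signalled to SEC through the FD command word, which is
why the FD is now passed down to the build callbacks. A sketch of that
packing (mirroring build_dpaa_raw_dp_chain_fd() below):

    uint16_t auth_hdr_len  = ofs.ofs.cipher.head - ofs.ofs.auth.head;
    uint16_t auth_tail_len = ofs.ofs.auth.tail;
    uint32_t auth_only_len = (auth_tail_len << 16) | auth_hdr_len;

    if (auth_only_len)
            fd->cmd = 0x80000000 | auth_only_len;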

Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
---
 drivers/crypto/dpaa_sec/dpaa_sec.h        |   3 +-
 drivers/crypto/dpaa_sec/dpaa_sec_raw_dp.c | 296 +++++++++++++++++++++-
 2 files changed, 287 insertions(+), 12 deletions(-)

diff --git a/drivers/crypto/dpaa_sec/dpaa_sec.h b/drivers/crypto/dpaa_sec/dpaa_sec.h
index 77288cd1eb..7890687828 100644
--- a/drivers/crypto/dpaa_sec/dpaa_sec.h
+++ b/drivers/crypto/dpaa_sec/dpaa_sec.h
@@ -135,7 +135,8 @@ typedef struct dpaa_sec_job* (*dpaa_sec_build_raw_dp_fd_t)(uint8_t *drv_ctx,
 			struct rte_crypto_va_iova_ptr *digest,
 			struct rte_crypto_va_iova_ptr *auth_iv,
 			union rte_crypto_sym_ofs ofs,
-			void *userdata);
+			void *userdata,
+			struct qm_fd *fd);
 
 typedef struct dpaa_sec_session_entry {
 	struct sec_cdb cdb;	/**< cmd block associated with qp */
diff --git a/drivers/crypto/dpaa_sec/dpaa_sec_raw_dp.c b/drivers/crypto/dpaa_sec/dpaa_sec_raw_dp.c
index 7376da4cbc..03ce21e53f 100644
--- a/drivers/crypto/dpaa_sec/dpaa_sec_raw_dp.c
+++ b/drivers/crypto/dpaa_sec/dpaa_sec_raw_dp.c
@@ -12,6 +12,7 @@
 #endif
 
 /* RTA header files */
+#include <desc/algo.h>
 #include <desc/ipsec.h>
 
 #include <rte_dpaa_bus.h>
@@ -26,6 +27,17 @@ struct dpaa_sec_raw_dp_ctx {
 	uint16_t cached_dequeue;
 };
 
+static inline int
+is_encode(dpaa_sec_session *ses)
+{
+	return ses->dir == DIR_ENC;
+}
+
+static inline int is_decode(dpaa_sec_session *ses)
+{
+	return ses->dir == DIR_DEC;
+}
+
 static __rte_always_inline int
 dpaa_sec_raw_enqueue_done(void *qp_data, uint8_t *drv_ctx, uint32_t n)
 {
@@ -82,18 +94,276 @@ build_dpaa_raw_dp_auth_fd(uint8_t *drv_ctx,
 			struct rte_crypto_va_iova_ptr *digest,
 			struct rte_crypto_va_iova_ptr *auth_iv,
 			union rte_crypto_sym_ofs ofs,
-			void *userdata)
+			void *userdata,
+			struct qm_fd *fd)
 {
-	RTE_SET_USED(drv_ctx);
-	RTE_SET_USED(sgl);
 	RTE_SET_USED(dest_sgl);
 	RTE_SET_USED(iv);
-	RTE_SET_USED(digest);
 	RTE_SET_USED(auth_iv);
-	RTE_SET_USED(ofs);
-	RTE_SET_USED(userdata);
+	RTE_SET_USED(fd);
 
-	return NULL;
+	dpaa_sec_session *ses =
+		((struct dpaa_sec_raw_dp_ctx *)drv_ctx)->session;
+	struct dpaa_sec_job *cf;
+	struct dpaa_sec_op_ctx *ctx;
+	struct qm_sg_entry *sg, *out_sg, *in_sg;
+	phys_addr_t start_addr;
+	uint8_t *old_digest, extra_segs;
+	int data_len, data_offset, total_len = 0;
+	unsigned int i;
+
+	for (i = 0; i < sgl->num; i++)
+		total_len += sgl->vec[i].len;
+
+	data_len = total_len - ofs.ofs.auth.head - ofs.ofs.auth.tail;
+	data_offset =  ofs.ofs.auth.head;
+
+	/* Only lengths in bits are supported for SNOW3G and ZUC */
+
+	if (is_decode(ses))
+		extra_segs = 3;
+	else
+		extra_segs = 2;
+
+	if (sgl->num > MAX_SG_ENTRIES) {
+		DPAA_SEC_DP_ERR("Auth: Max sec segs supported is %d",
+				MAX_SG_ENTRIES);
+		return NULL;
+	}
+	ctx = dpaa_sec_alloc_raw_ctx(ses, sgl->num * 2 + extra_segs);
+	if (!ctx)
+		return NULL;
+
+	cf = &ctx->job;
+	ctx->userdata = (void *)userdata;
+	old_digest = ctx->digest;
+
+	/* output */
+	out_sg = &cf->sg[0];
+	qm_sg_entry_set64(out_sg, digest->iova);
+	out_sg->length = ses->digest_length;
+	cpu_to_hw_sg(out_sg);
+
+	/* input */
+	in_sg = &cf->sg[1];
+	/* need to extend the input to a compound frame */
+	in_sg->extension = 1;
+	in_sg->final = 1;
+	in_sg->length = data_len;
+	qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(&cf->sg[2]));
+
+	/* 1st seg */
+	sg = in_sg + 1;
+
+	if (ses->iv.length) {
+		uint8_t *iv_ptr;
+
+		iv_ptr = rte_crypto_op_ctod_offset(userdata, uint8_t *,
+						   ses->iv.offset);
+
+		if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
+			iv_ptr = conv_to_snow_f9_iv(iv_ptr);
+			sg->length = 12;
+		} else if (ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
+			iv_ptr = conv_to_zuc_eia_iv(iv_ptr);
+			sg->length = 8;
+		} else {
+			sg->length = ses->iv.length;
+		}
+		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(iv_ptr));
+		in_sg->length += sg->length;
+		cpu_to_hw_sg(sg);
+		sg++;
+	}
+
+	qm_sg_entry_set64(sg, sgl->vec[0].iova);
+	sg->offset = data_offset;
+
+	if (data_len <= (int)(sgl->vec[0].len - data_offset)) {
+		sg->length = data_len;
+	} else {
+		sg->length = sgl->vec[0].len - data_offset;
+
+		/* remaining i/p segs */
+		for (i = 1; i < sgl->num; i++) {
+			cpu_to_hw_sg(sg);
+			sg++;
+			qm_sg_entry_set64(sg, sgl->vec[i].iova);
+			if (data_len > (int)sgl->vec[i].len)
+				sg->length = sgl->vec[i].len;
+			else
+				sg->length = data_len;
+
+			data_len = data_len - sg->length;
+			if (data_len < 1)
+				break;
+		}
+	}
+
+	if (is_decode(ses)) {
+		/* Digest verification case */
+		cpu_to_hw_sg(sg);
+		sg++;
+		rte_memcpy(old_digest, digest->va,
+				ses->digest_length);
+		start_addr = rte_dpaa_mem_vtop(old_digest);
+		qm_sg_entry_set64(sg, start_addr);
+		sg->length = ses->digest_length;
+		in_sg->length += ses->digest_length;
+	}
+	sg->final = 1;
+	cpu_to_hw_sg(sg);
+	cpu_to_hw_sg(in_sg);
+
+	return cf;
+}
+
+static inline struct dpaa_sec_job *
+build_dpaa_raw_dp_chain_fd(uint8_t *drv_ctx,
+			struct rte_crypto_sgl *sgl,
+			struct rte_crypto_sgl *dest_sgl,
+			struct rte_crypto_va_iova_ptr *iv,
+			struct rte_crypto_va_iova_ptr *digest,
+			struct rte_crypto_va_iova_ptr *auth_iv,
+			union rte_crypto_sym_ofs ofs,
+			void *userdata,
+			struct qm_fd *fd)
+{
+	RTE_SET_USED(auth_iv);
+
+	dpaa_sec_session *ses =
+		((struct dpaa_sec_raw_dp_ctx *)drv_ctx)->session;
+	struct dpaa_sec_job *cf;
+	struct dpaa_sec_op_ctx *ctx;
+	struct qm_sg_entry *sg, *out_sg, *in_sg;
+	uint8_t *IV_ptr = iv->va;
+	unsigned int i;
+	uint16_t auth_hdr_len = ofs.ofs.cipher.head -
+				ofs.ofs.auth.head;
+	uint16_t auth_tail_len = ofs.ofs.auth.tail;
+	uint32_t auth_only_len = (auth_tail_len << 16) | auth_hdr_len;
+	int data_len = 0, auth_len = 0, cipher_len = 0;
+
+	for (i = 0; i < sgl->num; i++)
+		data_len += sgl->vec[i].len;
+
+	cipher_len = data_len - ofs.ofs.cipher.head - ofs.ofs.cipher.tail;
+	auth_len = data_len - ofs.ofs.auth.head - ofs.ofs.auth.tail;
+
+	if (sgl->num > MAX_SG_ENTRIES) {
+		DPAA_SEC_DP_ERR("Cipher-Auth: Max sec segs supported is %d",
+				MAX_SG_ENTRIES);
+		return NULL;
+	}
+
+	ctx = dpaa_sec_alloc_raw_ctx(ses, sgl->num * 2 + 4);
+	if (!ctx)
+		return NULL;
+
+	cf = &ctx->job;
+	ctx->userdata = (void *)userdata;
+
+	rte_prefetch0(cf->sg);
+
+	/* output */
+	out_sg = &cf->sg[0];
+	out_sg->extension = 1;
+	if (is_encode(ses))
+		out_sg->length = cipher_len + ses->digest_length;
+	else
+		out_sg->length = cipher_len;
+
+	/* output sg entries */
+	sg = &cf->sg[2];
+	qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(sg));
+	cpu_to_hw_sg(out_sg);
+
+	/* 1st seg */
+	if (dest_sgl) {
+		qm_sg_entry_set64(sg, dest_sgl->vec[0].iova);
+		sg->length = dest_sgl->vec[0].len - ofs.ofs.cipher.head;
+		sg->offset = ofs.ofs.cipher.head;
+
+		/* Successive segs */
+		for (i = 1; i < dest_sgl->num; i++) {
+			cpu_to_hw_sg(sg);
+			sg++;
+			qm_sg_entry_set64(sg, dest_sgl->vec[i].iova);
+			sg->length = dest_sgl->vec[i].len;
+		}
+	} else {
+		qm_sg_entry_set64(sg, sgl->vec[0].iova);
+		sg->length = sgl->vec[0].len - ofs.ofs.cipher.head;
+		sg->offset = ofs.ofs.cipher.head;
+
+		/* Successive segs */
+		for (i = 1; i < sgl->num; i++) {
+			cpu_to_hw_sg(sg);
+			sg++;
+			qm_sg_entry_set64(sg, sgl->vec[i].iova);
+			sg->length = sgl->vec[i].len;
+		}
+	}
+
+	if (is_encode(ses)) {
+		cpu_to_hw_sg(sg);
+		/* set auth output */
+		sg++;
+		qm_sg_entry_set64(sg, digest->iova);
+		sg->length = ses->digest_length;
+	}
+	sg->final = 1;
+	cpu_to_hw_sg(sg);
+
+	/* input */
+	in_sg = &cf->sg[1];
+	in_sg->extension = 1;
+	in_sg->final = 1;
+	if (is_encode(ses))
+		in_sg->length = ses->iv.length + auth_len;
+	else
+		in_sg->length = ses->iv.length + auth_len
+						+ ses->digest_length;
+
+	/* input sg entries */
+	sg++;
+	qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));
+	cpu_to_hw_sg(in_sg);
+
+	/* 1st seg IV */
+	qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
+	sg->length = ses->iv.length;
+	cpu_to_hw_sg(sg);
+
+	/* 2 seg */
+	sg++;
+	qm_sg_entry_set64(sg, sgl->vec[0].iova);
+	sg->length = sgl->vec[0].len - ofs.ofs.auth.head;
+	sg->offset = ofs.ofs.auth.head;
+
+	/* Successive segs */
+	for (i = 1; i < sgl->num; i++) {
+		cpu_to_hw_sg(sg);
+		sg++;
+		qm_sg_entry_set64(sg, sgl->vec[i].iova);
+		sg->length = sgl->vec[i].len;
+	}
+
+	if (is_decode(ses)) {
+		cpu_to_hw_sg(sg);
+		sg++;
+		memcpy(ctx->digest, digest->va,
+			ses->digest_length);
+		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(ctx->digest));
+		sg->length = ses->digest_length;
+	}
+	sg->final = 1;
+	cpu_to_hw_sg(sg);
+
+	if (auth_only_len)
+		fd->cmd = 0x80000000 | auth_only_len;
+
+	return cf;
 }
 
 static struct dpaa_sec_job *
@@ -104,10 +374,13 @@ build_dpaa_raw_dp_cipher_fd(uint8_t *drv_ctx,
 			struct rte_crypto_va_iova_ptr *digest,
 			struct rte_crypto_va_iova_ptr *auth_iv,
 			union rte_crypto_sym_ofs ofs,
-			void *userdata)
+			void *userdata,
+			struct qm_fd *fd)
 {
 	RTE_SET_USED(digest);
 	RTE_SET_USED(auth_iv);
+	RTE_SET_USED(fd);
+
 	dpaa_sec_session *ses =
 		((struct dpaa_sec_raw_dp_ctx *)drv_ctx)->session;
 	struct dpaa_sec_job *cf;
@@ -264,15 +537,14 @@ dpaa_sec_raw_enqueue_burst(void *qp_data, uint8_t *drv_ctx,
 						&vec->digest[loop],
 						&vec->auth_iv[loop],
 						ofs,
-						user_data[loop]);
+						user_data[loop],
+						fd);
 			if (!cf) {
 				DPAA_SEC_ERR("error: Improper packet contents"
 					" for crypto operation");
 				goto skip_tx;
 			}
 			inq[loop] = ses->inq[rte_lcore_id() % MAX_DPAA_CORES];
-			fd->opaque_addr = 0;
-			fd->cmd = 0;
 			qm_fd_addr_set64(fd, rte_dpaa_mem_vtop(cf->sg));
 			fd->_format1 = qm_fd_compound;
 			fd->length29 = 2 * sizeof(struct qm_sg_entry);
@@ -470,6 +742,8 @@ dpaa_sec_configure_raw_dp_ctx(struct rte_cryptodev *dev, uint16_t qp_id,
 		sess->build_raw_dp_fd = build_dpaa_raw_dp_cipher_fd;
 	else if (sess->ctxt == DPAA_SEC_AUTH)
 		sess->build_raw_dp_fd = build_dpaa_raw_dp_auth_fd;
+	else if (sess->ctxt == DPAA_SEC_CIPHER_HASH)
+		sess->build_raw_dp_fd = build_dpaa_raw_dp_chain_fd;
 	else
 		return -ENOTSUP;
 	dp_ctx = (struct dpaa_sec_raw_dp_ctx *)raw_dp_ctx->drv_ctx_data;
-- 
2.17.1


^ permalink raw reply	[flat|nested] 115+ messages in thread

* [dpdk-dev] [PATCH v5 13/15] crypto/dpaa_sec: support AEAD and proto with raw APIs
  2021-10-17 16:16       ` [dpdk-dev] [PATCH v5 00/15] crypto: add raw vector support in DPAAx Hemant Agrawal
                           ` (11 preceding siblings ...)
  2021-10-17 16:16         ` [dpdk-dev] [PATCH v5 12/15] crypto/dpaa_sec: support authonly and chain with raw APIs Hemant Agrawal
@ 2021-10-17 16:16         ` Hemant Agrawal
  2021-10-17 16:16         ` [dpdk-dev] [PATCH v5 14/15] test/crypto: add raw API test for dpaax Hemant Agrawal
                           ` (2 subsequent siblings)
  15 siblings, 0 replies; 115+ messages in thread
From: Hemant Agrawal @ 2021-10-17 16:16 UTC (permalink / raw)
  To: dev, gakhil; +Cc: konstantin.ananyev, roy.fan.zhang, Gagandeep Singh

From: Gagandeep Singh <g.singh@nxp.com>

This adds support for AEAD and protocol offload with raw APIs
for the dpaa_sec driver.
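
A small sketch of the SG-entry budget used for the AEAD per-op context
(mirroring build_raw_cipher_auth_gcm_sg() below; the reading of the four
extra entries is our interpretation, not stated by the patch):

    /* two compound-frame entries, the IV entry, and a spare for the
     * digest copy kept in flight on decrypt; AAD adds one more
     */
    uint8_t extra_req_segs = 4;

    if (ses->auth_only_len)
            extra_req_segs++;

    ctx = dpaa_sec_alloc_raw_ctx(ses, sgl->num * 2 + extra_req_segs);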

Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
---
 drivers/crypto/dpaa_sec/dpaa_sec_raw_dp.c | 293 ++++++++++++++++++++++
 1 file changed, 293 insertions(+)

diff --git a/drivers/crypto/dpaa_sec/dpaa_sec_raw_dp.c b/drivers/crypto/dpaa_sec/dpaa_sec_raw_dp.c
index 03ce21e53f..522685f8cf 100644
--- a/drivers/crypto/dpaa_sec/dpaa_sec_raw_dp.c
+++ b/drivers/crypto/dpaa_sec/dpaa_sec_raw_dp.c
@@ -218,6 +218,163 @@ build_dpaa_raw_dp_auth_fd(uint8_t *drv_ctx,
 	return cf;
 }
 
+static inline struct dpaa_sec_job *
+build_raw_cipher_auth_gcm_sg(uint8_t *drv_ctx,
+			struct rte_crypto_sgl *sgl,
+			struct rte_crypto_sgl *dest_sgl,
+			struct rte_crypto_va_iova_ptr *iv,
+			struct rte_crypto_va_iova_ptr *digest,
+			struct rte_crypto_va_iova_ptr *auth_iv,
+			union rte_crypto_sym_ofs ofs,
+			void *userdata,
+			struct qm_fd *fd)
+{
+	dpaa_sec_session *ses =
+		((struct dpaa_sec_raw_dp_ctx *)drv_ctx)->session;
+	struct dpaa_sec_job *cf;
+	struct dpaa_sec_op_ctx *ctx;
+	struct qm_sg_entry *sg, *out_sg, *in_sg;
+	uint8_t extra_req_segs;
+	uint8_t *IV_ptr = iv->va;
+	int data_len = 0, aead_len = 0;
+	unsigned int i;
+
+	for (i = 0; i < sgl->num; i++)
+		data_len += sgl->vec[i].len;
+
+	extra_req_segs = 4;
+	aead_len = data_len - ofs.ofs.cipher.head - ofs.ofs.cipher.tail;
+
+	if (ses->auth_only_len)
+		extra_req_segs++;
+
+	if (sgl->num > MAX_SG_ENTRIES) {
+		DPAA_SEC_DP_ERR("AEAD: Max sec segs supported is %d",
+				MAX_SG_ENTRIES);
+		return NULL;
+	}
+
+	ctx = dpaa_sec_alloc_raw_ctx(ses,  sgl->num * 2 + extra_req_segs);
+	if (!ctx)
+		return NULL;
+
+	cf = &ctx->job;
+	ctx->userdata = (void *)userdata;
+
+	rte_prefetch0(cf->sg);
+
+	/* output */
+	out_sg = &cf->sg[0];
+	out_sg->extension = 1;
+	if (is_encode(ses))
+		out_sg->length = aead_len + ses->digest_length;
+	else
+		out_sg->length = aead_len;
+
+	/* output sg entries */
+	sg = &cf->sg[2];
+	qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(sg));
+	cpu_to_hw_sg(out_sg);
+
+	if (dest_sgl) {
+		/* 1st seg */
+		qm_sg_entry_set64(sg, dest_sgl->vec[0].iova);
+		sg->length = dest_sgl->vec[0].len - ofs.ofs.cipher.head;
+		sg->offset = ofs.ofs.cipher.head;
+
+		/* Successive segs */
+		for (i = 1; i < dest_sgl->num; i++) {
+			cpu_to_hw_sg(sg);
+			sg++;
+			qm_sg_entry_set64(sg, dest_sgl->vec[i].iova);
+			sg->length = dest_sgl->vec[i].len;
+		}
+	} else {
+		/* 1st seg */
+		qm_sg_entry_set64(sg, sgl->vec[0].iova);
+		sg->length = sgl->vec[0].len - ofs.ofs.cipher.head;
+		sg->offset = ofs.ofs.cipher.head;
+
+		/* Successive segs */
+		for (i = 1; i < sgl->num; i++) {
+			cpu_to_hw_sg(sg);
+			sg++;
+			qm_sg_entry_set64(sg, sgl->vec[i].iova);
+			sg->length = sgl->vec[i].len;
+		}
+
+	}
+
+	if (is_encode(ses)) {
+		cpu_to_hw_sg(sg);
+		/* set auth output */
+		sg++;
+		qm_sg_entry_set64(sg, digest->iova);
+		sg->length = ses->digest_length;
+	}
+	sg->final = 1;
+	cpu_to_hw_sg(sg);
+
+	/* input */
+	in_sg = &cf->sg[1];
+	in_sg->extension = 1;
+	in_sg->final = 1;
+	if (is_encode(ses))
+		in_sg->length = ses->iv.length + aead_len
+						+ ses->auth_only_len;
+	else
+		in_sg->length = ses->iv.length + aead_len
+				+ ses->auth_only_len + ses->digest_length;
+
+	/* input sg entries */
+	sg++;
+	qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));
+	cpu_to_hw_sg(in_sg);
+
+	/* 1st seg IV */
+	qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
+	sg->length = ses->iv.length;
+	cpu_to_hw_sg(sg);
+
+	/* 2nd seg auth only */
+	if (ses->auth_only_len) {
+		sg++;
+		qm_sg_entry_set64(sg, auth_iv->iova);
+		sg->length = ses->auth_only_len;
+		cpu_to_hw_sg(sg);
+	}
+
+	/* 3rd seg */
+	sg++;
+	qm_sg_entry_set64(sg, sgl->vec[0].iova);
+	sg->length = sgl->vec[0].len - ofs.ofs.cipher.head;
+	sg->offset = ofs.ofs.cipher.head;
+
+	/* Successive segs */
+	for (i = 1; i < sgl->num; i++) {
+		cpu_to_hw_sg(sg);
+		sg++;
+		qm_sg_entry_set64(sg, sgl->vec[i].iova);
+		sg->length =  sgl->vec[i].len;
+	}
+
+	if (is_decode(ses)) {
+		cpu_to_hw_sg(sg);
+		sg++;
+		memcpy(ctx->digest, digest->va,
+			ses->digest_length);
+		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(ctx->digest));
+		sg->length = ses->digest_length;
+	}
+	sg->final = 1;
+	cpu_to_hw_sg(sg);
+
+	if (ses->auth_only_len)
+		fd->cmd = 0x80000000 | ses->auth_only_len;
+
+	return cf;
+}
+
 static inline struct dpaa_sec_job *
 build_dpaa_raw_dp_chain_fd(uint8_t *drv_ctx,
 			struct rte_crypto_sgl *sgl,
@@ -484,6 +641,135 @@ build_dpaa_raw_dp_cipher_fd(uint8_t *drv_ctx,
 	return cf;
 }
 
+#ifdef RTE_LIBRTE_SECURITY
+static inline struct dpaa_sec_job *
+build_dpaa_raw_proto_sg(uint8_t *drv_ctx,
+			struct rte_crypto_sgl *sgl,
+			struct rte_crypto_sgl *dest_sgl,
+			struct rte_crypto_va_iova_ptr *iv,
+			struct rte_crypto_va_iova_ptr *digest,
+			struct rte_crypto_va_iova_ptr *auth_iv,
+			union rte_crypto_sym_ofs ofs,
+			void *userdata,
+			struct qm_fd *fd)
+{
+	RTE_SET_USED(iv);
+	RTE_SET_USED(digest);
+	RTE_SET_USED(auth_iv);
+	RTE_SET_USED(ofs);
+
+	dpaa_sec_session *ses =
+		((struct dpaa_sec_raw_dp_ctx *)drv_ctx)->session;
+	struct dpaa_sec_job *cf;
+	struct dpaa_sec_op_ctx *ctx;
+	struct qm_sg_entry *sg, *out_sg, *in_sg;
+	uint32_t in_len = 0, out_len = 0;
+	unsigned int i;
+
+	if (sgl->num > MAX_SG_ENTRIES) {
+		DPAA_SEC_DP_ERR("Proto: Max sec segs supported is %d",
+				MAX_SG_ENTRIES);
+		return NULL;
+	}
+
+	ctx = dpaa_sec_alloc_raw_ctx(ses, sgl->num * 2 + 4);
+	if (!ctx)
+		return NULL;
+	cf = &ctx->job;
+	ctx->userdata = (void *)userdata;
+	/* output */
+	out_sg = &cf->sg[0];
+	out_sg->extension = 1;
+	qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(&cf->sg[2]));
+
+	if (dest_sgl) {
+		/* 1st seg */
+		sg = &cf->sg[2];
+		qm_sg_entry_set64(sg, dest_sgl->vec[0].iova);
+		sg->offset = 0;
+		sg->length = dest_sgl->vec[0].len;
+		out_len += sg->length;
+
+		for (i = 1; i < dest_sgl->num; i++) {
+		/* Successive segs */
+			cpu_to_hw_sg(sg);
+			sg++;
+			qm_sg_entry_set64(sg, dest_sgl->vec[i].iova);
+			sg->offset = 0;
+			sg->length = dest_sgl->vec[i].len;
+			out_len += sg->length;
+		}
+		sg->length = dest_sgl->vec[i - 1].tot_len;
+	} else {
+		/* 1st seg */
+		sg = &cf->sg[2];
+		qm_sg_entry_set64(sg, sgl->vec[0].iova);
+		sg->offset = 0;
+		sg->length = sgl->vec[0].len;
+		out_len += sg->length;
+
+		for (i = 1; i < sgl->num; i++) {
+		/* Successive segs */
+			cpu_to_hw_sg(sg);
+			sg++;
+			qm_sg_entry_set64(sg, sgl->vec[i].iova);
+			sg->offset = 0;
+			sg->length = sgl->vec[i].len;
+			out_len += sg->length;
+		}
+		sg->length = sgl->vec[i - 1].tot_len;
+
+	}
+	out_len += sg->length;
+	sg->final = 1;
+	cpu_to_hw_sg(sg);
+
+	out_sg->length = out_len;
+	cpu_to_hw_sg(out_sg);
+
+	/* input */
+	in_sg = &cf->sg[1];
+	in_sg->extension = 1;
+	in_sg->final = 1;
+	in_len = sgl->vec[0].len;
+
+	sg++;
+	qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));
+
+	/* 1st seg */
+	qm_sg_entry_set64(sg, sgl->vec[0].iova);
+	sg->length = sgl->vec[0].len;
+	sg->offset = 0;
+
+	/* Successive segs */
+	for (i = 1; i < sgl->num; i++) {
+		cpu_to_hw_sg(sg);
+		sg++;
+		qm_sg_entry_set64(sg, sgl->vec[i].iova);
+		sg->length = sgl->vec[i].len;
+		sg->offset = 0;
+		in_len += sg->length;
+	}
+	sg->final = 1;
+	cpu_to_hw_sg(sg);
+
+	in_sg->length = in_len;
+	cpu_to_hw_sg(in_sg);
+
+	if ((ses->ctxt == DPAA_SEC_PDCP) && ses->pdcp.hfn_ovd) {
+		fd->cmd = 0x80000000 |
+			*((uint32_t *)((uint8_t *)userdata +
+			ses->pdcp.hfn_ovd_offset));
+		DPAA_SEC_DP_DEBUG("Per packet HFN: %x, ovd:%u\n",
+			*((uint32_t *)((uint8_t *)userdata +
+			ses->pdcp.hfn_ovd_offset)),
+			ses->pdcp.hfn_ovd);
+	}
+
+	return cf;
+}
+#endif
+
 static uint32_t
 dpaa_sec_raw_enqueue_burst(void *qp_data, uint8_t *drv_ctx,
 	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
@@ -744,6 +1030,13 @@ dpaa_sec_configure_raw_dp_ctx(struct rte_cryptodev *dev, uint16_t qp_id,
 		sess->build_raw_dp_fd = build_dpaa_raw_dp_auth_fd;
 	else if (sess->ctxt == DPAA_SEC_CIPHER_HASH)
 		sess->build_raw_dp_fd = build_dpaa_raw_dp_chain_fd;
+	else if (sess->ctxt == DPAA_SEC_AEAD)
+		sess->build_raw_dp_fd = build_raw_cipher_auth_gcm_sg;
+#ifdef RTE_LIBRTE_SECURITY
+	else if (sess->ctxt == DPAA_SEC_IPSEC ||
+			sess->ctxt == DPAA_SEC_PDCP)
+		sess->build_raw_dp_fd = build_dpaa_raw_proto_sg;
+#endif
 	else
 		return -ENOTSUP;
 	dp_ctx = (struct dpaa_sec_raw_dp_ctx *)raw_dp_ctx->drv_ctx_data;
-- 
2.17.1


^ permalink raw reply	[flat|nested] 115+ messages in thread

* [dpdk-dev] [PATCH v5 14/15] test/crypto: add raw API test for dpaax
  2021-10-17 16:16       ` [dpdk-dev] [PATCH v5 00/15] crypto: add raw vector support in DPAAx Hemant Agrawal
                           ` (12 preceding siblings ...)
  2021-10-17 16:16         ` [dpdk-dev] [PATCH v5 13/15] crypto/dpaa_sec: support AEAD and proto " Hemant Agrawal
@ 2021-10-17 16:16         ` Hemant Agrawal
  2021-10-20  9:08           ` Thomas Monjalon
  2021-10-17 16:16         ` [dpdk-dev] [PATCH v5 15/15] test/crypto: add raw API support in 5G algos Hemant Agrawal
  2021-10-17 17:59         ` [dpdk-dev] [EXT] [PATCH v5 00/15] crypto: add raw vector support in DPAAx Akhil Goyal
  15 siblings, 1 reply; 115+ messages in thread
From: Hemant Agrawal @ 2021-10-17 16:16 UTC (permalink / raw)
  To: dev, gakhil; +Cc: konstantin.ananyev, roy.fan.zhang, Gagandeep Singh

This patch adds support for raw API tests for
dpaa_sec and dpaa2_sec platforms.

Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
Signed-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>
---
 app/test/test_cryptodev.c | 116 +++++++++++++++++++++++++++++++++++---
 1 file changed, 109 insertions(+), 7 deletions(-)

diff --git a/app/test/test_cryptodev.c b/app/test/test_cryptodev.c
index 4778dafbe5..73c64301f7 100644
--- a/app/test/test_cryptodev.c
+++ b/app/test/test_cryptodev.c
@@ -186,11 +186,11 @@ process_sym_raw_dp_op(uint8_t dev_id, uint16_t qp_id,
 {
 	struct rte_crypto_sym_op *sop = op->sym;
 	struct rte_crypto_op *ret_op = NULL;
-	struct rte_crypto_vec data_vec[UINT8_MAX];
+	struct rte_crypto_vec data_vec[UINT8_MAX], dest_data_vec[UINT8_MAX];
 	struct rte_crypto_va_iova_ptr cipher_iv, digest, aad_auth_iv;
 	union rte_crypto_sym_ofs ofs;
 	struct rte_crypto_sym_vec vec;
-	struct rte_crypto_sgl sgl;
+	struct rte_crypto_sgl sgl, dest_sgl;
 	uint32_t max_len;
 	union rte_cryptodev_session_ctx sess;
 	uint32_t count = 0;
@@ -326,6 +326,19 @@ process_sym_raw_dp_op(uint8_t dev_id, uint16_t qp_id,
 	}
 
 	sgl.num = n;
+	/* Out of place */
+	if (sop->m_dst != NULL) {
+		dest_sgl.vec = dest_data_vec;
+		vec.dest_sgl = &dest_sgl;
+		n = rte_crypto_mbuf_to_vec(sop->m_dst, 0, max_len,
+				dest_data_vec, RTE_DIM(dest_data_vec));
+		if (n < 0 || n > sop->m_dst->nb_segs) {
+			op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+			goto exit;
+		}
+		dest_sgl.num = n;
+	} else
+		vec.dest_sgl = NULL;
 
 	if (rte_cryptodev_raw_enqueue_burst(ctx, &vec, ofs, (void **)&op,
 			&enqueue_status) < 1) {
@@ -8381,10 +8394,21 @@ test_pdcp_proto_SGL(int i, int oop,
 	int to_trn_tbl[16];
 	int segs = 1;
 	unsigned int trn_data = 0;
+	struct rte_cryptodev_info dev_info;
+	uint64_t feat_flags;
 	struct rte_security_ctx *ctx = (struct rte_security_ctx *)
 				rte_cryptodev_get_sec_ctx(
 				ts_params->valid_devs[0]);
+	struct rte_mbuf *temp_mbuf;
+
+	rte_cryptodev_info_get(ts_params->valid_devs[0], &dev_info);
+	feat_flags = dev_info.feature_flags;
 
+	if ((global_api_test_type == CRYPTODEV_RAW_API_TEST) &&
+			(!(feat_flags & RTE_CRYPTODEV_FF_SYM_RAW_DP))) {
+		printf("Device does not support RAW data-path APIs.\n");
+		return -ENOTSUP;
+	}
 	/* Verify the capabilities */
 	struct rte_security_capability_idx sec_cap_idx;
 
@@ -8568,8 +8592,23 @@ test_pdcp_proto_SGL(int i, int oop,
 		ut_params->op->sym->m_dst = ut_params->obuf;
 
 	/* Process crypto operation */
-	if (process_crypto_request(ts_params->valid_devs[0], ut_params->op)
-		== NULL) {
+	temp_mbuf = ut_params->op->sym->m_src;
+	if (global_api_test_type == CRYPTODEV_RAW_API_TEST) {
+		/* filling lengths */
+		while (temp_mbuf) {
+			ut_params->op->sym->cipher.data.length
+				+= temp_mbuf->pkt_len;
+			ut_params->op->sym->auth.data.length
+				+= temp_mbuf->pkt_len;
+			temp_mbuf = temp_mbuf->next;
+		}
+		process_sym_raw_dp_op(ts_params->valid_devs[0], 0,
+			ut_params->op, 1, 1, 0, 0);
+	} else {
+		ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+							ut_params->op);
+	}
+	if (ut_params->op == NULL) {
 		printf("TestCase %s()-%d line %d failed %s: ",
 			__func__, i, __LINE__,
 			"failed to process sym crypto op");
@@ -10450,6 +10489,7 @@ test_authenticated_encryption_oop(const struct aead_test_data *tdata)
 	int retval;
 	uint8_t *ciphertext, *auth_tag;
 	uint16_t plaintext_pad_len;
+	struct rte_cryptodev_info dev_info;
 
 	/* Verify the capabilities */
 	struct rte_cryptodev_sym_capability_idx cap_idx;
@@ -10459,7 +10499,11 @@ test_authenticated_encryption_oop(const struct aead_test_data *tdata)
 			&cap_idx) == NULL)
 		return TEST_SKIPPED;
 
-	if (global_api_test_type == CRYPTODEV_RAW_API_TEST)
+	rte_cryptodev_info_get(ts_params->valid_devs[0], &dev_info);
+	uint64_t feat_flags = dev_info.feature_flags;
+
+	if ((global_api_test_type == CRYPTODEV_RAW_API_TEST) &&
+			(!(feat_flags & RTE_CRYPTODEV_FF_SYM_RAW_DP)))
 		return TEST_SKIPPED;
 
 	/* not supported with CPU crypto */
@@ -10496,7 +10540,11 @@ test_authenticated_encryption_oop(const struct aead_test_data *tdata)
 	ut_params->op->sym->m_dst = ut_params->obuf;
 
 	/* Process crypto operation */
-	TEST_ASSERT_NOT_NULL(process_crypto_request(ts_params->valid_devs[0],
+	if (global_api_test_type == CRYPTODEV_RAW_API_TEST)
+		process_sym_raw_dp_op(ts_params->valid_devs[0], 0,
+			ut_params->op, 0, 0, 0, 0);
+	else
+		TEST_ASSERT_NOT_NULL(process_crypto_request(ts_params->valid_devs[0],
 			ut_params->op), "failed to process sym crypto op");
 
 	TEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,
@@ -10542,6 +10590,10 @@ test_authenticated_decryption_oop(const struct aead_test_data *tdata)
 
 	int retval;
 	uint8_t *plaintext;
+	struct rte_cryptodev_info dev_info;
+
+	rte_cryptodev_info_get(ts_params->valid_devs[0], &dev_info);
+	uint64_t feat_flags = dev_info.feature_flags;
 
 	/* Verify the capabilities */
 	struct rte_cryptodev_sym_capability_idx cap_idx;
@@ -10556,6 +10608,12 @@ test_authenticated_decryption_oop(const struct aead_test_data *tdata)
 			global_api_test_type == CRYPTODEV_RAW_API_TEST)
 		return TEST_SKIPPED;
 
+	if ((global_api_test_type == CRYPTODEV_RAW_API_TEST) &&
+			(!(feat_flags & RTE_CRYPTODEV_FF_SYM_RAW_DP))) {
+		printf("Device does not support RAW data-path APIs.\n");
+		return TEST_SKIPPED;
+	}
+
 	/* Create AEAD session */
 	retval = create_aead_session(ts_params->valid_devs[0],
 			tdata->algo,
@@ -10586,7 +10644,11 @@ test_authenticated_decryption_oop(const struct aead_test_data *tdata)
 	ut_params->op->sym->m_dst = ut_params->obuf;
 
 	/* Process crypto operation */
-	TEST_ASSERT_NOT_NULL(process_crypto_request(ts_params->valid_devs[0],
+	if (global_api_test_type == CRYPTODEV_RAW_API_TEST)
+		process_sym_raw_dp_op(ts_params->valid_devs[0], 0,
+				ut_params->op, 0, 0, 0, 0);
+	else
+		TEST_ASSERT_NOT_NULL(process_crypto_request(ts_params->valid_devs[0],
 			ut_params->op), "failed to process sym crypto op");
 
 	TEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,
@@ -15434,6 +15496,46 @@ test_cryptodev_cn10k(void)
 	return run_cryptodev_testsuite(RTE_STR(CRYPTODEV_NAME_CN10K_PMD));
 }
 
+static int
+test_cryptodev_dpaa2_sec_raw_api(void)
+{
+	static const char *pmd_name = RTE_STR(CRYPTODEV_NAME_DPAA2_SEC_PMD);
+	int ret;
+
+	ret = require_feature_flag(pmd_name, RTE_CRYPTODEV_FF_SYM_RAW_DP,
+			"RAW API");
+	if (ret)
+		return ret;
+
+	global_api_test_type = CRYPTODEV_RAW_API_TEST;
+	ret = run_cryptodev_testsuite(pmd_name);
+	global_api_test_type = CRYPTODEV_API_TEST;
+
+	return ret;
+}
+
+static int
+test_cryptodev_dpaa_sec_raw_api(void)
+{
+	static const char *pmd_name = RTE_STR(CRYPTODEV_NAME_DPAA_SEC_PMD);
+	int ret;
+
+	ret = require_feature_flag(pmd_name, RTE_CRYPTODEV_FF_SYM_RAW_DP,
+			"RAW API");
+	if (ret)
+		return ret;
+
+	global_api_test_type = CRYPTODEV_RAW_API_TEST;
+	ret = run_cryptodev_testsuite(pmd_name);
+	global_api_test_type = CRYPTODEV_API_TEST;
+
+	return ret;
+}
+
+REGISTER_TEST_COMMAND(cryptodev_dpaa2_sec_raw_api_autotest,
+		test_cryptodev_dpaa2_sec_raw_api);
+REGISTER_TEST_COMMAND(cryptodev_dpaa_sec_raw_api_autotest,
+		test_cryptodev_dpaa_sec_raw_api);
 REGISTER_TEST_COMMAND(cryptodev_qat_raw_api_autotest,
 		test_cryptodev_qat_raw_api);
 REGISTER_TEST_COMMAND(cryptodev_qat_autotest, test_cryptodev_qat);
-- 
2.17.1


^ permalink raw reply	[flat|nested] 115+ messages in thread

* [dpdk-dev] [PATCH v5 15/15] test/crypto: add raw API support in 5G algos
  2021-10-17 16:16       ` [dpdk-dev] [PATCH v5 00/15] crypto: add raw vector support in DPAAx Hemant Agrawal
                           ` (13 preceding siblings ...)
  2021-10-17 16:16         ` [dpdk-dev] [PATCH v5 14/15] test/crypto: add raw API test for dpaax Hemant Agrawal
@ 2021-10-17 16:16         ` Hemant Agrawal
  2021-10-17 17:59         ` [dpdk-dev] [EXT] [PATCH v5 00/15] crypto: add raw vector support in DPAAx Akhil Goyal
  15 siblings, 0 replies; 115+ messages in thread
From: Hemant Agrawal @ 2021-10-17 16:16 UTC (permalink / raw)
  To: dev, gakhil; +Cc: konstantin.ananyev, roy.fan.zhang, Gagandeep Singh

This patch adds support for RAW API testing with ZUC
and SNOW test cases.

Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
Signed-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>
---
 app/test/test_cryptodev.c | 57 ++++++++++++++++++++++++++++++++++-----
 1 file changed, 51 insertions(+), 6 deletions(-)

diff --git a/app/test/test_cryptodev.c b/app/test/test_cryptodev.c
index 73c64301f7..01af4cfbd2 100644
--- a/app/test/test_cryptodev.c
+++ b/app/test/test_cryptodev.c
@@ -379,6 +379,7 @@ process_sym_raw_dp_op(uint8_t dev_id, uint16_t qp_id,
 	}
 
 	op->status = (count == MAX_RAW_DEQUEUE_COUNT + 1 || ret_op != op ||
+			ret_op->status == RTE_CRYPTO_OP_STATUS_ERROR ||
 			n_success < 1) ? RTE_CRYPTO_OP_STATUS_ERROR :
 					RTE_CRYPTO_OP_STATUS_SUCCESS;
 
@@ -4210,6 +4211,16 @@ test_snow3g_encryption_oop(const struct snow3g_test_data *tdata)
 	int retval;
 	unsigned plaintext_pad_len;
 	unsigned plaintext_len;
+	struct rte_cryptodev_info dev_info;
+
+	rte_cryptodev_info_get(ts_params->valid_devs[0], &dev_info);
+	uint64_t feat_flags = dev_info.feature_flags;
+
+	if ((global_api_test_type == CRYPTODEV_RAW_API_TEST) &&
+			(!(feat_flags & RTE_CRYPTODEV_FF_SYM_RAW_DP))) {
+		printf("Device does not support RAW data-path APIs.\n");
+		return -ENOTSUP;
+	}
 
 	/* Verify the capabilities */
 	struct rte_cryptodev_sym_capability_idx cap_idx;
@@ -4265,7 +4276,11 @@ test_snow3g_encryption_oop(const struct snow3g_test_data *tdata)
 	if (retval < 0)
 		return retval;
 
-	ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+	if (global_api_test_type == CRYPTODEV_RAW_API_TEST)
+		process_sym_raw_dp_op(ts_params->valid_devs[0], 0,
+			ut_params->op, 1, 0, 1, tdata->cipher_iv.len);
+	else
+		ut_params->op = process_crypto_request(ts_params->valid_devs[0],
 						ut_params->op);
 	TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
 
@@ -4325,6 +4340,12 @@ test_snow3g_encryption_oop_sgl(const struct snow3g_test_data *tdata)
 		return TEST_SKIPPED;
 	}
 
+	if ((global_api_test_type == CRYPTODEV_RAW_API_TEST) &&
+			(!(feat_flags & RTE_CRYPTODEV_FF_SYM_RAW_DP))) {
+		printf("Device does not support RAW data-path APIs.\n");
+		return -ENOTSUP;
+	}
+
 	/* Create SNOW 3G session */
 	retval = create_wireless_algo_cipher_session(ts_params->valid_devs[0],
 					RTE_CRYPTO_CIPHER_OP_ENCRYPT,
@@ -4359,7 +4380,11 @@ test_snow3g_encryption_oop_sgl(const struct snow3g_test_data *tdata)
 	if (retval < 0)
 		return retval;
 
-	ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+	if (global_api_test_type == CRYPTODEV_RAW_API_TEST)
+		process_sym_raw_dp_op(ts_params->valid_devs[0], 0,
+			ut_params->op, 1, 0, 1, tdata->cipher_iv.len);
+	else
+		ut_params->op = process_crypto_request(ts_params->valid_devs[0],
 						ut_params->op);
 	TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
 
@@ -4486,7 +4511,11 @@ test_snow3g_encryption_offset_oop(const struct snow3g_test_data *tdata)
 	if (retval < 0)
 		return retval;
 
-	ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+	if (global_api_test_type == CRYPTODEV_RAW_API_TEST)
+		process_sym_raw_dp_op(ts_params->valid_devs[0], 0,
+			ut_params->op, 1, 0, 1, tdata->cipher_iv.len);
+	else
+		ut_params->op = process_crypto_request(ts_params->valid_devs[0],
 						ut_params->op);
 	TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
 
@@ -4617,7 +4646,16 @@ static int test_snow3g_decryption_oop(const struct snow3g_test_data *tdata)
 	uint8_t *plaintext, *ciphertext;
 	unsigned ciphertext_pad_len;
 	unsigned ciphertext_len;
+	struct rte_cryptodev_info dev_info;
+
+	rte_cryptodev_info_get(ts_params->valid_devs[0], &dev_info);
+	uint64_t feat_flags = dev_info.feature_flags;
 
+	if ((global_api_test_type == CRYPTODEV_RAW_API_TEST) &&
+			(!(feat_flags & RTE_CRYPTODEV_FF_SYM_RAW_DP))) {
+		printf("Device does not support RAW data-path APIs.\n");
+		return -ENOTSUP;
+	}
 	/* Verify the capabilities */
 	struct rte_cryptodev_sym_capability_idx cap_idx;
 	cap_idx.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
@@ -4675,7 +4713,11 @@ static int test_snow3g_decryption_oop(const struct snow3g_test_data *tdata)
 	if (retval < 0)
 		return retval;
 
-	ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+	if (global_api_test_type == CRYPTODEV_RAW_API_TEST)
+		process_sym_raw_dp_op(ts_params->valid_devs[0], 0,
+			ut_params->op, 1, 0, 1, tdata->cipher_iv.len);
+	else
+		ut_params->op = process_crypto_request(ts_params->valid_devs[0],
 						ut_params->op);
 	TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
 	ut_params->obuf = ut_params->op->sym->m_dst;
@@ -12997,10 +13039,13 @@ test_authentication_verify_fail_when_data_corruption(
 	else {
 		ut_params->op = process_crypto_request(ts_params->valid_devs[0],
 			ut_params->op);
-		TEST_ASSERT_NULL(ut_params->op, "authentication not failed");
 	}
+	if (ut_params->op == NULL)
+		return 0;
+	else if (ut_params->op->status != RTE_CRYPTO_OP_STATUS_SUCCESS)
+		return 0;
 
-	return 0;
+	return -1;
 }
 
 static int
-- 
2.17.1


^ permalink raw reply	[flat|nested] 115+ messages in thread

* Re: [dpdk-dev] [EXT] [PATCH v5 00/15] crypto: add raw vector support in DPAAx
  2021-10-17 16:16       ` [dpdk-dev] [PATCH v5 00/15] crypto: add raw vector support in DPAAx Hemant Agrawal
                           ` (14 preceding siblings ...)
  2021-10-17 16:16         ` [dpdk-dev] [PATCH v5 15/15] test/crypto: add raw API support in 5G algos Hemant Agrawal
@ 2021-10-17 17:59         ` Akhil Goyal
  2021-10-18  7:33           ` Zhang, Roy Fan
  15 siblings, 1 reply; 115+ messages in thread
From: Akhil Goyal @ 2021-10-17 17:59 UTC (permalink / raw)
  To: Hemant Agrawal, dev, roy.fan.zhang; +Cc: konstantin.ananyev

> This patch series adds support for raw vector API in dpaax_sec drivers This
> also enhances the raw vector APIs to support OOP and security protocol
> support.
> 
> v2: fix aesni compilation and add release notes.
> v3: ignore it
> v4: fix the tot_length patch as per Konstantin's comments
> v5: address comments from Akhil/Konstantin - update rel notes
> 

Series Acked-by: Akhil Goyal <gakhil@marvell.com>

Modified release notes and patch title/description while applying.

Applied to dpdk-next-crypto

Thanks.

@Fan: This series may cause issues in QAT raw crypto testing if it does not support wireless algos for the raw data path.

^ permalink raw reply	[flat|nested] 115+ messages in thread

* Re: [dpdk-dev] [EXT] [PATCH v5 00/15] crypto: add raw vector support in DPAAx
  2021-10-17 17:59         ` [dpdk-dev] [EXT] [PATCH v5 00/15] crypto: add raw vector support in DPAAx Akhil Goyal
@ 2021-10-18  7:33           ` Zhang, Roy Fan
  0 siblings, 0 replies; 115+ messages in thread
From: Zhang, Roy Fan @ 2021-10-18  7:33 UTC (permalink / raw)
  To: Akhil Goyal, Hemant Agrawal, dev; +Cc: Ananyev, Konstantin

Hi Akhil,

We have been prepared for that.
For this release we will update the QAT PMD to return -ENOTSUP if we find that the dst sgl is not NULL or not equal to the src sgl.
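
Roughly, it would be just a guard in the raw data path, e.g. (a sketch only,
using the rte_crypto_sym_vec fields from this series; where exactly it lands
in the QAT code is still to be decided):

	/* sketch: QAT raw data path still processes in place only */
	if (vec->dest_sgl != NULL && vec->dest_sgl != vec->src_sgl)
		return -ENOTSUP;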

Regards,
Fan

> -----Original Message-----
> From: Akhil Goyal <gakhil@marvell.com>
> Sent: Sunday, October 17, 2021 6:59 PM
> To: Hemant Agrawal <hemant.agrawal@nxp.com>; dev@dpdk.org; Zhang,
> Roy Fan <roy.fan.zhang@intel.com>
> Cc: Ananyev, Konstantin <konstantin.ananyev@intel.com>
> Subject: RE: [EXT] [PATCH v5 00/15] crypto: add raw vector support in DPAAx
> 
> > This patch series adds support for raw vector API in dpaax_sec drivers This
> > also enhances the raw vector APIs to support OOP and security protocol
> > support.
> >
> > v2: fix aesni compilation and add release notes.
> > v3: ignore it
> > v4: fix the tot_length patch as per Konstantin's comments
> > v5: address comments from Akhil/Konstantin - update rel notes
> >
> 
> Series Acked-by: Akhil Goyal <gakhil@marvell.com>
> 
> Modified release notes and patch title/description while applying.
> 
> Applied to dpdk-next-crypto
> 
> Thanks.
> 
> @Fan: This series may cause issue in QAT raw crypto testing if it does not
> support wireless algos for raw data path.

^ permalink raw reply	[flat|nested] 115+ messages in thread

* Re: [dpdk-dev] [PATCH v5 14/15] test/crypto: add raw API test for dpaax
  2021-10-17 16:16         ` [dpdk-dev] [PATCH v5 14/15] test/crypto: add raw API test for dpaax Hemant Agrawal
@ 2021-10-20  9:08           ` Thomas Monjalon
  2021-10-20  9:15             ` [dpdk-dev] [EXT] " Akhil Goyal
  0 siblings, 1 reply; 115+ messages in thread
From: Thomas Monjalon @ 2021-10-20  9:08 UTC (permalink / raw)
  To: gakhil, roy.fan.zhang, Gagandeep Singh, Hemant Agrawal
  Cc: dev, konstantin.ananyev

17/10/2021 18:16, Hemant Agrawal:
> This patch add support for raw API tests for
> dpaa_sec and dpaa2_sec platforms.

Why do we have tests specific to some drivers?
Is there a plan to remove them?

> +REGISTER_TEST_COMMAND(cryptodev_dpaa2_sec_raw_api_autotest,
> +		test_cryptodev_dpaa2_sec_raw_api);
> +REGISTER_TEST_COMMAND(cryptodev_dpaa_sec_raw_api_autotest,
> +		test_cryptodev_dpaa_sec_raw_api);
>  REGISTER_TEST_COMMAND(cryptodev_qat_raw_api_autotest,
>  		test_cryptodev_qat_raw_api);
>  REGISTER_TEST_COMMAND(cryptodev_qat_autotest, test_cryptodev_qat);




^ permalink raw reply	[flat|nested] 115+ messages in thread

* Re: [dpdk-dev] [EXT] Re: [PATCH v5 14/15] test/crypto: add raw API test for dpaax
  2021-10-20  9:08           ` Thomas Monjalon
@ 2021-10-20  9:15             ` Akhil Goyal
  2021-10-20  9:21               ` Thomas Monjalon
  0 siblings, 1 reply; 115+ messages in thread
From: Akhil Goyal @ 2021-10-20  9:15 UTC (permalink / raw)
  To: Thomas Monjalon, roy.fan.zhang, Gagandeep Singh, Hemant Agrawal
  Cc: dev, konstantin.ananyev

> 17/10/2021 18:16, Hemant Agrawal:
> > This patch add support for raw API tests for
> > dpaa_sec and dpaa2_sec platforms.
> 
> Why do we have tests specific to some drivers?
> Is there a plan to remove them?
> 

The test suites are common and there is no driver-specific test.
The test command is different for each PMD;
that is why one is registered per PMD.
For the raw data path APIs, the whole test suite is run with a global flag set.
Currently only three PMDs support the raw APIs; we can get rid of this global flag
in future if more PMDs start supporting these APIs.
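
Concretely, each per-PMD raw command is just a thin wrapper that flips the
global flag around the common suite (this is what patch 14/15 above does;
pmd_name and ret as in that patch):

	/* per-PMD raw API command: run the common suite with the flag set */
	global_api_test_type = CRYPTODEV_RAW_API_TEST;
	ret = run_cryptodev_testsuite(pmd_name);
	global_api_test_type = CRYPTODEV_API_TEST;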

> > +REGISTER_TEST_COMMAND(cryptodev_dpaa2_sec_raw_api_autotest,
> > +		test_cryptodev_dpaa2_sec_raw_api);
> > +REGISTER_TEST_COMMAND(cryptodev_dpaa_sec_raw_api_autotest,
> > +		test_cryptodev_dpaa_sec_raw_api);
> >  REGISTER_TEST_COMMAND(cryptodev_qat_raw_api_autotest,
> >  		test_cryptodev_qat_raw_api);
> >  REGISTER_TEST_COMMAND(cryptodev_qat_autotest,
> test_cryptodev_qat);
> 
> 


^ permalink raw reply	[flat|nested] 115+ messages in thread

* Re: [dpdk-dev] [EXT] Re: [PATCH v5 14/15] test/crypto: add raw API test for dpaax
  2021-10-20  9:15             ` [dpdk-dev] [EXT] " Akhil Goyal
@ 2021-10-20  9:21               ` Thomas Monjalon
  2021-10-20  9:32                 ` Akhil Goyal
  0 siblings, 1 reply; 115+ messages in thread
From: Thomas Monjalon @ 2021-10-20  9:21 UTC (permalink / raw)
  To: roy.fan.zhang, Gagandeep Singh, Hemant Agrawal, Akhil Goyal
  Cc: dev, konstantin.ananyev

20/10/2021 11:15, Akhil Goyal:
> > 17/10/2021 18:16, Hemant Agrawal:
> > > This patch add support for raw API tests for
> > > dpaa_sec and dpaa2_sec platforms.
> > 
> > Why do we have tests specific to some drivers?
> > Is there a plan to remove them?
> > 
> 
> The testsuites are common and there is no driver specific test.
> The test command is different for each of the PMD,
> that is why it is registered for each PMD.
> For Raw data path APIs, all of the testsuite is run with a global flag set.
> Currently only 3 PMDs support raw APIs, we can get rid of this global flag in future if more
> PMDs start supporting these APIs.

No, there is something wrong.
It shows that it is not generic enough for any app.
What is missing to make the same calls no matter the driver?
Do we need to add some capability flags?




^ permalink raw reply	[flat|nested] 115+ messages in thread

* Re: [dpdk-dev] [EXT] Re: [PATCH v5 14/15] test/crypto: add raw API test for dpaax
  2021-10-20  9:21               ` Thomas Monjalon
@ 2021-10-20  9:32                 ` Akhil Goyal
  2021-10-20 12:13                   ` Thomas Monjalon
  0 siblings, 1 reply; 115+ messages in thread
From: Akhil Goyal @ 2021-10-20  9:32 UTC (permalink / raw)
  To: Thomas Monjalon, roy.fan.zhang, Gagandeep Singh, Hemant Agrawal
  Cc: dev, konstantin.ananyev

> 
> 20/10/2021 11:15, Akhil Goyal:
> > > 17/10/2021 18:16, Hemant Agrawal:
> > > > This patch add support for raw API tests for
> > > > dpaa_sec and dpaa2_sec platforms.
> > >
> > > Why do we have tests specific to some drivers?
> > > Is there a plan to remove them?
> > >
> >
> > The testsuites are common and there is no driver specific test.
> > The test command is different for each of the PMD,
> > that is why it is registered for each PMD.
> > For Raw data path APIs, all of the testsuite is run with a global flag set.
> > Currently only 3 PMDs support raw APIs, we can get rid of this global flag in
> future if more
> > PMDs start supporting these APIs.
> 
> No there is something wrong.
> It shows that it is not generic enough for any app.
> What is missing to make the same calls no matter the driver?
> Do we need to add some capability flags?

Capability flags are there for the raw data path APIs, but a PMD can support both APIs.
And we need to test both data paths.
So for this we have a global variable to enable the raw data path: we register a new
command for the PMD and set that global flag while doing so.
The tests, however, have the capability-flag checks in place, but we decide to enable the
raw APIs only when the PMD supports them and that global flag is set.
I hope it is clear now.

^ permalink raw reply	[flat|nested] 115+ messages in thread

* Re: [dpdk-dev] [EXT] Re: [PATCH v5 14/15] test/crypto: add raw API test for dpaax
  2021-10-20  9:32                 ` Akhil Goyal
@ 2021-10-20 12:13                   ` Thomas Monjalon
  2021-10-20 12:25                     ` Akhil Goyal
  2021-10-20 12:43                     ` Zhang, Roy Fan
  0 siblings, 2 replies; 115+ messages in thread
From: Thomas Monjalon @ 2021-10-20 12:13 UTC (permalink / raw)
  To: roy.fan.zhang, Gagandeep Singh, Hemant Agrawal, Akhil Goyal
  Cc: dev, konstantin.ananyev, aconole, david.marchand

20/10/2021 11:32, Akhil Goyal:
> > 20/10/2021 11:15, Akhil Goyal:
> > > > 17/10/2021 18:16, Hemant Agrawal:
> > > > > This patch add support for raw API tests for
> > > > > dpaa_sec and dpaa2_sec platforms.
> > > >
> > > > Why do we have tests specific to some drivers?
> > > > Is there a plan to remove them?
> > > >
> > >
> > > The testsuites are common and there is no driver specific test.
> > > The test command is different for each of the PMD,
> > > that is why it is registered for each PMD.
> > > For Raw data path APIs, all of the testsuite is run with a global flag set.
> > > Currently only 3 PMDs support raw APIs, we can get rid of this global flag in
> > future if more
> > > PMDs start supporting these APIs.
> > 
> > No there is something wrong.
> > It shows that it is not generic enough for any app.
> > What is missing to make the same calls no matter the driver?
> > Do we need to add some capability flags?
> 
> Capability flags are there for raw data path APIs but the PMD can support both APIs.
> And we need to test both data paths.
> So for this we have a global variable to enable raw data path and we register a new
> Command for the PMD and enable that global flag while doing that.
> The tests, however have the capability flags checks in place but we decide to enable 
> Raw APIs only when the PMD support that and that global flag is set.
> I hope it is clear now.

No, sorry, it is not clear.
How may I know whether the raw API is supported in a PMD?
If there is such info, there should be no need for specific tests?



^ permalink raw reply	[flat|nested] 115+ messages in thread

* Re: [dpdk-dev] [EXT] Re: [PATCH v5 14/15] test/crypto: add raw API test for dpaax
  2021-10-20 12:13                   ` Thomas Monjalon
@ 2021-10-20 12:25                     ` Akhil Goyal
  2021-10-20 12:43                     ` Zhang, Roy Fan
  1 sibling, 0 replies; 115+ messages in thread
From: Akhil Goyal @ 2021-10-20 12:25 UTC (permalink / raw)
  To: Thomas Monjalon, roy.fan.zhang, Gagandeep Singh, Hemant Agrawal
  Cc: dev, konstantin.ananyev, aconole, david.marchand

> 20/10/2021 11:32, Akhil Goyal:
> > > 20/10/2021 11:15, Akhil Goyal:
> > > > > 17/10/2021 18:16, Hemant Agrawal:
> > > > > > This patch add support for raw API tests for
> > > > > > dpaa_sec and dpaa2_sec platforms.
> > > > >
> > > > > Why do we have tests specific to some drivers?
> > > > > Is there a plan to remove them?
> > > > >
> > > >
> > > > The testsuites are common and there is no driver specific test.
> > > > The test command is different for each of the PMD,
> > > > that is why it is registered for each PMD.
> > > > For Raw data path APIs, all of the testsuite is run with a global flag set.
> > > > Currently only 3 PMDs support raw APIs, we can get rid of this global
> flag in
> > > future if more
> > > > PMDs start supporting these APIs.
> > >
> > > No there is something wrong.
> > > It shows that it is not generic enough for any app.
> > > What is missing to make the same calls no matter the driver?
> > > Do we need to add some capability flags?
> >
> > Capability flags are there for raw data path APIs but the PMD can support
> both APIs.
> > And we need to test both data paths.
> > So for this we have a global variable to enable raw data path and we
> register a new
> > Command for the PMD and enable that global flag while doing that.
> > The tests, however have the capability flags checks in place but we decide
> to enable
> > Raw APIs only when the PMD support that and that global flag is set.
> > I hope it is clear now.
> 
> No sorry, it is not clear.
> How may I know that the raw API is supported in a PMD?
> If there is such info, no need for specific tests?
> 
RTE_CRYPTODEV_FF_SYM_RAW_DP is there to check whether the raw data path is supported.
But how can we test both the raw APIs and the normal crypto_op ones in the same app?
For that, a global variable in the app selects which of the two paths all of the test cases use.
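
Inside each test case the two are then combined, as in the hunks in patch
14/15 (dev_id and dev_info as in test_cryptodev.c):

	rte_cryptodev_info_get(dev_id, &dev_info);
	if ((global_api_test_type == CRYPTODEV_RAW_API_TEST) &&
			!(dev_info.feature_flags & RTE_CRYPTODEV_FF_SYM_RAW_DP)) {
		printf("Device does not support RAW data-path APIs.\n");
		return TEST_SKIPPED;
	}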

^ permalink raw reply	[flat|nested] 115+ messages in thread

* Re: [dpdk-dev] [EXT] Re: [PATCH v5 14/15] test/crypto: add raw API test for dpaax
  2021-10-20 12:13                   ` Thomas Monjalon
  2021-10-20 12:25                     ` Akhil Goyal
@ 2021-10-20 12:43                     ` Zhang, Roy Fan
  2021-10-20 13:34                       ` Thomas Monjalon
  1 sibling, 1 reply; 115+ messages in thread
From: Zhang, Roy Fan @ 2021-10-20 12:43 UTC (permalink / raw)
  To: Thomas Monjalon, Gagandeep Singh, Hemant Agrawal, Akhil Goyal
  Cc: dev, Ananyev, Konstantin, aconole, david.marchand

Hi Thomas,

The raw data path API tests take advantage of the existing cryptodev unit test
cases with one difference:
- in the cryptodev sym crypto unit tests, all data is described by both rte_mbufs
and rte_crypto_ops.
- in the raw data path API tests, the same data is converted from mbufs and crypto
ops into rte_crypto_sym_vec for the test (see the sketch below).
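
A trimmed sketch of what process_sym_raw_dp_op() in test_cryptodev.c does for
one op ('sop' is the sym op, 'max_len' the total data length; ofs, auth_iv and
status handling omitted):

	struct rte_crypto_vec data_vec[UINT8_MAX];
	struct rte_crypto_sgl sgl;
	struct rte_crypto_sym_vec vec;
	struct rte_crypto_va_iova_ptr cipher_iv, digest;
	int n;

	/* describe the source mbuf chain as a raw SGL */
	n = rte_crypto_mbuf_to_vec(sop->m_src, 0, max_len,
			data_vec, RTE_DIM(data_vec));
	if (n < 0 || n > sop->m_src->nb_segs)
		return -1;
	sgl.vec = data_vec;
	sgl.num = n;

	/* one descriptor per op; in place unless m_dst is set (patch 14/15) */
	vec.num = 1;
	vec.src_sgl = &sgl;
	vec.dest_sgl = NULL;
	vec.iv = &cipher_iv;
	vec.digest = &digest;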

However, for each test case we can only use either the crypto op way or the raw
data path API way to run a test. To distinguish which way to use there is a global
flag set by the test command. If you want to use the crypto op way to test all
cases you use one command; otherwise you use the other one.

What complicates things is that the cryptodev unit tests need to prepare the data
for every test. Once a test is finished the data is either encrypted or
decrypted and cannot be reused immediately.

Using only the device capability flag to cover both the crypto op tests and the
raw data path API tests in the same test function means each and every test
function in test_cryptodev.c needs to be expanded by ~30%, as all data needs to
be re-prepared for each test type. Also, the PMDs that do not support this test
type would show 100% more bypassed tests in the test result briefing.

That's the reason to have this global knob, so that each test function only has
to act slightly differently between the crypto op test and the raw data path API
test.

Regards,
Fan

> -----Original Message-----
> From: Thomas Monjalon <thomas@monjalon.net>
> Sent: Wednesday, October 20, 2021 1:14 PM
> To: Zhang, Roy Fan <roy.fan.zhang@intel.com>; Gagandeep Singh
> <g.singh@nxp.com>; Hemant Agrawal <hemant.agrawal@nxp.com>; Akhil
> Goyal <gakhil@marvell.com>
> Cc: dev@dpdk.org; Ananyev, Konstantin <konstantin.ananyev@intel.com>;
> aconole@redhat.com; david.marchand@redhat.com
> Subject: Re: [EXT] Re: [dpdk-dev] [PATCH v5 14/15] test/crypto: add raw API
> test for dpaax
> 
> 20/10/2021 11:32, Akhil Goyal:
> > > 20/10/2021 11:15, Akhil Goyal:
> > > > > 17/10/2021 18:16, Hemant Agrawal:
> > > > > > This patch add support for raw API tests for
> > > > > > dpaa_sec and dpaa2_sec platforms.
> > > > >
> > > > > Why do we have tests specific to some drivers?
> > > > > Is there a plan to remove them?
> > > > >
> > > >
> > > > The testsuites are common and there is no driver specific test.
> > > > The test command is different for each of the PMD,
> > > > that is why it is registered for each PMD.
> > > > For Raw data path APIs, all of the testsuite is run with a global flag set.
> > > > Currently only 3 PMDs support raw APIs, we can get rid of this global
> flag in
> > > future if more
> > > > PMDs start supporting these APIs.
> > >
> > > No there is something wrong.
> > > It shows that it is not generic enough for any app.
> > > What is missing to make the same calls no matter the driver?
> > > Do we need to add some capability flags?
> >
> > Capability flags are there for raw data path APIs but the PMD can support
> both APIs.
> > And we need to test both data paths.
> > So for this we have a global variable to enable raw data path and we
> register a new
> > Command for the PMD and enable that global flag while doing that.
> > The tests, however have the capability flags checks in place but we decide
> to enable
> > Raw APIs only when the PMD support that and that global flag is set.
> > I hope it is clear now.
> 
> No sorry, it is not clear.
> How may I know that the raw API is supported in a PMD?
> If there is such info, no need for specific tests?
> 


^ permalink raw reply	[flat|nested] 115+ messages in thread

* Re: [dpdk-dev] [EXT] Re: [PATCH v5 14/15] test/crypto: add raw API test for dpaax
  2021-10-20 12:43                     ` Zhang, Roy Fan
@ 2021-10-20 13:34                       ` Thomas Monjalon
  0 siblings, 0 replies; 115+ messages in thread
From: Thomas Monjalon @ 2021-10-20 13:34 UTC (permalink / raw)
  To: Gagandeep Singh, Hemant Agrawal, Akhil Goyal, Zhang, Roy Fan
  Cc: dev, Ananyev, Konstantin, aconole, david.marchand

Maybe I'm not clear enough.

What I don't understand (and don't want to see)
is specific functions for qat, ccp, nitrox or dpaa.
The test should not care about the driver name.



20/10/2021 14:43, Zhang, Roy Fan:
> Hi Thomas,
> 
> The raw data path API tests takes advantage of existing cryptodev unit test
> cases with one difference:
> - in cryptodev sym crypto unit tests, all data is described by both rte_mbufs
> and rte_crypto_ops.
> - in raw data path API tests, the same data is converted from mbufs and crypto
> ops into rte_crypto_sym_vec to test.
> 
> However for each test case we can only use either crypto op way or raw data
> path API way to run a test. To distinguish which way to use there is a global flag
> set by test command. If you want to use crypto op way to test all cases you
> use one command, otherwise with other command.
> 
> What complicated things is, cryptodev unit test needs to prepare the data
> for every test. Once the test is finished the data is either encrypted or
> decrypted and cannot be reused immediately.
> 
> Using only the device capability flag to cover both crypto op tests and raw data
> path api tests in the same test function means each and every test function in
> test_cryptodev.c needs to be expanded by ~30% as all data needs to be
> re-prepared again for each test type. Also for the PMDs that do not support this
> test type will be shown 100% more bypassed tests in the test result briefing.
> 
> That's the reason to have this global knob so each test function only have to act
> slight differently between crypto op test and raw data path API test.
> 
> Regards,
> Fan
> 
> > -----Original Message-----
> > From: Thomas Monjalon <thomas@monjalon.net>
> > Sent: Wednesday, October 20, 2021 1:14 PM
> > To: Zhang, Roy Fan <roy.fan.zhang@intel.com>; Gagandeep Singh
> > <g.singh@nxp.com>; Hemant Agrawal <hemant.agrawal@nxp.com>; Akhil
> > Goyal <gakhil@marvell.com>
> > Cc: dev@dpdk.org; Ananyev, Konstantin <konstantin.ananyev@intel.com>;
> > aconole@redhat.com; david.marchand@redhat.com
> > Subject: Re: [EXT] Re: [dpdk-dev] [PATCH v5 14/15] test/crypto: add raw API
> > test for dpaax
> > 
> > 20/10/2021 11:32, Akhil Goyal:
> > > > 20/10/2021 11:15, Akhil Goyal:
> > > > > > 17/10/2021 18:16, Hemant Agrawal:
> > > > > > > This patch add support for raw API tests for
> > > > > > > dpaa_sec and dpaa2_sec platforms.
> > > > > >
> > > > > > Why do we have tests specific to some drivers?
> > > > > > Is there a plan to remove them?
> > > > > >
> > > > >
> > > > > The testsuites are common and there is no driver specific test.
> > > > > The test command is different for each of the PMD,
> > > > > that is why it is registered for each PMD.
> > > > > For Raw data path APIs, all of the testsuite is run with a global flag set.
> > > > > Currently only 3 PMDs support raw APIs, we can get rid of this global
> > flag in
> > > > future if more
> > > > > PMDs start supporting these APIs.
> > > >
> > > > No there is something wrong.
> > > > It shows that it is not generic enough for any app.
> > > > What is missing to make the same calls no matter the driver?
> > > > Do we need to add some capability flags?
> > >
> > > Capability flags are there for raw data path APIs but the PMD can support
> > both APIs.
> > > And we need to test both data paths.
> > > So for this we have a global variable to enable raw data path and we
> > register a new
> > > Command for the PMD and enable that global flag while doing that.
> > > The tests, however have the capability flags checks in place but we decide
> > to enable
> > > Raw APIs only when the PMD support that and that global flag is set.
> > > I hope it is clear now.
> > 
> > No sorry, it is not clear.
> > How may I know that the raw API is supported in a PMD?
> > If there is such info, no need for specific tests?





^ permalink raw reply	[flat|nested] 115+ messages in thread

end of thread, other threads:[~2021-10-20 13:34 UTC | newest]

Thread overview: 115+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2021-08-12  7:12 [dpdk-dev] [RFC 00/16] crypto: add raw vector support in DPAAx Hemant Agrawal
2021-08-12  7:12 ` [dpdk-dev] [RFC 01/16] crypto: change sgl to src_sgl in vector Hemant Agrawal
2021-08-12  8:22   ` [dpdk-dev] [EXT] " Akhil Goyal
2021-08-12  7:12 ` [dpdk-dev] [RFC 02/16] crypto: add total raw buffer length Hemant Agrawal
2021-08-12  7:12 ` [dpdk-dev] [RFC 03/16] crypto: add dest_sgl in raw vector APIs Hemant Agrawal
2021-08-12  7:12 ` [dpdk-dev] [RFC 04/16] crypto: enhance raw process for protocol offload Hemant Agrawal
2021-08-12  8:11   ` [dpdk-dev] [EXT] " Akhil Goyal
2021-08-12  7:12 ` [dpdk-dev] [RFC 05/16] crypto/dpaa2_sec: support raw datapath APIs Hemant Agrawal
2021-08-12  7:12 ` [dpdk-dev] [RFC 06/16] crypto/dpaa2_sec: support AUTH only with raw buffer APIs Hemant Agrawal
2021-08-12  7:12 ` [dpdk-dev] [RFC 07/16] crypto/dpaa2_sec: support AUTHENC " Hemant Agrawal
2021-08-12  7:12 ` [dpdk-dev] [RFC 08/16] crypto/dpaa2_sec: support AEAD " Hemant Agrawal
2021-08-12  7:12 ` [dpdk-dev] [RFC 09/16] crypto/dpaa2_sec: support OOP with raw buffer API Hemant Agrawal
2021-08-12  7:12 ` [dpdk-dev] [RFC 10/16] crypto/dpaa2_sec: fix ctx memset size Hemant Agrawal
2021-08-12  7:12 ` [dpdk-dev] [RFC 11/16] crypto/dpaa2_sec: enhance error checks with raw buffer APIs Hemant Agrawal
2021-08-12  7:12 ` [dpdk-dev] [RFC 12/16] crypto/dpaa_sec: support raw datapath APIs Hemant Agrawal
2021-08-12  7:12 ` [dpdk-dev] [RFC 13/16] crypto/dpaa_sec: support authonly and chain with raw APIs Hemant Agrawal
2021-08-12  7:12 ` [dpdk-dev] [RFC 14/16] crypto/dpaa_sec: support AEAD and proto " Hemant Agrawal
2021-08-12  7:12 ` [dpdk-dev] [RFC 15/16] test/crypto: add raw API test for dpaax Hemant Agrawal
2021-08-12  7:12 ` [dpdk-dev] [RFC 16/16] test/crypto: enabling raw API support in 5G algos Hemant Agrawal
2021-08-25  7:14 ` [dpdk-dev] [PATCH 00/15] crypto: add raw vector support in DPAAx Hemant Agrawal
2021-08-25  7:14   ` [dpdk-dev] [PATCH 01/15] crypto: change sgl to src_sgl in vector Hemant Agrawal
2021-09-06 18:36     ` [dpdk-dev] [EXT] " Akhil Goyal
2021-08-25  7:14   ` [dpdk-dev] [PATCH 02/15] crypto: add total raw buffer length Hemant Agrawal
2021-09-06 18:36     ` [dpdk-dev] [EXT] " Akhil Goyal
2021-08-25  7:14   ` [dpdk-dev] [PATCH 03/15] crypto: add dest_sgl in raw vector APIs Hemant Agrawal
2021-09-06 18:37     ` [dpdk-dev] [EXT] " Akhil Goyal
2021-08-25  7:14   ` [dpdk-dev] [PATCH 04/15] crypto: fix raw process for multi-seg case Hemant Agrawal
2021-09-06 18:38     ` [dpdk-dev] [EXT] " Akhil Goyal
2021-08-25  7:15   ` [dpdk-dev] [PATCH 05/15] crypto/dpaa2_sec: support raw datapath APIs Hemant Agrawal
2021-08-25  7:15   ` [dpdk-dev] [PATCH 06/15] crypto/dpaa2_sec: support AUTH only with raw buffer APIs Hemant Agrawal
2021-08-25  7:15   ` [dpdk-dev] [PATCH 07/15] crypto/dpaa2_sec: support AUTHENC " Hemant Agrawal
2021-08-25  7:15   ` [dpdk-dev] [PATCH 08/15] crypto/dpaa2_sec: support AEAD " Hemant Agrawal
2021-08-25  7:15   ` [dpdk-dev] [PATCH 09/15] crypto/dpaa2_sec: support OOP with raw buffer API Hemant Agrawal
2021-08-25  7:15   ` [dpdk-dev] [PATCH 10/15] crypto/dpaa2_sec: enhance error checks with raw buffer APIs Hemant Agrawal
2021-08-25  7:15   ` [dpdk-dev] [PATCH 11/15] crypto/dpaa_sec: support raw datapath APIs Hemant Agrawal
2021-08-25  7:15   ` [dpdk-dev] [PATCH 12/15] crypto/dpaa_sec: support authonly and chain with raw APIs Hemant Agrawal
2021-08-25  7:15   ` [dpdk-dev] [PATCH 13/15] crypto/dpaa_sec: support AEAD and proto " Hemant Agrawal
2021-08-25  7:15   ` [dpdk-dev] [PATCH 14/15] test/crypto: add raw API test for dpaax Hemant Agrawal
2021-08-25  7:15   ` [dpdk-dev] [PATCH 15/15] test/crypto: add raw API support in 5G algos Hemant Agrawal
2021-09-06 18:45     ` [dpdk-dev] [EXT] " Akhil Goyal
2021-09-06 18:46   ` [dpdk-dev] [EXT] [PATCH 00/15] crypto: add raw vector support in DPAAx Akhil Goyal
2021-09-07  7:59   ` [dpdk-dev] [PATCH v2 " Hemant Agrawal
2021-09-07  7:59     ` [dpdk-dev] [PATCH v2 01/15] crypto: change sgl to src_sgl in vector Hemant Agrawal
2021-09-16 11:38       ` Ananyev, Konstantin
2021-09-07  7:59     ` [dpdk-dev] [PATCH v2 02/15] crypto: add total raw buffer length Hemant Agrawal
2021-09-16 11:42       ` Ananyev, Konstantin
2021-09-20 19:28         ` Akhil Goyal
2021-09-21  9:59           ` Hemant Agrawal
2021-09-21 10:04             ` Ananyev, Konstantin
2021-09-24 18:06               ` Akhil Goyal
2021-09-27  9:23                 ` Ananyev, Konstantin
2021-09-28  7:05                   ` Akhil Goyal
2021-09-28  8:20                     ` Hemant Agrawal
2021-09-07  7:59     ` [dpdk-dev] [PATCH v2 03/15] crypto: add dest_sgl in raw vector APIs Hemant Agrawal
2021-09-16 11:44       ` Ananyev, Konstantin
2021-09-07  7:59     ` [dpdk-dev] [PATCH v2 04/15] crypto: fix raw process for multi-seg case Hemant Agrawal
2021-09-07  7:59     ` [dpdk-dev] [PATCH v2 05/15] crypto/dpaa2_sec: support raw datapath APIs Hemant Agrawal
2021-09-07  7:59     ` [dpdk-dev] [PATCH v2 06/15] crypto/dpaa2_sec: support AUTH only with raw buffer APIs Hemant Agrawal
2021-09-07  7:59     ` [dpdk-dev] [PATCH v2 07/15] crypto/dpaa2_sec: support AUTHENC " Hemant Agrawal
2021-09-07  7:59     ` [dpdk-dev] [PATCH v2 08/15] crypto/dpaa2_sec: support AEAD " Hemant Agrawal
2021-09-07  7:59     ` [dpdk-dev] [PATCH v2 09/15] crypto/dpaa2_sec: support OOP with raw buffer API Hemant Agrawal
2021-09-07  7:59     ` [dpdk-dev] [PATCH v2 10/15] crypto/dpaa2_sec: enhance error checks with raw buffer APIs Hemant Agrawal
2021-09-07  7:59     ` [dpdk-dev] [PATCH v2 11/15] crypto/dpaa_sec: support raw datapath APIs Hemant Agrawal
2021-09-07  7:59     ` [dpdk-dev] [PATCH v2 12/15] crypto/dpaa_sec: support authonly and chain with raw APIs Hemant Agrawal
2021-09-07  7:59     ` [dpdk-dev] [PATCH v2 13/15] crypto/dpaa_sec: support AEAD and proto " Hemant Agrawal
2021-09-07  7:59     ` [dpdk-dev] [PATCH v2 14/15] test/crypto: add raw API test for dpaax Hemant Agrawal
2021-09-07  7:59     ` [dpdk-dev] [PATCH v2 15/15] test/crypto: add raw API support in 5G algos Hemant Agrawal
2021-09-15 16:01     ` [dpdk-dev] [PATCH v2 00/15] crypto: add raw vector support in DPAAx Troy, Rebecca
2021-10-13 19:00     ` [dpdk-dev] [PATCH v4 " Hemant Agrawal
2021-10-13 19:00       ` [dpdk-dev] [PATCH v4 01/15] crypto: change sgl to src_sgl in vector Hemant Agrawal
2021-10-13 19:00       ` [dpdk-dev] [PATCH v4 02/15] crypto: add total raw buffer length Hemant Agrawal
2021-10-14 12:39         ` Zhang, Roy Fan
2021-10-17  8:30           ` Hemant Agrawal
2021-10-15 17:45         ` Ananyev, Konstantin
2021-10-13 19:00       ` [dpdk-dev] [PATCH v4 03/15] crypto: add dest_sgl in raw vector APIs Hemant Agrawal
2021-10-17 12:21         ` [dpdk-dev] [EXT] " Akhil Goyal
2021-10-13 19:00       ` [dpdk-dev] [PATCH v4 04/15] crypto: fix raw process for multi-seg case Hemant Agrawal
2021-10-15 17:39         ` Ananyev, Konstantin
2021-10-13 19:00       ` [dpdk-dev] [PATCH v4 05/15] crypto/dpaa2_sec: support raw datapath APIs Hemant Agrawal
2021-10-13 19:00       ` [dpdk-dev] [PATCH v4 06/15] crypto/dpaa2_sec: support AUTH only with raw buffer APIs Hemant Agrawal
2021-10-13 19:00       ` [dpdk-dev] [PATCH v4 07/15] crypto/dpaa2_sec: support AUTHENC " Hemant Agrawal
2021-10-13 19:00       ` [dpdk-dev] [PATCH v4 08/15] crypto/dpaa2_sec: support AEAD " Hemant Agrawal
2021-10-13 19:00       ` [dpdk-dev] [PATCH v4 09/15] crypto/dpaa2_sec: support OOP with raw buffer API Hemant Agrawal
2021-10-13 19:00       ` [dpdk-dev] [PATCH v4 10/15] crypto/dpaa2_sec: enhance error checks with raw buffer APIs Hemant Agrawal
2021-10-13 19:00       ` [dpdk-dev] [PATCH v4 11/15] crypto/dpaa_sec: support raw datapath APIs Hemant Agrawal
2021-10-13 19:00       ` [dpdk-dev] [PATCH v4 12/15] crypto/dpaa_sec: support authonly and chain with raw APIs Hemant Agrawal
2021-10-13 19:00       ` [dpdk-dev] [PATCH v4 13/15] crypto/dpaa_sec: support AEAD and proto " Hemant Agrawal
2021-10-13 19:00       ` [dpdk-dev] [PATCH v4 14/15] test/crypto: add raw API test for dpaax Hemant Agrawal
2021-10-13 19:00       ` [dpdk-dev] [PATCH v4 15/15] test/crypto: add raw API support in 5G algos Hemant Agrawal
2021-10-17 16:16       ` [dpdk-dev] [PATCH v5 00/15] crypto: add raw vector support in DPAAx Hemant Agrawal
2021-10-17 16:16         ` [dpdk-dev] [PATCH v5 01/15] crypto: change sgl to src_sgl in vector Hemant Agrawal
2021-10-17 16:16         ` [dpdk-dev] [PATCH v5 02/15] crypto: add total raw buffer length Hemant Agrawal
2021-10-17 16:16         ` [dpdk-dev] [PATCH v5 03/15] crypto: add dest_sgl in raw vector APIs Hemant Agrawal
2021-10-17 16:16         ` [dpdk-dev] [PATCH v5 04/15] crypto: fix raw process for multi-seg case Hemant Agrawal
2021-10-17 16:16         ` [dpdk-dev] [PATCH v5 05/15] crypto/dpaa2_sec: support raw datapath APIs Hemant Agrawal
2021-10-17 16:16         ` [dpdk-dev] [PATCH v5 06/15] crypto/dpaa2_sec: support AUTH only with raw buffer APIs Hemant Agrawal
2021-10-17 16:16         ` [dpdk-dev] [PATCH v5 07/15] crypto/dpaa2_sec: support AUTHENC " Hemant Agrawal
2021-10-17 16:16         ` [dpdk-dev] [PATCH v5 08/15] crypto/dpaa2_sec: support AEAD " Hemant Agrawal
2021-10-17 16:16         ` [dpdk-dev] [PATCH v5 09/15] crypto/dpaa2_sec: support OOP with raw buffer API Hemant Agrawal
2021-10-17 16:16         ` [dpdk-dev] [PATCH v5 10/15] crypto/dpaa2_sec: enhance error checks with raw buffer APIs Hemant Agrawal
2021-10-17 16:16         ` [dpdk-dev] [PATCH v5 11/15] crypto/dpaa_sec: support raw datapath APIs Hemant Agrawal
2021-10-17 16:16         ` [dpdk-dev] [PATCH v5 12/15] crypto/dpaa_sec: support authonly and chain with raw APIs Hemant Agrawal
2021-10-17 16:16         ` [dpdk-dev] [PATCH v5 13/15] crypto/dpaa_sec: support AEAD and proto " Hemant Agrawal
2021-10-17 16:16         ` [dpdk-dev] [PATCH v5 14/15] test/crypto: add raw API test for dpaax Hemant Agrawal
2021-10-20  9:08           ` Thomas Monjalon
2021-10-20  9:15             ` [dpdk-dev] [EXT] " Akhil Goyal
2021-10-20  9:21               ` Thomas Monjalon
2021-10-20  9:32                 ` Akhil Goyal
2021-10-20 12:13                   ` Thomas Monjalon
2021-10-20 12:25                     ` Akhil Goyal
2021-10-20 12:43                     ` Zhang, Roy Fan
2021-10-20 13:34                       ` Thomas Monjalon
2021-10-17 16:16         ` [dpdk-dev] [PATCH v5 15/15] test/crypto: add raw API support in 5G algos Hemant Agrawal
2021-10-17 17:59         ` [dpdk-dev] [EXT] [PATCH v5 00/15] crypto: add raw vector support in DPAAx Akhil Goyal
2021-10-18  7:33           ` Zhang, Roy Fan

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).