From: Hemant Agrawal <hemant.agrawal@nxp.com>
To: dev@dpdk.org, gakhil@marvell.com
Cc: konstantin.ananyev@intel.com, roy.fan.zhang@intel.com,
	Gagandeep Singh <g.singh@nxp.com>
Subject: [dpdk-dev] [PATCH v4 11/15] crypto/dpaa_sec: support raw datapath APIs
Date: Thu, 14 Oct 2021 00:30:28 +0530	[thread overview]
Message-ID: <20211013190032.2308-12-hemant.agrawal@nxp.com> (raw)
In-Reply-To: <20211013190032.2308-1-hemant.agrawal@nxp.com>

From: Gagandeep Singh <g.singh@nxp.com>

This patch adds the raw vector datapath API framework to the dpaa_sec driver.
It wires up the sym_get_raw_dp_ctx_size and sym_configure_raw_dp_ctx driver
ops, advertises RTE_CRYPTODEV_FF_SYM_RAW_DP, and implements the raw
enqueue/dequeue burst path (cipher-only frame building for now).

Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
---
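Note (not part of the patch): a minimal usage sketch of how an application is
expected to drive the generic raw datapath API that this PMD now backs. It
assumes an already initialized dev_id/qp_id, a configured symmetric session
"sess" and a caller-prepared rte_crypto_sym_vec/rte_crypto_sym_ofs; the
function name is illustrative only, and the raw DP calls are experimental API
in this release. The context is freed right after enqueue only to keep the
sketch short; a real application keeps it alive for the matching
rte_cryptodev_raw_dequeue_burst().

#include <errno.h>
#include <stdlib.h>

#include <rte_cryptodev.h>
#include <rte_crypto_sym.h>

static int
raw_dp_enqueue_sketch(uint8_t dev_id, uint16_t qp_id,
		struct rte_cryptodev_sym_session *sess,
		struct rte_crypto_sym_vec *vec,
		union rte_crypto_sym_ofs ofs, void **user_data)
{
	union rte_cryptodev_session_ctx sess_ctx = { .crypto_sess = sess };
	struct rte_crypto_raw_dp_ctx *ctx;
	int size, enq_status = 0;
	uint32_t enq;

	/* Returned size covers the generic ctx plus the PMD private area */
	size = rte_cryptodev_get_raw_dp_ctx_size(dev_id);
	if (size < 0)
		return size; /* PMD lacks RTE_CRYPTODEV_FF_SYM_RAW_DP */

	ctx = calloc(1, size);
	if (ctx == NULL)
		return -ENOMEM;

	/* Bind the context to a queue pair and session (is_update = 0) */
	if (rte_cryptodev_configure_raw_dp_ctx(dev_id, qp_id, ctx,
			RTE_CRYPTO_OP_WITH_SESSION, sess_ctx, 0) < 0) {
		free(ctx);
		return -EINVAL;
	}

	/* Submit the vectors; user_data[] comes back as-is on dequeue */
	enq = rte_cryptodev_raw_enqueue_burst(ctx, vec, ofs,
			user_data, &enq_status);
	rte_cryptodev_raw_enqueue_done(ctx, enq);

	/* Illustration only: keep ctx until after dequeue in real code */
	free(ctx);
	return (int)enq;
}
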
 doc/guides/rel_notes/release_21_11.rst    |   1 +
 drivers/crypto/dpaa_sec/dpaa_sec.c        |  23 +-
 drivers/crypto/dpaa_sec/dpaa_sec.h        |  39 +-
 drivers/crypto/dpaa_sec/dpaa_sec_raw_dp.c | 485 ++++++++++++++++++++++
 drivers/crypto/dpaa_sec/meson.build       |   4 +-
 5 files changed, 538 insertions(+), 14 deletions(-)
 create mode 100644 drivers/crypto/dpaa_sec/dpaa_sec_raw_dp.c

diff --git a/doc/guides/rel_notes/release_21_11.rst b/doc/guides/rel_notes/release_21_11.rst
index 279f0adf62..584c420bd8 100644
--- a/doc/guides/rel_notes/release_21_11.rst
+++ b/doc/guides/rel_notes/release_21_11.rst
@@ -119,6 +119,7 @@ New Features
 
   * Added DES-CBC, AES-XCBC-MAC, AES-CMAC and non-HMAC algo support.
   * Added PDCP short MAC-I support.
+  * Added raw vector datapath API support.
 
 * **Updated NXP dpaa2_sec crypto PMD.**
 
diff --git a/drivers/crypto/dpaa_sec/dpaa_sec.c b/drivers/crypto/dpaa_sec/dpaa_sec.c
index d5aa2748d6..c7ef1c7b0f 100644
--- a/drivers/crypto/dpaa_sec/dpaa_sec.c
+++ b/drivers/crypto/dpaa_sec/dpaa_sec.c
@@ -45,10 +45,7 @@
 #include <dpaa_sec_log.h>
 #include <dpaax_iova_table.h>
 
-static uint8_t cryptodev_driver_id;
-
-static int
-dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess);
+uint8_t dpaa_cryptodev_driver_id;
 
 static inline void
 dpaa_sec_op_ending(struct dpaa_sec_op_ctx *ctx)
@@ -1787,8 +1784,8 @@ dpaa_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
 			case RTE_CRYPTO_OP_WITH_SESSION:
 				ses = (dpaa_sec_session *)
 					get_sym_session_private_data(
-							op->sym->session,
-							cryptodev_driver_id);
+						op->sym->session,
+						dpaa_cryptodev_driver_id);
 				break;
 #ifdef RTE_LIB_SECURITY
 			case RTE_CRYPTO_OP_SECURITY_SESSION:
@@ -2400,7 +2397,7 @@ dpaa_sec_detach_rxq(struct dpaa_sec_dev_private *qi, struct qman_fq *fq)
 	return -1;
 }
 
-static int
+int
 dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess)
 {
 	int ret;
@@ -3216,7 +3213,7 @@ dpaa_sec_dev_infos_get(struct rte_cryptodev *dev,
 		info->feature_flags = dev->feature_flags;
 		info->capabilities = dpaa_sec_capabilities;
 		info->sym.max_nb_sessions = internals->max_nb_sessions;
-		info->driver_id = cryptodev_driver_id;
+		info->driver_id = dpaa_cryptodev_driver_id;
 	}
 }
 
@@ -3412,7 +3409,10 @@ static struct rte_cryptodev_ops crypto_ops = {
 	.queue_pair_release   = dpaa_sec_queue_pair_release,
 	.sym_session_get_size     = dpaa_sec_sym_session_get_size,
 	.sym_session_configure    = dpaa_sec_sym_session_configure,
-	.sym_session_clear        = dpaa_sec_sym_session_clear
+	.sym_session_clear        = dpaa_sec_sym_session_clear,
+	/* Raw data-path API related operations */
+	.sym_get_raw_dp_ctx_size = dpaa_sec_get_dp_ctx_size,
+	.sym_configure_raw_dp_ctx = dpaa_sec_configure_raw_dp_ctx,
 };
 
 #ifdef RTE_LIB_SECURITY
@@ -3463,7 +3463,7 @@ dpaa_sec_dev_init(struct rte_cryptodev *cryptodev)
 
 	PMD_INIT_FUNC_TRACE();
 
-	cryptodev->driver_id = cryptodev_driver_id;
+	cryptodev->driver_id = dpaa_cryptodev_driver_id;
 	cryptodev->dev_ops = &crypto_ops;
 
 	cryptodev->enqueue_burst = dpaa_sec_enqueue_burst;
@@ -3472,6 +3472,7 @@ dpaa_sec_dev_init(struct rte_cryptodev *cryptodev)
 			RTE_CRYPTODEV_FF_HW_ACCELERATED |
 			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
 			RTE_CRYPTODEV_FF_SECURITY |
+			RTE_CRYPTODEV_FF_SYM_RAW_DP |
 			RTE_CRYPTODEV_FF_IN_PLACE_SGL |
 			RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT |
 			RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
@@ -3637,5 +3638,5 @@ static struct cryptodev_driver dpaa_sec_crypto_drv;
 
 RTE_PMD_REGISTER_DPAA(CRYPTODEV_NAME_DPAA_SEC_PMD, rte_dpaa_sec_driver);
 RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa_sec_crypto_drv, rte_dpaa_sec_driver.driver,
-		cryptodev_driver_id);
+		dpaa_cryptodev_driver_id);
 RTE_LOG_REGISTER(dpaa_logtype_sec, pmd.crypto.dpaa, NOTICE);
diff --git a/drivers/crypto/dpaa_sec/dpaa_sec.h b/drivers/crypto/dpaa_sec/dpaa_sec.h
index 503047879e..77288cd1eb 100644
--- a/drivers/crypto/dpaa_sec/dpaa_sec.h
+++ b/drivers/crypto/dpaa_sec/dpaa_sec.h
@@ -19,6 +19,8 @@
 #define AES_CTR_IV_LEN		16
 #define AES_GCM_IV_LEN		12
 
+extern uint8_t dpaa_cryptodev_driver_id;
+
 #define DPAA_IPv6_DEFAULT_VTC_FLOW	0x60000000
 
 /* Minimum job descriptor consists of a oneword job descriptor HEADER and
@@ -117,6 +119,24 @@ struct sec_pdcp_ctxt {
 	uint32_t hfn_threshold;	/*!< HFN Threashold for key renegotiation */
 };
 #endif
+
+typedef int (*dpaa_sec_build_fd_t)(
+	void *qp, uint8_t *drv_ctx, struct rte_crypto_vec *data_vec,
+	uint16_t n_data_vecs, union rte_crypto_sym_ofs ofs,
+	struct rte_crypto_va_iova_ptr *iv,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *aad_or_auth_iv,
+	void *user_data);
+
+typedef struct dpaa_sec_job* (*dpaa_sec_build_raw_dp_fd_t)(uint8_t *drv_ctx,
+			struct rte_crypto_sgl *sgl,
+			struct rte_crypto_sgl *dest_sgl,
+			struct rte_crypto_va_iova_ptr *iv,
+			struct rte_crypto_va_iova_ptr *digest,
+			struct rte_crypto_va_iova_ptr *auth_iv,
+			union rte_crypto_sym_ofs ofs,
+			void *userdata);
+
 typedef struct dpaa_sec_session_entry {
 	struct sec_cdb cdb;	/**< cmd block associated with qp */
 	struct dpaa_sec_qp *qp[MAX_DPAA_CORES];
@@ -129,6 +149,8 @@ typedef struct dpaa_sec_session_entry {
 #ifdef RTE_LIB_SECURITY
 	enum rte_security_session_protocol proto_alg; /*!< Security Algorithm*/
 #endif
+	dpaa_sec_build_fd_t build_fd;
+	dpaa_sec_build_raw_dp_fd_t build_raw_dp_fd;
 	union {
 		struct {
 			uint8_t *data;	/**< pointer to key data */
@@ -211,7 +233,10 @@ struct dpaa_sec_job {
 #define DPAA_MAX_NB_MAX_DIGEST	32
 struct dpaa_sec_op_ctx {
 	struct dpaa_sec_job job;
-	struct rte_crypto_op *op;
+	union {
+		struct rte_crypto_op *op;
+		void *userdata;
+	};
 	struct rte_mempool *ctx_pool; /* mempool pointer for dpaa_sec_op_ctx */
 	uint32_t fd_status;
 	int64_t vtop_offset;
@@ -1001,4 +1026,16 @@ calc_chksum(void *buffer, int len)
 	return  result;
 }
 
+int
+dpaa_sec_configure_raw_dp_ctx(struct rte_cryptodev *dev, uint16_t qp_id,
+	struct rte_crypto_raw_dp_ctx *raw_dp_ctx,
+	enum rte_crypto_op_sess_type sess_type,
+	union rte_cryptodev_session_ctx session_ctx, uint8_t is_update);
+
+int
+dpaa_sec_get_dp_ctx_size(struct rte_cryptodev *dev);
+
+int
+dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess);
+
 #endif /* _DPAA_SEC_H_ */
diff --git a/drivers/crypto/dpaa_sec/dpaa_sec_raw_dp.c b/drivers/crypto/dpaa_sec/dpaa_sec_raw_dp.c
new file mode 100644
index 0000000000..7376da4cbc
--- /dev/null
+++ b/drivers/crypto/dpaa_sec/dpaa_sec_raw_dp.c
@@ -0,0 +1,485 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2021 NXP
+ */
+
+#include <rte_byteorder.h>
+#include <rte_common.h>
+#include <cryptodev_pmd.h>
+#include <rte_crypto.h>
+#include <rte_cryptodev.h>
+#ifdef RTE_LIB_SECURITY
+#include <rte_security_driver.h>
+#endif
+
+/* RTA header files */
+#include <desc/ipsec.h>
+
+#include <rte_dpaa_bus.h>
+#include <dpaa_sec.h>
+#include <dpaa_sec_log.h>
+
+struct dpaa_sec_raw_dp_ctx {
+	dpaa_sec_session *session;
+	uint32_t tail;
+	uint32_t head;
+	uint16_t cached_enqueue;
+	uint16_t cached_dequeue;
+};
+
+static __rte_always_inline int
+dpaa_sec_raw_enqueue_done(void *qp_data, uint8_t *drv_ctx, uint32_t n)
+{
+	RTE_SET_USED(qp_data);
+	RTE_SET_USED(drv_ctx);
+	RTE_SET_USED(n);
+
+	return 0;
+}
+
+static __rte_always_inline int
+dpaa_sec_raw_dequeue_done(void *qp_data, uint8_t *drv_ctx, uint32_t n)
+{
+	RTE_SET_USED(qp_data);
+	RTE_SET_USED(drv_ctx);
+	RTE_SET_USED(n);
+
+	return 0;
+}
+
+static inline struct dpaa_sec_op_ctx *
+dpaa_sec_alloc_raw_ctx(dpaa_sec_session *ses, int sg_count)
+{
+	struct dpaa_sec_op_ctx *ctx;
+	int i, retval;
+
+	retval = rte_mempool_get(
+			ses->qp[rte_lcore_id() % MAX_DPAA_CORES]->ctx_pool,
+			(void **)(&ctx));
+	if (!ctx || retval) {
+		DPAA_SEC_DP_WARN("Alloc sec descriptor failed!");
+		return NULL;
+	}
+	/*
+	 * Clear the SG memory. Each SG entry is 16 bytes; one call to
+	 * dcbz_64() clears 64 bytes (four entries), so it is called once
+	 * per group of four entries. This runs for every packet, and
+	 * dcbz_64() is cheaper than memset().
+	 */
+	for (i = 0; i < sg_count && i < MAX_JOB_SG_ENTRIES; i += 4)
+		dcbz_64(&ctx->job.sg[i]);
+
+	ctx->ctx_pool = ses->qp[rte_lcore_id() % MAX_DPAA_CORES]->ctx_pool;
+	ctx->vtop_offset = (size_t) ctx - rte_mempool_virt2iova(ctx);
+
+	return ctx;
+}
+
+static struct dpaa_sec_job *
+build_dpaa_raw_dp_auth_fd(uint8_t *drv_ctx,
+			struct rte_crypto_sgl *sgl,
+			struct rte_crypto_sgl *dest_sgl,
+			struct rte_crypto_va_iova_ptr *iv,
+			struct rte_crypto_va_iova_ptr *digest,
+			struct rte_crypto_va_iova_ptr *auth_iv,
+			union rte_crypto_sym_ofs ofs,
+			void *userdata)
+{
+	RTE_SET_USED(drv_ctx);
+	RTE_SET_USED(sgl);
+	RTE_SET_USED(dest_sgl);
+	RTE_SET_USED(iv);
+	RTE_SET_USED(digest);
+	RTE_SET_USED(auth_iv);
+	RTE_SET_USED(ofs);
+	RTE_SET_USED(userdata);
+
+	return NULL;
+}
+
+static struct dpaa_sec_job *
+build_dpaa_raw_dp_cipher_fd(uint8_t *drv_ctx,
+			struct rte_crypto_sgl *sgl,
+			struct rte_crypto_sgl *dest_sgl,
+			struct rte_crypto_va_iova_ptr *iv,
+			struct rte_crypto_va_iova_ptr *digest,
+			struct rte_crypto_va_iova_ptr *auth_iv,
+			union rte_crypto_sym_ofs ofs,
+			void *userdata)
+{
+	RTE_SET_USED(digest);
+	RTE_SET_USED(auth_iv);
+	dpaa_sec_session *ses =
+		((struct dpaa_sec_raw_dp_ctx *)drv_ctx)->session;
+	struct dpaa_sec_job *cf;
+	struct dpaa_sec_op_ctx *ctx;
+	struct qm_sg_entry *sg, *out_sg, *in_sg;
+	unsigned int i;
+	uint8_t *IV_ptr = iv->va;
+	int data_len, total_len = 0, data_offset;
+
+	for (i = 0; i < sgl->num; i++)
+		total_len += sgl->vec[i].len;
+
+	data_len = total_len - ofs.ofs.cipher.head - ofs.ofs.cipher.tail;
+	data_offset = ofs.ofs.cipher.head;
+
+	/* The number of segments must not exceed the supported SG limit */
+	if (sgl->num > MAX_SG_ENTRIES) {
+		DPAA_SEC_DP_ERR("Cipher: Max sec segs supported is %d",
+				MAX_SG_ENTRIES);
+		return NULL;
+	}
+
+	ctx = dpaa_sec_alloc_raw_ctx(ses, sgl->num * 2 + 3);
+	if (!ctx)
+		return NULL;
+
+	cf = &ctx->job;
+	ctx->userdata = (void *)userdata;
+
+	/* output */
+	out_sg = &cf->sg[0];
+	out_sg->extension = 1;
+	out_sg->length = data_len;
+	qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(&cf->sg[2]));
+	cpu_to_hw_sg(out_sg);
+
+	if (dest_sgl) {
+		/* 1st seg */
+		sg = &cf->sg[2];
+		qm_sg_entry_set64(sg, dest_sgl->vec[0].iova);
+		sg->length = dest_sgl->vec[0].len - data_offset;
+		sg->offset = data_offset;
+
+		/* Successive segs */
+		for (i = 1; i < dest_sgl->num; i++) {
+			cpu_to_hw_sg(sg);
+			sg++;
+			qm_sg_entry_set64(sg, dest_sgl->vec[i].iova);
+			sg->length = dest_sgl->vec[i].len;
+		}
+	} else {
+		/* 1st seg */
+		sg = &cf->sg[2];
+		qm_sg_entry_set64(sg, sgl->vec[0].iova);
+		sg->length = sgl->vec[0].len - data_offset;
+		sg->offset = data_offset;
+
+		/* Successive segs */
+		for (i = 1; i < sgl->num; i++) {
+			cpu_to_hw_sg(sg);
+			sg++;
+			qm_sg_entry_set64(sg, sgl->vec[i].iova);
+			sg->length = sgl->vec[i].len;
+		}
+
+	}
+	sg->final = 1;
+	cpu_to_hw_sg(sg);
+
+	/* input */
+	in_sg = &cf->sg[1];
+	in_sg->extension = 1;
+	in_sg->final = 1;
+	in_sg->length = data_len + ses->iv.length;
+
+	sg++;
+	qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));
+	cpu_to_hw_sg(in_sg);
+
+	/* IV */
+	qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
+	sg->length = ses->iv.length;
+	cpu_to_hw_sg(sg);
+
+	/* 1st seg */
+	sg++;
+	qm_sg_entry_set64(sg, sgl->vec[0].iova);
+	sg->length = sgl->vec[0].len - data_offset;
+	sg->offset = data_offset;
+
+	/* Successive segs */
+	for (i = 1; i < sgl->num; i++) {
+		cpu_to_hw_sg(sg);
+		sg++;
+		qm_sg_entry_set64(sg, sgl->vec[i].iova);
+		sg->length = sgl->vec[i].len;
+	}
+	sg->final = 1;
+	cpu_to_hw_sg(sg);
+
+	return cf;
+}
+
+static uint32_t
+dpaa_sec_raw_enqueue_burst(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
+	void *user_data[], int *status)
+{
+	/* Transmit the frames to the given device and queue pair */
+	uint32_t loop;
+	struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp_data;
+	uint16_t num_tx = 0;
+	struct qm_fd fds[DPAA_SEC_BURST], *fd;
+	uint32_t frames_to_send;
+	struct dpaa_sec_job *cf;
+	dpaa_sec_session *ses =
+			((struct dpaa_sec_raw_dp_ctx *)drv_ctx)->session;
+	uint32_t flags[DPAA_SEC_BURST] = {0};
+	struct qman_fq *inq[DPAA_SEC_BURST];
+
+	if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
+		if (rte_dpaa_portal_init((void *)0)) {
+			DPAA_SEC_ERR("Failure in affining portal");
+			return 0;
+		}
+	}
+
+	while (vec->num) {
+		frames_to_send = (vec->num > DPAA_SEC_BURST) ?
+				DPAA_SEC_BURST : vec->num;
+		for (loop = 0; loop < frames_to_send; loop++) {
+			if (unlikely(!ses->qp[rte_lcore_id() % MAX_DPAA_CORES])) {
+				if (dpaa_sec_attach_sess_q(dpaa_qp, ses)) {
+					frames_to_send = loop;
+					goto send_pkts;
+				}
+			} else if (unlikely(ses->qp[rte_lcore_id() %
+						MAX_DPAA_CORES] != dpaa_qp)) {
+				DPAA_SEC_DP_ERR("Old:sess->qp = %p"
+					" New qp = %p\n",
+					ses->qp[rte_lcore_id() %
+					MAX_DPAA_CORES], dpaa_qp);
+				frames_to_send = loop;
+				goto send_pkts;
+			}
+
+			/* Clear the unused FD fields before sending */
+			fd = &fds[loop];
+			memset(fd, 0, sizeof(struct qm_fd));
+			cf = ses->build_raw_dp_fd(drv_ctx,
+						&vec->src_sgl[loop],
+						&vec->dest_sgl[loop],
+						&vec->iv[loop],
+						&vec->digest[loop],
+						&vec->auth_iv[loop],
+						ofs,
+						user_data[loop]);
+			if (!cf) {
+				DPAA_SEC_ERR("error: Improper packet contents"
+					" for crypto operation");
+				goto skip_tx;
+			}
+			inq[loop] = ses->inq[rte_lcore_id() % MAX_DPAA_CORES];
+			fd->opaque_addr = 0;
+			fd->cmd = 0;
+			qm_fd_addr_set64(fd, rte_dpaa_mem_vtop(cf->sg));
+			fd->_format1 = qm_fd_compound;
+			fd->length29 = 2 * sizeof(struct qm_sg_entry);
+
+			status[loop] = 1;
+		}
+send_pkts:
+		loop = 0;
+		while (loop < frames_to_send) {
+			loop += qman_enqueue_multi_fq(&inq[loop], &fds[loop],
+					&flags[loop], frames_to_send - loop);
+		}
+		vec->num -= frames_to_send;
+		num_tx += frames_to_send;
+	}
+
+skip_tx:
+	dpaa_qp->tx_pkts += num_tx;
+	dpaa_qp->tx_errs += vec->num - num_tx;
+
+	return num_tx;
+}
+
+static int
+dpaa_sec_deq_raw(struct dpaa_sec_qp *qp, void **out_user_data,
+		uint8_t is_user_data_array,
+		rte_cryptodev_raw_post_dequeue_t post_dequeue,
+		int nb_ops)
+{
+	struct qman_fq *fq;
+	unsigned int pkts = 0;
+	int num_rx_bufs, ret;
+	struct qm_dqrr_entry *dq;
+	uint32_t vdqcr_flags = 0;
+	uint8_t is_success = 0;
+
+	fq = &qp->outq;
+	/*
+	 * For requests of fewer than four buffers we set QM_VDQCR_EXACT and
+	 * ask for the exact number. Otherwise the flag is not set, and since
+	 * the hardware may then return up to two more buffers than requested,
+	 * we request two fewer.
+	 */
+	if (nb_ops < 4) {
+		vdqcr_flags = QM_VDQCR_EXACT;
+		num_rx_bufs = nb_ops;
+	} else {
+		num_rx_bufs = nb_ops > DPAA_MAX_DEQUEUE_NUM_FRAMES ?
+			(DPAA_MAX_DEQUEUE_NUM_FRAMES - 2) : (nb_ops - 2);
+	}
+	ret = qman_set_vdq(fq, num_rx_bufs, vdqcr_flags);
+	if (ret)
+		return 0;
+
+	do {
+		const struct qm_fd *fd;
+		struct dpaa_sec_job *job;
+		struct dpaa_sec_op_ctx *ctx;
+
+		dq = qman_dequeue(fq);
+		if (!dq)
+			continue;
+
+		fd = &dq->fd;
+		/* The SG table is embedded in an op ctx:
+		 * sg[0] is for output,
+		 * sg[1] is for input.
+		 */
+		job = rte_dpaa_mem_ptov(qm_fd_addr_get64(fd));
+
+		ctx = container_of(job, struct dpaa_sec_op_ctx, job);
+		ctx->fd_status = fd->status;
+		if (is_user_data_array)
+			out_user_data[pkts] = ctx->userdata;
+		else
+			out_user_data[0] = ctx->userdata;
+
+		if (!ctx->fd_status) {
+			is_success = true;
+		} else {
+			is_success = false;
+			DPAA_SEC_DP_WARN("SEC return err:0x%x", ctx->fd_status);
+		}
+		post_dequeue(ctx->op, pkts, is_success);
+		pkts++;
+
+		/* status has been reported via post_dequeue(); free the ctx */
+		rte_mempool_put(ctx->ctx_pool, (void *)ctx);
+
+		qman_dqrr_consume(fq, dq);
+	} while (fq->flags & QMAN_FQ_STATE_VDQCR);
+
+	return pkts;
+}
+
+
+static __rte_always_inline uint32_t
+dpaa_sec_raw_dequeue_burst(void *qp_data, uint8_t *drv_ctx,
+	rte_cryptodev_raw_get_dequeue_count_t get_dequeue_count,
+	uint32_t max_nb_to_dequeue,
+	rte_cryptodev_raw_post_dequeue_t post_dequeue,
+	void **out_user_data, uint8_t is_user_data_array,
+	uint32_t *n_success, int *dequeue_status)
+{
+	RTE_SET_USED(drv_ctx);
+	RTE_SET_USED(get_dequeue_count);
+	uint16_t num_rx;
+	struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp_data;
+	uint32_t nb_ops = max_nb_to_dequeue;
+
+	if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
+		if (rte_dpaa_portal_init((void *)0)) {
+			DPAA_SEC_ERR("Failure in affining portal");
+			return 0;
+		}
+	}
+
+	num_rx = dpaa_sec_deq_raw(dpaa_qp, out_user_data,
+			is_user_data_array, post_dequeue, nb_ops);
+
+	dpaa_qp->rx_pkts += num_rx;
+	*dequeue_status = 1;
+	*n_success = num_rx;
+
+	DPAA_SEC_DP_DEBUG("SEC Received %d Packets\n", num_rx);
+
+	return num_rx;
+}
+
+static __rte_always_inline int
+dpaa_sec_raw_enqueue(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_vec *data_vec,
+	uint16_t n_data_vecs, union rte_crypto_sym_ofs ofs,
+	struct rte_crypto_va_iova_ptr *iv,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *aad_or_auth_iv,
+	void *user_data)
+{
+	RTE_SET_USED(qp_data);
+	RTE_SET_USED(drv_ctx);
+	RTE_SET_USED(data_vec);
+	RTE_SET_USED(n_data_vecs);
+	RTE_SET_USED(ofs);
+	RTE_SET_USED(iv);
+	RTE_SET_USED(digest);
+	RTE_SET_USED(aad_or_auth_iv);
+	RTE_SET_USED(user_data);
+
+	return 0;
+}
+
+static __rte_always_inline void *
+dpaa_sec_raw_dequeue(void *qp_data, uint8_t *drv_ctx, int *dequeue_status,
+	enum rte_crypto_op_status *op_status)
+{
+	RTE_SET_USED(qp_data);
+	RTE_SET_USED(drv_ctx);
+	RTE_SET_USED(dequeue_status);
+	RTE_SET_USED(op_status);
+
+	return NULL;
+}
+
+int
+dpaa_sec_configure_raw_dp_ctx(struct rte_cryptodev *dev, uint16_t qp_id,
+	struct rte_crypto_raw_dp_ctx *raw_dp_ctx,
+	enum rte_crypto_op_sess_type sess_type,
+	union rte_cryptodev_session_ctx session_ctx, uint8_t is_update)
+{
+	dpaa_sec_session *sess;
+	struct dpaa_sec_raw_dp_ctx *dp_ctx;
+	RTE_SET_USED(qp_id);
+
+	if (!is_update) {
+		memset(raw_dp_ctx, 0, sizeof(*raw_dp_ctx));
+		raw_dp_ctx->qp_data = dev->data->queue_pairs[qp_id];
+	}
+
+	if (sess_type == RTE_CRYPTO_OP_SECURITY_SESSION)
+		sess = (dpaa_sec_session *)get_sec_session_private_data(
+				session_ctx.sec_sess);
+	else if (sess_type == RTE_CRYPTO_OP_WITH_SESSION)
+		sess = (dpaa_sec_session *)get_sym_session_private_data(
+			session_ctx.crypto_sess, dpaa_cryptodev_driver_id);
+	else
+		return -ENOTSUP;
+	raw_dp_ctx->dequeue_burst = dpaa_sec_raw_dequeue_burst;
+	raw_dp_ctx->dequeue = dpaa_sec_raw_dequeue;
+	raw_dp_ctx->dequeue_done = dpaa_sec_raw_dequeue_done;
+	raw_dp_ctx->enqueue_burst = dpaa_sec_raw_enqueue_burst;
+	raw_dp_ctx->enqueue = dpaa_sec_raw_enqueue;
+	raw_dp_ctx->enqueue_done = dpaa_sec_raw_enqueue_done;
+
+	if (sess->ctxt == DPAA_SEC_CIPHER)
+		sess->build_raw_dp_fd = build_dpaa_raw_dp_cipher_fd;
+	else if (sess->ctxt == DPAA_SEC_AUTH)
+		sess->build_raw_dp_fd = build_dpaa_raw_dp_auth_fd;
+	else
+		return -ENOTSUP;
+	dp_ctx = (struct dpaa_sec_raw_dp_ctx *)raw_dp_ctx->drv_ctx_data;
+	dp_ctx->session = sess;
+
+	return 0;
+}
+
+int
+dpaa_sec_get_dp_ctx_size(__rte_unused struct rte_cryptodev *dev)
+{
+	return sizeof(struct dpaa_sec_raw_dp_ctx);
+}
diff --git a/drivers/crypto/dpaa_sec/meson.build b/drivers/crypto/dpaa_sec/meson.build
index 44fd60e5ae..f87ad6c7e7 100644
--- a/drivers/crypto/dpaa_sec/meson.build
+++ b/drivers/crypto/dpaa_sec/meson.build
@@ -1,5 +1,5 @@
 # SPDX-License-Identifier: BSD-3-Clause
-# Copyright 2018 NXP
+# Copyright 2018-2021 NXP
 
 if not is_linux
     build = false
@@ -7,7 +7,7 @@ if not is_linux
 endif
 
 deps += ['bus_dpaa', 'mempool_dpaa', 'security']
-sources = files('dpaa_sec.c')
+sources = files('dpaa_sec.c', 'dpaa_sec_raw_dp.c')
 
 includes += include_directories('../../bus/dpaa/include')
 includes += include_directories('../../common/dpaax')
-- 
2.17.1

