DPDK patches and discussions
* [dpdk-dev] [PATCH 0/5] crypto/dpaa_sec: performance optimizations
@ 2017-12-13 13:56 Akhil Goyal
  2017-12-13 13:56 ` [dpdk-dev] [PATCH 1/5] crypto/dpaa_sec: optimize virt to phy conversion Akhil Goyal
                   ` (5 more replies)
  0 siblings, 6 replies; 24+ messages in thread
From: Akhil Goyal @ 2017-12-13 13:56 UTC (permalink / raw)
  To: dev; +Cc: pablo.de.lara.guarch, hemant.agrawal, shreyansh.jain, Akhil Goyal

The following changes are added to improve performance:
1. Optimize the virtual to physical address conversion.
2. Support multiple sessions in a single queue pair.
3. Support IPsec protocol offload.
4. Rewrite the enqueue/dequeue code to optimize the data path.

Akhil Goyal (3):
  crypto/dpaa_sec: support ipsec protocol offload
  bus/dpaa: support for enqueue frames of multiple queues
  crypto/dpaa_sec: rewrite Rx/Tx path

Hemant Agrawal (2):
  crypto/dpaa_sec: optimize virt to phy conversion
  crypto/dpaa_sec: support multiple sessions per qp

 doc/guides/cryptodevs/features/dpaa_sec.ini |   1 +
 drivers/bus/dpaa/base/qbman/qman.c          |  66 +++
 drivers/bus/dpaa/include/fsl_qman.h         |  14 +
 drivers/bus/dpaa/rte_bus_dpaa_version.map   |   1 +
 drivers/crypto/dpaa_sec/dpaa_sec.c          | 804 ++++++++++++++++++++++------
 drivers/crypto/dpaa_sec/dpaa_sec.h          | 137 +++--
 6 files changed, 823 insertions(+), 200 deletions(-)

-- 
2.9.3


* [dpdk-dev] [PATCH 1/5] crypto/dpaa_sec: optimize virt to phy conversion
  2017-12-13 13:56 [dpdk-dev] [PATCH 0/5] crypto/dpaa_sec: performance optimizations Akhil Goyal
@ 2017-12-13 13:56 ` Akhil Goyal
  2017-12-13 13:56 ` [dpdk-dev] [PATCH 2/5] crypto/dpaa_sec: support multiple sessions per qp Akhil Goyal
                   ` (4 subsequent siblings)
  5 siblings, 0 replies; 24+ messages in thread
From: Akhil Goyal @ 2017-12-13 13:56 UTC (permalink / raw)
  To: dev; +Cc: pablo.de.lara.guarch, hemant.agrawal, shreyansh.jain

From: Hemant Agrawal <hemant.agrawal@nxp.com>

Context memory is allocated from a mempool. Ideally it will get
all of its memory from a single segment, so a simple offset
calculation can be used to convert virtual addresses that fall
within the context memory.
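
The mechanism, in outline: when a context object is allocated, the driver
caches the difference between its virtual and IO addresses; every later
conversion for an address inside that object is then a single subtraction
instead of a walk of the memory segment list. A minimal illustrative
sketch of the idea (simplified names, not part of the patch):

#include <stdint.h>

typedef uint64_t phys_addr_t;

struct op_ctx {
	int64_t vtop_offset;	/* virt - phys, cached at allocation time */
};

/* at allocation: remember the delta for this object */
static inline void
ctx_cache_offset(struct op_ctx *ctx, phys_addr_t iova)
{
	ctx->vtop_offset = (int64_t)((uint64_t)ctx - iova);
}

/* fast path: convert any address that lies within the same object */
static inline phys_addr_t
ctx_vtop(const struct op_ctx *ctx, const void *vaddr)
{
	return (phys_addr_t)((uint64_t)vaddr - ctx->vtop_offset);
}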

Signed-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>
Acked-by: Akhil Goyal <akhil.goyal@nxp.com>
---
 drivers/crypto/dpaa_sec/dpaa_sec.c | 27 ++++++++++++++++++---------
 drivers/crypto/dpaa_sec/dpaa_sec.h |  1 +
 2 files changed, 19 insertions(+), 9 deletions(-)

diff --git a/drivers/crypto/dpaa_sec/dpaa_sec.c b/drivers/crypto/dpaa_sec/dpaa_sec.c
index 16155b1..a1271be 100644
--- a/drivers/crypto/dpaa_sec/dpaa_sec.c
+++ b/drivers/crypto/dpaa_sec/dpaa_sec.c
@@ -106,6 +106,8 @@ dpaa_sec_alloc_ctx(dpaa_sec_session *ses)
 	dcbz_64(&ctx->job.sg[SG_CACHELINE_3]);
 
 	ctx->ctx_pool = ses->ctx_pool;
+	ctx->vtop_offset = (uint64_t) ctx
+				- rte_mempool_virt2iova(ctx);
 
 	return ctx;
 }
@@ -130,6 +132,13 @@ dpaa_mem_vtop(void *vaddr)
 	return (rte_iova_t)(NULL);
 }
 
+/* virtual address conversion when mempool support is available for ctx */
+static inline phys_addr_t
+dpaa_mem_vtop_ctx(struct dpaa_sec_op_ctx *ctx, void *vaddr)
+{
+	return (uint64_t)vaddr - ctx->vtop_offset;
+}
+
 static inline void *
 dpaa_mem_ptov(rte_iova_t paddr)
 {
@@ -589,7 +598,7 @@ build_auth_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
 	if (is_decode(ses)) {
 		/* need to extend the input to a compound frame */
 		sg->extension = 1;
-		qm_sg_entry_set64(sg, dpaa_mem_vtop(&cf->sg[2]));
+		qm_sg_entry_set64(sg, dpaa_mem_vtop_ctx(ctx, &cf->sg[2]));
 		sg->length = sym->auth.data.length + ses->digest_length;
 		sg->final = 1;
 		cpu_to_hw_sg(sg);
@@ -603,7 +612,7 @@ build_auth_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
 		cpu_to_hw_sg(sg);
 
 		/* let's check digest by hw */
-		start_addr = dpaa_mem_vtop(old_digest);
+		start_addr = dpaa_mem_vtop_ctx(ctx, old_digest);
 		sg++;
 		qm_sg_entry_set64(sg, start_addr);
 		sg->length = ses->digest_length;
@@ -657,7 +666,7 @@ build_cipher_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
 	sg->extension = 1;
 	sg->final = 1;
 	sg->length = sym->cipher.data.length + ses->iv.length;
-	qm_sg_entry_set64(sg, dpaa_mem_vtop(&cf->sg[2]));
+	qm_sg_entry_set64(sg, dpaa_mem_vtop_ctx(ctx, &cf->sg[2]));
 	cpu_to_hw_sg(sg);
 
 	sg = &cf->sg[2];
@@ -703,7 +712,7 @@ build_cipher_auth_gcm(struct rte_crypto_op *op, dpaa_sec_session *ses)
 	/* input */
 	rte_prefetch0(cf->sg);
 	sg = &cf->sg[2];
-	qm_sg_entry_set64(&cf->sg[1], dpaa_mem_vtop(sg));
+	qm_sg_entry_set64(&cf->sg[1], dpaa_mem_vtop_ctx(ctx, sg));
 	if (is_encode(ses)) {
 		qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
 		sg->length = ses->iv.length;
@@ -748,7 +757,7 @@ build_cipher_auth_gcm(struct rte_crypto_op *op, dpaa_sec_session *ses)
 		       ses->digest_length);
 		sg++;
 
-		qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
+		qm_sg_entry_set64(sg, dpaa_mem_vtop_ctx(ctx, ctx->digest));
 		sg->length = ses->digest_length;
 		length += sg->length;
 		sg->final = 1;
@@ -762,7 +771,7 @@ build_cipher_auth_gcm(struct rte_crypto_op *op, dpaa_sec_session *ses)
 
 	/* output */
 	sg++;
-	qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop(sg));
+	qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop_ctx(ctx, sg));
 	qm_sg_entry_set64(sg,
 		dst_start_addr + sym->aead.data.offset - ses->auth_only_len);
 	sg->length = sym->aead.data.length + ses->auth_only_len;
@@ -814,7 +823,7 @@ build_cipher_auth(struct rte_crypto_op *op, dpaa_sec_session *ses)
 	/* input */
 	rte_prefetch0(cf->sg);
 	sg = &cf->sg[2];
-	qm_sg_entry_set64(&cf->sg[1], dpaa_mem_vtop(sg));
+	qm_sg_entry_set64(&cf->sg[1], dpaa_mem_vtop_ctx(ctx, sg));
 	if (is_encode(ses)) {
 		qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
 		sg->length = ses->iv.length;
@@ -844,7 +853,7 @@ build_cipher_auth(struct rte_crypto_op *op, dpaa_sec_session *ses)
 		       ses->digest_length);
 		sg++;
 
-		qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
+		qm_sg_entry_set64(sg, dpaa_mem_vtop_ctx(ctx, ctx->digest));
 		sg->length = ses->digest_length;
 		length += sg->length;
 		sg->final = 1;
@@ -858,7 +867,7 @@ build_cipher_auth(struct rte_crypto_op *op, dpaa_sec_session *ses)
 
 	/* output */
 	sg++;
-	qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop(sg));
+	qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop_ctx(ctx, sg));
 	qm_sg_entry_set64(sg, dst_start_addr + sym->cipher.data.offset);
 	sg->length = sym->cipher.data.length;
 	length = sg->length;
diff --git a/drivers/crypto/dpaa_sec/dpaa_sec.h b/drivers/crypto/dpaa_sec/dpaa_sec.h
index af3f255..eba07b6 100644
--- a/drivers/crypto/dpaa_sec/dpaa_sec.h
+++ b/drivers/crypto/dpaa_sec/dpaa_sec.h
@@ -181,6 +181,7 @@ struct dpaa_sec_op_ctx {
 	struct rte_crypto_op *op;
 	struct rte_mempool *ctx_pool; /* mempool pointer for dpaa_sec_op_ctx */
 	uint32_t fd_status;
+	int64_t vtop_offset;
 	uint8_t digest[DPAA_MAX_NB_MAX_DIGEST];
 };
 
-- 
2.9.3


* [dpdk-dev] [PATCH 2/5] crypto/dpaa_sec: support multiple sessions per qp
  2017-12-13 13:56 [dpdk-dev] [PATCH 0/5] crypto/dpaa_sec: performance optimizations Akhil Goyal
  2017-12-13 13:56 ` [dpdk-dev] [PATCH 1/5] crypto/dpaa_sec: optimize virt to phy conversion Akhil Goyal
@ 2017-12-13 13:56 ` Akhil Goyal
  2017-12-13 13:56 ` [dpdk-dev] [PATCH 3/5] crypto/dpaa_sec: support ipsec protocol offload Akhil Goyal
                   ` (3 subsequent siblings)
  5 siblings, 0 replies; 24+ messages in thread
From: Akhil Goyal @ 2017-12-13 13:56 UTC (permalink / raw)
  To: dev; +Cc: pablo.de.lara.guarch, hemant.agrawal, shreyansh.jain

From: Hemant Agrawal <hemant.agrawal@nxp.com>

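The patch moves the command descriptor block (CDB) and the Rx frame queue
out of the queue pair and into the session, so that many sessions can share
a single queue pair. Rx frame queues are created once at device init; a
session lazily claims a free one the first time it is used on a queue pair.
A simplified sketch of that lazy attach on the enqueue path (error handling
omitted, names follow the diff):

	if (unlikely(ses->qp != qp)) {
		ses->qp = qp;
		dpaa_sec_prep_cdb(ses);		/* per-session descriptor now */
		dpaa_sec_init_rx(ses->inq, dpaa_mem_vtop(&ses->cdb),
				 qman_fq_fqid(&qp->outq));
	}
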
Signed-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>
Acked-by: Akhil Goyal <akhil.goyal@nxp.com>
---
 drivers/crypto/dpaa_sec/dpaa_sec.c | 154 +++++++++++++++++++++++++------------
 drivers/crypto/dpaa_sec/dpaa_sec.h |  74 +++++++++---------
 2 files changed, 145 insertions(+), 83 deletions(-)

diff --git a/drivers/crypto/dpaa_sec/dpaa_sec.c b/drivers/crypto/dpaa_sec/dpaa_sec.c
index a1271be..b51db83 100644
--- a/drivers/crypto/dpaa_sec/dpaa_sec.c
+++ b/drivers/crypto/dpaa_sec/dpaa_sec.c
@@ -69,6 +69,9 @@ static uint8_t cryptodev_driver_id;
 static __thread struct rte_crypto_op **dpaa_sec_ops;
 static __thread int dpaa_sec_op_nb;
 
+static int
+dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess);
+
 static inline void
 dpaa_sec_op_ending(struct dpaa_sec_op_ctx *ctx)
 {
@@ -177,15 +180,6 @@ dpaa_sec_init_rx(struct qman_fq *fq_in, rte_iova_t hwdesc,
 	/* Clear FQ options */
 	memset(&fq_opts, 0x00, sizeof(struct qm_mcc_initfq));
 
-	flags = QMAN_FQ_FLAG_LOCKED | QMAN_FQ_FLAG_DYNAMIC_FQID |
-		QMAN_FQ_FLAG_TO_DCPORTAL;
-
-	ret = qman_create_fq(0, flags, fq_in);
-	if (unlikely(ret != 0)) {
-		PMD_INIT_LOG(ERR, "qman_create_fq failed");
-		return ret;
-	}
-
 	flags = QMAN_INITFQ_FLAG_SCHED;
 	fq_opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_CONTEXTA |
 			  QM_INITFQ_WE_CONTEXTB;
@@ -197,9 +191,11 @@ dpaa_sec_init_rx(struct qman_fq *fq_in, rte_iova_t hwdesc,
 
 	fq_in->cb.ern  = ern_sec_fq_handler;
 
+	PMD_INIT_LOG(DEBUG, "in-%x out-%x", fq_in->fqid, fqid_out);
+
 	ret = qman_init_fq(fq_in, flags, &fq_opts);
 	if (unlikely(ret != 0))
-		PMD_INIT_LOG(ERR, "qman_init_fq failed");
+		PMD_INIT_LOG(ERR, "qman_init_fq failed %d", ret);
 
 	return ret;
 }
@@ -383,7 +379,7 @@ dpaa_sec_prep_cdb(dpaa_sec_session *ses)
 {
 	struct alginfo alginfo_c = {0}, alginfo_a = {0}, alginfo = {0};
 	uint32_t shared_desc_len = 0;
-	struct sec_cdb *cdb = &ses->qp->cdb;
+	struct sec_cdb *cdb = &ses->cdb;
 	int err;
 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
 	int swap = false;
@@ -903,12 +899,10 @@ dpaa_sec_enqueue_op(struct rte_crypto_op *op,  struct dpaa_sec_qp *qp)
 	ses = (dpaa_sec_session *)get_session_private_data(op->sym->session,
 					cryptodev_driver_id);
 
-	if (unlikely(!qp->ses || qp->ses != ses)) {
-		qp->ses = ses;
-		ses->qp = qp;
-		ret = dpaa_sec_prep_cdb(ses);
-		if (ret)
-			return ret;
+	if (unlikely(!ses->qp || ses->qp != qp)) {
+		PMD_INIT_LOG(DEBUG, "sess->qp - %p qp %p", ses->qp, qp);
+		if (dpaa_sec_attach_sess_q(qp, ses))
+			return -1;
 	}
 
 	/*
@@ -944,7 +938,7 @@ dpaa_sec_enqueue_op(struct rte_crypto_op *op,  struct dpaa_sec_qp *qp)
 	if (auth_only_len)
 		fd.cmd = 0x80000000 | auth_only_len;
 	do {
-		ret = qman_enqueue(&qp->inq, &fd, 0);
+		ret = qman_enqueue(ses->inq, &fd, 0);
 	} while (ret != 0);
 
 	return 0;
@@ -1160,43 +1154,82 @@ dpaa_sec_aead_init(struct rte_cryptodev *dev __rte_unused,
 	return 0;
 }
 
-static int
-dpaa_sec_qp_attach_sess(struct rte_cryptodev *dev, uint16_t qp_id, void *ses)
+static struct qman_fq *
+dpaa_sec_attach_rxq(struct dpaa_sec_dev_private *qi)
 {
-	dpaa_sec_session *sess = ses;
-	struct dpaa_sec_qp *qp;
+	unsigned int i;
 
-	PMD_INIT_FUNC_TRACE();
+	for (i = 0; i < qi->max_nb_sessions; i++) {
+		if (qi->inq_attach[i] == 0) {
+			qi->inq_attach[i] = 1;
+			return &qi->inq[i];
+		}
+	}
+	PMD_DRV_LOG(ERR, "All sessions in use %x", qi->max_nb_sessions);
+
+	return NULL;
+}
 
-	qp = dev->data->queue_pairs[qp_id];
-	if (qp->ses != NULL) {
-		PMD_INIT_LOG(ERR, "qp in-use by another session\n");
-		return -EBUSY;
+static int
+dpaa_sec_detach_rxq(struct dpaa_sec_dev_private *qi, struct qman_fq *fq)
+{
+	unsigned int i;
+
+	for (i = 0; i < qi->max_nb_sessions; i++) {
+		if (&qi->inq[i] == fq) {
+			qi->inq_attach[i] = 0;
+			return 0;
+		}
 	}
+	return -1;
+}
+
+static int
+dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess)
+{
+	int ret;
 
-	qp->ses = sess;
 	sess->qp = qp;
+	ret = dpaa_sec_prep_cdb(sess);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Unable to prepare sec cdb");
+		return -1;
+	}
 
-	return dpaa_sec_prep_cdb(sess);
+	ret = dpaa_sec_init_rx(sess->inq, dpaa_mem_vtop(&sess->cdb),
+			       qman_fq_fqid(&qp->outq));
+	if (ret)
+		PMD_DRV_LOG(ERR, "Unable to init sec queue");
+
+	return ret;
+}
+
+static int
+dpaa_sec_qp_attach_sess(struct rte_cryptodev *dev __rte_unused,
+			uint16_t qp_id __rte_unused,
+			void *ses __rte_unused)
+{
+	PMD_INIT_FUNC_TRACE();
+	return 0;
 }
 
 static int
-dpaa_sec_qp_detach_sess(struct rte_cryptodev *dev, uint16_t qp_id, void *ses)
+dpaa_sec_qp_detach_sess(struct rte_cryptodev *dev,
+			uint16_t qp_id  __rte_unused,
+			void *ses)
 {
 	dpaa_sec_session *sess = ses;
-	struct dpaa_sec_qp *qp;
+	struct dpaa_sec_dev_private *qi = dev->data->dev_private;
 
 	PMD_INIT_FUNC_TRACE();
 
-	qp = dev->data->queue_pairs[qp_id];
-	if (qp->ses != NULL) {
-		qp->ses = NULL;
-		sess->qp = NULL;
-		return 0;
-	}
+	if (sess->inq)
+		dpaa_sec_detach_rxq(qi, sess->inq);
+	sess->inq = NULL;
 
-	PMD_DRV_LOG(ERR, "No session attached to qp");
-	return -EINVAL;
+	sess->qp = NULL;
+
+	return 0;
 }
 
 static int
@@ -1259,8 +1292,20 @@ dpaa_sec_set_session_parameters(struct rte_cryptodev *dev,
 		return -EINVAL;
 	}
 	session->ctx_pool = internals->ctx_pool;
+	session->inq = dpaa_sec_attach_rxq(internals);
+	if (session->inq == NULL) {
+		PMD_DRV_LOG(ERR, "unable to attach sec queue");
+		goto err1;
+	}
 
 	return 0;
+
+err1:
+	rte_free(session->cipher_key.data);
+	rte_free(session->auth_key.data);
+	memset(session, 0, sizeof(dpaa_sec_session));
+
+	return -EINVAL;
 }
 
 static int
@@ -1293,6 +1338,7 @@ dpaa_sec_session_configure(struct rte_cryptodev *dev,
 	set_session_private_data(sess, dev->driver_id,
 			sess_private_data);
 
+
 	return 0;
 }
 
@@ -1301,16 +1347,22 @@ static void
 dpaa_sec_session_clear(struct rte_cryptodev *dev,
 		struct rte_cryptodev_sym_session *sess)
 {
-	PMD_INIT_FUNC_TRACE();
+	struct dpaa_sec_dev_private *qi = dev->data->dev_private;
 	uint8_t index = dev->driver_id;
 	void *sess_priv = get_session_private_data(sess, index);
+
+	PMD_INIT_FUNC_TRACE();
+
 	dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;
 
 	if (sess_priv) {
+		struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
+
+		if (s->inq)
+			dpaa_sec_detach_rxq(qi, s->inq);
 		rte_free(s->cipher_key.data);
 		rte_free(s->auth_key.data);
 		memset(s, 0, sizeof(dpaa_sec_session));
-		struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
 		set_session_private_data(sess, index, NULL);
 		rte_mempool_put(sess_mp, sess_priv);
 	}
@@ -1358,7 +1410,8 @@ dpaa_sec_dev_infos_get(struct rte_cryptodev *dev,
 		info->capabilities = dpaa_sec_capabilities;
 		info->sym.max_nb_sessions = internals->max_nb_sessions;
 		info->sym.max_nb_sessions_per_qp =
-			RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS / RTE_MAX_NB_SEC_QPS;
+			RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS /
+			RTE_DPAA_MAX_NB_SEC_QPS;
 		info->driver_id = cryptodev_driver_id;
 	}
 }
@@ -1403,7 +1456,7 @@ dpaa_sec_dev_init(struct rte_cryptodev *cryptodev)
 {
 	struct dpaa_sec_dev_private *internals;
 	struct dpaa_sec_qp *qp;
-	uint32_t i;
+	uint32_t i, flags;
 	int ret;
 	char str[20];
 
@@ -1419,7 +1472,7 @@ dpaa_sec_dev_init(struct rte_cryptodev *cryptodev)
 			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING;
 
 	internals = cryptodev->data->dev_private;
-	internals->max_nb_queue_pairs = RTE_MAX_NB_SEC_QPS;
+	internals->max_nb_queue_pairs = RTE_DPAA_MAX_NB_SEC_QPS;
 	internals->max_nb_sessions = RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS;
 
 	for (i = 0; i < internals->max_nb_queue_pairs; i++) {
@@ -1430,10 +1483,15 @@ dpaa_sec_dev_init(struct rte_cryptodev *cryptodev)
 			PMD_INIT_LOG(ERR, "config tx of queue pair  %d", i);
 			goto init_error;
 		}
-		ret = dpaa_sec_init_rx(&qp->inq, dpaa_mem_vtop(&qp->cdb),
-				       qman_fq_fqid(&qp->outq));
-		if (ret) {
-			PMD_INIT_LOG(ERR, "config rx of queue pair %d", i);
+	}
+
+	flags = QMAN_FQ_FLAG_LOCKED | QMAN_FQ_FLAG_DYNAMIC_FQID |
+		QMAN_FQ_FLAG_TO_DCPORTAL;
+	for (i = 0; i < internals->max_nb_sessions; i++) {
+		/* create rx qman fq for sessions */
+		ret = qman_create_fq(0, flags, &internals->inq[i]);
+		if (unlikely(ret != 0)) {
+			PMD_INIT_LOG(ERR, "sec qman_create_fq failed");
 			goto init_error;
 		}
 	}
diff --git a/drivers/crypto/dpaa_sec/dpaa_sec.h b/drivers/crypto/dpaa_sec/dpaa_sec.h
index eba07b6..11407d3 100644
--- a/drivers/crypto/dpaa_sec/dpaa_sec.h
+++ b/drivers/crypto/dpaa_sec/dpaa_sec.h
@@ -64,36 +64,6 @@ enum dpaa_sec_op_type {
 	DPAA_SEC_MAX
 };
 
-typedef struct dpaa_sec_session_entry {
-	uint8_t dir;         /*!< Operation Direction */
-	enum rte_crypto_cipher_algorithm cipher_alg; /*!< Cipher Algorithm*/
-	enum rte_crypto_auth_algorithm auth_alg; /*!< Authentication Algorithm*/
-	enum rte_crypto_aead_algorithm aead_alg; /*!< Authentication Algorithm*/
-	union {
-		struct {
-			uint8_t *data;	/**< pointer to key data */
-			size_t length;	/**< key length in bytes */
-		} aead_key;
-		struct {
-			struct {
-				uint8_t *data;	/**< pointer to key data */
-				size_t length;	/**< key length in bytes */
-			} cipher_key;
-			struct {
-				uint8_t *data;	/**< pointer to key data */
-				size_t length;	/**< key length in bytes */
-			} auth_key;
-		};
-	};
-	struct {
-		uint16_t length;
-		uint16_t offset;
-	} iv;	/**< Initialisation vector parameters */
-	uint16_t auth_only_len; /*!< Length of data for Auth only */
-	uint32_t digest_length;
-	struct dpaa_sec_qp *qp;
-	struct rte_mempool *ctx_pool; /* session mempool for dpaa_sec_op_ctx */
-} dpaa_sec_session;
 
 #define DPAA_SEC_MAX_DESC_SIZE  64
 /* code or cmd block to caam */
@@ -143,11 +113,41 @@ struct sec_cdb {
 	uint32_t sh_desc[DPAA_SEC_MAX_DESC_SIZE];
 };
 
+typedef struct dpaa_sec_session_entry {
+	uint8_t dir;         /*!< Operation Direction */
+	enum rte_crypto_cipher_algorithm cipher_alg; /*!< Cipher Algorithm*/
+	enum rte_crypto_auth_algorithm auth_alg; /*!< Authentication Algorithm*/
+	enum rte_crypto_aead_algorithm aead_alg; /*!< Authentication Algorithm*/
+	union {
+		struct {
+			uint8_t *data;	/**< pointer to key data */
+			size_t length;	/**< key length in bytes */
+		} aead_key;
+		struct {
+			struct {
+				uint8_t *data;	/**< pointer to key data */
+				size_t length;	/**< key length in bytes */
+			} cipher_key;
+			struct {
+				uint8_t *data;	/**< pointer to key data */
+				size_t length;	/**< key length in bytes */
+			} auth_key;
+		};
+	};
+	struct {
+		uint16_t length;
+		uint16_t offset;
+	} iv;	/**< Initialisation vector parameters */
+	uint16_t auth_only_len; /*!< Length of data for Auth only */
+	uint32_t digest_length;
+	struct dpaa_sec_qp *qp;
+	struct qman_fq *inq;
+	struct sec_cdb cdb;	/**< cmd block associated with qp */
+	struct rte_mempool *ctx_pool; /* session mempool for dpaa_sec_op_ctx */
+} dpaa_sec_session;
+
 struct dpaa_sec_qp {
 	struct dpaa_sec_dev_private *internals;
-	struct sec_cdb cdb;		/* cmd block associated with qp */
-	dpaa_sec_session *ses;		/* session associated with qp */
-	struct qman_fq inq;
 	struct qman_fq outq;
 	int rx_pkts;
 	int rx_errs;
@@ -155,12 +155,16 @@ struct dpaa_sec_qp {
 	int tx_errs;
 };
 
-#define RTE_MAX_NB_SEC_QPS RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS
+#define RTE_DPAA_MAX_NB_SEC_QPS 1
+#define RTE_DPAA_MAX_RX_QUEUE RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS
+
 /* internal sec queue interface */
 struct dpaa_sec_dev_private {
 	void *sec_hw;
 	struct rte_mempool *ctx_pool; /* per dev mempool for dpaa_sec_op_ctx */
-	struct dpaa_sec_qp qps[RTE_MAX_NB_SEC_QPS]; /* i/o queue for sec */
+	struct dpaa_sec_qp qps[RTE_DPAA_MAX_NB_SEC_QPS]; /* i/o queue for sec */
+	struct qman_fq inq[RTE_DPAA_MAX_RX_QUEUE];
+	unsigned char inq_attach[RTE_DPAA_MAX_RX_QUEUE];
 	unsigned int max_nb_queue_pairs;
 	unsigned int max_nb_sessions;
 };
-- 
2.9.3


* [dpdk-dev] [PATCH 3/5] crypto/dpaa_sec: support ipsec protocol offload
  2017-12-13 13:56 [dpdk-dev] [PATCH 0/5] crypto/dpaa_sec: performance optimizations Akhil Goyal
  2017-12-13 13:56 ` [dpdk-dev] [PATCH 1/5] crypto/dpaa_sec: optimize virt to phy conversion Akhil Goyal
  2017-12-13 13:56 ` [dpdk-dev] [PATCH 2/5] crypto/dpaa_sec: support multiple sessions per qp Akhil Goyal
@ 2017-12-13 13:56 ` Akhil Goyal
  2017-12-19 12:59   ` Hemant Agrawal
  2017-12-13 13:56 ` [dpdk-dev] [PATCH 4/5] bus/dpaa: support for enqueue frames of multiple queues Akhil Goyal
                   ` (2 subsequent siblings)
  5 siblings, 1 reply; 24+ messages in thread
From: Akhil Goyal @ 2017-12-13 13:56 UTC (permalink / raw)
  To: dev; +Cc: pablo.de.lara.guarch, hemant.agrawal, shreyansh.jain, Akhil Goyal

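This patch adds lookaside IPsec protocol offload (ESP, tunnel mode) through
the rte_security API: the SEC shared descriptor is built with the IPsec
encap/decap PDBs, so the hardware performs the full protocol processing per
packet. A hedged application-side sketch of creating such a session
(assumes the rte_security API of this release; dev_id, cipher_xform,
sess_pool and op are the caller's, and crypto xform setup and error checks
are omitted):

	struct rte_security_ctx *sec_ctx =
		(struct rte_security_ctx *)rte_cryptodev_get_sec_ctx(dev_id);

	struct rte_security_session_conf conf = {
		.action_type = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
		.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
		.ipsec = {
			.spi = 1,
			.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
			.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
			.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
			/* .tunnel.ipv4 endpoints, ttl, dscp set by caller */
		},
		.crypto_xform = &cipher_xform,	/* cipher chained with auth */
	};

	struct rte_security_session *sec_sess =
		rte_security_session_create(sec_ctx, &conf, sess_pool);

	/* per packet: attach the session, then enqueue as usual */
	rte_security_attach_session(op, sec_sess);
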
Signed-off-by: Akhil Goyal <akhil.goyal@nxp.com>
---
 doc/guides/cryptodevs/features/dpaa_sec.ini |   1 +
 drivers/crypto/dpaa_sec/dpaa_sec.c          | 409 ++++++++++++++++++++++++++--
 drivers/crypto/dpaa_sec/dpaa_sec.h          |  62 ++++-
 3 files changed, 449 insertions(+), 23 deletions(-)

diff --git a/doc/guides/cryptodevs/features/dpaa_sec.ini b/doc/guides/cryptodevs/features/dpaa_sec.ini
index 0e8f5b2..deab53a 100644
--- a/doc/guides/cryptodevs/features/dpaa_sec.ini
+++ b/doc/guides/cryptodevs/features/dpaa_sec.ini
@@ -7,6 +7,7 @@
 Symmetric crypto       = Y
 Sym operation chaining = Y
 HW Accelerated         = Y
+Protocol offload       = Y
 
 ;
 ; Supported crypto algorithms of the 'dpaa_sec' crypto driver.
diff --git a/drivers/crypto/dpaa_sec/dpaa_sec.c b/drivers/crypto/dpaa_sec/dpaa_sec.c
index b51db83..ea744e6 100644
--- a/drivers/crypto/dpaa_sec/dpaa_sec.c
+++ b/drivers/crypto/dpaa_sec/dpaa_sec.c
@@ -41,6 +41,7 @@
 #include <rte_cryptodev_pmd.h>
 #include <rte_crypto.h>
 #include <rte_cryptodev.h>
+#include <rte_security_driver.h>
 #include <rte_cycles.h>
 #include <rte_dev.h>
 #include <rte_kvargs.h>
@@ -222,8 +223,19 @@ dqrr_out_fq_cb_rx(struct qman_portal *qm __always_unused,
 	 * sg[1] for input
 	 */
 	job = dpaa_mem_ptov(qm_fd_addr_get64(fd));
+
 	ctx = container_of(job, struct dpaa_sec_op_ctx, job);
 	ctx->fd_status = fd->status;
+	if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
+		struct qm_sg_entry *sg_out;
+		uint32_t len;
+
+		sg_out = &job->sg[0];
+		hw_sg_to_cpu(sg_out);
+		len = sg_out->length;
+		ctx->op->sym->m_src->pkt_len = len;
+		ctx->op->sym->m_src->data_len = len;
+	}
 	dpaa_sec_ops[dpaa_sec_op_nb++] = ctx->op;
 	dpaa_sec_op_ending(ctx);
 
@@ -287,7 +299,13 @@ static inline int is_aead(dpaa_sec_session *ses)
 static inline int is_auth_cipher(dpaa_sec_session *ses)
 {
 	return ((ses->cipher_alg != RTE_CRYPTO_CIPHER_NULL) &&
-		(ses->auth_alg != RTE_CRYPTO_AUTH_NULL));
+		(ses->auth_alg != RTE_CRYPTO_AUTH_NULL) &&
+		(ses->proto_alg != RTE_SECURITY_PROTOCOL_IPSEC));
+}
+
+static inline int is_proto_ipsec(dpaa_sec_session *ses)
+{
+	return (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC);
 }
 
 static inline int is_encode(dpaa_sec_session *ses)
@@ -308,27 +326,39 @@ caam_auth_alg(dpaa_sec_session *ses, struct alginfo *alginfo_a)
 		ses->digest_length = 0;
 		break;
 	case RTE_CRYPTO_AUTH_MD5_HMAC:
-		alginfo_a->algtype = OP_ALG_ALGSEL_MD5;
+		alginfo_a->algtype =
+			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
+			OP_PCL_IPSEC_HMAC_MD5_96 : OP_ALG_ALGSEL_MD5;
 		alginfo_a->algmode = OP_ALG_AAI_HMAC;
 		break;
 	case RTE_CRYPTO_AUTH_SHA1_HMAC:
-		alginfo_a->algtype = OP_ALG_ALGSEL_SHA1;
+		alginfo_a->algtype =
+			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
+			OP_PCL_IPSEC_HMAC_SHA1_96 : OP_ALG_ALGSEL_SHA1;
 		alginfo_a->algmode = OP_ALG_AAI_HMAC;
 		break;
 	case RTE_CRYPTO_AUTH_SHA224_HMAC:
-		alginfo_a->algtype = OP_ALG_ALGSEL_SHA224;
+		alginfo_a->algtype =
+			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
+			OP_PCL_IPSEC_HMAC_SHA1_160 : OP_ALG_ALGSEL_SHA224;
 		alginfo_a->algmode = OP_ALG_AAI_HMAC;
 		break;
 	case RTE_CRYPTO_AUTH_SHA256_HMAC:
-		alginfo_a->algtype = OP_ALG_ALGSEL_SHA256;
+		alginfo_a->algtype =
+			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
+			OP_PCL_IPSEC_HMAC_SHA2_256_128 : OP_ALG_ALGSEL_SHA256;
 		alginfo_a->algmode = OP_ALG_AAI_HMAC;
 		break;
 	case RTE_CRYPTO_AUTH_SHA384_HMAC:
-		alginfo_a->algtype = OP_ALG_ALGSEL_SHA384;
+		alginfo_a->algtype =
+			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
+			OP_PCL_IPSEC_HMAC_SHA2_384_192 : OP_ALG_ALGSEL_SHA384;
 		alginfo_a->algmode = OP_ALG_AAI_HMAC;
 		break;
 	case RTE_CRYPTO_AUTH_SHA512_HMAC:
-		alginfo_a->algtype = OP_ALG_ALGSEL_SHA512;
+		alginfo_a->algtype =
+			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
+			OP_PCL_IPSEC_HMAC_SHA2_512_256 : OP_ALG_ALGSEL_SHA512;
 		alginfo_a->algmode = OP_ALG_AAI_HMAC;
 		break;
 	default:
@@ -343,15 +373,21 @@ caam_cipher_alg(dpaa_sec_session *ses, struct alginfo *alginfo_c)
 	case RTE_CRYPTO_CIPHER_NULL:
 		break;
 	case RTE_CRYPTO_CIPHER_AES_CBC:
-		alginfo_c->algtype = OP_ALG_ALGSEL_AES;
+		alginfo_c->algtype =
+			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
+			OP_PCL_IPSEC_AES_CBC : OP_ALG_ALGSEL_AES;
 		alginfo_c->algmode = OP_ALG_AAI_CBC;
 		break;
 	case RTE_CRYPTO_CIPHER_3DES_CBC:
-		alginfo_c->algtype = OP_ALG_ALGSEL_3DES;
+		alginfo_c->algtype =
+			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
+			OP_PCL_IPSEC_3DES : OP_ALG_ALGSEL_3DES;
 		alginfo_c->algmode = OP_ALG_AAI_CBC;
 		break;
 	case RTE_CRYPTO_CIPHER_AES_CTR:
-		alginfo_c->algtype = OP_ALG_ALGSEL_AES;
+		alginfo_c->algtype =
+			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
+			OP_PCL_IPSEC_AES_CTR : OP_ALG_ALGSEL_AES;
 		alginfo_c->algmode = OP_ALG_AAI_CTR;
 		break;
 	default:
@@ -497,14 +533,28 @@ dpaa_sec_prep_cdb(dpaa_sec_session *ses)
 		cdb->sh_desc[0] = 0;
 		cdb->sh_desc[1] = 0;
 		cdb->sh_desc[2] = 0;
-
-		/* Auth_only_len is set as 0 here and it will be overwritten
-		 *  in fd for each packet.
-		 */
-		shared_desc_len = cnstr_shdsc_authenc(cdb->sh_desc,
-				true, swap, &alginfo_c, &alginfo_a,
-				ses->iv.length, 0,
-				ses->digest_length, ses->dir);
+		if (is_proto_ipsec(ses)) {
+			if (ses->dir == DIR_ENC) {
+				shared_desc_len = cnstr_shdsc_ipsec_new_encap(
+						cdb->sh_desc,
+						true, swap, &ses->encap_pdb,
+						(uint8_t *)&ses->ip4_hdr,
+						&alginfo_c, &alginfo_a);
+			} else if (ses->dir == DIR_DEC) {
+				shared_desc_len = cnstr_shdsc_ipsec_new_decap(
+						cdb->sh_desc,
+						true, swap, &ses->decap_pdb,
+						&alginfo_c, &alginfo_a);
+			}
+		} else {
+			/* Auth_only_len is set as 0 here and it will be
+			 * overwritten in fd for each packet.
+			 */
+			shared_desc_len = cnstr_shdsc_authenc(cdb->sh_desc,
+					true, swap, &alginfo_c, &alginfo_a,
+					ses->iv.length, 0,
+					ses->digest_length, ses->dir);
+		}
 	}
 	cdb->sh_hdr.hi.field.idlen = shared_desc_len;
 	cdb->sh_hdr.hi.word = rte_cpu_to_be_32(cdb->sh_hdr.hi.word);
@@ -886,6 +936,45 @@ build_cipher_auth(struct rte_crypto_op *op, dpaa_sec_session *ses)
 	return cf;
 }
 
+static inline struct dpaa_sec_job *
+build_proto(struct rte_crypto_op *op, dpaa_sec_session *ses)
+{
+	struct rte_crypto_sym_op *sym = op->sym;
+	struct dpaa_sec_job *cf;
+	struct dpaa_sec_op_ctx *ctx;
+	struct qm_sg_entry *sg;
+	phys_addr_t src_start_addr, dst_start_addr;
+
+	ctx = dpaa_sec_alloc_ctx(ses);
+	if (!ctx)
+		return NULL;
+	cf = &ctx->job;
+	ctx->op = op;
+
+	src_start_addr = rte_pktmbuf_mtophys(sym->m_src);
+
+	if (sym->m_dst)
+		dst_start_addr = rte_pktmbuf_mtophys(sym->m_dst);
+	else
+		dst_start_addr = src_start_addr;
+
+	/* input */
+	sg = &cf->sg[1];
+	qm_sg_entry_set64(sg, src_start_addr);
+	sg->length = sym->m_src->pkt_len;
+	sg->final = 1;
+	cpu_to_hw_sg(sg);
+
+	sym->m_src->packet_type &= ~RTE_PTYPE_L4_MASK;
+	/* output */
+	sg = &cf->sg[0];
+	qm_sg_entry_set64(sg, dst_start_addr);
+	sg->length = sym->m_src->buf_len - sym->m_src->data_off;
+	cpu_to_hw_sg(sg);
+
+	return cf;
+}
+
 static int
 dpaa_sec_enqueue_op(struct rte_crypto_op *op,  struct dpaa_sec_qp *qp)
 {
@@ -896,8 +985,14 @@ dpaa_sec_enqueue_op(struct rte_crypto_op *op,  struct dpaa_sec_qp *qp)
 	uint32_t auth_only_len = op->sym->auth.data.length -
 				op->sym->cipher.data.length;
 
-	ses = (dpaa_sec_session *)get_session_private_data(op->sym->session,
-					cryptodev_driver_id);
+	if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
+		ses = (dpaa_sec_session *)get_session_private_data(
+				op->sym->session, cryptodev_driver_id);
+	else if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION)
+		ses = (dpaa_sec_session *)get_sec_session_private_data(
+				op->sym->sec_session);
+	else
+		return -ENOTSUP;
 
 	if (unlikely(!ses->qp || ses->qp != qp)) {
 		PMD_INIT_LOG(DEBUG, "sess->qp - %p qp %p", ses->qp, qp);
@@ -921,6 +1016,8 @@ dpaa_sec_enqueue_op(struct rte_crypto_op *op,  struct dpaa_sec_qp *qp)
 		auth_only_len = ses->auth_only_len;
 	} else if (is_auth_cipher(ses)) {
 		cf = build_cipher_auth(op, ses);
+	} else if (is_proto_ipsec(ses)) {
+		cf = build_proto(op, ses);
 	} else {
 		PMD_TX_LOG(ERR, "not supported sec op");
 		return -ENOTSUP;
@@ -959,7 +1056,7 @@ dpaa_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
 
 	/*Prepare each packet which is to be sent*/
 	for (loop = 0; loop < nb_ops; loop++) {
-		if (ops[loop]->sess_type != RTE_CRYPTO_OP_WITH_SESSION) {
+		if (ops[loop]->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
 			PMD_TX_LOG(ERR, "sessionless crypto op not supported");
 			return 0;
 		}
@@ -1369,6 +1466,235 @@ dpaa_sec_session_clear(struct rte_cryptodev *dev,
 }
 
 static int
+dpaa_sec_set_ipsec_session(__rte_unused struct rte_cryptodev *dev,
+			   struct rte_security_session_conf *conf,
+			   void *sess)
+{
+	struct dpaa_sec_dev_private *internals = dev->data->dev_private;
+	struct rte_security_ipsec_xform *ipsec_xform = &conf->ipsec;
+	struct rte_crypto_auth_xform *auth_xform;
+	struct rte_crypto_cipher_xform *cipher_xform;
+	dpaa_sec_session *session = (dpaa_sec_session *)sess;
+
+	PMD_INIT_FUNC_TRACE();
+
+	if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
+		cipher_xform = &conf->crypto_xform->cipher;
+		auth_xform = &conf->crypto_xform->next->auth;
+	} else {
+		auth_xform = &conf->crypto_xform->auth;
+		cipher_xform = &conf->crypto_xform->next->cipher;
+	}
+	session->proto_alg = conf->protocol;
+	session->cipher_key.data = rte_zmalloc(NULL,
+					       cipher_xform->key.length,
+					       RTE_CACHE_LINE_SIZE);
+	if (session->cipher_key.data == NULL &&
+			cipher_xform->key.length > 0) {
+		RTE_LOG(ERR, PMD, "No Memory for cipher key\n");
+		return -ENOMEM;
+	}
+
+	session->cipher_key.length = cipher_xform->key.length;
+	session->auth_key.data = rte_zmalloc(NULL,
+					auth_xform->key.length,
+					RTE_CACHE_LINE_SIZE);
+	if (session->auth_key.data == NULL &&
+			auth_xform->key.length > 0) {
+		RTE_LOG(ERR, PMD, "No Memory for auth key\n");
+		rte_free(session->cipher_key.data);
+		return -ENOMEM;
+	}
+	session->auth_key.length = auth_xform->key.length;
+	memcpy(session->cipher_key.data, cipher_xform->key.data,
+			cipher_xform->key.length);
+	memcpy(session->auth_key.data, auth_xform->key.data,
+			auth_xform->key.length);
+
+	switch (auth_xform->algo) {
+	case RTE_CRYPTO_AUTH_SHA1_HMAC:
+		session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC;
+		break;
+	case RTE_CRYPTO_AUTH_MD5_HMAC:
+		session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC;
+		break;
+	case RTE_CRYPTO_AUTH_SHA256_HMAC:
+		session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC;
+		break;
+	case RTE_CRYPTO_AUTH_SHA384_HMAC:
+		session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC;
+		break;
+	case RTE_CRYPTO_AUTH_SHA512_HMAC:
+		session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC;
+		break;
+	case RTE_CRYPTO_AUTH_AES_CMAC:
+		session->auth_alg = RTE_CRYPTO_AUTH_AES_CMAC;
+		break;
+	case RTE_CRYPTO_AUTH_NULL:
+		session->auth_alg = RTE_CRYPTO_AUTH_NULL;
+		break;
+	case RTE_CRYPTO_AUTH_SHA224_HMAC:
+	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
+	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
+	case RTE_CRYPTO_AUTH_SHA1:
+	case RTE_CRYPTO_AUTH_SHA256:
+	case RTE_CRYPTO_AUTH_SHA512:
+	case RTE_CRYPTO_AUTH_SHA224:
+	case RTE_CRYPTO_AUTH_SHA384:
+	case RTE_CRYPTO_AUTH_MD5:
+	case RTE_CRYPTO_AUTH_AES_GMAC:
+	case RTE_CRYPTO_AUTH_KASUMI_F9:
+	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
+	case RTE_CRYPTO_AUTH_ZUC_EIA3:
+		RTE_LOG(ERR, PMD, "Crypto: Unsupported auth alg %u\n",
+			auth_xform->algo);
+		goto out;
+	default:
+		RTE_LOG(ERR, PMD, "Crypto: Undefined Auth specified %u\n",
+			auth_xform->algo);
+		goto out;
+	}
+
+	switch (cipher_xform->algo) {
+	case RTE_CRYPTO_CIPHER_AES_CBC:
+		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
+		break;
+	case RTE_CRYPTO_CIPHER_3DES_CBC:
+		session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
+		break;
+	case RTE_CRYPTO_CIPHER_AES_CTR:
+		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR;
+		break;
+	case RTE_CRYPTO_CIPHER_NULL:
+	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
+	case RTE_CRYPTO_CIPHER_3DES_ECB:
+	case RTE_CRYPTO_CIPHER_AES_ECB:
+	case RTE_CRYPTO_CIPHER_KASUMI_F8:
+		RTE_LOG(ERR, PMD, "Crypto: Unsupported Cipher alg %u\n",
+			cipher_xform->algo);
+		goto out;
+	default:
+		RTE_LOG(ERR, PMD, "Crypto: Undefined Cipher specified %u\n",
+			cipher_xform->algo);
+		goto out;
+	}
+
+	if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
+		memset(&session->encap_pdb, 0, sizeof(struct ipsec_encap_pdb) +
+				sizeof(session->ip4_hdr));
+		session->ip4_hdr.ip_v = IPVERSION;
+		session->ip4_hdr.ip_hl = 5;
+		session->ip4_hdr.ip_len = rte_cpu_to_be_16(
+						sizeof(session->ip4_hdr));
+		session->ip4_hdr.ip_tos = ipsec_xform->tunnel.ipv4.dscp;
+		session->ip4_hdr.ip_id = 0;
+		session->ip4_hdr.ip_off = 0;
+		session->ip4_hdr.ip_ttl = ipsec_xform->tunnel.ipv4.ttl;
+		session->ip4_hdr.ip_p = (ipsec_xform->proto ==
+				RTE_SECURITY_IPSEC_SA_PROTO_ESP) ? 0x32 : 0x33;
+		session->ip4_hdr.ip_sum = 0;
+		session->ip4_hdr.ip_src = ipsec_xform->tunnel.ipv4.src_ip;
+		session->ip4_hdr.ip_dst = ipsec_xform->tunnel.ipv4.dst_ip;
+		session->ip4_hdr.ip_sum = calc_chksum((uint16_t *)
+						(void *)&session->ip4_hdr,
+						sizeof(struct ip));
+
+		session->encap_pdb.options =
+			(IPVERSION << PDBNH_ESP_ENCAP_SHIFT) |
+			PDBOPTS_ESP_OIHI_PDB_INL |
+			PDBOPTS_ESP_IVSRC |
+			PDBHMO_ESP_ENCAP_DTTL;
+		session->encap_pdb.spi = ipsec_xform->spi;
+		session->encap_pdb.ip_hdr_len = sizeof(struct ip);
+
+		session->dir = DIR_ENC;
+	} else if (ipsec_xform->direction ==
+			RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
+		memset(&session->decap_pdb, 0, sizeof(struct ipsec_decap_pdb));
+		session->decap_pdb.options = sizeof(struct ip) << 16;
+		session->dir = DIR_DEC;
+	} else
+		goto out;
+	session->ctx_pool = internals->ctx_pool;
+	session->inq = dpaa_sec_attach_rxq(internals);
+	if (session->inq == NULL) {
+		PMD_DRV_LOG(ERR, "unable to attach sec queue");
+		goto out;
+	}
+
+
+	return 0;
+out:
+	rte_free(session->auth_key.data);
+	rte_free(session->cipher_key.data);
+	memset(session, 0, sizeof(dpaa_sec_session));
+	return -1;
+}
+
+static int
+dpaa_sec_security_session_create(void *dev,
+				 struct rte_security_session_conf *conf,
+				 struct rte_security_session *sess,
+				 struct rte_mempool *mempool)
+{
+	void *sess_private_data;
+	struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
+	int ret;
+
+	if (rte_mempool_get(mempool, &sess_private_data)) {
+		CDEV_LOG_ERR(
+			"Couldn't get object from session mempool");
+		return -ENOMEM;
+	}
+
+	switch (conf->protocol) {
+	case RTE_SECURITY_PROTOCOL_IPSEC:
+		ret = dpaa_sec_set_ipsec_session(cdev, conf,
+				sess_private_data);
+		break;
+	case RTE_SECURITY_PROTOCOL_MACSEC:
+		return -ENOTSUP;
+	default:
+		return -EINVAL;
+	}
+	if (ret != 0) {
+		PMD_DRV_LOG(ERR,
+			"DPAA2 PMD: failed to configure session parameters");
+
+		/* Return session to mempool */
+		rte_mempool_put(mempool, sess_private_data);
+		return ret;
+	}
+
+	set_sec_session_private_data(sess, sess_private_data);
+
+	return ret;
+}
+
+/** Clear the memory of session so it doesn't leave key material behind */
+static int
+dpaa_sec_security_session_destroy(void *dev __rte_unused,
+		struct rte_security_session *sess)
+{
+	PMD_INIT_FUNC_TRACE();
+	void *sess_priv = get_sec_session_private_data(sess);
+
+	dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;
+
+	if (sess_priv) {
+		struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
+
+		rte_free(s->cipher_key.data);
+		rte_free(s->auth_key.data);
+		memset(sess, 0, sizeof(dpaa_sec_session));
+		set_sec_session_private_data(sess, NULL);
+		rte_mempool_put(sess_mp, sess_priv);
+	}
+	return 0;
+}
+
+
+static int
 dpaa_sec_dev_configure(struct rte_cryptodev *dev __rte_unused,
 		       struct rte_cryptodev_config *config __rte_unused)
 {
@@ -1434,6 +1760,21 @@ static struct rte_cryptodev_ops crypto_ops = {
 	.qp_detach_session    = dpaa_sec_qp_detach_sess,
 };
 
+static const struct rte_security_capability *
+dpaa_sec_capabilities_get(void *device __rte_unused)
+{
+	return dpaa_sec_security_cap;
+}
+
+struct rte_security_ops dpaa_sec_security_ops = {
+	.session_create = dpaa_sec_security_session_create,
+	.session_update = NULL,
+	.session_stats_get = NULL,
+	.session_destroy = dpaa_sec_security_session_destroy,
+	.set_pkt_metadata = NULL,
+	.capabilities_get = dpaa_sec_capabilities_get
+};
+
 static int
 dpaa_sec_uninit(struct rte_cryptodev *dev)
 {
@@ -1442,6 +1783,8 @@ dpaa_sec_uninit(struct rte_cryptodev *dev)
 	if (dev == NULL)
 		return -ENODEV;
 
+	rte_free(dev->security_ctx);
+
 	rte_mempool_free(internals->ctx_pool);
 	rte_free(internals);
 
@@ -1455,6 +1798,7 @@ static int
 dpaa_sec_dev_init(struct rte_cryptodev *cryptodev)
 {
 	struct dpaa_sec_dev_private *internals;
+	struct rte_security_ctx *security_instance;
 	struct dpaa_sec_qp *qp;
 	uint32_t i, flags;
 	int ret;
@@ -1469,12 +1813,33 @@ dpaa_sec_dev_init(struct rte_cryptodev *cryptodev)
 	cryptodev->dequeue_burst = dpaa_sec_dequeue_burst;
 	cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
 			RTE_CRYPTODEV_FF_HW_ACCELERATED |
-			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING;
+			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
+			RTE_CRYPTODEV_FF_SECURITY;
 
 	internals = cryptodev->data->dev_private;
 	internals->max_nb_queue_pairs = RTE_DPAA_MAX_NB_SEC_QPS;
 	internals->max_nb_sessions = RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS;
 
+	/*
+	 * For secondary processes, we don't initialise any further as primary
+	 * has already done this work. Only check we don't need a different
+	 * RX function
+	 */
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+		PMD_INIT_LOG(DEBUG, "Device already init by primary process");
+		return 0;
+	}
+
+	/* Initialize security_ctx only for primary process*/
+	security_instance = rte_malloc("rte_security_instances_ops",
+				sizeof(struct rte_security_ctx), 0);
+	if (security_instance == NULL)
+		return -ENOMEM;
+	security_instance->device = (void *)cryptodev;
+	security_instance->ops = &dpaa_sec_security_ops;
+	security_instance->sess_cnt = 0;
+	cryptodev->security_ctx = security_instance;
+
 	for (i = 0; i < internals->max_nb_queue_pairs; i++) {
 		/* init qman fq for queue pair */
 		qp = &internals->qps[i];
diff --git a/drivers/crypto/dpaa_sec/dpaa_sec.h b/drivers/crypto/dpaa_sec/dpaa_sec.h
index 11407d3..295abf3 100644
--- a/drivers/crypto/dpaa_sec/dpaa_sec.h
+++ b/drivers/crypto/dpaa_sec/dpaa_sec.h
@@ -117,7 +117,8 @@ typedef struct dpaa_sec_session_entry {
 	uint8_t dir;         /*!< Operation Direction */
 	enum rte_crypto_cipher_algorithm cipher_alg; /*!< Cipher Algorithm*/
 	enum rte_crypto_auth_algorithm auth_alg; /*!< Authentication Algorithm*/
-	enum rte_crypto_aead_algorithm aead_alg; /*!< Authentication Algorithm*/
+	enum rte_crypto_aead_algorithm aead_alg; /*!< AEAD Algorithm*/
+	enum rte_security_session_protocol proto_alg; /*!< Security Algorithm*/
 	union {
 		struct {
 			uint8_t *data;	/**< pointer to key data */
@@ -140,6 +141,9 @@ typedef struct dpaa_sec_session_entry {
 	} iv;	/**< Initialisation vector parameters */
 	uint16_t auth_only_len; /*!< Length of data for Auth only */
 	uint32_t digest_length;
+	struct ipsec_encap_pdb encap_pdb;
+	struct ip ip4_hdr;
+	struct ipsec_decap_pdb decap_pdb;
 	struct dpaa_sec_qp *qp;
 	struct qman_fq *inq;
 	struct sec_cdb cdb;	/**< cmd block associated with qp */
@@ -404,4 +408,60 @@ static const struct rte_cryptodev_capabilities dpaa_sec_capabilities[] = {
 	RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
 };
 
+static const struct rte_security_capability dpaa_sec_security_cap[] = {
+	{ /* IPsec Lookaside Protocol offload ESP Tunnel Egress */
+		.action = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
+		.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
+		.ipsec = {
+			.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
+			.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
+			.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
+			.options = { 0 }
+		},
+		.crypto_capabilities = dpaa_sec_capabilities
+	},
+	{ /* IPsec Lookaside Protocol offload ESP Tunnel Ingress */
+		.action = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
+		.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
+		.ipsec = {
+			.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
+			.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
+			.direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
+			.options = { 0 }
+		},
+		.crypto_capabilities = dpaa_sec_capabilities
+	},
+	{
+		.action = RTE_SECURITY_ACTION_TYPE_NONE
+	}
+};
+
+/**
+ * Checksum
+ *
+ * @param buffer calculate chksum for buffer
+ * @param len    buffer length
+ *
+ * @return checksum value in host cpu order
+ */
+static inline uint16_t
+calc_chksum(void *buffer, int len)
+{
+	uint16_t *buf = (uint16_t *)buffer;
+	uint32_t sum = 0;
+	uint16_t result;
+
+	for (sum = 0; len > 1; len -= 2)
+		sum += *buf++;
+
+	if (len == 1)
+		sum += *(unsigned char *)buf;
+
+	sum = (sum >> 16) + (sum & 0xFFFF);
+	sum += (sum >> 16);
+	result = ~sum;
+
+	return  result;
+}
+
 #endif /* _DPAA_SEC_H_ */
-- 
2.9.3


* [dpdk-dev] [PATCH 4/5] bus/dpaa: support for enqueue frames of multiple queues
  2017-12-13 13:56 [dpdk-dev] [PATCH 0/5] crypto/dpaa_sec: performance optimizations Akhil Goyal
                   ` (2 preceding siblings ...)
  2017-12-13 13:56 ` [dpdk-dev] [PATCH 3/5] crypto/dpaa_sec: support ipsec protocol offload Akhil Goyal
@ 2017-12-13 13:56 ` Akhil Goyal
  2017-12-19 11:32   ` Hemant Agrawal
  2017-12-13 13:56 ` [dpdk-dev] [PATCH 5/5] crypto/dpaa_sec: rewrite Rx/Tx path Akhil Goyal
  2018-01-11 11:33 ` [dpdk-dev] [PATCH v2 0/3] crypto/dpaa_sec: performance optimizations Akhil Goyal
  5 siblings, 1 reply; 24+ messages in thread
From: Akhil Goyal @ 2017-12-13 13:56 UTC (permalink / raw)
  To: dev
  Cc: pablo.de.lara.guarch, hemant.agrawal, shreyansh.jain,
	Akhil Goyal, Nipun Gupta

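This patch adds qman_enqueue_multi_fq(), which fills and flushes the EQCR
ring once for a whole batch of frames while letting each frame target its
own frame queue. That is what allows the crypto Tx path to push one burst
even when the ops in it belong to different sessions, and therefore to
different SEC input queues. A caller-side sketch (mirrors the use in patch
5/5; the retry loop handles a temporarily full ring):

	struct qman_fq *inq[DPAA_SEC_BURST];	/* per-op target FQ */
	struct qm_fd fds[DPAA_SEC_BURST];	/* per-op frame descriptor */
	uint32_t loop = 0;

	/* ... fill inq[i] and fds[i] for each prepared op ... */

	while (loop < frames_to_send)
		loop += qman_enqueue_multi_fq(&inq[loop], &fds[loop],
					      frames_to_send - loop);
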
Signed-off-by: Akhil Goyal <akhil.goyal@nxp.com>
Signed-off-by: Nipun Gupta <nipun.gupta@nxp.com>
---
 drivers/bus/dpaa/base/qbman/qman.c        | 66 +++++++++++++++++++++++++++++++
 drivers/bus/dpaa/include/fsl_qman.h       | 14 +++++++
 drivers/bus/dpaa/rte_bus_dpaa_version.map |  1 +
 3 files changed, 81 insertions(+)

diff --git a/drivers/bus/dpaa/base/qbman/qman.c b/drivers/bus/dpaa/base/qbman/qman.c
index 9faf25f..6b7cbf6 100644
--- a/drivers/bus/dpaa/base/qbman/qman.c
+++ b/drivers/bus/dpaa/base/qbman/qman.c
@@ -2082,6 +2082,72 @@ int qman_enqueue_multi(struct qman_fq *fq,
 	return sent;
 }
 
+int
+qman_enqueue_multi_fq(struct qman_fq *fq[], const struct qm_fd *fd,
+		      int frames_to_send)
+{
+	struct qman_portal *p = get_affine_portal();
+	struct qm_portal *portal = &p->p;
+
+	register struct qm_eqcr *eqcr = &portal->eqcr;
+	struct qm_eqcr_entry *eq = eqcr->cursor, *prev_eq;
+
+	u8 i, diff, old_ci, sent = 0;
+
+	/* Update the available entries if no entry is free */
+	if (!eqcr->available) {
+		old_ci = eqcr->ci;
+		eqcr->ci = qm_cl_in(EQCR_CI) & (QM_EQCR_SIZE - 1);
+		diff = qm_cyc_diff(QM_EQCR_SIZE, old_ci, eqcr->ci);
+		eqcr->available += diff;
+		if (!diff)
+			return 0;
+	}
+
+	/* try to send as many frames as possible */
+	while (eqcr->available && frames_to_send--) {
+		eq->fqid = fq[sent]->fqid_le;
+		eq->fd.opaque_addr = fd->opaque_addr;
+		eq->fd.addr = cpu_to_be40(fd->addr);
+		eq->fd.status = cpu_to_be32(fd->status);
+		eq->fd.opaque = cpu_to_be32(fd->opaque);
+
+		eq = (void *)((unsigned long)(eq + 1) &
+			(~(unsigned long)(QM_EQCR_SIZE << 6)));
+		eqcr->available--;
+		sent++;
+		fd++;
+	}
+	lwsync();
+
+	/* In order for flushes to complete faster, all lines are recorded in
+	 * 32 bit word.
+	 */
+	eq = eqcr->cursor;
+	for (i = 0; i < sent; i++) {
+		eq->__dont_write_directly__verb =
+			QM_EQCR_VERB_CMD_ENQUEUE | eqcr->vbit;
+		prev_eq = eq;
+		eq = (void *)((unsigned long)(eq + 1) &
+			(~(unsigned long)(QM_EQCR_SIZE << 6)));
+		if (unlikely((prev_eq + 1) != eq))
+			eqcr->vbit ^= QM_EQCR_VERB_VBIT;
+	}
+
+	/* We need to flush all the lines but without load/store operations
+	 * between them
+	 */
+	eq = eqcr->cursor;
+	for (i = 0; i < sent; i++) {
+		dcbf(eq);
+		eq = (void *)((unsigned long)(eq + 1) &
+			(~(unsigned long)(QM_EQCR_SIZE << 6)));
+	}
+	/* Update cursor for the next call */
+	eqcr->cursor = eq;
+	return sent;
+}
+
 int qman_enqueue_orp(struct qman_fq *fq, const struct qm_fd *fd, u32 flags,
 		     struct qman_fq *orp, u16 orp_seqnum)
 {
diff --git a/drivers/bus/dpaa/include/fsl_qman.h b/drivers/bus/dpaa/include/fsl_qman.h
index 9090b63..6d935d8 100644
--- a/drivers/bus/dpaa/include/fsl_qman.h
+++ b/drivers/bus/dpaa/include/fsl_qman.h
@@ -1724,6 +1724,20 @@ int qman_enqueue_multi(struct qman_fq *fq,
 		       const struct qm_fd *fd,
 		int frames_to_send);
 
+/**
+ * qman_enqueue_multi_fq - Enqueue multiple frames to their respective frame
+ * queues.
+ * @fq[]: Array of frame queue objects to enqueue to
+ * @fd: pointer to first descriptor of frame to be enqueued
+ * @frames_to_send: number of frames to be sent.
+ *
+ * This API is similar to qman_enqueue_multi(), but it takes fds which need
+ * to be sent to different frame queues.
+ */
+int
+qman_enqueue_multi_fq(struct qman_fq *fq[], const struct qm_fd *fd,
+		      int frames_to_send);
+
 typedef int (*qman_cb_precommit) (void *arg);
 
 /**
diff --git a/drivers/bus/dpaa/rte_bus_dpaa_version.map b/drivers/bus/dpaa/rte_bus_dpaa_version.map
index 5fa975c..d440f91 100644
--- a/drivers/bus/dpaa/rte_bus_dpaa_version.map
+++ b/drivers/bus/dpaa/rte_bus_dpaa_version.map
@@ -72,6 +72,7 @@ DPDK_18.02 {
 	qman_alloc_cgrid_range;
 	qman_create_cgr;
 	qman_delete_cgr;
+	qman_enqueue_multi_fq;
 	qman_query_fq_frm_cnt;
 	qman_release_cgrid_range;
 	rte_dpaa_portal_fq_close;
-- 
2.9.3


* [dpdk-dev] [PATCH 5/5] crypto/dpaa_sec: rewrite Rx/Tx path
  2017-12-13 13:56 [dpdk-dev] [PATCH 0/5] crypto/dpaa_sec: performance optimizations Akhil Goyal
                   ` (3 preceding siblings ...)
  2017-12-13 13:56 ` [dpdk-dev] [PATCH 4/5] bus/dpaa: support for enqueue frames of multiple queues Akhil Goyal
@ 2017-12-13 13:56 ` Akhil Goyal
  2017-12-19 12:45   ` Hemant Agrawal
  2018-01-11 11:44   ` [dpdk-dev] [PATCH v2] " Akhil Goyal
  2018-01-11 11:33 ` [dpdk-dev] [PATCH v2 0/3] crypto/dpaa_sec: performance optimizations Akhil Goyal
  5 siblings, 2 replies; 24+ messages in thread
From: Akhil Goyal @ 2017-12-13 13:56 UTC (permalink / raw)
  To: dev
  Cc: pablo.de.lara.guarch, hemant.agrawal, shreyansh.jain,
	Akhil Goyal, Nipun Gupta

The Rx and Tx paths are rewritten with improved internal APIs
to improve performance.
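
Two changes dominate: dequeue moves from the callback-driven volatile
dequeue to direct DQRR consumption, and enqueue batches up to
DPAA_SEC_BURST frame descriptors and pushes them with
qman_enqueue_multi_fq() instead of one qman_enqueue() per op. The new
pull-mode dequeue loop, in sketch form (names as in the diff, translation
of the frame back to its crypto op elided):

	if (qman_set_vdq(fq, RTE_MIN(nb_ops, DPAA_MAX_DEQUEUE_NUM_FRAMES)))
		return 0;
	do {
		struct qm_dqrr_entry *dq = qman_dequeue(fq);

		if (!dq)
			continue;	/* volatile dequeue still in flight */
		/* ... recover the op ctx from dq->fd and record status ... */
		qman_dqrr_consume(fq, dq);
	} while (fq->flags & QMAN_FQ_STATE_VDQCR);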

Signed-off-by: Akhil Goyal <akhil.goyal@nxp.com>
Signed-off-by: Nipun Gupta <nipun.gupta@nxp.com>
---
 drivers/crypto/dpaa_sec/dpaa_sec.c | 260 ++++++++++++++++++++++---------------
 drivers/crypto/dpaa_sec/dpaa_sec.h |   2 +-
 2 files changed, 153 insertions(+), 109 deletions(-)

diff --git a/drivers/crypto/dpaa_sec/dpaa_sec.c b/drivers/crypto/dpaa_sec/dpaa_sec.c
index ea744e6..b650d5c 100644
--- a/drivers/crypto/dpaa_sec/dpaa_sec.c
+++ b/drivers/crypto/dpaa_sec/dpaa_sec.c
@@ -563,46 +563,67 @@ dpaa_sec_prep_cdb(dpaa_sec_session *ses)
 	return 0;
 }
 
-static inline unsigned int
-dpaa_volatile_deq(struct qman_fq *fq, unsigned int len, bool exact)
-{
-	unsigned int pkts = 0;
-	int ret;
-	struct qm_mcr_queryfq_np np;
-	enum qman_fq_state state;
-	uint32_t flags;
-	uint32_t vdqcr;
-
-	qman_query_fq_np(fq, &np);
-	if (np.frm_cnt) {
-		vdqcr = QM_VDQCR_NUMFRAMES_SET(len);
-		if (exact)
-			vdqcr |= QM_VDQCR_EXACT;
-		ret = qman_volatile_dequeue(fq, 0, vdqcr);
-		if (ret)
-			return 0;
-		do {
-			pkts += qman_poll_dqrr(len);
-			qman_fq_state(fq, &state, &flags);
-		} while (flags & QMAN_FQ_STATE_VDQCR);
-	}
-	return pkts;
-}
-
+#define DPAA_MAX_DEQUEUE_NUM_FRAMES 32
 /* qp is lockless, should be accessed by only one thread */
 static int
 dpaa_sec_deq(struct dpaa_sec_qp *qp, struct rte_crypto_op **ops, int nb_ops)
 {
 	struct qman_fq *fq;
+	unsigned int pkts = 0;
+	int ret;
+	struct qm_dqrr_entry *dq;
 
 	fq = &qp->outq;
-	dpaa_sec_op_nb = 0;
-	dpaa_sec_ops = ops;
+	ret = qman_set_vdq(fq, (nb_ops > DPAA_MAX_DEQUEUE_NUM_FRAMES) ?
+				DPAA_MAX_DEQUEUE_NUM_FRAMES : nb_ops);
+	if (ret)
+		return 0;
+
+	do {
+		const struct qm_fd *fd;
+		struct dpaa_sec_job *job;
+		struct dpaa_sec_op_ctx *ctx;
+		struct rte_crypto_op *op;
+
+		dq = qman_dequeue(fq);
+		if (!dq)
+			continue;
+
+		fd = &dq->fd;
+		/* sg is embedded in an op ctx,
+		 * sg[0] is for output
+		 * sg[1] for input
+		 */
+		job = dpaa_mem_ptov(qm_fd_addr_get64(fd));
+
+		ctx = container_of(job, struct dpaa_sec_op_ctx, job);
+		ctx->fd_status = fd->status;
+		op = ctx->op;
+		if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
+			struct qm_sg_entry *sg_out;
+			uint32_t len;
+
+			sg_out = &job->sg[0];
+			hw_sg_to_cpu(sg_out);
+			len = sg_out->length;
+			op->sym->m_src->pkt_len = len;
+			op->sym->m_src->data_len = len;
+		}
+		if (!ctx->fd_status) {
+			op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+		} else {
+			printf("\nSEC return err: 0x%x", ctx->fd_status);
+			op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+		}
+		ops[pkts++] = op;
 
-	if (unlikely(nb_ops > DPAA_SEC_BURST))
-		nb_ops = DPAA_SEC_BURST;
+		/* report op status to sym->op and then free the ctx memory */
+		rte_mempool_put(ctx->ctx_pool, (void *)ctx);
 
-	return dpaa_volatile_deq(fq, nb_ops, 1);
+		qman_dqrr_consume(fq, dq);
+	} while (fq->flags & QMAN_FQ_STATE_VDQCR);
+
+	return pkts;
 }
 
 /**
@@ -975,95 +996,118 @@ build_proto(struct rte_crypto_op *op, dpaa_sec_session *ses)
 	return cf;
 }
 
-static int
-dpaa_sec_enqueue_op(struct rte_crypto_op *op,  struct dpaa_sec_qp *qp)
-{
-	struct dpaa_sec_job *cf;
-	dpaa_sec_session *ses;
-	struct qm_fd fd;
-	int ret;
-	uint32_t auth_only_len = op->sym->auth.data.length -
-				op->sym->cipher.data.length;
-
-	if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
-		ses = (dpaa_sec_session *)get_session_private_data(
-				op->sym->session, cryptodev_driver_id);
-	else if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION)
-		ses = (dpaa_sec_session *)get_sec_session_private_data(
-				op->sym->sec_session);
-	else
-		return -ENOTSUP;
-
-	if (unlikely(!ses->qp || ses->qp != qp)) {
-		PMD_INIT_LOG(DEBUG, "sess->qp - %p qp %p", ses->qp, qp);
-		if (dpaa_sec_attach_sess_q(qp, ses))
-			return -1;
-	}
-
-	/*
-	 * Segmented buffer is not supported.
-	 */
-	if (!rte_pktmbuf_is_contiguous(op->sym->m_src)) {
-		op->status = RTE_CRYPTO_OP_STATUS_ERROR;
-		return -ENOTSUP;
-	}
-	if (is_auth_only(ses)) {
-		cf = build_auth_only(op, ses);
-	} else if (is_cipher_only(ses)) {
-		cf = build_cipher_only(op, ses);
-	} else if (is_aead(ses)) {
-		cf = build_cipher_auth_gcm(op, ses);
-		auth_only_len = ses->auth_only_len;
-	} else if (is_auth_cipher(ses)) {
-		cf = build_cipher_auth(op, ses);
-	} else if (is_proto_ipsec(ses)) {
-		cf = build_proto(op, ses);
-	} else {
-		PMD_TX_LOG(ERR, "not supported sec op");
-		return -ENOTSUP;
-	}
-	if (unlikely(!cf))
-		return -ENOMEM;
-
-	memset(&fd, 0, sizeof(struct qm_fd));
-	qm_fd_addr_set64(&fd, dpaa_mem_vtop(cf->sg));
-	fd._format1 = qm_fd_compound;
-	fd.length29 = 2 * sizeof(struct qm_sg_entry);
-	/* Auth_only_len is set as 0 in descriptor and it is overwritten
-	 * here in the fd.cmd which will update the DPOVRD reg.
-	 */
-	if (auth_only_len)
-		fd.cmd = 0x80000000 | auth_only_len;
-	do {
-		ret = qman_enqueue(ses->inq, &fd, 0);
-	} while (ret != 0);
-
-	return 0;
-}
-
 static uint16_t
 dpaa_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
 		       uint16_t nb_ops)
 {
 	/* Function to transmit the frames to given device and queuepair */
 	uint32_t loop;
-	int32_t ret;
 	struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;
 	uint16_t num_tx = 0;
+	struct qm_fd fds[DPAA_SEC_BURST], *fd;
+	uint32_t frames_to_send;
+	struct rte_crypto_op *op;
+	struct dpaa_sec_job *cf;
+	dpaa_sec_session *ses;
+	struct dpaa_sec_op_ctx *ctx;
+	uint32_t auth_only_len;
+	struct qman_fq *inq[DPAA_SEC_BURST];
+
+	while (nb_ops) {
+		frames_to_send = (nb_ops > DPAA_SEC_BURST) ?
+				DPAA_SEC_BURST : nb_ops;
+		for (loop = 0; loop < frames_to_send; loop++) {
+			op = *(ops++);
+			switch (op->sess_type) {
+			case RTE_CRYPTO_OP_WITH_SESSION:
+				ses = (dpaa_sec_session *)
+					get_session_private_data(
+							op->sym->session,
+							cryptodev_driver_id);
+				break;
+			case RTE_CRYPTO_OP_SECURITY_SESSION:
+				ses = (dpaa_sec_session *)
+					get_sec_session_private_data(
+							op->sym->sec_session);
+				break;
+			default:
+				PMD_TX_LOG(ERR,
+					"sessionless crypto op not supported");
+				frames_to_send = loop;
+				nb_ops = loop;
+				goto send_pkts;
+			}
+			if (unlikely(!ses->qp || ses->qp != qp)) {
+				PMD_INIT_LOG(DEBUG, "sess->qp - %p qp %p",
+						ses->qp, qp);
+				if (dpaa_sec_attach_sess_q(qp, ses)) {
+					frames_to_send = loop;
+					nb_ops = loop;
+					goto send_pkts;
+				}
+			}
 
-	if (unlikely(nb_ops == 0))
-		return 0;
+			/*
+			 * Segmented buffer is not supported.
+			 */
+			if (!rte_pktmbuf_is_contiguous(op->sym->m_src)) {
+				op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+				frames_to_send = loop;
+				nb_ops = loop;
+				goto send_pkts;
+			}
+			auth_only_len = op->sym->auth.data.length -
+						op->sym->cipher.data.length;
+
+			if (is_auth_only(ses)) {
+				cf = build_auth_only(op, ses);
+			} else if (is_cipher_only(ses)) {
+				cf = build_cipher_only(op, ses);
+			} else if (is_aead(ses)) {
+				cf = build_cipher_auth_gcm(op, ses);
+				auth_only_len = ses->auth_only_len;
+			} else if (is_auth_cipher(ses)) {
+				cf = build_cipher_auth(op, ses);
+			} else if (is_proto_ipsec(ses)) {
+				cf = build_proto(op, ses);
+			} else {
+				PMD_TX_LOG(ERR, "not supported sec op");
+				frames_to_send = loop;
+				nb_ops = loop;
+				goto send_pkts;
+			}
+			if (unlikely(!cf)) {
+				frames_to_send = loop;
+				nb_ops = loop;
+				goto send_pkts;
+			}
 
-	/*Prepare each packet which is to be sent*/
-	for (loop = 0; loop < nb_ops; loop++) {
-		if (ops[loop]->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
-			PMD_TX_LOG(ERR, "sessionless crypto op not supported");
-			return 0;
+			fd = &fds[loop];
+			inq[loop] = ses->inq;
+			fd->opaque_addr = 0;
+			fd->cmd = 0;
+			ctx = container_of(cf, struct dpaa_sec_op_ctx, job);
+			qm_fd_addr_set64(fd, dpaa_mem_vtop_ctx(ctx, cf->sg));
+			fd->_format1 = qm_fd_compound;
+			fd->length29 = 2 * sizeof(struct qm_sg_entry);
+			/* Auth_only_len is set as 0 in descriptor and it is
+			 * overwritten here in the fd.cmd which will update
+			 * the DPOVRD reg.
+			 */
+			if (auth_only_len)
+				fd->cmd = 0x80000000 | auth_only_len;
+
+		}
+send_pkts:
+		loop = 0;
+		while (loop < frames_to_send) {
+			loop += qman_enqueue_multi_fq(&inq[loop], &fds[loop],
+					frames_to_send - loop);
 		}
-		ret = dpaa_sec_enqueue_op(ops[loop], dpaa_qp);
-		if (!ret)
-			num_tx++;
+		nb_ops -= frames_to_send;
+		num_tx += frames_to_send;
 	}
+
 	dpaa_qp->tx_pkts += num_tx;
 	dpaa_qp->tx_errs += nb_ops - num_tx;
 
diff --git a/drivers/crypto/dpaa_sec/dpaa_sec.h b/drivers/crypto/dpaa_sec/dpaa_sec.h
index 295abf3..b00d50a 100644
--- a/drivers/crypto/dpaa_sec/dpaa_sec.h
+++ b/drivers/crypto/dpaa_sec/dpaa_sec.h
@@ -34,7 +34,7 @@
 #define _DPAA_SEC_H_
 
 #define NUM_POOL_CHANNELS	4
-#define DPAA_SEC_BURST		32
+#define DPAA_SEC_BURST		7
 #define DPAA_SEC_ALG_UNSUPPORT	(-1)
 #define TDES_CBC_IV_LEN		8
 #define AES_CBC_IV_LEN		16
-- 
2.9.3


* Re: [dpdk-dev] [PATCH 4/5] bus/dpaa: support for enqueue frames of multiple queues
  2017-12-13 13:56 ` [dpdk-dev] [PATCH 4/5] bus/dpaa: support for enqueue frames of multiple queues Akhil Goyal
@ 2017-12-19 11:32   ` Hemant Agrawal
  0 siblings, 0 replies; 24+ messages in thread
From: Hemant Agrawal @ 2017-12-19 11:32 UTC (permalink / raw)
  To: Akhil Goyal, dev; +Cc: pablo.de.lara.guarch, shreyansh.jain, Nipun Gupta

On 12/13/2017 7:26 PM, Akhil Goyal wrote:
> Signed-off-by: Akhil Goyal <akhil.goyal@nxp.com>
> Signed-off-by: Nipun Gupta <nipun.gupta@nxp.com>
> ---
>  drivers/bus/dpaa/base/qbman/qman.c        | 66 +++++++++++++++++++++++++++++++
>  drivers/bus/dpaa/include/fsl_qman.h       | 14 +++++++
>  drivers/bus/dpaa/rte_bus_dpaa_version.map |  1 +
>  3 files changed, 81 insertions(+)
>
> diff --git a/drivers/bus/dpaa/base/qbman/qman.c b/drivers/bus/dpaa/base/qbman/qman.c
> index 9faf25f..6b7cbf6 100644
> --- a/drivers/bus/dpaa/base/qbman/qman.c
> +++ b/drivers/bus/dpaa/base/qbman/qman.c
> @@ -2082,6 +2082,72 @@ int qman_enqueue_multi(struct qman_fq *fq,
>  	return sent;
>  }
>

.. <snip>
>
>  /**
> diff --git a/drivers/bus/dpaa/rte_bus_dpaa_version.map b/drivers/bus/dpaa/rte_bus_dpaa_version.map
> index 5fa975c..d440f91 100644
> --- a/drivers/bus/dpaa/rte_bus_dpaa_version.map
> +++ b/drivers/bus/dpaa/rte_bus_dpaa_version.map
> @@ -72,6 +72,7 @@ DPDK_18.02 {
>  	qman_alloc_cgrid_range;
>  	qman_create_cgr;
>  	qman_delete_cgr;
> +	qman_enqueue_multi_fq;

It seems your patch has a dependency on the DPAA patch series.


>  	qman_query_fq_frm_cnt;
>  	qman_release_cgrid_range;
>  	rte_dpaa_portal_fq_close;

Acked-by: Hemant Agrawal <hemant.agrawal@nxp.com>

^ permalink raw reply	[flat|nested] 24+ messages in thread

* Re: [dpdk-dev] [PATCH 5/5] crypto/dpaa_sec: rewrite Rx/Tx path
  2017-12-13 13:56 ` [dpdk-dev] [PATCH 5/5] crypto/dpaa_sec: rewrite Rx/Tx path Akhil Goyal
@ 2017-12-19 12:45   ` Hemant Agrawal
  2018-01-08 11:13     ` De Lara Guarch, Pablo
  2018-01-11 11:44   ` [dpdk-dev] [PATCH v2] " Akhil Goyal
  1 sibling, 1 reply; 24+ messages in thread
From: Hemant Agrawal @ 2017-12-19 12:45 UTC (permalink / raw)
  To: Akhil Goyal, dev; +Cc: pablo.de.lara.guarch, shreyansh.jain, Nipun Gupta

Hi Akhil,

On 12/13/2017 7:26 PM, Akhil Goyal wrote:
> Rx and Tx paths are rewritten with improved internal APIs
> to improve performance.
>
> Signed-off-by: Akhil Goyal <akhil.goyal@nxp.com>
> Signed-off-by: Nipun Gupta <nipun.gupta@nxp.com>
> ---
>  drivers/crypto/dpaa_sec/dpaa_sec.c | 260 ++++++++++++++++++++++---------------
>  drivers/crypto/dpaa_sec/dpaa_sec.h |   2 +-
>  2 files changed, 153 insertions(+), 109 deletions(-)
>
> diff --git a/drivers/crypto/dpaa_sec/dpaa_sec.c b/drivers/crypto/dpaa_sec/dpaa_sec.c
> index ea744e6..b650d5c 100644
> --- a/drivers/crypto/dpaa_sec/dpaa_sec.c
> +++ b/drivers/crypto/dpaa_sec/dpaa_sec.c
> @@ -563,46 +563,67 @@ dpaa_sec_prep_cdb(dpaa_sec_session *ses)
>  	return 0;
>  }
>
..<snip>

> -
> +#define DPAA_MAX_DEQUEUE_NUM_FRAMES 32

It would be better if you define it in dpaa_sec.h.
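
i.e. something like this in the header (a sketch only; the exact
placement is up to you):

	/* max number of frames per volatile dequeue (VDQCR) */
	#define DPAA_MAX_DEQUEUE_NUM_FRAMES 32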

>  /* qp is lockless, should be accessed by only one thread */
>  static int
>  dpaa_sec_deq(struct dpaa_sec_qp *qp, struct rte_crypto_op **ops, int nb_ops)
>  {
>  	struct qman_fq *fq;
> +	unsigned int pkts = 0;
> +	int ret;
> +	struct qm_dqrr_entry *dq;
>
>  	fq = &qp->outq;
> -	dpaa_sec_op_nb = 0;
> -	dpaa_sec_ops = ops;
> +	ret = qman_set_vdq(fq, (nb_ops > DPAA_MAX_DEQUEUE_NUM_FRAMES) ?
> +				DPAA_MAX_DEQUEUE_NUM_FRAMES : nb_ops);

Any particular reason for keeping the limit as 32 for SEC?
The dpaa eth PMD uses 63, i.e. the full 6-bit field.

Also, you have the option to use '0': a NUM_FRAMES of zero indicates
that the volatile command does not terminate until the specified FQ
becomes empty.
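
For reference, the clamp in question (quoted from the patch); if
qman_set_vdq() were given 0 here, the volatile dequeue would instead
run until the FQ drains:

	ret = qman_set_vdq(fq, (nb_ops > DPAA_MAX_DEQUEUE_NUM_FRAMES) ?
				DPAA_MAX_DEQUEUE_NUM_FRAMES : nb_ops);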

>
.. <snip>
> -
>  static uint16_t
>  dpaa_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
>  		       uint16_t nb_ops)
>  {
>  	/* Function to transmit the frames to given device and queuepair */
>  	uint32_t loop;
> -	int32_t ret;
>  	struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;

You can avoid this explicit typecast.
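
For example (illustrative only; in C a void * converts implicitly to
any object pointer type, so no cast is needed):

	struct dpaa_sec_qp *dpaa_qp = qp;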

>  	uint16_t num_tx = 0;
> +	struct qm_fd fds[DPAA_SEC_BURST], *fd;
> +	uint32_t frames_to_send;
> +	struct rte_crypto_op *op;
> +	struct dpaa_sec_job *cf;
> +	dpaa_sec_session *ses;
> +	struct dpaa_sec_op_ctx *ctx;
> +	uint32_t auth_only_len;
> +	struct qman_fq *inq[DPAA_SEC_BURST];
> +
> +	while (nb_ops) {
> +		frames_to_send = (nb_ops > DPAA_SEC_BURST) ?
> +				DPAA_SEC_BURST : nb_ops;
> +		for (loop = 0; loop < frames_to_send; loop++) {
> +			op = *(ops++);
> +			switch (op->sess_type) {
> +			case RTE_CRYPTO_OP_WITH_SESSION:
> +				ses = (dpaa_sec_session *)

Here and at the other places as well.
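
For example (illustrative; get_session_private_data() already returns
void *, which converts implicitly in C):

	ses = get_session_private_data(op->sym->session,
			cryptodev_driver_id);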

> +					get_session_private_data(
> +							op->sym->session,
> +							cryptodev_driver_id);
> +				break;
> +			case RTE_CRYPTO_OP_SECURITY_SESSION:
> +				ses = (dpaa_sec_session *)
> +					get_sec_session_private_data(
> +							op->sym->sec_session);
> +				break;
> +			default:
> +				PMD_TX_LOG(ERR,
> +					"sessionless crypto op not supported");
> +				frames_to_send = loop;
> +				nb_ops = loop;
> +				goto send_pkts;
> +			}
> +			if (unlikely(!ses->qp || ses->qp != qp)) {
> +				PMD_INIT_LOG(DEBUG, "sess->qp - %p qp %p",
> +						ses->qp, qp);
> +				if (dpaa_sec_attach_sess_q(qp, ses)) {
> +					frames_to_send = loop;
> +					nb_ops = loop;
> +					goto send_pkts;
> +				}
> +			}
>
> -	if (unlikely(nb_ops == 0))
> -		return 0;
> +			/*
> +			 * Segmented buffer is not supported.
> +			 */
> +			if (!rte_pktmbuf_is_contiguous(op->sym->m_src)) {
> +				op->status = RTE_CRYPTO_OP_STATUS_ERROR;
> +				frames_to_send = loop;
> +				nb_ops = loop;
> +				goto send_pkts;
> +			}
> +			auth_only_len = op->sym->auth.data.length -
> +						op->sym->cipher.data.length;
> +
> +			if (is_auth_only(ses)) {
> +				cf = build_auth_only(op, ses);
> +			} else if (is_cipher_only(ses)) {
> +				cf = build_cipher_only(op, ses);
> +			} else if (is_aead(ses)) {
> +				cf = build_cipher_auth_gcm(op, ses);
> +				auth_only_len = ses->auth_only_len;
> +			} else if (is_auth_cipher(ses)) {
> +				cf = build_cipher_auth(op, ses);
> +			} else if (is_proto_ipsec(ses)) {
> +				cf = build_proto(op, ses);
> +			} else {
> +				PMD_TX_LOG(ERR, "not supported sec op");
> +				frames_to_send = loop;
> +				nb_ops = loop;
> +				goto send_pkts;
> +			}

^ permalink raw reply	[flat|nested] 24+ messages in thread

* Re: [dpdk-dev] [PATCH 3/5] crypto/dpaa_sec: support ipsec protocol offload
  2017-12-13 13:56 ` [dpdk-dev] [PATCH 3/5] crypto/dpaa_sec: support ipsec protocol offload Akhil Goyal
@ 2017-12-19 12:59   ` Hemant Agrawal
  0 siblings, 0 replies; 24+ messages in thread
From: Hemant Agrawal @ 2017-12-19 12:59 UTC (permalink / raw)
  To: Akhil Goyal, dev; +Cc: pablo.de.lara.guarch, shreyansh.jain

On 12/13/2017 7:26 PM, Akhil Goyal wrote:
> Signed-off-by: Akhil Goyal <akhil.goyal@nxp.com>
> ---
>  doc/guides/cryptodevs/features/dpaa_sec.ini |   1 +
>  drivers/crypto/dpaa_sec/dpaa_sec.c          | 409 ++++++++++++++++++++++++++--
>  drivers/crypto/dpaa_sec/dpaa_sec.h          |  62 ++++-
>  3 files changed, 449 insertions(+), 23 deletions(-)
>
..<snip>
> +	if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
> +		memset(&session->encap_pdb, 0, sizeof(struct ipsec_encap_pdb) +
> +				sizeof(session->ip4_hdr));
> +		session->ip4_hdr.ip_v = IPVERSION;
> +		session->ip4_hdr.ip_hl = 5;
> +		session->ip4_hdr.ip_len = rte_cpu_to_be_16(
> +						sizeof(session->ip4_hdr));
> +		session->ip4_hdr.ip_tos = ipsec_xform->tunnel.ipv4.dscp;
> +		session->ip4_hdr.ip_id = 0;
> +		session->ip4_hdr.ip_off = 0;
> +		session->ip4_hdr.ip_ttl = ipsec_xform->tunnel.ipv4.ttl;
> +		session->ip4_hdr.ip_p = (ipsec_xform->proto ==
> +				RTE_SECURITY_IPSEC_SA_PROTO_ESP) ? 0x32 : 0x33;

It would be better if you used the standard values, such as "IPPROTO_ESP".
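
i.e. something like (a sketch; IPPROTO_ESP/IPPROTO_AH from
<netinet/in.h> carry the 0x32/0x33 values used here):

	session->ip4_hdr.ip_p = (ipsec_xform->proto ==
			RTE_SECURITY_IPSEC_SA_PROTO_ESP) ? IPPROTO_ESP :
			IPPROTO_AH;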

> +		session->ip4_hdr.ip_sum = 0;
> +		session->ip4_hdr.ip_src = ipsec_xform->tunnel.ipv4.src_ip;
> +		session->ip4_hdr.ip_dst = ipsec_xform->tunnel.ipv4.dst_ip;
> +		session->ip4_hdr.ip_sum = calc_chksum((uint16_t *)
> +						(void *)&session->ip4_hdr,
> +						sizeof(struct ip));

Acked-by: Hemant Agrawal <hemant.agrawal@nxp.com>

^ permalink raw reply	[flat|nested] 24+ messages in thread

* Re: [dpdk-dev] [PATCH 5/5] crypto/dpaa_sec: rewrite Rx/Tx path
  2017-12-19 12:45   ` Hemant Agrawal
@ 2018-01-08 11:13     ` De Lara Guarch, Pablo
  2018-01-08 11:16       ` Akhil Goyal
  0 siblings, 1 reply; 24+ messages in thread
From: De Lara Guarch, Pablo @ 2018-01-08 11:13 UTC (permalink / raw)
  To: Hemant Agrawal, Akhil Goyal, dev; +Cc: shreyansh.jain, Nipun Gupta



> -----Original Message-----
> From: Hemant Agrawal [mailto:hemant.agrawal@nxp.com]
> Sent: Tuesday, December 19, 2017 12:46 PM
> To: Akhil Goyal <akhil.goyal@nxp.com>; dev@dpdk.org
> Cc: De Lara Guarch, Pablo <pablo.de.lara.guarch@intel.com>;
> shreyansh.jain@nxp.com; Nipun Gupta <nipun.gupta@nxp.com>
> Subject: Re: [PATCH 5/5] crypto/dpaa_sec: rewrite Rx/Tx path
> 
> Hi Akhil,
> 
> On 12/13/2017 7:26 PM, Akhil Goyal wrote:
> > Rx and Tx paths are rewritten with improved internal APIs to improve
> > performance.
> >
> > Signed-off-by: Akhil Goyal <akhil.goyal@nxp.com>
> > Signed-off-by: Nipun Gupta <nipun.gupta@nxp.com>

Hi Akhil,

Are you planning on submitting a new version soon?

Thanks,
Pablo

^ permalink raw reply	[flat|nested] 24+ messages in thread

* Re: [dpdk-dev] [PATCH 5/5] crypto/dpaa_sec: rewrite Rx/Tx path
  2018-01-08 11:13     ` De Lara Guarch, Pablo
@ 2018-01-08 11:16       ` Akhil Goyal
  2018-01-08 11:24         ` De Lara Guarch, Pablo
  0 siblings, 1 reply; 24+ messages in thread
From: Akhil Goyal @ 2018-01-08 11:16 UTC (permalink / raw)
  To: De Lara Guarch, Pablo, Hemant Agrawal, dev; +Cc: shreyansh.jain, Nipun Gupta

On 1/8/2018 4:43 PM, De Lara Guarch, Pablo wrote:
> 
> 
>> -----Original Message-----
>> From: Hemant Agrawal [mailto:hemant.agrawal@nxp.com]
>> Sent: Tuesday, December 19, 2017 12:46 PM
>> To: Akhil Goyal <akhil.goyal@nxp.com>; dev@dpdk.org
>> Cc: De Lara Guarch, Pablo <pablo.de.lara.guarch@intel.com>;
>> shreyansh.jain@nxp.com; Nipun Gupta <nipun.gupta@nxp.com>
>> Subject: Re: [PATCH 5/5] crypto/dpaa_sec: rewrite Rx/Tx path
>>
>> Hi Akhil,
>>
>> On 12/13/2017 7:26 PM, Akhil Goyal wrote:
>>> Rx and Tx paths are rewritten with improved internal APIs to improve
>>> performance.
>>>
>>> Signed-off-by: Akhil Goyal <akhil.goyal@nxp.com>
>>> Signed-off-by: Nipun Gupta <nipun.gupta@nxp.com>
> 
> Hi Akhil,
> 
> Are you planning on submitting a new version soon?
> 
> Thanks,
> Pablo
> 
Yes, I will be sending it tomorrow. But I believe this patchset is
dependent on the DPAA1 patches sent by Hemant. So in any case these can
be merged once his patches get merged on master, so that you can pull
them back into the crypto tree. Hemant may be sending the patches soon.

Thanks,
Akhil

^ permalink raw reply	[flat|nested] 24+ messages in thread

* Re: [dpdk-dev] [PATCH 5/5] crypto/dpaa_sec: rewrite Rx/Tx path
  2018-01-08 11:16       ` Akhil Goyal
@ 2018-01-08 11:24         ` De Lara Guarch, Pablo
  0 siblings, 0 replies; 24+ messages in thread
From: De Lara Guarch, Pablo @ 2018-01-08 11:24 UTC (permalink / raw)
  To: Akhil Goyal, Hemant Agrawal, dev; +Cc: shreyansh.jain, Nipun Gupta



> -----Original Message-----
> From: Akhil Goyal [mailto:akhil.goyal@nxp.com]
> Sent: Monday, January 8, 2018 11:17 AM
> To: De Lara Guarch, Pablo <pablo.de.lara.guarch@intel.com>; Hemant
> Agrawal <hemant.agrawal@nxp.com>; dev@dpdk.org
> Cc: shreyansh.jain@nxp.com; Nipun Gupta <nipun.gupta@nxp.com>
> Subject: Re: [PATCH 5/5] crypto/dpaa_sec: rewrite Rx/Tx path
> 
> On 1/8/2018 4:43 PM, De Lara Guarch, Pablo wrote:
> >
> >
> >> -----Original Message-----
> >> From: Hemant Agrawal [mailto:hemant.agrawal@nxp.com]
> >> Sent: Tuesday, December 19, 2017 12:46 PM
> >> To: Akhil Goyal <akhil.goyal@nxp.com>; dev@dpdk.org
> >> Cc: De Lara Guarch, Pablo <pablo.de.lara.guarch@intel.com>;
> >> shreyansh.jain@nxp.com; Nipun Gupta <nipun.gupta@nxp.com>
> >> Subject: Re: [PATCH 5/5] crypto/dpaa_sec: rewrite Rx/Tx path
> >>
> >> Hi Akhil,
> >>
> >> On 12/13/2017 7:26 PM, Akhil Goyal wrote:
> >>> Rx and Tx paths are rewritten with improved internal APIs to improve
> >>> performance.
> >>>
> >>> Signed-off-by: Akhil Goyal <akhil.goyal@nxp.com>
> >>> Signed-off-by: Nipun Gupta <nipun.gupta@nxp.com>
> >
> > Hi Akhil,
> >
> > Are you planning on submitting a new version soon?
> >
> > Thanks,
> > Pablo
> >
> Yes, I will be sending it tomorrow. But I believe this patchset is
> dependent on the DPAA1 patches sent by Hemant. So in any case these can
> be merged once his patches get merged on master, so that you can pull
> them back into the crypto tree. Hemant may be sending the patches soon.

Thanks for the information.

Pablo
> 
> Thanks,
> Akhil

^ permalink raw reply	[flat|nested] 24+ messages in thread

* [dpdk-dev] [PATCH v2 0/3] crypto/dpaa_sec: performance optimizations
  2017-12-13 13:56 [dpdk-dev] [PATCH 0/5] crypto/dpaa_sec: performance optimizations Akhil Goyal
                   ` (4 preceding siblings ...)
  2017-12-13 13:56 ` [dpdk-dev] [PATCH 5/5] crypto/dpaa_sec: rewrite Rx/Tx path Akhil Goyal
@ 2018-01-11 11:33 ` Akhil Goyal
  2018-01-11 11:33   ` [dpdk-dev] [PATCH v2 1/3] crypto/dpaa_sec: optimize virt to phy conversion Akhil Goyal
                     ` (3 more replies)
  5 siblings, 4 replies; 24+ messages in thread
From: Akhil Goyal @ 2018-01-11 11:33 UTC (permalink / raw)
  To: dev; +Cc: pablo.de.lara.guarch, hemant.agrawal, Akhil Goyal

The following changes are added to improve performance.
1. optimize virtual to physical address conversion
2. support for multiple sessions in a single queue pair
3. support for ipsec protocol offload

changes in v2:
 - incorporated comments from Hemant
 - split the patchset to remove dependency on bus/dpaa patch
Note:
1. This patchset is now independent of the patches on the net subtree.
2. The bus/dpaa patch is already applied to the net subtree.
3. The last patch in v1 of this series will be sent separately as it will
   be dependent on the net subtree.

Akhil Goyal (1):
  crypto/dpaa_sec: support ipsec protocol offload

Hemant Agrawal (2):
  crypto/dpaa_sec: optimize virt to phy conversion
  crypto/dpaa_sec: support multiple sessions per qp

 doc/guides/cryptodevs/features/dpaa_sec.ini |   1 +
 drivers/crypto/dpaa_sec/dpaa_sec.c          | 591 ++++++++++++++++++++++++----
 drivers/crypto/dpaa_sec/dpaa_sec.h          | 135 +++++--
 3 files changed, 613 insertions(+), 114 deletions(-)

-- 
2.9.3

^ permalink raw reply	[flat|nested] 24+ messages in thread

* [dpdk-dev] [PATCH v2 1/3] crypto/dpaa_sec: optimize virt to phy conversion
  2018-01-11 11:33 ` [dpdk-dev] [PATCH v2 0/3] crypto/dpaa_sec: performance optimizations Akhil Goyal
@ 2018-01-11 11:33   ` Akhil Goyal
  2018-01-11 11:33   ` [dpdk-dev] [PATCH v2 2/3] crypto/dpaa_sec: support multiple sessions per qp Akhil Goyal
                     ` (2 subsequent siblings)
  3 siblings, 0 replies; 24+ messages in thread
From: Akhil Goyal @ 2018-01-11 11:33 UTC (permalink / raw)
  To: dev; +Cc: pablo.de.lara.guarch, hemant.agrawal

From: Hemant Agrawal <hemant.agrawal@nxp.com>

Context memory is allocated from a mempool. Ideally
it will get all memory from a single segment, so a simple offset
calculation can be used for address conversion of such addresses
from context memory.
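
In effect (an illustrative summary of the hunks below):

	/* at ctx allocation: record the virt-to-phys delta once */
	ctx->vtop_offset = (uint64_t)ctx - rte_mempool_virt2iova(ctx);

	/* any later address inside the same ctx memory: */
	paddr = (uint64_t)vaddr - ctx->vtop_offset;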

Signed-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>
Acked-by: Akhil Goyal <akhil.goyal@nxp.com>
---
 drivers/crypto/dpaa_sec/dpaa_sec.c | 27 ++++++++++++++++++---------
 drivers/crypto/dpaa_sec/dpaa_sec.h |  1 +
 2 files changed, 19 insertions(+), 9 deletions(-)

diff --git a/drivers/crypto/dpaa_sec/dpaa_sec.c b/drivers/crypto/dpaa_sec/dpaa_sec.c
index ad1b309..157eace 100644
--- a/drivers/crypto/dpaa_sec/dpaa_sec.c
+++ b/drivers/crypto/dpaa_sec/dpaa_sec.c
@@ -80,6 +80,8 @@ dpaa_sec_alloc_ctx(dpaa_sec_session *ses)
 	dcbz_64(&ctx->job.sg[SG_CACHELINE_3]);
 
 	ctx->ctx_pool = ses->ctx_pool;
+	ctx->vtop_offset = (uint64_t) ctx
+				- rte_mempool_virt2iova(ctx);
 
 	return ctx;
 }
@@ -104,6 +106,13 @@ dpaa_mem_vtop(void *vaddr)
 	return (rte_iova_t)(NULL);
 }
 
+/* virtual address conversion when mempool support is available for ctx */
+static inline phys_addr_t
+dpaa_mem_vtop_ctx(struct dpaa_sec_op_ctx *ctx, void *vaddr)
+{
+	return (uint64_t)vaddr - ctx->vtop_offset;
+}
+
 static inline void *
 dpaa_mem_ptov(rte_iova_t paddr)
 {
@@ -563,7 +572,7 @@ build_auth_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
 	if (is_decode(ses)) {
 		/* need to extend the input to a compound frame */
 		sg->extension = 1;
-		qm_sg_entry_set64(sg, dpaa_mem_vtop(&cf->sg[2]));
+		qm_sg_entry_set64(sg, dpaa_mem_vtop_ctx(ctx, &cf->sg[2]));
 		sg->length = sym->auth.data.length + ses->digest_length;
 		sg->final = 1;
 		cpu_to_hw_sg(sg);
@@ -577,7 +586,7 @@ build_auth_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
 		cpu_to_hw_sg(sg);
 
 		/* let's check digest by hw */
-		start_addr = dpaa_mem_vtop(old_digest);
+		start_addr = dpaa_mem_vtop_ctx(ctx, old_digest);
 		sg++;
 		qm_sg_entry_set64(sg, start_addr);
 		sg->length = ses->digest_length;
@@ -631,7 +640,7 @@ build_cipher_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
 	sg->extension = 1;
 	sg->final = 1;
 	sg->length = sym->cipher.data.length + ses->iv.length;
-	qm_sg_entry_set64(sg, dpaa_mem_vtop(&cf->sg[2]));
+	qm_sg_entry_set64(sg, dpaa_mem_vtop_ctx(ctx, &cf->sg[2]));
 	cpu_to_hw_sg(sg);
 
 	sg = &cf->sg[2];
@@ -677,7 +686,7 @@ build_cipher_auth_gcm(struct rte_crypto_op *op, dpaa_sec_session *ses)
 	/* input */
 	rte_prefetch0(cf->sg);
 	sg = &cf->sg[2];
-	qm_sg_entry_set64(&cf->sg[1], dpaa_mem_vtop(sg));
+	qm_sg_entry_set64(&cf->sg[1], dpaa_mem_vtop_ctx(ctx, sg));
 	if (is_encode(ses)) {
 		qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
 		sg->length = ses->iv.length;
@@ -722,7 +731,7 @@ build_cipher_auth_gcm(struct rte_crypto_op *op, dpaa_sec_session *ses)
 		       ses->digest_length);
 		sg++;
 
-		qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
+		qm_sg_entry_set64(sg, dpaa_mem_vtop_ctx(ctx, ctx->digest));
 		sg->length = ses->digest_length;
 		length += sg->length;
 		sg->final = 1;
@@ -736,7 +745,7 @@ build_cipher_auth_gcm(struct rte_crypto_op *op, dpaa_sec_session *ses)
 
 	/* output */
 	sg++;
-	qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop(sg));
+	qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop_ctx(ctx, sg));
 	qm_sg_entry_set64(sg,
 		dst_start_addr + sym->aead.data.offset - ses->auth_only_len);
 	sg->length = sym->aead.data.length + ses->auth_only_len;
@@ -788,7 +797,7 @@ build_cipher_auth(struct rte_crypto_op *op, dpaa_sec_session *ses)
 	/* input */
 	rte_prefetch0(cf->sg);
 	sg = &cf->sg[2];
-	qm_sg_entry_set64(&cf->sg[1], dpaa_mem_vtop(sg));
+	qm_sg_entry_set64(&cf->sg[1], dpaa_mem_vtop_ctx(ctx, sg));
 	if (is_encode(ses)) {
 		qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
 		sg->length = ses->iv.length;
@@ -818,7 +827,7 @@ build_cipher_auth(struct rte_crypto_op *op, dpaa_sec_session *ses)
 		       ses->digest_length);
 		sg++;
 
-		qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
+		qm_sg_entry_set64(sg, dpaa_mem_vtop_ctx(ctx, ctx->digest));
 		sg->length = ses->digest_length;
 		length += sg->length;
 		sg->final = 1;
@@ -832,7 +841,7 @@ build_cipher_auth(struct rte_crypto_op *op, dpaa_sec_session *ses)
 
 	/* output */
 	sg++;
-	qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop(sg));
+	qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop_ctx(ctx, sg));
 	qm_sg_entry_set64(sg, dst_start_addr + sym->cipher.data.offset);
 	sg->length = sym->cipher.data.length;
 	length = sg->length;
diff --git a/drivers/crypto/dpaa_sec/dpaa_sec.h b/drivers/crypto/dpaa_sec/dpaa_sec.h
index 9342949..e82f4fb 100644
--- a/drivers/crypto/dpaa_sec/dpaa_sec.h
+++ b/drivers/crypto/dpaa_sec/dpaa_sec.h
@@ -155,6 +155,7 @@ struct dpaa_sec_op_ctx {
 	struct rte_crypto_op *op;
 	struct rte_mempool *ctx_pool; /* mempool pointer for dpaa_sec_op_ctx */
 	uint32_t fd_status;
+	int64_t vtop_offset;
 	uint8_t digest[DPAA_MAX_NB_MAX_DIGEST];
 };
 
-- 
2.9.3

^ permalink raw reply	[flat|nested] 24+ messages in thread

* [dpdk-dev] [PATCH v2 2/3] crypto/dpaa_sec: support multiple sessions per qp
  2018-01-11 11:33 ` [dpdk-dev] [PATCH v2 0/3] crypto/dpaa_sec: performance optimizations Akhil Goyal
  2018-01-11 11:33   ` [dpdk-dev] [PATCH v2 1/3] crypto/dpaa_sec: optimize virt to phy conversion Akhil Goyal
@ 2018-01-11 11:33   ` Akhil Goyal
  2018-01-11 11:33   ` [dpdk-dev] [PATCH v2 3/3] crypto/dpaa_sec: support ipsec protocol offload Akhil Goyal
  2018-01-15  6:35   ` [dpdk-dev] [PATCH v3 0/3] crypto/dpaa_sec: performance optimizations Akhil Goyal
  3 siblings, 0 replies; 24+ messages in thread
From: Akhil Goyal @ 2018-01-11 11:33 UTC (permalink / raw)
  To: dev; +Cc: pablo.de.lara.guarch, hemant.agrawal

From: Hemant Agrawal <hemant.agrawal@nxp.com>

Signed-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>
Acked-by: Akhil Goyal <akhil.goyal@nxp.com>
---
 drivers/crypto/dpaa_sec/dpaa_sec.c | 154 +++++++++++++++++++++++++------------
 drivers/crypto/dpaa_sec/dpaa_sec.h |  74 +++++++++---------
 2 files changed, 145 insertions(+), 83 deletions(-)

diff --git a/drivers/crypto/dpaa_sec/dpaa_sec.c b/drivers/crypto/dpaa_sec/dpaa_sec.c
index 157eace..f433f0a 100644
--- a/drivers/crypto/dpaa_sec/dpaa_sec.c
+++ b/drivers/crypto/dpaa_sec/dpaa_sec.c
@@ -43,6 +43,9 @@ static uint8_t cryptodev_driver_id;
 static __thread struct rte_crypto_op **dpaa_sec_ops;
 static __thread int dpaa_sec_op_nb;
 
+static int
+dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess);
+
 static inline void
 dpaa_sec_op_ending(struct dpaa_sec_op_ctx *ctx)
 {
@@ -151,15 +154,6 @@ dpaa_sec_init_rx(struct qman_fq *fq_in, rte_iova_t hwdesc,
 	/* Clear FQ options */
 	memset(&fq_opts, 0x00, sizeof(struct qm_mcc_initfq));
 
-	flags = QMAN_FQ_FLAG_LOCKED | QMAN_FQ_FLAG_DYNAMIC_FQID |
-		QMAN_FQ_FLAG_TO_DCPORTAL;
-
-	ret = qman_create_fq(0, flags, fq_in);
-	if (unlikely(ret != 0)) {
-		PMD_INIT_LOG(ERR, "qman_create_fq failed");
-		return ret;
-	}
-
 	flags = QMAN_INITFQ_FLAG_SCHED;
 	fq_opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_CONTEXTA |
 			  QM_INITFQ_WE_CONTEXTB;
@@ -171,9 +165,11 @@ dpaa_sec_init_rx(struct qman_fq *fq_in, rte_iova_t hwdesc,
 
 	fq_in->cb.ern  = ern_sec_fq_handler;
 
+	PMD_INIT_LOG(DEBUG, "in-%x out-%x", fq_in->fqid, fqid_out);
+
 	ret = qman_init_fq(fq_in, flags, &fq_opts);
 	if (unlikely(ret != 0))
-		PMD_INIT_LOG(ERR, "qman_init_fq failed");
+		PMD_INIT_LOG(ERR, "qman_init_fq failed %d", ret);
 
 	return ret;
 }
@@ -357,7 +353,7 @@ dpaa_sec_prep_cdb(dpaa_sec_session *ses)
 {
 	struct alginfo alginfo_c = {0}, alginfo_a = {0}, alginfo = {0};
 	uint32_t shared_desc_len = 0;
-	struct sec_cdb *cdb = &ses->qp->cdb;
+	struct sec_cdb *cdb = &ses->cdb;
 	int err;
 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
 	int swap = false;
@@ -877,12 +873,10 @@ dpaa_sec_enqueue_op(struct rte_crypto_op *op,  struct dpaa_sec_qp *qp)
 	ses = (dpaa_sec_session *)get_session_private_data(op->sym->session,
 					cryptodev_driver_id);
 
-	if (unlikely(!qp->ses || qp->ses != ses)) {
-		qp->ses = ses;
-		ses->qp = qp;
-		ret = dpaa_sec_prep_cdb(ses);
-		if (ret)
-			return ret;
+	if (unlikely(!ses->qp || ses->qp != qp)) {
+		PMD_INIT_LOG(DEBUG, "sess->qp - %p qp %p", ses->qp, qp);
+		if (dpaa_sec_attach_sess_q(qp, ses))
+			return -1;
 	}
 
 	/*
@@ -918,7 +912,7 @@ dpaa_sec_enqueue_op(struct rte_crypto_op *op,  struct dpaa_sec_qp *qp)
 	if (auth_only_len)
 		fd.cmd = 0x80000000 | auth_only_len;
 	do {
-		ret = qman_enqueue(&qp->inq, &fd, 0);
+		ret = qman_enqueue(ses->inq, &fd, 0);
 	} while (ret != 0);
 
 	return 0;
@@ -1134,43 +1128,82 @@ dpaa_sec_aead_init(struct rte_cryptodev *dev __rte_unused,
 	return 0;
 }
 
-static int
-dpaa_sec_qp_attach_sess(struct rte_cryptodev *dev, uint16_t qp_id, void *ses)
+static struct qman_fq *
+dpaa_sec_attach_rxq(struct dpaa_sec_dev_private *qi)
 {
-	dpaa_sec_session *sess = ses;
-	struct dpaa_sec_qp *qp;
+	unsigned int i;
 
-	PMD_INIT_FUNC_TRACE();
+	for (i = 0; i < qi->max_nb_sessions; i++) {
+		if (qi->inq_attach[i] == 0) {
+			qi->inq_attach[i] = 1;
+			return &qi->inq[i];
+		}
+	}
+	PMD_DRV_LOG(ERR, "All sessions in use %x", qi->max_nb_sessions);
+
+	return NULL;
+}
 
-	qp = dev->data->queue_pairs[qp_id];
-	if (qp->ses != NULL) {
-		PMD_INIT_LOG(ERR, "qp in-use by another session\n");
-		return -EBUSY;
+static int
+dpaa_sec_detach_rxq(struct dpaa_sec_dev_private *qi, struct qman_fq *fq)
+{
+	unsigned int i;
+
+	for (i = 0; i < qi->max_nb_sessions; i++) {
+		if (&qi->inq[i] == fq) {
+			qi->inq_attach[i] = 0;
+			return 0;
+		}
 	}
+	return -1;
+}
+
+static int
+dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess)
+{
+	int ret;
 
-	qp->ses = sess;
 	sess->qp = qp;
+	ret = dpaa_sec_prep_cdb(sess);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Unable to prepare sec cdb");
+		return -1;
+	}
 
-	return dpaa_sec_prep_cdb(sess);
+	ret = dpaa_sec_init_rx(sess->inq, dpaa_mem_vtop(&sess->cdb),
+			       qman_fq_fqid(&qp->outq));
+	if (ret)
+		PMD_DRV_LOG(ERR, "Unable to init sec queue");
+
+	return ret;
+}
+
+static int
+dpaa_sec_qp_attach_sess(struct rte_cryptodev *dev __rte_unused,
+			uint16_t qp_id __rte_unused,
+			void *ses __rte_unused)
+{
+	PMD_INIT_FUNC_TRACE();
+	return 0;
 }
 
 static int
-dpaa_sec_qp_detach_sess(struct rte_cryptodev *dev, uint16_t qp_id, void *ses)
+dpaa_sec_qp_detach_sess(struct rte_cryptodev *dev,
+			uint16_t qp_id  __rte_unused,
+			void *ses)
 {
 	dpaa_sec_session *sess = ses;
-	struct dpaa_sec_qp *qp;
+	struct dpaa_sec_dev_private *qi = dev->data->dev_private;
 
 	PMD_INIT_FUNC_TRACE();
 
-	qp = dev->data->queue_pairs[qp_id];
-	if (qp->ses != NULL) {
-		qp->ses = NULL;
-		sess->qp = NULL;
-		return 0;
-	}
+	if (sess->inq)
+		dpaa_sec_detach_rxq(qi, sess->inq);
+	sess->inq = NULL;
 
-	PMD_DRV_LOG(ERR, "No session attached to qp");
-	return -EINVAL;
+	sess->qp = NULL;
+
+	return 0;
 }
 
 static int
@@ -1233,8 +1266,20 @@ dpaa_sec_set_session_parameters(struct rte_cryptodev *dev,
 		return -EINVAL;
 	}
 	session->ctx_pool = internals->ctx_pool;
+	session->inq = dpaa_sec_attach_rxq(internals);
+	if (session->inq == NULL) {
+		PMD_DRV_LOG(ERR, "unable to attach sec queue");
+		goto err1;
+	}
 
 	return 0;
+
+err1:
+	rte_free(session->cipher_key.data);
+	rte_free(session->auth_key.data);
+	memset(session, 0, sizeof(dpaa_sec_session));
+
+	return -EINVAL;
 }
 
 static int
@@ -1267,6 +1312,7 @@ dpaa_sec_session_configure(struct rte_cryptodev *dev,
 	set_session_private_data(sess, dev->driver_id,
 			sess_private_data);
 
+
 	return 0;
 }
 
@@ -1275,16 +1321,22 @@ static void
 dpaa_sec_session_clear(struct rte_cryptodev *dev,
 		struct rte_cryptodev_sym_session *sess)
 {
-	PMD_INIT_FUNC_TRACE();
+	struct dpaa_sec_dev_private *qi = dev->data->dev_private;
 	uint8_t index = dev->driver_id;
 	void *sess_priv = get_session_private_data(sess, index);
+
+	PMD_INIT_FUNC_TRACE();
+
 	dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;
 
 	if (sess_priv) {
+		struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
+
+		if (s->inq)
+			dpaa_sec_detach_rxq(qi, s->inq);
 		rte_free(s->cipher_key.data);
 		rte_free(s->auth_key.data);
 		memset(s, 0, sizeof(dpaa_sec_session));
-		struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
 		set_session_private_data(sess, index, NULL);
 		rte_mempool_put(sess_mp, sess_priv);
 	}
@@ -1332,7 +1384,8 @@ dpaa_sec_dev_infos_get(struct rte_cryptodev *dev,
 		info->capabilities = dpaa_sec_capabilities;
 		info->sym.max_nb_sessions = internals->max_nb_sessions;
 		info->sym.max_nb_sessions_per_qp =
-			RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS / RTE_MAX_NB_SEC_QPS;
+			RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS /
+			RTE_DPAA_MAX_NB_SEC_QPS;
 		info->driver_id = cryptodev_driver_id;
 	}
 }
@@ -1377,7 +1430,7 @@ dpaa_sec_dev_init(struct rte_cryptodev *cryptodev)
 {
 	struct dpaa_sec_dev_private *internals;
 	struct dpaa_sec_qp *qp;
-	uint32_t i;
+	uint32_t i, flags;
 	int ret;
 	char str[20];
 
@@ -1393,7 +1446,7 @@ dpaa_sec_dev_init(struct rte_cryptodev *cryptodev)
 			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING;
 
 	internals = cryptodev->data->dev_private;
-	internals->max_nb_queue_pairs = RTE_MAX_NB_SEC_QPS;
+	internals->max_nb_queue_pairs = RTE_DPAA_MAX_NB_SEC_QPS;
 	internals->max_nb_sessions = RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS;
 
 	for (i = 0; i < internals->max_nb_queue_pairs; i++) {
@@ -1404,10 +1457,15 @@ dpaa_sec_dev_init(struct rte_cryptodev *cryptodev)
 			PMD_INIT_LOG(ERR, "config tx of queue pair  %d", i);
 			goto init_error;
 		}
-		ret = dpaa_sec_init_rx(&qp->inq, dpaa_mem_vtop(&qp->cdb),
-				       qman_fq_fqid(&qp->outq));
-		if (ret) {
-			PMD_INIT_LOG(ERR, "config rx of queue pair %d", i);
+	}
+
+	flags = QMAN_FQ_FLAG_LOCKED | QMAN_FQ_FLAG_DYNAMIC_FQID |
+		QMAN_FQ_FLAG_TO_DCPORTAL;
+	for (i = 0; i < internals->max_nb_sessions; i++) {
+		/* create rx qman fq for sessions*/
+		ret = qman_create_fq(0, flags, &internals->inq[i]);
+		if (unlikely(ret != 0)) {
+			PMD_INIT_LOG(ERR, "sec qman_create_fq failed");
 			goto init_error;
 		}
 	}
diff --git a/drivers/crypto/dpaa_sec/dpaa_sec.h b/drivers/crypto/dpaa_sec/dpaa_sec.h
index e82f4fb..93369e4 100644
--- a/drivers/crypto/dpaa_sec/dpaa_sec.h
+++ b/drivers/crypto/dpaa_sec/dpaa_sec.h
@@ -38,36 +38,6 @@ enum dpaa_sec_op_type {
 	DPAA_SEC_MAX
 };
 
-typedef struct dpaa_sec_session_entry {
-	uint8_t dir;         /*!< Operation Direction */
-	enum rte_crypto_cipher_algorithm cipher_alg; /*!< Cipher Algorithm*/
-	enum rte_crypto_auth_algorithm auth_alg; /*!< Authentication Algorithm*/
-	enum rte_crypto_aead_algorithm aead_alg; /*!< Authentication Algorithm*/
-	union {
-		struct {
-			uint8_t *data;	/**< pointer to key data */
-			size_t length;	/**< key length in bytes */
-		} aead_key;
-		struct {
-			struct {
-				uint8_t *data;	/**< pointer to key data */
-				size_t length;	/**< key length in bytes */
-			} cipher_key;
-			struct {
-				uint8_t *data;	/**< pointer to key data */
-				size_t length;	/**< key length in bytes */
-			} auth_key;
-		};
-	};
-	struct {
-		uint16_t length;
-		uint16_t offset;
-	} iv;	/**< Initialisation vector parameters */
-	uint16_t auth_only_len; /*!< Length of data for Auth only */
-	uint32_t digest_length;
-	struct dpaa_sec_qp *qp;
-	struct rte_mempool *ctx_pool; /* session mempool for dpaa_sec_op_ctx */
-} dpaa_sec_session;
 
 #define DPAA_SEC_MAX_DESC_SIZE  64
 /* code or cmd block to caam */
@@ -117,11 +87,41 @@ struct sec_cdb {
 	uint32_t sh_desc[DPAA_SEC_MAX_DESC_SIZE];
 };
 
+typedef struct dpaa_sec_session_entry {
+	uint8_t dir;         /*!< Operation Direction */
+	enum rte_crypto_cipher_algorithm cipher_alg; /*!< Cipher Algorithm*/
+	enum rte_crypto_auth_algorithm auth_alg; /*!< Authentication Algorithm*/
+	enum rte_crypto_aead_algorithm aead_alg; /*!< Authentication Algorithm*/
+	union {
+		struct {
+			uint8_t *data;	/**< pointer to key data */
+			size_t length;	/**< key length in bytes */
+		} aead_key;
+		struct {
+			struct {
+				uint8_t *data;	/**< pointer to key data */
+				size_t length;	/**< key length in bytes */
+			} cipher_key;
+			struct {
+				uint8_t *data;	/**< pointer to key data */
+				size_t length;	/**< key length in bytes */
+			} auth_key;
+		};
+	};
+	struct {
+		uint16_t length;
+		uint16_t offset;
+	} iv;	/**< Initialisation vector parameters */
+	uint16_t auth_only_len; /*!< Length of data for Auth only */
+	uint32_t digest_length;
+	struct dpaa_sec_qp *qp;
+	struct qman_fq *inq;
+	struct sec_cdb cdb;	/**< cmd block associated with qp */
+	struct rte_mempool *ctx_pool; /* session mempool for dpaa_sec_op_ctx */
+} dpaa_sec_session;
+
 struct dpaa_sec_qp {
 	struct dpaa_sec_dev_private *internals;
-	struct sec_cdb cdb;		/* cmd block associated with qp */
-	dpaa_sec_session *ses;		/* session associated with qp */
-	struct qman_fq inq;
 	struct qman_fq outq;
 	int rx_pkts;
 	int rx_errs;
@@ -129,12 +129,16 @@ struct dpaa_sec_qp {
 	int tx_errs;
 };
 
-#define RTE_MAX_NB_SEC_QPS RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS
+#define RTE_DPAA_MAX_NB_SEC_QPS 1
+#define RTE_DPAA_MAX_RX_QUEUE RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS
+
 /* internal sec queue interface */
 struct dpaa_sec_dev_private {
 	void *sec_hw;
 	struct rte_mempool *ctx_pool; /* per dev mempool for dpaa_sec_op_ctx */
-	struct dpaa_sec_qp qps[RTE_MAX_NB_SEC_QPS]; /* i/o queue for sec */
+	struct dpaa_sec_qp qps[RTE_DPAA_MAX_NB_SEC_QPS]; /* i/o queue for sec */
+	struct qman_fq inq[RTE_DPAA_MAX_RX_QUEUE];
+	unsigned char inq_attach[RTE_DPAA_MAX_RX_QUEUE];
 	unsigned int max_nb_queue_pairs;
 	unsigned int max_nb_sessions;
 };
-- 
2.9.3

^ permalink raw reply	[flat|nested] 24+ messages in thread

* [dpdk-dev] [PATCH v2 3/3] crypto/dpaa_sec: support ipsec protocol offload
  2018-01-11 11:33 ` [dpdk-dev] [PATCH v2 0/3] crypto/dpaa_sec: performance optimizations Akhil Goyal
  2018-01-11 11:33   ` [dpdk-dev] [PATCH v2 1/3] crypto/dpaa_sec: optimize virt to phy conversion Akhil Goyal
  2018-01-11 11:33   ` [dpdk-dev] [PATCH v2 2/3] crypto/dpaa_sec: support multiple sessions per qp Akhil Goyal
@ 2018-01-11 11:33   ` Akhil Goyal
  2018-01-11 14:13     ` De Lara Guarch, Pablo
  2018-01-15  6:35   ` [dpdk-dev] [PATCH v3 0/3] crypto/dpaa_sec: performance optimizations Akhil Goyal
  3 siblings, 1 reply; 24+ messages in thread
From: Akhil Goyal @ 2018-01-11 11:33 UTC (permalink / raw)
  To: dev; +Cc: pablo.de.lara.guarch, hemant.agrawal, Akhil Goyal

Signed-off-by: Akhil Goyal <akhil.goyal@nxp.com>
Acked-by: Hemant Agrawal <hemant.agrawal@nxp.com>
---
 doc/guides/cryptodevs/features/dpaa_sec.ini |   1 +
 drivers/crypto/dpaa_sec/dpaa_sec.c          | 410 ++++++++++++++++++++++++++--
 drivers/crypto/dpaa_sec/dpaa_sec.h          |  62 ++++-
 3 files changed, 450 insertions(+), 23 deletions(-)

diff --git a/doc/guides/cryptodevs/features/dpaa_sec.ini b/doc/guides/cryptodevs/features/dpaa_sec.ini
index 0e8f5b2..deab53a 100644
--- a/doc/guides/cryptodevs/features/dpaa_sec.ini
+++ b/doc/guides/cryptodevs/features/dpaa_sec.ini
@@ -7,6 +7,7 @@
 Symmetric crypto       = Y
 Sym operation chaining = Y
 HW Accelerated         = Y
+Protocol offload       = Y
 
 ;
 ; Supported crypto algorithms of the 'dpaa_sec' crypto driver.
diff --git a/drivers/crypto/dpaa_sec/dpaa_sec.c b/drivers/crypto/dpaa_sec/dpaa_sec.c
index f433f0a..d7b6f39 100644
--- a/drivers/crypto/dpaa_sec/dpaa_sec.c
+++ b/drivers/crypto/dpaa_sec/dpaa_sec.c
@@ -15,6 +15,7 @@
 #include <rte_cryptodev_pmd.h>
 #include <rte_crypto.h>
 #include <rte_cryptodev.h>
+#include <rte_security_driver.h>
 #include <rte_cycles.h>
 #include <rte_dev.h>
 #include <rte_kvargs.h>
@@ -196,8 +197,19 @@ dqrr_out_fq_cb_rx(struct qman_portal *qm __always_unused,
 	 * sg[1] for input
 	 */
 	job = dpaa_mem_ptov(qm_fd_addr_get64(fd));
+
 	ctx = container_of(job, struct dpaa_sec_op_ctx, job);
 	ctx->fd_status = fd->status;
+	if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
+		struct qm_sg_entry *sg_out;
+		uint32_t len;
+
+		sg_out = &job->sg[0];
+		hw_sg_to_cpu(sg_out);
+		len = sg_out->length;
+		ctx->op->sym->m_src->pkt_len = len;
+		ctx->op->sym->m_src->data_len = len;
+	}
 	dpaa_sec_ops[dpaa_sec_op_nb++] = ctx->op;
 	dpaa_sec_op_ending(ctx);
 
@@ -261,7 +273,13 @@ static inline int is_aead(dpaa_sec_session *ses)
 static inline int is_auth_cipher(dpaa_sec_session *ses)
 {
 	return ((ses->cipher_alg != RTE_CRYPTO_CIPHER_NULL) &&
-		(ses->auth_alg != RTE_CRYPTO_AUTH_NULL));
+		(ses->auth_alg != RTE_CRYPTO_AUTH_NULL) &&
+		(ses->proto_alg != RTE_SECURITY_PROTOCOL_IPSEC));
+}
+
+static inline int is_proto_ipsec(dpaa_sec_session *ses)
+{
+	return (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC);
 }
 
 static inline int is_encode(dpaa_sec_session *ses)
@@ -282,27 +300,39 @@ caam_auth_alg(dpaa_sec_session *ses, struct alginfo *alginfo_a)
 		ses->digest_length = 0;
 		break;
 	case RTE_CRYPTO_AUTH_MD5_HMAC:
-		alginfo_a->algtype = OP_ALG_ALGSEL_MD5;
+		alginfo_a->algtype =
+			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
+			OP_PCL_IPSEC_HMAC_MD5_96 : OP_ALG_ALGSEL_MD5;
 		alginfo_a->algmode = OP_ALG_AAI_HMAC;
 		break;
 	case RTE_CRYPTO_AUTH_SHA1_HMAC:
-		alginfo_a->algtype = OP_ALG_ALGSEL_SHA1;
+		alginfo_a->algtype =
+			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
+			OP_PCL_IPSEC_HMAC_SHA1_96 : OP_ALG_ALGSEL_SHA1;
 		alginfo_a->algmode = OP_ALG_AAI_HMAC;
 		break;
 	case RTE_CRYPTO_AUTH_SHA224_HMAC:
-		alginfo_a->algtype = OP_ALG_ALGSEL_SHA224;
+		alginfo_a->algtype =
+			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
+			OP_PCL_IPSEC_HMAC_SHA1_160 : OP_ALG_ALGSEL_SHA224;
 		alginfo_a->algmode = OP_ALG_AAI_HMAC;
 		break;
 	case RTE_CRYPTO_AUTH_SHA256_HMAC:
-		alginfo_a->algtype = OP_ALG_ALGSEL_SHA256;
+		alginfo_a->algtype =
+			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
+			OP_PCL_IPSEC_HMAC_SHA2_256_128 : OP_ALG_ALGSEL_SHA256;
 		alginfo_a->algmode = OP_ALG_AAI_HMAC;
 		break;
 	case RTE_CRYPTO_AUTH_SHA384_HMAC:
-		alginfo_a->algtype = OP_ALG_ALGSEL_SHA384;
+		alginfo_a->algtype =
+			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
+			OP_PCL_IPSEC_HMAC_SHA2_384_192 : OP_ALG_ALGSEL_SHA384;
 		alginfo_a->algmode = OP_ALG_AAI_HMAC;
 		break;
 	case RTE_CRYPTO_AUTH_SHA512_HMAC:
-		alginfo_a->algtype = OP_ALG_ALGSEL_SHA512;
+		alginfo_a->algtype =
+			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
+			OP_PCL_IPSEC_HMAC_SHA2_512_256 : OP_ALG_ALGSEL_SHA512;
 		alginfo_a->algmode = OP_ALG_AAI_HMAC;
 		break;
 	default:
@@ -317,15 +347,21 @@ caam_cipher_alg(dpaa_sec_session *ses, struct alginfo *alginfo_c)
 	case RTE_CRYPTO_CIPHER_NULL:
 		break;
 	case RTE_CRYPTO_CIPHER_AES_CBC:
-		alginfo_c->algtype = OP_ALG_ALGSEL_AES;
+		alginfo_c->algtype =
+			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
+			OP_PCL_IPSEC_AES_CBC : OP_ALG_ALGSEL_AES;
 		alginfo_c->algmode = OP_ALG_AAI_CBC;
 		break;
 	case RTE_CRYPTO_CIPHER_3DES_CBC:
-		alginfo_c->algtype = OP_ALG_ALGSEL_3DES;
+		alginfo_c->algtype =
+			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
+			OP_PCL_IPSEC_3DES : OP_ALG_ALGSEL_3DES;
 		alginfo_c->algmode = OP_ALG_AAI_CBC;
 		break;
 	case RTE_CRYPTO_CIPHER_AES_CTR:
-		alginfo_c->algtype = OP_ALG_ALGSEL_AES;
+		alginfo_c->algtype =
+			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
+			OP_PCL_IPSEC_AES_CTR : OP_ALG_ALGSEL_AES;
 		alginfo_c->algmode = OP_ALG_AAI_CTR;
 		break;
 	default:
@@ -471,14 +507,28 @@ dpaa_sec_prep_cdb(dpaa_sec_session *ses)
 		cdb->sh_desc[0] = 0;
 		cdb->sh_desc[1] = 0;
 		cdb->sh_desc[2] = 0;
-
-		/* Auth_only_len is set as 0 here and it will be overwritten
-		 *  in fd for each packet.
-		 */
-		shared_desc_len = cnstr_shdsc_authenc(cdb->sh_desc,
-				true, swap, &alginfo_c, &alginfo_a,
-				ses->iv.length, 0,
-				ses->digest_length, ses->dir);
+		if (is_proto_ipsec(ses)) {
+			if (ses->dir == DIR_ENC) {
+				shared_desc_len = cnstr_shdsc_ipsec_new_encap(
+						cdb->sh_desc,
+						true, swap, &ses->encap_pdb,
+						(uint8_t *)&ses->ip4_hdr,
+						&alginfo_c, &alginfo_a);
+			} else if (ses->dir == DIR_DEC) {
+				shared_desc_len = cnstr_shdsc_ipsec_new_decap(
+						cdb->sh_desc,
+						true, swap, &ses->decap_pdb,
+						&alginfo_c, &alginfo_a);
+			}
+		} else {
+			/* Auth_only_len is set as 0 here and it will be
+			 * overwritten in fd for each packet.
+			 */
+			shared_desc_len = cnstr_shdsc_authenc(cdb->sh_desc,
+					true, swap, &alginfo_c, &alginfo_a,
+					ses->iv.length, 0,
+					ses->digest_length, ses->dir);
+		}
 	}
 	cdb->sh_hdr.hi.field.idlen = shared_desc_len;
 	cdb->sh_hdr.hi.word = rte_cpu_to_be_32(cdb->sh_hdr.hi.word);
@@ -860,6 +910,45 @@ build_cipher_auth(struct rte_crypto_op *op, dpaa_sec_session *ses)
 	return cf;
 }
 
+static inline struct dpaa_sec_job *
+build_proto(struct rte_crypto_op *op, dpaa_sec_session *ses)
+{
+	struct rte_crypto_sym_op *sym = op->sym;
+	struct dpaa_sec_job *cf;
+	struct dpaa_sec_op_ctx *ctx;
+	struct qm_sg_entry *sg;
+	phys_addr_t src_start_addr, dst_start_addr;
+
+	ctx = dpaa_sec_alloc_ctx(ses);
+	if (!ctx)
+		return NULL;
+	cf = &ctx->job;
+	ctx->op = op;
+
+	src_start_addr = rte_pktmbuf_mtophys(sym->m_src);
+
+	if (sym->m_dst)
+		dst_start_addr = rte_pktmbuf_mtophys(sym->m_dst);
+	else
+		dst_start_addr = src_start_addr;
+
+	/* input */
+	sg = &cf->sg[1];
+	qm_sg_entry_set64(sg, src_start_addr);
+	sg->length = sym->m_src->pkt_len;
+	sg->final = 1;
+	cpu_to_hw_sg(sg);
+
+	sym->m_src->packet_type &= ~RTE_PTYPE_L4_MASK;
+	/* output */
+	sg = &cf->sg[0];
+	qm_sg_entry_set64(sg, dst_start_addr);
+	sg->length = sym->m_src->buf_len - sym->m_src->data_off;
+	cpu_to_hw_sg(sg);
+
+	return cf;
+}
+
 static int
 dpaa_sec_enqueue_op(struct rte_crypto_op *op,  struct dpaa_sec_qp *qp)
 {
@@ -870,8 +959,14 @@ dpaa_sec_enqueue_op(struct rte_crypto_op *op,  struct dpaa_sec_qp *qp)
 	uint32_t auth_only_len = op->sym->auth.data.length -
 				op->sym->cipher.data.length;
 
-	ses = (dpaa_sec_session *)get_session_private_data(op->sym->session,
-					cryptodev_driver_id);
+	if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
+		ses = (dpaa_sec_session *)get_session_private_data(
+				op->sym->session, cryptodev_driver_id);
+	else if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION)
+		ses = (dpaa_sec_session *)get_sec_session_private_data(
+				op->sym->sec_session);
+	else
+		return -ENOTSUP;
 
 	if (unlikely(!ses->qp || ses->qp != qp)) {
 		PMD_INIT_LOG(DEBUG, "sess->qp - %p qp %p", ses->qp, qp);
@@ -895,6 +990,8 @@ dpaa_sec_enqueue_op(struct rte_crypto_op *op,  struct dpaa_sec_qp *qp)
 		auth_only_len = ses->auth_only_len;
 	} else if (is_auth_cipher(ses)) {
 		cf = build_cipher_auth(op, ses);
+	} else if (is_proto_ipsec(ses)) {
+		cf = build_proto(op, ses);
 	} else {
 		PMD_TX_LOG(ERR, "not supported sec op");
 		return -ENOTSUP;
@@ -933,7 +1030,7 @@ dpaa_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
 
 	/*Prepare each packet which is to be sent*/
 	for (loop = 0; loop < nb_ops; loop++) {
-		if (ops[loop]->sess_type != RTE_CRYPTO_OP_WITH_SESSION) {
+		if (ops[loop]->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
 			PMD_TX_LOG(ERR, "sessionless crypto op not supported");
 			return 0;
 		}
@@ -1343,6 +1440,236 @@ dpaa_sec_session_clear(struct rte_cryptodev *dev,
 }
 
 static int
+dpaa_sec_set_ipsec_session(__rte_unused struct rte_cryptodev *dev,
+			   struct rte_security_session_conf *conf,
+			   void *sess)
+{
+	struct dpaa_sec_dev_private *internals = dev->data->dev_private;
+	struct rte_security_ipsec_xform *ipsec_xform = &conf->ipsec;
+	struct rte_crypto_auth_xform *auth_xform;
+	struct rte_crypto_cipher_xform *cipher_xform;
+	dpaa_sec_session *session = (dpaa_sec_session *)sess;
+
+	PMD_INIT_FUNC_TRACE();
+
+	if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
+		cipher_xform = &conf->crypto_xform->cipher;
+		auth_xform = &conf->crypto_xform->next->auth;
+	} else {
+		auth_xform = &conf->crypto_xform->auth;
+		cipher_xform = &conf->crypto_xform->next->cipher;
+	}
+	session->proto_alg = conf->protocol;
+	session->cipher_key.data = rte_zmalloc(NULL,
+					       cipher_xform->key.length,
+					       RTE_CACHE_LINE_SIZE);
+	if (session->cipher_key.data == NULL &&
+			cipher_xform->key.length > 0) {
+		RTE_LOG(ERR, PMD, "No Memory for cipher key\n");
+		return -ENOMEM;
+	}
+
+	session->cipher_key.length = cipher_xform->key.length;
+	session->auth_key.data = rte_zmalloc(NULL,
+					auth_xform->key.length,
+					RTE_CACHE_LINE_SIZE);
+	if (session->auth_key.data == NULL &&
+			auth_xform->key.length > 0) {
+		RTE_LOG(ERR, PMD, "No Memory for auth key\n");
+		rte_free(session->cipher_key.data);
+		return -ENOMEM;
+	}
+	session->auth_key.length = auth_xform->key.length;
+	memcpy(session->cipher_key.data, cipher_xform->key.data,
+			cipher_xform->key.length);
+	memcpy(session->auth_key.data, auth_xform->key.data,
+			auth_xform->key.length);
+
+	switch (auth_xform->algo) {
+	case RTE_CRYPTO_AUTH_SHA1_HMAC:
+		session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC;
+		break;
+	case RTE_CRYPTO_AUTH_MD5_HMAC:
+		session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC;
+		break;
+	case RTE_CRYPTO_AUTH_SHA256_HMAC:
+		session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC;
+		break;
+	case RTE_CRYPTO_AUTH_SHA384_HMAC:
+		session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC;
+		break;
+	case RTE_CRYPTO_AUTH_SHA512_HMAC:
+		session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC;
+		break;
+	case RTE_CRYPTO_AUTH_AES_CMAC:
+		session->auth_alg = RTE_CRYPTO_AUTH_AES_CMAC;
+		break;
+	case RTE_CRYPTO_AUTH_NULL:
+		session->auth_alg = RTE_CRYPTO_AUTH_NULL;
+		break;
+	case RTE_CRYPTO_AUTH_SHA224_HMAC:
+	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
+	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
+	case RTE_CRYPTO_AUTH_SHA1:
+	case RTE_CRYPTO_AUTH_SHA256:
+	case RTE_CRYPTO_AUTH_SHA512:
+	case RTE_CRYPTO_AUTH_SHA224:
+	case RTE_CRYPTO_AUTH_SHA384:
+	case RTE_CRYPTO_AUTH_MD5:
+	case RTE_CRYPTO_AUTH_AES_GMAC:
+	case RTE_CRYPTO_AUTH_KASUMI_F9:
+	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
+	case RTE_CRYPTO_AUTH_ZUC_EIA3:
+		RTE_LOG(ERR, PMD, "Crypto: Unsupported auth alg %u\n",
+			auth_xform->algo);
+		goto out;
+	default:
+		RTE_LOG(ERR, PMD, "Crypto: Undefined Auth specified %u\n",
+			auth_xform->algo);
+		goto out;
+	}
+
+	switch (cipher_xform->algo) {
+	case RTE_CRYPTO_CIPHER_AES_CBC:
+		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
+		break;
+	case RTE_CRYPTO_CIPHER_3DES_CBC:
+		session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
+		break;
+	case RTE_CRYPTO_CIPHER_AES_CTR:
+		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR;
+		break;
+	case RTE_CRYPTO_CIPHER_NULL:
+	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
+	case RTE_CRYPTO_CIPHER_3DES_ECB:
+	case RTE_CRYPTO_CIPHER_AES_ECB:
+	case RTE_CRYPTO_CIPHER_KASUMI_F8:
+		RTE_LOG(ERR, PMD, "Crypto: Unsupported Cipher alg %u\n",
+			cipher_xform->algo);
+		goto out;
+	default:
+		RTE_LOG(ERR, PMD, "Crypto: Undefined Cipher specified %u\n",
+			cipher_xform->algo);
+		goto out;
+	}
+
+	if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
+		memset(&session->encap_pdb, 0, sizeof(struct ipsec_encap_pdb) +
+				sizeof(session->ip4_hdr));
+		session->ip4_hdr.ip_v = IPVERSION;
+		session->ip4_hdr.ip_hl = 5;
+		session->ip4_hdr.ip_len = rte_cpu_to_be_16(
+						sizeof(session->ip4_hdr));
+		session->ip4_hdr.ip_tos = ipsec_xform->tunnel.ipv4.dscp;
+		session->ip4_hdr.ip_id = 0;
+		session->ip4_hdr.ip_off = 0;
+		session->ip4_hdr.ip_ttl = ipsec_xform->tunnel.ipv4.ttl;
+		session->ip4_hdr.ip_p = (ipsec_xform->proto ==
+				RTE_SECURITY_IPSEC_SA_PROTO_ESP) ? IPPROTO_ESP
+				: IPPROTO_AH;
+		session->ip4_hdr.ip_sum = 0;
+		session->ip4_hdr.ip_src = ipsec_xform->tunnel.ipv4.src_ip;
+		session->ip4_hdr.ip_dst = ipsec_xform->tunnel.ipv4.dst_ip;
+		session->ip4_hdr.ip_sum = calc_chksum((uint16_t *)
+						(void *)&session->ip4_hdr,
+						sizeof(struct ip));
+
+		session->encap_pdb.options =
+			(IPVERSION << PDBNH_ESP_ENCAP_SHIFT) |
+			PDBOPTS_ESP_OIHI_PDB_INL |
+			PDBOPTS_ESP_IVSRC |
+			PDBHMO_ESP_ENCAP_DTTL;
+		session->encap_pdb.spi = ipsec_xform->spi;
+		session->encap_pdb.ip_hdr_len = sizeof(struct ip);
+
+		session->dir = DIR_ENC;
+	} else if (ipsec_xform->direction ==
+			RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
+		memset(&session->decap_pdb, 0, sizeof(struct ipsec_decap_pdb));
+		session->decap_pdb.options = sizeof(struct ip) << 16;
+		session->dir = DIR_DEC;
+	} else
+		goto out;
+	session->ctx_pool = internals->ctx_pool;
+	session->inq = dpaa_sec_attach_rxq(internals);
+	if (session->inq == NULL) {
+		PMD_DRV_LOG(ERR, "unable to attach sec queue");
+		goto out;
+	}
+
+
+	return 0;
+out:
+	rte_free(session->auth_key.data);
+	rte_free(session->cipher_key.data);
+	memset(session, 0, sizeof(dpaa_sec_session));
+	return -1;
+}
+
+static int
+dpaa_sec_security_session_create(void *dev,
+				 struct rte_security_session_conf *conf,
+				 struct rte_security_session *sess,
+				 struct rte_mempool *mempool)
+{
+	void *sess_private_data;
+	struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
+	int ret;
+
+	if (rte_mempool_get(mempool, &sess_private_data)) {
+		CDEV_LOG_ERR(
+			"Couldn't get object from session mempool");
+		return -ENOMEM;
+	}
+
+	switch (conf->protocol) {
+	case RTE_SECURITY_PROTOCOL_IPSEC:
+		ret = dpaa_sec_set_ipsec_session(cdev, conf,
+				sess_private_data);
+		break;
+	case RTE_SECURITY_PROTOCOL_MACSEC:
+		return -ENOTSUP;
+	default:
+		return -EINVAL;
+	}
+	if (ret != 0) {
+		PMD_DRV_LOG(ERR,
+			"DPAA PMD: failed to configure session parameters");
+
+		/* Return session to mempool */
+		rte_mempool_put(mempool, sess_private_data);
+		return ret;
+	}
+
+	set_sec_session_private_data(sess, sess_private_data);
+
+	return ret;
+}
+
+/** Clear the memory of session so it doesn't leave key material behind */
+static int
+dpaa_sec_security_session_destroy(void *dev __rte_unused,
+		struct rte_security_session *sess)
+{
+	PMD_INIT_FUNC_TRACE();
+	void *sess_priv = get_sec_session_private_data(sess);
+
+	dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;
+
+	if (sess_priv) {
+		struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
+
+		rte_free(s->cipher_key.data);
+		rte_free(s->auth_key.data);
+		memset(s, 0, sizeof(dpaa_sec_session));
+		set_sec_session_private_data(sess, NULL);
+		rte_mempool_put(sess_mp, sess_priv);
+	}
+	return 0;
+}
+
+
+static int
 dpaa_sec_dev_configure(struct rte_cryptodev *dev __rte_unused,
 		       struct rte_cryptodev_config *config __rte_unused)
 {
@@ -1408,6 +1735,21 @@ static struct rte_cryptodev_ops crypto_ops = {
 	.qp_detach_session    = dpaa_sec_qp_detach_sess,
 };
 
+static const struct rte_security_capability *
+dpaa_sec_capabilities_get(void *device __rte_unused)
+{
+	return dpaa_sec_security_cap;
+}
+
+struct rte_security_ops dpaa_sec_security_ops = {
+	.session_create = dpaa_sec_security_session_create,
+	.session_update = NULL,
+	.session_stats_get = NULL,
+	.session_destroy = dpaa_sec_security_session_destroy,
+	.set_pkt_metadata = NULL,
+	.capabilities_get = dpaa_sec_capabilities_get
+};
+
 static int
 dpaa_sec_uninit(struct rte_cryptodev *dev)
 {
@@ -1416,6 +1758,8 @@ dpaa_sec_uninit(struct rte_cryptodev *dev)
 	if (dev == NULL)
 		return -ENODEV;
 
+	rte_free(dev->security_ctx);
+
 	rte_mempool_free(internals->ctx_pool);
 	rte_free(internals);
 
@@ -1429,6 +1773,7 @@ static int
 dpaa_sec_dev_init(struct rte_cryptodev *cryptodev)
 {
 	struct dpaa_sec_dev_private *internals;
+	struct rte_security_ctx *security_instance;
 	struct dpaa_sec_qp *qp;
 	uint32_t i, flags;
 	int ret;
@@ -1443,12 +1788,33 @@ dpaa_sec_dev_init(struct rte_cryptodev *cryptodev)
 	cryptodev->dequeue_burst = dpaa_sec_dequeue_burst;
 	cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
 			RTE_CRYPTODEV_FF_HW_ACCELERATED |
-			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING;
+			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
+			RTE_CRYPTODEV_FF_SECURITY;
 
 	internals = cryptodev->data->dev_private;
 	internals->max_nb_queue_pairs = RTE_DPAA_MAX_NB_SEC_QPS;
 	internals->max_nb_sessions = RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS;
 
+	/*
+	 * For secondary processes, we don't initialise any further, as the
+	 * primary has already done this work. Only the function pointers
+	 * set above are needed.
+	 */
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+		PMD_INIT_LOG(DEBUG, "Device already init by primary process");
+		return 0;
+	}
+
+	/* Initialize security_ctx only for primary process*/
+	security_instance = rte_malloc("rte_security_instances_ops",
+				sizeof(struct rte_security_ctx), 0);
+	if (security_instance == NULL)
+		return -ENOMEM;
+	security_instance->device = (void *)cryptodev;
+	security_instance->ops = &dpaa_sec_security_ops;
+	security_instance->sess_cnt = 0;
+	cryptodev->security_ctx = security_instance;
+
 	for (i = 0; i < internals->max_nb_queue_pairs; i++) {
 		/* init qman fq for queue pair */
 		qp = &internals->qps[i];
diff --git a/drivers/crypto/dpaa_sec/dpaa_sec.h b/drivers/crypto/dpaa_sec/dpaa_sec.h
index 93369e4..578c46a 100644
--- a/drivers/crypto/dpaa_sec/dpaa_sec.h
+++ b/drivers/crypto/dpaa_sec/dpaa_sec.h
@@ -91,7 +91,8 @@ typedef struct dpaa_sec_session_entry {
 	uint8_t dir;         /*!< Operation Direction */
 	enum rte_crypto_cipher_algorithm cipher_alg; /*!< Cipher Algorithm*/
 	enum rte_crypto_auth_algorithm auth_alg; /*!< Authentication Algorithm*/
-	enum rte_crypto_aead_algorithm aead_alg; /*!< Authentication Algorithm*/
+	enum rte_crypto_aead_algorithm aead_alg; /*!< AEAD Algorithm*/
+	enum rte_security_session_protocol proto_alg; /*!< Security Algorithm*/
 	union {
 		struct {
 			uint8_t *data;	/**< pointer to key data */
@@ -114,6 +115,9 @@ typedef struct dpaa_sec_session_entry {
 	} iv;	/**< Initialisation vector parameters */
 	uint16_t auth_only_len; /*!< Length of data for Auth only */
 	uint32_t digest_length;
+	struct ipsec_encap_pdb encap_pdb;
+	struct ip ip4_hdr;
+	struct ipsec_decap_pdb decap_pdb;
 	struct dpaa_sec_qp *qp;
 	struct qman_fq *inq;
 	struct sec_cdb cdb;	/**< cmd block associated with qp */
@@ -378,4 +382,60 @@ static const struct rte_cryptodev_capabilities dpaa_sec_capabilities[] = {
 	RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
 };
 
+static const struct rte_security_capability dpaa_sec_security_cap[] = {
+	{ /* IPsec Lookaside Protocol offload ESP Transport Egress */
+		.action = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
+		.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
+		.ipsec = {
+			.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
+			.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
+			.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
+			.options = { 0 }
+		},
+		.crypto_capabilities = dpaa_sec_capabilities
+	},
+	{ /* IPsec Lookaside Protocol offload ESP Tunnel Ingress */
+		.action = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
+		.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
+		.ipsec = {
+			.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
+			.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
+			.direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
+			.options = { 0 }
+		},
+		.crypto_capabilities = dpaa_sec_capabilities
+	},
+	{
+		.action = RTE_SECURITY_ACTION_TYPE_NONE
+	}
+};
+
+/**
+ * Checksum
+ *
+ * @param buffer buffer to calculate the checksum over
+ * @param len    buffer length
+ *
+ * @return checksum value in host cpu order
+ */
+static inline uint16_t
+calc_chksum(void *buffer, int len)
+{
+	uint16_t *buf = (uint16_t *)buffer;
+	uint32_t sum = 0;
+	uint16_t result;
+
+	for (sum = 0; len > 1; len -= 2)
+		sum += *buf++;
+
+	if (len == 1)
+		sum += *(unsigned char *)buf;
+
+	sum = (sum >> 16) + (sum & 0xFFFF);
+	sum += (sum >> 16);
+	result = ~sum;
+
+	return  result;
+}
+
 #endif /* _DPAA_SEC_H_ */
-- 
2.9.3

^ permalink raw reply	[flat|nested] 24+ messages in thread

* [dpdk-dev] [PATCH v2] crypto/dpaa_sec: rewrite Rx/Tx path
  2017-12-13 13:56 ` [dpdk-dev] [PATCH 5/5] crypto/dpaa_sec: rewrite Rx/Tx path Akhil Goyal
  2017-12-19 12:45   ` Hemant Agrawal
@ 2018-01-11 11:44   ` Akhil Goyal
  2018-01-17 16:54     ` De Lara Guarch, Pablo
  1 sibling, 1 reply; 24+ messages in thread
From: Akhil Goyal @ 2018-01-11 11:44 UTC (permalink / raw)
  To: dev; +Cc: pablo.de.lara.guarch, hemant.agrawal, Akhil Goyal, Nipun Gupta

Rx and Tx paths are rewritten with improved internal APIs
to improve performance.

Signed-off-by: Akhil Goyal <akhil.goyal@nxp.com>
Signed-off-by: Nipun Gupta <nipun.gupta@nxp.com>
---
changes in v2:
Incorporated comments from Hemant.
Split this patch from the patch series to remove the dependency of other patches on the bus/dpaa patch.
Note:
This patch is dependent on [1](already merged in net subtree) and [2](under review)

[1] c7b3891464c9 ("bus/dpaa: support for enqueue frames of multiple queues")
[2]  http://dpdk.org/ml/archives/dev/2018-January/086674.html

 drivers/crypto/dpaa_sec/dpaa_sec.c | 259 +++++++++++++++++++++----------------
 drivers/crypto/dpaa_sec/dpaa_sec.h |   3 +-
 2 files changed, 153 insertions(+), 109 deletions(-)

diff --git a/drivers/crypto/dpaa_sec/dpaa_sec.c b/drivers/crypto/dpaa_sec/dpaa_sec.c
index 66828b5..586ebb6 100644
--- a/drivers/crypto/dpaa_sec/dpaa_sec.c
+++ b/drivers/crypto/dpaa_sec/dpaa_sec.c
@@ -537,46 +537,66 @@ dpaa_sec_prep_cdb(dpaa_sec_session *ses)
 	return 0;
 }
 
-static inline unsigned int
-dpaa_volatile_deq(struct qman_fq *fq, unsigned int len, bool exact)
-{
-	unsigned int pkts = 0;
-	int ret;
-	struct qm_mcr_queryfq_np np;
-	enum qman_fq_state state;
-	uint32_t flags;
-	uint32_t vdqcr;
-
-	qman_query_fq_np(fq, &np);
-	if (np.frm_cnt) {
-		vdqcr = QM_VDQCR_NUMFRAMES_SET(len);
-		if (exact)
-			vdqcr |= QM_VDQCR_EXACT;
-		ret = qman_volatile_dequeue(fq, 0, vdqcr);
-		if (ret)
-			return 0;
-		do {
-			pkts += qman_poll_dqrr(len);
-			qman_fq_state(fq, &state, &flags);
-		} while (flags & QMAN_FQ_STATE_VDQCR);
-	}
-	return pkts;
-}
-
 /* qp is lockless, should be accessed by only one thread */
 static int
 dpaa_sec_deq(struct dpaa_sec_qp *qp, struct rte_crypto_op **ops, int nb_ops)
 {
 	struct qman_fq *fq;
+	unsigned int pkts = 0;
+	int ret;
+	struct qm_dqrr_entry *dq;
 
 	fq = &qp->outq;
-	dpaa_sec_op_nb = 0;
-	dpaa_sec_ops = ops;
+	ret = qman_set_vdq(fq, (nb_ops > DPAA_MAX_DEQUEUE_NUM_FRAMES) ?
+				DPAA_MAX_DEQUEUE_NUM_FRAMES : nb_ops);
+	if (ret)
+		return 0;
+
+	do {
+		const struct qm_fd *fd;
+		struct dpaa_sec_job *job;
+		struct dpaa_sec_op_ctx *ctx;
+		struct rte_crypto_op *op;
+
+		dq = qman_dequeue(fq);
+		if (!dq)
+			continue;
+
+		fd = &dq->fd;
+		/* sg is embedded in an op ctx,
+		 * sg[0] is for output
+		 * sg[1] for input
+		 */
+		job = dpaa_mem_ptov(qm_fd_addr_get64(fd));
+
+		ctx = container_of(job, struct dpaa_sec_op_ctx, job);
+		ctx->fd_status = fd->status;
+		op = ctx->op;
+		if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
+			struct qm_sg_entry *sg_out;
+			uint32_t len;
+
+			sg_out = &job->sg[0];
+			hw_sg_to_cpu(sg_out);
+			len = sg_out->length;
+			op->sym->m_src->pkt_len = len;
+			op->sym->m_src->data_len = len;
+		}
+		if (!ctx->fd_status) {
+			op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+		} else {
+			printf("\nSEC return err: 0x%x", ctx->fd_status);
+			op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+		}
+		ops[pkts++] = op;
 
-	if (unlikely(nb_ops > DPAA_SEC_BURST))
-		nb_ops = DPAA_SEC_BURST;
+		/* report op status to sym->op and then free the ctx memory */
+		rte_mempool_put(ctx->ctx_pool, (void *)ctx);
 
-	return dpaa_volatile_deq(fq, nb_ops, 1);
+		qman_dqrr_consume(fq, dq);
+	} while (fq->flags & QMAN_FQ_STATE_VDQCR);
+
+	return pkts;
 }
 
 /**
@@ -949,95 +969,118 @@ build_proto(struct rte_crypto_op *op, dpaa_sec_session *ses)
 	return cf;
 }
 
-static int
-dpaa_sec_enqueue_op(struct rte_crypto_op *op,  struct dpaa_sec_qp *qp)
-{
-	struct dpaa_sec_job *cf;
-	dpaa_sec_session *ses;
-	struct qm_fd fd;
-	int ret;
-	uint32_t auth_only_len = op->sym->auth.data.length -
-				op->sym->cipher.data.length;
-
-	if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
-		ses = (dpaa_sec_session *)get_session_private_data(
-				op->sym->session, cryptodev_driver_id);
-	else if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION)
-		ses = (dpaa_sec_session *)get_sec_session_private_data(
-				op->sym->sec_session);
-	else
-		return -ENOTSUP;
-
-	if (unlikely(!ses->qp || ses->qp != qp)) {
-		PMD_INIT_LOG(DEBUG, "sess->qp - %p qp %p", ses->qp, qp);
-		if (dpaa_sec_attach_sess_q(qp, ses))
-			return -1;
-	}
-
-	/*
-	 * Segmented buffer is not supported.
-	 */
-	if (!rte_pktmbuf_is_contiguous(op->sym->m_src)) {
-		op->status = RTE_CRYPTO_OP_STATUS_ERROR;
-		return -ENOTSUP;
-	}
-	if (is_auth_only(ses)) {
-		cf = build_auth_only(op, ses);
-	} else if (is_cipher_only(ses)) {
-		cf = build_cipher_only(op, ses);
-	} else if (is_aead(ses)) {
-		cf = build_cipher_auth_gcm(op, ses);
-		auth_only_len = ses->auth_only_len;
-	} else if (is_auth_cipher(ses)) {
-		cf = build_cipher_auth(op, ses);
-	} else if (is_proto_ipsec(ses)) {
-		cf = build_proto(op, ses);
-	} else {
-		PMD_TX_LOG(ERR, "not supported sec op");
-		return -ENOTSUP;
-	}
-	if (unlikely(!cf))
-		return -ENOMEM;
-
-	memset(&fd, 0, sizeof(struct qm_fd));
-	qm_fd_addr_set64(&fd, dpaa_mem_vtop(cf->sg));
-	fd._format1 = qm_fd_compound;
-	fd.length29 = 2 * sizeof(struct qm_sg_entry);
-	/* Auth_only_len is set as 0 in descriptor and it is overwritten
-	 * here in the fd.cmd which will update the DPOVRD reg.
-	 */
-	if (auth_only_len)
-		fd.cmd = 0x80000000 | auth_only_len;
-	do {
-		ret = qman_enqueue(ses->inq, &fd, 0);
-	} while (ret != 0);
-
-	return 0;
-}
-
 static uint16_t
 dpaa_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
 		       uint16_t nb_ops)
 {
 	/* Function to transmit the frames to given device and queuepair */
 	uint32_t loop;
-	int32_t ret;
 	struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;
 	uint16_t num_tx = 0;
+	struct qm_fd fds[DPAA_SEC_BURST], *fd;
+	uint32_t frames_to_send;
+	struct rte_crypto_op *op;
+	struct dpaa_sec_job *cf;
+	dpaa_sec_session *ses;
+	struct dpaa_sec_op_ctx *ctx;
+	uint32_t auth_only_len;
+	struct qman_fq *inq[DPAA_SEC_BURST];
+
+	while (nb_ops) {
+		frames_to_send = (nb_ops > DPAA_SEC_BURST) ?
+				DPAA_SEC_BURST : nb_ops;
+		for (loop = 0; loop < frames_to_send; loop++) {
+			op = *(ops++);
+			switch (op->sess_type) {
+			case RTE_CRYPTO_OP_WITH_SESSION:
+				ses = (dpaa_sec_session *)
+					get_session_private_data(
+							op->sym->session,
+							cryptodev_driver_id);
+				break;
+			case RTE_CRYPTO_OP_SECURITY_SESSION:
+				ses = (dpaa_sec_session *)
+					get_sec_session_private_data(
+							op->sym->sec_session);
+				break;
+			default:
+				PMD_TX_LOG(ERR,
+					"sessionless crypto op not supported");
+				frames_to_send = loop;
+				nb_ops = loop;
+				goto send_pkts;
+			}
+			if (unlikely(!ses->qp || ses->qp != qp)) {
+				PMD_INIT_LOG(DEBUG, "sess->qp - %p qp %p",
+						ses->qp, qp);
+				if (dpaa_sec_attach_sess_q(qp, ses)) {
+					frames_to_send = loop;
+					nb_ops = loop;
+					goto send_pkts;
+				}
+			}
 
-	if (unlikely(nb_ops == 0))
-		return 0;
+			/*
+			 * Segmented buffer is not supported.
+			 */
+			if (!rte_pktmbuf_is_contiguous(op->sym->m_src)) {
+				op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+				frames_to_send = loop;
+				nb_ops = loop;
+				goto send_pkts;
+			}
+			auth_only_len = op->sym->auth.data.length -
+						op->sym->cipher.data.length;
+
+			if (is_auth_only(ses)) {
+				cf = build_auth_only(op, ses);
+			} else if (is_cipher_only(ses)) {
+				cf = build_cipher_only(op, ses);
+			} else if (is_aead(ses)) {
+				cf = build_cipher_auth_gcm(op, ses);
+				auth_only_len = ses->auth_only_len;
+			} else if (is_auth_cipher(ses)) {
+				cf = build_cipher_auth(op, ses);
+			} else if (is_proto_ipsec(ses)) {
+				cf = build_proto(op, ses);
+			} else {
+				PMD_TX_LOG(ERR, "not supported sec op");
+				frames_to_send = loop;
+				nb_ops = loop;
+				goto send_pkts;
+			}
+			if (unlikely(!cf)) {
+				frames_to_send = loop;
+				nb_ops = loop;
+				goto send_pkts;
+			}
 
-	/*Prepare each packet which is to be sent*/
-	for (loop = 0; loop < nb_ops; loop++) {
-		if (ops[loop]->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
-			PMD_TX_LOG(ERR, "sessionless crypto op not supported");
-			return 0;
+			fd = &fds[loop];
+			inq[loop] = ses->inq;
+			fd->opaque_addr = 0;
+			fd->cmd = 0;
+			ctx = container_of(cf, struct dpaa_sec_op_ctx, job);
+			qm_fd_addr_set64(fd, dpaa_mem_vtop_ctx(ctx, cf->sg));
+			fd->_format1 = qm_fd_compound;
+			fd->length29 = 2 * sizeof(struct qm_sg_entry);
+			/* Auth_only_len is set as 0 in descriptor and it is
+			 * overwritten here in the fd.cmd which will update
+			 * the DPOVRD reg.
+			 */
+			if (auth_only_len)
+				fd->cmd = 0x80000000 | auth_only_len;
+
+		}
+send_pkts:
+		loop = 0;
+		while (loop < frames_to_send) {
+			loop += qman_enqueue_multi_fq(&inq[loop], &fds[loop],
+					frames_to_send - loop);
 		}
-		ret = dpaa_sec_enqueue_op(ops[loop], dpaa_qp);
-		if (!ret)
-			num_tx++;
+		nb_ops -= frames_to_send;
+		num_tx += frames_to_send;
 	}
+
 	dpaa_qp->tx_pkts += num_tx;
 	dpaa_qp->tx_errs += nb_ops - num_tx;
 
diff --git a/drivers/crypto/dpaa_sec/dpaa_sec.h b/drivers/crypto/dpaa_sec/dpaa_sec.h
index 578c46a..c53d9ae 100644
--- a/drivers/crypto/dpaa_sec/dpaa_sec.h
+++ b/drivers/crypto/dpaa_sec/dpaa_sec.h
@@ -8,7 +8,7 @@
 #define _DPAA_SEC_H_
 
 #define NUM_POOL_CHANNELS	4
-#define DPAA_SEC_BURST		32
+#define DPAA_SEC_BURST		7
 #define DPAA_SEC_ALG_UNSUPPORT	(-1)
 #define TDES_CBC_IV_LEN		8
 #define AES_CBC_IV_LEN		16
@@ -135,6 +135,7 @@ struct dpaa_sec_qp {
 
 #define RTE_DPAA_MAX_NB_SEC_QPS 1
 #define RTE_DPAA_MAX_RX_QUEUE RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS
+#define DPAA_MAX_DEQUEUE_NUM_FRAMES 63
 
 /* internal sec queue interface */
 struct dpaa_sec_dev_private {
-- 
2.9.3

^ permalink raw reply	[flat|nested] 24+ messages in thread
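For readers tracing the Rx-path change above: the removed dpaa_volatile_deq()
helper issued a volatile dequeue and drained it indirectly through
qman_poll_dqrr() and per-portal callbacks, while the new dpaa_sec_deq() arms
the dequeue with qman_set_vdq() and walks the DQRR ring directly. Below is a
minimal sketch of that consumer pattern, assuming only the qman calls visible
in the diff (qman_set_vdq, qman_dequeue, qman_dqrr_consume); op_from_fd() is
a hypothetical helper standing in for the container_of() recovery done in the
real code.

/*
 * Sketch of the direct DQRR drain loop introduced above. Assumes the
 * dpaa bus headers (fsl_qman.h) for struct qman_fq and friends; not a
 * drop-in replacement for the PMD code.
 */
static int
drain_out_fq(struct qman_fq *fq, struct rte_crypto_op **ops, int nb_ops)
{
	struct qm_dqrr_entry *dq;
	int pkts = 0;

	/* arm a volatile dequeue; the PMD clamps nb_ops to
	 * DPAA_MAX_DEQUEUE_NUM_FRAMES (63) first
	 */
	if (qman_set_vdq(fq, nb_ops))
		return 0;

	do {
		dq = qman_dequeue(fq);	/* poll one DQRR entry */
		if (!dq)
			continue;	/* hardware not done yet, spin */

		ops[pkts++] = op_from_fd(&dq->fd);	/* hypothetical helper */

		qman_dqrr_consume(fq, dq);	/* release the ring slot */
	} while (fq->flags & QMAN_FQ_STATE_VDQCR);	/* until VDQCR drains */

	return pkts;
}

The Tx side of the same patch batches up to DPAA_SEC_BURST frame descriptors
per call and retries qman_enqueue_multi_fq() until the whole batch has been
accepted, instead of spinning on qman_enqueue() one descriptor at a time.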

* Re: [dpdk-dev] [PATCH v2 3/3] crypto/dpaa_sec: support ipsec protocol offload
  2018-01-11 11:33   ` [dpdk-dev] [PATCH v2 3/3] crypto/dpaa_sec: support ipsec protocol offload Akhil Goyal
@ 2018-01-11 14:13     ` De Lara Guarch, Pablo
  0 siblings, 0 replies; 24+ messages in thread
From: De Lara Guarch, Pablo @ 2018-01-11 14:13 UTC (permalink / raw)
  To: Akhil Goyal, dev; +Cc: hemant.agrawal

Hi Akhil,

> -----Original Message-----
> From: Akhil Goyal [mailto:akhil.goyal@nxp.com]
> Sent: Thursday, January 11, 2018 11:33 AM
> To: dev@dpdk.org
> Cc: De Lara Guarch, Pablo <pablo.de.lara.guarch@intel.com>;
> hemant.agrawal@nxp.com; Akhil Goyal <akhil.goyal@nxp.com>
> Subject: [PATCH v2 3/3] crypto/dpaa_sec: support ipsec protocol offload
> 
> Signed-off-by: Akhil Goyal <akhil.goyal@nxp.com>
> Acked-by: Hemant Agrawal <hemant.agrawal@nxp.com>

I think it is worth adding this change to the release notes.

Thanks,
Pablo

^ permalink raw reply	[flat|nested] 24+ messages in thread

* [dpdk-dev] [PATCH v3 0/3] crypto/dpaa_sec: performance optimizations
  2018-01-11 11:33 ` [dpdk-dev] [PATCH v2 0/3] crypto/dpaa_sec: performance optimizations Akhil Goyal
                     ` (2 preceding siblings ...)
  2018-01-11 11:33   ` [dpdk-dev] [PATCH v2 3/3] crypto/dpaa_sec: support ipsec protocol offload Akhil Goyal
@ 2018-01-15  6:35   ` Akhil Goyal
  2018-01-15  6:35     ` [dpdk-dev] [PATCH v3 1/3] crypto/dpaa_sec: optimize virt to phy conversion Akhil Goyal
                       ` (3 more replies)
  3 siblings, 4 replies; 24+ messages in thread
From: Akhil Goyal @ 2018-01-15  6:35 UTC (permalink / raw)
  To: dev; +Cc: pablo.de.lara.guarch, hemant.agrawal, Akhil Goyal

Following changes are added to improve performance.
1. optimize virtual to physical address conversion
2. support for multiple sessions in a single queue pair
3. support for ipsec protocol offload

changes in v3:
 - updated release notes in patch 3/3 for ipsec protocol offload.
changes in v2:
 - incorporated comments from Hemant
 - split the patchset to remove dependency on bus/dpaa patch
Note:
1. This patchset is now independent of the patches on the net subtree.
2. bus/dpaa patch is already applied to net subtree.
3. The last patch in v1 of this series will be sent separately as it will
   be dependent on the net subtree.

Akhil Goyal (1):
  crypto/dpaa_sec: support ipsec protocol offload

Hemant Agrawal (2):
  crypto/dpaa_sec: optimize virt to phy conversion
  crypto/dpaa_sec: support multiple sessions per qp

 doc/guides/cryptodevs/features/dpaa_sec.ini |   1 +
 doc/guides/rel_notes/release_18_02.rst      |   5 +
 drivers/crypto/dpaa_sec/dpaa_sec.c          | 591 ++++++++++++++++++++++++----
 drivers/crypto/dpaa_sec/dpaa_sec.h          | 135 +++++--
 4 files changed, 618 insertions(+), 114 deletions(-)

-- 
2.9.3

^ permalink raw reply	[flat|nested] 24+ messages in thread

* [dpdk-dev] [PATCH v3 1/3] crypto/dpaa_sec: optimize virt to phy conversion
  2018-01-15  6:35   ` [dpdk-dev] [PATCH v3 0/3] crypto/dpaa_sec: performance optimizations Akhil Goyal
@ 2018-01-15  6:35     ` Akhil Goyal
  2018-01-15  6:35     ` [dpdk-dev] [PATCH v3 2/3] crypto/dpaa_sec: support multiple sessions per qp Akhil Goyal
                       ` (2 subsequent siblings)
  3 siblings, 0 replies; 24+ messages in thread
From: Akhil Goyal @ 2018-01-15  6:35 UTC (permalink / raw)
  To: dev; +Cc: pablo.de.lara.guarch, hemant.agrawal

From: Hemant Agrawal <hemant.agrawal@nxp.com>

Context memory is allocated from a mempool. Ideally it will get
all of its memory from a single segment, so a simple offset
calculation can be used for address conversion of such addresses
from context memory.

Signed-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>
Acked-by: Akhil Goyal <akhil.goyal@nxp.com>
---
 drivers/crypto/dpaa_sec/dpaa_sec.c | 27 ++++++++++++++++++---------
 drivers/crypto/dpaa_sec/dpaa_sec.h |  1 +
 2 files changed, 19 insertions(+), 9 deletions(-)

diff --git a/drivers/crypto/dpaa_sec/dpaa_sec.c b/drivers/crypto/dpaa_sec/dpaa_sec.c
index ad1b309..157eace 100644
--- a/drivers/crypto/dpaa_sec/dpaa_sec.c
+++ b/drivers/crypto/dpaa_sec/dpaa_sec.c
@@ -80,6 +80,8 @@ dpaa_sec_alloc_ctx(dpaa_sec_session *ses)
 	dcbz_64(&ctx->job.sg[SG_CACHELINE_3]);
 
 	ctx->ctx_pool = ses->ctx_pool;
+	ctx->vtop_offset = (uint64_t) ctx
+				- rte_mempool_virt2iova(ctx);
 
 	return ctx;
 }
@@ -104,6 +106,13 @@ dpaa_mem_vtop(void *vaddr)
 	return (rte_iova_t)(NULL);
 }
 
+/* virtual address conversion when mempool support is available for ctx */
+static inline phys_addr_t
+dpaa_mem_vtop_ctx(struct dpaa_sec_op_ctx *ctx, void *vaddr)
+{
+	return (uint64_t)vaddr - ctx->vtop_offset;
+}
+
 static inline void *
 dpaa_mem_ptov(rte_iova_t paddr)
 {
@@ -563,7 +572,7 @@ build_auth_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
 	if (is_decode(ses)) {
 		/* need to extend the input to a compound frame */
 		sg->extension = 1;
-		qm_sg_entry_set64(sg, dpaa_mem_vtop(&cf->sg[2]));
+		qm_sg_entry_set64(sg, dpaa_mem_vtop_ctx(ctx, &cf->sg[2]));
 		sg->length = sym->auth.data.length + ses->digest_length;
 		sg->final = 1;
 		cpu_to_hw_sg(sg);
@@ -577,7 +586,7 @@ build_auth_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
 		cpu_to_hw_sg(sg);
 
 		/* let's check digest by hw */
-		start_addr = dpaa_mem_vtop(old_digest);
+		start_addr = dpaa_mem_vtop_ctx(ctx, old_digest);
 		sg++;
 		qm_sg_entry_set64(sg, start_addr);
 		sg->length = ses->digest_length;
@@ -631,7 +640,7 @@ build_cipher_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
 	sg->extension = 1;
 	sg->final = 1;
 	sg->length = sym->cipher.data.length + ses->iv.length;
-	qm_sg_entry_set64(sg, dpaa_mem_vtop(&cf->sg[2]));
+	qm_sg_entry_set64(sg, dpaa_mem_vtop_ctx(ctx, &cf->sg[2]));
 	cpu_to_hw_sg(sg);
 
 	sg = &cf->sg[2];
@@ -677,7 +686,7 @@ build_cipher_auth_gcm(struct rte_crypto_op *op, dpaa_sec_session *ses)
 	/* input */
 	rte_prefetch0(cf->sg);
 	sg = &cf->sg[2];
-	qm_sg_entry_set64(&cf->sg[1], dpaa_mem_vtop(sg));
+	qm_sg_entry_set64(&cf->sg[1], dpaa_mem_vtop_ctx(ctx, sg));
 	if (is_encode(ses)) {
 		qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
 		sg->length = ses->iv.length;
@@ -722,7 +731,7 @@ build_cipher_auth_gcm(struct rte_crypto_op *op, dpaa_sec_session *ses)
 		       ses->digest_length);
 		sg++;
 
-		qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
+		qm_sg_entry_set64(sg, dpaa_mem_vtop_ctx(ctx, ctx->digest));
 		sg->length = ses->digest_length;
 		length += sg->length;
 		sg->final = 1;
@@ -736,7 +745,7 @@ build_cipher_auth_gcm(struct rte_crypto_op *op, dpaa_sec_session *ses)
 
 	/* output */
 	sg++;
-	qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop(sg));
+	qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop_ctx(ctx, sg));
 	qm_sg_entry_set64(sg,
 		dst_start_addr + sym->aead.data.offset - ses->auth_only_len);
 	sg->length = sym->aead.data.length + ses->auth_only_len;
@@ -788,7 +797,7 @@ build_cipher_auth(struct rte_crypto_op *op, dpaa_sec_session *ses)
 	/* input */
 	rte_prefetch0(cf->sg);
 	sg = &cf->sg[2];
-	qm_sg_entry_set64(&cf->sg[1], dpaa_mem_vtop(sg));
+	qm_sg_entry_set64(&cf->sg[1], dpaa_mem_vtop_ctx(ctx, sg));
 	if (is_encode(ses)) {
 		qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
 		sg->length = ses->iv.length;
@@ -818,7 +827,7 @@ build_cipher_auth(struct rte_crypto_op *op, dpaa_sec_session *ses)
 		       ses->digest_length);
 		sg++;
 
-		qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
+		qm_sg_entry_set64(sg, dpaa_mem_vtop_ctx(ctx, ctx->digest));
 		sg->length = ses->digest_length;
 		length += sg->length;
 		sg->final = 1;
@@ -832,7 +841,7 @@ build_cipher_auth(struct rte_crypto_op *op, dpaa_sec_session *ses)
 
 	/* output */
 	sg++;
-	qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop(sg));
+	qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop_ctx(ctx, sg));
 	qm_sg_entry_set64(sg, dst_start_addr + sym->cipher.data.offset);
 	sg->length = sym->cipher.data.length;
 	length = sg->length;
diff --git a/drivers/crypto/dpaa_sec/dpaa_sec.h b/drivers/crypto/dpaa_sec/dpaa_sec.h
index 9342949..e82f4fb 100644
--- a/drivers/crypto/dpaa_sec/dpaa_sec.h
+++ b/drivers/crypto/dpaa_sec/dpaa_sec.h
@@ -155,6 +155,7 @@ struct dpaa_sec_op_ctx {
 	struct rte_crypto_op *op;
 	struct rte_mempool *ctx_pool; /* mempool pointer for dpaa_sec_op_ctx */
 	uint32_t fd_status;
+	int64_t vtop_offset;
 	uint8_t digest[DPAA_MAX_NB_MAX_DIGEST];
 };
 
-- 
2.9.3

^ permalink raw reply	[flat|nested] 24+ messages in thread
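The core of the optimization above is replacing a per-buffer memseg lookup
with a single subtraction. A condensed sketch of the idea follows, valid only
under the single-segment assumption stated in the commit message (the context
object lives in one physically contiguous mempool element);
rte_mempool_virt2iova() is the DPDK API already used in the diff.

#include <rte_mempool.h>

/*
 * Sketch of the offset-based virt-to-phys conversion. The offset is
 * computed once when the context is allocated; every later conversion
 * of an address inside the same object is one subtraction, avoiding
 * the memseg table walk that dpaa_mem_vtop() performs.
 */
struct ctx_like {
	int64_t vtop_offset;	/* virt - phys, fixed for this object */
	/* ... remainder of dpaa_sec_op_ctx ... */
};

static inline void
ctx_record_vtop(struct ctx_like *ctx)
{
	ctx->vtop_offset = (uint64_t)ctx - rte_mempool_virt2iova(ctx);
}

static inline phys_addr_t
ctx_vtop(const struct ctx_like *ctx, const void *vaddr)
{
	/* valid only for vaddr within the same mempool object */
	return (uint64_t)vaddr - ctx->vtop_offset;
}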

* [dpdk-dev] [PATCH v3 2/3] crypto/dpaa_sec: support multiple sessions per qp
  2018-01-15  6:35   ` [dpdk-dev] [PATCH v3 0/3] crypto/dpaa_sec: performance optimizations Akhil Goyal
  2018-01-15  6:35     ` [dpdk-dev] [PATCH v3 1/3] crypto/dpaa_sec: optimize virt to phy conversion Akhil Goyal
@ 2018-01-15  6:35     ` Akhil Goyal
  2018-01-15  6:35     ` [dpdk-dev] [PATCH v3 3/3] crypto/dpaa_sec: support ipsec protocol offload Akhil Goyal
  2018-01-15 14:46     ` [dpdk-dev] [PATCH v3 0/3] crypto/dpaa_sec: performance optimizations De Lara Guarch, Pablo
  3 siblings, 0 replies; 24+ messages in thread
From: Akhil Goyal @ 2018-01-15  6:35 UTC (permalink / raw)
  To: dev; +Cc: pablo.de.lara.guarch, hemant.agrawal

From: Hemant Agrawal <hemant.agrawal@nxp.com>

Signed-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>
Acked-by: Akhil Goyal <akhil.goyal@nxp.com>
---
 drivers/crypto/dpaa_sec/dpaa_sec.c | 154 +++++++++++++++++++++++++------------
 drivers/crypto/dpaa_sec/dpaa_sec.h |  74 +++++++++---------
 2 files changed, 145 insertions(+), 83 deletions(-)

diff --git a/drivers/crypto/dpaa_sec/dpaa_sec.c b/drivers/crypto/dpaa_sec/dpaa_sec.c
index 157eace..f433f0a 100644
--- a/drivers/crypto/dpaa_sec/dpaa_sec.c
+++ b/drivers/crypto/dpaa_sec/dpaa_sec.c
@@ -43,6 +43,9 @@ static uint8_t cryptodev_driver_id;
 static __thread struct rte_crypto_op **dpaa_sec_ops;
 static __thread int dpaa_sec_op_nb;
 
+static int
+dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess);
+
 static inline void
 dpaa_sec_op_ending(struct dpaa_sec_op_ctx *ctx)
 {
@@ -151,15 +154,6 @@ dpaa_sec_init_rx(struct qman_fq *fq_in, rte_iova_t hwdesc,
 	/* Clear FQ options */
 	memset(&fq_opts, 0x00, sizeof(struct qm_mcc_initfq));
 
-	flags = QMAN_FQ_FLAG_LOCKED | QMAN_FQ_FLAG_DYNAMIC_FQID |
-		QMAN_FQ_FLAG_TO_DCPORTAL;
-
-	ret = qman_create_fq(0, flags, fq_in);
-	if (unlikely(ret != 0)) {
-		PMD_INIT_LOG(ERR, "qman_create_fq failed");
-		return ret;
-	}
-
 	flags = QMAN_INITFQ_FLAG_SCHED;
 	fq_opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_CONTEXTA |
 			  QM_INITFQ_WE_CONTEXTB;
@@ -171,9 +165,11 @@ dpaa_sec_init_rx(struct qman_fq *fq_in, rte_iova_t hwdesc,
 
 	fq_in->cb.ern  = ern_sec_fq_handler;
 
+	PMD_INIT_LOG(DEBUG, "in-%x out-%x", fq_in->fqid, fqid_out);
+
 	ret = qman_init_fq(fq_in, flags, &fq_opts);
 	if (unlikely(ret != 0))
-		PMD_INIT_LOG(ERR, "qman_init_fq failed");
+		PMD_INIT_LOG(ERR, "qman_init_fq failed %d", ret);
 
 	return ret;
 }
@@ -357,7 +353,7 @@ dpaa_sec_prep_cdb(dpaa_sec_session *ses)
 {
 	struct alginfo alginfo_c = {0}, alginfo_a = {0}, alginfo = {0};
 	uint32_t shared_desc_len = 0;
-	struct sec_cdb *cdb = &ses->qp->cdb;
+	struct sec_cdb *cdb = &ses->cdb;
 	int err;
 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
 	int swap = false;
@@ -877,12 +873,10 @@ dpaa_sec_enqueue_op(struct rte_crypto_op *op,  struct dpaa_sec_qp *qp)
 	ses = (dpaa_sec_session *)get_session_private_data(op->sym->session,
 					cryptodev_driver_id);
 
-	if (unlikely(!qp->ses || qp->ses != ses)) {
-		qp->ses = ses;
-		ses->qp = qp;
-		ret = dpaa_sec_prep_cdb(ses);
-		if (ret)
-			return ret;
+	if (unlikely(!ses->qp || ses->qp != qp)) {
+		PMD_INIT_LOG(DEBUG, "sess->qp - %p qp %p", ses->qp, qp);
+		if (dpaa_sec_attach_sess_q(qp, ses))
+			return -1;
 	}
 
 	/*
@@ -918,7 +912,7 @@ dpaa_sec_enqueue_op(struct rte_crypto_op *op,  struct dpaa_sec_qp *qp)
 	if (auth_only_len)
 		fd.cmd = 0x80000000 | auth_only_len;
 	do {
-		ret = qman_enqueue(&qp->inq, &fd, 0);
+		ret = qman_enqueue(ses->inq, &fd, 0);
 	} while (ret != 0);
 
 	return 0;
@@ -1134,43 +1128,82 @@ dpaa_sec_aead_init(struct rte_cryptodev *dev __rte_unused,
 	return 0;
 }
 
-static int
-dpaa_sec_qp_attach_sess(struct rte_cryptodev *dev, uint16_t qp_id, void *ses)
+static struct qman_fq *
+dpaa_sec_attach_rxq(struct dpaa_sec_dev_private *qi)
 {
-	dpaa_sec_session *sess = ses;
-	struct dpaa_sec_qp *qp;
+	unsigned int i;
 
-	PMD_INIT_FUNC_TRACE();
+	for (i = 0; i < qi->max_nb_sessions; i++) {
+		if (qi->inq_attach[i] == 0) {
+			qi->inq_attach[i] = 1;
+			return &qi->inq[i];
+		}
+	}
+	PMD_DRV_LOG(ERR, "All sessions in use %x", qi->max_nb_sessions);
+
+	return NULL;
+}
 
-	qp = dev->data->queue_pairs[qp_id];
-	if (qp->ses != NULL) {
-		PMD_INIT_LOG(ERR, "qp in-use by another session\n");
-		return -EBUSY;
+static int
+dpaa_sec_detach_rxq(struct dpaa_sec_dev_private *qi, struct qman_fq *fq)
+{
+	unsigned int i;
+
+	for (i = 0; i < qi->max_nb_sessions; i++) {
+		if (&qi->inq[i] == fq) {
+			qi->inq_attach[i] = 0;
+			return 0;
+		}
 	}
+	return -1;
+}
+
+static int
+dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess)
+{
+	int ret;
 
-	qp->ses = sess;
 	sess->qp = qp;
+	ret = dpaa_sec_prep_cdb(sess);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Unable to prepare sec cdb");
+		return -1;
+	}
 
-	return dpaa_sec_prep_cdb(sess);
+	ret = dpaa_sec_init_rx(sess->inq, dpaa_mem_vtop(&sess->cdb),
+			       qman_fq_fqid(&qp->outq));
+	if (ret)
+		PMD_DRV_LOG(ERR, "Unable to init sec queue");
+
+	return ret;
+}
+
+static int
+dpaa_sec_qp_attach_sess(struct rte_cryptodev *dev __rte_unused,
+			uint16_t qp_id __rte_unused,
+			void *ses __rte_unused)
+{
+	PMD_INIT_FUNC_TRACE();
+	return 0;
 }
 
 static int
-dpaa_sec_qp_detach_sess(struct rte_cryptodev *dev, uint16_t qp_id, void *ses)
+dpaa_sec_qp_detach_sess(struct rte_cryptodev *dev,
+			uint16_t qp_id  __rte_unused,
+			void *ses)
 {
 	dpaa_sec_session *sess = ses;
-	struct dpaa_sec_qp *qp;
+	struct dpaa_sec_dev_private *qi = dev->data->dev_private;
 
 	PMD_INIT_FUNC_TRACE();
 
-	qp = dev->data->queue_pairs[qp_id];
-	if (qp->ses != NULL) {
-		qp->ses = NULL;
-		sess->qp = NULL;
-		return 0;
-	}
+	if (sess->inq)
+		dpaa_sec_detach_rxq(qi, sess->inq);
+	sess->inq = NULL;
 
-	PMD_DRV_LOG(ERR, "No session attached to qp");
-	return -EINVAL;
+	sess->qp = NULL;
+
+	return 0;
 }
 
 static int
@@ -1233,8 +1266,20 @@ dpaa_sec_set_session_parameters(struct rte_cryptodev *dev,
 		return -EINVAL;
 	}
 	session->ctx_pool = internals->ctx_pool;
+	session->inq = dpaa_sec_attach_rxq(internals);
+	if (session->inq == NULL) {
+		PMD_DRV_LOG(ERR, "unable to attach sec queue");
+		goto err1;
+	}
 
 	return 0;
+
+err1:
+	rte_free(session->cipher_key.data);
+	rte_free(session->auth_key.data);
+	memset(session, 0, sizeof(dpaa_sec_session));
+
+	return -EINVAL;
 }
 
 static int
@@ -1267,6 +1312,7 @@ dpaa_sec_session_configure(struct rte_cryptodev *dev,
 	set_session_private_data(sess, dev->driver_id,
 			sess_private_data);
 
+
 	return 0;
 }
 
@@ -1275,16 +1321,22 @@ static void
 dpaa_sec_session_clear(struct rte_cryptodev *dev,
 		struct rte_cryptodev_sym_session *sess)
 {
-	PMD_INIT_FUNC_TRACE();
+	struct dpaa_sec_dev_private *qi = dev->data->dev_private;
 	uint8_t index = dev->driver_id;
 	void *sess_priv = get_session_private_data(sess, index);
+
+	PMD_INIT_FUNC_TRACE();
+
 	dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;
 
 	if (sess_priv) {
+		struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
+
+		if (s->inq)
+			dpaa_sec_detach_rxq(qi, s->inq);
 		rte_free(s->cipher_key.data);
 		rte_free(s->auth_key.data);
 		memset(s, 0, sizeof(dpaa_sec_session));
-		struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
 		set_session_private_data(sess, index, NULL);
 		rte_mempool_put(sess_mp, sess_priv);
 	}
@@ -1332,7 +1384,8 @@ dpaa_sec_dev_infos_get(struct rte_cryptodev *dev,
 		info->capabilities = dpaa_sec_capabilities;
 		info->sym.max_nb_sessions = internals->max_nb_sessions;
 		info->sym.max_nb_sessions_per_qp =
-			RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS / RTE_MAX_NB_SEC_QPS;
+			RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS /
+			RTE_DPAA_MAX_NB_SEC_QPS;
 		info->driver_id = cryptodev_driver_id;
 	}
 }
@@ -1377,7 +1430,7 @@ dpaa_sec_dev_init(struct rte_cryptodev *cryptodev)
 {
 	struct dpaa_sec_dev_private *internals;
 	struct dpaa_sec_qp *qp;
-	uint32_t i;
+	uint32_t i, flags;
 	int ret;
 	char str[20];
 
@@ -1393,7 +1446,7 @@ dpaa_sec_dev_init(struct rte_cryptodev *cryptodev)
 			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING;
 
 	internals = cryptodev->data->dev_private;
-	internals->max_nb_queue_pairs = RTE_MAX_NB_SEC_QPS;
+	internals->max_nb_queue_pairs = RTE_DPAA_MAX_NB_SEC_QPS;
 	internals->max_nb_sessions = RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS;
 
 	for (i = 0; i < internals->max_nb_queue_pairs; i++) {
@@ -1404,10 +1457,15 @@ dpaa_sec_dev_init(struct rte_cryptodev *cryptodev)
 			PMD_INIT_LOG(ERR, "config tx of queue pair  %d", i);
 			goto init_error;
 		}
-		ret = dpaa_sec_init_rx(&qp->inq, dpaa_mem_vtop(&qp->cdb),
-				       qman_fq_fqid(&qp->outq));
-		if (ret) {
-			PMD_INIT_LOG(ERR, "config rx of queue pair %d", i);
+	}
+
+	flags = QMAN_FQ_FLAG_LOCKED | QMAN_FQ_FLAG_DYNAMIC_FQID |
+		QMAN_FQ_FLAG_TO_DCPORTAL;
+	for (i = 0; i < internals->max_nb_sessions; i++) {
+		/* create rx qman fq for sessions*/
+		ret = qman_create_fq(0, flags, &internals->inq[i]);
+		if (unlikely(ret != 0)) {
+			PMD_INIT_LOG(ERR, "sec qman_create_fq failed");
 			goto init_error;
 		}
 	}
diff --git a/drivers/crypto/dpaa_sec/dpaa_sec.h b/drivers/crypto/dpaa_sec/dpaa_sec.h
index e82f4fb..93369e4 100644
--- a/drivers/crypto/dpaa_sec/dpaa_sec.h
+++ b/drivers/crypto/dpaa_sec/dpaa_sec.h
@@ -38,36 +38,6 @@ enum dpaa_sec_op_type {
 	DPAA_SEC_MAX
 };
 
-typedef struct dpaa_sec_session_entry {
-	uint8_t dir;         /*!< Operation Direction */
-	enum rte_crypto_cipher_algorithm cipher_alg; /*!< Cipher Algorithm*/
-	enum rte_crypto_auth_algorithm auth_alg; /*!< Authentication Algorithm*/
-	enum rte_crypto_aead_algorithm aead_alg; /*!< Authentication Algorithm*/
-	union {
-		struct {
-			uint8_t *data;	/**< pointer to key data */
-			size_t length;	/**< key length in bytes */
-		} aead_key;
-		struct {
-			struct {
-				uint8_t *data;	/**< pointer to key data */
-				size_t length;	/**< key length in bytes */
-			} cipher_key;
-			struct {
-				uint8_t *data;	/**< pointer to key data */
-				size_t length;	/**< key length in bytes */
-			} auth_key;
-		};
-	};
-	struct {
-		uint16_t length;
-		uint16_t offset;
-	} iv;	/**< Initialisation vector parameters */
-	uint16_t auth_only_len; /*!< Length of data for Auth only */
-	uint32_t digest_length;
-	struct dpaa_sec_qp *qp;
-	struct rte_mempool *ctx_pool; /* session mempool for dpaa_sec_op_ctx */
-} dpaa_sec_session;
 
 #define DPAA_SEC_MAX_DESC_SIZE  64
 /* code or cmd block to caam */
@@ -117,11 +87,41 @@ struct sec_cdb {
 	uint32_t sh_desc[DPAA_SEC_MAX_DESC_SIZE];
 };
 
+typedef struct dpaa_sec_session_entry {
+	uint8_t dir;         /*!< Operation Direction */
+	enum rte_crypto_cipher_algorithm cipher_alg; /*!< Cipher Algorithm*/
+	enum rte_crypto_auth_algorithm auth_alg; /*!< Authentication Algorithm*/
+	enum rte_crypto_aead_algorithm aead_alg; /*!< Authentication Algorithm*/
+	union {
+		struct {
+			uint8_t *data;	/**< pointer to key data */
+			size_t length;	/**< key length in bytes */
+		} aead_key;
+		struct {
+			struct {
+				uint8_t *data;	/**< pointer to key data */
+				size_t length;	/**< key length in bytes */
+			} cipher_key;
+			struct {
+				uint8_t *data;	/**< pointer to key data */
+				size_t length;	/**< key length in bytes */
+			} auth_key;
+		};
+	};
+	struct {
+		uint16_t length;
+		uint16_t offset;
+	} iv;	/**< Initialisation vector parameters */
+	uint16_t auth_only_len; /*!< Length of data for Auth only */
+	uint32_t digest_length;
+	struct dpaa_sec_qp *qp;
+	struct qman_fq *inq;
+	struct sec_cdb cdb;	/**< cmd block associated with qp */
+	struct rte_mempool *ctx_pool; /* session mempool for dpaa_sec_op_ctx */
+} dpaa_sec_session;
+
 struct dpaa_sec_qp {
 	struct dpaa_sec_dev_private *internals;
-	struct sec_cdb cdb;		/* cmd block associated with qp */
-	dpaa_sec_session *ses;		/* session associated with qp */
-	struct qman_fq inq;
 	struct qman_fq outq;
 	int rx_pkts;
 	int rx_errs;
@@ -129,12 +129,16 @@ struct dpaa_sec_qp {
 	int tx_errs;
 };
 
-#define RTE_MAX_NB_SEC_QPS RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS
+#define RTE_DPAA_MAX_NB_SEC_QPS 1
+#define RTE_DPAA_MAX_RX_QUEUE RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS
+
 /* internal sec queue interface */
 struct dpaa_sec_dev_private {
 	void *sec_hw;
 	struct rte_mempool *ctx_pool; /* per dev mempool for dpaa_sec_op_ctx */
-	struct dpaa_sec_qp qps[RTE_MAX_NB_SEC_QPS]; /* i/o queue for sec */
+	struct dpaa_sec_qp qps[RTE_DPAA_MAX_NB_SEC_QPS]; /* i/o queue for sec */
+	struct qman_fq inq[RTE_DPAA_MAX_RX_QUEUE];
+	unsigned char inq_attach[RTE_DPAA_MAX_RX_QUEUE];
 	unsigned int max_nb_queue_pairs;
 	unsigned int max_nb_sessions;
 };
-- 
2.9.3

^ permalink raw reply	[flat|nested] 24+ messages in thread
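The structural change above is that the shared descriptor (CDB) and the input
frame queue move from the queue pair into the session, so a single qp can
serve many sessions. A condensed sketch of the binding done on first enqueue,
restating dpaa_sec_attach_sess_q() from the diff with the error paths trimmed:

/*
 * Sketch: bind a session to a queue pair on first use. The session's
 * private input FQ is pointed at the session's own CDB and at the
 * qp's shared output FQ, which is what allows multiple sessions to
 * coexist on one lockless qp.
 */
static int
bind_session_to_qp(struct dpaa_sec_qp *qp, dpaa_sec_session *sess)
{
	sess->qp = qp;

	/* shared descriptor now lives in the session, not the qp */
	if (dpaa_sec_prep_cdb(sess))
		return -1;

	/* contexta <- session CDB, contextb <- qp's output FQID */
	return dpaa_sec_init_rx(sess->inq, dpaa_mem_vtop(&sess->cdb),
				qman_fq_fqid(&qp->outq));
}

The input FQs themselves are pre-created at device init (one per possible
session, RTE_DPAA_MAX_RX_QUEUE of them) and handed out to sessions by
dpaa_sec_attach_rxq() with a first-free-slot scan over inq_attach[].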

* [dpdk-dev] [PATCH v3 3/3] crypto/dpaa_sec: support ipsec protocol offload
  2018-01-15  6:35   ` [dpdk-dev] [PATCH v3 0/3] crypto/dpaa_sec: performance optimizations Akhil Goyal
  2018-01-15  6:35     ` [dpdk-dev] [PATCH v3 1/3] crypto/dpaa_sec: optimize virt to phy conversion Akhil Goyal
  2018-01-15  6:35     ` [dpdk-dev] [PATCH v3 2/3] crypto/dpaa_sec: support multiple sessions per qp Akhil Goyal
@ 2018-01-15  6:35     ` Akhil Goyal
  2018-01-15 14:46     ` [dpdk-dev] [PATCH v3 0/3] crypto/dpaa_sec: performance optimizations De Lara Guarch, Pablo
  3 siblings, 0 replies; 24+ messages in thread
From: Akhil Goyal @ 2018-01-15  6:35 UTC (permalink / raw)
  To: dev; +Cc: pablo.de.lara.guarch, hemant.agrawal, Akhil Goyal

Signed-off-by: Akhil Goyal <akhil.goyal@nxp.com>
Acked-by: Hemant Agrawal <hemant.agrawal@nxp.com>
---
 doc/guides/cryptodevs/features/dpaa_sec.ini |   1 +
 doc/guides/rel_notes/release_18_02.rst      |   5 +
 drivers/crypto/dpaa_sec/dpaa_sec.c          | 410 ++++++++++++++++++++++++++--
 drivers/crypto/dpaa_sec/dpaa_sec.h          |  62 ++++-
 4 files changed, 455 insertions(+), 23 deletions(-)

diff --git a/doc/guides/cryptodevs/features/dpaa_sec.ini b/doc/guides/cryptodevs/features/dpaa_sec.ini
index 0e8f5b2..deab53a 100644
--- a/doc/guides/cryptodevs/features/dpaa_sec.ini
+++ b/doc/guides/cryptodevs/features/dpaa_sec.ini
@@ -7,6 +7,7 @@
 Symmetric crypto       = Y
 Sym operation chaining = Y
 HW Accelerated         = Y
+Protocol offload       = Y
 
 ;
 ; Supported crypto algorithms of the 'dpaa_sec' crypto driver.
diff --git a/doc/guides/rel_notes/release_18_02.rst b/doc/guides/rel_notes/release_18_02.rst
index 0da12cb..318ae62 100644
--- a/doc/guides/rel_notes/release_18_02.rst
+++ b/doc/guides/rel_notes/release_18_02.rst
@@ -47,6 +47,11 @@ New Features
 
   * AES-CCM algorithm.
 
+* **Updated the DPAA_SEC crypto driver to support rte_security.**
+
+  Updated the ``dpaa_sec`` crypto PMD to support ``rte_security`` lookaside
+  protocol offload for IPSec.
+
 API Changes
 -----------
 
diff --git a/drivers/crypto/dpaa_sec/dpaa_sec.c b/drivers/crypto/dpaa_sec/dpaa_sec.c
index f433f0a..d7b6f39 100644
--- a/drivers/crypto/dpaa_sec/dpaa_sec.c
+++ b/drivers/crypto/dpaa_sec/dpaa_sec.c
@@ -15,6 +15,7 @@
 #include <rte_cryptodev_pmd.h>
 #include <rte_crypto.h>
 #include <rte_cryptodev.h>
+#include <rte_security_driver.h>
 #include <rte_cycles.h>
 #include <rte_dev.h>
 #include <rte_kvargs.h>
@@ -196,8 +197,19 @@ dqrr_out_fq_cb_rx(struct qman_portal *qm __always_unused,
 	 * sg[1] for input
 	 */
 	job = dpaa_mem_ptov(qm_fd_addr_get64(fd));
+
 	ctx = container_of(job, struct dpaa_sec_op_ctx, job);
 	ctx->fd_status = fd->status;
+	if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
+		struct qm_sg_entry *sg_out;
+		uint32_t len;
+
+		sg_out = &job->sg[0];
+		hw_sg_to_cpu(sg_out);
+		len = sg_out->length;
+		ctx->op->sym->m_src->pkt_len = len;
+		ctx->op->sym->m_src->data_len = len;
+	}
 	dpaa_sec_ops[dpaa_sec_op_nb++] = ctx->op;
 	dpaa_sec_op_ending(ctx);
 
@@ -261,7 +273,13 @@ static inline int is_aead(dpaa_sec_session *ses)
 static inline int is_auth_cipher(dpaa_sec_session *ses)
 {
 	return ((ses->cipher_alg != RTE_CRYPTO_CIPHER_NULL) &&
-		(ses->auth_alg != RTE_CRYPTO_AUTH_NULL));
+		(ses->auth_alg != RTE_CRYPTO_AUTH_NULL) &&
+		(ses->proto_alg != RTE_SECURITY_PROTOCOL_IPSEC));
+}
+
+static inline int is_proto_ipsec(dpaa_sec_session *ses)
+{
+	return (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC);
 }
 
 static inline int is_encode(dpaa_sec_session *ses)
@@ -282,27 +300,39 @@ caam_auth_alg(dpaa_sec_session *ses, struct alginfo *alginfo_a)
 		ses->digest_length = 0;
 		break;
 	case RTE_CRYPTO_AUTH_MD5_HMAC:
-		alginfo_a->algtype = OP_ALG_ALGSEL_MD5;
+		alginfo_a->algtype =
+			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
+			OP_PCL_IPSEC_HMAC_MD5_96 : OP_ALG_ALGSEL_MD5;
 		alginfo_a->algmode = OP_ALG_AAI_HMAC;
 		break;
 	case RTE_CRYPTO_AUTH_SHA1_HMAC:
-		alginfo_a->algtype = OP_ALG_ALGSEL_SHA1;
+		alginfo_a->algtype =
+			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
+			OP_PCL_IPSEC_HMAC_SHA1_96 : OP_ALG_ALGSEL_SHA1;
 		alginfo_a->algmode = OP_ALG_AAI_HMAC;
 		break;
 	case RTE_CRYPTO_AUTH_SHA224_HMAC:
-		alginfo_a->algtype = OP_ALG_ALGSEL_SHA224;
+		alginfo_a->algtype =
+			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
+			OP_PCL_IPSEC_HMAC_SHA1_160 : OP_ALG_ALGSEL_SHA224;
 		alginfo_a->algmode = OP_ALG_AAI_HMAC;
 		break;
 	case RTE_CRYPTO_AUTH_SHA256_HMAC:
-		alginfo_a->algtype = OP_ALG_ALGSEL_SHA256;
+		alginfo_a->algtype =
+			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
+			OP_PCL_IPSEC_HMAC_SHA2_256_128 : OP_ALG_ALGSEL_SHA256;
 		alginfo_a->algmode = OP_ALG_AAI_HMAC;
 		break;
 	case RTE_CRYPTO_AUTH_SHA384_HMAC:
-		alginfo_a->algtype = OP_ALG_ALGSEL_SHA384;
+		alginfo_a->algtype =
+			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
+			OP_PCL_IPSEC_HMAC_SHA2_384_192 : OP_ALG_ALGSEL_SHA384;
 		alginfo_a->algmode = OP_ALG_AAI_HMAC;
 		break;
 	case RTE_CRYPTO_AUTH_SHA512_HMAC:
-		alginfo_a->algtype = OP_ALG_ALGSEL_SHA512;
+		alginfo_a->algtype =
+			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
+			OP_PCL_IPSEC_HMAC_SHA2_512_256 : OP_ALG_ALGSEL_SHA512;
 		alginfo_a->algmode = OP_ALG_AAI_HMAC;
 		break;
 	default:
@@ -317,15 +347,21 @@ caam_cipher_alg(dpaa_sec_session *ses, struct alginfo *alginfo_c)
 	case RTE_CRYPTO_CIPHER_NULL:
 		break;
 	case RTE_CRYPTO_CIPHER_AES_CBC:
-		alginfo_c->algtype = OP_ALG_ALGSEL_AES;
+		alginfo_c->algtype =
+			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
+			OP_PCL_IPSEC_AES_CBC : OP_ALG_ALGSEL_AES;
 		alginfo_c->algmode = OP_ALG_AAI_CBC;
 		break;
 	case RTE_CRYPTO_CIPHER_3DES_CBC:
-		alginfo_c->algtype = OP_ALG_ALGSEL_3DES;
+		alginfo_c->algtype =
+			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
+			OP_PCL_IPSEC_3DES : OP_ALG_ALGSEL_3DES;
 		alginfo_c->algmode = OP_ALG_AAI_CBC;
 		break;
 	case RTE_CRYPTO_CIPHER_AES_CTR:
-		alginfo_c->algtype = OP_ALG_ALGSEL_AES;
+		alginfo_c->algtype =
+			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
+			OP_PCL_IPSEC_AES_CTR : OP_ALG_ALGSEL_AES;
 		alginfo_c->algmode = OP_ALG_AAI_CTR;
 		break;
 	default:
@@ -471,14 +507,28 @@ dpaa_sec_prep_cdb(dpaa_sec_session *ses)
 		cdb->sh_desc[0] = 0;
 		cdb->sh_desc[1] = 0;
 		cdb->sh_desc[2] = 0;
-
-		/* Auth_only_len is set as 0 here and it will be overwritten
-		 *  in fd for each packet.
-		 */
-		shared_desc_len = cnstr_shdsc_authenc(cdb->sh_desc,
-				true, swap, &alginfo_c, &alginfo_a,
-				ses->iv.length, 0,
-				ses->digest_length, ses->dir);
+		if (is_proto_ipsec(ses)) {
+			if (ses->dir == DIR_ENC) {
+				shared_desc_len = cnstr_shdsc_ipsec_new_encap(
+						cdb->sh_desc,
+						true, swap, &ses->encap_pdb,
+						(uint8_t *)&ses->ip4_hdr,
+						&alginfo_c, &alginfo_a);
+			} else if (ses->dir == DIR_DEC) {
+				shared_desc_len = cnstr_shdsc_ipsec_new_decap(
+						cdb->sh_desc,
+						true, swap, &ses->decap_pdb,
+						&alginfo_c, &alginfo_a);
+			}
+		} else {
+			/* Auth_only_len is set as 0 here and it will be
+			 * overwritten in fd for each packet.
+			 */
+			shared_desc_len = cnstr_shdsc_authenc(cdb->sh_desc,
+					true, swap, &alginfo_c, &alginfo_a,
+					ses->iv.length, 0,
+					ses->digest_length, ses->dir);
+		}
 	}
 	cdb->sh_hdr.hi.field.idlen = shared_desc_len;
 	cdb->sh_hdr.hi.word = rte_cpu_to_be_32(cdb->sh_hdr.hi.word);
@@ -860,6 +910,45 @@ build_cipher_auth(struct rte_crypto_op *op, dpaa_sec_session *ses)
 	return cf;
 }
 
+static inline struct dpaa_sec_job *
+build_proto(struct rte_crypto_op *op, dpaa_sec_session *ses)
+{
+	struct rte_crypto_sym_op *sym = op->sym;
+	struct dpaa_sec_job *cf;
+	struct dpaa_sec_op_ctx *ctx;
+	struct qm_sg_entry *sg;
+	phys_addr_t src_start_addr, dst_start_addr;
+
+	ctx = dpaa_sec_alloc_ctx(ses);
+	if (!ctx)
+		return NULL;
+	cf = &ctx->job;
+	ctx->op = op;
+
+	src_start_addr = rte_pktmbuf_mtophys(sym->m_src);
+
+	if (sym->m_dst)
+		dst_start_addr = rte_pktmbuf_mtophys(sym->m_dst);
+	else
+		dst_start_addr = src_start_addr;
+
+	/* input */
+	sg = &cf->sg[1];
+	qm_sg_entry_set64(sg, src_start_addr);
+	sg->length = sym->m_src->pkt_len;
+	sg->final = 1;
+	cpu_to_hw_sg(sg);
+
+	sym->m_src->packet_type &= ~RTE_PTYPE_L4_MASK;
+	/* output */
+	sg = &cf->sg[0];
+	qm_sg_entry_set64(sg, dst_start_addr);
+	sg->length = sym->m_src->buf_len - sym->m_src->data_off;
+	cpu_to_hw_sg(sg);
+
+	return cf;
+}
+
 static int
 dpaa_sec_enqueue_op(struct rte_crypto_op *op,  struct dpaa_sec_qp *qp)
 {
@@ -870,8 +959,14 @@ dpaa_sec_enqueue_op(struct rte_crypto_op *op,  struct dpaa_sec_qp *qp)
 	uint32_t auth_only_len = op->sym->auth.data.length -
 				op->sym->cipher.data.length;
 
-	ses = (dpaa_sec_session *)get_session_private_data(op->sym->session,
-					cryptodev_driver_id);
+	if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
+		ses = (dpaa_sec_session *)get_session_private_data(
+				op->sym->session, cryptodev_driver_id);
+	else if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION)
+		ses = (dpaa_sec_session *)get_sec_session_private_data(
+				op->sym->sec_session);
+	else
+		return -ENOTSUP;
 
 	if (unlikely(!ses->qp || ses->qp != qp)) {
 		PMD_INIT_LOG(DEBUG, "sess->qp - %p qp %p", ses->qp, qp);
@@ -895,6 +990,8 @@ dpaa_sec_enqueue_op(struct rte_crypto_op *op,  struct dpaa_sec_qp *qp)
 		auth_only_len = ses->auth_only_len;
 	} else if (is_auth_cipher(ses)) {
 		cf = build_cipher_auth(op, ses);
+	} else if (is_proto_ipsec(ses)) {
+		cf = build_proto(op, ses);
 	} else {
 		PMD_TX_LOG(ERR, "not supported sec op");
 		return -ENOTSUP;
@@ -933,7 +1030,7 @@ dpaa_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
 
 	/*Prepare each packet which is to be sent*/
 	for (loop = 0; loop < nb_ops; loop++) {
-		if (ops[loop]->sess_type != RTE_CRYPTO_OP_WITH_SESSION) {
+		if (ops[loop]->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
 			PMD_TX_LOG(ERR, "sessionless crypto op not supported");
 			return 0;
 		}
@@ -1343,6 +1440,236 @@ dpaa_sec_session_clear(struct rte_cryptodev *dev,
 }
 
 static int
+dpaa_sec_set_ipsec_session(__rte_unused struct rte_cryptodev *dev,
+			   struct rte_security_session_conf *conf,
+			   void *sess)
+{
+	struct dpaa_sec_dev_private *internals = dev->data->dev_private;
+	struct rte_security_ipsec_xform *ipsec_xform = &conf->ipsec;
+	struct rte_crypto_auth_xform *auth_xform;
+	struct rte_crypto_cipher_xform *cipher_xform;
+	dpaa_sec_session *session = (dpaa_sec_session *)sess;
+
+	PMD_INIT_FUNC_TRACE();
+
+	if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
+		cipher_xform = &conf->crypto_xform->cipher;
+		auth_xform = &conf->crypto_xform->next->auth;
+	} else {
+		auth_xform = &conf->crypto_xform->auth;
+		cipher_xform = &conf->crypto_xform->next->cipher;
+	}
+	session->proto_alg = conf->protocol;
+	session->cipher_key.data = rte_zmalloc(NULL,
+					       cipher_xform->key.length,
+					       RTE_CACHE_LINE_SIZE);
+	if (session->cipher_key.data == NULL &&
+			cipher_xform->key.length > 0) {
+		RTE_LOG(ERR, PMD, "No Memory for cipher key\n");
+		return -ENOMEM;
+	}
+
+	session->cipher_key.length = cipher_xform->key.length;
+	session->auth_key.data = rte_zmalloc(NULL,
+					auth_xform->key.length,
+					RTE_CACHE_LINE_SIZE);
+	if (session->auth_key.data == NULL &&
+			auth_xform->key.length > 0) {
+		RTE_LOG(ERR, PMD, "No Memory for auth key\n");
+		rte_free(session->cipher_key.data);
+		return -ENOMEM;
+	}
+	session->auth_key.length = auth_xform->key.length;
+	memcpy(session->cipher_key.data, cipher_xform->key.data,
+			cipher_xform->key.length);
+	memcpy(session->auth_key.data, auth_xform->key.data,
+			auth_xform->key.length);
+
+	switch (auth_xform->algo) {
+	case RTE_CRYPTO_AUTH_SHA1_HMAC:
+		session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC;
+		break;
+	case RTE_CRYPTO_AUTH_MD5_HMAC:
+		session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC;
+		break;
+	case RTE_CRYPTO_AUTH_SHA256_HMAC:
+		session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC;
+		break;
+	case RTE_CRYPTO_AUTH_SHA384_HMAC:
+		session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC;
+		break;
+	case RTE_CRYPTO_AUTH_SHA512_HMAC:
+		session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC;
+		break;
+	case RTE_CRYPTO_AUTH_AES_CMAC:
+		session->auth_alg = RTE_CRYPTO_AUTH_AES_CMAC;
+		break;
+	case RTE_CRYPTO_AUTH_NULL:
+		session->auth_alg = RTE_CRYPTO_AUTH_NULL;
+		break;
+	case RTE_CRYPTO_AUTH_SHA224_HMAC:
+	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
+	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
+	case RTE_CRYPTO_AUTH_SHA1:
+	case RTE_CRYPTO_AUTH_SHA256:
+	case RTE_CRYPTO_AUTH_SHA512:
+	case RTE_CRYPTO_AUTH_SHA224:
+	case RTE_CRYPTO_AUTH_SHA384:
+	case RTE_CRYPTO_AUTH_MD5:
+	case RTE_CRYPTO_AUTH_AES_GMAC:
+	case RTE_CRYPTO_AUTH_KASUMI_F9:
+	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
+	case RTE_CRYPTO_AUTH_ZUC_EIA3:
+		RTE_LOG(ERR, PMD, "Crypto: Unsupported auth alg %u\n",
+			auth_xform->algo);
+		goto out;
+	default:
+		RTE_LOG(ERR, PMD, "Crypto: Undefined Auth specified %u\n",
+			auth_xform->algo);
+		goto out;
+	}
+
+	switch (cipher_xform->algo) {
+	case RTE_CRYPTO_CIPHER_AES_CBC:
+		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
+		break;
+	case RTE_CRYPTO_CIPHER_3DES_CBC:
+		session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
+		break;
+	case RTE_CRYPTO_CIPHER_AES_CTR:
+		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR;
+		break;
+	case RTE_CRYPTO_CIPHER_NULL:
+	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
+	case RTE_CRYPTO_CIPHER_3DES_ECB:
+	case RTE_CRYPTO_CIPHER_AES_ECB:
+	case RTE_CRYPTO_CIPHER_KASUMI_F8:
+		RTE_LOG(ERR, PMD, "Crypto: Unsupported Cipher alg %u\n",
+			cipher_xform->algo);
+		goto out;
+	default:
+		RTE_LOG(ERR, PMD, "Crypto: Undefined Cipher specified %u\n",
+			cipher_xform->algo);
+		goto out;
+	}
+
+	if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
+		memset(&session->encap_pdb, 0, sizeof(struct ipsec_encap_pdb) +
+				sizeof(session->ip4_hdr));
+		session->ip4_hdr.ip_v = IPVERSION;
+		session->ip4_hdr.ip_hl = 5;
+		session->ip4_hdr.ip_len = rte_cpu_to_be_16(
+						sizeof(session->ip4_hdr));
+		session->ip4_hdr.ip_tos = ipsec_xform->tunnel.ipv4.dscp;
+		session->ip4_hdr.ip_id = 0;
+		session->ip4_hdr.ip_off = 0;
+		session->ip4_hdr.ip_ttl = ipsec_xform->tunnel.ipv4.ttl;
+		session->ip4_hdr.ip_p = (ipsec_xform->proto ==
+				RTE_SECURITY_IPSEC_SA_PROTO_ESP) ? IPPROTO_ESP
+				: IPPROTO_AH;
+		session->ip4_hdr.ip_sum = 0;
+		session->ip4_hdr.ip_src = ipsec_xform->tunnel.ipv4.src_ip;
+		session->ip4_hdr.ip_dst = ipsec_xform->tunnel.ipv4.dst_ip;
+		session->ip4_hdr.ip_sum = calc_chksum((uint16_t *)
+						(void *)&session->ip4_hdr,
+						sizeof(struct ip));
+
+		session->encap_pdb.options =
+			(IPVERSION << PDBNH_ESP_ENCAP_SHIFT) |
+			PDBOPTS_ESP_OIHI_PDB_INL |
+			PDBOPTS_ESP_IVSRC |
+			PDBHMO_ESP_ENCAP_DTTL;
+		session->encap_pdb.spi = ipsec_xform->spi;
+		session->encap_pdb.ip_hdr_len = sizeof(struct ip);
+
+		session->dir = DIR_ENC;
+	} else if (ipsec_xform->direction ==
+			RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
+		memset(&session->decap_pdb, 0, sizeof(struct ipsec_decap_pdb));
+		session->decap_pdb.options = sizeof(struct ip) << 16;
+		session->dir = DIR_DEC;
+	} else
+		goto out;
+	session->ctx_pool = internals->ctx_pool;
+	session->inq = dpaa_sec_attach_rxq(internals);
+	if (session->inq == NULL) {
+		PMD_DRV_LOG(ERR, "unable to attach sec queue");
+		goto out;
+	}
+
+
+	return 0;
+out:
+	rte_free(session->auth_key.data);
+	rte_free(session->cipher_key.data);
+	memset(session, 0, sizeof(dpaa_sec_session));
+	return -1;
+}
+
+static int
+dpaa_sec_security_session_create(void *dev,
+				 struct rte_security_session_conf *conf,
+				 struct rte_security_session *sess,
+				 struct rte_mempool *mempool)
+{
+	void *sess_private_data;
+	struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
+	int ret;
+
+	if (rte_mempool_get(mempool, &sess_private_data)) {
+		CDEV_LOG_ERR(
+			"Couldn't get object from session mempool");
+		return -ENOMEM;
+	}
+
+	switch (conf->protocol) {
+	case RTE_SECURITY_PROTOCOL_IPSEC:
+		ret = dpaa_sec_set_ipsec_session(cdev, conf,
+				sess_private_data);
+		break;
+	case RTE_SECURITY_PROTOCOL_MACSEC:
+		return -ENOTSUP;
+	default:
+		return -EINVAL;
+	}
+	if (ret != 0) {
+		PMD_DRV_LOG(ERR,
+			"DPAA PMD: failed to configure session parameters");
+
+		/* Return session to mempool */
+		rte_mempool_put(mempool, sess_private_data);
+		return ret;
+	}
+
+	set_sec_session_private_data(sess, sess_private_data);
+
+	return ret;
+}
+
+/** Clear the memory of session so it doesn't leave key material behind */
+static int
+dpaa_sec_security_session_destroy(void *dev __rte_unused,
+		struct rte_security_session *sess)
+{
+	PMD_INIT_FUNC_TRACE();
+	void *sess_priv = get_sec_session_private_data(sess);
+
+	dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;
+
+	if (sess_priv) {
+		struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
+
+		rte_free(s->cipher_key.data);
+		rte_free(s->auth_key.data);
+		memset(s, 0, sizeof(dpaa_sec_session));
+		set_sec_session_private_data(sess, NULL);
+		rte_mempool_put(sess_mp, sess_priv);
+	}
+	return 0;
+}
+
+
+static int
 dpaa_sec_dev_configure(struct rte_cryptodev *dev __rte_unused,
 		       struct rte_cryptodev_config *config __rte_unused)
 {
@@ -1408,6 +1735,21 @@ static struct rte_cryptodev_ops crypto_ops = {
 	.qp_detach_session    = dpaa_sec_qp_detach_sess,
 };
 
+static const struct rte_security_capability *
+dpaa_sec_capabilities_get(void *device __rte_unused)
+{
+	return dpaa_sec_security_cap;
+}
+
+struct rte_security_ops dpaa_sec_security_ops = {
+	.session_create = dpaa_sec_security_session_create,
+	.session_update = NULL,
+	.session_stats_get = NULL,
+	.session_destroy = dpaa_sec_security_session_destroy,
+	.set_pkt_metadata = NULL,
+	.capabilities_get = dpaa_sec_capabilities_get
+};
+
 static int
 dpaa_sec_uninit(struct rte_cryptodev *dev)
 {
@@ -1416,6 +1758,8 @@ dpaa_sec_uninit(struct rte_cryptodev *dev)
 	if (dev == NULL)
 		return -ENODEV;
 
+	rte_free(dev->security_ctx);
+
 	rte_mempool_free(internals->ctx_pool);
 	rte_free(internals);
 
@@ -1429,6 +1773,7 @@ static int
 dpaa_sec_dev_init(struct rte_cryptodev *cryptodev)
 {
 	struct dpaa_sec_dev_private *internals;
+	struct rte_security_ctx *security_instance;
 	struct dpaa_sec_qp *qp;
 	uint32_t i, flags;
 	int ret;
@@ -1443,12 +1788,33 @@ dpaa_sec_dev_init(struct rte_cryptodev *cryptodev)
 	cryptodev->dequeue_burst = dpaa_sec_dequeue_burst;
 	cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
 			RTE_CRYPTODEV_FF_HW_ACCELERATED |
-			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING;
+			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
+			RTE_CRYPTODEV_FF_SECURITY;
 
 	internals = cryptodev->data->dev_private;
 	internals->max_nb_queue_pairs = RTE_DPAA_MAX_NB_SEC_QPS;
 	internals->max_nb_sessions = RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS;
 
+	/*
+	 * For secondary processes, we don't initialise any further as primary
+	 * has already done this work. Only check we don't need a different
+	 * RX function
+	 */
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+		PMD_INIT_LOG(DEBUG, "Device already init by primary process");
+		return 0;
+	}
+
+	/* Initialize security_ctx only for primary process*/
+	security_instance = rte_malloc("rte_security_instances_ops",
+				sizeof(struct rte_security_ctx), 0);
+	if (security_instance == NULL)
+		return -ENOMEM;
+	security_instance->device = (void *)cryptodev;
+	security_instance->ops = &dpaa_sec_security_ops;
+	security_instance->sess_cnt = 0;
+	cryptodev->security_ctx = security_instance;
+
 	for (i = 0; i < internals->max_nb_queue_pairs; i++) {
 		/* init qman fq for queue pair */
 		qp = &internals->qps[i];
diff --git a/drivers/crypto/dpaa_sec/dpaa_sec.h b/drivers/crypto/dpaa_sec/dpaa_sec.h
index 93369e4..578c46a 100644
--- a/drivers/crypto/dpaa_sec/dpaa_sec.h
+++ b/drivers/crypto/dpaa_sec/dpaa_sec.h
@@ -91,7 +91,8 @@ typedef struct dpaa_sec_session_entry {
 	uint8_t dir;         /*!< Operation Direction */
 	enum rte_crypto_cipher_algorithm cipher_alg; /*!< Cipher Algorithm*/
 	enum rte_crypto_auth_algorithm auth_alg; /*!< Authentication Algorithm*/
-	enum rte_crypto_aead_algorithm aead_alg; /*!< Authentication Algorithm*/
+	enum rte_crypto_aead_algorithm aead_alg; /*!< AEAD Algorithm*/
+	enum rte_security_session_protocol proto_alg; /*!< Security Algorithm*/
 	union {
 		struct {
 			uint8_t *data;	/**< pointer to key data */
@@ -114,6 +115,9 @@ typedef struct dpaa_sec_session_entry {
 	} iv;	/**< Initialisation vector parameters */
 	uint16_t auth_only_len; /*!< Length of data for Auth only */
 	uint32_t digest_length;
+	struct ipsec_encap_pdb encap_pdb;
+	struct ip ip4_hdr;
+	struct ipsec_decap_pdb decap_pdb;
 	struct dpaa_sec_qp *qp;
 	struct qman_fq *inq;
 	struct sec_cdb cdb;	/**< cmd block associated with qp */
@@ -378,4 +382,60 @@ static const struct rte_cryptodev_capabilities dpaa_sec_capabilities[] = {
 	RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
 };
 
+static const struct rte_security_capability dpaa_sec_security_cap[] = {
+	{ /* IPsec Lookaside Protocol offload ESP Transport Egress */
+		.action = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
+		.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
+		.ipsec = {
+			.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
+			.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
+			.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
+			.options = { 0 }
+		},
+		.crypto_capabilities = dpaa_sec_capabilities
+	},
+	{ /* IPsec Lookaside Protocol offload ESP Tunnel Ingress */
+		.action = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
+		.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
+		.ipsec = {
+			.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
+			.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
+			.direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
+			.options = { 0 }
+		},
+		.crypto_capabilities = dpaa_sec_capabilities
+	},
+	{
+		.action = RTE_SECURITY_ACTION_TYPE_NONE
+	}
+};
+
+/**
+ * Checksum
+ *
+ * @param buffer buffer over which the checksum is computed
+ * @param len    buffer length in bytes
+ *
+ * @return checksum value in host cpu order
+ */
+static inline uint16_t
+calc_chksum(void *buffer, int len)
+{
+	uint16_t *buf = (uint16_t *)buffer;
+	uint32_t sum = 0;
+	uint16_t result;
+
+	for (sum = 0; len > 1; len -= 2)
+		sum += *buf++;
+
+	if (len == 1)
+		sum += *(unsigned char *)buf;
+
+	sum = (sum >> 16) + (sum & 0xFFFF);
+	sum += (sum >> 16);
+	result = ~sum;
+
+	return  result;
+}
+
 #endif /* _DPAA_SEC_H_ */
-- 
2.9.3

^ permalink raw reply	[flat|nested] 24+ messages in thread
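On the application side, the offload added above is reached through the
rte_security API rather than plain cryptodev sessions. Below is a hedged
sketch of creating a lookaside IPsec ESP tunnel session against a
security-capable cryptodev such as this PMD, using the rte_security API as it
stood in this release cycle; the SPI and xform chain are illustrative
placeholders, not values taken from the patch, and tunnel header fields and
error handling are trimmed.

#include <rte_cryptodev.h>
#include <rte_security.h>

/* Illustrative only: build an egress ESP tunnel session. */
static struct rte_security_session *
create_ipsec_session(uint8_t dev_id, struct rte_mempool *sess_mp,
		     struct rte_crypto_sym_xform *xform_chain)
{
	struct rte_security_ctx *ctx =
		rte_cryptodev_get_sec_ctx(dev_id);
	struct rte_security_session_conf conf = {
		.action_type = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
		.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
		.ipsec = {
			.spi = 0x1234,	/* placeholder SPI */
			.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
			.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
			.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
		},
		.crypto_xform = xform_chain,	/* cipher + auth chain */
	};

	return rte_security_session_create(ctx, &conf, sess_mp);
}

Ops are then marked with sess_type RTE_CRYPTO_OP_SECURITY_SESSION and
attached via rte_security_attach_session() before being enqueued, which is
the path serviced by the build_proto() helper added above.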

* Re: [dpdk-dev] [PATCH v3 0/3] crypto/dpaa_sec: performance optimizations
  2018-01-15  6:35   ` [dpdk-dev] [PATCH v3 0/3] crypto/dpaa_sec: performance optimizations Akhil Goyal
                       ` (2 preceding siblings ...)
  2018-01-15  6:35     ` [dpdk-dev] [PATCH v3 3/3] crypto/dpaa_sec: support ipsec protocol offload Akhil Goyal
@ 2018-01-15 14:46     ` De Lara Guarch, Pablo
  3 siblings, 0 replies; 24+ messages in thread
From: De Lara Guarch, Pablo @ 2018-01-15 14:46 UTC (permalink / raw)
  To: Akhil Goyal, dev; +Cc: hemant.agrawal



> -----Original Message-----
> From: Akhil Goyal [mailto:akhil.goyal@nxp.com]
> Sent: Monday, January 15, 2018 6:36 AM
> To: dev@dpdk.org
> Cc: De Lara Guarch, Pablo <pablo.de.lara.guarch@intel.com>;
> hemant.agrawal@nxp.com; Akhil Goyal <akhil.goyal@nxp.com>
> Subject: [PATCH v3 0/3] crypto/dpaa_sec: performance optimizations
> 
> Following changes are added to improve performance.
> 1. optimize virtual to physical address conversion 2. support for multiple
> sessions in a single queue pair 3. support for ipsec protocol offload
> 
> changes in v3:
>  - updated release notes in patch 3/3 for ipsec protocol offload.
> changes in v2:
>  - incorporated comments from Hemant
>  - split the patchset to remove dependency on bus/dpaa patch
> Note:
> 1. This patchset is now independent of the patches on net subtree.
> 2. bus/dpaa patch is already applied to net subtree.
> 3. The last patch in v1 of this series will be sent separately as it will
>    be dependent on the net subtree.
> 
> Akhil Goyal (1):
>   crypto/dpaa_sec: support ipsec protocol offload
> 
> Hemant Agrawal (2):
>   crypto/dpaa_sec: optimize virt to phy conversion
>   crypto/dpaa_sec: support multiple sessions per qp

Applied to dpdk-next-crypto.
Thanks,

Pablo

^ permalink raw reply	[flat|nested] 24+ messages in thread

* Re: [dpdk-dev] [PATCH v2] crypto/dpaa_sec: rewrite Rx/Tx path
  2018-01-11 11:44   ` [dpdk-dev] [PATCH v2] " Akhil Goyal
@ 2018-01-17 16:54     ` De Lara Guarch, Pablo
  0 siblings, 0 replies; 24+ messages in thread
From: De Lara Guarch, Pablo @ 2018-01-17 16:54 UTC (permalink / raw)
  To: Akhil Goyal, dev; +Cc: hemant.agrawal, Nipun Gupta



> -----Original Message-----
> From: Akhil Goyal [mailto:akhil.goyal@nxp.com]
> Sent: Thursday, January 11, 2018 11:44 AM
> To: dev@dpdk.org
> Cc: De Lara Guarch, Pablo <pablo.de.lara.guarch@intel.com>;
> hemant.agrawal@nxp.com; Akhil Goyal <akhil.goyal@nxp.com>; Nipun
> Gupta <nipun.gupta@nxp.com>
> Subject: [PATCH v2] crypto/dpaa_sec: rewrite Rx/Tx path
> 
> Rx and Tx patch are rewritten with improved internal APIs to improve
> performance.
> 
> Signed-off-by: Akhil Goyal <akhil.goyal@nxp.com>
> Signed-off-by: Nipun Gupta <nipun.gupta@nxp.com>

Applied to dpdk-next-crypto.
Thanks,

Pablo

^ permalink raw reply	[flat|nested] 24+ messages in thread

end of thread, other threads:[~2018-01-17 16:54 UTC | newest]

Thread overview: 24+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2017-12-13 13:56 [dpdk-dev] [PATCH 0/5] crypto/dpaa_sec: performance optimizations Akhil Goyal
2017-12-13 13:56 ` [dpdk-dev] [PATCH 1/5] crypto/dpaa_sec: optimize virt to phy conversion Akhil Goyal
2017-12-13 13:56 ` [dpdk-dev] [PATCH 2/5] crypto/dpaa_sec: support multiple sessions per qp Akhil Goyal
2017-12-13 13:56 ` [dpdk-dev] [PATCH 3/5] crypto/dpaa_sec: support ipsec protocol offload Akhil Goyal
2017-12-19 12:59   ` Hemant Agrawal
2017-12-13 13:56 ` [dpdk-dev] [PATCH 4/5] bus/dpaa: support for enqueue frames of multiple queues Akhil Goyal
2017-12-19 11:32   ` Hemant Agrawal
2017-12-13 13:56 ` [dpdk-dev] [PATCH 5/5] crypto/dpaa_sec: rewrite Rx/Tx path Akhil Goyal
2017-12-19 12:45   ` Hemant Agrawal
2018-01-08 11:13     ` De Lara Guarch, Pablo
2018-01-08 11:16       ` Akhil Goyal
2018-01-08 11:24         ` De Lara Guarch, Pablo
2018-01-11 11:44   ` [dpdk-dev] [PATCH v2] " Akhil Goyal
2018-01-17 16:54     ` De Lara Guarch, Pablo
2018-01-11 11:33 ` [dpdk-dev] [PATCH v2 0/3] crypto/dpaa_sec: performance optimizations Akhil Goyal
2018-01-11 11:33   ` [dpdk-dev] [PATCH v2 1/3] crypto/dpaa_sec: optimize virt to phy conversion Akhil Goyal
2018-01-11 11:33   ` [dpdk-dev] [PATCH v2 2/3] crypto/dpaa_sec: support multiple sessions per qp Akhil Goyal
2018-01-11 11:33   ` [dpdk-dev] [PATCH v2 3/3] crypto/dpaa_sec: support ipsec protocol offload Akhil Goyal
2018-01-11 14:13     ` De Lara Guarch, Pablo
2018-01-15  6:35   ` [dpdk-dev] [PATCH v3 0/3] crypto/dpaa_sec: performance optimizations Akhil Goyal
2018-01-15  6:35     ` [dpdk-dev] [PATCH v3 1/3] crypto/dpaa_sec: optimize virt to phy conversion Akhil Goyal
2018-01-15  6:35     ` [dpdk-dev] [PATCH v3 2/3] crypto/dpaa_sec: support multiple sessions per qp Akhil Goyal
2018-01-15  6:35     ` [dpdk-dev] [PATCH v3 3/3] crypto/dpaa_sec: support ipsec protocol offload Akhil Goyal
2018-01-15 14:46     ` [dpdk-dev] [PATCH v3 0/3] crypto/dpaa_sec: performance optimizations De Lara Guarch, Pablo
