DPDK patches and discussions
* [dpdk-dev] [PATCH 1/2] crypto/dpaa2_sec: support for scatter gather
@ 2018-01-19 11:51 Akhil Goyal
  2018-01-19 11:51 ` [dpdk-dev] [PATCH 2/2] crypto/dpaa_sec: " Akhil Goyal
                   ` (3 more replies)
  0 siblings, 4 replies; 10+ messages in thread
From: Akhil Goyal @ 2018-01-19 11:51 UTC (permalink / raw)
  To: dev; +Cc: pablo.de.lara.guarch, hemant.agrawal, alok.makhariya, Akhil Goyal

Signed-off-by: Alok Makhariya <alok.makhariya@nxp.com>
Signed-off-by: Akhil Goyal <akhil.goyal@nxp.com>
---
 doc/guides/cryptodevs/features/default.ini   |   1 +
 doc/guides/cryptodevs/features/dpaa2_sec.ini |   1 +
 drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c  | 588 +++++++++++++++++++++++++--
 test/test/test_cryptodev.c                   |  10 +
 test/test/test_cryptodev_aes_test_vectors.h  |  22 +-
 test/test/test_cryptodev_hash_test_vectors.h |  14 +
 6 files changed, 596 insertions(+), 40 deletions(-)

diff --git a/doc/guides/cryptodevs/features/default.ini b/doc/guides/cryptodevs/features/default.ini
index 18d66cb..728ce3b 100644
--- a/doc/guides/cryptodevs/features/default.ini
+++ b/doc/guides/cryptodevs/features/default.ini
@@ -18,6 +18,7 @@ CPU AVX512             =
 CPU AESNI              =
 CPU NEON               =
 CPU ARM CE             =
+Mbuf scatter gather    =
 
 ;
 ; Supported crypto algorithms of a default crypto driver.
diff --git a/doc/guides/cryptodevs/features/dpaa2_sec.ini b/doc/guides/cryptodevs/features/dpaa2_sec.ini
index 8fd07d6..68c9960 100644
--- a/doc/guides/cryptodevs/features/dpaa2_sec.ini
+++ b/doc/guides/cryptodevs/features/dpaa2_sec.ini
@@ -8,6 +8,7 @@ Symmetric crypto       = Y
 Sym operation chaining = Y
 HW Accelerated         = Y
 Protocol offload       = Y
+Mbuf scatter gather    = Y
 
 ;
 ; Supported crypto algorithms of the 'dpaa2_sec' crypto driver.
diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c b/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
index 977c49a..bafe754 100644
--- a/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
@@ -48,6 +48,7 @@
 #define FLE_POOL_NUM_BUFS	32000
 #define FLE_POOL_BUF_SIZE	256
 #define FLE_POOL_CACHE_SIZE	512
+#define FLE_SG_MEM_SIZE		2048
 #define SEC_FLC_DHR_OUTBOUND	-114
 #define SEC_FLC_DHR_INBOUND	0
 
@@ -86,6 +87,153 @@ build_proto_fd(dpaa2_sec_session *sess,
 }
 
 static inline int
+build_authenc_gcm_sg_fd(dpaa2_sec_session *sess,
+		 struct rte_crypto_op *op,
+		 struct qbman_fd *fd, __rte_unused uint16_t bpid)
+{
+	struct rte_crypto_sym_op *sym_op = op->sym;
+	struct ctxt_priv *priv = sess->ctxt;
+	struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
+	struct sec_flow_context *flc;
+	uint32_t auth_only_len = sess->ext_params.aead_ctxt.auth_only_len;
+	int icv_len = sess->digest_length;
+	uint8_t *old_icv;
+	struct rte_mbuf *mbuf;
+	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
+			sess->iv.offset);
+
+	PMD_INIT_FUNC_TRACE();
+
+	if (sym_op->m_dst)
+		mbuf = sym_op->m_dst;
+	else
+		mbuf = sym_op->m_src;
+
+	/* first FLE entry used to store mbuf and session ctxt */
+	fle = (struct qbman_fle *)rte_malloc(NULL, FLE_SG_MEM_SIZE,
+			RTE_CACHE_LINE_SIZE);
+	if (unlikely(!fle)) {
+		RTE_LOG(ERR, PMD, "GCM SG: Memory alloc failed for SGE\n");
+		return -1;
+	}
+	memset(fle, 0, FLE_SG_MEM_SIZE);
+	DPAA2_SET_FLE_ADDR(fle, DPAA2_OP_VADDR_TO_IOVA(op));
+	DPAA2_FLE_SAVE_CTXT(fle, priv);
+
+	op_fle = fle + 1;
+	ip_fle = fle + 2;
+	sge = fle + 3;
+
+	/* Save the shared descriptor */
+	flc = &priv->flc_desc[0].flc;
+
+	/* Configure FD as a FRAME LIST */
+	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
+	DPAA2_SET_FD_COMPOUND_FMT(fd);
+	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
+
+	PMD_TX_LOG(DEBUG, "GCM SG: auth_off: 0x%x/length %d, digest-len=%d\n"
+		   "iv-len=%d data_off: 0x%x\n",
+		   sym_op->aead.data.offset,
+		   sym_op->aead.data.length,
+		   sym_op->aead.digest.length,
+		   sess->iv.length,
+		   sym_op->m_src->data_off);
+
+	/* Configure Output FLE with Scatter/Gather Entry */
+	DPAA2_SET_FLE_SG_EXT(op_fle);
+	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));
+
+	if (auth_only_len)
+		DPAA2_SET_FLE_INTERNAL_JD(op_fle, auth_only_len);
+
+	op_fle->length = (sess->dir == DIR_ENC) ?
+			(sym_op->aead.data.length + icv_len + auth_only_len) :
+			sym_op->aead.data.length + auth_only_len;
+
+	/* Configure Output SGE for Encap/Decap */
+	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
+	DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off + sym_op->aead.data.offset -
+								auth_only_len);
+	sge->length = mbuf->data_len - sym_op->aead.data.offset + auth_only_len;
+
+	mbuf = mbuf->next;
+	/* o/p segs */
+	while (mbuf) {
+		sge++;
+		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
+		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
+		sge->length = mbuf->data_len;
+		mbuf = mbuf->next;
+	}
+	sge->length -= icv_len;
+
+	if (sess->dir == DIR_ENC) {
+		sge++;
+		DPAA2_SET_FLE_ADDR(sge,
+				DPAA2_VADDR_TO_IOVA(sym_op->aead.digest.data));
+		sge->length = icv_len;
+	}
+	DPAA2_SET_FLE_FIN(sge);
+
+	sge++;
+	mbuf = sym_op->m_src;
+
+	/* Configure Input FLE with Scatter/Gather Entry */
+	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
+	DPAA2_SET_FLE_SG_EXT(ip_fle);
+	DPAA2_SET_FLE_FIN(ip_fle);
+	ip_fle->length = (sess->dir == DIR_ENC) ?
+		(sym_op->aead.data.length + sess->iv.length + auth_only_len) :
+		(sym_op->aead.data.length + sess->iv.length + auth_only_len +
+		 icv_len);
+
+	/* Configure Input SGE for Encap/Decap */
+	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(IV_ptr));
+	sge->length = sess->iv.length;
+
+	sge++;
+	if (auth_only_len) {
+		DPAA2_SET_FLE_ADDR(sge,
+				DPAA2_VADDR_TO_IOVA(sym_op->aead.aad.data));
+		sge->length = auth_only_len;
+		sge++;
+	}
+
+	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
+	DPAA2_SET_FLE_OFFSET(sge, sym_op->aead.data.offset +
+				mbuf->data_off);
+	sge->length = mbuf->data_len - sym_op->aead.data.offset;
+
+	mbuf = mbuf->next;
+	/* i/p segs */
+	while (mbuf) {
+		sge++;
+		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
+		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
+		sge->length = mbuf->data_len;
+		mbuf = mbuf->next;
+	}
+
+	if (sess->dir == DIR_DEC) {
+		sge++;
+		old_icv = (uint8_t *)(sge + 1);
+		memcpy(old_icv,	sym_op->aead.digest.data, icv_len);
+		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
+		sge->length = icv_len;
+	}
+
+	DPAA2_SET_FLE_FIN(sge);
+	if (auth_only_len) {
+		DPAA2_SET_FLE_INTERNAL_JD(ip_fle, auth_only_len);
+		DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
+	}
+	DPAA2_SET_FD_LEN(fd, ip_fle->length);
+
+	return 0;
+}
+
+static inline int
 build_authenc_gcm_fd(dpaa2_sec_session *sess,
 		     struct rte_crypto_op *op,
 		     struct qbman_fd *fd, uint16_t bpid)
@@ -116,7 +264,7 @@ build_authenc_gcm_fd(dpaa2_sec_session *sess,
 	 */
 	retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
 	if (retval) {
-		RTE_LOG(ERR, PMD, "Memory alloc failed for SGE\n");
+		RTE_LOG(ERR, PMD, "GCM: Memory alloc failed for SGE\n");
 		return -1;
 	}
 	memset(fle, 0, FLE_POOL_BUF_SIZE);
@@ -149,7 +297,7 @@ build_authenc_gcm_fd(dpaa2_sec_session *sess,
 	DPAA2_SET_FD_COMPOUND_FMT(fd);
 	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
 
-	PMD_TX_LOG(DEBUG, "auth_off: 0x%x/length %d, digest-len=%d\n"
+	PMD_TX_LOG(DEBUG, "GCM: auth_off: 0x%x/length %d, digest-len=%d\n"
 		   "iv-len=%d data_off: 0x%x\n",
 		   sym_op->aead.data.offset,
 		   sym_op->aead.data.length,
@@ -234,6 +382,151 @@ build_authenc_gcm_fd(dpaa2_sec_session *sess,
 }
 
 static inline int
+build_authenc_sg_fd(dpaa2_sec_session *sess,
+		 struct rte_crypto_op *op,
+		 struct qbman_fd *fd, __rte_unused uint16_t bpid)
+{
+	struct rte_crypto_sym_op *sym_op = op->sym;
+	struct ctxt_priv *priv = sess->ctxt;
+	struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
+	struct sec_flow_context *flc;
+	uint32_t auth_only_len = sym_op->auth.data.length -
+				sym_op->cipher.data.length;
+	int icv_len = sess->digest_length;
+	uint8_t *old_icv;
+	struct rte_mbuf *mbuf;
+	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
+			sess->iv.offset);
+
+	PMD_INIT_FUNC_TRACE();
+
+	if (sym_op->m_dst)
+		mbuf = sym_op->m_dst;
+	else
+		mbuf = sym_op->m_src;
+
+	/* first FLE entry used to store mbuf and session ctxt */
+	fle = (struct qbman_fle *)rte_malloc(NULL, FLE_SG_MEM_SIZE,
+			RTE_CACHE_LINE_SIZE);
+	if (unlikely(!fle)) {
+		RTE_LOG(ERR, PMD, "AUTHENC SG: Memory alloc failed for SGE\n");
+		return -1;
+	}
+	memset(fle, 0, FLE_SG_MEM_SIZE);
+	DPAA2_SET_FLE_ADDR(fle, DPAA2_OP_VADDR_TO_IOVA(op));
+	DPAA2_FLE_SAVE_CTXT(fle, priv);
+
+	op_fle = fle + 1;
+	ip_fle = fle + 2;
+	sge = fle + 3;
+
+	/* Save the shared descriptor */
+	flc = &priv->flc_desc[0].flc;
+
+	/* Configure FD as a FRAME LIST */
+	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
+	DPAA2_SET_FD_COMPOUND_FMT(fd);
+	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
+
+	PMD_TX_LOG(DEBUG,
+			"AUTHENC SG: auth_off: 0x%x/length %d, digest-len=%d\n"
+			"cipher_off: 0x%x/length %d, iv-len=%d data_off: 0x%x\n",
+		   sym_op->auth.data.offset,
+		   sym_op->auth.data.length,
+		   sym_op->auth.digest.length,
+		   sym_op->cipher.data.offset,
+		   sym_op->cipher.data.length,
+		   sym_op->cipher.iv.length,
+		   sym_op->m_src->data_off);
+
+	/* Configure Output FLE with Scatter/Gather Entry */
+	DPAA2_SET_FLE_SG_EXT(op_fle);
+	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));
+
+	if (auth_only_len)
+		DPAA2_SET_FLE_INTERNAL_JD(op_fle, auth_only_len);
+
+	op_fle->length = (sess->dir == DIR_ENC) ?
+			(sym_op->cipher.data.length + icv_len) :
+			sym_op->cipher.data.length;
+
+	/* Configure Output SGE for Encap/Decap */
+	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
+	DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off + sym_op->auth.data.offset);
+	sge->length = mbuf->data_len - sym_op->auth.data.offset;
+
+	mbuf = mbuf->next;
+	/* o/p segs */
+	while (mbuf) {
+		sge++;
+		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
+		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
+		sge->length = mbuf->data_len;
+		mbuf = mbuf->next;
+	}
+	sge->length -= icv_len;
+
+	if (sess->dir == DIR_ENC) {
+		sge++;
+		DPAA2_SET_FLE_ADDR(sge,
+				DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
+		sge->length = icv_len;
+	}
+	DPAA2_SET_FLE_FIN(sge);
+
+	sge++;
+	mbuf = sym_op->m_src;
+
+	/* Configure Input FLE with Scatter/Gather Entry */
+	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
+	DPAA2_SET_FLE_SG_EXT(ip_fle);
+	DPAA2_SET_FLE_FIN(ip_fle);
+	ip_fle->length = (sess->dir == DIR_ENC) ?
+			(sym_op->auth.data.length + sess->iv.length) :
+			(sym_op->auth.data.length + sess->iv.length +
+			 icv_len);
+
+	/* Configure Input SGE for Encap/Decap */
+	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
+	sge->length = sess->iv.length;
+
+	sge++;
+	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
+	DPAA2_SET_FLE_OFFSET(sge, sym_op->auth.data.offset +
+				mbuf->data_off);
+	sge->length = mbuf->data_len - sym_op->auth.data.offset;
+
+	mbuf = mbuf->next;
+	/* i/p segs */
+	while (mbuf) {
+		sge++;
+		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
+		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
+		sge->length = mbuf->data_len;
+		mbuf = mbuf->next;
+	}
+	sge->length -= icv_len;
+
+	if (sess->dir == DIR_DEC) {
+		sge++;
+		old_icv = (uint8_t *)(sge + 1);
+		memcpy(old_icv,	sym_op->auth.digest.data,
+		       icv_len);
+		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
+		sge->length = icv_len;
+	}
+
+	DPAA2_SET_FLE_FIN(sge);
+	if (auth_only_len) {
+		DPAA2_SET_FLE_INTERNAL_JD(ip_fle, auth_only_len);
+		DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
+	}
+	DPAA2_SET_FD_LEN(fd, ip_fle->length);
+
+	return 0;
+}
+
+static inline int
 build_authenc_fd(dpaa2_sec_session *sess,
 		 struct rte_crypto_op *op,
 		 struct qbman_fd *fd, uint16_t bpid)
@@ -298,7 +591,7 @@ build_authenc_fd(dpaa2_sec_session *sess,
 	DPAA2_SET_FD_COMPOUND_FMT(fd);
 	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
 
-	PMD_TX_LOG(DEBUG, "auth_off: 0x%x/length %d, digest-len=%d\n"
+	PMD_TX_LOG(DEBUG, "AUTHENC: auth_off: 0x%x/length %d, digest-len=%d\n"
 		   "cipher_off: 0x%x/length %d, iv-len=%d data_off: 0x%x\n",
 		   sym_op->auth.data.offset,
 		   sym_op->auth.data.length,
@@ -374,6 +667,86 @@ build_authenc_fd(dpaa2_sec_session *sess,
 	return 0;
 }
 
+static inline int build_auth_sg_fd(
+		dpaa2_sec_session *sess,
+		struct rte_crypto_op *op,
+		struct qbman_fd *fd,
+		__rte_unused uint16_t bpid)
+{
+	struct rte_crypto_sym_op *sym_op = op->sym;
+	struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
+	struct sec_flow_context *flc;
+	struct ctxt_priv *priv = sess->ctxt;
+	uint8_t *old_digest;
+	struct rte_mbuf *mbuf;
+
+	PMD_INIT_FUNC_TRACE();
+
+	mbuf = sym_op->m_src;
+	fle = (struct qbman_fle *)rte_malloc(NULL, FLE_SG_MEM_SIZE,
+			RTE_CACHE_LINE_SIZE);
+	if (unlikely(!fle)) {
+		RTE_LOG(ERR, PMD, "AUTH SG: Memory alloc failed for SGE\n");
+		return -1;
+	}
+	memset(fle, 0, FLE_SG_MEM_SIZE);
+	/* first FLE entry used to store mbuf and session ctxt */
+	DPAA2_SET_FLE_ADDR(fle, DPAA2_OP_VADDR_TO_IOVA(op));
+	DPAA2_FLE_SAVE_CTXT(fle, priv);
+	op_fle = fle + 1;
+	ip_fle = fle + 2;
+	sge = fle + 3;
+
+	flc = &priv->flc_desc[DESC_INITFINAL].flc;
+	/* sg FD */
+	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
+	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
+	DPAA2_SET_FD_COMPOUND_FMT(fd);
+
+	/* o/p fle */
+	DPAA2_SET_FLE_ADDR(op_fle,
+				DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
+	op_fle->length = sess->digest_length;
+
+	/* i/p fle */
+	DPAA2_SET_FLE_SG_EXT(ip_fle);
+	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
+	/* i/p 1st seg */
+	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
+	DPAA2_SET_FLE_OFFSET(sge, sym_op->auth.data.offset + mbuf->data_off);
+	sge->length = mbuf->data_len - sym_op->auth.data.offset;
+
+	/* i/p segs */
+	mbuf = mbuf->next;
+	while (mbuf) {
+		sge++;
+		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
+		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
+		sge->length = mbuf->data_len;
+		mbuf = mbuf->next;
+	}
+	if (sess->dir == DIR_ENC) {
+		/* Digest calculation case */
+		sge->length -= sess->digest_length;
+		ip_fle->length = sym_op->auth.data.length;
+	} else {
+		/* Digest verification case */
+		sge++;
+		old_digest = (uint8_t *)(sge + 1);
+		rte_memcpy(old_digest, sym_op->auth.digest.data,
+			   sess->digest_length);
+		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_digest));
+		sge->length = sess->digest_length;
+		ip_fle->length = sym_op->auth.data.length +
+				sess->digest_length;
+	}
+	DPAA2_SET_FLE_FIN(sge);
+	DPAA2_SET_FLE_FIN(ip_fle);
+	DPAA2_SET_FD_LEN(fd, ip_fle->length);
+
+	return 0;
+}
+
 static inline int
 build_auth_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
 	      struct qbman_fd *fd, uint16_t bpid)
@@ -389,7 +762,7 @@ build_auth_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
 
 	retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
 	if (retval) {
-		RTE_LOG(ERR, PMD, "Memory alloc failed for SGE\n");
+		RTE_LOG(ERR, PMD, "AUTH Memory alloc failed for SGE\n");
 		return -1;
 	}
 	memset(fle, 0, FLE_POOL_BUF_SIZE);
@@ -465,6 +838,123 @@ build_auth_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
 }
 
 static int
+build_cipher_sg_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
+		struct qbman_fd *fd, __rte_unused uint16_t bpid)
+{
+	struct rte_crypto_sym_op *sym_op = op->sym;
+	struct qbman_fle *ip_fle, *op_fle, *sge, *fle;
+	struct sec_flow_context *flc;
+	struct ctxt_priv *priv = sess->ctxt;
+	struct rte_mbuf *mbuf;
+	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
+			sess->iv.offset);
+
+	PMD_INIT_FUNC_TRACE();
+
+	if (sym_op->m_dst)
+		mbuf = sym_op->m_dst;
+	else
+		mbuf = sym_op->m_src;
+
+	fle = (struct qbman_fle *)rte_malloc(NULL, FLE_SG_MEM_SIZE,
+			RTE_CACHE_LINE_SIZE);
+	if (!fle) {
+		RTE_LOG(ERR, PMD, "CIPHER SG: Memory alloc failed for SGE\n");
+		return -1;
+	}
+	memset(fle, 0, FLE_SG_MEM_SIZE);
+	/* first FLE entry used to store mbuf and session ctxt */
+	DPAA2_SET_FLE_ADDR(fle, DPAA2_OP_VADDR_TO_IOVA(op));
+	DPAA2_FLE_SAVE_CTXT(fle, priv);
+
+	op_fle = fle + 1;
+	ip_fle = fle + 2;
+	sge = fle + 3;
+
+	flc = &priv->flc_desc[0].flc;
+
+	PMD_TX_LOG(DEBUG,
+			"CIPHER SG: cipher_off: 0x%x/length %d,ivlen=%d data_off: 0x%x",
+		   sym_op->cipher.data.offset,
+		   sym_op->cipher.data.length,
+		   sym_op->cipher.iv.length,
+		   sym_op->m_src->data_off);
+
+	/* o/p fle */
+	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));
+	op_fle->length = sym_op->cipher.data.length;
+	DPAA2_SET_FLE_SG_EXT(op_fle);
+
+	/* o/p 1st seg */
+	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
+	DPAA2_SET_FLE_OFFSET(sge, sym_op->cipher.data.offset + mbuf->data_off);
+	sge->length = mbuf->data_len - sym_op->cipher.data.offset;
+
+	mbuf = mbuf->next;
+	/* o/p segs */
+	while (mbuf) {
+		sge++;
+		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
+		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
+		sge->length = mbuf->data_len;
+		mbuf = mbuf->next;
+	}
+	DPAA2_SET_FLE_FIN(sge);
+
+	PMD_TX_LOG(DEBUG,
+			"CIPHER SG: 1 - flc = %p, fle = %p FLEaddr = %x-%x, length %d",
+			flc, fle, fle->addr_hi, fle->addr_lo,
+			fle->length);
+
+	/* i/p fle */
+	mbuf = sym_op->m_src;
+	sge++;
+	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
+	ip_fle->length = sess->iv.length + sym_op->cipher.data.length;
+	DPAA2_SET_FLE_SG_EXT(ip_fle);
+
+	/* i/p IV */
+	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
+	DPAA2_SET_FLE_OFFSET(sge, 0);
+	sge->length = sess->iv.length;
+
+	sge++;
+
+	/* i/p 1st seg */
+	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
+	DPAA2_SET_FLE_OFFSET(sge, sym_op->cipher.data.offset +
+			     mbuf->data_off);
+	sge->length = mbuf->data_len - sym_op->cipher.data.offset;
+
+	mbuf = mbuf->next;
+	/* i/p segs */
+	while (mbuf) {
+		sge++;
+		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
+		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
+		sge->length = mbuf->data_len;
+		mbuf = mbuf->next;
+	}
+	DPAA2_SET_FLE_FIN(sge);
+	DPAA2_SET_FLE_FIN(ip_fle);
+
+	/* sg fd */
+	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
+	DPAA2_SET_FD_LEN(fd, ip_fle->length);
+	DPAA2_SET_FD_COMPOUND_FMT(fd);
+	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
+
+	PMD_TX_LOG(DEBUG,
+			"CIPHER SG: fdaddr =%p bpid =%d meta =%d off =%d, len =%d",
+		   (void *)DPAA2_GET_FD_ADDR(fd),
+		   DPAA2_GET_FD_BPID(fd),
+		   rte_dpaa2_bpid_info[bpid].meta_data_size,
+		   DPAA2_GET_FD_OFFSET(fd),
+		   DPAA2_GET_FD_LEN(fd));
+	return 0;
+}
+
+static int
 build_cipher_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
 		struct qbman_fd *fd, uint16_t bpid)
 {
@@ -486,7 +976,7 @@ build_cipher_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
 
 	retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
 	if (retval) {
-		RTE_LOG(ERR, PMD, "Memory alloc failed for SGE\n");
+		RTE_LOG(ERR, PMD, "CIPHER: Memory alloc failed for SGE\n");
 		return -1;
 	}
 	memset(fle, 0, FLE_POOL_BUF_SIZE);
@@ -522,7 +1012,8 @@ build_cipher_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
 	DPAA2_SET_FD_COMPOUND_FMT(fd);
 	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
 
-	PMD_TX_LOG(DEBUG, "cipher_off: 0x%x/length %d,ivlen=%d data_off: 0x%x",
+	PMD_TX_LOG(DEBUG,
+			"CIPHER: cipher_off: 0x%x/length %d, ivlen=%d, data_off: 0x%x",
 		   sym_op->cipher.data.offset,
 		   sym_op->cipher.data.length,
 		   sess->iv.length,
@@ -534,8 +1025,10 @@ build_cipher_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
 
 	fle->length = sym_op->cipher.data.length + sess->iv.length;
 
-	PMD_TX_LOG(DEBUG, "1 - flc = %p, fle = %p FLEaddr = %x-%x, length %d",
-		   flc, fle, fle->addr_hi, fle->addr_lo, fle->length);
+	PMD_TX_LOG(DEBUG,
+			"CIPHER: 1 - flc = %p, fle = %p FLEaddr = %x-%x, length %d",
+			flc, fle, fle->addr_hi, fle->addr_lo,
+			fle->length);
 
 	fle++;
 
@@ -556,7 +1049,8 @@ build_cipher_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
 	DPAA2_SET_FLE_FIN(sge);
 	DPAA2_SET_FLE_FIN(fle);
 
-	PMD_TX_LOG(DEBUG, "fdaddr =%p bpid =%d meta =%d off =%d, len =%d",
+	PMD_TX_LOG(DEBUG,
+			"CIPHER: fdaddr =%p bpid =%d meta =%d off =%d, len =%d",
 		   (void *)DPAA2_GET_FD_ADDR(fd),
 		   DPAA2_GET_FD_BPID(fd),
 		   rte_dpaa2_bpid_info[bpid].meta_data_size,
@@ -574,13 +1068,6 @@ build_sec_fd(struct rte_crypto_op *op,
 	dpaa2_sec_session *sess;
 
 	PMD_INIT_FUNC_TRACE();
-	/*
-	 * Segmented buffer is not supported.
-	 */
-	if (!rte_pktmbuf_is_contiguous(op->sym->m_src)) {
-		op->status = RTE_CRYPTO_OP_STATUS_ERROR;
-		return -ENOTSUP;
-	}
 
 	if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
 		sess = (dpaa2_sec_session *)get_session_private_data(
@@ -591,25 +1078,46 @@ build_sec_fd(struct rte_crypto_op *op,
 	else
 		return -1;
 
-	switch (sess->ctxt_type) {
-	case DPAA2_SEC_CIPHER:
-		ret = build_cipher_fd(sess, op, fd, bpid);
-		break;
-	case DPAA2_SEC_AUTH:
-		ret = build_auth_fd(sess, op, fd, bpid);
-		break;
-	case DPAA2_SEC_AEAD:
-		ret = build_authenc_gcm_fd(sess, op, fd, bpid);
-		break;
-	case DPAA2_SEC_CIPHER_HASH:
-		ret = build_authenc_fd(sess, op, fd, bpid);
-		break;
-	case DPAA2_SEC_IPSEC:
-		ret = build_proto_fd(sess, op, fd, bpid);
-		break;
-	case DPAA2_SEC_HASH_CIPHER:
-	default:
-		RTE_LOG(ERR, PMD, "error: Unsupported session\n");
+	/* Segmented buffer */
+	if (unlikely(!rte_pktmbuf_is_contiguous(op->sym->m_src))) {
+		switch (sess->ctxt_type) {
+		case DPAA2_SEC_CIPHER:
+			ret = build_cipher_sg_fd(sess, op, fd, bpid);
+			break;
+		case DPAA2_SEC_AUTH:
+			ret = build_auth_sg_fd(sess, op, fd, bpid);
+			break;
+		case DPAA2_SEC_AEAD:
+			ret = build_authenc_gcm_sg_fd(sess, op, fd, bpid);
+			break;
+		case DPAA2_SEC_CIPHER_HASH:
+			ret = build_authenc_sg_fd(sess, op, fd, bpid);
+			break;
+		case DPAA2_SEC_HASH_CIPHER:
+		default:
+			RTE_LOG(ERR, PMD, "error: Unsupported session\n");
+		}
+	} else {
+		switch (sess->ctxt_type) {
+		case DPAA2_SEC_CIPHER:
+			ret = build_cipher_fd(sess, op, fd, bpid);
+			break;
+		case DPAA2_SEC_AUTH:
+			ret = build_auth_fd(sess, op, fd, bpid);
+			break;
+		case DPAA2_SEC_AEAD:
+			ret = build_authenc_gcm_fd(sess, op, fd, bpid);
+			break;
+		case DPAA2_SEC_CIPHER_HASH:
+			ret = build_authenc_fd(sess, op, fd, bpid);
+			break;
+		case DPAA2_SEC_IPSEC:
+			ret = build_proto_fd(sess, op, fd, bpid);
+			break;
+		case DPAA2_SEC_HASH_CIPHER:
+		default:
+			RTE_LOG(ERR, PMD, "error: Unsupported session\n");
+		}
 	}
 	return ret;
 }
@@ -766,8 +1274,11 @@ sec_fd_to_mbuf(const struct qbman_fd *fd, uint8_t driver_id)
 		   DPAA2_GET_FD_LEN(fd));
 
 	/* free the fle memory */
-	priv = (struct ctxt_priv *)DPAA2_GET_FLE_CTXT(fle - 1);
-	rte_mempool_put(priv->fle_pool, (void *)(fle - 1));
+	if (likely(rte_pktmbuf_is_contiguous(src))) {
+		priv = (struct ctxt_priv *)DPAA2_GET_FLE_CTXT(fle - 1);
+		rte_mempool_put(priv->fle_pool, (void *)(fle-1));
+	} else
+		rte_free((void *)(fle-1));
 
 	return op;
 }
@@ -2252,7 +2763,8 @@ dpaa2_sec_dev_init(struct rte_cryptodev *cryptodev)
 	cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
 			RTE_CRYPTODEV_FF_HW_ACCELERATED |
 			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
-			RTE_CRYPTODEV_FF_SECURITY;
+			RTE_CRYPTODEV_FF_SECURITY |
+			RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER;
 
 	internals = cryptodev->data->dev_private;
 	internals->max_nb_sessions = RTE_DPAA2_SEC_PMD_MAX_NB_SESSIONS;
diff --git a/test/test/test_cryptodev.c b/test/test/test_cryptodev.c
index 6dbc764..6f22896 100644
--- a/test/test/test_cryptodev.c
+++ b/test/test/test_cryptodev.c
@@ -9492,6 +9492,16 @@ static struct unit_test_suite cryptodev_dpaa2_sec_testsuite  = {
 		TEST_CASE_ST(ut_setup, ut_teardown,
 			test_AES_GCM_authenticated_decryption_oop_test_case_1),
 
+		/** Scatter-Gather */
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_AES_GCM_auth_encrypt_SGL_in_place_1500B),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_AES_GCM_auth_encrypt_SGL_out_of_place_400B_400B),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_AES_GCM_auth_encrypt_SGL_out_of_place_400B_1seg),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_AES_GCM_auth_encrypt_SGL_out_of_place_1500B_2000B),
+
 		TEST_CASES_END() /**< NULL terminate unit test array */
 	}
 };
diff --git a/test/test/test_cryptodev_aes_test_vectors.h b/test/test/test_cryptodev_aes_test_vectors.h
index 3fc3c2b..20f5285 100644
--- a/test/test/test_cryptodev_aes_test_vectors.h
+++ b/test/test/test_cryptodev_aes_test_vectors.h
@@ -1269,7 +1269,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_MB |
 			BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
 			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
-			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
+			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
 	},
 	{
 		.test_descr = "AES-128-CBC HMAC-SHA1 Decryption Digest "
@@ -1287,6 +1288,14 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
 	},
 	{
 		.test_descr = "AES-128-CBC HMAC-SHA1 Decryption Digest "
+			"Verify Scatter Gather",
+		.test_data = &aes_test_data_4,
+		.op_mask = BLOCKCIPHER_TEST_OP_AUTH_VERIFY_DEC,
+		.feature_mask = BLOCKCIPHER_TEST_FEATURE_SG,
+		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
+	},
+	{
+		.test_descr = "AES-128-CBC HMAC-SHA1 Decryption Digest "
 			"Verify (short buffers)",
 		.test_data = &aes_test_data_13,
 		.op_mask = BLOCKCIPHER_TEST_OP_AUTH_VERIFY_DEC,
@@ -1391,6 +1400,7 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_MB |
 			BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
 			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
 	},
 	{
@@ -1544,7 +1554,8 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
 		.op_mask = BLOCKCIPHER_TEST_OP_ENCRYPT,
 		.feature_mask = BLOCKCIPHER_TEST_FEATURE_SG |
 			BLOCKCIPHER_TEST_FEATURE_OOP,
-		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
+		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
 	},
 	{
 		.test_descr = "AES-192-CBC Decryption",
@@ -1558,6 +1569,13 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC
 	},
 	{
+		.test_descr = "AES-192-CBC Decryption Scatter Gather",
+		.test_data = &aes_test_data_10,
+		.op_mask = BLOCKCIPHER_TEST_OP_DECRYPT,
+		.feature_mask = BLOCKCIPHER_TEST_FEATURE_SG,
+		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
+	},
+	{
 		.test_descr = "AES-256-CBC Encryption",
 		.test_data = &aes_test_data_11,
 		.op_mask = BLOCKCIPHER_TEST_OP_ENCRYPT,
diff --git a/test/test/test_cryptodev_hash_test_vectors.h b/test/test/test_cryptodev_hash_test_vectors.h
index 2215b86..bd793f4 100644
--- a/test/test/test_cryptodev_hash_test_vectors.h
+++ b/test/test/test_cryptodev_hash_test_vectors.h
@@ -378,6 +378,13 @@ static const struct blockcipher_test_case hash_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_QAT
 	},
 	{
+		.test_descr = "HMAC-SHA1 Digest Scatter Gather",
+		.test_data = &hmac_sha1_test_vector,
+		.op_mask = BLOCKCIPHER_TEST_OP_AUTH_GEN,
+		.feature_mask = BLOCKCIPHER_TEST_FEATURE_SG,
+		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
+	},
+	{
 		.test_descr = "HMAC-SHA1 Digest Verify",
 		.test_data = &hmac_sha1_test_vector,
 		.op_mask = BLOCKCIPHER_TEST_OP_AUTH_VERIFY,
@@ -389,6 +396,13 @@ static const struct blockcipher_test_case hash_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_QAT
 	},
 	{
+		.test_descr = "HMAC-SHA1 Digest Verify Scatter Gather",
+		.test_data = &hmac_sha1_test_vector,
+		.op_mask = BLOCKCIPHER_TEST_OP_AUTH_VERIFY,
+		.feature_mask = BLOCKCIPHER_TEST_FEATURE_SG,
+		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
+	},
+	{
 		.test_descr = "SHA224 Digest",
 		.test_data = &sha224_test_vector,
 		.op_mask = BLOCKCIPHER_TEST_OP_AUTH_GEN,
-- 
2.9.3


* [dpdk-dev] [PATCH 2/2] crypto/dpaa_sec: support for scatter gather
  2018-01-19 11:51 [dpdk-dev] [PATCH 1/2] crypto/dpaa2_sec: support for scatter gather Akhil Goyal
@ 2018-01-19 11:51 ` Akhil Goyal
  2018-01-19 14:08   ` Hemant Agrawal
  2018-01-19 14:06 ` [dpdk-dev] [PATCH 1/2] crypto/dpaa2_sec: " Hemant Agrawal
                   ` (2 subsequent siblings)
  3 siblings, 1 reply; 10+ messages in thread
From: Akhil Goyal @ 2018-01-19 11:51 UTC (permalink / raw)
  To: dev; +Cc: pablo.de.lara.guarch, hemant.agrawal, alok.makhariya, Akhil Goyal

Signed-off-by: Alok Makhariya <alok.makhariya@nxp.com>
Signed-off-by: Akhil Goyal <akhil.goyal@nxp.com>
---
 doc/guides/cryptodevs/features/dpaa_sec.ini  |   1 +
 drivers/crypto/dpaa_sec/dpaa_sec.c           | 501 +++++++++++++++++++++++++--
 test/test/test_cryptodev.c                   |  10 +
 test/test/test_cryptodev_aes_test_vectors.h  |  11 +-
 test/test/test_cryptodev_hash_test_vectors.h |   6 +-
 5 files changed, 498 insertions(+), 31 deletions(-)

diff --git a/doc/guides/cryptodevs/features/dpaa_sec.ini b/doc/guides/cryptodevs/features/dpaa_sec.ini
index deab53a..260fae7 100644
--- a/doc/guides/cryptodevs/features/dpaa_sec.ini
+++ b/doc/guides/cryptodevs/features/dpaa_sec.ini
@@ -8,6 +8,7 @@ Symmetric crypto       = Y
 Sym operation chaining = Y
 HW Accelerated         = Y
 Protocol offload       = Y
+Mbuf scatter gather    = Y
 
 ;
 ; Supported crypto algorithms of the 'dpaa_sec' crypto driver.
diff --git a/drivers/crypto/dpaa_sec/dpaa_sec.c b/drivers/crypto/dpaa_sec/dpaa_sec.c
index a402e61..18681cf 100644
--- a/drivers/crypto/dpaa_sec/dpaa_sec.c
+++ b/drivers/crypto/dpaa_sec/dpaa_sec.c
@@ -599,6 +599,86 @@ dpaa_sec_deq(struct dpaa_sec_qp *qp, struct rte_crypto_op **ops, int nb_ops)
 	return pkts;
 }
 
+static inline struct dpaa_sec_job *
+build_auth_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
+{
+	struct rte_crypto_sym_op *sym = op->sym;
+	struct rte_mbuf *mbuf = sym->m_src;
+	struct dpaa_sec_job *cf;
+	struct dpaa_sec_op_ctx *ctx;
+	struct qm_sg_entry *sg, *out_sg, *in_sg;
+	phys_addr_t start_addr;
+	uint8_t *old_digest, extra_segs;
+
+	if (is_decode(ses))
+		extra_segs = 3;
+	else
+		extra_segs = 2;
+
+	if ((mbuf->nb_segs + extra_segs) > MAX_SG_ENTRIES) {
+		PMD_TX_LOG(ERR, "Auth: Max sec segs supported is %d\n",
+								MAX_SG_ENTRIES);
+		return NULL;
+	}
+	ctx = dpaa_sec_alloc_ctx(ses);
+	if (!ctx)
+		return NULL;
+
+	cf = &ctx->job;
+	ctx->op = op;
+	old_digest = ctx->digest;
+
+	/* output */
+	out_sg = &cf->sg[0];
+	qm_sg_entry_set64(out_sg, sym->auth.digest.phys_addr);
+	out_sg->length = ses->digest_length;
+	cpu_to_hw_sg(out_sg);
+
+	/* input */
+	in_sg = &cf->sg[1];
+	/* need to extend the input to a compound frame */
+	in_sg->extension = 1;
+	in_sg->final = 1;
+	in_sg->length = sym->auth.data.length;
+	qm_sg_entry_set64(in_sg, dpaa_mem_vtop_ctx(ctx, &cf->sg[2]));
+
+	/* 1st seg */
+	sg = in_sg + 1;
+	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
+	sg->length = mbuf->data_len - sym->auth.data.offset;
+	sg->offset = sym->auth.data.offset;
+
+	/* Successive segs */
+	mbuf = mbuf->next;
+	while (mbuf) {
+		cpu_to_hw_sg(sg);
+		sg++;
+		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
+		sg->length = mbuf->data_len;
+		mbuf = mbuf->next;
+	}
+
+	if (is_decode(ses)) {
+		/* Digest verification case */
+		cpu_to_hw_sg(sg);
+		sg++;
+		rte_memcpy(old_digest, sym->auth.digest.data,
+				ses->digest_length);
+		start_addr = dpaa_mem_vtop_ctx(ctx, old_digest);
+		qm_sg_entry_set64(sg, start_addr);
+		sg->length = ses->digest_length;
+		in_sg->length += ses->digest_length;
+	} else {
+		/* Digest calculation case */
+		sg->length -= ses->digest_length;
+	}
+	sg->final = 1;
+	cpu_to_hw_sg(sg);
+	cpu_to_hw_sg(in_sg);
+
+	return cf;
+}
+
 /**
  * packet looks like:
  *		|<----data_len------->|
@@ -669,6 +749,101 @@ build_auth_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
 }
 
 static inline struct dpaa_sec_job *
+build_cipher_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
+{
+	struct rte_crypto_sym_op *sym = op->sym;
+	struct dpaa_sec_job *cf;
+	struct dpaa_sec_op_ctx *ctx;
+	struct qm_sg_entry *sg, *out_sg, *in_sg;
+	struct rte_mbuf *mbuf;
+	uint8_t req_segs;
+	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
+			ses->iv.offset);
+
+	if (sym->m_dst) {
+		mbuf = sym->m_dst;
+		req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 3;
+	} else {
+		mbuf = sym->m_src;
+		req_segs = mbuf->nb_segs * 2 + 3;
+	}
+
+	if (req_segs > MAX_SG_ENTRIES) {
+		PMD_TX_LOG(ERR, "Cipher: Max sec segs supported is %d\n",
+								MAX_SG_ENTRIES);
+		return NULL;
+	}
+
+	ctx = dpaa_sec_alloc_ctx(ses);
+	if (!ctx)
+		return NULL;
+
+	cf = &ctx->job;
+	ctx->op = op;
+
+	/* output */
+	out_sg = &cf->sg[0];
+	out_sg->extension = 1;
+	out_sg->length = sym->cipher.data.length;
+	qm_sg_entry_set64(out_sg, dpaa_mem_vtop_ctx(ctx, &cf->sg[2]));
+	cpu_to_hw_sg(out_sg);
+
+	/* 1st seg */
+	sg = &cf->sg[2];
+	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
+	sg->length = mbuf->data_len - sym->cipher.data.offset;
+	sg->offset = sym->cipher.data.offset;
+
+	/* Successive segs */
+	mbuf = mbuf->next;
+	while (mbuf) {
+		cpu_to_hw_sg(sg);
+		sg++;
+		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
+		sg->length = mbuf->data_len;
+		mbuf = mbuf->next;
+	}
+	sg->final = 1;
+	cpu_to_hw_sg(sg);
+
+	/* input */
+	mbuf = sym->m_src;
+	in_sg = &cf->sg[1];
+	in_sg->extension = 1;
+	in_sg->final = 1;
+	in_sg->length = sym->cipher.data.length + ses->iv.length;
+
+	sg++;
+	qm_sg_entry_set64(in_sg, dpaa_mem_vtop_ctx(ctx, sg));
+	cpu_to_hw_sg(in_sg);
+
+	/* IV */
+	qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
+	sg->length = ses->iv.length;
+	cpu_to_hw_sg(sg);
+
+	/* 1st seg */
+	sg++;
+	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
+	sg->length = mbuf->data_len - sym->cipher.data.offset;
+	sg->offset = sym->cipher.data.offset;
+
+	/* Successive segs */
+	mbuf = mbuf->next;
+	while (mbuf) {
+		cpu_to_hw_sg(sg);
+		sg++;
+		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
+		sg->length = mbuf->data_len;
+		mbuf = mbuf->next;
+	}
+	sg->final = 1;
+	cpu_to_hw_sg(sg);
+
+	return cf;
+}
+
+static inline struct dpaa_sec_job *
 build_cipher_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
 {
 	struct rte_crypto_sym_op *sym = op->sym;
@@ -724,6 +899,145 @@ build_cipher_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
 }
 
 static inline struct dpaa_sec_job *
+build_cipher_auth_gcm_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
+{
+	struct rte_crypto_sym_op *sym = op->sym;
+	struct dpaa_sec_job *cf;
+	struct dpaa_sec_op_ctx *ctx;
+	struct qm_sg_entry *sg, *out_sg, *in_sg;
+	struct rte_mbuf *mbuf;
+	uint8_t req_segs;
+	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
+			ses->iv.offset);
+
+	if (sym->m_dst) {
+		mbuf = sym->m_dst;
+		req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 4;
+	} else {
+		mbuf = sym->m_src;
+		req_segs = mbuf->nb_segs * 2 + 4;
+	}
+
+	if (ses->auth_only_len)
+		req_segs++;
+
+	if (req_segs > MAX_SG_ENTRIES) {
+		PMD_TX_LOG(ERR, "AEAD: Max sec segs supported is %d\n",
+				MAX_SG_ENTRIES);
+		return NULL;
+	}
+
+	ctx = dpaa_sec_alloc_ctx(ses);
+	if (!ctx)
+		return NULL;
+
+	cf = &ctx->job;
+	ctx->op = op;
+
+	rte_prefetch0(cf->sg);
+
+	/* output */
+	out_sg = &cf->sg[0];
+	out_sg->extension = 1;
+	if (is_encode(ses))
+		out_sg->length = sym->aead.data.length + ses->auth_only_len
+						+ ses->digest_length;
+	else
+		out_sg->length = sym->aead.data.length + ses->auth_only_len;
+
+	/* output sg entries */
+	sg = &cf->sg[2];
+	qm_sg_entry_set64(out_sg, dpaa_mem_vtop_ctx(ctx, sg));
+	cpu_to_hw_sg(out_sg);
+
+	/* 1st seg */
+	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
+	sg->length = mbuf->data_len - sym->aead.data.offset +
+					ses->auth_only_len;
+	sg->offset = sym->aead.data.offset - ses->auth_only_len;
+
+	/* Successive segs */
+	mbuf = mbuf->next;
+	while (mbuf) {
+		cpu_to_hw_sg(sg);
+		sg++;
+		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
+		sg->length = mbuf->data_len;
+		mbuf = mbuf->next;
+	}
+	sg->length -= ses->digest_length;
+
+	if (is_encode(ses)) {
+		cpu_to_hw_sg(sg);
+		/* set auth output */
+		sg++;
+		qm_sg_entry_set64(sg, sym->aead.digest.phys_addr);
+		sg->length = ses->digest_length;
+	}
+	sg->final = 1;
+	cpu_to_hw_sg(sg);
+
+	/* input */
+	mbuf = sym->m_src;
+	in_sg = &cf->sg[1];
+	in_sg->extension = 1;
+	in_sg->final = 1;
+	if (is_encode(ses))
+		in_sg->length = ses->iv.length + sym->aead.data.length
+							+ ses->auth_only_len;
+	else
+		in_sg->length = ses->iv.length + sym->aead.data.length
+				+ ses->auth_only_len + ses->digest_length;
+
+	/* input sg entries */
+	sg++;
+	qm_sg_entry_set64(in_sg, dpaa_mem_vtop_ctx(ctx, sg));
+	cpu_to_hw_sg(in_sg);
+
+	/* 1st seg IV */
+	qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
+	sg->length = ses->iv.length;
+	cpu_to_hw_sg(sg);
+
+	/* 2nd seg auth only */
+	if (ses->auth_only_len) {
+		sg++;
+		qm_sg_entry_set64(sg, dpaa_mem_vtop(sym->aead.aad.data));
+		sg->length = ses->auth_only_len;
+		cpu_to_hw_sg(sg);
+	}
+
+	/* 3rd seg */
+	sg++;
+	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
+	sg->length = mbuf->data_len - sym->aead.data.offset;
+	sg->offset = sym->aead.data.offset;
+
+	/* Successive segs */
+	mbuf = mbuf->next;
+	while (mbuf) {
+		cpu_to_hw_sg(sg);
+		sg++;
+		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
+		sg->length = mbuf->data_len;
+		mbuf = mbuf->next;
+	}
+
+	if (is_decode(ses)) {
+		cpu_to_hw_sg(sg);
+		sg++;
+		memcpy(ctx->digest, sym->aead.digest.data,
+			ses->digest_length);
+		qm_sg_entry_set64(sg, dpaa_mem_vtop_ctx(ctx, ctx->digest));
+		sg->length = ses->digest_length;
+	}
+	sg->final = 1;
+	cpu_to_hw_sg(sg);
+
+	return cf;
+}
+
+static inline struct dpaa_sec_job *
 build_cipher_auth_gcm(struct rte_crypto_op *op, dpaa_sec_session *ses)
 {
 	struct rte_crypto_sym_op *sym = op->sym;
@@ -836,6 +1150,132 @@ build_cipher_auth_gcm(struct rte_crypto_op *op, dpaa_sec_session *ses)
 }
 
 static inline struct dpaa_sec_job *
+build_cipher_auth_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
+{
+	struct rte_crypto_sym_op *sym = op->sym;
+	struct dpaa_sec_job *cf;
+	struct dpaa_sec_op_ctx *ctx;
+	struct qm_sg_entry *sg, *out_sg, *in_sg;
+	struct rte_mbuf *mbuf;
+	uint8_t req_segs;
+	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
+			ses->iv.offset);
+
+	if (sym->m_dst) {
+		mbuf = sym->m_dst;
+		req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 4;
+	} else {
+		mbuf = sym->m_src;
+		req_segs = mbuf->nb_segs * 2 + 4;
+	}
+
+	if (req_segs > MAX_SG_ENTRIES) {
+		PMD_TX_LOG(ERR, "Cipher-Auth: Max sec segs supported is %d\n",
+				MAX_SG_ENTRIES);
+		return NULL;
+	}
+
+	ctx = dpaa_sec_alloc_ctx(ses);
+	if (!ctx)
+		return NULL;
+
+	cf = &ctx->job;
+	ctx->op = op;
+
+	rte_prefetch0(cf->sg);
+
+	/* output */
+	out_sg = &cf->sg[0];
+	out_sg->extension = 1;
+	if (is_encode(ses))
+		out_sg->length = sym->auth.data.length + ses->digest_length;
+	else
+		out_sg->length = sym->auth.data.length;
+
+	/* output sg entries */
+	sg = &cf->sg[2];
+	qm_sg_entry_set64(out_sg, dpaa_mem_vtop_ctx(ctx, sg));
+	cpu_to_hw_sg(out_sg);
+
+	/* 1st seg */
+	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
+	sg->length = mbuf->data_len - sym->auth.data.offset;
+	sg->offset = sym->auth.data.offset;
+
+	/* Successive segs */
+	mbuf = mbuf->next;
+	while (mbuf) {
+		cpu_to_hw_sg(sg);
+		sg++;
+		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
+		sg->length = mbuf->data_len;
+		mbuf = mbuf->next;
+	}
+	sg->length -= ses->digest_length;
+
+	if (is_encode(ses)) {
+		cpu_to_hw_sg(sg);
+		/* set auth output */
+		sg++;
+		qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
+		sg->length = ses->digest_length;
+	}
+	sg->final = 1;
+	cpu_to_hw_sg(sg);
+
+	/* input */
+	mbuf = sym->m_src;
+	in_sg = &cf->sg[1];
+	in_sg->extension = 1;
+	in_sg->final = 1;
+	if (is_encode(ses))
+		in_sg->length = ses->iv.length + sym->auth.data.length;
+	else
+		in_sg->length = ses->iv.length + sym->auth.data.length
+						+ ses->digest_length;
+
+	/* input sg entries */
+	sg++;
+	qm_sg_entry_set64(in_sg, dpaa_mem_vtop_ctx(ctx, sg));
+	cpu_to_hw_sg(in_sg);
+
+	/* 1st seg IV */
+	qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
+	sg->length = ses->iv.length;
+	cpu_to_hw_sg(sg);
+
+	/* 2nd seg */
+	sg++;
+	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
+	sg->length = mbuf->data_len - sym->auth.data.offset;
+	sg->offset = sym->auth.data.offset;
+
+	/* Successive segs */
+	mbuf = mbuf->next;
+	while (mbuf) {
+		cpu_to_hw_sg(sg);
+		sg++;
+		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
+		sg->length = mbuf->data_len;
+		mbuf = mbuf->next;
+	}
+
+	sg->length -= ses->digest_length;
+	if (is_decode(ses)) {
+		cpu_to_hw_sg(sg);
+		sg++;
+		memcpy(ctx->digest, sym->auth.digest.data,
+			ses->digest_length);
+		qm_sg_entry_set64(sg, dpaa_mem_vtop_ctx(ctx, ctx->digest));
+		sg->length = ses->digest_length;
+	}
+	sg->final = 1;
+	cpu_to_hw_sg(sg);
+
+	return cf;
+}
+
+static inline struct dpaa_sec_job *
 build_cipher_auth(struct rte_crypto_op *op, dpaa_sec_session *ses)
 {
 	struct rte_crypto_sym_op *sym = op->sym;
@@ -1020,34 +1460,42 @@ dpaa_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
 				}
 			}
 
-			/*
-			 * Segmented buffer is not supported.
-			 */
-			if (!rte_pktmbuf_is_contiguous(op->sym->m_src)) {
-				op->status = RTE_CRYPTO_OP_STATUS_ERROR;
-				frames_to_send = loop;
-				nb_ops = loop;
-				goto send_pkts;
-			}
 			auth_only_len = op->sym->auth.data.length -
 						op->sym->cipher.data.length;
-
-			if (is_auth_only(ses)) {
-				cf = build_auth_only(op, ses);
-			} else if (is_cipher_only(ses)) {
-				cf = build_cipher_only(op, ses);
-			} else if (is_aead(ses)) {
-				cf = build_cipher_auth_gcm(op, ses);
-				auth_only_len = ses->auth_only_len;
-			} else if (is_auth_cipher(ses)) {
-				cf = build_cipher_auth(op, ses);
-			} else if (is_proto_ipsec(ses)) {
-				cf = build_proto(op, ses);
+			if (rte_pktmbuf_is_contiguous(op->sym->m_src)) {
+				if (is_auth_only(ses)) {
+					cf = build_auth_only(op, ses);
+				} else if (is_cipher_only(ses)) {
+					cf = build_cipher_only(op, ses);
+				} else if (is_aead(ses)) {
+					cf = build_cipher_auth_gcm(op, ses);
+					auth_only_len = ses->auth_only_len;
+				} else if (is_auth_cipher(ses)) {
+					cf = build_cipher_auth(op, ses);
+				} else if (is_proto_ipsec(ses)) {
+					cf = build_proto(op, ses);
+				} else {
+					PMD_TX_LOG(ERR, "not supported sec op");
+					frames_to_send = loop;
+					nb_ops = loop;
+					goto send_pkts;
+				}
 			} else {
-				PMD_TX_LOG(ERR, "not supported sec op");
-				frames_to_send = loop;
-				nb_ops = loop;
-				goto send_pkts;
+				if (is_auth_only(ses)) {
+					cf = build_auth_only_sg(op, ses);
+				} else if (is_cipher_only(ses)) {
+					cf = build_cipher_only_sg(op, ses);
+				} else if (is_aead(ses)) {
+					cf = build_cipher_auth_gcm_sg(op, ses);
+					auth_only_len = ses->auth_only_len;
+				} else if (is_auth_cipher(ses)) {
+					cf = build_cipher_auth_sg(op, ses);
+				} else {
+					PMD_TX_LOG(ERR, "not supported sec op");
+					frames_to_send = loop;
+					nb_ops = loop;
+					goto send_pkts;
+				}
 			}
 			if (unlikely(!cf)) {
 				frames_to_send = loop;
@@ -1834,7 +2282,8 @@ dpaa_sec_dev_init(struct rte_cryptodev *cryptodev)
 	cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
 			RTE_CRYPTODEV_FF_HW_ACCELERATED |
 			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
-			RTE_CRYPTODEV_FF_SECURITY;
+			RTE_CRYPTODEV_FF_SECURITY |
+			RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER;
 
 	internals = cryptodev->data->dev_private;
 	internals->max_nb_queue_pairs = RTE_DPAA_MAX_NB_SEC_QPS;
diff --git a/test/test/test_cryptodev.c b/test/test/test_cryptodev.c
index 6f22896..0f2a045 100644
--- a/test/test/test_cryptodev.c
+++ b/test/test/test_cryptodev.c
@@ -9365,6 +9365,16 @@ static struct unit_test_suite cryptodev_dpaa_sec_testsuite  = {
 		TEST_CASE_ST(ut_setup, ut_teardown,
 			test_AES_GCM_authenticated_decryption_oop_test_case_1),
 
+		/** Scatter-Gather */
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_AES_GCM_auth_encrypt_SGL_in_place_1500B),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_AES_GCM_auth_encrypt_SGL_out_of_place_400B_400B),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_AES_GCM_auth_encrypt_SGL_out_of_place_400B_1seg),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_AES_GCM_auth_encrypt_SGL_out_of_place_1500B_2000B),
+
 		TEST_CASES_END() /**< NULL terminate unit test array */
 	}
 };
diff --git a/test/test/test_cryptodev_aes_test_vectors.h b/test/test/test_cryptodev_aes_test_vectors.h
index 20f5285..3577ef4 100644
--- a/test/test/test_cryptodev_aes_test_vectors.h
+++ b/test/test/test_cryptodev_aes_test_vectors.h
@@ -1270,7 +1270,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
 			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
-			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC
 	},
 	{
 		.test_descr = "AES-128-CBC HMAC-SHA1 Decryption Digest "
@@ -1292,7 +1293,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
 		.test_data = &aes_test_data_4,
 		.op_mask = BLOCKCIPHER_TEST_OP_AUTH_VERIFY_DEC,
 		.feature_mask = BLOCKCIPHER_TEST_FEATURE_SG,
-		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
+		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
+			    BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC
 	},
 	{
 		.test_descr = "AES-128-CBC HMAC-SHA1 Decryption Digest "
@@ -1401,6 +1403,7 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
 			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
 	},
 	{
@@ -1555,6 +1558,7 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
 		.feature_mask = BLOCKCIPHER_TEST_FEATURE_SG |
 			BLOCKCIPHER_TEST_FEATURE_OOP,
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
 	},
 	{
@@ -1573,7 +1577,8 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
 		.test_data = &aes_test_data_10,
 		.op_mask = BLOCKCIPHER_TEST_OP_DECRYPT,
 		.feature_mask = BLOCKCIPHER_TEST_FEATURE_SG,
-		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
+		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC
 	},
 	{
 		.test_descr = "AES-256-CBC Encryption",
diff --git a/test/test/test_cryptodev_hash_test_vectors.h b/test/test/test_cryptodev_hash_test_vectors.h
index bd793f4..93dacb7 100644
--- a/test/test/test_cryptodev_hash_test_vectors.h
+++ b/test/test/test_cryptodev_hash_test_vectors.h
@@ -382,7 +382,8 @@ static const struct blockcipher_test_case hash_test_cases[] = {
 		.test_data = &hmac_sha1_test_vector,
 		.op_mask = BLOCKCIPHER_TEST_OP_AUTH_GEN,
 		.feature_mask = BLOCKCIPHER_TEST_FEATURE_SG,
-		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
+		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
+			    BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC
 	},
 	{
 		.test_descr = "HMAC-SHA1 Digest Verify",
@@ -400,7 +401,8 @@ static const struct blockcipher_test_case hash_test_cases[] = {
 		.test_data = &hmac_sha1_test_vector,
 		.op_mask = BLOCKCIPHER_TEST_OP_AUTH_VERIFY,
 		.feature_mask = BLOCKCIPHER_TEST_FEATURE_SG,
-		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
+		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
+			    BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC
 	},
 	{
 		.test_descr = "SHA224 Digest",
-- 
2.9.3


* Re: [dpdk-dev] [PATCH 1/2] crypto/dpaa2_sec: support for scatter gather
  2018-01-19 11:51 [dpdk-dev] [PATCH 1/2] crypto/dpaa2_sec: support for scatter gather Akhil Goyal
  2018-01-19 11:51 ` [dpdk-dev] [PATCH 2/2] crypto/dpaa_sec: " Akhil Goyal
@ 2018-01-19 14:06 ` Hemant Agrawal
  2018-01-19 20:45 ` De Lara Guarch, Pablo
  2018-01-22  8:46 ` [dpdk-dev] [PATCH v2 1/3] doc: update feature list for cryptodevs Akhil Goyal
  3 siblings, 0 replies; 10+ messages in thread
From: Hemant Agrawal @ 2018-01-19 14:06 UTC (permalink / raw)
  To: Akhil Goyal, dev; +Cc: pablo.de.lara.guarch, alok.makhariya

On 1/19/2018 5:21 PM, Akhil Goyal wrote:
> Signed-off-by: Alok Makhariya <alok.makhariya@nxp.com>
> Signed-off-by: Akhil Goyal <akhil.goyal@nxp.com>
> ---
>  doc/guides/cryptodevs/features/default.ini   |   1 +
>  doc/guides/cryptodevs/features/dpaa2_sec.ini |   1 +
>  drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c  | 588 +++++++++++++++++++++++++--
>  test/test/test_cryptodev.c                   |  10 +
>  test/test/test_cryptodev_aes_test_vectors.h  |  22 +-
>  test/test/test_cryptodev_hash_test_vectors.h |  14 +
>  6 files changed, 596 insertions(+), 40 deletions(-)
>

Acked-by: Hemant Agrawal <hemant.agrawal@nxp.com>


* Re: [dpdk-dev] [PATCH 2/2] crypto/dpaa_sec: support for scatter gather
  2018-01-19 11:51 ` [dpdk-dev] [PATCH 2/2] crypto/dpaa_sec: " Akhil Goyal
@ 2018-01-19 14:08   ` Hemant Agrawal
  0 siblings, 0 replies; 10+ messages in thread
From: Hemant Agrawal @ 2018-01-19 14:08 UTC (permalink / raw)
  To: Akhil Goyal, dev; +Cc: pablo.de.lara.guarch, alok.makhariya

On 1/19/2018 5:21 PM, Akhil Goyal wrote:
> Signed-off-by: Alok Makhariya <alok.makhariya@nxp.com>
> Signed-off-by: Akhil Goyal <akhil.goyal@nxp.com>
> ---
>  doc/guides/cryptodevs/features/dpaa_sec.ini  |   1 +
>  drivers/crypto/dpaa_sec/dpaa_sec.c           | 501 +++++++++++++++++++++++++--
>  test/test/test_cryptodev.c                   |  10 +
>  test/test/test_cryptodev_aes_test_vectors.h  |  11 +-
>  test/test/test_cryptodev_hash_test_vectors.h |   6 +-
>  5 files changed, 498 insertions(+), 31 deletions(-)
>
Acked-by: Hemant Agrawal <hemant.agrawal@nxp.com>


* Re: [dpdk-dev] [PATCH 1/2] crypto/dpaa2_sec: support for scatter gather
  2018-01-19 11:51 [dpdk-dev] [PATCH 1/2] crypto/dpaa2_sec: support for scatter gather Akhil Goyal
  2018-01-19 11:51 ` [dpdk-dev] [PATCH 2/2] crypto/dpaa_sec: " Akhil Goyal
  2018-01-19 14:06 ` [dpdk-dev] [PATCH 1/2] crypto/dpaa2_sec: " Hemant Agrawal
@ 2018-01-19 20:45 ` De Lara Guarch, Pablo
  2018-01-22  8:46 ` [dpdk-dev] [PATCH v2 1/3] doc: update feature list for cryptodevs Akhil Goyal
  3 siblings, 0 replies; 10+ messages in thread
From: De Lara Guarch, Pablo @ 2018-01-19 20:45 UTC (permalink / raw)
  To: Akhil Goyal, dev; +Cc: hemant.agrawal, alok.makhariya

Hi Akhil,

> -----Original Message-----
> From: Akhil Goyal [mailto:akhil.goyal@nxp.com]
> Sent: Friday, January 19, 2018 11:51 AM
> To: dev@dpdk.org
> Cc: De Lara Guarch, Pablo <pablo.de.lara.guarch@intel.com>;
> hemant.agrawal@nxp.com; alok.makhariya@nxp.com; Akhil Goyal
> <akhil.goyal@nxp.com>
> Subject: [PATCH 1/2] crypto/dpaa2_sec: support for scatter gather
> 
> Signed-off-by: Alok Makhariya <alok.makhariya@nxp.com>
> Signed-off-by: Akhil Goyal <akhil.goyal@nxp.com>
> ---
>  doc/guides/cryptodevs/features/default.ini   |   1 +
>  doc/guides/cryptodevs/features/dpaa2_sec.ini |   1 +
>  drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c  | 588 +++++++++++++++++++++++++--
>  test/test/test_cryptodev.c                   |  10 +
>  test/test/test_cryptodev_aes_test_vectors.h  |  22 +-
> test/test/test_cryptodev_hash_test_vectors.h |  14 +
>  6 files changed, 596 insertions(+), 40 deletions(-)
> 
> diff --git a/doc/guides/cryptodevs/features/default.ini b/doc/guides/cryptodevs/features/default.ini
> index 18d66cb..728ce3b 100644
> --- a/doc/guides/cryptodevs/features/default.ini
> +++ b/doc/guides/cryptodevs/features/default.ini
> @@ -18,6 +18,7 @@ CPU AVX512             =
>  CPU AESNI              =
>  CPU NEON               =
>  CPU ARM CE             =
> +Mbuf scatter gather    =

I think we should split the addition of "Mbuf scatter gather" to the default.ini file into a separate patch,
and then check which PMDs support this feature and set it in their .ini files (in further patches).
Hopefully that should be straightforward, as there is an equivalent feature flag.
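
For reference, each PMD's flag can be cross-checked at run time before its
.ini file is updated; a minimal sketch (dev_id is assumed to refer to an
already-probed crypto device) could look like:

#include <rte_cryptodev.h>
#include <stdio.h>

/* Print whether a crypto device advertises mbuf scatter-gather support,
 * i.e. whether "Mbuf scatter gather = Y" belongs in its .ini file.
 */
static void
print_sg_support(uint8_t dev_id)
{
	struct rte_cryptodev_info info;

	rte_cryptodev_info_get(dev_id, &info);
	if (info.feature_flags & RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER)
		printf("%s: Mbuf scatter gather    = Y\n",
		       info.driver_name);
	else
		printf("%s: scatter-gather not advertised\n",
		       info.driver_name);
}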

Thanks,
Pablo


* [dpdk-dev] [PATCH v2 1/3] doc: update feature list for cryptodevs
  2018-01-19 11:51 [dpdk-dev] [PATCH 1/2] crypto/dpaa2_sec: support for scatter gather Akhil Goyal
                   ` (2 preceding siblings ...)
  2018-01-19 20:45 ` De Lara Guarch, Pablo
@ 2018-01-22  8:46 ` Akhil Goyal
  2018-01-22  8:46   ` [dpdk-dev] [PATCH v2 2/3] crypto/dpaa2_sec: support for scatter gather Akhil Goyal
                     ` (3 more replies)
  3 siblings, 4 replies; 10+ messages in thread
From: Akhil Goyal @ 2018-01-22  8:46 UTC (permalink / raw)
  To: dev
  Cc: pablo.de.lara.guarch, hemant.agrawal, alok.makhariya,
	john.griffin, fiona.trahe, deepak.k.jain, declan.doherty,
	Akhil Goyal

Signed-off-by: Akhil Goyal <akhil.goyal@nxp.com>
---
changes in v2: separated the default.ini change from the dpaa2_sec patch.

 doc/guides/cryptodevs/features/aesni_gcm.ini | 1 +
 doc/guides/cryptodevs/features/default.ini   | 1 +
 doc/guides/cryptodevs/features/null.ini      | 1 +
 doc/guides/cryptodevs/features/openssl.ini   | 1 +
 doc/guides/cryptodevs/features/qat.ini       | 1 +
 5 files changed, 5 insertions(+)

diff --git a/doc/guides/cryptodevs/features/aesni_gcm.ini b/doc/guides/cryptodevs/features/aesni_gcm.ini
index bacd94e..920b6b6 100644
--- a/doc/guides/cryptodevs/features/aesni_gcm.ini
+++ b/doc/guides/cryptodevs/features/aesni_gcm.ini
@@ -10,6 +10,7 @@ CPU AESNI              = Y
 CPU SSE                = Y
 CPU AVX                = Y
 CPU AVX2               = Y
+Mbuf scatter gather    = Y
 ;
 ; Supported crypto algorithms of the 'aesni_gcm' crypto driver.
 ;
diff --git a/doc/guides/cryptodevs/features/default.ini b/doc/guides/cryptodevs/features/default.ini
index 18d66cb..728ce3b 100644
--- a/doc/guides/cryptodevs/features/default.ini
+++ b/doc/guides/cryptodevs/features/default.ini
@@ -18,6 +18,7 @@ CPU AVX512             =
 CPU AESNI              =
 CPU NEON               =
 CPU ARM CE             =
+Mbuf scatter gather    =
 
 ;
 ; Supported crypto algorithms of a default crypto driver.
diff --git a/doc/guides/cryptodevs/features/null.ini b/doc/guides/cryptodevs/features/null.ini
index 523c453..a9e172d 100644
--- a/doc/guides/cryptodevs/features/null.ini
+++ b/doc/guides/cryptodevs/features/null.ini
@@ -6,6 +6,7 @@
 [Features]
 Symmetric crypto       = Y
 Sym operation chaining = Y
+Mbuf scatter gather    = Y
 
 ;
 ; Supported crypto algorithms of the 'null' crypto driver.
diff --git a/doc/guides/cryptodevs/features/openssl.ini b/doc/guides/cryptodevs/features/openssl.ini
index 385ec4e..6915658 100644
--- a/doc/guides/cryptodevs/features/openssl.ini
+++ b/doc/guides/cryptodevs/features/openssl.ini
@@ -6,6 +6,7 @@
 [Features]
 Symmetric crypto       = Y
 Sym operation chaining = Y
+Mbuf scatter gather    = Y
 
 ;
 ; Supported crypto algorithms of the 'openssl' crypto driver.
diff --git a/doc/guides/cryptodevs/features/qat.ini b/doc/guides/cryptodevs/features/qat.ini
index 40da898..51ed596 100644
--- a/doc/guides/cryptodevs/features/qat.ini
+++ b/doc/guides/cryptodevs/features/qat.ini
@@ -7,6 +7,7 @@
 Symmetric crypto       = Y
 Sym operation chaining = Y
 HW Accelerated         = Y
+Mbuf scatter gather    = Y
 
 ;
 ; Supported crypto algorithms of the 'qat' crypto driver.
-- 
2.9.3


* [dpdk-dev] [PATCH v2 2/3] crypto/dpaa2_sec: support for scatter gather
  2018-01-22  8:46 ` [dpdk-dev] [PATCH v2 1/3] doc: update feature list for cryptodevs Akhil Goyal
@ 2018-01-22  8:46   ` Akhil Goyal
  2018-01-22  8:46   ` [dpdk-dev] [PATCH v2 3/3] crypto/dpaa_sec: " Akhil Goyal
                     ` (2 subsequent siblings)
  3 siblings, 0 replies; 10+ messages in thread
From: Akhil Goyal @ 2018-01-22  8:46 UTC (permalink / raw)
  To: dev
  Cc: pablo.de.lara.guarch, hemant.agrawal, alok.makhariya,
	john.griffin, fiona.trahe, deepak.k.jain, declan.doherty,
	Akhil Goyal

Signed-off-by: Alok Makhariya <alok.makhariya@nxp.com>
Signed-off-by: Akhil Goyal <akhil.goyal@nxp.com>
Acked-by: Hemant Agrawal <hemant.agrawal@nxp.com>
---
 doc/guides/cryptodevs/features/dpaa2_sec.ini |   1 +
 drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c  | 588 +++++++++++++++++++++++++--
 test/test/test_cryptodev.c                   |  10 +
 test/test/test_cryptodev_aes_test_vectors.h  |  22 +-
 test/test/test_cryptodev_hash_test_vectors.h |  14 +
 5 files changed, 595 insertions(+), 40 deletions(-)

diff --git a/doc/guides/cryptodevs/features/dpaa2_sec.ini b/doc/guides/cryptodevs/features/dpaa2_sec.ini
index 8fd07d6..68c9960 100644
--- a/doc/guides/cryptodevs/features/dpaa2_sec.ini
+++ b/doc/guides/cryptodevs/features/dpaa2_sec.ini
@@ -8,6 +8,7 @@ Symmetric crypto       = Y
 Sym operation chaining = Y
 HW Accelerated         = Y
 Protocol offload       = Y
+Mbuf scatter gather    = Y
 
 ;
 ; Supported crypto algorithms of the 'dpaa2_sec' crypto driver.
diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c b/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
index 977c49a..bafe754 100644
--- a/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
@@ -48,6 +48,7 @@
 #define FLE_POOL_NUM_BUFS	32000
 #define FLE_POOL_BUF_SIZE	256
 #define FLE_POOL_CACHE_SIZE	512
+#define FLE_SG_MEM_SIZE		2048
 #define SEC_FLC_DHR_OUTBOUND	-114
 #define SEC_FLC_DHR_INBOUND	0
 
@@ -86,6 +87,153 @@ build_proto_fd(dpaa2_sec_session *sess,
 }
 
 static inline int
+build_authenc_gcm_sg_fd(dpaa2_sec_session *sess,
+		 struct rte_crypto_op *op,
+		 struct qbman_fd *fd, __rte_unused uint16_t bpid)
+{
+	struct rte_crypto_sym_op *sym_op = op->sym;
+	struct ctxt_priv *priv = sess->ctxt;
+	struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
+	struct sec_flow_context *flc;
+	uint32_t auth_only_len = sess->ext_params.aead_ctxt.auth_only_len;
+	int icv_len = sess->digest_length;
+	uint8_t *old_icv;
+	struct rte_mbuf *mbuf;
+	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
+			sess->iv.offset);
+
+	PMD_INIT_FUNC_TRACE();
+
+	if (sym_op->m_dst)
+		mbuf = sym_op->m_dst;
+	else
+		mbuf = sym_op->m_src;
+
+	/* first FLE entry used to store mbuf and session ctxt */
+	fle = (struct qbman_fle *)rte_malloc(NULL, FLE_SG_MEM_SIZE,
+			RTE_CACHE_LINE_SIZE);
+	if (unlikely(!fle)) {
+		RTE_LOG(ERR, PMD, "GCM SG: Memory alloc failed for SGE\n");
+		return -1;
+	}
+	memset(fle, 0, FLE_SG_MEM_SIZE);
+	DPAA2_SET_FLE_ADDR(fle, DPAA2_OP_VADDR_TO_IOVA(op));
+	DPAA2_FLE_SAVE_CTXT(fle, priv);
+
+	op_fle = fle + 1;
+	ip_fle = fle + 2;
+	sge = fle + 3;
+
+	/* Save the shared descriptor */
+	flc = &priv->flc_desc[0].flc;
+
+	/* Configure FD as a FRAME LIST */
+	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
+	DPAA2_SET_FD_COMPOUND_FMT(fd);
+	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
+
+	PMD_TX_LOG(DEBUG, "GCM SG: auth_off: 0x%x/length %d, digest-len=%d\n"
+		   "iv-len=%d data_off: 0x%x\n",
+		   sym_op->aead.data.offset,
+		   sym_op->aead.data.length,
+		   sym_op->aead.digest.length,
+		   sess->iv.length,
+		   sym_op->m_src->data_off);
+
+	/* Configure Output FLE with Scatter/Gather Entry */
+	DPAA2_SET_FLE_SG_EXT(op_fle);
+	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));
+
+	if (auth_only_len)
+		DPAA2_SET_FLE_INTERNAL_JD(op_fle, auth_only_len);
+
+	op_fle->length = (sess->dir == DIR_ENC) ?
+			(sym_op->aead.data.length + icv_len + auth_only_len) :
+			sym_op->aead.data.length + auth_only_len;
+
+	/* Configure Output SGE for Encap/Decap */
+	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
+	DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off + sym_op->aead.data.offset -
+								auth_only_len);
+	sge->length = mbuf->data_len - sym_op->aead.data.offset + auth_only_len;
+
+	mbuf = mbuf->next;
+	/* o/p segs */
+	while (mbuf) {
+		sge++;
+		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
+		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
+		sge->length = mbuf->data_len;
+		mbuf = mbuf->next;
+	}
+	sge->length -= icv_len;
+
+	if (sess->dir == DIR_ENC) {
+		sge++;
+		DPAA2_SET_FLE_ADDR(sge,
+				DPAA2_VADDR_TO_IOVA(sym_op->aead.digest.data));
+		sge->length = icv_len;
+	}
+	DPAA2_SET_FLE_FIN(sge);
+
+	sge++;
+	mbuf = sym_op->m_src;
+
+	/* Configure Input FLE with Scatter/Gather Entry */
+	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
+	DPAA2_SET_FLE_SG_EXT(ip_fle);
+	DPAA2_SET_FLE_FIN(ip_fle);
+	ip_fle->length = (sess->dir == DIR_ENC) ?
+		(sym_op->aead.data.length + sess->iv.length + auth_only_len) :
+		(sym_op->aead.data.length + sess->iv.length + auth_only_len +
+		 icv_len);
+
+	/* Configure Input SGE for Encap/Decap */
+	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(IV_ptr));
+	sge->length = sess->iv.length;
+
+	sge++;
+	if (auth_only_len) {
+		DPAA2_SET_FLE_ADDR(sge,
+				DPAA2_VADDR_TO_IOVA(sym_op->aead.aad.data));
+		sge->length = auth_only_len;
+		sge++;
+	}
+
+	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
+	DPAA2_SET_FLE_OFFSET(sge, sym_op->aead.data.offset +
+				mbuf->data_off);
+	sge->length = mbuf->data_len - sym_op->aead.data.offset;
+
+	mbuf = mbuf->next;
+	/* i/p segs */
+	while (mbuf) {
+		sge++;
+		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
+		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
+		sge->length = mbuf->data_len;
+		mbuf = mbuf->next;
+	}
+
+	if (sess->dir == DIR_DEC) {
+		sge++;
+		old_icv = (uint8_t *)(sge + 1);
+		memcpy(old_icv,	sym_op->aead.digest.data, icv_len);
+		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
+		sge->length = icv_len;
+	}
+
+	DPAA2_SET_FLE_FIN(sge);
+	if (auth_only_len) {
+		DPAA2_SET_FLE_INTERNAL_JD(ip_fle, auth_only_len);
+		DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
+	}
+	DPAA2_SET_FD_LEN(fd, ip_fle->length);
+
+	return 0;
+}
+
+static inline int
 build_authenc_gcm_fd(dpaa2_sec_session *sess,
 		     struct rte_crypto_op *op,
 		     struct qbman_fd *fd, uint16_t bpid)
@@ -116,7 +264,7 @@ build_authenc_gcm_fd(dpaa2_sec_session *sess,
 	 */
 	retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
 	if (retval) {
-		RTE_LOG(ERR, PMD, "Memory alloc failed for SGE\n");
+		RTE_LOG(ERR, PMD, "GCM: Memory alloc failed for SGE\n");
 		return -1;
 	}
 	memset(fle, 0, FLE_POOL_BUF_SIZE);
@@ -149,7 +297,7 @@ build_authenc_gcm_fd(dpaa2_sec_session *sess,
 	DPAA2_SET_FD_COMPOUND_FMT(fd);
 	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
 
-	PMD_TX_LOG(DEBUG, "auth_off: 0x%x/length %d, digest-len=%d\n"
+	PMD_TX_LOG(DEBUG, "GCM: auth_off: 0x%x/length %d, digest-len=%d\n"
 		   "iv-len=%d data_off: 0x%x\n",
 		   sym_op->aead.data.offset,
 		   sym_op->aead.data.length,
@@ -234,6 +382,151 @@ build_authenc_gcm_fd(dpaa2_sec_session *sess,
 }
 
 static inline int
+build_authenc_sg_fd(dpaa2_sec_session *sess,
+		 struct rte_crypto_op *op,
+		 struct qbman_fd *fd, __rte_unused uint16_t bpid)
+{
+	struct rte_crypto_sym_op *sym_op = op->sym;
+	struct ctxt_priv *priv = sess->ctxt;
+	struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
+	struct sec_flow_context *flc;
+	uint32_t auth_only_len = sym_op->auth.data.length -
+				sym_op->cipher.data.length;
+	int icv_len = sess->digest_length;
+	uint8_t *old_icv;
+	struct rte_mbuf *mbuf;
+	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
+			sess->iv.offset);
+
+	PMD_INIT_FUNC_TRACE();
+
+	if (sym_op->m_dst)
+		mbuf = sym_op->m_dst;
+	else
+		mbuf = sym_op->m_src;
+
+	/* first FLE entry used to store mbuf and session ctxt */
+	fle = (struct qbman_fle *)rte_malloc(NULL, FLE_SG_MEM_SIZE,
+			RTE_CACHE_LINE_SIZE);
+	if (unlikely(!fle)) {
+		RTE_LOG(ERR, PMD, "AUTHENC SG: Memory alloc failed for SGE\n");
+		return -1;
+	}
+	memset(fle, 0, FLE_SG_MEM_SIZE);
+	DPAA2_SET_FLE_ADDR(fle, DPAA2_OP_VADDR_TO_IOVA(op));
+	DPAA2_FLE_SAVE_CTXT(fle, priv);
+
+	op_fle = fle + 1;
+	ip_fle = fle + 2;
+	sge = fle + 3;
+
+	/* Save the shared descriptor */
+	flc = &priv->flc_desc[0].flc;
+
+	/* Configure FD as a FRAME LIST */
+	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
+	DPAA2_SET_FD_COMPOUND_FMT(fd);
+	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
+
+	PMD_TX_LOG(DEBUG,
+			"AUTHENC SG: auth_off: 0x%x/length %d, digest-len=%d\n"
+			"cipher_off: 0x%x/length %d, iv-len=%d data_off: 0x%x\n",
+		   sym_op->auth.data.offset,
+		   sym_op->auth.data.length,
+		   sym_op->auth.digest.length,
+		   sym_op->cipher.data.offset,
+		   sym_op->cipher.data.length,
+		   sym_op->cipher.iv.length,
+		   sym_op->m_src->data_off);
+
+	/* Configure Output FLE with Scatter/Gather Entry */
+	DPAA2_SET_FLE_SG_EXT(op_fle);
+	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));
+
+	if (auth_only_len)
+		DPAA2_SET_FLE_INTERNAL_JD(op_fle, auth_only_len);
+
+	op_fle->length = (sess->dir == DIR_ENC) ?
+			(sym_op->cipher.data.length + icv_len) :
+			sym_op->cipher.data.length;
+
+	/* Configure Output SGE for Encap/Decap */
+	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
+	DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off + sym_op->auth.data.offset);
+	sge->length = mbuf->data_len - sym_op->auth.data.offset;
+
+	mbuf = mbuf->next;
+	/* o/p segs */
+	while (mbuf) {
+		sge++;
+		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
+		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
+		sge->length = mbuf->data_len;
+		mbuf = mbuf->next;
+	}
+	sge->length -= icv_len;
+
+	if (sess->dir == DIR_ENC) {
+		sge++;
+		DPAA2_SET_FLE_ADDR(sge,
+				DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
+		sge->length = icv_len;
+	}
+	DPAA2_SET_FLE_FIN(sge);
+
+	sge++;
+	mbuf = sym_op->m_src;
+
+	/* Configure Input FLE with Scatter/Gather Entry */
+	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
+	DPAA2_SET_FLE_SG_EXT(ip_fle);
+	DPAA2_SET_FLE_FIN(ip_fle);
+	ip_fle->length = (sess->dir == DIR_ENC) ?
+			(sym_op->auth.data.length + sess->iv.length) :
+			(sym_op->auth.data.length + sess->iv.length +
+			 icv_len);
+
+	/* Configure Input SGE for Encap/Decap */
+	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
+	sge->length = sess->iv.length;
+
+	sge++;
+	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
+	DPAA2_SET_FLE_OFFSET(sge, sym_op->auth.data.offset +
+				mbuf->data_off);
+	sge->length = mbuf->data_len - sym_op->auth.data.offset;
+
+	mbuf = mbuf->next;
+	/* i/p segs */
+	while (mbuf) {
+		sge++;
+		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
+		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
+		sge->length = mbuf->data_len;
+		mbuf = mbuf->next;
+	}
+	sge->length -= icv_len;
+
+	if (sess->dir == DIR_DEC) {
+		sge++;
+		old_icv = (uint8_t *)(sge + 1);
+		memcpy(old_icv,	sym_op->auth.digest.data,
+		       icv_len);
+		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
+		sge->length = icv_len;
+	}
+
+	DPAA2_SET_FLE_FIN(sge);
+	if (auth_only_len) {
+		DPAA2_SET_FLE_INTERNAL_JD(ip_fle, auth_only_len);
+		DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
+	}
+	DPAA2_SET_FD_LEN(fd, ip_fle->length);
+
+	return 0;
+}
+
+static inline int
 build_authenc_fd(dpaa2_sec_session *sess,
 		 struct rte_crypto_op *op,
 		 struct qbman_fd *fd, uint16_t bpid)
@@ -298,7 +591,7 @@ build_authenc_fd(dpaa2_sec_session *sess,
 	DPAA2_SET_FD_COMPOUND_FMT(fd);
 	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
 
-	PMD_TX_LOG(DEBUG, "auth_off: 0x%x/length %d, digest-len=%d\n"
+	PMD_TX_LOG(DEBUG, "AUTHENC: auth_off: 0x%x/length %d, digest-len=%d\n"
 		   "cipher_off: 0x%x/length %d, iv-len=%d data_off: 0x%x\n",
 		   sym_op->auth.data.offset,
 		   sym_op->auth.data.length,
@@ -374,6 +667,86 @@ build_authenc_fd(dpaa2_sec_session *sess,
 	return 0;
 }
 
+static inline int build_auth_sg_fd(
+		dpaa2_sec_session *sess,
+		struct rte_crypto_op *op,
+		struct qbman_fd *fd,
+		__rte_unused uint16_t bpid)
+{
+	struct rte_crypto_sym_op *sym_op = op->sym;
+	struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
+	struct sec_flow_context *flc;
+	struct ctxt_priv *priv = sess->ctxt;
+	uint8_t *old_digest;
+	struct rte_mbuf *mbuf;
+
+	PMD_INIT_FUNC_TRACE();
+
+	mbuf = sym_op->m_src;
+	fle = (struct qbman_fle *)rte_malloc(NULL, FLE_SG_MEM_SIZE,
+			RTE_CACHE_LINE_SIZE);
+	if (unlikely(!fle)) {
+		RTE_LOG(ERR, PMD, "AUTH SG: Memory alloc failed for SGE\n");
+		return -1;
+	}
+	memset(fle, 0, FLE_SG_MEM_SIZE);
+	/* first FLE entry used to store mbuf and session ctxt */
+	DPAA2_SET_FLE_ADDR(fle, DPAA2_OP_VADDR_TO_IOVA(op));
+	DPAA2_FLE_SAVE_CTXT(fle, priv);
+	op_fle = fle + 1;
+	ip_fle = fle + 2;
+	sge = fle + 3;
+
+	flc = &priv->flc_desc[DESC_INITFINAL].flc;
+	/* sg FD */
+	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
+	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
+	DPAA2_SET_FD_COMPOUND_FMT(fd);
+
+	/* o/p fle */
+	DPAA2_SET_FLE_ADDR(op_fle,
+				DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
+	op_fle->length = sess->digest_length;
+
+	/* i/p fle */
+	DPAA2_SET_FLE_SG_EXT(ip_fle);
+	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
+	/* i/p 1st seg */
+	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
+	DPAA2_SET_FLE_OFFSET(sge, sym_op->auth.data.offset + mbuf->data_off);
+	sge->length = mbuf->data_len - sym_op->auth.data.offset;
+
+	/* i/p segs */
+	mbuf = mbuf->next;
+	while (mbuf) {
+		sge++;
+		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
+		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
+		sge->length = mbuf->data_len;
+		mbuf = mbuf->next;
+	}
+	if (sess->dir == DIR_ENC) {
+		/* Digest calculation case */
+		sge->length -= sess->digest_length;
+		ip_fle->length = sym_op->auth.data.length;
+	} else {
+		/* Digest verification case */
+		sge++;
+		old_digest = (uint8_t *)(sge + 1);
+		rte_memcpy(old_digest, sym_op->auth.digest.data,
+			   sess->digest_length);
+		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_digest));
+		sge->length = sess->digest_length;
+		ip_fle->length = sym_op->auth.data.length +
+				sess->digest_length;
+	}
+	DPAA2_SET_FLE_FIN(sge);
+	DPAA2_SET_FLE_FIN(ip_fle);
+	DPAA2_SET_FD_LEN(fd, ip_fle->length);
+
+	return 0;
+}
+
 static inline int
 build_auth_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
 	      struct qbman_fd *fd, uint16_t bpid)
@@ -389,7 +762,7 @@ build_auth_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
 
 	retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
 	if (retval) {
-		RTE_LOG(ERR, PMD, "Memory alloc failed for SGE\n");
+		RTE_LOG(ERR, PMD, "AUTH Memory alloc failed for SGE\n");
 		return -1;
 	}
 	memset(fle, 0, FLE_POOL_BUF_SIZE);
@@ -465,6 +838,123 @@ build_auth_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
 }
 
 static int
+build_cipher_sg_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
+		struct qbman_fd *fd, __rte_unused uint16_t bpid)
+{
+	struct rte_crypto_sym_op *sym_op = op->sym;
+	struct qbman_fle *ip_fle, *op_fle, *sge, *fle;
+	struct sec_flow_context *flc;
+	struct ctxt_priv *priv = sess->ctxt;
+	struct rte_mbuf *mbuf;
+	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
+			sess->iv.offset);
+
+	PMD_INIT_FUNC_TRACE();
+
+	if (sym_op->m_dst)
+		mbuf = sym_op->m_dst;
+	else
+		mbuf = sym_op->m_src;
+
+	fle = (struct qbman_fle *)rte_malloc(NULL, FLE_SG_MEM_SIZE,
+			RTE_CACHE_LINE_SIZE);
+	if (!fle) {
+		RTE_LOG(ERR, PMD, "CIPHER SG: Memory alloc failed for SGE\n");
+		return -1;
+	}
+	memset(fle, 0, FLE_SG_MEM_SIZE);
+	/* first FLE entry used to store mbuf and session ctxt */
+	DPAA2_SET_FLE_ADDR(fle, DPAA2_OP_VADDR_TO_IOVA(op));
+	DPAA2_FLE_SAVE_CTXT(fle, priv);
+
+	op_fle = fle + 1;
+	ip_fle = fle + 2;
+	sge = fle + 3;
+
+	flc = &priv->flc_desc[0].flc;
+
+	PMD_TX_LOG(DEBUG,
+			"CIPHER SG: cipher_off: 0x%x/length %d,ivlen=%d data_off: 0x%x",
+		   sym_op->cipher.data.offset,
+		   sym_op->cipher.data.length,
+		   sym_op->cipher.iv.length,
+		   sym_op->m_src->data_off);
+
+	/* o/p fle */
+	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));
+	op_fle->length = sym_op->cipher.data.length;
+	DPAA2_SET_FLE_SG_EXT(op_fle);
+
+	/* o/p 1st seg */
+	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
+	DPAA2_SET_FLE_OFFSET(sge, sym_op->cipher.data.offset + mbuf->data_off);
+	sge->length = mbuf->data_len - sym_op->cipher.data.offset;
+
+	mbuf = mbuf->next;
+	/* o/p segs */
+	while (mbuf) {
+		sge++;
+		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
+		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
+		sge->length = mbuf->data_len;
+		mbuf = mbuf->next;
+	}
+	DPAA2_SET_FLE_FIN(sge);
+
+	PMD_TX_LOG(DEBUG,
+			"CIPHER SG: 1 - flc = %p, fle = %p FLEaddr = %x-%x, length %d",
+			flc, fle, fle->addr_hi, fle->addr_lo,
+			fle->length);
+
+	/* i/p fle */
+	mbuf = sym_op->m_src;
+	sge++;
+	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
+	ip_fle->length = sess->iv.length + sym_op->cipher.data.length;
+	DPAA2_SET_FLE_SG_EXT(ip_fle);
+
+	/* i/p IV */
+	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
+	DPAA2_SET_FLE_OFFSET(sge, 0);
+	sge->length = sess->iv.length;
+
+	sge++;
+
+	/* i/p 1st seg */
+	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
+	DPAA2_SET_FLE_OFFSET(sge, sym_op->cipher.data.offset +
+			     mbuf->data_off);
+	sge->length = mbuf->data_len - sym_op->cipher.data.offset;
+
+	mbuf = mbuf->next;
+	/* i/p segs */
+	while (mbuf) {
+		sge++;
+		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
+		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
+		sge->length = mbuf->data_len;
+		mbuf = mbuf->next;
+	}
+	DPAA2_SET_FLE_FIN(sge);
+	DPAA2_SET_FLE_FIN(ip_fle);
+
+	/* sg fd */
+	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
+	DPAA2_SET_FD_LEN(fd, ip_fle->length);
+	DPAA2_SET_FD_COMPOUND_FMT(fd);
+	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
+
+	PMD_TX_LOG(DEBUG,
+			"CIPHER SG: fdaddr =%p bpid =%d meta =%d off =%d, len =%d",
+		   (void *)DPAA2_GET_FD_ADDR(fd),
+		   DPAA2_GET_FD_BPID(fd),
+		   rte_dpaa2_bpid_info[bpid].meta_data_size,
+		   DPAA2_GET_FD_OFFSET(fd),
+		   DPAA2_GET_FD_LEN(fd));
+	return 0;
+}
+
+static int
 build_cipher_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
 		struct qbman_fd *fd, uint16_t bpid)
 {
@@ -486,7 +976,7 @@ build_cipher_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
 
 	retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
 	if (retval) {
-		RTE_LOG(ERR, PMD, "Memory alloc failed for SGE\n");
+		RTE_LOG(ERR, PMD, "CIPHER: Memory alloc failed for SGE\n");
 		return -1;
 	}
 	memset(fle, 0, FLE_POOL_BUF_SIZE);
@@ -522,7 +1012,8 @@ build_cipher_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
 	DPAA2_SET_FD_COMPOUND_FMT(fd);
 	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
 
-	PMD_TX_LOG(DEBUG, "cipher_off: 0x%x/length %d,ivlen=%d data_off: 0x%x",
+	PMD_TX_LOG(DEBUG,
+			"CIPHER: cipher_off: 0x%x/length %d, ivlen=%d, data_off: 0x%x",
 		   sym_op->cipher.data.offset,
 		   sym_op->cipher.data.length,
 		   sess->iv.length,
@@ -534,8 +1025,10 @@ build_cipher_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
 
 	fle->length = sym_op->cipher.data.length + sess->iv.length;
 
-	PMD_TX_LOG(DEBUG, "1 - flc = %p, fle = %p FLEaddr = %x-%x, length %d",
-		   flc, fle, fle->addr_hi, fle->addr_lo, fle->length);
+	PMD_TX_LOG(DEBUG,
+			"CIPHER: 1 - flc = %p, fle = %p FLEaddr = %x-%x, length %d",
+			flc, fle, fle->addr_hi, fle->addr_lo,
+			fle->length);
 
 	fle++;
 
@@ -556,7 +1049,8 @@ build_cipher_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
 	DPAA2_SET_FLE_FIN(sge);
 	DPAA2_SET_FLE_FIN(fle);
 
-	PMD_TX_LOG(DEBUG, "fdaddr =%p bpid =%d meta =%d off =%d, len =%d",
+	PMD_TX_LOG(DEBUG,
+			"CIPHER: fdaddr =%p bpid =%d meta =%d off =%d, len =%d",
 		   (void *)DPAA2_GET_FD_ADDR(fd),
 		   DPAA2_GET_FD_BPID(fd),
 		   rte_dpaa2_bpid_info[bpid].meta_data_size,
@@ -574,13 +1068,6 @@ build_sec_fd(struct rte_crypto_op *op,
 	dpaa2_sec_session *sess;
 
 	PMD_INIT_FUNC_TRACE();
-	/*
-	 * Segmented buffer is not supported.
-	 */
-	if (!rte_pktmbuf_is_contiguous(op->sym->m_src)) {
-		op->status = RTE_CRYPTO_OP_STATUS_ERROR;
-		return -ENOTSUP;
-	}
 
 	if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
 		sess = (dpaa2_sec_session *)get_session_private_data(
@@ -591,25 +1078,46 @@ build_sec_fd(struct rte_crypto_op *op,
 	else
 		return -1;
 
-	switch (sess->ctxt_type) {
-	case DPAA2_SEC_CIPHER:
-		ret = build_cipher_fd(sess, op, fd, bpid);
-		break;
-	case DPAA2_SEC_AUTH:
-		ret = build_auth_fd(sess, op, fd, bpid);
-		break;
-	case DPAA2_SEC_AEAD:
-		ret = build_authenc_gcm_fd(sess, op, fd, bpid);
-		break;
-	case DPAA2_SEC_CIPHER_HASH:
-		ret = build_authenc_fd(sess, op, fd, bpid);
-		break;
-	case DPAA2_SEC_IPSEC:
-		ret = build_proto_fd(sess, op, fd, bpid);
-		break;
-	case DPAA2_SEC_HASH_CIPHER:
-	default:
-		RTE_LOG(ERR, PMD, "error: Unsupported session\n");
+	/* Segmented buffer */
+	if (unlikely(!rte_pktmbuf_is_contiguous(op->sym->m_src))) {
+		switch (sess->ctxt_type) {
+		case DPAA2_SEC_CIPHER:
+			ret = build_cipher_sg_fd(sess, op, fd, bpid);
+			break;
+		case DPAA2_SEC_AUTH:
+			ret = build_auth_sg_fd(sess, op, fd, bpid);
+			break;
+		case DPAA2_SEC_AEAD:
+			ret = build_authenc_gcm_sg_fd(sess, op, fd, bpid);
+			break;
+		case DPAA2_SEC_CIPHER_HASH:
+			ret = build_authenc_sg_fd(sess, op, fd, bpid);
+			break;
+		case DPAA2_SEC_HASH_CIPHER:
+		default:
+			RTE_LOG(ERR, PMD, "error: Unsupported session\n");
+		}
+	} else {
+		switch (sess->ctxt_type) {
+		case DPAA2_SEC_CIPHER:
+			ret = build_cipher_fd(sess, op, fd, bpid);
+			break;
+		case DPAA2_SEC_AUTH:
+			ret = build_auth_fd(sess, op, fd, bpid);
+			break;
+		case DPAA2_SEC_AEAD:
+			ret = build_authenc_gcm_fd(sess, op, fd, bpid);
+			break;
+		case DPAA2_SEC_CIPHER_HASH:
+			ret = build_authenc_fd(sess, op, fd, bpid);
+			break;
+		case DPAA2_SEC_IPSEC:
+			ret = build_proto_fd(sess, op, fd, bpid);
+			break;
+		case DPAA2_SEC_HASH_CIPHER:
+		default:
+			RTE_LOG(ERR, PMD, "error: Unsupported session\n");
+		}
 	}
 	return ret;
 }
@@ -766,8 +1274,11 @@ sec_fd_to_mbuf(const struct qbman_fd *fd, uint8_t driver_id)
 		   DPAA2_GET_FD_LEN(fd));
 
 	/* free the fle memory */
-	priv = (struct ctxt_priv *)DPAA2_GET_FLE_CTXT(fle - 1);
-	rte_mempool_put(priv->fle_pool, (void *)(fle - 1));
+	if (likely(rte_pktmbuf_is_contiguous(src))) {
+		priv = (struct ctxt_priv *)DPAA2_GET_FLE_CTXT(fle - 1);
+		rte_mempool_put(priv->fle_pool, (void *)(fle-1));
+	} else
+		rte_free((void *)(fle-1));
 
 	return op;
 }
@@ -2252,7 +2763,8 @@ dpaa2_sec_dev_init(struct rte_cryptodev *cryptodev)
 	cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
 			RTE_CRYPTODEV_FF_HW_ACCELERATED |
 			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
-			RTE_CRYPTODEV_FF_SECURITY;
+			RTE_CRYPTODEV_FF_SECURITY |
+			RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER;
 
 	internals = cryptodev->data->dev_private;
 	internals->max_nb_sessions = RTE_DPAA2_SEC_PMD_MAX_NB_SESSIONS;
diff --git a/test/test/test_cryptodev.c b/test/test/test_cryptodev.c
index 6dbc764..6f22896 100644
--- a/test/test/test_cryptodev.c
+++ b/test/test/test_cryptodev.c
@@ -9492,6 +9492,16 @@ static struct unit_test_suite cryptodev_dpaa2_sec_testsuite  = {
 		TEST_CASE_ST(ut_setup, ut_teardown,
 			test_AES_GCM_authenticated_decryption_oop_test_case_1),
 
+		/** Scatter-Gather */
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_AES_GCM_auth_encrypt_SGL_in_place_1500B),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_AES_GCM_auth_encrypt_SGL_out_of_place_400B_400B),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_AES_GCM_auth_encrypt_SGL_out_of_place_400B_1seg),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_AES_GCM_auth_encrypt_SGL_out_of_place_1500B_2000B),
+
 		TEST_CASES_END() /**< NULL terminate unit test array */
 	}
 };
diff --git a/test/test/test_cryptodev_aes_test_vectors.h b/test/test/test_cryptodev_aes_test_vectors.h
index 3fc3c2b..20f5285 100644
--- a/test/test/test_cryptodev_aes_test_vectors.h
+++ b/test/test/test_cryptodev_aes_test_vectors.h
@@ -1269,7 +1269,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_MB |
 			BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
 			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
-			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
+			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
 	},
 	{
 		.test_descr = "AES-128-CBC HMAC-SHA1 Decryption Digest "
@@ -1287,6 +1288,14 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
 	},
 	{
 		.test_descr = "AES-128-CBC HMAC-SHA1 Decryption Digest "
+			"Verify Scatter Gather",
+		.test_data = &aes_test_data_4,
+		.op_mask = BLOCKCIPHER_TEST_OP_AUTH_VERIFY_DEC,
+		.feature_mask = BLOCKCIPHER_TEST_FEATURE_SG,
+		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
+	},
+	{
+		.test_descr = "AES-128-CBC HMAC-SHA1 Decryption Digest "
 			"Verify (short buffers)",
 		.test_data = &aes_test_data_13,
 		.op_mask = BLOCKCIPHER_TEST_OP_AUTH_VERIFY_DEC,
@@ -1391,6 +1400,7 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_MB |
 			BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
 			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
 	},
 	{
@@ -1544,7 +1554,8 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
 		.op_mask = BLOCKCIPHER_TEST_OP_ENCRYPT,
 		.feature_mask = BLOCKCIPHER_TEST_FEATURE_SG |
 			BLOCKCIPHER_TEST_FEATURE_OOP,
-		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
+		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
 	},
 	{
 		.test_descr = "AES-192-CBC Decryption",
@@ -1558,6 +1569,13 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC
 	},
 	{
+		.test_descr = "AES-192-CBC Decryption Scatter Gather",
+		.test_data = &aes_test_data_10,
+		.op_mask = BLOCKCIPHER_TEST_OP_DECRYPT,
+		.feature_mask = BLOCKCIPHER_TEST_FEATURE_SG,
+		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
+	},
+	{
 		.test_descr = "AES-256-CBC Encryption",
 		.test_data = &aes_test_data_11,
 		.op_mask = BLOCKCIPHER_TEST_OP_ENCRYPT,
diff --git a/test/test/test_cryptodev_hash_test_vectors.h b/test/test/test_cryptodev_hash_test_vectors.h
index 2215b86..bd793f4 100644
--- a/test/test/test_cryptodev_hash_test_vectors.h
+++ b/test/test/test_cryptodev_hash_test_vectors.h
@@ -378,6 +378,13 @@ static const struct blockcipher_test_case hash_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_QAT
 	},
 	{
+		.test_descr = "HMAC-SHA1 Digest Scatter Gather",
+		.test_data = &hmac_sha1_test_vector,
+		.op_mask = BLOCKCIPHER_TEST_OP_AUTH_GEN,
+		.feature_mask = BLOCKCIPHER_TEST_FEATURE_SG,
+		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
+	},
+	{
 		.test_descr = "HMAC-SHA1 Digest Verify",
 		.test_data = &hmac_sha1_test_vector,
 		.op_mask = BLOCKCIPHER_TEST_OP_AUTH_VERIFY,
@@ -389,6 +396,13 @@ static const struct blockcipher_test_case hash_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_QAT
 	},
 	{
+		.test_descr = "HMAC-SHA1 Digest Verify Scatter Gather",
+		.test_data = &hmac_sha1_test_vector,
+		.op_mask = BLOCKCIPHER_TEST_OP_AUTH_VERIFY,
+		.feature_mask = BLOCKCIPHER_TEST_FEATURE_SG,
+		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
+	},
+	{
 		.test_descr = "SHA224 Digest",
 		.test_data = &sha224_test_vector,
 		.op_mask = BLOCKCIPHER_TEST_OP_AUTH_GEN,
-- 
2.9.3

^ permalink raw reply	[flat|nested] 10+ messages in thread

* [dpdk-dev] [PATCH v2 3/3] crypto/dpaa_sec: support for scatter gather
  2018-01-22  8:46 ` [dpdk-dev] [PATCH v2 1/3] doc: update feature list for cryptodevs Akhil Goyal
  2018-01-22  8:46   ` [dpdk-dev] [PATCH v2 2/3] crypto/dpaa2_sec: support for scatter gather Akhil Goyal
@ 2018-01-22  8:46   ` Akhil Goyal
  2018-01-22 11:55   ` [dpdk-dev] [PATCH v2 1/3] doc: update feature list for cryptodevs De Lara Guarch, Pablo
  2018-01-22 12:22   ` De Lara Guarch, Pablo
  3 siblings, 0 replies; 10+ messages in thread
From: Akhil Goyal @ 2018-01-22  8:46 UTC (permalink / raw)
  To: dev
  Cc: pablo.de.lara.guarch, hemant.agrawal, alok.makhariya,
	john.griffin, fiona.trahe, deepak.k.jain, declan.doherty,
	Akhil Goyal

Signed-off-by: Alok Makhariya <alok.makhariya@nxp.com>
Signed-off-by: Akhil Goyal <akhil.goyal@nxp.com>
Acked-by: Hemant Agrawal <hemant.agrawal@nxp.com>
---
 doc/guides/cryptodevs/features/dpaa_sec.ini  |   1 +
 drivers/crypto/dpaa_sec/dpaa_sec.c           | 501 +++++++++++++++++++++++++--
 test/test/test_cryptodev.c                   |  10 +
 test/test/test_cryptodev_aes_test_vectors.h  |  11 +-
 test/test/test_cryptodev_hash_test_vectors.h |   6 +-
 5 files changed, 498 insertions(+), 31 deletions(-)

diff --git a/doc/guides/cryptodevs/features/dpaa_sec.ini b/doc/guides/cryptodevs/features/dpaa_sec.ini
index deab53a..260fae7 100644
--- a/doc/guides/cryptodevs/features/dpaa_sec.ini
+++ b/doc/guides/cryptodevs/features/dpaa_sec.ini
@@ -8,6 +8,7 @@ Symmetric crypto       = Y
 Sym operation chaining = Y
 HW Accelerated         = Y
 Protocol offload       = Y
+Mbuf scatter gather    = Y
 
 ;
 ; Supported crypto algorithms of the 'dpaa_sec' crypto driver.
diff --git a/drivers/crypto/dpaa_sec/dpaa_sec.c b/drivers/crypto/dpaa_sec/dpaa_sec.c
index a402e61..18681cf 100644
--- a/drivers/crypto/dpaa_sec/dpaa_sec.c
+++ b/drivers/crypto/dpaa_sec/dpaa_sec.c
@@ -599,6 +599,86 @@ dpaa_sec_deq(struct dpaa_sec_qp *qp, struct rte_crypto_op **ops, int nb_ops)
 	return pkts;
 }
 
+static inline struct dpaa_sec_job *
+build_auth_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
+{
+	struct rte_crypto_sym_op *sym = op->sym;
+	struct rte_mbuf *mbuf = sym->m_src;
+	struct dpaa_sec_job *cf;
+	struct dpaa_sec_op_ctx *ctx;
+	struct qm_sg_entry *sg, *out_sg, *in_sg;
+	phys_addr_t start_addr;
+	uint8_t *old_digest, extra_segs;
+
+	if (is_decode(ses))
+		extra_segs = 3;
+	else
+		extra_segs = 2;
+
+	if ((mbuf->nb_segs + extra_segs) > MAX_SG_ENTRIES) {
+		PMD_TX_LOG(ERR, "Auth: Max sec segs supported is %d\n",
+								MAX_SG_ENTRIES);
+		return NULL;
+	}
+	ctx = dpaa_sec_alloc_ctx(ses);
+	if (!ctx)
+		return NULL;
+
+	cf = &ctx->job;
+	ctx->op = op;
+	old_digest = ctx->digest;
+
+	/* output */
+	out_sg = &cf->sg[0];
+	qm_sg_entry_set64(out_sg, sym->auth.digest.phys_addr);
+	out_sg->length = ses->digest_length;
+	cpu_to_hw_sg(out_sg);
+
+	/* input */
+	in_sg = &cf->sg[1];
+	/* need to extend the input to a compound frame */
+	in_sg->extension = 1;
+	in_sg->final = 1;
+	in_sg->length = sym->auth.data.length;
+	qm_sg_entry_set64(in_sg, dpaa_mem_vtop_ctx(ctx, &cf->sg[2]));
+
+	/* 1st seg */
+	sg = in_sg + 1;
+	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
+	sg->length = mbuf->data_len - sym->auth.data.offset;
+	sg->offset = sym->auth.data.offset;
+
+	/* Successive segs */
+	mbuf = mbuf->next;
+	while (mbuf) {
+		cpu_to_hw_sg(sg);
+		sg++;
+		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
+		sg->length = mbuf->data_len;
+		mbuf = mbuf->next;
+	}
+
+	if (is_decode(ses)) {
+		/* Digest verification case */
+		cpu_to_hw_sg(sg);
+		sg++;
+		rte_memcpy(old_digest, sym->auth.digest.data,
+				ses->digest_length);
+		start_addr = dpaa_mem_vtop_ctx(ctx, old_digest);
+		qm_sg_entry_set64(sg, start_addr);
+		sg->length = ses->digest_length;
+		in_sg->length += ses->digest_length;
+	} else {
+		/* Digest calculation case */
+		sg->length -= ses->digest_length;
+	}
+	sg->final = 1;
+	cpu_to_hw_sg(sg);
+	cpu_to_hw_sg(in_sg);
+
+	return cf;
+}
+
 /**
  * packet looks like:
  *		|<----data_len------->|
@@ -669,6 +749,101 @@ build_auth_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
 }
 
 static inline struct dpaa_sec_job *
+build_cipher_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
+{
+	struct rte_crypto_sym_op *sym = op->sym;
+	struct dpaa_sec_job *cf;
+	struct dpaa_sec_op_ctx *ctx;
+	struct qm_sg_entry *sg, *out_sg, *in_sg;
+	struct rte_mbuf *mbuf;
+	uint8_t req_segs;
+	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
+			ses->iv.offset);
+
+	if (sym->m_dst) {
+		mbuf = sym->m_dst;
+		req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 3;
+	} else {
+		mbuf = sym->m_src;
+		req_segs = mbuf->nb_segs * 2 + 3;
+	}
+
+	if (req_segs > MAX_SG_ENTRIES) {
+		PMD_TX_LOG(ERR, "Cipher: Max sec segs supported is %d\n",
+								MAX_SG_ENTRIES);
+		return NULL;
+	}
+
+	ctx = dpaa_sec_alloc_ctx(ses);
+	if (!ctx)
+		return NULL;
+
+	cf = &ctx->job;
+	ctx->op = op;
+
+	/* output */
+	out_sg = &cf->sg[0];
+	out_sg->extension = 1;
+	out_sg->length = sym->cipher.data.length;
+	qm_sg_entry_set64(out_sg, dpaa_mem_vtop_ctx(ctx, &cf->sg[2]));
+	cpu_to_hw_sg(out_sg);
+
+	/* 1st seg */
+	sg = &cf->sg[2];
+	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
+	sg->length = mbuf->data_len - sym->cipher.data.offset;
+	sg->offset = sym->cipher.data.offset;
+
+	/* Successive segs */
+	mbuf = mbuf->next;
+	while (mbuf) {
+		cpu_to_hw_sg(sg);
+		sg++;
+		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
+		sg->length = mbuf->data_len;
+		mbuf = mbuf->next;
+	}
+	sg->final = 1;
+	cpu_to_hw_sg(sg);
+
+	/* input */
+	mbuf = sym->m_src;
+	in_sg = &cf->sg[1];
+	in_sg->extension = 1;
+	in_sg->final = 1;
+	in_sg->length = sym->cipher.data.length + ses->iv.length;
+
+	sg++;
+	qm_sg_entry_set64(in_sg, dpaa_mem_vtop_ctx(ctx, sg));
+	cpu_to_hw_sg(in_sg);
+
+	/* IV */
+	qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
+	sg->length = ses->iv.length;
+	cpu_to_hw_sg(sg);
+
+	/* 1st seg */
+	sg++;
+	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
+	sg->length = mbuf->data_len - sym->cipher.data.offset;
+	sg->offset = sym->cipher.data.offset;
+
+	/* Successive segs */
+	mbuf = mbuf->next;
+	while (mbuf) {
+		cpu_to_hw_sg(sg);
+		sg++;
+		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
+		sg->length = mbuf->data_len;
+		mbuf = mbuf->next;
+	}
+	sg->final = 1;
+	cpu_to_hw_sg(sg);
+
+	return cf;
+}
+
+static inline struct dpaa_sec_job *
 build_cipher_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
 {
 	struct rte_crypto_sym_op *sym = op->sym;
@@ -724,6 +899,145 @@ build_cipher_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
 }
 
 static inline struct dpaa_sec_job *
+build_cipher_auth_gcm_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
+{
+	struct rte_crypto_sym_op *sym = op->sym;
+	struct dpaa_sec_job *cf;
+	struct dpaa_sec_op_ctx *ctx;
+	struct qm_sg_entry *sg, *out_sg, *in_sg;
+	struct rte_mbuf *mbuf;
+	uint8_t req_segs;
+	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
+			ses->iv.offset);
+
+	if (sym->m_dst) {
+		mbuf = sym->m_dst;
+		req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 4;
+	} else {
+		mbuf = sym->m_src;
+		req_segs = mbuf->nb_segs * 2 + 4;
+	}
+
+	if (ses->auth_only_len)
+		req_segs++;
+
+	if (req_segs > MAX_SG_ENTRIES) {
+		PMD_TX_LOG(ERR, "AEAD: Max sec segs supported is %d\n",
+				MAX_SG_ENTRIES);
+		return NULL;
+	}
+
+	ctx = dpaa_sec_alloc_ctx(ses);
+	if (!ctx)
+		return NULL;
+
+	cf = &ctx->job;
+	ctx->op = op;
+
+	rte_prefetch0(cf->sg);
+
+	/* output */
+	out_sg = &cf->sg[0];
+	out_sg->extension = 1;
+	if (is_encode(ses))
+		out_sg->length = sym->aead.data.length + ses->auth_only_len
+						+ ses->digest_length;
+	else
+		out_sg->length = sym->aead.data.length + ses->auth_only_len;
+
+	/* output sg entries */
+	sg = &cf->sg[2];
+	qm_sg_entry_set64(out_sg, dpaa_mem_vtop_ctx(ctx, sg));
+	cpu_to_hw_sg(out_sg);
+
+	/* 1st seg */
+	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
+	sg->length = mbuf->data_len - sym->aead.data.offset +
+					ses->auth_only_len;
+	sg->offset = sym->aead.data.offset - ses->auth_only_len;
+
+	/* Successive segs */
+	mbuf = mbuf->next;
+	while (mbuf) {
+		cpu_to_hw_sg(sg);
+		sg++;
+		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
+		sg->length = mbuf->data_len;
+		mbuf = mbuf->next;
+	}
+	sg->length -= ses->digest_length;
+
+	if (is_encode(ses)) {
+		cpu_to_hw_sg(sg);
+		/* set auth output */
+		sg++;
+		qm_sg_entry_set64(sg, sym->aead.digest.phys_addr);
+		sg->length = ses->digest_length;
+	}
+	sg->final = 1;
+	cpu_to_hw_sg(sg);
+
+	/* input */
+	mbuf = sym->m_src;
+	in_sg = &cf->sg[1];
+	in_sg->extension = 1;
+	in_sg->final = 1;
+	if (is_encode(ses))
+		in_sg->length = ses->iv.length + sym->aead.data.length
+							+ ses->auth_only_len;
+	else
+		in_sg->length = ses->iv.length + sym->aead.data.length
+				+ ses->auth_only_len + ses->digest_length;
+
+	/* input sg entries */
+	sg++;
+	qm_sg_entry_set64(in_sg, dpaa_mem_vtop_ctx(ctx, sg));
+	cpu_to_hw_sg(in_sg);
+
+	/* 1st seg IV */
+	qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
+	sg->length = ses->iv.length;
+	cpu_to_hw_sg(sg);
+
+	/* 2nd seg auth only */
+	if (ses->auth_only_len) {
+		sg++;
+		qm_sg_entry_set64(sg, dpaa_mem_vtop(sym->aead.aad.data));
+		sg->length = ses->auth_only_len;
+		cpu_to_hw_sg(sg);
+	}
+
+	/* 3rd seg */
+	sg++;
+	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
+	sg->length = mbuf->data_len - sym->aead.data.offset;
+	sg->offset = sym->aead.data.offset;
+
+	/* Successive segs */
+	mbuf = mbuf->next;
+	while (mbuf) {
+		cpu_to_hw_sg(sg);
+		sg++;
+		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
+		sg->length = mbuf->data_len;
+		mbuf = mbuf->next;
+	}
+
+	if (is_decode(ses)) {
+		cpu_to_hw_sg(sg);
+		sg++;
+		memcpy(ctx->digest, sym->aead.digest.data,
+			ses->digest_length);
+		qm_sg_entry_set64(sg, dpaa_mem_vtop_ctx(ctx, ctx->digest));
+		sg->length = ses->digest_length;
+	}
+	sg->final = 1;
+	cpu_to_hw_sg(sg);
+
+	return cf;
+}
+
+static inline struct dpaa_sec_job *
 build_cipher_auth_gcm(struct rte_crypto_op *op, dpaa_sec_session *ses)
 {
 	struct rte_crypto_sym_op *sym = op->sym;
@@ -836,6 +1150,132 @@ build_cipher_auth_gcm(struct rte_crypto_op *op, dpaa_sec_session *ses)
 }
 
 static inline struct dpaa_sec_job *
+build_cipher_auth_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
+{
+	struct rte_crypto_sym_op *sym = op->sym;
+	struct dpaa_sec_job *cf;
+	struct dpaa_sec_op_ctx *ctx;
+	struct qm_sg_entry *sg, *out_sg, *in_sg;
+	struct rte_mbuf *mbuf;
+	uint8_t req_segs;
+	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
+			ses->iv.offset);
+
+	if (sym->m_dst) {
+		mbuf = sym->m_dst;
+		req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 4;
+	} else {
+		mbuf = sym->m_src;
+		req_segs = mbuf->nb_segs * 2 + 4;
+	}
+
+	if (req_segs > MAX_SG_ENTRIES) {
+		PMD_TX_LOG(ERR, "Cipher-Auth: Max sec segs supported is %d\n",
+				MAX_SG_ENTRIES);
+		return NULL;
+	}
+
+	ctx = dpaa_sec_alloc_ctx(ses);
+	if (!ctx)
+		return NULL;
+
+	cf = &ctx->job;
+	ctx->op = op;
+
+	rte_prefetch0(cf->sg);
+
+	/* output */
+	out_sg = &cf->sg[0];
+	out_sg->extension = 1;
+	if (is_encode(ses))
+		out_sg->length = sym->auth.data.length + ses->digest_length;
+	else
+		out_sg->length = sym->auth.data.length;
+
+	/* output sg entries */
+	sg = &cf->sg[2];
+	qm_sg_entry_set64(out_sg, dpaa_mem_vtop_ctx(ctx, sg));
+	cpu_to_hw_sg(out_sg);
+
+	/* 1st seg */
+	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
+	sg->length = mbuf->data_len - sym->auth.data.offset;
+	sg->offset = sym->auth.data.offset;
+
+	/* Successive segs */
+	mbuf = mbuf->next;
+	while (mbuf) {
+		cpu_to_hw_sg(sg);
+		sg++;
+		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
+		sg->length = mbuf->data_len;
+		mbuf = mbuf->next;
+	}
+	sg->length -= ses->digest_length;
+
+	if (is_encode(ses)) {
+		cpu_to_hw_sg(sg);
+		/* set auth output */
+		sg++;
+		qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
+		sg->length = ses->digest_length;
+	}
+	sg->final = 1;
+	cpu_to_hw_sg(sg);
+
+	/* input */
+	mbuf = sym->m_src;
+	in_sg = &cf->sg[1];
+	in_sg->extension = 1;
+	in_sg->final = 1;
+	if (is_encode(ses))
+		in_sg->length = ses->iv.length + sym->auth.data.length;
+	else
+		in_sg->length = ses->iv.length + sym->auth.data.length
+						+ ses->digest_length;
+
+	/* input sg entries */
+	sg++;
+	qm_sg_entry_set64(in_sg, dpaa_mem_vtop_ctx(ctx, sg));
+	cpu_to_hw_sg(in_sg);
+
+	/* 1st seg IV */
+	qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
+	sg->length = ses->iv.length;
+	cpu_to_hw_sg(sg);
+
+	/* 2nd seg */
+	sg++;
+	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
+	sg->length = mbuf->data_len - sym->auth.data.offset;
+	sg->offset = sym->auth.data.offset;
+
+	/* Successive segs */
+	mbuf = mbuf->next;
+	while (mbuf) {
+		cpu_to_hw_sg(sg);
+		sg++;
+		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
+		sg->length = mbuf->data_len;
+		mbuf = mbuf->next;
+	}
+
+	sg->length -= ses->digest_length;
+	if (is_decode(ses)) {
+		cpu_to_hw_sg(sg);
+		sg++;
+		memcpy(ctx->digest, sym->auth.digest.data,
+			ses->digest_length);
+		qm_sg_entry_set64(sg, dpaa_mem_vtop_ctx(ctx, ctx->digest));
+		sg->length = ses->digest_length;
+	}
+	sg->final = 1;
+	cpu_to_hw_sg(sg);
+
+	return cf;
+}
+
+static inline struct dpaa_sec_job *
 build_cipher_auth(struct rte_crypto_op *op, dpaa_sec_session *ses)
 {
 	struct rte_crypto_sym_op *sym = op->sym;
@@ -1020,34 +1460,42 @@ dpaa_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
 				}
 			}
 
-			/*
-			 * Segmented buffer is not supported.
-			 */
-			if (!rte_pktmbuf_is_contiguous(op->sym->m_src)) {
-				op->status = RTE_CRYPTO_OP_STATUS_ERROR;
-				frames_to_send = loop;
-				nb_ops = loop;
-				goto send_pkts;
-			}
 			auth_only_len = op->sym->auth.data.length -
 						op->sym->cipher.data.length;
-
-			if (is_auth_only(ses)) {
-				cf = build_auth_only(op, ses);
-			} else if (is_cipher_only(ses)) {
-				cf = build_cipher_only(op, ses);
-			} else if (is_aead(ses)) {
-				cf = build_cipher_auth_gcm(op, ses);
-				auth_only_len = ses->auth_only_len;
-			} else if (is_auth_cipher(ses)) {
-				cf = build_cipher_auth(op, ses);
-			} else if (is_proto_ipsec(ses)) {
-				cf = build_proto(op, ses);
+			if (rte_pktmbuf_is_contiguous(op->sym->m_src)) {
+				if (is_auth_only(ses)) {
+					cf = build_auth_only(op, ses);
+				} else if (is_cipher_only(ses)) {
+					cf = build_cipher_only(op, ses);
+				} else if (is_aead(ses)) {
+					cf = build_cipher_auth_gcm(op, ses);
+					auth_only_len = ses->auth_only_len;
+				} else if (is_auth_cipher(ses)) {
+					cf = build_cipher_auth(op, ses);
+				} else if (is_proto_ipsec(ses)) {
+					cf = build_proto(op, ses);
+				} else {
+					PMD_TX_LOG(ERR, "not supported sec op");
+					frames_to_send = loop;
+					nb_ops = loop;
+					goto send_pkts;
+				}
 			} else {
-				PMD_TX_LOG(ERR, "not supported sec op");
-				frames_to_send = loop;
-				nb_ops = loop;
-				goto send_pkts;
+				if (is_auth_only(ses)) {
+					cf = build_auth_only_sg(op, ses);
+				} else if (is_cipher_only(ses)) {
+					cf = build_cipher_only_sg(op, ses);
+				} else if (is_aead(ses)) {
+					cf = build_cipher_auth_gcm_sg(op, ses);
+					auth_only_len = ses->auth_only_len;
+				} else if (is_auth_cipher(ses)) {
+					cf = build_cipher_auth_sg(op, ses);
+				} else {
+					PMD_TX_LOG(ERR, "not supported sec op");
+					frames_to_send = loop;
+					nb_ops = loop;
+					goto send_pkts;
+				}
 			}
 			if (unlikely(!cf)) {
 				frames_to_send = loop;
@@ -1834,7 +2282,8 @@ dpaa_sec_dev_init(struct rte_cryptodev *cryptodev)
 	cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
 			RTE_CRYPTODEV_FF_HW_ACCELERATED |
 			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
-			RTE_CRYPTODEV_FF_SECURITY;
+			RTE_CRYPTODEV_FF_SECURITY |
+			RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER;
 
 	internals = cryptodev->data->dev_private;
 	internals->max_nb_queue_pairs = RTE_DPAA_MAX_NB_SEC_QPS;
diff --git a/test/test/test_cryptodev.c b/test/test/test_cryptodev.c
index 6f22896..0f2a045 100644
--- a/test/test/test_cryptodev.c
+++ b/test/test/test_cryptodev.c
@@ -9365,6 +9365,16 @@ static struct unit_test_suite cryptodev_dpaa_sec_testsuite  = {
 		TEST_CASE_ST(ut_setup, ut_teardown,
 			test_AES_GCM_authenticated_decryption_oop_test_case_1),
 
+		/** Scatter-Gather */
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_AES_GCM_auth_encrypt_SGL_in_place_1500B),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_AES_GCM_auth_encrypt_SGL_out_of_place_400B_400B),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_AES_GCM_auth_encrypt_SGL_out_of_place_400B_1seg),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_AES_GCM_auth_encrypt_SGL_out_of_place_1500B_2000B),
+
 		TEST_CASES_END() /**< NULL terminate unit test array */
 	}
 };
diff --git a/test/test/test_cryptodev_aes_test_vectors.h b/test/test/test_cryptodev_aes_test_vectors.h
index 20f5285..3577ef4 100644
--- a/test/test/test_cryptodev_aes_test_vectors.h
+++ b/test/test/test_cryptodev_aes_test_vectors.h
@@ -1270,7 +1270,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
 			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
-			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC
 	},
 	{
 		.test_descr = "AES-128-CBC HMAC-SHA1 Decryption Digest "
@@ -1292,7 +1293,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
 		.test_data = &aes_test_data_4,
 		.op_mask = BLOCKCIPHER_TEST_OP_AUTH_VERIFY_DEC,
 		.feature_mask = BLOCKCIPHER_TEST_FEATURE_SG,
-		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
+		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
+			    BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC
 	},
 	{
 		.test_descr = "AES-128-CBC HMAC-SHA1 Decryption Digest "
@@ -1401,6 +1403,7 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
 			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
 	},
 	{
@@ -1555,6 +1558,7 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
 		.feature_mask = BLOCKCIPHER_TEST_FEATURE_SG |
 			BLOCKCIPHER_TEST_FEATURE_OOP,
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
 	},
 	{
@@ -1573,7 +1577,8 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
 		.test_data = &aes_test_data_10,
 		.op_mask = BLOCKCIPHER_TEST_OP_DECRYPT,
 		.feature_mask = BLOCKCIPHER_TEST_FEATURE_SG,
-		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
+		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC
 	},
 	{
 		.test_descr = "AES-256-CBC Encryption",
diff --git a/test/test/test_cryptodev_hash_test_vectors.h b/test/test/test_cryptodev_hash_test_vectors.h
index bd793f4..93dacb7 100644
--- a/test/test/test_cryptodev_hash_test_vectors.h
+++ b/test/test/test_cryptodev_hash_test_vectors.h
@@ -382,7 +382,8 @@ static const struct blockcipher_test_case hash_test_cases[] = {
 		.test_data = &hmac_sha1_test_vector,
 		.op_mask = BLOCKCIPHER_TEST_OP_AUTH_GEN,
 		.feature_mask = BLOCKCIPHER_TEST_FEATURE_SG,
-		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
+		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
+			    BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC
 	},
 	{
 		.test_descr = "HMAC-SHA1 Digest Verify",
@@ -400,7 +401,8 @@ static const struct blockcipher_test_case hash_test_cases[] = {
 		.test_data = &hmac_sha1_test_vector,
 		.op_mask = BLOCKCIPHER_TEST_OP_AUTH_VERIFY,
 		.feature_mask = BLOCKCIPHER_TEST_FEATURE_SG,
-		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
+		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
+			    BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC
 	},
 	{
 		.test_descr = "SHA224 Digest",
-- 
2.9.3

^ permalink raw reply	[flat|nested] 10+ messages in thread

* Re: [dpdk-dev] [PATCH v2 1/3] doc: update feature list for cryptodevs
  2018-01-22  8:46 ` [dpdk-dev] [PATCH v2 1/3] doc: update feature list for cryptodevs Akhil Goyal
  2018-01-22  8:46   ` [dpdk-dev] [PATCH v2 2/3] crypto/dpaa2_sec: support for scatter gather Akhil Goyal
  2018-01-22  8:46   ` [dpdk-dev] [PATCH v2 3/3] crypto/dpaa_sec: " Akhil Goyal
@ 2018-01-22 11:55   ` De Lara Guarch, Pablo
  2018-01-22 12:22   ` De Lara Guarch, Pablo
  3 siblings, 0 replies; 10+ messages in thread
From: De Lara Guarch, Pablo @ 2018-01-22 11:55 UTC (permalink / raw)
  To: Akhil Goyal, dev
  Cc: hemant.agrawal, alok.makhariya, Griffin, John, Trahe, Fiona,
	Jain, Deepak K, Doherty, Declan



> -----Original Message-----
> From: Akhil Goyal [mailto:akhil.goyal@nxp.com]
> Sent: Monday, January 22, 2018 8:47 AM
> To: dev@dpdk.org
> Cc: De Lara Guarch, Pablo <pablo.de.lara.guarch@intel.com>;
> hemant.agrawal@nxp.com; alok.makhariya@nxp.com; Griffin, John
> <john.griffin@intel.com>; Trahe, Fiona <fiona.trahe@intel.com>; Jain,
> Deepak K <deepak.k.jain@intel.com>; Doherty, Declan
> <declan.doherty@intel.com>; Akhil Goyal <akhil.goyal@nxp.com>
> Subject: [PATCH v2 1/3] doc: update feature list for cryptodevs
> 
> Signed-off-by: Akhil Goyal <akhil.goyal@nxp.com>

Acked-by: Pablo de Lara <pablo.de.lara.guarch@intel.com>

^ permalink raw reply	[flat|nested] 10+ messages in thread

* Re: [dpdk-dev] [PATCH v2 1/3] doc: update feature list for cryptodevs
  2018-01-22  8:46 ` [dpdk-dev] [PATCH v2 1/3] doc: update feature list for cryptodevs Akhil Goyal
                     ` (2 preceding siblings ...)
  2018-01-22 11:55   ` [dpdk-dev] [PATCH v2 1/3] doc: update feature list for cryptodevs De Lara Guarch, Pablo
@ 2018-01-22 12:22   ` De Lara Guarch, Pablo
  3 siblings, 0 replies; 10+ messages in thread
From: De Lara Guarch, Pablo @ 2018-01-22 12:22 UTC (permalink / raw)
  To: Akhil Goyal, dev
  Cc: hemant.agrawal, alok.makhariya, Griffin, John, Trahe, Fiona,
	Jain, Deepak K, Doherty, Declan



> -----Original Message-----
> From: Akhil Goyal [mailto:akhil.goyal@nxp.com]
> Sent: Monday, January 22, 2018 8:47 AM
> To: dev@dpdk.org
> Cc: De Lara Guarch, Pablo <pablo.de.lara.guarch@intel.com>;
> hemant.agrawal@nxp.com; alok.makhariya@nxp.com; Griffin, John
> <john.griffin@intel.com>; Trahe, Fiona <fiona.trahe@intel.com>; Jain,
> Deepak K <deepak.k.jain@intel.com>; Doherty, Declan
> <declan.doherty@intel.com>; Akhil Goyal <akhil.goyal@nxp.com>
> Subject: [PATCH v2 1/3] doc: update feature list for cryptodevs
> 
> Signed-off-by: Akhil Goyal <akhil.goyal@nxp.com>

Applied to dpdk-next-crypto.
Thanks,

Pablo

^ permalink raw reply	[flat|nested] 10+ messages in thread

end of thread, other threads:[~2018-01-22 12:22 UTC | newest]

Thread overview: 10+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2018-01-19 11:51 [dpdk-dev] [PATCH 1/2] crypto/dpaa2_sec: support for scatter gather Akhil Goyal
2018-01-19 11:51 ` [dpdk-dev] [PATCH 2/2] crypto/dpaa_sec: " Akhil Goyal
2018-01-19 14:08   ` Hemant Agrawal
2018-01-19 14:06 ` [dpdk-dev] [PATCH 1/2] crypto/dpaa2_sec: " Hemant Agrawal
2018-01-19 20:45 ` De Lara Guarch, Pablo
2018-01-22  8:46 ` [dpdk-dev] [PATCH v2 1/3] doc: update feature list for cryptodevs Akhil Goyal
2018-01-22  8:46   ` [dpdk-dev] [PATCH v2 2/3] crypto/dpaa2_sec: support for scatter gather Akhil Goyal
2018-01-22  8:46   ` [dpdk-dev] [PATCH v2 3/3] crypto/dpaa_sec: " Akhil Goyal
2018-01-22 11:55   ` [dpdk-dev] [PATCH v2 1/3] doc: update feature list for cryptodevs De Lara Guarch, Pablo
2018-01-22 12:22   ` De Lara Guarch, Pablo
