DPDK patches and discussions
From: Ciara Power <ciara.power@intel.com>
To: dev@dpdk.org
Cc: roy.fan.zhang@intel.com, kai.ji@intel.com,
	Ciara Power <ciara.power@intel.com>,
	Pablo de Lara <pablo.de.lara.guarch@intel.com>
Subject: [PATCH 1/3] crypto/ipsec_mb: add GCM sgl support to aesni_mb
Date: Thu,  7 Apr 2022 10:30:39 +0000
Message-ID: <20220407103041.4037942-2-ciara.power@intel.com>
In-Reply-To: <20220407103041.4037942-1-ciara.power@intel.com>

Add SGL support for the GCM algorithm through the JOB API.

This change supports in-place SGL, out-of-place (OOP) SGL input with
linear buffer (LB) output, and OOP SGL input with SGL output.

Feature flags are not added, as the PMD does not yet support SGL for
all other algorithms.

Signed-off-by: Ciara Power <ciara.power@intel.com>
---
 drivers/crypto/ipsec_mb/pmd_aesni_mb.c      | 144 +++++++++++++++++++-
 drivers/crypto/ipsec_mb/pmd_aesni_mb_priv.h |   2 +
 2 files changed, 142 insertions(+), 4 deletions(-)
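
Note (not part of the commit, for reviewers only): a minimal sketch of how an
application could build a chained-mbuf AEAD operation that exercises the new
SGL path. The names mbuf_pool, op_pool, sess, seg_len and nb_segs are
hypothetical, and session setup, AAD, digest and IV handling are omitted for
brevity; this is an illustration under those assumptions, not part of the PMD.

#include <rte_crypto.h>
#include <rte_mbuf.h>

static struct rte_crypto_op *
build_sgl_gcm_op(struct rte_mempool *mbuf_pool, struct rte_mempool *op_pool,
		void *sess, uint16_t seg_len, uint16_t nb_segs)
{
	struct rte_mbuf *head = rte_pktmbuf_alloc(mbuf_pool);
	struct rte_crypto_op *op;
	uint16_t i;

	if (head == NULL || rte_pktmbuf_append(head, seg_len) == NULL)
		goto err;

	/* Chain extra segments: m_src->nb_segs > 1 selects the SGL code path. */
	for (i = 1; i < nb_segs; i++) {
		struct rte_mbuf *seg = rte_pktmbuf_alloc(mbuf_pool);

		if (seg == NULL || rte_pktmbuf_append(seg, seg_len) == NULL ||
				rte_pktmbuf_chain(head, seg) != 0) {
			rte_pktmbuf_free(seg);
			goto err;
		}
	}

	op = rte_crypto_op_alloc(op_pool, RTE_CRYPTO_OP_TYPE_SYMMETRIC);
	if (op == NULL)
		goto err;

	op->sym->m_src = head;
	/* NULL m_dst: in-place SGL. A linear m_dst gives OOP SGL in, LB out;
	 * a chained m_dst gives OOP SGL in, SGL out.
	 */
	op->sym->m_dst = NULL;
	op->sym->aead.data.offset = 0;
	op->sym->aead.data.length = (uint32_t)seg_len * nb_segs;
	/* aead.aad, aead.digest and the per-op IV must still be filled in. */

	rte_crypto_op_attach_sym_session(op, sess);
	return op;

err:
	rte_pktmbuf_free(head);
	return NULL;
}

The driver takes the SGL branch whenever m_src has more than one segment
(nb_segs > 1); m_dst then selects between the three supported layouts listed
in the commit message.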

diff --git a/drivers/crypto/ipsec_mb/pmd_aesni_mb.c b/drivers/crypto/ipsec_mb/pmd_aesni_mb.c
index afa0b6e3a4..09a0cc5ace 100644
--- a/drivers/crypto/ipsec_mb/pmd_aesni_mb.c
+++ b/drivers/crypto/ipsec_mb/pmd_aesni_mb.c
@@ -4,6 +4,11 @@
 
 #include "pmd_aesni_mb_priv.h"
 
+struct aesni_mb_op_buf_data {
+	struct rte_mbuf *m;
+	uint32_t offset;
+};
+
 /**
  * Calculate the authentication pre-computes
  *
@@ -1092,6 +1097,69 @@ set_cpu_mb_job_params(IMB_JOB *job, struct aesni_mb_session *session,
 	job->user_data = udata;
 }
 
+static int
+handle_aead_sgl_job(IMB_JOB *job, IMB_MGR *mb_mgr,
+		uint32_t *total_len,
+		struct aesni_mb_op_buf_data *src_data,
+		struct aesni_mb_op_buf_data *dst_data)
+{
+	uint32_t data_len, part_len;
+
+	if (*total_len == 0) {
+		job->sgl_state = IMB_SGL_COMPLETE;
+		return 0;
+	}
+
+	if (src_data->m == NULL) {
+		IPSEC_MB_LOG(ERR, "Invalid source buffer");
+		return -EINVAL;
+	}
+
+	job->sgl_state = IMB_SGL_UPDATE;
+
+	data_len = src_data->m->data_len - src_data->offset;
+
+	job->src = rte_pktmbuf_mtod_offset(src_data->m, uint8_t *,
+			src_data->offset);
+
+	if (dst_data->m != NULL) {
+		if (dst_data->m->data_len - dst_data->offset == 0) {
+			dst_data->m = dst_data->m->next;
+			if (dst_data->m == NULL) {
+				IPSEC_MB_LOG(ERR, "Invalid destination buffer");
+				return -EINVAL;
+			}
+			dst_data->offset = 0;
+		}
+		part_len = RTE_MIN(data_len, (dst_data->m->data_len -
+				dst_data->offset));
+		job->dst = rte_pktmbuf_mtod_offset(dst_data->m,
+				uint8_t *, dst_data->offset);
+		dst_data->offset += part_len;
+	} else {
+		part_len = RTE_MIN(data_len, *total_len);
+		job->dst = rte_pktmbuf_mtod_offset(src_data->m, uint8_t *,
+			src_data->offset);
+	}
+
+	job->msg_len_to_cipher_in_bytes = part_len;
+	job->msg_len_to_hash_in_bytes = part_len;
+
+	job = IMB_SUBMIT_JOB(mb_mgr);
+
+	*total_len -= part_len;
+
+	if (part_len != data_len) {
+		src_data->offset += part_len;
+	} else {
+		src_data->m = src_data->m->next;
+		src_data->offset = 0;
+	}
+
+	return 0;
+}
+
+
 /**
  * Process a crypto operation and complete a IMB_JOB job structure for
  * submission to the multi buffer library for processing.
@@ -1107,16 +1175,23 @@ set_cpu_mb_job_params(IMB_JOB *job, struct aesni_mb_session *session,
  */
 static inline int
 set_mb_job_params(IMB_JOB *job, struct ipsec_mb_qp *qp,
-		struct rte_crypto_op *op, uint8_t *digest_idx)
+		struct rte_crypto_op *op, uint8_t *digest_idx,
+		IMB_MGR *mb_mgr)
 {
 	struct rte_mbuf *m_src = op->sym->m_src, *m_dst;
 	struct aesni_mb_qp_data *qp_data = ipsec_mb_get_qp_private_data(qp);
+	struct aesni_mb_op_buf_data src_sgl = {0};
+	struct aesni_mb_op_buf_data dst_sgl = {0};
 	struct aesni_mb_session *session;
 	uint32_t m_offset, oop;
 	uint32_t auth_off_in_bytes;
 	uint32_t ciph_off_in_bytes;
 	uint32_t auth_len_in_bytes;
 	uint32_t ciph_len_in_bytes;
+	uint32_t total_len;
+	IMB_JOB base_job;
+	uint8_t sgl = 0;
+	int ret;
 
 	session = ipsec_mb_get_session_private(qp, op);
 	if (session == NULL) {
@@ -1124,6 +1199,9 @@ set_mb_job_params(IMB_JOB *job, struct ipsec_mb_qp *qp,
 		return -1;
 	}
 
+	if (op->sym->m_src->nb_segs > 1)
+		sgl = 1;
+
 	/* Set crypto operation */
 	job->chain_order = session->chain_order;
 
@@ -1175,6 +1253,11 @@ set_mb_job_params(IMB_JOB *job, struct ipsec_mb_qp *qp,
 		if (session->cipher.mode == IMB_CIPHER_GCM) {
 			job->u.GCM.aad = op->sym->aead.aad.data;
 			job->u.GCM.aad_len_in_bytes = session->aead.aad_len;
+			if (sgl) {
+				job->u.GCM.ctx = &session->aead.gcm_sgl_ctx;
+				job->cipher_mode = IMB_CIPHER_GCM_SGL;
+				job->hash_alg = IMB_AUTH_GCM_SGL;
+			}
 		} else {
 			/* For GMAC */
 			job->u.GCM.aad = rte_pktmbuf_mtod_offset(m_src,
@@ -1278,8 +1361,13 @@ set_mb_job_params(IMB_JOB *job, struct ipsec_mb_qp *qp,
 	job->iv_len_in_bytes = session->iv.length;
 
 	/* Data Parameters */
-	job->src = rte_pktmbuf_mtod(m_src, uint8_t *);
-	job->dst = rte_pktmbuf_mtod_offset(m_dst, uint8_t *, m_offset);
+	if (sgl) {
+		job->src = NULL;
+		job->dst = NULL;
+	} else {
+		job->src = rte_pktmbuf_mtod(m_src, uint8_t *);
+		job->dst = rte_pktmbuf_mtod_offset(m_dst, uint8_t *, m_offset);
+	}
 
 	switch (job->hash_alg) {
 	case IMB_AUTH_AES_CCM:
@@ -1305,6 +1393,13 @@ set_mb_job_params(IMB_JOB *job, struct ipsec_mb_qp *qp,
 				session->iv.offset);
 		break;
 
+	case IMB_AUTH_GCM_SGL:
+		job->hash_start_src_offset_in_bytes = 0;
+		job->msg_len_to_hash_in_bytes = 0;
+		job->iv = rte_crypto_op_ctod_offset(op, uint8_t *,
+			session->iv.offset);
+		break;
+
 	case IMB_AUTH_CHACHA20_POLY1305:
 		job->hash_start_src_offset_in_bytes =
 			op->sym->aead.data.offset;
@@ -1395,6 +1490,10 @@ set_mb_job_params(IMB_JOB *job, struct ipsec_mb_qp *qp,
 				op->sym->aead.data.offset;
 		job->msg_len_to_cipher_in_bytes = op->sym->aead.data.length;
 		break;
+	case IMB_CIPHER_GCM_SGL:
+		job->msg_len_to_cipher_in_bytes = 0;
+		job->cipher_start_src_offset_in_bytes = 0;
+		break;
 	default:
 		job->cipher_start_src_offset_in_bytes =
 					op->sym->cipher.data.offset;
@@ -1410,6 +1509,43 @@ set_mb_job_params(IMB_JOB *job, struct ipsec_mb_qp *qp,
 	/* Set user data to be crypto operation data struct */
 	job->user_data = op;
 
+	if (sgl && aead) {
+		base_job = *job;
+		job = IMB_SUBMIT_JOB(mb_mgr);
+		total_len = op->sym->aead.data.length;
+
+		src_sgl.m = m_src;
+		src_sgl.offset = m_offset;
+
+		while (src_sgl.offset >= src_sgl.m->data_len) {
+			src_sgl.offset -= src_sgl.m->data_len;
+			src_sgl.m = src_sgl.m->next;
+
+			RTE_ASSERT(src_sgl.m != NULL);
+		}
+
+		if (oop) {
+			dst_sgl.m = m_dst;
+			dst_sgl.offset = m_offset;
+
+			while (dst_sgl.offset >= dst_sgl.m->data_len) {
+				dst_sgl.offset -= dst_sgl.m->data_len;
+				dst_sgl.m = dst_sgl.m->next;
+
+				RTE_ASSERT(dst_sgl.m != NULL);
+			}
+		}
+
+		while (job->sgl_state != IMB_SGL_COMPLETE) {
+			job = IMB_GET_NEXT_JOB(mb_mgr);
+			*job = base_job;
+			ret = handle_aead_sgl_job(job, mb_mgr, &total_len,
+				&src_sgl, &dst_sgl);
+			if (ret < 0)
+				return ret;
+		}
+	}
+
 	return 0;
 }
 
@@ -1776,7 +1912,7 @@ aesni_mb_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
 		else
 #endif
 			retval = set_mb_job_params(job, qp, op,
-				&digest_idx);
+				&digest_idx, mb_mgr);
 
 		if (unlikely(retval != 0)) {
 			qp->stats.dequeue_err_count++;
diff --git a/drivers/crypto/ipsec_mb/pmd_aesni_mb_priv.h b/drivers/crypto/ipsec_mb/pmd_aesni_mb_priv.h
index 6ddfce2285..1d1e9dde00 100644
--- a/drivers/crypto/ipsec_mb/pmd_aesni_mb_priv.h
+++ b/drivers/crypto/ipsec_mb/pmd_aesni_mb_priv.h
@@ -946,6 +946,8 @@ struct aesni_mb_session {
 	struct {
 		/* * AAD data length */
 		uint16_t aad_len;
+
+		struct gcm_context_data gcm_sgl_ctx;
 	} aead;
 } __rte_cache_aligned;
 
-- 
2.25.1



Thread overview: 14+ messages
2022-04-07 10:30 [PATCH 0/3] add partial SGL support to AESNI_MB Ciara Power
2022-04-07 10:30 ` Ciara Power [this message]
2022-05-08 14:39   ` [PATCH 1/3] crypto/ipsec_mb: add GCM sgl support to aesni_mb De Lara Guarch, Pablo
2022-05-11 12:35     ` Power, Ciara
2022-04-07 10:30 ` [PATCH 2/3] crypto/ipsec_mb: add chachapoly SGL " Ciara Power
2022-04-07 10:30 ` [PATCH 3/3] crypto/ipsec_mb: check SGL support for algorithm Ciara Power
2022-05-08 14:39   ` De Lara Guarch, Pablo
2022-05-02  9:48 ` [EXT] [PATCH 0/3] add partial SGL support to AESNI_MB Akhil Goyal
2022-05-05 14:47   ` De Lara Guarch, Pablo
2022-05-11 12:30 ` [PATCH v2 0/2] " Ciara Power
2022-05-11 12:30   ` [PATCH v2 1/2] crypto/ipsec_mb: add GCM SGL support to aesni-mb Ciara Power
2022-05-11 12:30   ` [PATCH v2 2/2] crypto/ipsec_mb: add chachapoly " Ciara Power
2022-05-11 15:44   ` [PATCH v2 0/2] add partial SGL support to AESNI_MB De Lara Guarch, Pablo
2022-05-26 16:13   ` [EXT] " Akhil Goyal
