From mboxrd@z Thu Jan  1 00:00:00 1970
From: Brian Dooley <brian.dooley@intel.com>
To: Kai Ji <kai.ji@intel.com>, Pablo de Lara <pablo.de.lara.guarch@intel.com>
Cc: dev@dpdk.org,
	gakhil@marvell.com,
	Brian Dooley <brian.dooley@intel.com>
Subject: [PATCH v3] crypto/ipsec_mb: add digest encrypted feature
Date: Fri, 25 Aug 2023 08:41:35 +0000
Message-Id: <20230825084135.3430358-1-brian.dooley@intel.com>
X-Mailer: git-send-email 2.25.1
In-Reply-To: <20230821144234.3249892-1-brian.dooley@intel.com>
References: <20230821144234.3249892-1-brian.dooley@intel.com>
MIME-Version: 1.0
Content-Transfer-Encoding: 8bit

The AESNI_MB PMD does not support the Digest Encrypted feature. This
patch adds support for it: when the digest lies inside the region to be
encrypted, the digest is generated into the packet buffer so that it is
encrypted together with the data, and for out-of-place operations any
digest bytes that fall beyond the ciphertext are copied from the source
to the destination buffer. The feature flag and documentation are
updated accordingly.
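
For reference, digest encrypted means the digest is written into the
packet and then covered by the cipher operation. The sketch below shows
one way an application could set up such an operation; it is only an
illustration for the single-segment, in-place, byte-aligned case (usual
rte_cryptodev/rte_mbuf headers assumed), and prepare_digest_encrypted_op,
dev_id and digest_len are hypothetical names, not part of this patch.

	/* Illustrative sketch only: place the digest right after the
	 * authenticated data and let the cipher range cover it. */
	static int
	prepare_digest_encrypted_op(uint8_t dev_id, struct rte_crypto_op *op,
			uint16_t digest_len)
	{
		struct rte_cryptodev_info info;
		uint32_t digest_off;

		rte_cryptodev_info_get(dev_id, &info);
		if (!(info.feature_flags & RTE_CRYPTODEV_FF_DIGEST_ENCRYPTED))
			return -ENOTSUP;

		/* Digest lands directly after the authenticated data ... */
		digest_off = op->sym->auth.data.offset + op->sym->auth.data.length;
		op->sym->auth.digest.data =
			rte_pktmbuf_mtod_offset(op->sym->m_src, uint8_t *, digest_off);
		op->sym->auth.digest.phys_addr =
			rte_pktmbuf_iova_offset(op->sym->m_src, digest_off);

		/* ... and the cipher range is extended to cover it, so the
		 * PMD encrypts data and digest together. */
		op->sym->cipher.data.length =
			digest_off + digest_len - op->sym->cipher.data.offset;

		return 0;
	}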

Signed-off-by: Brian Dooley <brian.dooley@intel.com>
---
v2:
Fixed CHECKPATCH warning
v3:
Add Digest encrypted support to docs
---
 doc/guides/cryptodevs/features/aesni_mb.ini |   1 +
 drivers/crypto/ipsec_mb/pmd_aesni_mb.c      | 107 +++++++++++++++++++-
 2 files changed, 103 insertions(+), 5 deletions(-)

diff --git a/doc/guides/cryptodevs/features/aesni_mb.ini b/doc/guides/cryptodevs/features/aesni_mb.ini
index e4e965c35a..8df5fa2c85 100644
--- a/doc/guides/cryptodevs/features/aesni_mb.ini
+++ b/doc/guides/cryptodevs/features/aesni_mb.ini
@@ -20,6 +20,7 @@ OOP LB  In LB  Out     = Y
 CPU crypto             = Y
 Symmetric sessionless  = Y
 Non-Byte aligned data  = Y
+Digest encrypted       = Y
 
 ;
 ; Supported crypto algorithms of the 'aesni_mb' crypto driver.
diff --git a/drivers/crypto/ipsec_mb/pmd_aesni_mb.c b/drivers/crypto/ipsec_mb/pmd_aesni_mb.c
index 9e298023d7..66f3c82e80 100644
--- a/drivers/crypto/ipsec_mb/pmd_aesni_mb.c
+++ b/drivers/crypto/ipsec_mb/pmd_aesni_mb.c
@@ -1438,6 +1438,54 @@ set_gcm_job(IMB_MGR *mb_mgr, IMB_JOB *job, const uint8_t sgl,
 	return 0;
 }
 
+/** Return where to write the digest if it lies in the encrypted region, else NULL */
+static uint8_t *
+aesni_mb_digest_appended_in_src(struct rte_crypto_op *op, IMB_JOB *job,
+		uint32_t oop)
+{
+	unsigned int auth_size, cipher_size;
+	uint8_t *end_cipher;
+	uint8_t *start_cipher;
+
+	if (job->cipher_mode == IMB_CIPHER_NULL)
+		return NULL;
+
+	if (job->cipher_mode == IMB_CIPHER_ZUC_EEA3 ||
+		job->cipher_mode == IMB_CIPHER_SNOW3G_UEA2_BITLEN ||
+		job->cipher_mode == IMB_CIPHER_KASUMI_UEA1_BITLEN) {
+		cipher_size = (op->sym->cipher.data.offset >> 3) +
+			(op->sym->cipher.data.length >> 3);
+	} else {
+		cipher_size = (op->sym->cipher.data.offset) +
+			(op->sym->cipher.data.length);
+	}
+	if (job->hash_alg == IMB_AUTH_ZUC_EIA3_BITLEN ||
+		job->hash_alg == IMB_AUTH_SNOW3G_UIA2_BITLEN ||
+		job->hash_alg == IMB_AUTH_KASUMI_UIA1 ||
+		job->hash_alg == IMB_AUTH_ZUC256_EIA3_BITLEN) {
+		auth_size = (op->sym->auth.data.offset >> 3) +
+			(op->sym->auth.data.length >> 3);
+	} else {
+		auth_size = (op->sym->auth.data.offset) +
+			(op->sym->auth.data.length);
+	}
+
+	if (!oop) {
+		end_cipher = rte_pktmbuf_mtod_offset(op->sym->m_src, uint8_t *, cipher_size);
+		start_cipher = rte_pktmbuf_mtod(op->sym->m_src, uint8_t *);
+	} else {
+		end_cipher = rte_pktmbuf_mtod_offset(op->sym->m_dst, uint8_t *, cipher_size);
+		start_cipher = rte_pktmbuf_mtod(op->sym->m_dst, uint8_t *);
+	}
+
+	if (start_cipher < op->sym->auth.digest.data &&
+		op->sym->auth.digest.data < end_cipher) {
+		return rte_pktmbuf_mtod_offset(op->sym->m_src, uint8_t *, auth_size);
+	} else {
+		return NULL;
+	}
+}
+
 /**
  * Process a crypto operation and complete a IMB_JOB job structure for
  * submission to the multi buffer library for processing.
@@ -1580,9 +1628,12 @@ set_mb_job_params(IMB_JOB *job, struct ipsec_mb_qp *qp,
 	} else {
 		if (aead)
 			job->auth_tag_output = op->sym->aead.digest.data;
-		else
-			job->auth_tag_output = op->sym->auth.digest.data;
-
+		else {
+			job->auth_tag_output = aesni_mb_digest_appended_in_src(op, job, oop);
+			if (job->auth_tag_output == NULL) {
+				job->auth_tag_output = op->sym->auth.digest.data;
+			}
+		}
 		if (session->auth.req_digest_len !=
 				job->auth_tag_output_len_in_bytes) {
 			job->auth_tag_output =
@@ -1917,6 +1968,7 @@ post_process_mb_job(struct ipsec_mb_qp *qp, IMB_JOB *job)
 	struct aesni_mb_session *sess = NULL;
 	uint8_t *linear_buf = NULL;
 	int sgl = 0;
+	uint8_t oop = 0;
 	uint8_t is_docsis_sec = 0;
 
 	if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
@@ -1962,8 +2014,52 @@ post_process_mb_job(struct ipsec_mb_qp *qp, IMB_JOB *job)
 						op->sym->auth.digest.data,
 						sess->auth.req_digest_len,
 						&op->status);
-			} else
+			} else {
+				if (!op->sym->m_dst || op->sym->m_dst == op->sym->m_src) {
+					/* in-place operation */
+					oop = 0;
+				} else { /* out-of-place operation */
+					oop = 1;
+				}
+
+				if (op->sym->m_src->nb_segs == 1 && op->sym->m_dst != NULL &&
+					!is_aead_algo(job->hash_alg, sess->template_job.cipher_mode) &&
+					aesni_mb_digest_appended_in_src(op, job, oop) != NULL) {
+					unsigned int auth_size, cipher_size;
+					int unencrypted_bytes = 0;
+					if (job->cipher_mode == IMB_CIPHER_SNOW3G_UEA2_BITLEN ||
+						job->cipher_mode == IMB_CIPHER_KASUMI_UEA1_BITLEN ||
+						job->cipher_mode == IMB_CIPHER_ZUC_EEA3) {
+						cipher_size = (op->sym->cipher.data.offset >> 3) +
+							(op->sym->cipher.data.length >> 3);
+					} else {
+						cipher_size = (op->sym->cipher.data.offset) +
+							(op->sym->cipher.data.length);
+					}
+					if (job->hash_alg == IMB_AUTH_ZUC_EIA3_BITLEN ||
+						job->hash_alg == IMB_AUTH_SNOW3G_UIA2_BITLEN ||
+						job->hash_alg == IMB_AUTH_KASUMI_UIA1 ||
+						job->hash_alg == IMB_AUTH_ZUC256_EIA3_BITLEN) {
+						auth_size = (op->sym->auth.data.offset >> 3) +
+							(op->sym->auth.data.length >> 3);
+					} else {
+						auth_size = (op->sym->auth.data.offset) +
+						(op->sym->auth.data.length);
+					}
+					if (job->cipher_mode != IMB_CIPHER_NULL) {
+						unencrypted_bytes = auth_size +
+							job->auth_tag_output_len_in_bytes - cipher_size;
+					}
+					if (unencrypted_bytes > 0)
+						rte_memcpy(
+						rte_pktmbuf_mtod_offset(
+							op->sym->m_dst, uint8_t *, cipher_size),
+						rte_pktmbuf_mtod_offset(
+							op->sym->m_src, uint8_t *, cipher_size),
+						unencrypted_bytes);
+				}
 				generate_digest(job, op, sess);
+			}
 			break;
 		default:
 			op->status = RTE_CRYPTO_OP_STATUS_ERROR;
@@ -2555,7 +2651,8 @@ RTE_INIT(ipsec_mb_register_aesni_mb)
 			RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT |
 			RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT |
 			RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
-			RTE_CRYPTODEV_FF_SECURITY;
+			RTE_CRYPTODEV_FF_SECURITY |
+			RTE_CRYPTODEV_FF_DIGEST_ENCRYPTED;
 
 	aesni_mb_data->internals_priv_size = 0;
 	aesni_mb_data->ops = &aesni_mb_pmd_ops;
-- 
2.25.1