From: Pablo de Lara <pablo.de.lara.guarch@intel.com>
To: luca.boccassi@gmail.com, stable@dpdk.org
Cc: Pablo de Lara <pablo.de.lara.guarch@intel.com>
Subject: [PATCH 20.11 1/2] crypto/ipsec_mb: fix length and offset settings
Date: Mon, 14 Mar 2022 11:05:58 +0000
Message-ID: <20220314110559.1502861-1-pablo.de.lara.guarch@intel.com>
[ upstream commit a501609ea6466ed8526c0dfadedee332a4d4a451 ]
KASUMI, SNOW3G and ZUC require lengths and offsets to
be set in bits or bytes depending on the algorithm.
Some algorithms were mixing the two, so this commit
fixes the settings accordingly.
Fixes: ae8e085c608d ("crypto/aesni_mb: support KASUMI F8/F9")
Fixes: 6c42e0cf4d12 ("crypto/aesni_mb: support SNOW3G-UEA2/UIA2")
Fixes: fd8df85487c4 ("crypto/aesni_mb: support ZUC-EEA3/EIA3")
Signed-off-by: Pablo de Lara <pablo.de.lara.guarch@intel.com>
---
drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c | 121 +++++++++++++++------
1 file changed, 86 insertions(+), 35 deletions(-)
diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
index f4ffb21e10..07f5caa76f 100644
--- a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
+++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
@@ -1057,7 +1057,9 @@ get_session(struct aesni_mb_qp *qp, struct rte_crypto_op *op)
static inline uint64_t
auth_start_offset(struct rte_crypto_op *op, struct aesni_mb_session *session,
- uint32_t oop)
+ uint32_t oop, const uint32_t auth_offset,
+ const uint32_t cipher_offset, const uint32_t auth_length,
+ const uint32_t cipher_length)
{
struct rte_mbuf *m_src, *m_dst;
uint8_t *p_src, *p_dst;
@@ -1066,7 +1068,7 @@ auth_start_offset(struct rte_crypto_op *op, struct aesni_mb_session *session,
/* Only cipher then hash needs special calculation. */
if (!oop || session->chain_order != CIPHER_HASH)
- return op->sym->auth.data.offset;
+ return auth_offset;
m_src = op->sym->m_src;
m_dst = op->sym->m_dst;
@@ -1074,24 +1076,23 @@ auth_start_offset(struct rte_crypto_op *op, struct aesni_mb_session *session,
p_src = rte_pktmbuf_mtod(m_src, uint8_t *);
p_dst = rte_pktmbuf_mtod(m_dst, uint8_t *);
u_src = (uintptr_t)p_src;
- u_dst = (uintptr_t)p_dst + op->sym->auth.data.offset;
+ u_dst = (uintptr_t)p_dst + auth_offset;
/**
* Copy the content between cipher offset and auth offset for generating
* correct digest.
*/
- if (op->sym->cipher.data.offset > op->sym->auth.data.offset)
- memcpy(p_dst + op->sym->auth.data.offset,
- p_src + op->sym->auth.data.offset,
- op->sym->cipher.data.offset -
- op->sym->auth.data.offset);
-
+ if (cipher_offset > auth_offset)
+ memcpy(p_dst + auth_offset,
+ p_src + auth_offset,
+ cipher_offset -
+ auth_offset);
/**
* Copy the content between (cipher offset + length) and (auth offset +
* length) for generating correct digest
*/
- cipher_end = op->sym->cipher.data.offset + op->sym->cipher.data.length;
- auth_end = op->sym->auth.data.offset + op->sym->auth.data.length;
+ cipher_end = cipher_offset + cipher_length;
+ auth_end = auth_offset + auth_length;
if (cipher_end < auth_end)
memcpy(p_dst + cipher_end, p_src + cipher_end,
auth_end - cipher_end);
@@ -1246,6 +1247,10 @@ set_mb_job_params(JOB_AES_HMAC *job, struct aesni_mb_qp *qp,
struct rte_mbuf *m_src = op->sym->m_src, *m_dst;
struct aesni_mb_session *session;
uint32_t m_offset, oop;
+ uint32_t auth_off_in_bytes;
+ uint32_t ciph_off_in_bytes;
+ uint32_t auth_len_in_bytes;
+ uint32_t ciph_len_in_bytes;
session = get_session(qp, op);
if (session == NULL) {
@@ -1362,6 +1367,7 @@ set_mb_job_params(JOB_AES_HMAC *job, struct aesni_mb_qp *qp,
if (job->cipher_mode == IMB_CIPHER_ZUC_EEA3) {
job->aes_enc_key_expanded = session->cipher.zuc_cipher_key;
job->aes_dec_key_expanded = session->cipher.zuc_cipher_key;
+ m_offset >>= 3;
} else if (job->cipher_mode == IMB_CIPHER_SNOW3G_UEA2_BITLEN) {
job->enc_keys = &session->cipher.pKeySched_snow3g_cipher;
m_offset = 0;
@@ -1418,9 +1424,6 @@ set_mb_job_params(JOB_AES_HMAC *job, struct aesni_mb_qp *qp,
switch (job->hash_alg) {
case AES_CCM:
- job->cipher_start_src_offset_in_bytes =
- op->sym->aead.data.offset;
- job->msg_len_to_cipher_in_bytes = op->sym->aead.data.length;
job->hash_start_src_offset_in_bytes = op->sym->aead.data.offset;
job->msg_len_to_hash_in_bytes = op->sym->aead.data.length;
@@ -1430,19 +1433,11 @@ set_mb_job_params(JOB_AES_HMAC *job, struct aesni_mb_qp *qp,
case AES_GMAC:
if (session->cipher.mode == GCM) {
- job->cipher_start_src_offset_in_bytes =
- op->sym->aead.data.offset;
job->hash_start_src_offset_in_bytes =
op->sym->aead.data.offset;
- job->msg_len_to_cipher_in_bytes =
- op->sym->aead.data.length;
job->msg_len_to_hash_in_bytes =
op->sym->aead.data.length;
} else {
- job->cipher_start_src_offset_in_bytes =
- op->sym->auth.data.offset;
- job->hash_start_src_offset_in_bytes =
- op->sym->auth.data.offset;
job->msg_len_to_cipher_in_bytes = 0;
job->msg_len_to_hash_in_bytes = 0;
}
@@ -1453,10 +1448,7 @@ set_mb_job_params(JOB_AES_HMAC *job, struct aesni_mb_qp *qp,
#if IMB_VERSION(0, 54, 3) <= IMB_VERSION_NUM
case IMB_AUTH_CHACHA20_POLY1305:
- job->cipher_start_src_offset_in_bytes = op->sym->aead.data.offset;
job->hash_start_src_offset_in_bytes = op->sym->aead.data.offset;
- job->msg_len_to_cipher_in_bytes =
- op->sym->aead.data.length;
job->msg_len_to_hash_in_bytes =
op->sym->aead.data.length;
@@ -1464,26 +1456,85 @@ set_mb_job_params(JOB_AES_HMAC *job, struct aesni_mb_qp *qp,
session->iv.offset);
break;
#endif
- default:
- /* For SNOW3G, length and offsets are already in bits */
- job->cipher_start_src_offset_in_bytes =
- op->sym->cipher.data.offset;
- job->msg_len_to_cipher_in_bytes = op->sym->cipher.data.length;
+#if IMB_VERSION(0, 53, 3) <= IMB_VERSION_NUM
+ /* ZUC and SNOW3G require length in bits and offset in bytes */
+ case IMB_AUTH_ZUC_EIA3_BITLEN:
+ case IMB_AUTH_SNOW3G_UIA2_BITLEN:
+ auth_off_in_bytes = op->sym->auth.data.offset >> 3;
+ ciph_off_in_bytes = op->sym->cipher.data.offset >> 3;
+ auth_len_in_bytes = op->sym->auth.data.length >> 3;
+ ciph_len_in_bytes = op->sym->cipher.data.length >> 3;
+
+ job->hash_start_src_offset_in_bytes = auth_start_offset(op,
+ session, oop, auth_off_in_bytes,
+ ciph_off_in_bytes, auth_len_in_bytes,
+ ciph_len_in_bytes);
+ job->msg_len_to_hash_in_bits = op->sym->auth.data.length;
+
+ job->iv = rte_crypto_op_ctod_offset(op, uint8_t *,
+ session->iv.offset);
+ break;
+
+ /* KASUMI requires length and offset in bytes */
+ case IMB_AUTH_KASUMI_UIA1:
+ auth_off_in_bytes = op->sym->auth.data.offset >> 3;
+ ciph_off_in_bytes = op->sym->cipher.data.offset >> 3;
+ auth_len_in_bytes = op->sym->auth.data.length >> 3;
+ ciph_len_in_bytes = op->sym->cipher.data.length >> 3;
job->hash_start_src_offset_in_bytes = auth_start_offset(op,
- session, oop);
+ session, oop, auth_off_in_bytes,
+ ciph_off_in_bytes, auth_len_in_bytes,
+ ciph_len_in_bytes);
+ job->msg_len_to_hash_in_bytes = auth_len_in_bytes;
+
+ job->iv = rte_crypto_op_ctod_offset(op, uint8_t *,
+ session->iv.offset);
+ break;
+#endif
+
+ default:
+ job->hash_start_src_offset_in_bytes = auth_start_offset(op,
+ session, oop, op->sym->auth.data.offset,
+ op->sym->cipher.data.offset,
+ op->sym->auth.data.length,
+ op->sym->cipher.data.length);
job->msg_len_to_hash_in_bytes = op->sym->auth.data.length;
job->iv = rte_crypto_op_ctod_offset(op, uint8_t *,
session->iv.offset);
}
+ switch (job->cipher_mode) {
#if IMB_VERSION(0, 53, 3) <= IMB_VERSION_NUM
- if (job->cipher_mode == IMB_CIPHER_ZUC_EEA3)
- job->msg_len_to_cipher_in_bytes >>= 3;
- else if (job->hash_alg == IMB_AUTH_KASUMI_UIA1)
- job->msg_len_to_hash_in_bytes >>= 3;
+ /* ZUC requires length and offset in bytes */
+ case IMB_CIPHER_ZUC_EEA3:
+ job->cipher_start_src_offset_in_bytes =
+ op->sym->cipher.data.offset >> 3;
+ job->msg_len_to_cipher_in_bytes =
+ op->sym->cipher.data.length >> 3;
+ break;
+ /* SNOW3G and KASUMI require length and offset in bits */
+ case IMB_CIPHER_SNOW3G_UEA2_BITLEN:
+ case IMB_CIPHER_KASUMI_UEA1_BITLEN:
+ job->cipher_start_src_offset_in_bits =
+ op->sym->cipher.data.offset;
+ job->msg_len_to_cipher_in_bits =
+ op->sym->cipher.data.length;
+ break;
#endif
+ case IMB_CIPHER_CCM:
+ case IMB_CIPHER_GCM:
+ case IMB_CIPHER_CHACHA20_POLY1305:
+ job->cipher_start_src_offset_in_bytes =
+ op->sym->aead.data.offset;
+ job->msg_len_to_cipher_in_bytes = op->sym->aead.data.length;
+ break;
+ default:
+ job->cipher_start_src_offset_in_bytes =
+ op->sym->cipher.data.offset;
+ job->msg_len_to_cipher_in_bytes = op->sym->cipher.data.length;
+ }
/* Set user data to be crypto operation data struct */
job->user_data = op;
--
2.25.1