* [PATCH v1] crypto/ipsec_mb: unified IPsec MB interface
@ 2023-12-12 15:36 Brian Dooley
From: Brian Dooley @ 2023-12-12 15:36 UTC (permalink / raw)
To: Kai Ji, Pablo de Lara; +Cc: dev, gakhil, Brian Dooley
Currently, IPsec MB provides both the JOB API and the direct API.
The AESNI_MB PMD uses the JOB API codepath, while the ZUC, KASUMI,
SNOW3G, CHACHA20_POLY1305 and AESNI_GCM PMDs use the direct API.
Switch these PMDs over to the JOB API codepath, removing all use of
the IPsec MB direct API from them.
Signed-off-by: Brian Dooley <brian.dooley@intel.com>
---
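For reference, a minimal sketch of the two intel-ipsec-mb codepaths being
consolidated here: the direct GCM calls removed from pmd_aesni_gcm.c versus
the JOB API flow already used by pmd_aesni_mb.c. Illustrative only; it
assumes a recent intel-ipsec-mb, JOB field names follow the library's job
API headers, and error handling is omitted.

#include <intel-ipsec-mb.h>

/* Direct API (old path for AESNI_GCM): call the GCM handlers directly. */
static void
direct_gcm_encrypt(IMB_MGR *mb_mgr, const uint8_t *key,
		   const uint8_t *iv, const uint8_t *aad, uint64_t aad_len,
		   uint8_t *dst, const uint8_t *src, uint64_t len,
		   uint8_t *tag, uint64_t tag_len)
{
	struct gcm_key_data key_data;
	struct gcm_context_data ctx;

	mb_mgr->gcm128_pre(key, &key_data);
	mb_mgr->gcm128_init(&key_data, &ctx, iv, aad, aad_len);
	mb_mgr->gcm128_enc_update(&key_data, &ctx, dst, src, len);
	mb_mgr->gcm128_enc_finalize(&key_data, &ctx, tag, tag_len);
}

/* JOB API (path shared with AESNI_MB): fill an IMB_JOB and submit it. */
static void
job_gcm_encrypt(IMB_MGR *mb_mgr, const struct gcm_key_data *key_data,
		const uint8_t *iv, const uint8_t *aad, uint64_t aad_len,
		uint8_t *dst, const uint8_t *src, uint64_t len,
		uint8_t *tag, uint64_t tag_len)
{
	IMB_JOB *job = IMB_GET_NEXT_JOB(mb_mgr);

	job->chain_order = IMB_ORDER_CIPHER_HASH;
	job->cipher_direction = IMB_DIR_ENCRYPT;
	job->cipher_mode = IMB_CIPHER_GCM;
	job->hash_alg = IMB_AUTH_AES_GMAC;
	job->enc_keys = key_data;
	job->dec_keys = key_data;
	job->key_len_in_bytes = 16;
	job->iv = iv;
	job->iv_len_in_bytes = 12;
	job->u.GCM.aad = aad;
	job->u.GCM.aad_len_in_bytes = aad_len;
	job->src = src;
	job->dst = dst;
	job->cipher_start_src_offset_in_bytes = 0;
	job->msg_len_to_cipher_in_bytes = len;
	job->hash_start_src_offset_in_bytes = 0;
	job->msg_len_to_hash_in_bytes = len;
	job->auth_tag_output = tag;
	job->auth_tag_output_len_in_bytes = tag_len;

	/* Submit and, if the job is not returned yet, flush it out. */
	job = IMB_SUBMIT_JOB(mb_mgr);
	while (job == NULL)
		job = IMB_FLUSH_JOB(mb_mgr);
	/* job->status is IMB_STATUS_COMPLETED on success. */
}
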
drivers/crypto/ipsec_mb/pmd_aesni_gcm.c | 758 +-----------------
drivers/crypto/ipsec_mb/pmd_aesni_gcm_priv.h | 21 -
drivers/crypto/ipsec_mb/pmd_aesni_mb.c | 6 +-
drivers/crypto/ipsec_mb/pmd_aesni_mb_priv.h | 13 +
drivers/crypto/ipsec_mb/pmd_chacha_poly.c | 335 +-------
.../crypto/ipsec_mb/pmd_chacha_poly_priv.h | 19 -
drivers/crypto/ipsec_mb/pmd_kasumi.c | 404 +---------
drivers/crypto/ipsec_mb/pmd_kasumi_priv.h | 12 -
drivers/crypto/ipsec_mb/pmd_snow3g.c | 540 +------------
drivers/crypto/ipsec_mb/pmd_snow3g_priv.h | 13 -
drivers/crypto/ipsec_mb/pmd_zuc.c | 342 +-------
drivers/crypto/ipsec_mb/pmd_zuc_priv.h | 11 -
12 files changed, 38 insertions(+), 2436 deletions(-)
diff --git a/drivers/crypto/ipsec_mb/pmd_aesni_gcm.c b/drivers/crypto/ipsec_mb/pmd_aesni_gcm.c
index 8d40bd9169..44609333ee 100644
--- a/drivers/crypto/ipsec_mb/pmd_aesni_gcm.c
+++ b/drivers/crypto/ipsec_mb/pmd_aesni_gcm.c
@@ -3,753 +3,7 @@
*/
#include "pmd_aesni_gcm_priv.h"
-
-static void
-aesni_gcm_set_ops(struct aesni_gcm_ops *ops, IMB_MGR *mb_mgr)
-{
- /* Set 128 bit function pointers. */
- ops[GCM_KEY_128].pre = mb_mgr->gcm128_pre;
- ops[GCM_KEY_128].init = mb_mgr->gcm128_init;
-
- ops[GCM_KEY_128].enc = mb_mgr->gcm128_enc;
- ops[GCM_KEY_128].update_enc = mb_mgr->gcm128_enc_update;
- ops[GCM_KEY_128].finalize_enc = mb_mgr->gcm128_enc_finalize;
-
- ops[GCM_KEY_128].dec = mb_mgr->gcm128_dec;
- ops[GCM_KEY_128].update_dec = mb_mgr->gcm128_dec_update;
- ops[GCM_KEY_128].finalize_dec = mb_mgr->gcm128_dec_finalize;
-
- ops[GCM_KEY_128].gmac_init = mb_mgr->gmac128_init;
- ops[GCM_KEY_128].gmac_update = mb_mgr->gmac128_update;
- ops[GCM_KEY_128].gmac_finalize = mb_mgr->gmac128_finalize;
-
- /* Set 192 bit function pointers. */
- ops[GCM_KEY_192].pre = mb_mgr->gcm192_pre;
- ops[GCM_KEY_192].init = mb_mgr->gcm192_init;
-
- ops[GCM_KEY_192].enc = mb_mgr->gcm192_enc;
- ops[GCM_KEY_192].update_enc = mb_mgr->gcm192_enc_update;
- ops[GCM_KEY_192].finalize_enc = mb_mgr->gcm192_enc_finalize;
-
- ops[GCM_KEY_192].dec = mb_mgr->gcm192_dec;
- ops[GCM_KEY_192].update_dec = mb_mgr->gcm192_dec_update;
- ops[GCM_KEY_192].finalize_dec = mb_mgr->gcm192_dec_finalize;
-
- ops[GCM_KEY_192].gmac_init = mb_mgr->gmac192_init;
- ops[GCM_KEY_192].gmac_update = mb_mgr->gmac192_update;
- ops[GCM_KEY_192].gmac_finalize = mb_mgr->gmac192_finalize;
-
- /* Set 256 bit function pointers. */
- ops[GCM_KEY_256].pre = mb_mgr->gcm256_pre;
- ops[GCM_KEY_256].init = mb_mgr->gcm256_init;
-
- ops[GCM_KEY_256].enc = mb_mgr->gcm256_enc;
- ops[GCM_KEY_256].update_enc = mb_mgr->gcm256_enc_update;
- ops[GCM_KEY_256].finalize_enc = mb_mgr->gcm256_enc_finalize;
-
- ops[GCM_KEY_256].dec = mb_mgr->gcm256_dec;
- ops[GCM_KEY_256].update_dec = mb_mgr->gcm256_dec_update;
- ops[GCM_KEY_256].finalize_dec = mb_mgr->gcm256_dec_finalize;
-
- ops[GCM_KEY_256].gmac_init = mb_mgr->gmac256_init;
- ops[GCM_KEY_256].gmac_update = mb_mgr->gmac256_update;
- ops[GCM_KEY_256].gmac_finalize = mb_mgr->gmac256_finalize;
-}
-
-static int
-aesni_gcm_session_configure(IMB_MGR *mb_mgr, void *session,
- const struct rte_crypto_sym_xform *xform)
-{
- struct aesni_gcm_session *sess = session;
- const struct rte_crypto_sym_xform *auth_xform;
- const struct rte_crypto_sym_xform *cipher_xform;
- const struct rte_crypto_sym_xform *aead_xform;
-
- uint8_t key_length;
- const uint8_t *key;
- enum ipsec_mb_operation mode;
- int ret = 0;
-
- ret = ipsec_mb_parse_xform(xform, &mode, &auth_xform,
- &cipher_xform, &aead_xform);
- if (ret)
- return ret;
-
- /**< GCM key type */
-
- sess->op = mode;
-
- switch (sess->op) {
- case IPSEC_MB_OP_HASH_GEN_ONLY:
- case IPSEC_MB_OP_HASH_VERIFY_ONLY:
- /* AES-GMAC
- * auth_xform = xform;
- */
- if (auth_xform->auth.algo != RTE_CRYPTO_AUTH_AES_GMAC) {
- IPSEC_MB_LOG(ERR,
- "Only AES GMAC is supported as an authentication only algorithm");
- ret = -ENOTSUP;
- goto error_exit;
- }
- /* Set IV parameters */
- sess->iv.offset = auth_xform->auth.iv.offset;
- sess->iv.length = auth_xform->auth.iv.length;
- key_length = auth_xform->auth.key.length;
- key = auth_xform->auth.key.data;
- sess->req_digest_length =
- RTE_MIN(auth_xform->auth.digest_length,
- DIGEST_LENGTH_MAX);
- break;
- case IPSEC_MB_OP_AEAD_AUTHENTICATED_ENCRYPT:
- case IPSEC_MB_OP_AEAD_AUTHENTICATED_DECRYPT:
- /* AES-GCM
- * aead_xform = xform;
- */
-
- if (aead_xform->aead.algo != RTE_CRYPTO_AEAD_AES_GCM) {
- IPSEC_MB_LOG(ERR,
- "The only combined operation supported is AES GCM");
- ret = -ENOTSUP;
- goto error_exit;
- }
- /* Set IV parameters */
- sess->iv.offset = aead_xform->aead.iv.offset;
- sess->iv.length = aead_xform->aead.iv.length;
- key_length = aead_xform->aead.key.length;
- key = aead_xform->aead.key.data;
- sess->aad_length = aead_xform->aead.aad_length;
- sess->req_digest_length =
- RTE_MIN(aead_xform->aead.digest_length,
- DIGEST_LENGTH_MAX);
- break;
- default:
- IPSEC_MB_LOG(
- ERR, "Wrong xform type, has to be AEAD or authentication");
- ret = -ENOTSUP;
- goto error_exit;
- }
-
- /* Check key length, and calculate GCM pre-compute. */
- switch (key_length) {
- case 16:
- sess->key_length = GCM_KEY_128;
- mb_mgr->gcm128_pre(key, &sess->gdata_key);
- break;
- case 24:
- sess->key_length = GCM_KEY_192;
- mb_mgr->gcm192_pre(key, &sess->gdata_key);
- break;
- case 32:
- sess->key_length = GCM_KEY_256;
- mb_mgr->gcm256_pre(key, &sess->gdata_key);
- break;
- default:
- IPSEC_MB_LOG(ERR, "Invalid key length");
- ret = -EINVAL;
- goto error_exit;
- }
-
- /* Digest check */
- if (sess->req_digest_length > 16) {
- IPSEC_MB_LOG(ERR, "Invalid digest length");
- ret = -EINVAL;
- goto error_exit;
- }
- /*
- * If size requested is different, generate the full digest
- * (16 bytes) in a temporary location and then memcpy
- * the requested number of bytes.
- */
- if (sess->req_digest_length < 4)
- sess->gen_digest_length = 16;
- else
- sess->gen_digest_length = sess->req_digest_length;
-
-error_exit:
- return ret;
-}
-
-/**
- * Process a completed job and return rte_mbuf which job processed
- *
- * @param job IMB_JOB job to process
- *
- * @return
- * - Returns processed mbuf which is trimmed of output digest used in
- * verification of supplied digest in the case of a HASH_CIPHER operation
- * - Returns NULL on invalid job
- */
-static void
-post_process_gcm_crypto_op(struct ipsec_mb_qp *qp,
- struct rte_crypto_op *op,
- struct aesni_gcm_session *session)
-{
- struct aesni_gcm_qp_data *qp_data = ipsec_mb_get_qp_private_data(qp);
-
- op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
- /* Verify digest if required */
- if (session->op == IPSEC_MB_OP_AEAD_AUTHENTICATED_DECRYPT ||
- session->op == IPSEC_MB_OP_HASH_VERIFY_ONLY) {
- uint8_t *digest;
-
- uint8_t *tag = qp_data->temp_digest;
-
- if (session->op == IPSEC_MB_OP_HASH_VERIFY_ONLY)
- digest = op->sym->auth.digest.data;
- else
- digest = op->sym->aead.digest.data;
-
-#ifdef RTE_LIBRTE_PMD_AESNI_GCM_DEBUG
- rte_hexdump(stdout, "auth tag (orig):",
- digest, session->req_digest_length);
- rte_hexdump(stdout, "auth tag (calc):",
- tag, session->req_digest_length);
-#endif
-
- if (memcmp(tag, digest, session->req_digest_length) != 0)
- op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
- } else {
- if (session->req_digest_length != session->gen_digest_length) {
- if (session->op ==
- IPSEC_MB_OP_AEAD_AUTHENTICATED_ENCRYPT)
- memcpy(op->sym->aead.digest.data,
- qp_data->temp_digest,
- session->req_digest_length);
- else
- memcpy(op->sym->auth.digest.data,
- qp_data->temp_digest,
- session->req_digest_length);
- }
- }
-}
-
-/**
- * Process a completed GCM request
- *
- * @param qp Queue Pair to process
- * @param op Crypto operation
- * @param sess AESNI-GCM session
- *
- */
-static void
-handle_completed_gcm_crypto_op(struct ipsec_mb_qp *qp,
- struct rte_crypto_op *op,
- struct aesni_gcm_session *sess)
-{
- post_process_gcm_crypto_op(qp, op, sess);
-
- /* Free session if a session-less crypto op */
- if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
- memset(sess, 0, sizeof(struct aesni_gcm_session));
- rte_mempool_put(qp->sess_mp, op->sym->session);
- op->sym->session = NULL;
- }
-}
-
-/**
- * Process a crypto operation, calling
- * the GCM API from the multi buffer library.
- *
- * @param qp queue pair
- * @param op symmetric crypto operation
- * @param session GCM session
- *
- * @return
- * 0 on success
- */
-static int
-process_gcm_crypto_op(struct ipsec_mb_qp *qp, struct rte_crypto_op *op,
- struct aesni_gcm_session *session)
-{
- struct aesni_gcm_qp_data *qp_data = ipsec_mb_get_qp_private_data(qp);
- uint8_t *src, *dst;
- uint8_t *iv_ptr;
- struct rte_crypto_sym_op *sym_op = op->sym;
- struct rte_mbuf *m_src = sym_op->m_src;
- uint32_t offset, data_offset, data_length;
- uint32_t part_len, total_len, data_len;
- uint8_t *tag;
- unsigned int oop = 0;
- struct aesni_gcm_ops *ops = &qp_data->ops[session->key_length];
-
- if (session->op == IPSEC_MB_OP_AEAD_AUTHENTICATED_ENCRYPT ||
- session->op == IPSEC_MB_OP_AEAD_AUTHENTICATED_DECRYPT) {
- offset = sym_op->aead.data.offset;
- data_offset = offset;
- data_length = sym_op->aead.data.length;
- } else {
- offset = sym_op->auth.data.offset;
- data_offset = offset;
- data_length = sym_op->auth.data.length;
- }
-
- RTE_ASSERT(m_src != NULL);
-
- while (offset >= m_src->data_len && data_length != 0) {
- offset -= m_src->data_len;
- m_src = m_src->next;
-
- RTE_ASSERT(m_src != NULL);
- }
-
- src = rte_pktmbuf_mtod_offset(m_src, uint8_t *, offset);
-
- data_len = m_src->data_len - offset;
- part_len = (data_len < data_length) ? data_len :
- data_length;
-
- RTE_ASSERT((sym_op->m_dst == NULL) ||
- ((sym_op->m_dst != NULL) &&
- rte_pktmbuf_is_contiguous(sym_op->m_dst)));
-
- /* In-place */
- if (sym_op->m_dst == NULL || (sym_op->m_dst == sym_op->m_src))
- dst = src;
- /* Out-of-place */
- else {
- oop = 1;
- /* Segmented destination buffer is not supported
- * if operation is Out-of-place
- */
- RTE_ASSERT(rte_pktmbuf_is_contiguous(sym_op->m_dst));
- dst = rte_pktmbuf_mtod_offset(sym_op->m_dst, uint8_t *,
- data_offset);
- }
-
- iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
- session->iv.offset);
-
- if (session->op == IPSEC_MB_OP_AEAD_AUTHENTICATED_ENCRYPT) {
- ops->init(&session->gdata_key, &qp_data->gcm_ctx_data, iv_ptr,
- sym_op->aead.aad.data,
- (uint64_t)session->aad_length);
-
- ops->update_enc(&session->gdata_key, &qp_data->gcm_ctx_data,
- dst, src, (uint64_t)part_len);
- total_len = data_length - part_len;
-
- while (total_len) {
- m_src = m_src->next;
-
- RTE_ASSERT(m_src != NULL);
-
- src = rte_pktmbuf_mtod(m_src, uint8_t *);
- if (oop)
- dst += part_len;
- else
- dst = src;
- part_len = (m_src->data_len < total_len) ?
- m_src->data_len : total_len;
-
- ops->update_enc(&session->gdata_key,
- &qp_data->gcm_ctx_data,
- dst, src, (uint64_t)part_len);
- total_len -= part_len;
- }
-
- if (session->req_digest_length != session->gen_digest_length)
- tag = qp_data->temp_digest;
- else
- tag = sym_op->aead.digest.data;
-
- ops->finalize_enc(&session->gdata_key, &qp_data->gcm_ctx_data,
- tag, session->gen_digest_length);
- } else if (session->op == IPSEC_MB_OP_AEAD_AUTHENTICATED_DECRYPT) {
- ops->init(&session->gdata_key, &qp_data->gcm_ctx_data, iv_ptr,
- sym_op->aead.aad.data,
- (uint64_t)session->aad_length);
-
- ops->update_dec(&session->gdata_key, &qp_data->gcm_ctx_data,
- dst, src, (uint64_t)part_len);
- total_len = data_length - part_len;
-
- while (total_len) {
- m_src = m_src->next;
-
- RTE_ASSERT(m_src != NULL);
-
- src = rte_pktmbuf_mtod(m_src, uint8_t *);
- if (oop)
- dst += part_len;
- else
- dst = src;
- part_len = (m_src->data_len < total_len) ?
- m_src->data_len : total_len;
-
- ops->update_dec(&session->gdata_key,
- &qp_data->gcm_ctx_data,
- dst, src, (uint64_t)part_len);
- total_len -= part_len;
- }
-
- tag = qp_data->temp_digest;
- ops->finalize_dec(&session->gdata_key, &qp_data->gcm_ctx_data,
- tag, session->gen_digest_length);
- } else if (session->op == IPSEC_MB_OP_HASH_GEN_ONLY) {
- ops->gmac_init(&session->gdata_key, &qp_data->gcm_ctx_data,
- iv_ptr, session->iv.length);
-
- ops->gmac_update(&session->gdata_key, &qp_data->gcm_ctx_data,
- src, (uint64_t)part_len);
- total_len = data_length - part_len;
-
- while (total_len) {
- m_src = m_src->next;
-
- RTE_ASSERT(m_src != NULL);
-
- src = rte_pktmbuf_mtod(m_src, uint8_t *);
- part_len = (m_src->data_len < total_len) ?
- m_src->data_len : total_len;
-
- ops->gmac_update(&session->gdata_key,
- &qp_data->gcm_ctx_data, src,
- (uint64_t)part_len);
- total_len -= part_len;
- }
-
- if (session->req_digest_length != session->gen_digest_length)
- tag = qp_data->temp_digest;
- else
- tag = sym_op->auth.digest.data;
-
- ops->gmac_finalize(&session->gdata_key, &qp_data->gcm_ctx_data,
- tag, session->gen_digest_length);
- } else { /* IPSEC_MB_OP_HASH_VERIFY_ONLY */
- ops->gmac_init(&session->gdata_key, &qp_data->gcm_ctx_data,
- iv_ptr, session->iv.length);
-
- ops->gmac_update(&session->gdata_key, &qp_data->gcm_ctx_data,
- src, (uint64_t)part_len);
- total_len = data_length - part_len;
-
- while (total_len) {
- m_src = m_src->next;
-
- RTE_ASSERT(m_src != NULL);
-
- src = rte_pktmbuf_mtod(m_src, uint8_t *);
- part_len = (m_src->data_len < total_len) ?
- m_src->data_len : total_len;
-
- ops->gmac_update(&session->gdata_key,
- &qp_data->gcm_ctx_data, src,
- (uint64_t)part_len);
- total_len -= part_len;
- }
-
- tag = qp_data->temp_digest;
-
- ops->gmac_finalize(&session->gdata_key, &qp_data->gcm_ctx_data,
- tag, session->gen_digest_length);
- }
- return 0;
-}
-
-/** Get gcm session */
-static inline struct aesni_gcm_session *
-aesni_gcm_get_session(struct ipsec_mb_qp *qp,
- struct rte_crypto_op *op)
-{
- struct rte_cryptodev_sym_session *sess = NULL;
- struct rte_crypto_sym_op *sym_op = op->sym;
-
- if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
- if (likely(sym_op->session != NULL))
- sess = sym_op->session;
- } else {
- if (rte_mempool_get(qp->sess_mp, (void **)&sess))
- return NULL;
-
- if (unlikely(sess->sess_data_sz <
- sizeof(struct aesni_gcm_session))) {
- rte_mempool_put(qp->sess_mp, sess);
- return NULL;
- }
-
- if (unlikely(aesni_gcm_session_configure(qp->mb_mgr,
- CRYPTODEV_GET_SYM_SESS_PRIV(sess),
- sym_op->xform) != 0)) {
- rte_mempool_put(qp->sess_mp, sess);
- sess = NULL;
- }
- sym_op->session = sess;
- }
-
- if (unlikely(sess == NULL))
- op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
-
- return CRYPTODEV_GET_SYM_SESS_PRIV(sess);
-}
-
-static uint16_t
-aesni_gcm_pmd_dequeue_burst(void *queue_pair,
- struct rte_crypto_op **ops, uint16_t nb_ops)
-{
- struct aesni_gcm_session *sess;
- struct ipsec_mb_qp *qp = queue_pair;
-
- int retval = 0;
- unsigned int i, nb_dequeued;
-
- nb_dequeued = rte_ring_dequeue_burst(qp->ingress_queue,
- (void **)ops, nb_ops, NULL);
-
- for (i = 0; i < nb_dequeued; i++) {
-
- sess = aesni_gcm_get_session(qp, ops[i]);
- if (unlikely(sess == NULL)) {
- ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
- qp->stats.dequeue_err_count++;
- break;
- }
-
- retval = process_gcm_crypto_op(qp, ops[i], sess);
- if (retval < 0) {
- ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
- qp->stats.dequeue_err_count++;
- break;
- }
-
- handle_completed_gcm_crypto_op(qp, ops[i], sess);
- }
-
- qp->stats.dequeued_count += i;
-
- return i;
-}
-
-static inline void
-aesni_gcm_fill_error_code(struct rte_crypto_sym_vec *vec,
- int32_t errnum)
-{
- uint32_t i;
-
- for (i = 0; i < vec->num; i++)
- vec->status[i] = errnum;
-}
-
-static inline int32_t
-aesni_gcm_sgl_op_finalize_encryption(const struct aesni_gcm_session *s,
- struct gcm_context_data *gdata_ctx,
- uint8_t *digest, struct aesni_gcm_ops ops)
-{
- if (s->req_digest_length != s->gen_digest_length) {
- uint8_t tmpdigest[s->gen_digest_length];
-
- ops.finalize_enc(&s->gdata_key, gdata_ctx, tmpdigest,
- s->gen_digest_length);
- memcpy(digest, tmpdigest, s->req_digest_length);
- } else {
- ops.finalize_enc(&s->gdata_key, gdata_ctx, digest,
- s->gen_digest_length);
- }
-
- return 0;
-}
-
-static inline int32_t
-aesni_gcm_sgl_op_finalize_decryption(const struct aesni_gcm_session *s,
- struct gcm_context_data *gdata_ctx,
- uint8_t *digest, struct aesni_gcm_ops ops)
-{
- uint8_t tmpdigest[s->gen_digest_length];
-
- ops.finalize_dec(&s->gdata_key, gdata_ctx, tmpdigest,
- s->gen_digest_length);
-
- return memcmp(digest, tmpdigest, s->req_digest_length) == 0 ? 0
- : EBADMSG;
-}
-
-static inline void
-aesni_gcm_process_gcm_sgl_op(const struct aesni_gcm_session *s,
- struct gcm_context_data *gdata_ctx,
- struct rte_crypto_sgl *sgl, void *iv, void *aad,
- struct aesni_gcm_ops ops)
-{
- uint32_t i;
-
- /* init crypto operation */
- ops.init(&s->gdata_key, gdata_ctx, iv, aad,
- (uint64_t)s->aad_length);
-
- /* update with sgl data */
- for (i = 0; i < sgl->num; i++) {
- struct rte_crypto_vec *vec = &sgl->vec[i];
-
- switch (s->op) {
- case IPSEC_MB_OP_AEAD_AUTHENTICATED_ENCRYPT:
- ops.update_enc(&s->gdata_key, gdata_ctx,
- vec->base, vec->base, vec->len);
- break;
- case IPSEC_MB_OP_AEAD_AUTHENTICATED_DECRYPT:
- ops.update_dec(&s->gdata_key, gdata_ctx,
- vec->base, vec->base, vec->len);
- break;
- default:
- IPSEC_MB_LOG(ERR, "Invalid session op");
- break;
- }
-
- }
-}
-
-static inline void
-aesni_gcm_process_gmac_sgl_op(const struct aesni_gcm_session *s,
- struct gcm_context_data *gdata_ctx,
- struct rte_crypto_sgl *sgl, void *iv,
- struct aesni_gcm_ops ops)
-{
- ops.init(&s->gdata_key, gdata_ctx, iv, sgl->vec[0].base,
- sgl->vec[0].len);
-}
-
-static inline uint32_t
-aesni_gcm_sgl_encrypt(struct aesni_gcm_session *s,
- struct gcm_context_data *gdata_ctx,
- struct rte_crypto_sym_vec *vec,
- struct aesni_gcm_ops ops)
-{
- uint32_t i, processed;
-
- processed = 0;
- for (i = 0; i < vec->num; ++i) {
- aesni_gcm_process_gcm_sgl_op(s, gdata_ctx, &vec->src_sgl[i],
- vec->iv[i].va, vec->aad[i].va,
- ops);
- vec->status[i] = aesni_gcm_sgl_op_finalize_encryption(
- s, gdata_ctx, vec->digest[i].va, ops);
- processed += (vec->status[i] == 0);
- }
-
- return processed;
-}
-
-static inline uint32_t
-aesni_gcm_sgl_decrypt(struct aesni_gcm_session *s,
- struct gcm_context_data *gdata_ctx,
- struct rte_crypto_sym_vec *vec,
- struct aesni_gcm_ops ops)
-{
- uint32_t i, processed;
-
- processed = 0;
- for (i = 0; i < vec->num; ++i) {
- aesni_gcm_process_gcm_sgl_op(s, gdata_ctx, &vec->src_sgl[i],
- vec->iv[i].va, vec->aad[i].va,
- ops);
- vec->status[i] = aesni_gcm_sgl_op_finalize_decryption(
- s, gdata_ctx, vec->digest[i].va, ops);
- processed += (vec->status[i] == 0);
- }
-
- return processed;
-}
-
-static inline uint32_t
-aesni_gmac_sgl_generate(struct aesni_gcm_session *s,
- struct gcm_context_data *gdata_ctx,
- struct rte_crypto_sym_vec *vec,
- struct aesni_gcm_ops ops)
-{
- uint32_t i, processed;
-
- processed = 0;
- for (i = 0; i < vec->num; ++i) {
- if (vec->src_sgl[i].num != 1) {
- vec->status[i] = ENOTSUP;
- continue;
- }
-
- aesni_gcm_process_gmac_sgl_op(s, gdata_ctx, &vec->src_sgl[i],
- vec->iv[i].va, ops);
- vec->status[i] = aesni_gcm_sgl_op_finalize_encryption(
- s, gdata_ctx, vec->digest[i].va, ops);
- processed += (vec->status[i] == 0);
- }
-
- return processed;
-}
-
-static inline uint32_t
-aesni_gmac_sgl_verify(struct aesni_gcm_session *s,
- struct gcm_context_data *gdata_ctx,
- struct rte_crypto_sym_vec *vec,
- struct aesni_gcm_ops ops)
-{
- uint32_t i, processed;
-
- processed = 0;
- for (i = 0; i < vec->num; ++i) {
- if (vec->src_sgl[i].num != 1) {
- vec->status[i] = ENOTSUP;
- continue;
- }
-
- aesni_gcm_process_gmac_sgl_op(s, gdata_ctx, &vec->src_sgl[i],
- vec->iv[i].va, ops);
- vec->status[i] = aesni_gcm_sgl_op_finalize_decryption(
- s, gdata_ctx, vec->digest[i].va, ops);
- processed += (vec->status[i] == 0);
- }
-
- return processed;
-}
-
-/** Process CPU crypto bulk operations */
-static uint32_t
-aesni_gcm_process_bulk(struct rte_cryptodev *dev __rte_unused,
- struct rte_cryptodev_sym_session *sess,
- __rte_unused union rte_crypto_sym_ofs ofs,
- struct rte_crypto_sym_vec *vec)
-{
- struct aesni_gcm_session *s = CRYPTODEV_GET_SYM_SESS_PRIV(sess);
- struct gcm_context_data gdata_ctx;
- IMB_MGR *mb_mgr;
-
- /* get per-thread MB MGR, create one if needed */
- mb_mgr = get_per_thread_mb_mgr();
- if (unlikely(mb_mgr == NULL))
- return 0;
-
- /* Check if function pointers have been set for this thread ops. */
- if (unlikely(RTE_PER_LCORE(gcm_ops)[s->key_length].init == NULL))
- aesni_gcm_set_ops(RTE_PER_LCORE(gcm_ops), mb_mgr);
-
- switch (s->op) {
- case IPSEC_MB_OP_AEAD_AUTHENTICATED_ENCRYPT:
- return aesni_gcm_sgl_encrypt(s, &gdata_ctx, vec,
- RTE_PER_LCORE(gcm_ops)[s->key_length]);
- case IPSEC_MB_OP_AEAD_AUTHENTICATED_DECRYPT:
- return aesni_gcm_sgl_decrypt(s, &gdata_ctx, vec,
- RTE_PER_LCORE(gcm_ops)[s->key_length]);
- case IPSEC_MB_OP_HASH_GEN_ONLY:
- return aesni_gmac_sgl_generate(s, &gdata_ctx, vec,
- RTE_PER_LCORE(gcm_ops)[s->key_length]);
- case IPSEC_MB_OP_HASH_VERIFY_ONLY:
- return aesni_gmac_sgl_verify(s, &gdata_ctx, vec,
- RTE_PER_LCORE(gcm_ops)[s->key_length]);
- default:
- aesni_gcm_fill_error_code(vec, EINVAL);
- return 0;
- }
-}
-
-static int
-aesni_gcm_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
- const struct rte_cryptodev_qp_conf *qp_conf,
- int socket_id)
-{
- int ret = ipsec_mb_qp_setup(dev, qp_id, qp_conf, socket_id);
- if (ret < 0)
- return ret;
-
- struct ipsec_mb_qp *qp = dev->data->queue_pairs[qp_id];
- struct aesni_gcm_qp_data *qp_data = ipsec_mb_get_qp_private_data(qp);
- aesni_gcm_set_ops(qp_data->ops, qp->mb_mgr);
- return 0;
-}
+#include "pmd_aesni_mb_priv.h"
struct rte_cryptodev_ops aesni_gcm_pmd_ops = {
.dev_configure = ipsec_mb_config,
@@ -762,10 +16,10 @@ struct rte_cryptodev_ops aesni_gcm_pmd_ops = {
.dev_infos_get = ipsec_mb_info_get,
- .queue_pair_setup = aesni_gcm_qp_setup,
+ .queue_pair_setup = ipsec_mb_qp_setup,
.queue_pair_release = ipsec_mb_qp_release,
- .sym_cpu_process = aesni_gcm_process_bulk,
+ .sym_cpu_process = aesni_mb_process_bulk,
.sym_session_get_size = ipsec_mb_sym_session_get_size,
.sym_session_configure = ipsec_mb_sym_session_configure,
@@ -801,7 +55,7 @@ RTE_INIT(ipsec_mb_register_aesni_gcm)
&ipsec_mb_pmds[IPSEC_MB_PMD_TYPE_AESNI_GCM];
aesni_gcm_data->caps = aesni_gcm_capabilities;
- aesni_gcm_data->dequeue_burst = aesni_gcm_pmd_dequeue_burst;
+ aesni_gcm_data->dequeue_burst = aesni_mb_dequeue_burst;
aesni_gcm_data->feature_flags =
RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
@@ -814,6 +68,6 @@ RTE_INIT(ipsec_mb_register_aesni_gcm)
aesni_gcm_data->ops = &aesni_gcm_pmd_ops;
aesni_gcm_data->qp_priv_size = sizeof(struct aesni_gcm_qp_data);
aesni_gcm_data->queue_pair_configure = NULL;
- aesni_gcm_data->session_configure = aesni_gcm_session_configure;
- aesni_gcm_data->session_priv_size = sizeof(struct aesni_gcm_session);
+ aesni_gcm_data->session_configure = aesni_mb_session_configure;
+ aesni_gcm_data->session_priv_size = sizeof(struct aesni_mb_session);
}
diff --git a/drivers/crypto/ipsec_mb/pmd_aesni_gcm_priv.h b/drivers/crypto/ipsec_mb/pmd_aesni_gcm_priv.h
index 55a0416030..a40543ad15 100644
--- a/drivers/crypto/ipsec_mb/pmd_aesni_gcm_priv.h
+++ b/drivers/crypto/ipsec_mb/pmd_aesni_gcm_priv.h
@@ -143,25 +143,4 @@ struct aesni_gcm_qp_data {
/**< Operation Handlers */
};
-/** AESNI GCM private session structure */
-struct aesni_gcm_session {
- struct {
- uint16_t length;
- uint16_t offset;
- } iv;
- /**< IV parameters */
- uint16_t aad_length;
- /**< AAD length */
- uint16_t req_digest_length;
- /**< Requested digest length */
- uint16_t gen_digest_length;
- /**< Generated digest length */
- enum ipsec_mb_operation op;
- /**< GCM operation type */
- struct gcm_key_data gdata_key;
- /**< GCM parameters */
- enum aesni_gcm_key_length key_length;
- /** Key Length */
-};
-
#endif /* _PMD_AESNI_GCM_PRIV_H_ */
diff --git a/drivers/crypto/ipsec_mb/pmd_aesni_mb.c b/drivers/crypto/ipsec_mb/pmd_aesni_mb.c
index ece9cfd5ed..712188216d 100644
--- a/drivers/crypto/ipsec_mb/pmd_aesni_mb.c
+++ b/drivers/crypto/ipsec_mb/pmd_aesni_mb.c
@@ -759,7 +759,7 @@ aesni_mb_set_session_aead_parameters(const IMB_MGR *mb_mgr,
}
/** Configure a aesni multi-buffer session from a crypto xform chain */
-static int
+int
aesni_mb_session_configure(IMB_MGR *mb_mgr,
void *priv_sess,
const struct rte_crypto_sym_xform *xform)
@@ -2124,7 +2124,7 @@ set_job_null_op(IMB_JOB *job, struct rte_crypto_op *op)
}
#if IMB_VERSION(1, 2, 0) < IMB_VERSION_NUM
-static uint16_t
+uint16_t
aesni_mb_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
uint16_t nb_ops)
{
@@ -2442,7 +2442,7 @@ verify_sync_dgst(struct rte_crypto_sym_vec *vec,
return k;
}
-static uint32_t
+uint32_t
aesni_mb_process_bulk(struct rte_cryptodev *dev __rte_unused,
struct rte_cryptodev_sym_session *sess, union rte_crypto_sym_ofs sofs,
struct rte_crypto_sym_vec *vec)
diff --git a/drivers/crypto/ipsec_mb/pmd_aesni_mb_priv.h b/drivers/crypto/ipsec_mb/pmd_aesni_mb_priv.h
index 85994fe5a1..9f0a89d20b 100644
--- a/drivers/crypto/ipsec_mb/pmd_aesni_mb_priv.h
+++ b/drivers/crypto/ipsec_mb/pmd_aesni_mb_priv.h
@@ -21,6 +21,19 @@
#define MAX_NUM_SEGS 16
#endif
+int
+aesni_mb_session_configure(IMB_MGR * m __rte_unused, void *priv_sess,
+ const struct rte_crypto_sym_xform *xform);
+
+uint16_t
+aesni_mb_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
+ uint16_t nb_ops);
+
+uint32_t
+aesni_mb_process_bulk(struct rte_cryptodev *dev __rte_unused,
+ struct rte_cryptodev_sym_session *sess, union rte_crypto_sym_ofs sofs,
+ struct rte_crypto_sym_vec *vec);
+
static const struct rte_cryptodev_capabilities aesni_mb_capabilities[] = {
{ /* MD5 HMAC */
.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
diff --git a/drivers/crypto/ipsec_mb/pmd_chacha_poly.c b/drivers/crypto/ipsec_mb/pmd_chacha_poly.c
index 97e7cef233..93f8e3588e 100644
--- a/drivers/crypto/ipsec_mb/pmd_chacha_poly.c
+++ b/drivers/crypto/ipsec_mb/pmd_chacha_poly.c
@@ -3,334 +3,7 @@
*/
#include "pmd_chacha_poly_priv.h"
-
-/** Parse crypto xform chain and set private session parameters. */
-static int
-chacha20_poly1305_session_configure(IMB_MGR * mb_mgr __rte_unused,
- void *priv_sess, const struct rte_crypto_sym_xform *xform)
-{
- struct chacha20_poly1305_session *sess = priv_sess;
- const struct rte_crypto_sym_xform *auth_xform;
- const struct rte_crypto_sym_xform *cipher_xform;
- const struct rte_crypto_sym_xform *aead_xform;
-
- uint8_t key_length;
- const uint8_t *key;
- enum ipsec_mb_operation mode;
- int ret = 0;
-
- ret = ipsec_mb_parse_xform(xform, &mode, &auth_xform,
- &cipher_xform, &aead_xform);
- if (ret)
- return ret;
-
- sess->op = mode;
-
- switch (sess->op) {
- case IPSEC_MB_OP_AEAD_AUTHENTICATED_ENCRYPT:
- case IPSEC_MB_OP_AEAD_AUTHENTICATED_DECRYPT:
- if (aead_xform->aead.algo !=
- RTE_CRYPTO_AEAD_CHACHA20_POLY1305) {
- IPSEC_MB_LOG(ERR,
- "The only combined operation supported is CHACHA20 POLY1305");
- ret = -ENOTSUP;
- goto error_exit;
- }
- /* Set IV parameters */
- sess->iv.offset = aead_xform->aead.iv.offset;
- sess->iv.length = aead_xform->aead.iv.length;
- key_length = aead_xform->aead.key.length;
- key = aead_xform->aead.key.data;
- sess->aad_length = aead_xform->aead.aad_length;
- sess->req_digest_length = aead_xform->aead.digest_length;
- break;
- default:
- IPSEC_MB_LOG(
- ERR, "Wrong xform type, has to be AEAD or authentication");
- ret = -ENOTSUP;
- goto error_exit;
- }
-
- /* IV check */
- if (sess->iv.length != CHACHA20_POLY1305_IV_LENGTH &&
- sess->iv.length != 0) {
- IPSEC_MB_LOG(ERR, "Wrong IV length");
- ret = -EINVAL;
- goto error_exit;
- }
-
- /* Check key length */
- if (key_length != CHACHA20_POLY1305_KEY_SIZE) {
- IPSEC_MB_LOG(ERR, "Invalid key length");
- ret = -EINVAL;
- goto error_exit;
- } else {
- memcpy(sess->key, key, CHACHA20_POLY1305_KEY_SIZE);
- }
-
- /* Digest check */
- if (sess->req_digest_length != CHACHA20_POLY1305_DIGEST_LENGTH) {
- IPSEC_MB_LOG(ERR, "Invalid digest length");
- ret = -EINVAL;
- goto error_exit;
- } else {
- sess->gen_digest_length = CHACHA20_POLY1305_DIGEST_LENGTH;
- }
-
-error_exit:
- return ret;
-}
-
-/**
- * Process a crypto operation, calling
- * the direct chacha poly API from the multi buffer library.
- *
- * @param qp queue pair
- * @param op symmetric crypto operation
- * @param session chacha poly session
- *
- * @return
- * - Return 0 if success
- */
-static int
-chacha20_poly1305_crypto_op(struct ipsec_mb_qp *qp, struct rte_crypto_op *op,
- struct chacha20_poly1305_session *session)
-{
- struct chacha20_poly1305_qp_data *qp_data =
- ipsec_mb_get_qp_private_data(qp);
- uint8_t *src, *dst;
- uint8_t *iv_ptr;
- struct rte_crypto_sym_op *sym_op = op->sym;
- struct rte_mbuf *m_src = sym_op->m_src;
- uint32_t offset, data_offset, data_length;
- uint32_t part_len, data_len;
- int total_len;
- uint8_t *tag;
- unsigned int oop = 0;
-
- offset = sym_op->aead.data.offset;
- data_offset = offset;
- data_length = sym_op->aead.data.length;
- RTE_ASSERT(m_src != NULL);
-
- while (offset >= m_src->data_len && data_length != 0) {
- offset -= m_src->data_len;
- m_src = m_src->next;
-
- RTE_ASSERT(m_src != NULL);
- }
-
- src = rte_pktmbuf_mtod_offset(m_src, uint8_t *, offset);
-
- data_len = m_src->data_len - offset;
- part_len = (data_len < data_length) ? data_len :
- data_length;
-
- /* In-place */
- if (sym_op->m_dst == NULL || (sym_op->m_dst == sym_op->m_src))
- dst = src;
- /* Out-of-place */
- else {
- oop = 1;
- /* Segmented destination buffer is not supported
- * if operation is Out-of-place
- */
- RTE_ASSERT(rte_pktmbuf_is_contiguous(sym_op->m_dst));
- dst = rte_pktmbuf_mtod_offset(sym_op->m_dst, uint8_t *,
- data_offset);
- }
-
- iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
- session->iv.offset);
-
- IMB_CHACHA20_POLY1305_INIT(qp->mb_mgr, session->key,
- &qp_data->chacha20_poly1305_ctx_data,
- iv_ptr, sym_op->aead.aad.data,
- (uint64_t)session->aad_length);
-
- if (session->op == IPSEC_MB_OP_AEAD_AUTHENTICATED_ENCRYPT) {
- IMB_CHACHA20_POLY1305_ENC_UPDATE(qp->mb_mgr,
- session->key,
- &qp_data->chacha20_poly1305_ctx_data,
- dst, src, (uint64_t)part_len);
- total_len = data_length - part_len;
-
- while (total_len) {
- m_src = m_src->next;
- RTE_ASSERT(m_src != NULL);
-
- src = rte_pktmbuf_mtod(m_src, uint8_t *);
- if (oop)
- dst += part_len;
- else
- dst = src;
- part_len = (m_src->data_len < total_len) ?
- m_src->data_len : total_len;
-
- if (dst == NULL || src == NULL) {
- IPSEC_MB_LOG(ERR, "Invalid src or dst input");
- return -EINVAL;
- }
- IMB_CHACHA20_POLY1305_ENC_UPDATE(qp->mb_mgr,
- session->key,
- &qp_data->chacha20_poly1305_ctx_data,
- dst, src, (uint64_t)part_len);
- total_len -= part_len;
- if (total_len < 0) {
- IPSEC_MB_LOG(ERR, "Invalid part len");
- return -EINVAL;
- }
- }
-
- tag = sym_op->aead.digest.data;
- IMB_CHACHA20_POLY1305_ENC_FINALIZE(qp->mb_mgr,
- &qp_data->chacha20_poly1305_ctx_data,
- tag, session->gen_digest_length);
-
- } else {
- IMB_CHACHA20_POLY1305_DEC_UPDATE(qp->mb_mgr,
- session->key,
- &qp_data->chacha20_poly1305_ctx_data,
- dst, src, (uint64_t)part_len);
-
- total_len = data_length - part_len;
-
- while (total_len) {
- m_src = m_src->next;
-
- RTE_ASSERT(m_src != NULL);
-
- src = rte_pktmbuf_mtod(m_src, uint8_t *);
- if (oop)
- dst += part_len;
- else
- dst = src;
- part_len = (m_src->data_len < total_len) ?
- m_src->data_len : total_len;
-
- if (dst == NULL || src == NULL) {
- IPSEC_MB_LOG(ERR, "Invalid src or dst input");
- return -EINVAL;
- }
- IMB_CHACHA20_POLY1305_DEC_UPDATE(qp->mb_mgr,
- session->key,
- &qp_data->chacha20_poly1305_ctx_data,
- dst, src, (uint64_t)part_len);
- total_len -= part_len;
- if (total_len < 0) {
- IPSEC_MB_LOG(ERR, "Invalid part len");
- return -EINVAL;
- }
- }
-
- tag = qp_data->temp_digest;
- IMB_CHACHA20_POLY1305_DEC_FINALIZE(qp->mb_mgr,
- &qp_data->chacha20_poly1305_ctx_data,
- tag, session->gen_digest_length);
- }
-
- return 0;
-}
-
-/**
- * Process a completed chacha poly op
- *
- * @param qp Queue Pair to process
- * @param op Crypto operation
- * @param sess Crypto session
- *
- * @return
- * - void
- */
-static void
-post_process_chacha20_poly1305_crypto_op(struct ipsec_mb_qp *qp,
- struct rte_crypto_op *op,
- struct chacha20_poly1305_session *session)
-{
- struct chacha20_poly1305_qp_data *qp_data =
- ipsec_mb_get_qp_private_data(qp);
-
- op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
- /* Verify digest if required */
- if (session->op == IPSEC_MB_OP_AEAD_AUTHENTICATED_DECRYPT ||
- session->op == IPSEC_MB_OP_HASH_VERIFY_ONLY) {
- uint8_t *digest = op->sym->aead.digest.data;
- uint8_t *tag = qp_data->temp_digest;
-
-#ifdef RTE_LIBRTE_PMD_CHACHA20_POLY1305_DEBUG
- rte_hexdump(stdout, "auth tag (orig):",
- digest, session->req_digest_length);
- rte_hexdump(stdout, "auth tag (calc):",
- tag, session->req_digest_length);
-#endif
- if (memcmp(tag, digest, session->req_digest_length) != 0)
- op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
-
- }
-
-}
-
-/**
- * Process a completed Chacha20_poly1305 request
- *
- * @param qp Queue Pair to process
- * @param op Crypto operation
- * @param sess Crypto session
- *
- * @return
- * - void
- */
-static void
-handle_completed_chacha20_poly1305_crypto_op(struct ipsec_mb_qp *qp,
- struct rte_crypto_op *op,
- struct chacha20_poly1305_session *sess)
-{
- post_process_chacha20_poly1305_crypto_op(qp, op, sess);
-
- /* Free session if a session-less crypto op */
- if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
- memset(sess, 0, sizeof(struct chacha20_poly1305_session));
- rte_mempool_put(qp->sess_mp, op->sym->session);
- op->sym->session = NULL;
- }
-}
-
-static uint16_t
-chacha20_poly1305_pmd_dequeue_burst(void *queue_pair,
- struct rte_crypto_op **ops, uint16_t nb_ops)
-{
- struct chacha20_poly1305_session *sess;
- struct ipsec_mb_qp *qp = queue_pair;
-
- int retval = 0;
- unsigned int i = 0, nb_dequeued;
-
- nb_dequeued = rte_ring_dequeue_burst(qp->ingress_queue,
- (void **)ops, nb_ops, NULL);
-
- for (i = 0; i < nb_dequeued; i++) {
-
- sess = ipsec_mb_get_session_private(qp, ops[i]);
- if (unlikely(sess == NULL)) {
- ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
- qp->stats.dequeue_err_count++;
- break;
- }
-
- retval = chacha20_poly1305_crypto_op(qp, ops[i], sess);
- if (retval < 0) {
- ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
- qp->stats.dequeue_err_count++;
- break;
- }
-
- handle_completed_chacha20_poly1305_crypto_op(qp, ops[i], sess);
- }
-
- qp->stats.dequeued_count += i;
-
- return i;
-}
+#include "pmd_aesni_mb_priv.h"
struct rte_cryptodev_ops chacha20_poly1305_pmd_ops = {
.dev_configure = ipsec_mb_config,
@@ -384,7 +57,7 @@ RTE_INIT(ipsec_mb_register_chacha20_poly1305)
= &ipsec_mb_pmds[IPSEC_MB_PMD_TYPE_CHACHA20_POLY1305];
chacha_poly_data->caps = chacha20_poly1305_capabilities;
- chacha_poly_data->dequeue_burst = chacha20_poly1305_pmd_dequeue_burst;
+ chacha_poly_data->dequeue_burst = aesni_mb_dequeue_burst;
chacha_poly_data->feature_flags =
RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
@@ -398,7 +71,7 @@ RTE_INIT(ipsec_mb_register_chacha20_poly1305)
chacha_poly_data->qp_priv_size =
sizeof(struct chacha20_poly1305_qp_data);
chacha_poly_data->session_configure =
- chacha20_poly1305_session_configure;
+ aesni_mb_session_configure;
chacha_poly_data->session_priv_size =
- sizeof(struct chacha20_poly1305_session);
+ sizeof(struct aesni_mb_session);
}
diff --git a/drivers/crypto/ipsec_mb/pmd_chacha_poly_priv.h b/drivers/crypto/ipsec_mb/pmd_chacha_poly_priv.h
index 842f62f5d1..5b04c2edeb 100644
--- a/drivers/crypto/ipsec_mb/pmd_chacha_poly_priv.h
+++ b/drivers/crypto/ipsec_mb/pmd_chacha_poly_priv.h
@@ -7,9 +7,7 @@
#include "ipsec_mb_private.h"
-#define CHACHA20_POLY1305_IV_LENGTH 12
#define CHACHA20_POLY1305_DIGEST_LENGTH 16
-#define CHACHA20_POLY1305_KEY_SIZE 32
static const
struct rte_cryptodev_capabilities chacha20_poly1305_capabilities[] = {
@@ -45,23 +43,6 @@ struct rte_cryptodev_capabilities chacha20_poly1305_capabilities[] = {
uint8_t pmd_driver_id_chacha20_poly1305;
-/** CHACHA20 POLY1305 private session structure */
-struct chacha20_poly1305_session {
- struct {
- uint16_t length;
- uint16_t offset;
- } iv;
- /**< IV parameters */
- uint16_t aad_length;
- /**< AAD length */
- uint16_t req_digest_length;
- /**< Requested digest length */
- uint16_t gen_digest_length;
- /**< Generated digest length */
- uint8_t key[CHACHA20_POLY1305_KEY_SIZE];
- enum ipsec_mb_operation op;
-} __rte_cache_aligned;
-
struct chacha20_poly1305_qp_data {
struct chacha20_poly1305_context_data chacha20_poly1305_ctx_data;
uint8_t temp_digest[CHACHA20_POLY1305_DIGEST_LENGTH];
diff --git a/drivers/crypto/ipsec_mb/pmd_kasumi.c b/drivers/crypto/ipsec_mb/pmd_kasumi.c
index 5db9c523cd..0c549f9459 100644
--- a/drivers/crypto/ipsec_mb/pmd_kasumi.c
+++ b/drivers/crypto/ipsec_mb/pmd_kasumi.c
@@ -10,403 +10,7 @@
#include <rte_malloc.h>
#include "pmd_kasumi_priv.h"
-
-/** Parse crypto xform chain and set private session parameters. */
-static int
-kasumi_session_configure(IMB_MGR *mgr, void *priv_sess,
- const struct rte_crypto_sym_xform *xform)
-{
- const struct rte_crypto_sym_xform *auth_xform = NULL;
- const struct rte_crypto_sym_xform *cipher_xform = NULL;
- enum ipsec_mb_operation mode;
- struct kasumi_session *sess = (struct kasumi_session *)priv_sess;
- /* Select Crypto operation - hash then cipher / cipher then hash */
- int ret = ipsec_mb_parse_xform(xform, &mode, &auth_xform,
- &cipher_xform, NULL);
-
- if (ret)
- return ret;
-
- if (cipher_xform) {
- /* Only KASUMI F8 supported */
- if (cipher_xform->cipher.algo != RTE_CRYPTO_CIPHER_KASUMI_F8) {
- IPSEC_MB_LOG(ERR, "Unsupported cipher algorithm ");
- return -ENOTSUP;
- }
-
- sess->cipher_iv_offset = cipher_xform->cipher.iv.offset;
- if (cipher_xform->cipher.iv.length != KASUMI_IV_LENGTH) {
- IPSEC_MB_LOG(ERR, "Wrong IV length");
- return -EINVAL;
- }
-
- /* Initialize key */
- IMB_KASUMI_INIT_F8_KEY_SCHED(mgr,
- cipher_xform->cipher.key.data,
- &sess->pKeySched_cipher);
- }
-
- if (auth_xform) {
- /* Only KASUMI F9 supported */
- if (auth_xform->auth.algo != RTE_CRYPTO_AUTH_KASUMI_F9) {
- IPSEC_MB_LOG(ERR, "Unsupported authentication");
- return -ENOTSUP;
- }
-
- if (auth_xform->auth.digest_length != KASUMI_DIGEST_LENGTH) {
- IPSEC_MB_LOG(ERR, "Wrong digest length");
- return -EINVAL;
- }
-
- sess->auth_op = auth_xform->auth.op;
-
- /* Initialize key */
- IMB_KASUMI_INIT_F9_KEY_SCHED(mgr, auth_xform->auth.key.data,
- &sess->pKeySched_hash);
- }
-
- sess->op = mode;
- return ret;
-}
-
-/** Encrypt/decrypt mbufs with same cipher key. */
-static uint8_t
-process_kasumi_cipher_op(struct ipsec_mb_qp *qp, struct rte_crypto_op **ops,
- struct kasumi_session *session, uint8_t num_ops)
-{
- unsigned int i;
- uint8_t processed_ops = 0;
- const void *src[num_ops];
- void *dst[num_ops];
- uint8_t *iv_ptr;
- uint64_t iv[num_ops];
- uint32_t num_bytes[num_ops];
-
- for (i = 0; i < num_ops; i++) {
- src[i] = rte_pktmbuf_mtod(ops[i]->sym->m_src, uint8_t *)
- + (ops[i]->sym->cipher.data.offset >> 3);
- dst[i] = ops[i]->sym->m_dst
- ? rte_pktmbuf_mtod(ops[i]->sym->m_dst, uint8_t *)
- + (ops[i]->sym->cipher.data.offset >> 3)
- : rte_pktmbuf_mtod(ops[i]->sym->m_src, uint8_t *)
- + (ops[i]->sym->cipher.data.offset >> 3);
- iv_ptr = rte_crypto_op_ctod_offset(ops[i], uint8_t *,
- session->cipher_iv_offset);
- iv[i] = *((uint64_t *)(iv_ptr));
- num_bytes[i] = ops[i]->sym->cipher.data.length >> 3;
-
- processed_ops++;
- }
-
- if (processed_ops != 0)
- IMB_KASUMI_F8_N_BUFFER(qp->mb_mgr, &session->pKeySched_cipher,
- iv, src, dst, num_bytes,
- processed_ops);
-
- return processed_ops;
-}
-
-/** Encrypt/decrypt mbuf (bit level function). */
-static uint8_t
-process_kasumi_cipher_op_bit(struct ipsec_mb_qp *qp, struct rte_crypto_op *op,
- struct kasumi_session *session)
-{
- uint8_t *src, *dst;
- uint8_t *iv_ptr;
- uint64_t iv;
- uint32_t length_in_bits, offset_in_bits;
-
- offset_in_bits = op->sym->cipher.data.offset;
- src = rte_pktmbuf_mtod(op->sym->m_src, uint8_t *);
- if (op->sym->m_dst == NULL)
- dst = src;
- else
- dst = rte_pktmbuf_mtod(op->sym->m_dst, uint8_t *);
- iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
- session->cipher_iv_offset);
- iv = *((uint64_t *)(iv_ptr));
- length_in_bits = op->sym->cipher.data.length;
-
- IMB_KASUMI_F8_1_BUFFER_BIT(qp->mb_mgr, &session->pKeySched_cipher, iv,
- src, dst, length_in_bits, offset_in_bits);
-
- return 1;
-}
-
-/** Generate/verify hash from mbufs with same hash key. */
-static int
-process_kasumi_hash_op(struct ipsec_mb_qp *qp, struct rte_crypto_op **ops,
- struct kasumi_session *session, uint8_t num_ops)
-{
- unsigned int i;
- uint8_t processed_ops = 0;
- uint8_t *src, *dst;
- uint32_t length_in_bits;
- uint32_t num_bytes;
- struct kasumi_qp_data *qp_data = ipsec_mb_get_qp_private_data(qp);
-
- for (i = 0; i < num_ops; i++) {
- /* Data must be byte aligned */
- if ((ops[i]->sym->auth.data.offset % BYTE_LEN) != 0) {
- ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
- IPSEC_MB_LOG(ERR, "Invalid Offset");
- break;
- }
-
- length_in_bits = ops[i]->sym->auth.data.length;
-
- src = rte_pktmbuf_mtod(ops[i]->sym->m_src, uint8_t *)
- + (ops[i]->sym->auth.data.offset >> 3);
- /* Direction from next bit after end of message */
- num_bytes = length_in_bits >> 3;
-
- if (session->auth_op == RTE_CRYPTO_AUTH_OP_VERIFY) {
- dst = qp_data->temp_digest;
- IMB_KASUMI_F9_1_BUFFER(qp->mb_mgr,
- &session->pKeySched_hash, src,
- num_bytes, dst);
-
- /* Verify digest. */
- if (memcmp(dst, ops[i]->sym->auth.digest.data,
- KASUMI_DIGEST_LENGTH)
- != 0)
- ops[i]->status
- = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
- } else {
- dst = ops[i]->sym->auth.digest.data;
-
- IMB_KASUMI_F9_1_BUFFER(qp->mb_mgr,
- &session->pKeySched_hash, src,
- num_bytes, dst);
- }
- processed_ops++;
- }
-
- return processed_ops;
-}
-
-/** Process a batch of crypto ops which shares the same session. */
-static int
-process_ops(struct rte_crypto_op **ops, struct kasumi_session *session,
- struct ipsec_mb_qp *qp, uint8_t num_ops)
-{
- unsigned int i;
- unsigned int processed_ops;
-
- switch (session->op) {
- case IPSEC_MB_OP_ENCRYPT_ONLY:
- case IPSEC_MB_OP_DECRYPT_ONLY:
- processed_ops
- = process_kasumi_cipher_op(qp, ops, session, num_ops);
- break;
- case IPSEC_MB_OP_HASH_GEN_ONLY:
- case IPSEC_MB_OP_HASH_VERIFY_ONLY:
- processed_ops
- = process_kasumi_hash_op(qp, ops, session, num_ops);
- break;
- case IPSEC_MB_OP_ENCRYPT_THEN_HASH_GEN:
- case IPSEC_MB_OP_DECRYPT_THEN_HASH_VERIFY:
- processed_ops
- = process_kasumi_cipher_op(qp, ops, session, num_ops);
- process_kasumi_hash_op(qp, ops, session, processed_ops);
- break;
- case IPSEC_MB_OP_HASH_VERIFY_THEN_DECRYPT:
- case IPSEC_MB_OP_HASH_GEN_THEN_ENCRYPT:
- processed_ops
- = process_kasumi_hash_op(qp, ops, session, num_ops);
- process_kasumi_cipher_op(qp, ops, session, processed_ops);
- break;
- default:
- /* Operation not supported. */
- processed_ops = 0;
- }
-
- for (i = 0; i < num_ops; i++) {
- /*
- * If there was no error/authentication failure,
- * change status to successful.
- */
- if (ops[i]->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
- ops[i]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
- /* Free session if a session-less crypto op. */
- if (ops[i]->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
- memset(session, 0, sizeof(struct kasumi_session));
- rte_mempool_put(qp->sess_mp, ops[i]->sym->session);
- ops[i]->sym->session = NULL;
- }
- }
- return processed_ops;
-}
-
-/** Process a crypto op with length/offset in bits. */
-static int
-process_op_bit(struct rte_crypto_op *op, struct kasumi_session *session,
- struct ipsec_mb_qp *qp)
-{
- unsigned int processed_op;
-
- switch (session->op) {
- /* case KASUMI_OP_ONLY_CIPHER: */
- case IPSEC_MB_OP_ENCRYPT_ONLY:
- case IPSEC_MB_OP_DECRYPT_ONLY:
- processed_op = process_kasumi_cipher_op_bit(qp, op, session);
- break;
- /* case KASUMI_OP_ONLY_AUTH: */
- case IPSEC_MB_OP_HASH_GEN_ONLY:
- case IPSEC_MB_OP_HASH_VERIFY_ONLY:
- processed_op = process_kasumi_hash_op(qp, &op, session, 1);
- break;
- /* case KASUMI_OP_CIPHER_AUTH: */
- case IPSEC_MB_OP_ENCRYPT_THEN_HASH_GEN:
- processed_op = process_kasumi_cipher_op_bit(qp, op, session);
- if (processed_op == 1)
- process_kasumi_hash_op(qp, &op, session, 1);
- break;
- /* case KASUMI_OP_AUTH_CIPHER: */
- case IPSEC_MB_OP_HASH_VERIFY_THEN_DECRYPT:
- processed_op = process_kasumi_hash_op(qp, &op, session, 1);
- if (processed_op == 1)
- process_kasumi_cipher_op_bit(qp, op, session);
- break;
- default:
- /* Operation not supported. */
- processed_op = 0;
- }
-
- /*
- * If there was no error/authentication failure,
- * change status to successful.
- */
- if (op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
- op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
-
- /* Free session if a session-less crypto op. */
- if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
- memset(CRYPTODEV_GET_SYM_SESS_PRIV(op->sym->session), 0,
- sizeof(struct kasumi_session));
- rte_mempool_put(qp->sess_mp, (void *)op->sym->session);
- op->sym->session = NULL;
- }
- return processed_op;
-}
-
-static uint16_t
-kasumi_pmd_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
- uint16_t nb_ops)
-{
- struct rte_crypto_op *c_ops[nb_ops];
- struct rte_crypto_op *curr_c_op = NULL;
-
- struct kasumi_session *prev_sess = NULL, *curr_sess = NULL;
- struct ipsec_mb_qp *qp = queue_pair;
- unsigned int i;
- uint8_t burst_size = 0;
- uint8_t processed_ops;
- unsigned int nb_dequeued;
-
- nb_dequeued = rte_ring_dequeue_burst(qp->ingress_queue,
- (void **)ops, nb_ops, NULL);
- for (i = 0; i < nb_dequeued; i++) {
- curr_c_op = ops[i];
-
-#ifdef RTE_LIBRTE_PMD_KASUMI_DEBUG
- if (!rte_pktmbuf_is_contiguous(curr_c_op->sym->m_src)
- || (curr_c_op->sym->m_dst != NULL
- && !rte_pktmbuf_is_contiguous(
- curr_c_op->sym->m_dst))) {
- IPSEC_MB_LOG(ERR,
- "PMD supports only contiguous mbufs, op (%p) provides noncontiguous mbuf as source/destination buffer.",
- curr_c_op);
- curr_c_op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
- break;
- }
-#endif
-
- /* Set status as enqueued (not processed yet) by default. */
- curr_c_op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
-
- curr_sess = (struct kasumi_session *)
- ipsec_mb_get_session_private(qp, curr_c_op);
- if (unlikely(curr_sess == NULL
- || curr_sess->op == IPSEC_MB_OP_NOT_SUPPORTED)) {
- curr_c_op->status
- = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
- break;
- }
-
- /* If length/offset is at bit-level, process this buffer alone.
- */
- if (((curr_c_op->sym->cipher.data.length % BYTE_LEN) != 0)
- || ((ops[i]->sym->cipher.data.offset % BYTE_LEN) != 0)) {
- /* Process the ops of the previous session. */
- if (prev_sess != NULL) {
- processed_ops = process_ops(c_ops, prev_sess,
- qp, burst_size);
- if (processed_ops < burst_size) {
- burst_size = 0;
- break;
- }
-
- burst_size = 0;
- prev_sess = NULL;
- }
-
- processed_ops = process_op_bit(curr_c_op,
- curr_sess, qp);
- if (processed_ops != 1)
- break;
-
- continue;
- }
-
- /* Batch ops that share the same session. */
- if (prev_sess == NULL) {
- prev_sess = curr_sess;
- c_ops[burst_size++] = curr_c_op;
- } else if (curr_sess == prev_sess) {
- c_ops[burst_size++] = curr_c_op;
- /*
- * When there are enough ops to process in a batch,
- * process them, and start a new batch.
- */
- if (burst_size == KASUMI_MAX_BURST) {
- processed_ops = process_ops(c_ops, prev_sess,
- qp, burst_size);
- if (processed_ops < burst_size) {
- burst_size = 0;
- break;
- }
-
- burst_size = 0;
- prev_sess = NULL;
- }
- } else {
- /*
- * Different session, process the ops
- * of the previous session.
- */
- processed_ops = process_ops(c_ops, prev_sess, qp,
- burst_size);
- if (processed_ops < burst_size) {
- burst_size = 0;
- break;
- }
-
- burst_size = 0;
- prev_sess = curr_sess;
-
- c_ops[burst_size++] = curr_c_op;
- }
- }
-
- if (burst_size != 0) {
- /* Process the crypto ops of the last session. */
- processed_ops = process_ops(c_ops, prev_sess, qp, burst_size);
- }
-
- qp->stats.dequeued_count += i;
- return i;
-}
+#include "pmd_aesni_mb_priv.h"
struct rte_cryptodev_ops kasumi_pmd_ops = {
.dev_configure = ipsec_mb_config,
@@ -457,7 +61,7 @@ RTE_INIT(ipsec_mb_register_kasumi)
= &ipsec_mb_pmds[IPSEC_MB_PMD_TYPE_KASUMI];
kasumi_data->caps = kasumi_capabilities;
- kasumi_data->dequeue_burst = kasumi_pmd_dequeue_burst;
+ kasumi_data->dequeue_burst = aesni_mb_dequeue_burst;
kasumi_data->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO
| RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING
| RTE_CRYPTODEV_FF_NON_BYTE_ALIGNED_DATA
@@ -467,6 +71,6 @@ RTE_INIT(ipsec_mb_register_kasumi)
kasumi_data->internals_priv_size = 0;
kasumi_data->ops = &kasumi_pmd_ops;
kasumi_data->qp_priv_size = sizeof(struct kasumi_qp_data);
- kasumi_data->session_configure = kasumi_session_configure;
- kasumi_data->session_priv_size = sizeof(struct kasumi_session);
+ kasumi_data->session_configure = aesni_mb_session_configure;
+ kasumi_data->session_priv_size = sizeof(struct aesni_mb_session);
}
diff --git a/drivers/crypto/ipsec_mb/pmd_kasumi_priv.h b/drivers/crypto/ipsec_mb/pmd_kasumi_priv.h
index 8db1d1cc5b..fc962115ff 100644
--- a/drivers/crypto/ipsec_mb/pmd_kasumi_priv.h
+++ b/drivers/crypto/ipsec_mb/pmd_kasumi_priv.h
@@ -9,8 +9,6 @@
#define KASUMI_KEY_LENGTH 16
#define KASUMI_IV_LENGTH 8
-#define KASUMI_MAX_BURST 4
-#define BYTE_LEN 8
#define KASUMI_DIGEST_LENGTH 4
uint8_t pmd_driver_id_kasumi;
@@ -60,16 +58,6 @@ static const struct rte_cryptodev_capabilities kasumi_capabilities[] = {
RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
};
-/** KASUMI private session structure */
-struct kasumi_session {
- /* Keys have to be 16-byte aligned */
- kasumi_key_sched_t pKeySched_cipher;
- kasumi_key_sched_t pKeySched_hash;
- enum ipsec_mb_operation op;
- enum rte_crypto_auth_operation auth_op;
- uint16_t cipher_iv_offset;
-} __rte_cache_aligned;
-
struct kasumi_qp_data {
uint8_t temp_digest[KASUMI_DIGEST_LENGTH];
/* *< Buffers used to store the digest generated
diff --git a/drivers/crypto/ipsec_mb/pmd_snow3g.c b/drivers/crypto/ipsec_mb/pmd_snow3g.c
index e64df1a462..92ec955baa 100644
--- a/drivers/crypto/ipsec_mb/pmd_snow3g.c
+++ b/drivers/crypto/ipsec_mb/pmd_snow3g.c
@@ -3,539 +3,7 @@
*/
#include "pmd_snow3g_priv.h"
-
-/** Parse crypto xform chain and set private session parameters. */
-static int
-snow3g_session_configure(IMB_MGR *mgr, void *priv_sess,
- const struct rte_crypto_sym_xform *xform)
-{
- struct snow3g_session *sess = (struct snow3g_session *)priv_sess;
- const struct rte_crypto_sym_xform *auth_xform = NULL;
- const struct rte_crypto_sym_xform *cipher_xform = NULL;
- enum ipsec_mb_operation mode;
-
- /* Select Crypto operation - hash then cipher / cipher then hash */
- int ret = ipsec_mb_parse_xform(xform, &mode, &auth_xform,
- &cipher_xform, NULL);
- if (ret)
- return ret;
-
- if (cipher_xform) {
- /* Only SNOW 3G UEA2 supported */
- if (cipher_xform->cipher.algo != RTE_CRYPTO_CIPHER_SNOW3G_UEA2)
- return -ENOTSUP;
-
- if (cipher_xform->cipher.iv.length != SNOW3G_IV_LENGTH) {
- IPSEC_MB_LOG(ERR, "Wrong IV length");
- return -EINVAL;
- }
- if (cipher_xform->cipher.key.length > SNOW3G_MAX_KEY_SIZE) {
- IPSEC_MB_LOG(ERR, "Not enough memory to store the key");
- return -ENOMEM;
- }
-
- sess->cipher_iv_offset = cipher_xform->cipher.iv.offset;
-
- /* Initialize key */
- IMB_SNOW3G_INIT_KEY_SCHED(mgr, cipher_xform->cipher.key.data,
- &sess->pKeySched_cipher);
- }
-
- if (auth_xform) {
- /* Only SNOW 3G UIA2 supported */
- if (auth_xform->auth.algo != RTE_CRYPTO_AUTH_SNOW3G_UIA2)
- return -ENOTSUP;
-
- if (auth_xform->auth.digest_length != SNOW3G_DIGEST_LENGTH) {
- IPSEC_MB_LOG(ERR, "Wrong digest length");
- return -EINVAL;
- }
- if (auth_xform->auth.key.length > SNOW3G_MAX_KEY_SIZE) {
- IPSEC_MB_LOG(ERR, "Not enough memory to store the key");
- return -ENOMEM;
- }
-
- sess->auth_op = auth_xform->auth.op;
-
- if (auth_xform->auth.iv.length != SNOW3G_IV_LENGTH) {
- IPSEC_MB_LOG(ERR, "Wrong IV length");
- return -EINVAL;
- }
- sess->auth_iv_offset = auth_xform->auth.iv.offset;
-
- /* Initialize key */
- IMB_SNOW3G_INIT_KEY_SCHED(mgr, auth_xform->auth.key.data,
- &sess->pKeySched_hash);
- }
-
- sess->op = mode;
-
- return 0;
-}
-
-/** Check if conditions are met for digest-appended operations */
-static uint8_t *
-snow3g_digest_appended_in_src(struct rte_crypto_op *op)
-{
- unsigned int auth_size, cipher_size;
-
- auth_size = (op->sym->auth.data.offset >> 3) +
- (op->sym->auth.data.length >> 3);
- cipher_size = (op->sym->cipher.data.offset >> 3) +
- (op->sym->cipher.data.length >> 3);
-
- if (auth_size < cipher_size)
- return rte_pktmbuf_mtod_offset(op->sym->m_src,
- uint8_t *, auth_size);
-
- return NULL;
-}
-
-/** Encrypt/decrypt mbufs with same cipher key. */
-static uint8_t
-process_snow3g_cipher_op(struct ipsec_mb_qp *qp, struct rte_crypto_op **ops,
- struct snow3g_session *session,
- uint8_t num_ops)
-{
- uint32_t i;
- uint8_t processed_ops = 0;
- const void *src[SNOW3G_MAX_BURST] = {NULL};
- void *dst[SNOW3G_MAX_BURST] = {NULL};
- uint8_t *digest_appended[SNOW3G_MAX_BURST] = {NULL};
- const void *iv[SNOW3G_MAX_BURST] = {NULL};
- uint32_t num_bytes[SNOW3G_MAX_BURST] = {0};
- uint32_t cipher_off, cipher_len;
- int unencrypted_bytes = 0;
-
- for (i = 0; i < num_ops; i++) {
-
- cipher_off = ops[i]->sym->cipher.data.offset >> 3;
- cipher_len = ops[i]->sym->cipher.data.length >> 3;
- src[i] = rte_pktmbuf_mtod_offset(
- ops[i]->sym->m_src, uint8_t *, cipher_off);
-
- /* If out-of-place operation */
- if (ops[i]->sym->m_dst &&
- ops[i]->sym->m_src != ops[i]->sym->m_dst) {
- dst[i] = rte_pktmbuf_mtod_offset(
- ops[i]->sym->m_dst, uint8_t *, cipher_off);
-
- /* In case of out-of-place, auth-cipher operation
- * with partial encryption of the digest, copy
- * the remaining, unencrypted part.
- */
- if (session->op == IPSEC_MB_OP_HASH_VERIFY_THEN_DECRYPT
- || session->op == IPSEC_MB_OP_HASH_GEN_THEN_ENCRYPT)
- unencrypted_bytes =
- (ops[i]->sym->auth.data.offset >> 3) +
- (ops[i]->sym->auth.data.length >> 3) +
- (SNOW3G_DIGEST_LENGTH) -
- cipher_off - cipher_len;
- if (unencrypted_bytes > 0)
- rte_memcpy(
- rte_pktmbuf_mtod_offset(
- ops[i]->sym->m_dst, uint8_t *,
- cipher_off + cipher_len),
- rte_pktmbuf_mtod_offset(
- ops[i]->sym->m_src, uint8_t *,
- cipher_off + cipher_len),
- unencrypted_bytes);
- } else
- dst[i] = rte_pktmbuf_mtod_offset(ops[i]->sym->m_src,
- uint8_t *, cipher_off);
-
- iv[i] = rte_crypto_op_ctod_offset(ops[i], uint8_t *,
- session->cipher_iv_offset);
- num_bytes[i] = cipher_len;
- processed_ops++;
- }
-
- IMB_SNOW3G_F8_N_BUFFER(qp->mb_mgr, &session->pKeySched_cipher, iv,
- src, dst, num_bytes, processed_ops);
-
- /* Take care of the raw digest data in src buffer */
- for (i = 0; i < num_ops; i++) {
- if ((session->op == IPSEC_MB_OP_HASH_VERIFY_THEN_DECRYPT ||
- session->op == IPSEC_MB_OP_HASH_GEN_THEN_ENCRYPT) &&
- ops[i]->sym->m_dst != NULL) {
- digest_appended[i] =
- snow3g_digest_appended_in_src(ops[i]);
- /* Clear unencrypted digest from
- * the src buffer
- */
- if (digest_appended[i] != NULL)
- memset(digest_appended[i],
- 0, SNOW3G_DIGEST_LENGTH);
- }
- }
- return processed_ops;
-}
-
-/** Encrypt/decrypt mbuf (bit level function). */
-static uint8_t
-process_snow3g_cipher_op_bit(struct ipsec_mb_qp *qp,
- struct rte_crypto_op *op,
- struct snow3g_session *session)
-{
- uint8_t *src, *dst;
- uint8_t *iv;
- uint32_t length_in_bits, offset_in_bits;
- int unencrypted_bytes = 0;
-
- offset_in_bits = op->sym->cipher.data.offset;
- src = rte_pktmbuf_mtod(op->sym->m_src, uint8_t *);
- if (op->sym->m_dst == NULL) {
- op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
- IPSEC_MB_LOG(ERR, "bit-level in-place not supported\n");
- return 0;
- }
- length_in_bits = op->sym->cipher.data.length;
- dst = rte_pktmbuf_mtod(op->sym->m_dst, uint8_t *);
- /* In case of out-of-place, auth-cipher operation
- * with partial encryption of the digest, copy
- * the remaining, unencrypted part.
- */
- if (session->op == IPSEC_MB_OP_HASH_VERIFY_THEN_DECRYPT ||
- session->op == IPSEC_MB_OP_HASH_GEN_THEN_ENCRYPT)
- unencrypted_bytes =
- (op->sym->auth.data.offset >> 3) +
- (op->sym->auth.data.length >> 3) +
- (SNOW3G_DIGEST_LENGTH) -
- (offset_in_bits >> 3) -
- (length_in_bits >> 3);
- if (unencrypted_bytes > 0)
- rte_memcpy(
- rte_pktmbuf_mtod_offset(
- op->sym->m_dst, uint8_t *,
- (length_in_bits >> 3)),
- rte_pktmbuf_mtod_offset(
- op->sym->m_src, uint8_t *,
- (length_in_bits >> 3)),
- unencrypted_bytes);
-
- iv = rte_crypto_op_ctod_offset(op, uint8_t *,
- session->cipher_iv_offset);
-
- IMB_SNOW3G_F8_1_BUFFER_BIT(qp->mb_mgr, &session->pKeySched_cipher, iv,
- src, dst, length_in_bits, offset_in_bits);
-
- return 1;
-}
-
-/** Generate/verify hash from mbufs with same hash key. */
-static int
-process_snow3g_hash_op(struct ipsec_mb_qp *qp, struct rte_crypto_op **ops,
- struct snow3g_session *session,
- uint8_t num_ops)
-{
- uint32_t i;
- uint8_t processed_ops = 0;
- uint8_t *src, *dst;
- uint32_t length_in_bits;
- uint8_t *iv;
- uint8_t digest_appended = 0;
- struct snow3g_qp_data *qp_data = ipsec_mb_get_qp_private_data(qp);
-
- for (i = 0; i < num_ops; i++) {
- /* Data must be byte aligned */
- if ((ops[i]->sym->auth.data.offset % BYTE_LEN) != 0) {
- ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
- IPSEC_MB_LOG(ERR, "Offset");
- break;
- }
-
- dst = NULL;
-
- length_in_bits = ops[i]->sym->auth.data.length;
-
- src = rte_pktmbuf_mtod(ops[i]->sym->m_src, uint8_t *) +
- (ops[i]->sym->auth.data.offset >> 3);
- iv = rte_crypto_op_ctod_offset(ops[i], uint8_t *,
- session->auth_iv_offset);
-
- if (session->auth_op == RTE_CRYPTO_AUTH_OP_VERIFY) {
- dst = qp_data->temp_digest;
- /* Handle auth cipher verify oop case*/
- if ((session->op ==
- IPSEC_MB_OP_ENCRYPT_THEN_HASH_GEN ||
- session->op ==
- IPSEC_MB_OP_DECRYPT_THEN_HASH_VERIFY) &&
- ops[i]->sym->m_dst != NULL)
- src = rte_pktmbuf_mtod_offset(
- ops[i]->sym->m_dst, uint8_t *,
- ops[i]->sym->auth.data.offset >> 3);
-
- IMB_SNOW3G_F9_1_BUFFER(qp->mb_mgr,
- &session->pKeySched_hash,
- iv, src, length_in_bits, dst);
- /* Verify digest. */
- if (memcmp(dst, ops[i]->sym->auth.digest.data,
- SNOW3G_DIGEST_LENGTH) != 0)
- ops[i]->status =
- RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
- } else {
- if (session->op ==
- IPSEC_MB_OP_HASH_VERIFY_THEN_DECRYPT ||
- session->op ==
- IPSEC_MB_OP_HASH_GEN_THEN_ENCRYPT)
- dst = snow3g_digest_appended_in_src(ops[i]);
-
- if (dst != NULL)
- digest_appended = 1;
- else
- dst = ops[i]->sym->auth.digest.data;
-
- IMB_SNOW3G_F9_1_BUFFER(qp->mb_mgr,
- &session->pKeySched_hash,
- iv, src, length_in_bits, dst);
-
- /* Copy back digest from src to auth.digest.data */
- if (digest_appended)
- rte_memcpy(ops[i]->sym->auth.digest.data,
- dst, SNOW3G_DIGEST_LENGTH);
- }
- processed_ops++;
- }
-
- return processed_ops;
-}
-
-/** Process a batch of crypto ops which shares the same session. */
-static int
-process_ops(struct rte_crypto_op **ops, struct snow3g_session *session,
- struct ipsec_mb_qp *qp, uint8_t num_ops)
-{
- uint32_t i;
- uint32_t processed_ops;
-
-#ifdef RTE_LIBRTE_PMD_SNOW3G_DEBUG
- for (i = 0; i < num_ops; i++) {
- if (!rte_pktmbuf_is_contiguous(ops[i]->sym->m_src) ||
- (ops[i]->sym->m_dst != NULL &&
- !rte_pktmbuf_is_contiguous(
- ops[i]->sym->m_dst))) {
- IPSEC_MB_LOG(ERR,
- "PMD supports only contiguous mbufs, "
- "op (%p) provides noncontiguous mbuf as "
- "source/destination buffer.\n", ops[i]);
- ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
- return 0;
- }
- }
-#endif
-
- switch (session->op) {
- case IPSEC_MB_OP_ENCRYPT_ONLY:
- case IPSEC_MB_OP_DECRYPT_ONLY:
- processed_ops = process_snow3g_cipher_op(qp, ops,
- session, num_ops);
- break;
- case IPSEC_MB_OP_HASH_GEN_ONLY:
- case IPSEC_MB_OP_HASH_VERIFY_ONLY:
- processed_ops = process_snow3g_hash_op(qp, ops, session,
- num_ops);
- break;
- case IPSEC_MB_OP_ENCRYPT_THEN_HASH_GEN:
- case IPSEC_MB_OP_DECRYPT_THEN_HASH_VERIFY:
- processed_ops = process_snow3g_cipher_op(qp, ops, session,
- num_ops);
- process_snow3g_hash_op(qp, ops, session, processed_ops);
- break;
- case IPSEC_MB_OP_HASH_VERIFY_THEN_DECRYPT:
- case IPSEC_MB_OP_HASH_GEN_THEN_ENCRYPT:
- processed_ops = process_snow3g_hash_op(qp, ops, session,
- num_ops);
- process_snow3g_cipher_op(qp, ops, session, processed_ops);
- break;
- default:
- /* Operation not supported. */
- processed_ops = 0;
- }
-
- for (i = 0; i < num_ops; i++) {
- /*
- * If there was no error/authentication failure,
- * change status to successful.
- */
- if (ops[i]->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
- ops[i]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
- /* Free session if a session-less crypto op. */
- if (ops[i]->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
- memset(session, 0, sizeof(struct snow3g_session));
- rte_mempool_put(qp->sess_mp, ops[i]->sym->session);
- ops[i]->sym->session = NULL;
- }
- }
- return processed_ops;
-}
-
-/** Process a crypto op with length/offset in bits. */
-static int
-process_op_bit(struct rte_crypto_op *op, struct snow3g_session *session,
- struct ipsec_mb_qp *qp)
-{
- unsigned int processed_op;
- int ret;
-
- switch (session->op) {
- case IPSEC_MB_OP_ENCRYPT_ONLY:
- case IPSEC_MB_OP_DECRYPT_ONLY:
-
- processed_op = process_snow3g_cipher_op_bit(qp, op,
- session);
- break;
- case IPSEC_MB_OP_HASH_GEN_ONLY:
- case IPSEC_MB_OP_HASH_VERIFY_ONLY:
- processed_op = process_snow3g_hash_op(qp, &op, session, 1);
- break;
- case IPSEC_MB_OP_ENCRYPT_THEN_HASH_GEN:
- case IPSEC_MB_OP_DECRYPT_THEN_HASH_VERIFY:
- processed_op = process_snow3g_cipher_op_bit(qp, op, session);
- if (processed_op == 1)
- process_snow3g_hash_op(qp, &op, session, 1);
- break;
- case IPSEC_MB_OP_HASH_VERIFY_THEN_DECRYPT:
- case IPSEC_MB_OP_HASH_GEN_THEN_ENCRYPT:
- processed_op = process_snow3g_hash_op(qp, &op, session, 1);
- if (processed_op == 1)
- process_snow3g_cipher_op_bit(qp, op, session);
- break;
- default:
- /* Operation not supported. */
- processed_op = 0;
- }
-
- /*
- * If there was no error/authentication failure,
- * change status to successful.
- */
- if (op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
- op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
-
- /* Free session if a session-less crypto op. */
- if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
- memset(CRYPTODEV_GET_SYM_SESS_PRIV(op->sym->session), 0,
- sizeof(struct snow3g_session));
- rte_mempool_put(qp->sess_mp, (void *)op->sym->session);
- op->sym->session = NULL;
- }
-
- if (unlikely(processed_op != 1))
- return 0;
-
- ret = rte_ring_enqueue(qp->ingress_queue, op);
- if (ret != 0)
- return ret;
-
- return 1;
-}
-
-static uint16_t
-snow3g_pmd_dequeue_burst(void *queue_pair,
- struct rte_crypto_op **ops, uint16_t nb_ops)
-{
- struct ipsec_mb_qp *qp = queue_pair;
- struct rte_crypto_op *c_ops[SNOW3G_MAX_BURST];
- struct rte_crypto_op *curr_c_op;
-
- struct snow3g_session *prev_sess = NULL, *curr_sess = NULL;
- uint32_t i;
- uint8_t burst_size = 0;
- uint8_t processed_ops;
- uint32_t nb_dequeued;
-
- nb_dequeued = rte_ring_dequeue_burst(qp->ingress_queue,
- (void **)ops, nb_ops, NULL);
-
- for (i = 0; i < nb_dequeued; i++) {
- curr_c_op = ops[i];
-
- /* Set status as enqueued (not processed yet) by default. */
- curr_c_op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
-
- curr_sess = ipsec_mb_get_session_private(qp, curr_c_op);
- if (unlikely(curr_sess == NULL ||
- curr_sess->op == IPSEC_MB_OP_NOT_SUPPORTED)) {
- curr_c_op->status =
- RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
- break;
- }
-
- /* If length/offset is at bit-level,
- * process this buffer alone.
- */
- if (((curr_c_op->sym->cipher.data.length % BYTE_LEN) != 0)
- || ((curr_c_op->sym->cipher.data.offset
- % BYTE_LEN) != 0)) {
- /* Process the ops of the previous session. */
- if (prev_sess != NULL) {
- processed_ops = process_ops(c_ops, prev_sess,
- qp, burst_size);
- if (processed_ops < burst_size) {
- burst_size = 0;
- break;
- }
-
- burst_size = 0;
- prev_sess = NULL;
- }
-
- processed_ops = process_op_bit(curr_c_op, curr_sess, qp);
- if (processed_ops != 1)
- break;
-
- continue;
- }
-
- /* Batch ops that share the same session. */
- if (prev_sess == NULL) {
- prev_sess = curr_sess;
- c_ops[burst_size++] = curr_c_op;
- } else if (curr_sess == prev_sess) {
- c_ops[burst_size++] = curr_c_op;
- /*
- * When there are enough ops to process in a batch,
- * process them, and start a new batch.
- */
- if (burst_size == SNOW3G_MAX_BURST) {
- processed_ops = process_ops(c_ops, prev_sess,
- qp, burst_size);
- if (processed_ops < burst_size) {
- burst_size = 0;
- break;
- }
-
- burst_size = 0;
- prev_sess = NULL;
- }
- } else {
- /*
- * Different session, process the ops
- * of the previous session.
- */
- processed_ops = process_ops(c_ops, prev_sess,
- qp, burst_size);
- if (processed_ops < burst_size) {
- burst_size = 0;
- break;
- }
-
- burst_size = 0;
- prev_sess = curr_sess;
-
- c_ops[burst_size++] = curr_c_op;
- }
- }
-
- if (burst_size != 0) {
- /* Process the crypto ops of the last session. */
- processed_ops = process_ops(c_ops, prev_sess,
- qp, burst_size);
- }
-
- qp->stats.dequeued_count += i;
- return i;
-}
+#include "pmd_aesni_mb_priv.h"
struct rte_cryptodev_ops snow3g_pmd_ops = {
.dev_configure = ipsec_mb_config,
@@ -586,7 +54,7 @@ RTE_INIT(ipsec_mb_register_snow3g)
= &ipsec_mb_pmds[IPSEC_MB_PMD_TYPE_SNOW3G];
snow3g_data->caps = snow3g_capabilities;
- snow3g_data->dequeue_burst = snow3g_pmd_dequeue_burst;
+ snow3g_data->dequeue_burst = aesni_mb_dequeue_burst;
snow3g_data->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
RTE_CRYPTODEV_FF_NON_BYTE_ALIGNED_DATA |
@@ -596,6 +64,6 @@ RTE_INIT(ipsec_mb_register_snow3g)
snow3g_data->internals_priv_size = 0;
snow3g_data->ops = &snow3g_pmd_ops;
snow3g_data->qp_priv_size = sizeof(struct snow3g_qp_data);
- snow3g_data->session_configure = snow3g_session_configure;
- snow3g_data->session_priv_size = sizeof(struct snow3g_session);
+ snow3g_data->session_configure = aesni_mb_session_configure;
+ snow3g_data->session_priv_size = sizeof(struct aesni_mb_session);
}
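
For context, nothing changes for applications with this consolidation: the same SNOW 3G xform chain is submitted through the cryptodev API, and it is now parsed by the shared aesni_mb_session_configure() instead of the removed snow3g_session_configure(). A minimal illustrative sketch, not part of the patch (the key buffer and IV offsets are example assumptions; the sizes match the 16-byte key/IV and 4-byte digest the removed session code expected):

	#include <string.h>
	#include <rte_crypto.h>

	static void
	build_snow3g_xforms(struct rte_crypto_sym_xform *cipher,
			struct rte_crypto_sym_xform *auth, const uint8_t key[16])
	{
		/* Example convention: IVs placed right after the sym op. */
		const uint16_t iv_off = sizeof(struct rte_crypto_op) +
				sizeof(struct rte_crypto_sym_op);

		memset(cipher, 0, sizeof(*cipher));
		cipher->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
		cipher->cipher.algo = RTE_CRYPTO_CIPHER_SNOW3G_UEA2;
		cipher->cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
		cipher->cipher.key.data = key;
		cipher->cipher.key.length = 16;
		cipher->cipher.iv.offset = iv_off;
		cipher->cipher.iv.length = 16;
		cipher->next = auth;

		memset(auth, 0, sizeof(*auth));
		auth->type = RTE_CRYPTO_SYM_XFORM_AUTH;
		auth->auth.algo = RTE_CRYPTO_AUTH_SNOW3G_UIA2;
		auth->auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
		auth->auth.key.data = key;	/* example: same key for auth */
		auth->auth.key.length = 16;
		auth->auth.iv.offset = iv_off + 16;
		auth->auth.iv.length = 16;
		auth->auth.digest_length = 4;
		auth->next = NULL;
	}
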
diff --git a/drivers/crypto/ipsec_mb/pmd_snow3g_priv.h b/drivers/crypto/ipsec_mb/pmd_snow3g_priv.h
index ca1ce7f9d6..75c9a8e525 100644
--- a/drivers/crypto/ipsec_mb/pmd_snow3g_priv.h
+++ b/drivers/crypto/ipsec_mb/pmd_snow3g_priv.h
@@ -8,10 +8,7 @@
#include "ipsec_mb_private.h"
#define SNOW3G_IV_LENGTH 16
-#define SNOW3G_MAX_BURST 8
-#define BYTE_LEN 8
#define SNOW3G_DIGEST_LENGTH 4
-#define SNOW3G_MAX_KEY_SIZE 128
uint8_t pmd_driver_id_snow3g;
@@ -64,16 +61,6 @@ static const struct rte_cryptodev_capabilities snow3g_capabilities[] = {
RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
};
-/** SNOW 3G private session structure */
-struct snow3g_session {
- enum ipsec_mb_operation op;
- enum rte_crypto_auth_operation auth_op;
- snow3g_key_schedule_t pKeySched_cipher;
- snow3g_key_schedule_t pKeySched_hash;
- uint16_t cipher_iv_offset;
- uint16_t auth_iv_offset;
-} __rte_cache_aligned;
-
struct snow3g_qp_data {
uint8_t temp_digest[SNOW3G_DIGEST_LENGTH];
/**< Buffer used to store the digest generated
diff --git a/drivers/crypto/ipsec_mb/pmd_zuc.c b/drivers/crypto/ipsec_mb/pmd_zuc.c
index 92fd9d1808..a4eef57d62 100644
--- a/drivers/crypto/ipsec_mb/pmd_zuc.c
+++ b/drivers/crypto/ipsec_mb/pmd_zuc.c
@@ -3,341 +3,7 @@
*/
#include "pmd_zuc_priv.h"
-
-/** Parse crypto xform chain and set private session parameters. */
-static int
-zuc_session_configure(__rte_unused IMB_MGR * mgr, void *zuc_sess,
- const struct rte_crypto_sym_xform *xform)
-{
- struct zuc_session *sess = (struct zuc_session *) zuc_sess;
- const struct rte_crypto_sym_xform *auth_xform = NULL;
- const struct rte_crypto_sym_xform *cipher_xform = NULL;
- enum ipsec_mb_operation mode;
- /* Select Crypto operation - hash then cipher / cipher then hash */
- int ret = ipsec_mb_parse_xform(xform, &mode, &auth_xform,
- &cipher_xform, NULL);
-
- if (ret)
- return ret;
-
- if (cipher_xform) {
- /* Only ZUC EEA3 supported */
- if (cipher_xform->cipher.algo != RTE_CRYPTO_CIPHER_ZUC_EEA3)
- return -ENOTSUP;
-
- if (cipher_xform->cipher.iv.length != ZUC_IV_KEY_LENGTH) {
- IPSEC_MB_LOG(ERR, "Wrong IV length");
- return -EINVAL;
- }
- sess->cipher_iv_offset = cipher_xform->cipher.iv.offset;
-
- /* Copy the key */
- memcpy(sess->pKey_cipher, cipher_xform->cipher.key.data,
- ZUC_IV_KEY_LENGTH);
- }
-
- if (auth_xform) {
- /* Only ZUC EIA3 supported */
- if (auth_xform->auth.algo != RTE_CRYPTO_AUTH_ZUC_EIA3)
- return -ENOTSUP;
-
- if (auth_xform->auth.digest_length != ZUC_DIGEST_LENGTH) {
- IPSEC_MB_LOG(ERR, "Wrong digest length");
- return -EINVAL;
- }
-
- sess->auth_op = auth_xform->auth.op;
-
- if (auth_xform->auth.iv.length != ZUC_IV_KEY_LENGTH) {
- IPSEC_MB_LOG(ERR, "Wrong IV length");
- return -EINVAL;
- }
- sess->auth_iv_offset = auth_xform->auth.iv.offset;
-
- /* Copy the key */
- memcpy(sess->pKey_hash, auth_xform->auth.key.data,
- ZUC_IV_KEY_LENGTH);
- }
-
- sess->op = mode;
- return 0;
-}
-
-/** Encrypt/decrypt mbufs. */
-static uint8_t
-process_zuc_cipher_op(struct ipsec_mb_qp *qp, struct rte_crypto_op **ops,
- struct zuc_session **sessions,
- uint8_t num_ops)
-{
- unsigned int i;
- uint8_t processed_ops = 0;
- const void *src[ZUC_MAX_BURST];
- void *dst[ZUC_MAX_BURST];
- const void *iv[ZUC_MAX_BURST];
- uint32_t num_bytes[ZUC_MAX_BURST];
- const void *cipher_keys[ZUC_MAX_BURST];
- struct zuc_session *sess;
-
- for (i = 0; i < num_ops; i++) {
- if (((ops[i]->sym->cipher.data.length % BYTE_LEN) != 0)
- || ((ops[i]->sym->cipher.data.offset
- % BYTE_LEN) != 0)) {
- ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
- IPSEC_MB_LOG(ERR, "Data Length or offset");
- break;
- }
-
- sess = sessions[i];
-
-#ifdef RTE_LIBRTE_PMD_ZUC_DEBUG
- if (!rte_pktmbuf_is_contiguous(ops[i]->sym->m_src) ||
- (ops[i]->sym->m_dst != NULL &&
- !rte_pktmbuf_is_contiguous(
- ops[i]->sym->m_dst))) {
-			IPSEC_MB_LOG(ERR, "PMD supports only "
-				"contiguous mbufs, op (%p) "
-				"provides noncontiguous mbuf "
-				"as source/destination buffer.\n",
-				ops[i]);
- ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
- break;
- }
-#endif
-
- src[i] = rte_pktmbuf_mtod(ops[i]->sym->m_src, uint8_t *) +
- (ops[i]->sym->cipher.data.offset >> 3);
- dst[i] = ops[i]->sym->m_dst ?
- rte_pktmbuf_mtod(ops[i]->sym->m_dst, uint8_t *) +
- (ops[i]->sym->cipher.data.offset >> 3) :
- rte_pktmbuf_mtod(ops[i]->sym->m_src, uint8_t *) +
- (ops[i]->sym->cipher.data.offset >> 3);
- iv[i] = rte_crypto_op_ctod_offset(ops[i], uint8_t *,
- sess->cipher_iv_offset);
- num_bytes[i] = ops[i]->sym->cipher.data.length >> 3;
-
- cipher_keys[i] = sess->pKey_cipher;
-
- processed_ops++;
- }
-
- IMB_ZUC_EEA3_N_BUFFER(qp->mb_mgr, (const void **)cipher_keys,
- (const void **)iv, (const void **)src, (void **)dst,
- num_bytes, processed_ops);
-
- return processed_ops;
-}
-
-/** Generate/verify hash from mbufs. */
-static int
-process_zuc_hash_op(struct ipsec_mb_qp *qp, struct rte_crypto_op **ops,
- struct zuc_session **sessions,
- uint8_t num_ops)
-{
- unsigned int i;
- uint8_t processed_ops = 0;
- uint8_t *src[ZUC_MAX_BURST] = { 0 };
- uint32_t *dst[ZUC_MAX_BURST];
- uint32_t length_in_bits[ZUC_MAX_BURST] = { 0 };
- uint8_t *iv[ZUC_MAX_BURST] = { 0 };
- const void *hash_keys[ZUC_MAX_BURST] = { 0 };
- struct zuc_session *sess;
- struct zuc_qp_data *qp_data = ipsec_mb_get_qp_private_data(qp);
-
-
- for (i = 0; i < num_ops; i++) {
- /* Data must be byte aligned */
- if ((ops[i]->sym->auth.data.offset % BYTE_LEN) != 0) {
- ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
- IPSEC_MB_LOG(ERR, "Offset");
- break;
- }
-
- sess = sessions[i];
-
- length_in_bits[i] = ops[i]->sym->auth.data.length;
-
- src[i] = rte_pktmbuf_mtod(ops[i]->sym->m_src, uint8_t *) +
- (ops[i]->sym->auth.data.offset >> 3);
- iv[i] = rte_crypto_op_ctod_offset(ops[i], uint8_t *,
- sess->auth_iv_offset);
-
- hash_keys[i] = sess->pKey_hash;
- if (sess->auth_op == RTE_CRYPTO_AUTH_OP_VERIFY)
- dst[i] = (uint32_t *)qp_data->temp_digest[i];
- else
- dst[i] = (uint32_t *)ops[i]->sym->auth.digest.data;
-
- processed_ops++;
- }
-
- IMB_ZUC_EIA3_N_BUFFER(qp->mb_mgr, (const void **)hash_keys,
- (const void * const *)iv, (const void * const *)src,
- length_in_bits, dst, processed_ops);
-
- /*
- * If tag needs to be verified, compare generated tag
- * with attached tag
- */
- for (i = 0; i < processed_ops; i++)
- if (sessions[i]->auth_op == RTE_CRYPTO_AUTH_OP_VERIFY)
- if (memcmp(dst[i], ops[i]->sym->auth.digest.data,
- ZUC_DIGEST_LENGTH) != 0)
- ops[i]->status =
- RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
-
- return processed_ops;
-}
-
-/** Process a batch of crypto ops which shares the same operation type. */
-static int
-process_ops(struct rte_crypto_op **ops, enum ipsec_mb_operation op_type,
- struct zuc_session **sessions,
- struct ipsec_mb_qp *qp, uint8_t num_ops)
-{
- unsigned int i;
- unsigned int processed_ops = 0;
-
- switch (op_type) {
- case IPSEC_MB_OP_ENCRYPT_ONLY:
- case IPSEC_MB_OP_DECRYPT_ONLY:
- processed_ops = process_zuc_cipher_op(qp, ops,
- sessions, num_ops);
- break;
- case IPSEC_MB_OP_HASH_GEN_ONLY:
- case IPSEC_MB_OP_HASH_VERIFY_ONLY:
- processed_ops = process_zuc_hash_op(qp, ops, sessions,
- num_ops);
- break;
- case IPSEC_MB_OP_ENCRYPT_THEN_HASH_GEN:
- case IPSEC_MB_OP_DECRYPT_THEN_HASH_VERIFY:
- processed_ops = process_zuc_cipher_op(qp, ops, sessions,
- num_ops);
- process_zuc_hash_op(qp, ops, sessions, processed_ops);
- break;
- case IPSEC_MB_OP_HASH_VERIFY_THEN_DECRYPT:
- case IPSEC_MB_OP_HASH_GEN_THEN_ENCRYPT:
- processed_ops = process_zuc_hash_op(qp, ops, sessions,
- num_ops);
- process_zuc_cipher_op(qp, ops, sessions, processed_ops);
- break;
- default:
- /* Operation not supported. */
- for (i = 0; i < num_ops; i++)
- ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
- }
-
- for (i = 0; i < num_ops; i++) {
- /*
- * If there was no error/authentication failure,
- * change status to successful.
- */
- if (ops[i]->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
- ops[i]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
- /* Free session if a session-less crypto op. */
- if (ops[i]->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
- memset(sessions[i], 0, sizeof(struct zuc_session));
- rte_mempool_put(qp->sess_mp, ops[i]->sym->session);
- ops[i]->sym->session = NULL;
- }
- }
- return processed_ops;
-}
-
-static uint16_t
-zuc_pmd_dequeue_burst(void *queue_pair,
- struct rte_crypto_op **c_ops, uint16_t nb_ops)
-{
-
- struct rte_crypto_op *curr_c_op;
-
- struct zuc_session *curr_sess;
- struct zuc_session *sessions[ZUC_MAX_BURST];
- struct rte_crypto_op *int_c_ops[ZUC_MAX_BURST];
- enum ipsec_mb_operation prev_zuc_op = IPSEC_MB_OP_NOT_SUPPORTED;
- enum ipsec_mb_operation curr_zuc_op;
- struct ipsec_mb_qp *qp = queue_pair;
- unsigned int nb_dequeued;
- unsigned int i;
- uint8_t burst_size = 0;
- uint8_t processed_ops;
-
- nb_dequeued = rte_ring_dequeue_burst(qp->ingress_queue,
- (void **)c_ops, nb_ops, NULL);
-
-
- for (i = 0; i < nb_dequeued; i++) {
- curr_c_op = c_ops[i];
-
- curr_sess = (struct zuc_session *)
- ipsec_mb_get_session_private(qp, curr_c_op);
- if (unlikely(curr_sess == NULL)) {
- curr_c_op->status =
- RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
- break;
- }
-
- curr_zuc_op = curr_sess->op;
-
- /*
- * Batch ops that share the same operation type
- * (cipher only, auth only...).
- */
- if (burst_size == 0) {
- prev_zuc_op = curr_zuc_op;
- int_c_ops[0] = curr_c_op;
- sessions[0] = curr_sess;
- burst_size++;
- } else if (curr_zuc_op == prev_zuc_op) {
- int_c_ops[burst_size] = curr_c_op;
- sessions[burst_size] = curr_sess;
- burst_size++;
- /*
- * When there are enough ops to process in a batch,
- * process them, and start a new batch.
- */
- if (burst_size == ZUC_MAX_BURST) {
- processed_ops = process_ops(int_c_ops, curr_zuc_op,
- sessions, qp, burst_size);
- if (processed_ops < burst_size) {
- burst_size = 0;
- break;
- }
-
- burst_size = 0;
- }
- } else {
- /*
- * Different operation type, process the ops
- * of the previous type.
- */
- processed_ops = process_ops(int_c_ops, prev_zuc_op,
- sessions, qp, burst_size);
- if (processed_ops < burst_size) {
- burst_size = 0;
- break;
- }
-
- burst_size = 0;
- prev_zuc_op = curr_zuc_op;
-
- int_c_ops[0] = curr_c_op;
- sessions[0] = curr_sess;
- burst_size++;
- }
- }
-
- if (burst_size != 0) {
- /* Process the crypto ops of the last operation type. */
- processed_ops = process_ops(int_c_ops, prev_zuc_op,
- sessions, qp, burst_size);
- }
-
- qp->stats.dequeued_count += i;
- return i;
-}
+#include "pmd_aesni_mb_priv.h"
struct rte_cryptodev_ops zuc_pmd_ops = {
.dev_configure = ipsec_mb_config,
@@ -388,7 +54,7 @@ RTE_INIT(ipsec_mb_register_zuc)
= &ipsec_mb_pmds[IPSEC_MB_PMD_TYPE_ZUC];
zuc_data->caps = zuc_capabilities;
- zuc_data->dequeue_burst = zuc_pmd_dequeue_burst;
+ zuc_data->dequeue_burst = aesni_mb_dequeue_burst;
zuc_data->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO
| RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING
| RTE_CRYPTODEV_FF_NON_BYTE_ALIGNED_DATA
@@ -398,6 +64,6 @@ RTE_INIT(ipsec_mb_register_zuc)
zuc_data->internals_priv_size = 0;
zuc_data->ops = &zuc_pmd_ops;
zuc_data->qp_priv_size = sizeof(struct zuc_qp_data);
- zuc_data->session_configure = zuc_session_configure;
- zuc_data->session_priv_size = sizeof(struct zuc_session);
+ zuc_data->session_configure = aesni_mb_session_configure;
+ zuc_data->session_priv_size = sizeof(struct aesni_mb_session);
}
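
One behavioural detail to keep in mind when reviewing the removed ZUC/SNOW 3G/KASUMI dequeue paths: the cryptodev API expresses cipher and auth data offsets/lengths for these algorithms in bits, and the deleted per-PMD code did the ">> 3" conversions and byte-alignment checks itself, so the shared JOB-API path has to honour the same convention. A small illustrative sketch of an application filling those fields when building an op (the op mempool, mbuf and the 128-byte payload are example assumptions):

	#include <rte_crypto.h>
	#include <rte_mbuf.h>

	static struct rte_crypto_op *
	build_bit_level_op(struct rte_mempool *op_pool, struct rte_mbuf *m)
	{
		struct rte_crypto_op *op;

		op = rte_crypto_op_alloc(op_pool, RTE_CRYPTO_OP_TYPE_SYMMETRIC);
		if (op == NULL)
			return NULL;

		op->sym->m_src = m;
		/* Offsets and lengths are given in bits for these algorithms;
		 * byte-aligned values take the batched (non bit-level) path. */
		op->sym->cipher.data.offset = 0;
		op->sym->cipher.data.length = 128 * 8;	/* 128-byte payload */

		/* A session is attached with rte_crypto_op_attach_sym_session()
		 * before enqueuing, as usual. */
		return op;
	}
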
diff --git a/drivers/crypto/ipsec_mb/pmd_zuc_priv.h b/drivers/crypto/ipsec_mb/pmd_zuc_priv.h
index 76fd6758c2..2e6eebc409 100644
--- a/drivers/crypto/ipsec_mb/pmd_zuc_priv.h
+++ b/drivers/crypto/ipsec_mb/pmd_zuc_priv.h
@@ -10,7 +10,6 @@
#define ZUC_IV_KEY_LENGTH 16
#define ZUC_DIGEST_LENGTH 4
#define ZUC_MAX_BURST 16
-#define BYTE_LEN 8
uint8_t pmd_driver_id_zuc;
@@ -63,16 +62,6 @@ static const struct rte_cryptodev_capabilities zuc_capabilities[] = {
RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
};
-/** ZUC private session structure */
-struct zuc_session {
- enum ipsec_mb_operation op;
- enum rte_crypto_auth_operation auth_op;
- uint8_t pKey_cipher[ZUC_IV_KEY_LENGTH];
- uint8_t pKey_hash[ZUC_IV_KEY_LENGTH];
- uint16_t cipher_iv_offset;
- uint16_t auth_iv_offset;
-} __rte_cache_aligned;
-
struct zuc_qp_data {
uint8_t temp_digest[ZUC_MAX_BURST][ZUC_DIGEST_LENGTH];
--
2.25.1
* [PATCH v2] crypto/ipsec_mb: unified IPsec MB interface
2023-12-12 15:36 [PATCH v1] crypto/ipsec_mb: unified IPsec MB interface Brian Dooley
@ 2023-12-14 15:15 ` Brian Dooley
2024-01-18 12:00 ` [PATCH v3] " Brian Dooley
` (4 subsequent siblings)
5 siblings, 0 replies; 45+ messages in thread
From: Brian Dooley @ 2023-12-14 15:15 UTC (permalink / raw)
To: Kai Ji, Pablo de Lara; +Cc: dev, gakhil, Brian Dooley
Currently IPsec MB provides both the JOB API and direct API.
AESNI_MB PMD is using the JOB API codepath while ZUC, KASUMI, SNOW3G,
CHACHA20_POLY1305 and AESNI_GCM are using the direct API.
Instead of using the direct API for these PMDs, they should now make
use of the JOB API codepath. This would remove all use of the IPsec MB
direct API for these PMDs.
Signed-off-by: Brian Dooley <brian.dooley@intel.com>
---
v2:
- Fix compilation failure
---
drivers/crypto/ipsec_mb/pmd_aesni_gcm.c | 758 +-------------------
drivers/crypto/ipsec_mb/pmd_aesni_mb.c | 8 +-
drivers/crypto/ipsec_mb/pmd_aesni_mb_priv.h | 13 +
drivers/crypto/ipsec_mb/pmd_chacha_poly.c | 335 +--------
drivers/crypto/ipsec_mb/pmd_kasumi.c | 404 +----------
drivers/crypto/ipsec_mb/pmd_snow3g.c | 540 +-------------
drivers/crypto/ipsec_mb/pmd_zuc.c | 342 +--------
7 files changed, 39 insertions(+), 2361 deletions(-)
diff --git a/drivers/crypto/ipsec_mb/pmd_aesni_gcm.c b/drivers/crypto/ipsec_mb/pmd_aesni_gcm.c
index 8d40bd9169..44609333ee 100644
--- a/drivers/crypto/ipsec_mb/pmd_aesni_gcm.c
+++ b/drivers/crypto/ipsec_mb/pmd_aesni_gcm.c
@@ -3,753 +3,7 @@
*/
#include "pmd_aesni_gcm_priv.h"
-
-static void
-aesni_gcm_set_ops(struct aesni_gcm_ops *ops, IMB_MGR *mb_mgr)
-{
- /* Set 128 bit function pointers. */
- ops[GCM_KEY_128].pre = mb_mgr->gcm128_pre;
- ops[GCM_KEY_128].init = mb_mgr->gcm128_init;
-
- ops[GCM_KEY_128].enc = mb_mgr->gcm128_enc;
- ops[GCM_KEY_128].update_enc = mb_mgr->gcm128_enc_update;
- ops[GCM_KEY_128].finalize_enc = mb_mgr->gcm128_enc_finalize;
-
- ops[GCM_KEY_128].dec = mb_mgr->gcm128_dec;
- ops[GCM_KEY_128].update_dec = mb_mgr->gcm128_dec_update;
- ops[GCM_KEY_128].finalize_dec = mb_mgr->gcm128_dec_finalize;
-
- ops[GCM_KEY_128].gmac_init = mb_mgr->gmac128_init;
- ops[GCM_KEY_128].gmac_update = mb_mgr->gmac128_update;
- ops[GCM_KEY_128].gmac_finalize = mb_mgr->gmac128_finalize;
-
- /* Set 192 bit function pointers. */
- ops[GCM_KEY_192].pre = mb_mgr->gcm192_pre;
- ops[GCM_KEY_192].init = mb_mgr->gcm192_init;
-
- ops[GCM_KEY_192].enc = mb_mgr->gcm192_enc;
- ops[GCM_KEY_192].update_enc = mb_mgr->gcm192_enc_update;
- ops[GCM_KEY_192].finalize_enc = mb_mgr->gcm192_enc_finalize;
-
- ops[GCM_KEY_192].dec = mb_mgr->gcm192_dec;
- ops[GCM_KEY_192].update_dec = mb_mgr->gcm192_dec_update;
- ops[GCM_KEY_192].finalize_dec = mb_mgr->gcm192_dec_finalize;
-
- ops[GCM_KEY_192].gmac_init = mb_mgr->gmac192_init;
- ops[GCM_KEY_192].gmac_update = mb_mgr->gmac192_update;
- ops[GCM_KEY_192].gmac_finalize = mb_mgr->gmac192_finalize;
-
- /* Set 256 bit function pointers. */
- ops[GCM_KEY_256].pre = mb_mgr->gcm256_pre;
- ops[GCM_KEY_256].init = mb_mgr->gcm256_init;
-
- ops[GCM_KEY_256].enc = mb_mgr->gcm256_enc;
- ops[GCM_KEY_256].update_enc = mb_mgr->gcm256_enc_update;
- ops[GCM_KEY_256].finalize_enc = mb_mgr->gcm256_enc_finalize;
-
- ops[GCM_KEY_256].dec = mb_mgr->gcm256_dec;
- ops[GCM_KEY_256].update_dec = mb_mgr->gcm256_dec_update;
- ops[GCM_KEY_256].finalize_dec = mb_mgr->gcm256_dec_finalize;
-
- ops[GCM_KEY_256].gmac_init = mb_mgr->gmac256_init;
- ops[GCM_KEY_256].gmac_update = mb_mgr->gmac256_update;
- ops[GCM_KEY_256].gmac_finalize = mb_mgr->gmac256_finalize;
-}
-
-static int
-aesni_gcm_session_configure(IMB_MGR *mb_mgr, void *session,
- const struct rte_crypto_sym_xform *xform)
-{
- struct aesni_gcm_session *sess = session;
- const struct rte_crypto_sym_xform *auth_xform;
- const struct rte_crypto_sym_xform *cipher_xform;
- const struct rte_crypto_sym_xform *aead_xform;
-
- uint8_t key_length;
- const uint8_t *key;
- enum ipsec_mb_operation mode;
- int ret = 0;
-
- ret = ipsec_mb_parse_xform(xform, &mode, &auth_xform,
- &cipher_xform, &aead_xform);
- if (ret)
- return ret;
-
- /**< GCM key type */
-
- sess->op = mode;
-
- switch (sess->op) {
- case IPSEC_MB_OP_HASH_GEN_ONLY:
- case IPSEC_MB_OP_HASH_VERIFY_ONLY:
- /* AES-GMAC
- * auth_xform = xform;
- */
- if (auth_xform->auth.algo != RTE_CRYPTO_AUTH_AES_GMAC) {
- IPSEC_MB_LOG(ERR,
- "Only AES GMAC is supported as an authentication only algorithm");
- ret = -ENOTSUP;
- goto error_exit;
- }
- /* Set IV parameters */
- sess->iv.offset = auth_xform->auth.iv.offset;
- sess->iv.length = auth_xform->auth.iv.length;
- key_length = auth_xform->auth.key.length;
- key = auth_xform->auth.key.data;
- sess->req_digest_length =
- RTE_MIN(auth_xform->auth.digest_length,
- DIGEST_LENGTH_MAX);
- break;
- case IPSEC_MB_OP_AEAD_AUTHENTICATED_ENCRYPT:
- case IPSEC_MB_OP_AEAD_AUTHENTICATED_DECRYPT:
- /* AES-GCM
- * aead_xform = xform;
- */
-
- if (aead_xform->aead.algo != RTE_CRYPTO_AEAD_AES_GCM) {
- IPSEC_MB_LOG(ERR,
- "The only combined operation supported is AES GCM");
- ret = -ENOTSUP;
- goto error_exit;
- }
- /* Set IV parameters */
- sess->iv.offset = aead_xform->aead.iv.offset;
- sess->iv.length = aead_xform->aead.iv.length;
- key_length = aead_xform->aead.key.length;
- key = aead_xform->aead.key.data;
- sess->aad_length = aead_xform->aead.aad_length;
- sess->req_digest_length =
- RTE_MIN(aead_xform->aead.digest_length,
- DIGEST_LENGTH_MAX);
- break;
- default:
- IPSEC_MB_LOG(
- ERR, "Wrong xform type, has to be AEAD or authentication");
- ret = -ENOTSUP;
- goto error_exit;
- }
-
- /* Check key length, and calculate GCM pre-compute. */
- switch (key_length) {
- case 16:
- sess->key_length = GCM_KEY_128;
- mb_mgr->gcm128_pre(key, &sess->gdata_key);
- break;
- case 24:
- sess->key_length = GCM_KEY_192;
- mb_mgr->gcm192_pre(key, &sess->gdata_key);
- break;
- case 32:
- sess->key_length = GCM_KEY_256;
- mb_mgr->gcm256_pre(key, &sess->gdata_key);
- break;
- default:
- IPSEC_MB_LOG(ERR, "Invalid key length");
- ret = -EINVAL;
- goto error_exit;
- }
-
- /* Digest check */
- if (sess->req_digest_length > 16) {
- IPSEC_MB_LOG(ERR, "Invalid digest length");
- ret = -EINVAL;
- goto error_exit;
- }
- /*
- * If size requested is different, generate the full digest
- * (16 bytes) in a temporary location and then memcpy
- * the requested number of bytes.
- */
- if (sess->req_digest_length < 4)
- sess->gen_digest_length = 16;
- else
- sess->gen_digest_length = sess->req_digest_length;
-
-error_exit:
- return ret;
-}
-
-/**
- * Process a completed job and return rte_mbuf which job processed
- *
- * @param job IMB_JOB job to process
- *
- * @return
- * - Returns processed mbuf which is trimmed of output digest used in
- * verification of supplied digest in the case of a HASH_CIPHER operation
- * - Returns NULL on invalid job
- */
-static void
-post_process_gcm_crypto_op(struct ipsec_mb_qp *qp,
- struct rte_crypto_op *op,
- struct aesni_gcm_session *session)
-{
- struct aesni_gcm_qp_data *qp_data = ipsec_mb_get_qp_private_data(qp);
-
- op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
- /* Verify digest if required */
- if (session->op == IPSEC_MB_OP_AEAD_AUTHENTICATED_DECRYPT ||
- session->op == IPSEC_MB_OP_HASH_VERIFY_ONLY) {
- uint8_t *digest;
-
- uint8_t *tag = qp_data->temp_digest;
-
- if (session->op == IPSEC_MB_OP_HASH_VERIFY_ONLY)
- digest = op->sym->auth.digest.data;
- else
- digest = op->sym->aead.digest.data;
-
-#ifdef RTE_LIBRTE_PMD_AESNI_GCM_DEBUG
- rte_hexdump(stdout, "auth tag (orig):",
- digest, session->req_digest_length);
- rte_hexdump(stdout, "auth tag (calc):",
- tag, session->req_digest_length);
-#endif
-
- if (memcmp(tag, digest, session->req_digest_length) != 0)
- op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
- } else {
- if (session->req_digest_length != session->gen_digest_length) {
- if (session->op ==
- IPSEC_MB_OP_AEAD_AUTHENTICATED_ENCRYPT)
- memcpy(op->sym->aead.digest.data,
- qp_data->temp_digest,
- session->req_digest_length);
- else
- memcpy(op->sym->auth.digest.data,
- qp_data->temp_digest,
- session->req_digest_length);
- }
- }
-}
-
-/**
- * Process a completed GCM request
- *
- * @param qp Queue Pair to process
- * @param op Crypto operation
- * @param sess AESNI-GCM session
- *
- */
-static void
-handle_completed_gcm_crypto_op(struct ipsec_mb_qp *qp,
- struct rte_crypto_op *op,
- struct aesni_gcm_session *sess)
-{
- post_process_gcm_crypto_op(qp, op, sess);
-
- /* Free session if a session-less crypto op */
- if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
- memset(sess, 0, sizeof(struct aesni_gcm_session));
- rte_mempool_put(qp->sess_mp, op->sym->session);
- op->sym->session = NULL;
- }
-}
-
-/**
- * Process a crypto operation, calling
- * the GCM API from the multi buffer library.
- *
- * @param qp queue pair
- * @param op symmetric crypto operation
- * @param session GCM session
- *
- * @return
- * 0 on success
- */
-static int
-process_gcm_crypto_op(struct ipsec_mb_qp *qp, struct rte_crypto_op *op,
- struct aesni_gcm_session *session)
-{
- struct aesni_gcm_qp_data *qp_data = ipsec_mb_get_qp_private_data(qp);
- uint8_t *src, *dst;
- uint8_t *iv_ptr;
- struct rte_crypto_sym_op *sym_op = op->sym;
- struct rte_mbuf *m_src = sym_op->m_src;
- uint32_t offset, data_offset, data_length;
- uint32_t part_len, total_len, data_len;
- uint8_t *tag;
- unsigned int oop = 0;
- struct aesni_gcm_ops *ops = &qp_data->ops[session->key_length];
-
- if (session->op == IPSEC_MB_OP_AEAD_AUTHENTICATED_ENCRYPT ||
- session->op == IPSEC_MB_OP_AEAD_AUTHENTICATED_DECRYPT) {
- offset = sym_op->aead.data.offset;
- data_offset = offset;
- data_length = sym_op->aead.data.length;
- } else {
- offset = sym_op->auth.data.offset;
- data_offset = offset;
- data_length = sym_op->auth.data.length;
- }
-
- RTE_ASSERT(m_src != NULL);
-
- while (offset >= m_src->data_len && data_length != 0) {
- offset -= m_src->data_len;
- m_src = m_src->next;
-
- RTE_ASSERT(m_src != NULL);
- }
-
- src = rte_pktmbuf_mtod_offset(m_src, uint8_t *, offset);
-
- data_len = m_src->data_len - offset;
- part_len = (data_len < data_length) ? data_len :
- data_length;
-
- RTE_ASSERT((sym_op->m_dst == NULL) ||
- ((sym_op->m_dst != NULL) &&
- rte_pktmbuf_is_contiguous(sym_op->m_dst)));
-
- /* In-place */
- if (sym_op->m_dst == NULL || (sym_op->m_dst == sym_op->m_src))
- dst = src;
- /* Out-of-place */
- else {
- oop = 1;
- /* Segmented destination buffer is not supported
- * if operation is Out-of-place
- */
- RTE_ASSERT(rte_pktmbuf_is_contiguous(sym_op->m_dst));
- dst = rte_pktmbuf_mtod_offset(sym_op->m_dst, uint8_t *,
- data_offset);
- }
-
- iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
- session->iv.offset);
-
- if (session->op == IPSEC_MB_OP_AEAD_AUTHENTICATED_ENCRYPT) {
- ops->init(&session->gdata_key, &qp_data->gcm_ctx_data, iv_ptr,
- sym_op->aead.aad.data,
- (uint64_t)session->aad_length);
-
- ops->update_enc(&session->gdata_key, &qp_data->gcm_ctx_data,
- dst, src, (uint64_t)part_len);
- total_len = data_length - part_len;
-
- while (total_len) {
- m_src = m_src->next;
-
- RTE_ASSERT(m_src != NULL);
-
- src = rte_pktmbuf_mtod(m_src, uint8_t *);
- if (oop)
- dst += part_len;
- else
- dst = src;
- part_len = (m_src->data_len < total_len) ?
- m_src->data_len : total_len;
-
- ops->update_enc(&session->gdata_key,
- &qp_data->gcm_ctx_data,
- dst, src, (uint64_t)part_len);
- total_len -= part_len;
- }
-
- if (session->req_digest_length != session->gen_digest_length)
- tag = qp_data->temp_digest;
- else
- tag = sym_op->aead.digest.data;
-
- ops->finalize_enc(&session->gdata_key, &qp_data->gcm_ctx_data,
- tag, session->gen_digest_length);
- } else if (session->op == IPSEC_MB_OP_AEAD_AUTHENTICATED_DECRYPT) {
- ops->init(&session->gdata_key, &qp_data->gcm_ctx_data, iv_ptr,
- sym_op->aead.aad.data,
- (uint64_t)session->aad_length);
-
- ops->update_dec(&session->gdata_key, &qp_data->gcm_ctx_data,
- dst, src, (uint64_t)part_len);
- total_len = data_length - part_len;
-
- while (total_len) {
- m_src = m_src->next;
-
- RTE_ASSERT(m_src != NULL);
-
- src = rte_pktmbuf_mtod(m_src, uint8_t *);
- if (oop)
- dst += part_len;
- else
- dst = src;
- part_len = (m_src->data_len < total_len) ?
- m_src->data_len : total_len;
-
- ops->update_dec(&session->gdata_key,
- &qp_data->gcm_ctx_data,
- dst, src, (uint64_t)part_len);
- total_len -= part_len;
- }
-
- tag = qp_data->temp_digest;
- ops->finalize_dec(&session->gdata_key, &qp_data->gcm_ctx_data,
- tag, session->gen_digest_length);
- } else if (session->op == IPSEC_MB_OP_HASH_GEN_ONLY) {
- ops->gmac_init(&session->gdata_key, &qp_data->gcm_ctx_data,
- iv_ptr, session->iv.length);
-
- ops->gmac_update(&session->gdata_key, &qp_data->gcm_ctx_data,
- src, (uint64_t)part_len);
- total_len = data_length - part_len;
-
- while (total_len) {
- m_src = m_src->next;
-
- RTE_ASSERT(m_src != NULL);
-
- src = rte_pktmbuf_mtod(m_src, uint8_t *);
- part_len = (m_src->data_len < total_len) ?
- m_src->data_len : total_len;
-
- ops->gmac_update(&session->gdata_key,
- &qp_data->gcm_ctx_data, src,
- (uint64_t)part_len);
- total_len -= part_len;
- }
-
- if (session->req_digest_length != session->gen_digest_length)
- tag = qp_data->temp_digest;
- else
- tag = sym_op->auth.digest.data;
-
- ops->gmac_finalize(&session->gdata_key, &qp_data->gcm_ctx_data,
- tag, session->gen_digest_length);
- } else { /* IPSEC_MB_OP_HASH_VERIFY_ONLY */
- ops->gmac_init(&session->gdata_key, &qp_data->gcm_ctx_data,
- iv_ptr, session->iv.length);
-
- ops->gmac_update(&session->gdata_key, &qp_data->gcm_ctx_data,
- src, (uint64_t)part_len);
- total_len = data_length - part_len;
-
- while (total_len) {
- m_src = m_src->next;
-
- RTE_ASSERT(m_src != NULL);
-
- src = rte_pktmbuf_mtod(m_src, uint8_t *);
- part_len = (m_src->data_len < total_len) ?
- m_src->data_len : total_len;
-
- ops->gmac_update(&session->gdata_key,
- &qp_data->gcm_ctx_data, src,
- (uint64_t)part_len);
- total_len -= part_len;
- }
-
- tag = qp_data->temp_digest;
-
- ops->gmac_finalize(&session->gdata_key, &qp_data->gcm_ctx_data,
- tag, session->gen_digest_length);
- }
- return 0;
-}
-
-/** Get gcm session */
-static inline struct aesni_gcm_session *
-aesni_gcm_get_session(struct ipsec_mb_qp *qp,
- struct rte_crypto_op *op)
-{
- struct rte_cryptodev_sym_session *sess = NULL;
- struct rte_crypto_sym_op *sym_op = op->sym;
-
- if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
- if (likely(sym_op->session != NULL))
- sess = sym_op->session;
- } else {
- if (rte_mempool_get(qp->sess_mp, (void **)&sess))
- return NULL;
-
- if (unlikely(sess->sess_data_sz <
- sizeof(struct aesni_gcm_session))) {
- rte_mempool_put(qp->sess_mp, sess);
- return NULL;
- }
-
- if (unlikely(aesni_gcm_session_configure(qp->mb_mgr,
- CRYPTODEV_GET_SYM_SESS_PRIV(sess),
- sym_op->xform) != 0)) {
- rte_mempool_put(qp->sess_mp, sess);
- sess = NULL;
- }
- sym_op->session = sess;
- }
-
- if (unlikely(sess == NULL))
- op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
-
- return CRYPTODEV_GET_SYM_SESS_PRIV(sess);
-}
-
-static uint16_t
-aesni_gcm_pmd_dequeue_burst(void *queue_pair,
- struct rte_crypto_op **ops, uint16_t nb_ops)
-{
- struct aesni_gcm_session *sess;
- struct ipsec_mb_qp *qp = queue_pair;
-
- int retval = 0;
- unsigned int i, nb_dequeued;
-
- nb_dequeued = rte_ring_dequeue_burst(qp->ingress_queue,
- (void **)ops, nb_ops, NULL);
-
- for (i = 0; i < nb_dequeued; i++) {
-
- sess = aesni_gcm_get_session(qp, ops[i]);
- if (unlikely(sess == NULL)) {
- ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
- qp->stats.dequeue_err_count++;
- break;
- }
-
- retval = process_gcm_crypto_op(qp, ops[i], sess);
- if (retval < 0) {
- ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
- qp->stats.dequeue_err_count++;
- break;
- }
-
- handle_completed_gcm_crypto_op(qp, ops[i], sess);
- }
-
- qp->stats.dequeued_count += i;
-
- return i;
-}
-
-static inline void
-aesni_gcm_fill_error_code(struct rte_crypto_sym_vec *vec,
- int32_t errnum)
-{
- uint32_t i;
-
- for (i = 0; i < vec->num; i++)
- vec->status[i] = errnum;
-}
-
-static inline int32_t
-aesni_gcm_sgl_op_finalize_encryption(const struct aesni_gcm_session *s,
- struct gcm_context_data *gdata_ctx,
- uint8_t *digest, struct aesni_gcm_ops ops)
-{
- if (s->req_digest_length != s->gen_digest_length) {
- uint8_t tmpdigest[s->gen_digest_length];
-
- ops.finalize_enc(&s->gdata_key, gdata_ctx, tmpdigest,
- s->gen_digest_length);
- memcpy(digest, tmpdigest, s->req_digest_length);
- } else {
- ops.finalize_enc(&s->gdata_key, gdata_ctx, digest,
- s->gen_digest_length);
- }
-
- return 0;
-}
-
-static inline int32_t
-aesni_gcm_sgl_op_finalize_decryption(const struct aesni_gcm_session *s,
- struct gcm_context_data *gdata_ctx,
- uint8_t *digest, struct aesni_gcm_ops ops)
-{
- uint8_t tmpdigest[s->gen_digest_length];
-
- ops.finalize_dec(&s->gdata_key, gdata_ctx, tmpdigest,
- s->gen_digest_length);
-
- return memcmp(digest, tmpdigest, s->req_digest_length) == 0 ? 0
- : EBADMSG;
-}
-
-static inline void
-aesni_gcm_process_gcm_sgl_op(const struct aesni_gcm_session *s,
- struct gcm_context_data *gdata_ctx,
- struct rte_crypto_sgl *sgl, void *iv, void *aad,
- struct aesni_gcm_ops ops)
-{
- uint32_t i;
-
- /* init crypto operation */
- ops.init(&s->gdata_key, gdata_ctx, iv, aad,
- (uint64_t)s->aad_length);
-
- /* update with sgl data */
- for (i = 0; i < sgl->num; i++) {
- struct rte_crypto_vec *vec = &sgl->vec[i];
-
- switch (s->op) {
- case IPSEC_MB_OP_AEAD_AUTHENTICATED_ENCRYPT:
- ops.update_enc(&s->gdata_key, gdata_ctx,
- vec->base, vec->base, vec->len);
- break;
- case IPSEC_MB_OP_AEAD_AUTHENTICATED_DECRYPT:
- ops.update_dec(&s->gdata_key, gdata_ctx,
- vec->base, vec->base, vec->len);
- break;
- default:
- IPSEC_MB_LOG(ERR, "Invalid session op");
- break;
- }
-
- }
-}
-
-static inline void
-aesni_gcm_process_gmac_sgl_op(const struct aesni_gcm_session *s,
- struct gcm_context_data *gdata_ctx,
- struct rte_crypto_sgl *sgl, void *iv,
- struct aesni_gcm_ops ops)
-{
- ops.init(&s->gdata_key, gdata_ctx, iv, sgl->vec[0].base,
- sgl->vec[0].len);
-}
-
-static inline uint32_t
-aesni_gcm_sgl_encrypt(struct aesni_gcm_session *s,
- struct gcm_context_data *gdata_ctx,
- struct rte_crypto_sym_vec *vec,
- struct aesni_gcm_ops ops)
-{
- uint32_t i, processed;
-
- processed = 0;
- for (i = 0; i < vec->num; ++i) {
- aesni_gcm_process_gcm_sgl_op(s, gdata_ctx, &vec->src_sgl[i],
- vec->iv[i].va, vec->aad[i].va,
- ops);
- vec->status[i] = aesni_gcm_sgl_op_finalize_encryption(
- s, gdata_ctx, vec->digest[i].va, ops);
- processed += (vec->status[i] == 0);
- }
-
- return processed;
-}
-
-static inline uint32_t
-aesni_gcm_sgl_decrypt(struct aesni_gcm_session *s,
- struct gcm_context_data *gdata_ctx,
- struct rte_crypto_sym_vec *vec,
- struct aesni_gcm_ops ops)
-{
- uint32_t i, processed;
-
- processed = 0;
- for (i = 0; i < vec->num; ++i) {
- aesni_gcm_process_gcm_sgl_op(s, gdata_ctx, &vec->src_sgl[i],
- vec->iv[i].va, vec->aad[i].va,
- ops);
- vec->status[i] = aesni_gcm_sgl_op_finalize_decryption(
- s, gdata_ctx, vec->digest[i].va, ops);
- processed += (vec->status[i] == 0);
- }
-
- return processed;
-}
-
-static inline uint32_t
-aesni_gmac_sgl_generate(struct aesni_gcm_session *s,
- struct gcm_context_data *gdata_ctx,
- struct rte_crypto_sym_vec *vec,
- struct aesni_gcm_ops ops)
-{
- uint32_t i, processed;
-
- processed = 0;
- for (i = 0; i < vec->num; ++i) {
- if (vec->src_sgl[i].num != 1) {
- vec->status[i] = ENOTSUP;
- continue;
- }
-
- aesni_gcm_process_gmac_sgl_op(s, gdata_ctx, &vec->src_sgl[i],
- vec->iv[i].va, ops);
- vec->status[i] = aesni_gcm_sgl_op_finalize_encryption(
- s, gdata_ctx, vec->digest[i].va, ops);
- processed += (vec->status[i] == 0);
- }
-
- return processed;
-}
-
-static inline uint32_t
-aesni_gmac_sgl_verify(struct aesni_gcm_session *s,
- struct gcm_context_data *gdata_ctx,
- struct rte_crypto_sym_vec *vec,
- struct aesni_gcm_ops ops)
-{
- uint32_t i, processed;
-
- processed = 0;
- for (i = 0; i < vec->num; ++i) {
- if (vec->src_sgl[i].num != 1) {
- vec->status[i] = ENOTSUP;
- continue;
- }
-
- aesni_gcm_process_gmac_sgl_op(s, gdata_ctx, &vec->src_sgl[i],
- vec->iv[i].va, ops);
- vec->status[i] = aesni_gcm_sgl_op_finalize_decryption(
- s, gdata_ctx, vec->digest[i].va, ops);
- processed += (vec->status[i] == 0);
- }
-
- return processed;
-}
-
-/** Process CPU crypto bulk operations */
-static uint32_t
-aesni_gcm_process_bulk(struct rte_cryptodev *dev __rte_unused,
- struct rte_cryptodev_sym_session *sess,
- __rte_unused union rte_crypto_sym_ofs ofs,
- struct rte_crypto_sym_vec *vec)
-{
- struct aesni_gcm_session *s = CRYPTODEV_GET_SYM_SESS_PRIV(sess);
- struct gcm_context_data gdata_ctx;
- IMB_MGR *mb_mgr;
-
- /* get per-thread MB MGR, create one if needed */
- mb_mgr = get_per_thread_mb_mgr();
- if (unlikely(mb_mgr == NULL))
- return 0;
-
- /* Check if function pointers have been set for this thread ops. */
- if (unlikely(RTE_PER_LCORE(gcm_ops)[s->key_length].init == NULL))
- aesni_gcm_set_ops(RTE_PER_LCORE(gcm_ops), mb_mgr);
-
- switch (s->op) {
- case IPSEC_MB_OP_AEAD_AUTHENTICATED_ENCRYPT:
- return aesni_gcm_sgl_encrypt(s, &gdata_ctx, vec,
- RTE_PER_LCORE(gcm_ops)[s->key_length]);
- case IPSEC_MB_OP_AEAD_AUTHENTICATED_DECRYPT:
- return aesni_gcm_sgl_decrypt(s, &gdata_ctx, vec,
- RTE_PER_LCORE(gcm_ops)[s->key_length]);
- case IPSEC_MB_OP_HASH_GEN_ONLY:
- return aesni_gmac_sgl_generate(s, &gdata_ctx, vec,
- RTE_PER_LCORE(gcm_ops)[s->key_length]);
- case IPSEC_MB_OP_HASH_VERIFY_ONLY:
- return aesni_gmac_sgl_verify(s, &gdata_ctx, vec,
- RTE_PER_LCORE(gcm_ops)[s->key_length]);
- default:
- aesni_gcm_fill_error_code(vec, EINVAL);
- return 0;
- }
-}
-
-static int
-aesni_gcm_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
- const struct rte_cryptodev_qp_conf *qp_conf,
- int socket_id)
-{
- int ret = ipsec_mb_qp_setup(dev, qp_id, qp_conf, socket_id);
- if (ret < 0)
- return ret;
-
- struct ipsec_mb_qp *qp = dev->data->queue_pairs[qp_id];
- struct aesni_gcm_qp_data *qp_data = ipsec_mb_get_qp_private_data(qp);
- aesni_gcm_set_ops(qp_data->ops, qp->mb_mgr);
- return 0;
-}
+#include "pmd_aesni_mb_priv.h"
struct rte_cryptodev_ops aesni_gcm_pmd_ops = {
.dev_configure = ipsec_mb_config,
@@ -762,10 +16,10 @@ struct rte_cryptodev_ops aesni_gcm_pmd_ops = {
.dev_infos_get = ipsec_mb_info_get,
- .queue_pair_setup = aesni_gcm_qp_setup,
+ .queue_pair_setup = ipsec_mb_qp_setup,
.queue_pair_release = ipsec_mb_qp_release,
- .sym_cpu_process = aesni_gcm_process_bulk,
+ .sym_cpu_process = aesni_mb_process_bulk,
.sym_session_get_size = ipsec_mb_sym_session_get_size,
.sym_session_configure = ipsec_mb_sym_session_configure,
@@ -801,7 +55,7 @@ RTE_INIT(ipsec_mb_register_aesni_gcm)
&ipsec_mb_pmds[IPSEC_MB_PMD_TYPE_AESNI_GCM];
aesni_gcm_data->caps = aesni_gcm_capabilities;
- aesni_gcm_data->dequeue_burst = aesni_gcm_pmd_dequeue_burst;
+ aesni_gcm_data->dequeue_burst = aesni_mb_dequeue_burst;
aesni_gcm_data->feature_flags =
RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
@@ -814,6 +68,6 @@ RTE_INIT(ipsec_mb_register_aesni_gcm)
aesni_gcm_data->ops = &aesni_gcm_pmd_ops;
aesni_gcm_data->qp_priv_size = sizeof(struct aesni_gcm_qp_data);
aesni_gcm_data->queue_pair_configure = NULL;
- aesni_gcm_data->session_configure = aesni_gcm_session_configure;
- aesni_gcm_data->session_priv_size = sizeof(struct aesni_gcm_session);
+ aesni_gcm_data->session_configure = aesni_mb_session_configure;
+ aesni_gcm_data->session_priv_size = sizeof(struct aesni_mb_session);
}
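
As with the other PMDs, application-facing AES-GCM/GMAC usage is untouched; only the session parsing and digest handling move into the common code. For reference, a minimal AEAD xform sketch, not part of the patch (the key pointer, AAD size and IV offset are example assumptions; the 16/24/32-byte key length is what the removed pre-compute switch selected GCM-128/192/256 on):

	#include <string.h>
	#include <rte_crypto.h>

	static void
	build_aes_gcm_xform(struct rte_crypto_sym_xform *aead,
			const uint8_t *key, uint16_t key_len /* 16, 24 or 32 */)
	{
		memset(aead, 0, sizeof(*aead));
		aead->type = RTE_CRYPTO_SYM_XFORM_AEAD;
		aead->aead.algo = RTE_CRYPTO_AEAD_AES_GCM;
		aead->aead.op = RTE_CRYPTO_AEAD_OP_ENCRYPT;
		aead->aead.key.data = key;
		aead->aead.key.length = key_len;
		aead->aead.iv.offset = sizeof(struct rte_crypto_op) +
				sizeof(struct rte_crypto_sym_op);
		aead->aead.iv.length = 12;
		aead->aead.aad_length = 16;	/* example AAD size */
		/* The removed code generated the full 16-byte tag and truncated
		 * it when a shorter digest was requested. */
		aead->aead.digest_length = 16;
		aead->next = NULL;
	}
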
diff --git a/drivers/crypto/ipsec_mb/pmd_aesni_mb.c b/drivers/crypto/ipsec_mb/pmd_aesni_mb.c
index 4de4866cf3..6f0a1de24d 100644
--- a/drivers/crypto/ipsec_mb/pmd_aesni_mb.c
+++ b/drivers/crypto/ipsec_mb/pmd_aesni_mb.c
@@ -761,7 +761,7 @@ aesni_mb_set_session_aead_parameters(const IMB_MGR *mb_mgr,
}
/** Configure an AESNI multi-buffer session from a crypto xform chain */
-static int
+int
aesni_mb_session_configure(IMB_MGR *mb_mgr,
void *priv_sess,
const struct rte_crypto_sym_xform *xform)
@@ -2131,7 +2131,7 @@ set_job_null_op(IMB_JOB *job, struct rte_crypto_op *op)
}
#if IMB_VERSION(1, 2, 0) < IMB_VERSION_NUM
-static uint16_t
+uint16_t
aesni_mb_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
uint16_t nb_ops)
{
@@ -2321,7 +2321,7 @@ flush_mb_mgr(struct ipsec_mb_qp *qp, IMB_MGR *mb_mgr,
return processed_ops;
}
-static uint16_t
+uint16_t
aesni_mb_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
uint16_t nb_ops)
{
@@ -2456,7 +2456,7 @@ verify_sync_dgst(struct rte_crypto_sym_vec *vec,
return k;
}
-static uint32_t
+uint32_t
aesni_mb_process_bulk(struct rte_cryptodev *dev __rte_unused,
struct rte_cryptodev_sym_session *sess, union rte_crypto_sym_ofs sofs,
struct rte_crypto_sym_vec *vec)
diff --git a/drivers/crypto/ipsec_mb/pmd_aesni_mb_priv.h b/drivers/crypto/ipsec_mb/pmd_aesni_mb_priv.h
index 85994fe5a1..9f0a89d20b 100644
--- a/drivers/crypto/ipsec_mb/pmd_aesni_mb_priv.h
+++ b/drivers/crypto/ipsec_mb/pmd_aesni_mb_priv.h
@@ -21,6 +21,19 @@
#define MAX_NUM_SEGS 16
#endif
+int
+aesni_mb_session_configure(IMB_MGR * m __rte_unused, void *priv_sess,
+ const struct rte_crypto_sym_xform *xform);
+
+uint16_t
+aesni_mb_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
+ uint16_t nb_ops);
+
+uint32_t
+aesni_mb_process_bulk(struct rte_cryptodev *dev __rte_unused,
+ struct rte_cryptodev_sym_session *sess, union rte_crypto_sym_ofs sofs,
+ struct rte_crypto_sym_vec *vec);
+
static const struct rte_cryptodev_capabilities aesni_mb_capabilities[] = {
{ /* MD5 HMAC */
.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
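
With these three prototypes exported, every per-algorithm registration touched by this patch reduces to the same three assignments; the hunks for SNOW3G, ZUC, AESNI_GCM and CHACHA20_POLY1305 all follow this pattern (pmd_data here stands for the respective ipsec_mb_pmds[] entry):

	/* Inside each PMD's RTE_INIT registration function: */
	pmd_data->dequeue_burst = aesni_mb_dequeue_burst;
	pmd_data->session_configure = aesni_mb_session_configure;
	pmd_data->session_priv_size = sizeof(struct aesni_mb_session);
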
diff --git a/drivers/crypto/ipsec_mb/pmd_chacha_poly.c b/drivers/crypto/ipsec_mb/pmd_chacha_poly.c
index 97e7cef233..93f8e3588e 100644
--- a/drivers/crypto/ipsec_mb/pmd_chacha_poly.c
+++ b/drivers/crypto/ipsec_mb/pmd_chacha_poly.c
@@ -3,334 +3,7 @@
*/
#include "pmd_chacha_poly_priv.h"
-
-/** Parse crypto xform chain and set private session parameters. */
-static int
-chacha20_poly1305_session_configure(IMB_MGR * mb_mgr __rte_unused,
- void *priv_sess, const struct rte_crypto_sym_xform *xform)
-{
- struct chacha20_poly1305_session *sess = priv_sess;
- const struct rte_crypto_sym_xform *auth_xform;
- const struct rte_crypto_sym_xform *cipher_xform;
- const struct rte_crypto_sym_xform *aead_xform;
-
- uint8_t key_length;
- const uint8_t *key;
- enum ipsec_mb_operation mode;
- int ret = 0;
-
- ret = ipsec_mb_parse_xform(xform, &mode, &auth_xform,
- &cipher_xform, &aead_xform);
- if (ret)
- return ret;
-
- sess->op = mode;
-
- switch (sess->op) {
- case IPSEC_MB_OP_AEAD_AUTHENTICATED_ENCRYPT:
- case IPSEC_MB_OP_AEAD_AUTHENTICATED_DECRYPT:
- if (aead_xform->aead.algo !=
- RTE_CRYPTO_AEAD_CHACHA20_POLY1305) {
- IPSEC_MB_LOG(ERR,
- "The only combined operation supported is CHACHA20 POLY1305");
- ret = -ENOTSUP;
- goto error_exit;
- }
- /* Set IV parameters */
- sess->iv.offset = aead_xform->aead.iv.offset;
- sess->iv.length = aead_xform->aead.iv.length;
- key_length = aead_xform->aead.key.length;
- key = aead_xform->aead.key.data;
- sess->aad_length = aead_xform->aead.aad_length;
- sess->req_digest_length = aead_xform->aead.digest_length;
- break;
- default:
- IPSEC_MB_LOG(
- ERR, "Wrong xform type, has to be AEAD or authentication");
- ret = -ENOTSUP;
- goto error_exit;
- }
-
- /* IV check */
- if (sess->iv.length != CHACHA20_POLY1305_IV_LENGTH &&
- sess->iv.length != 0) {
- IPSEC_MB_LOG(ERR, "Wrong IV length");
- ret = -EINVAL;
- goto error_exit;
- }
-
- /* Check key length */
- if (key_length != CHACHA20_POLY1305_KEY_SIZE) {
- IPSEC_MB_LOG(ERR, "Invalid key length");
- ret = -EINVAL;
- goto error_exit;
- } else {
- memcpy(sess->key, key, CHACHA20_POLY1305_KEY_SIZE);
- }
-
- /* Digest check */
- if (sess->req_digest_length != CHACHA20_POLY1305_DIGEST_LENGTH) {
- IPSEC_MB_LOG(ERR, "Invalid digest length");
- ret = -EINVAL;
- goto error_exit;
- } else {
- sess->gen_digest_length = CHACHA20_POLY1305_DIGEST_LENGTH;
- }
-
-error_exit:
- return ret;
-}
-
-/**
- * Process a crypto operation, calling
- * the direct chacha poly API from the multi buffer library.
- *
- * @param qp queue pair
- * @param op symmetric crypto operation
- * @param session chacha poly session
- *
- * @return
- * - Return 0 if success
- */
-static int
-chacha20_poly1305_crypto_op(struct ipsec_mb_qp *qp, struct rte_crypto_op *op,
- struct chacha20_poly1305_session *session)
-{
- struct chacha20_poly1305_qp_data *qp_data =
- ipsec_mb_get_qp_private_data(qp);
- uint8_t *src, *dst;
- uint8_t *iv_ptr;
- struct rte_crypto_sym_op *sym_op = op->sym;
- struct rte_mbuf *m_src = sym_op->m_src;
- uint32_t offset, data_offset, data_length;
- uint32_t part_len, data_len;
- int total_len;
- uint8_t *tag;
- unsigned int oop = 0;
-
- offset = sym_op->aead.data.offset;
- data_offset = offset;
- data_length = sym_op->aead.data.length;
- RTE_ASSERT(m_src != NULL);
-
- while (offset >= m_src->data_len && data_length != 0) {
- offset -= m_src->data_len;
- m_src = m_src->next;
-
- RTE_ASSERT(m_src != NULL);
- }
-
- src = rte_pktmbuf_mtod_offset(m_src, uint8_t *, offset);
-
- data_len = m_src->data_len - offset;
- part_len = (data_len < data_length) ? data_len :
- data_length;
-
- /* In-place */
- if (sym_op->m_dst == NULL || (sym_op->m_dst == sym_op->m_src))
- dst = src;
- /* Out-of-place */
- else {
- oop = 1;
- /* Segmented destination buffer is not supported
- * if operation is Out-of-place
- */
- RTE_ASSERT(rte_pktmbuf_is_contiguous(sym_op->m_dst));
- dst = rte_pktmbuf_mtod_offset(sym_op->m_dst, uint8_t *,
- data_offset);
- }
-
- iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
- session->iv.offset);
-
- IMB_CHACHA20_POLY1305_INIT(qp->mb_mgr, session->key,
- &qp_data->chacha20_poly1305_ctx_data,
- iv_ptr, sym_op->aead.aad.data,
- (uint64_t)session->aad_length);
-
- if (session->op == IPSEC_MB_OP_AEAD_AUTHENTICATED_ENCRYPT) {
- IMB_CHACHA20_POLY1305_ENC_UPDATE(qp->mb_mgr,
- session->key,
- &qp_data->chacha20_poly1305_ctx_data,
- dst, src, (uint64_t)part_len);
- total_len = data_length - part_len;
-
- while (total_len) {
- m_src = m_src->next;
- RTE_ASSERT(m_src != NULL);
-
- src = rte_pktmbuf_mtod(m_src, uint8_t *);
- if (oop)
- dst += part_len;
- else
- dst = src;
- part_len = (m_src->data_len < total_len) ?
- m_src->data_len : total_len;
-
- if (dst == NULL || src == NULL) {
- IPSEC_MB_LOG(ERR, "Invalid src or dst input");
- return -EINVAL;
- }
- IMB_CHACHA20_POLY1305_ENC_UPDATE(qp->mb_mgr,
- session->key,
- &qp_data->chacha20_poly1305_ctx_data,
- dst, src, (uint64_t)part_len);
- total_len -= part_len;
- if (total_len < 0) {
- IPSEC_MB_LOG(ERR, "Invalid part len");
- return -EINVAL;
- }
- }
-
- tag = sym_op->aead.digest.data;
- IMB_CHACHA20_POLY1305_ENC_FINALIZE(qp->mb_mgr,
- &qp_data->chacha20_poly1305_ctx_data,
- tag, session->gen_digest_length);
-
- } else {
- IMB_CHACHA20_POLY1305_DEC_UPDATE(qp->mb_mgr,
- session->key,
- &qp_data->chacha20_poly1305_ctx_data,
- dst, src, (uint64_t)part_len);
-
- total_len = data_length - part_len;
-
- while (total_len) {
- m_src = m_src->next;
-
- RTE_ASSERT(m_src != NULL);
-
- src = rte_pktmbuf_mtod(m_src, uint8_t *);
- if (oop)
- dst += part_len;
- else
- dst = src;
- part_len = (m_src->data_len < total_len) ?
- m_src->data_len : total_len;
-
- if (dst == NULL || src == NULL) {
- IPSEC_MB_LOG(ERR, "Invalid src or dst input");
- return -EINVAL;
- }
- IMB_CHACHA20_POLY1305_DEC_UPDATE(qp->mb_mgr,
- session->key,
- &qp_data->chacha20_poly1305_ctx_data,
- dst, src, (uint64_t)part_len);
- total_len -= part_len;
- if (total_len < 0) {
- IPSEC_MB_LOG(ERR, "Invalid part len");
- return -EINVAL;
- }
- }
-
- tag = qp_data->temp_digest;
- IMB_CHACHA20_POLY1305_DEC_FINALIZE(qp->mb_mgr,
- &qp_data->chacha20_poly1305_ctx_data,
- tag, session->gen_digest_length);
- }
-
- return 0;
-}
-
-/**
- * Process a completed chacha poly op
- *
- * @param qp Queue Pair to process
- * @param op Crypto operation
- * @param sess Crypto session
- *
- * @return
- * - void
- */
-static void
-post_process_chacha20_poly1305_crypto_op(struct ipsec_mb_qp *qp,
- struct rte_crypto_op *op,
- struct chacha20_poly1305_session *session)
-{
- struct chacha20_poly1305_qp_data *qp_data =
- ipsec_mb_get_qp_private_data(qp);
-
- op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
- /* Verify digest if required */
- if (session->op == IPSEC_MB_OP_AEAD_AUTHENTICATED_DECRYPT ||
- session->op == IPSEC_MB_OP_HASH_VERIFY_ONLY) {
- uint8_t *digest = op->sym->aead.digest.data;
- uint8_t *tag = qp_data->temp_digest;
-
-#ifdef RTE_LIBRTE_PMD_CHACHA20_POLY1305_DEBUG
- rte_hexdump(stdout, "auth tag (orig):",
- digest, session->req_digest_length);
- rte_hexdump(stdout, "auth tag (calc):",
- tag, session->req_digest_length);
-#endif
- if (memcmp(tag, digest, session->req_digest_length) != 0)
- op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
-
- }
-
-}
-
-/**
- * Process a completed Chacha20_poly1305 request
- *
- * @param qp Queue Pair to process
- * @param op Crypto operation
- * @param sess Crypto session
- *
- * @return
- * - void
- */
-static void
-handle_completed_chacha20_poly1305_crypto_op(struct ipsec_mb_qp *qp,
- struct rte_crypto_op *op,
- struct chacha20_poly1305_session *sess)
-{
- post_process_chacha20_poly1305_crypto_op(qp, op, sess);
-
- /* Free session if a session-less crypto op */
- if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
- memset(sess, 0, sizeof(struct chacha20_poly1305_session));
- rte_mempool_put(qp->sess_mp, op->sym->session);
- op->sym->session = NULL;
- }
-}
-
-static uint16_t
-chacha20_poly1305_pmd_dequeue_burst(void *queue_pair,
- struct rte_crypto_op **ops, uint16_t nb_ops)
-{
- struct chacha20_poly1305_session *sess;
- struct ipsec_mb_qp *qp = queue_pair;
-
- int retval = 0;
- unsigned int i = 0, nb_dequeued;
-
- nb_dequeued = rte_ring_dequeue_burst(qp->ingress_queue,
- (void **)ops, nb_ops, NULL);
-
- for (i = 0; i < nb_dequeued; i++) {
-
- sess = ipsec_mb_get_session_private(qp, ops[i]);
- if (unlikely(sess == NULL)) {
- ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
- qp->stats.dequeue_err_count++;
- break;
- }
-
- retval = chacha20_poly1305_crypto_op(qp, ops[i], sess);
- if (retval < 0) {
- ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
- qp->stats.dequeue_err_count++;
- break;
- }
-
- handle_completed_chacha20_poly1305_crypto_op(qp, ops[i], sess);
- }
-
- qp->stats.dequeued_count += i;
-
- return i;
-}
+#include "pmd_aesni_mb_priv.h"
struct rte_cryptodev_ops chacha20_poly1305_pmd_ops = {
.dev_configure = ipsec_mb_config,
@@ -384,7 +57,7 @@ RTE_INIT(ipsec_mb_register_chacha20_poly1305)
= &ipsec_mb_pmds[IPSEC_MB_PMD_TYPE_CHACHA20_POLY1305];
chacha_poly_data->caps = chacha20_poly1305_capabilities;
- chacha_poly_data->dequeue_burst = chacha20_poly1305_pmd_dequeue_burst;
+ chacha_poly_data->dequeue_burst = aesni_mb_dequeue_burst;
chacha_poly_data->feature_flags =
RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
@@ -398,7 +71,7 @@ RTE_INIT(ipsec_mb_register_chacha20_poly1305)
chacha_poly_data->qp_priv_size =
sizeof(struct chacha20_poly1305_qp_data);
chacha_poly_data->session_configure =
- chacha20_poly1305_session_configure;
+ aesni_mb_session_configure;
chacha_poly_data->session_priv_size =
- sizeof(struct chacha20_poly1305_session);
+ sizeof(struct aesni_mb_session);
}
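
Since the changes are internal to the PMDs (dequeue workers and session handling), the burst flow an application drives stays identical across all of the converted PMDs. A short illustrative loop, assuming the device and queue pair are already configured:

	#include <rte_cryptodev.h>

	static uint16_t
	run_burst(uint8_t dev_id, uint16_t qp_id,
			struct rte_crypto_op **ops, uint16_t nb_ops)
	{
		uint16_t enq, deq = 0;

		enq = rte_cryptodev_enqueue_burst(dev_id, qp_id, ops, nb_ops);
		/* Ops are pulled back once the PMD has processed them. */
		while (deq < enq)
			deq += rte_cryptodev_dequeue_burst(dev_id, qp_id,
					ops + deq, enq - deq);
		return deq;
	}
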
diff --git a/drivers/crypto/ipsec_mb/pmd_kasumi.c b/drivers/crypto/ipsec_mb/pmd_kasumi.c
index 5db9c523cd..0c549f9459 100644
--- a/drivers/crypto/ipsec_mb/pmd_kasumi.c
+++ b/drivers/crypto/ipsec_mb/pmd_kasumi.c
@@ -10,403 +10,7 @@
#include <rte_malloc.h>
#include "pmd_kasumi_priv.h"
-
-/** Parse crypto xform chain and set private session parameters. */
-static int
-kasumi_session_configure(IMB_MGR *mgr, void *priv_sess,
- const struct rte_crypto_sym_xform *xform)
-{
- const struct rte_crypto_sym_xform *auth_xform = NULL;
- const struct rte_crypto_sym_xform *cipher_xform = NULL;
- enum ipsec_mb_operation mode;
- struct kasumi_session *sess = (struct kasumi_session *)priv_sess;
- /* Select Crypto operation - hash then cipher / cipher then hash */
- int ret = ipsec_mb_parse_xform(xform, &mode, &auth_xform,
- &cipher_xform, NULL);
-
- if (ret)
- return ret;
-
- if (cipher_xform) {
- /* Only KASUMI F8 supported */
- if (cipher_xform->cipher.algo != RTE_CRYPTO_CIPHER_KASUMI_F8) {
- IPSEC_MB_LOG(ERR, "Unsupported cipher algorithm ");
- return -ENOTSUP;
- }
-
- sess->cipher_iv_offset = cipher_xform->cipher.iv.offset;
- if (cipher_xform->cipher.iv.length != KASUMI_IV_LENGTH) {
- IPSEC_MB_LOG(ERR, "Wrong IV length");
- return -EINVAL;
- }
-
- /* Initialize key */
- IMB_KASUMI_INIT_F8_KEY_SCHED(mgr,
- cipher_xform->cipher.key.data,
- &sess->pKeySched_cipher);
- }
-
- if (auth_xform) {
- /* Only KASUMI F9 supported */
- if (auth_xform->auth.algo != RTE_CRYPTO_AUTH_KASUMI_F9) {
- IPSEC_MB_LOG(ERR, "Unsupported authentication");
- return -ENOTSUP;
- }
-
- if (auth_xform->auth.digest_length != KASUMI_DIGEST_LENGTH) {
- IPSEC_MB_LOG(ERR, "Wrong digest length");
- return -EINVAL;
- }
-
- sess->auth_op = auth_xform->auth.op;
-
- /* Initialize key */
- IMB_KASUMI_INIT_F9_KEY_SCHED(mgr, auth_xform->auth.key.data,
- &sess->pKeySched_hash);
- }
-
- sess->op = mode;
- return ret;
-}
-
-/** Encrypt/decrypt mbufs with same cipher key. */
-static uint8_t
-process_kasumi_cipher_op(struct ipsec_mb_qp *qp, struct rte_crypto_op **ops,
- struct kasumi_session *session, uint8_t num_ops)
-{
- unsigned int i;
- uint8_t processed_ops = 0;
- const void *src[num_ops];
- void *dst[num_ops];
- uint8_t *iv_ptr;
- uint64_t iv[num_ops];
- uint32_t num_bytes[num_ops];
-
- for (i = 0; i < num_ops; i++) {
- src[i] = rte_pktmbuf_mtod(ops[i]->sym->m_src, uint8_t *)
- + (ops[i]->sym->cipher.data.offset >> 3);
- dst[i] = ops[i]->sym->m_dst
- ? rte_pktmbuf_mtod(ops[i]->sym->m_dst, uint8_t *)
- + (ops[i]->sym->cipher.data.offset >> 3)
- : rte_pktmbuf_mtod(ops[i]->sym->m_src, uint8_t *)
- + (ops[i]->sym->cipher.data.offset >> 3);
- iv_ptr = rte_crypto_op_ctod_offset(ops[i], uint8_t *,
- session->cipher_iv_offset);
- iv[i] = *((uint64_t *)(iv_ptr));
- num_bytes[i] = ops[i]->sym->cipher.data.length >> 3;
-
- processed_ops++;
- }
-
- if (processed_ops != 0)
- IMB_KASUMI_F8_N_BUFFER(qp->mb_mgr, &session->pKeySched_cipher,
- iv, src, dst, num_bytes,
- processed_ops);
-
- return processed_ops;
-}
-
-/** Encrypt/decrypt mbuf (bit level function). */
-static uint8_t
-process_kasumi_cipher_op_bit(struct ipsec_mb_qp *qp, struct rte_crypto_op *op,
- struct kasumi_session *session)
-{
- uint8_t *src, *dst;
- uint8_t *iv_ptr;
- uint64_t iv;
- uint32_t length_in_bits, offset_in_bits;
-
- offset_in_bits = op->sym->cipher.data.offset;
- src = rte_pktmbuf_mtod(op->sym->m_src, uint8_t *);
- if (op->sym->m_dst == NULL)
- dst = src;
- else
- dst = rte_pktmbuf_mtod(op->sym->m_dst, uint8_t *);
- iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
- session->cipher_iv_offset);
- iv = *((uint64_t *)(iv_ptr));
- length_in_bits = op->sym->cipher.data.length;
-
- IMB_KASUMI_F8_1_BUFFER_BIT(qp->mb_mgr, &session->pKeySched_cipher, iv,
- src, dst, length_in_bits, offset_in_bits);
-
- return 1;
-}
-
-/** Generate/verify hash from mbufs with same hash key. */
-static int
-process_kasumi_hash_op(struct ipsec_mb_qp *qp, struct rte_crypto_op **ops,
- struct kasumi_session *session, uint8_t num_ops)
-{
- unsigned int i;
- uint8_t processed_ops = 0;
- uint8_t *src, *dst;
- uint32_t length_in_bits;
- uint32_t num_bytes;
- struct kasumi_qp_data *qp_data = ipsec_mb_get_qp_private_data(qp);
-
- for (i = 0; i < num_ops; i++) {
- /* Data must be byte aligned */
- if ((ops[i]->sym->auth.data.offset % BYTE_LEN) != 0) {
- ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
- IPSEC_MB_LOG(ERR, "Invalid Offset");
- break;
- }
-
- length_in_bits = ops[i]->sym->auth.data.length;
-
- src = rte_pktmbuf_mtod(ops[i]->sym->m_src, uint8_t *)
- + (ops[i]->sym->auth.data.offset >> 3);
- /* Direction from next bit after end of message */
- num_bytes = length_in_bits >> 3;
-
- if (session->auth_op == RTE_CRYPTO_AUTH_OP_VERIFY) {
- dst = qp_data->temp_digest;
- IMB_KASUMI_F9_1_BUFFER(qp->mb_mgr,
- &session->pKeySched_hash, src,
- num_bytes, dst);
-
- /* Verify digest. */
- if (memcmp(dst, ops[i]->sym->auth.digest.data,
- KASUMI_DIGEST_LENGTH)
- != 0)
- ops[i]->status
- = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
- } else {
- dst = ops[i]->sym->auth.digest.data;
-
- IMB_KASUMI_F9_1_BUFFER(qp->mb_mgr,
- &session->pKeySched_hash, src,
- num_bytes, dst);
- }
- processed_ops++;
- }
-
- return processed_ops;
-}
-
-/** Process a batch of crypto ops which shares the same session. */
-static int
-process_ops(struct rte_crypto_op **ops, struct kasumi_session *session,
- struct ipsec_mb_qp *qp, uint8_t num_ops)
-{
- unsigned int i;
- unsigned int processed_ops;
-
- switch (session->op) {
- case IPSEC_MB_OP_ENCRYPT_ONLY:
- case IPSEC_MB_OP_DECRYPT_ONLY:
- processed_ops
- = process_kasumi_cipher_op(qp, ops, session, num_ops);
- break;
- case IPSEC_MB_OP_HASH_GEN_ONLY:
- case IPSEC_MB_OP_HASH_VERIFY_ONLY:
- processed_ops
- = process_kasumi_hash_op(qp, ops, session, num_ops);
- break;
- case IPSEC_MB_OP_ENCRYPT_THEN_HASH_GEN:
- case IPSEC_MB_OP_DECRYPT_THEN_HASH_VERIFY:
- processed_ops
- = process_kasumi_cipher_op(qp, ops, session, num_ops);
- process_kasumi_hash_op(qp, ops, session, processed_ops);
- break;
- case IPSEC_MB_OP_HASH_VERIFY_THEN_DECRYPT:
- case IPSEC_MB_OP_HASH_GEN_THEN_ENCRYPT:
- processed_ops
- = process_kasumi_hash_op(qp, ops, session, num_ops);
- process_kasumi_cipher_op(qp, ops, session, processed_ops);
- break;
- default:
- /* Operation not supported. */
- processed_ops = 0;
- }
-
- for (i = 0; i < num_ops; i++) {
- /*
- * If there was no error/authentication failure,
- * change status to successful.
- */
- if (ops[i]->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
- ops[i]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
- /* Free session if a session-less crypto op. */
- if (ops[i]->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
- memset(session, 0, sizeof(struct kasumi_session));
- rte_mempool_put(qp->sess_mp, ops[i]->sym->session);
- ops[i]->sym->session = NULL;
- }
- }
- return processed_ops;
-}
-
-/** Process a crypto op with length/offset in bits. */
-static int
-process_op_bit(struct rte_crypto_op *op, struct kasumi_session *session,
- struct ipsec_mb_qp *qp)
-{
- unsigned int processed_op;
-
- switch (session->op) {
- /* case KASUMI_OP_ONLY_CIPHER: */
- case IPSEC_MB_OP_ENCRYPT_ONLY:
- case IPSEC_MB_OP_DECRYPT_ONLY:
- processed_op = process_kasumi_cipher_op_bit(qp, op, session);
- break;
- /* case KASUMI_OP_ONLY_AUTH: */
- case IPSEC_MB_OP_HASH_GEN_ONLY:
- case IPSEC_MB_OP_HASH_VERIFY_ONLY:
- processed_op = process_kasumi_hash_op(qp, &op, session, 1);
- break;
- /* case KASUMI_OP_CIPHER_AUTH: */
- case IPSEC_MB_OP_ENCRYPT_THEN_HASH_GEN:
- processed_op = process_kasumi_cipher_op_bit(qp, op, session);
- if (processed_op == 1)
- process_kasumi_hash_op(qp, &op, session, 1);
- break;
- /* case KASUMI_OP_AUTH_CIPHER: */
- case IPSEC_MB_OP_HASH_VERIFY_THEN_DECRYPT:
- processed_op = process_kasumi_hash_op(qp, &op, session, 1);
- if (processed_op == 1)
- process_kasumi_cipher_op_bit(qp, op, session);
- break;
- default:
- /* Operation not supported. */
- processed_op = 0;
- }
-
- /*
- * If there was no error/authentication failure,
- * change status to successful.
- */
- if (op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
- op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
-
- /* Free session if a session-less crypto op. */
- if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
- memset(CRYPTODEV_GET_SYM_SESS_PRIV(op->sym->session), 0,
- sizeof(struct kasumi_session));
- rte_mempool_put(qp->sess_mp, (void *)op->sym->session);
- op->sym->session = NULL;
- }
- return processed_op;
-}
-
-static uint16_t
-kasumi_pmd_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
- uint16_t nb_ops)
-{
- struct rte_crypto_op *c_ops[nb_ops];
- struct rte_crypto_op *curr_c_op = NULL;
-
- struct kasumi_session *prev_sess = NULL, *curr_sess = NULL;
- struct ipsec_mb_qp *qp = queue_pair;
- unsigned int i;
- uint8_t burst_size = 0;
- uint8_t processed_ops;
- unsigned int nb_dequeued;
-
- nb_dequeued = rte_ring_dequeue_burst(qp->ingress_queue,
- (void **)ops, nb_ops, NULL);
- for (i = 0; i < nb_dequeued; i++) {
- curr_c_op = ops[i];
-
-#ifdef RTE_LIBRTE_PMD_KASUMI_DEBUG
- if (!rte_pktmbuf_is_contiguous(curr_c_op->sym->m_src)
- || (curr_c_op->sym->m_dst != NULL
- && !rte_pktmbuf_is_contiguous(
- curr_c_op->sym->m_dst))) {
- IPSEC_MB_LOG(ERR,
- "PMD supports only contiguous mbufs, op (%p) provides noncontiguous mbuf as source/destination buffer.",
- curr_c_op);
- curr_c_op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
- break;
- }
-#endif
-
- /* Set status as enqueued (not processed yet) by default. */
- curr_c_op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
-
- curr_sess = (struct kasumi_session *)
- ipsec_mb_get_session_private(qp, curr_c_op);
- if (unlikely(curr_sess == NULL
- || curr_sess->op == IPSEC_MB_OP_NOT_SUPPORTED)) {
- curr_c_op->status
- = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
- break;
- }
-
- /* If length/offset is at bit-level, process this buffer alone.
- */
- if (((curr_c_op->sym->cipher.data.length % BYTE_LEN) != 0)
- || ((ops[i]->sym->cipher.data.offset % BYTE_LEN) != 0)) {
- /* Process the ops of the previous session. */
- if (prev_sess != NULL) {
- processed_ops = process_ops(c_ops, prev_sess,
- qp, burst_size);
- if (processed_ops < burst_size) {
- burst_size = 0;
- break;
- }
-
- burst_size = 0;
- prev_sess = NULL;
- }
-
- processed_ops = process_op_bit(curr_c_op,
- curr_sess, qp);
- if (processed_ops != 1)
- break;
-
- continue;
- }
-
- /* Batch ops that share the same session. */
- if (prev_sess == NULL) {
- prev_sess = curr_sess;
- c_ops[burst_size++] = curr_c_op;
- } else if (curr_sess == prev_sess) {
- c_ops[burst_size++] = curr_c_op;
- /*
- * When there are enough ops to process in a batch,
- * process them, and start a new batch.
- */
- if (burst_size == KASUMI_MAX_BURST) {
- processed_ops = process_ops(c_ops, prev_sess,
- qp, burst_size);
- if (processed_ops < burst_size) {
- burst_size = 0;
- break;
- }
-
- burst_size = 0;
- prev_sess = NULL;
- }
- } else {
- /*
- * Different session, process the ops
- * of the previous session.
- */
- processed_ops = process_ops(c_ops, prev_sess, qp,
- burst_size);
- if (processed_ops < burst_size) {
- burst_size = 0;
- break;
- }
-
- burst_size = 0;
- prev_sess = curr_sess;
-
- c_ops[burst_size++] = curr_c_op;
- }
- }
-
- if (burst_size != 0) {
- /* Process the crypto ops of the last session. */
- processed_ops = process_ops(c_ops, prev_sess, qp, burst_size);
- }
-
- qp->stats.dequeued_count += i;
- return i;
-}
+#include "pmd_aesni_mb_priv.h"
struct rte_cryptodev_ops kasumi_pmd_ops = {
.dev_configure = ipsec_mb_config,
@@ -457,7 +61,7 @@ RTE_INIT(ipsec_mb_register_kasumi)
= &ipsec_mb_pmds[IPSEC_MB_PMD_TYPE_KASUMI];
kasumi_data->caps = kasumi_capabilities;
- kasumi_data->dequeue_burst = kasumi_pmd_dequeue_burst;
+ kasumi_data->dequeue_burst = aesni_mb_dequeue_burst;
kasumi_data->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO
| RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING
| RTE_CRYPTODEV_FF_NON_BYTE_ALIGNED_DATA
@@ -467,6 +71,6 @@ RTE_INIT(ipsec_mb_register_kasumi)
kasumi_data->internals_priv_size = 0;
kasumi_data->ops = &kasumi_pmd_ops;
kasumi_data->qp_priv_size = sizeof(struct kasumi_qp_data);
- kasumi_data->session_configure = kasumi_session_configure;
- kasumi_data->session_priv_size = sizeof(struct kasumi_session);
+ kasumi_data->session_configure = aesni_mb_session_configure;
+ kasumi_data->session_priv_size = sizeof(struct aesni_mb_session);
}
diff --git a/drivers/crypto/ipsec_mb/pmd_snow3g.c b/drivers/crypto/ipsec_mb/pmd_snow3g.c
index e64df1a462..92ec955baa 100644
--- a/drivers/crypto/ipsec_mb/pmd_snow3g.c
+++ b/drivers/crypto/ipsec_mb/pmd_snow3g.c
@@ -3,539 +3,7 @@
*/
#include "pmd_snow3g_priv.h"
-
-/** Parse crypto xform chain and set private session parameters. */
-static int
-snow3g_session_configure(IMB_MGR *mgr, void *priv_sess,
- const struct rte_crypto_sym_xform *xform)
-{
- struct snow3g_session *sess = (struct snow3g_session *)priv_sess;
- const struct rte_crypto_sym_xform *auth_xform = NULL;
- const struct rte_crypto_sym_xform *cipher_xform = NULL;
- enum ipsec_mb_operation mode;
-
- /* Select Crypto operation - hash then cipher / cipher then hash */
- int ret = ipsec_mb_parse_xform(xform, &mode, &auth_xform,
- &cipher_xform, NULL);
- if (ret)
- return ret;
-
- if (cipher_xform) {
- /* Only SNOW 3G UEA2 supported */
- if (cipher_xform->cipher.algo != RTE_CRYPTO_CIPHER_SNOW3G_UEA2)
- return -ENOTSUP;
-
- if (cipher_xform->cipher.iv.length != SNOW3G_IV_LENGTH) {
- IPSEC_MB_LOG(ERR, "Wrong IV length");
- return -EINVAL;
- }
- if (cipher_xform->cipher.key.length > SNOW3G_MAX_KEY_SIZE) {
- IPSEC_MB_LOG(ERR, "Not enough memory to store the key");
- return -ENOMEM;
- }
-
- sess->cipher_iv_offset = cipher_xform->cipher.iv.offset;
-
- /* Initialize key */
- IMB_SNOW3G_INIT_KEY_SCHED(mgr, cipher_xform->cipher.key.data,
- &sess->pKeySched_cipher);
- }
-
- if (auth_xform) {
- /* Only SNOW 3G UIA2 supported */
- if (auth_xform->auth.algo != RTE_CRYPTO_AUTH_SNOW3G_UIA2)
- return -ENOTSUP;
-
- if (auth_xform->auth.digest_length != SNOW3G_DIGEST_LENGTH) {
- IPSEC_MB_LOG(ERR, "Wrong digest length");
- return -EINVAL;
- }
- if (auth_xform->auth.key.length > SNOW3G_MAX_KEY_SIZE) {
- IPSEC_MB_LOG(ERR, "Not enough memory to store the key");
- return -ENOMEM;
- }
-
- sess->auth_op = auth_xform->auth.op;
-
- if (auth_xform->auth.iv.length != SNOW3G_IV_LENGTH) {
- IPSEC_MB_LOG(ERR, "Wrong IV length");
- return -EINVAL;
- }
- sess->auth_iv_offset = auth_xform->auth.iv.offset;
-
- /* Initialize key */
- IMB_SNOW3G_INIT_KEY_SCHED(mgr, auth_xform->auth.key.data,
- &sess->pKeySched_hash);
- }
-
- sess->op = mode;
-
- return 0;
-}
-
-/** Check if conditions are met for digest-appended operations */
-static uint8_t *
-snow3g_digest_appended_in_src(struct rte_crypto_op *op)
-{
- unsigned int auth_size, cipher_size;
-
- auth_size = (op->sym->auth.data.offset >> 3) +
- (op->sym->auth.data.length >> 3);
- cipher_size = (op->sym->cipher.data.offset >> 3) +
- (op->sym->cipher.data.length >> 3);
-
- if (auth_size < cipher_size)
- return rte_pktmbuf_mtod_offset(op->sym->m_src,
- uint8_t *, auth_size);
-
- return NULL;
-}
-
-/** Encrypt/decrypt mbufs with same cipher key. */
-static uint8_t
-process_snow3g_cipher_op(struct ipsec_mb_qp *qp, struct rte_crypto_op **ops,
- struct snow3g_session *session,
- uint8_t num_ops)
-{
- uint32_t i;
- uint8_t processed_ops = 0;
- const void *src[SNOW3G_MAX_BURST] = {NULL};
- void *dst[SNOW3G_MAX_BURST] = {NULL};
- uint8_t *digest_appended[SNOW3G_MAX_BURST] = {NULL};
- const void *iv[SNOW3G_MAX_BURST] = {NULL};
- uint32_t num_bytes[SNOW3G_MAX_BURST] = {0};
- uint32_t cipher_off, cipher_len;
- int unencrypted_bytes = 0;
-
- for (i = 0; i < num_ops; i++) {
-
- cipher_off = ops[i]->sym->cipher.data.offset >> 3;
- cipher_len = ops[i]->sym->cipher.data.length >> 3;
- src[i] = rte_pktmbuf_mtod_offset(
- ops[i]->sym->m_src, uint8_t *, cipher_off);
-
- /* If out-of-place operation */
- if (ops[i]->sym->m_dst &&
- ops[i]->sym->m_src != ops[i]->sym->m_dst) {
- dst[i] = rte_pktmbuf_mtod_offset(
- ops[i]->sym->m_dst, uint8_t *, cipher_off);
-
- /* In case of out-of-place, auth-cipher operation
- * with partial encryption of the digest, copy
- * the remaining, unencrypted part.
- */
- if (session->op == IPSEC_MB_OP_HASH_VERIFY_THEN_DECRYPT
- || session->op == IPSEC_MB_OP_HASH_GEN_THEN_ENCRYPT)
- unencrypted_bytes =
- (ops[i]->sym->auth.data.offset >> 3) +
- (ops[i]->sym->auth.data.length >> 3) +
- (SNOW3G_DIGEST_LENGTH) -
- cipher_off - cipher_len;
- if (unencrypted_bytes > 0)
- rte_memcpy(
- rte_pktmbuf_mtod_offset(
- ops[i]->sym->m_dst, uint8_t *,
- cipher_off + cipher_len),
- rte_pktmbuf_mtod_offset(
- ops[i]->sym->m_src, uint8_t *,
- cipher_off + cipher_len),
- unencrypted_bytes);
- } else
- dst[i] = rte_pktmbuf_mtod_offset(ops[i]->sym->m_src,
- uint8_t *, cipher_off);
-
- iv[i] = rte_crypto_op_ctod_offset(ops[i], uint8_t *,
- session->cipher_iv_offset);
- num_bytes[i] = cipher_len;
- processed_ops++;
- }
-
- IMB_SNOW3G_F8_N_BUFFER(qp->mb_mgr, &session->pKeySched_cipher, iv,
- src, dst, num_bytes, processed_ops);
-
- /* Take care of the raw digest data in src buffer */
- for (i = 0; i < num_ops; i++) {
- if ((session->op == IPSEC_MB_OP_HASH_VERIFY_THEN_DECRYPT ||
- session->op == IPSEC_MB_OP_HASH_GEN_THEN_ENCRYPT) &&
- ops[i]->sym->m_dst != NULL) {
- digest_appended[i] =
- snow3g_digest_appended_in_src(ops[i]);
- /* Clear unencrypted digest from
- * the src buffer
- */
- if (digest_appended[i] != NULL)
- memset(digest_appended[i],
- 0, SNOW3G_DIGEST_LENGTH);
- }
- }
- return processed_ops;
-}
-
-/** Encrypt/decrypt mbuf (bit level function). */
-static uint8_t
-process_snow3g_cipher_op_bit(struct ipsec_mb_qp *qp,
- struct rte_crypto_op *op,
- struct snow3g_session *session)
-{
- uint8_t *src, *dst;
- uint8_t *iv;
- uint32_t length_in_bits, offset_in_bits;
- int unencrypted_bytes = 0;
-
- offset_in_bits = op->sym->cipher.data.offset;
- src = rte_pktmbuf_mtod(op->sym->m_src, uint8_t *);
- if (op->sym->m_dst == NULL) {
- op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
- IPSEC_MB_LOG(ERR, "bit-level in-place not supported\n");
- return 0;
- }
- length_in_bits = op->sym->cipher.data.length;
- dst = rte_pktmbuf_mtod(op->sym->m_dst, uint8_t *);
- /* In case of out-of-place, auth-cipher operation
- * with partial encryption of the digest, copy
- * the remaining, unencrypted part.
- */
- if (session->op == IPSEC_MB_OP_HASH_VERIFY_THEN_DECRYPT ||
- session->op == IPSEC_MB_OP_HASH_GEN_THEN_ENCRYPT)
- unencrypted_bytes =
- (op->sym->auth.data.offset >> 3) +
- (op->sym->auth.data.length >> 3) +
- (SNOW3G_DIGEST_LENGTH) -
- (offset_in_bits >> 3) -
- (length_in_bits >> 3);
- if (unencrypted_bytes > 0)
- rte_memcpy(
- rte_pktmbuf_mtod_offset(
- op->sym->m_dst, uint8_t *,
- (length_in_bits >> 3)),
- rte_pktmbuf_mtod_offset(
- op->sym->m_src, uint8_t *,
- (length_in_bits >> 3)),
- unencrypted_bytes);
-
- iv = rte_crypto_op_ctod_offset(op, uint8_t *,
- session->cipher_iv_offset);
-
- IMB_SNOW3G_F8_1_BUFFER_BIT(qp->mb_mgr, &session->pKeySched_cipher, iv,
- src, dst, length_in_bits, offset_in_bits);
-
- return 1;
-}
-
-/** Generate/verify hash from mbufs with same hash key. */
-static int
-process_snow3g_hash_op(struct ipsec_mb_qp *qp, struct rte_crypto_op **ops,
- struct snow3g_session *session,
- uint8_t num_ops)
-{
- uint32_t i;
- uint8_t processed_ops = 0;
- uint8_t *src, *dst;
- uint32_t length_in_bits;
- uint8_t *iv;
- uint8_t digest_appended = 0;
- struct snow3g_qp_data *qp_data = ipsec_mb_get_qp_private_data(qp);
-
- for (i = 0; i < num_ops; i++) {
- /* Data must be byte aligned */
- if ((ops[i]->sym->auth.data.offset % BYTE_LEN) != 0) {
- ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
- IPSEC_MB_LOG(ERR, "Offset");
- break;
- }
-
- dst = NULL;
-
- length_in_bits = ops[i]->sym->auth.data.length;
-
- src = rte_pktmbuf_mtod(ops[i]->sym->m_src, uint8_t *) +
- (ops[i]->sym->auth.data.offset >> 3);
- iv = rte_crypto_op_ctod_offset(ops[i], uint8_t *,
- session->auth_iv_offset);
-
- if (session->auth_op == RTE_CRYPTO_AUTH_OP_VERIFY) {
- dst = qp_data->temp_digest;
- /* Handle auth cipher verify oop case*/
- if ((session->op ==
- IPSEC_MB_OP_ENCRYPT_THEN_HASH_GEN ||
- session->op ==
- IPSEC_MB_OP_DECRYPT_THEN_HASH_VERIFY) &&
- ops[i]->sym->m_dst != NULL)
- src = rte_pktmbuf_mtod_offset(
- ops[i]->sym->m_dst, uint8_t *,
- ops[i]->sym->auth.data.offset >> 3);
-
- IMB_SNOW3G_F9_1_BUFFER(qp->mb_mgr,
- &session->pKeySched_hash,
- iv, src, length_in_bits, dst);
- /* Verify digest. */
- if (memcmp(dst, ops[i]->sym->auth.digest.data,
- SNOW3G_DIGEST_LENGTH) != 0)
- ops[i]->status =
- RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
- } else {
- if (session->op ==
- IPSEC_MB_OP_HASH_VERIFY_THEN_DECRYPT ||
- session->op ==
- IPSEC_MB_OP_HASH_GEN_THEN_ENCRYPT)
- dst = snow3g_digest_appended_in_src(ops[i]);
-
- if (dst != NULL)
- digest_appended = 1;
- else
- dst = ops[i]->sym->auth.digest.data;
-
- IMB_SNOW3G_F9_1_BUFFER(qp->mb_mgr,
- &session->pKeySched_hash,
- iv, src, length_in_bits, dst);
-
- /* Copy back digest from src to auth.digest.data */
- if (digest_appended)
- rte_memcpy(ops[i]->sym->auth.digest.data,
- dst, SNOW3G_DIGEST_LENGTH);
- }
- processed_ops++;
- }
-
- return processed_ops;
-}
-
-/** Process a batch of crypto ops which shares the same session. */
-static int
-process_ops(struct rte_crypto_op **ops, struct snow3g_session *session,
- struct ipsec_mb_qp *qp, uint8_t num_ops)
-{
- uint32_t i;
- uint32_t processed_ops;
-
-#ifdef RTE_LIBRTE_PMD_SNOW3G_DEBUG
- for (i = 0; i < num_ops; i++) {
- if (!rte_pktmbuf_is_contiguous(ops[i]->sym->m_src) ||
- (ops[i]->sym->m_dst != NULL &&
- !rte_pktmbuf_is_contiguous(
- ops[i]->sym->m_dst))) {
- IPSEC_MB_LOG(ERR,
- "PMD supports only contiguous mbufs, "
- "op (%p) provides noncontiguous mbuf as "
- "source/destination buffer.\n", ops[i]);
- ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
- return 0;
- }
- }
-#endif
-
- switch (session->op) {
- case IPSEC_MB_OP_ENCRYPT_ONLY:
- case IPSEC_MB_OP_DECRYPT_ONLY:
- processed_ops = process_snow3g_cipher_op(qp, ops,
- session, num_ops);
- break;
- case IPSEC_MB_OP_HASH_GEN_ONLY:
- case IPSEC_MB_OP_HASH_VERIFY_ONLY:
- processed_ops = process_snow3g_hash_op(qp, ops, session,
- num_ops);
- break;
- case IPSEC_MB_OP_ENCRYPT_THEN_HASH_GEN:
- case IPSEC_MB_OP_DECRYPT_THEN_HASH_VERIFY:
- processed_ops = process_snow3g_cipher_op(qp, ops, session,
- num_ops);
- process_snow3g_hash_op(qp, ops, session, processed_ops);
- break;
- case IPSEC_MB_OP_HASH_VERIFY_THEN_DECRYPT:
- case IPSEC_MB_OP_HASH_GEN_THEN_ENCRYPT:
- processed_ops = process_snow3g_hash_op(qp, ops, session,
- num_ops);
- process_snow3g_cipher_op(qp, ops, session, processed_ops);
- break;
- default:
- /* Operation not supported. */
- processed_ops = 0;
- }
-
- for (i = 0; i < num_ops; i++) {
- /*
- * If there was no error/authentication failure,
- * change status to successful.
- */
- if (ops[i]->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
- ops[i]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
- /* Free session if a session-less crypto op. */
- if (ops[i]->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
- memset(session, 0, sizeof(struct snow3g_session));
- rte_mempool_put(qp->sess_mp, ops[i]->sym->session);
- ops[i]->sym->session = NULL;
- }
- }
- return processed_ops;
-}
-
-/** Process a crypto op with length/offset in bits. */
-static int
-process_op_bit(struct rte_crypto_op *op, struct snow3g_session *session,
- struct ipsec_mb_qp *qp)
-{
- unsigned int processed_op;
- int ret;
-
- switch (session->op) {
- case IPSEC_MB_OP_ENCRYPT_ONLY:
- case IPSEC_MB_OP_DECRYPT_ONLY:
-
- processed_op = process_snow3g_cipher_op_bit(qp, op,
- session);
- break;
- case IPSEC_MB_OP_HASH_GEN_ONLY:
- case IPSEC_MB_OP_HASH_VERIFY_ONLY:
- processed_op = process_snow3g_hash_op(qp, &op, session, 1);
- break;
- case IPSEC_MB_OP_ENCRYPT_THEN_HASH_GEN:
- case IPSEC_MB_OP_DECRYPT_THEN_HASH_VERIFY:
- processed_op = process_snow3g_cipher_op_bit(qp, op, session);
- if (processed_op == 1)
- process_snow3g_hash_op(qp, &op, session, 1);
- break;
- case IPSEC_MB_OP_HASH_VERIFY_THEN_DECRYPT:
- case IPSEC_MB_OP_HASH_GEN_THEN_ENCRYPT:
- processed_op = process_snow3g_hash_op(qp, &op, session, 1);
- if (processed_op == 1)
- process_snow3g_cipher_op_bit(qp, op, session);
- break;
- default:
- /* Operation not supported. */
- processed_op = 0;
- }
-
- /*
- * If there was no error/authentication failure,
- * change status to successful.
- */
- if (op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
- op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
-
- /* Free session if a session-less crypto op. */
- if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
- memset(CRYPTODEV_GET_SYM_SESS_PRIV(op->sym->session), 0,
- sizeof(struct snow3g_session));
- rte_mempool_put(qp->sess_mp, (void *)op->sym->session);
- op->sym->session = NULL;
- }
-
- if (unlikely(processed_op != 1))
- return 0;
-
- ret = rte_ring_enqueue(qp->ingress_queue, op);
- if (ret != 0)
- return ret;
-
- return 1;
-}
-
-static uint16_t
-snow3g_pmd_dequeue_burst(void *queue_pair,
- struct rte_crypto_op **ops, uint16_t nb_ops)
-{
- struct ipsec_mb_qp *qp = queue_pair;
- struct rte_crypto_op *c_ops[SNOW3G_MAX_BURST];
- struct rte_crypto_op *curr_c_op;
-
- struct snow3g_session *prev_sess = NULL, *curr_sess = NULL;
- uint32_t i;
- uint8_t burst_size = 0;
- uint8_t processed_ops;
- uint32_t nb_dequeued;
-
- nb_dequeued = rte_ring_dequeue_burst(qp->ingress_queue,
- (void **)ops, nb_ops, NULL);
-
- for (i = 0; i < nb_dequeued; i++) {
- curr_c_op = ops[i];
-
- /* Set status as enqueued (not processed yet) by default. */
- curr_c_op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
-
- curr_sess = ipsec_mb_get_session_private(qp, curr_c_op);
- if (unlikely(curr_sess == NULL ||
- curr_sess->op == IPSEC_MB_OP_NOT_SUPPORTED)) {
- curr_c_op->status =
- RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
- break;
- }
-
- /* If length/offset is at bit-level,
- * process this buffer alone.
- */
- if (((curr_c_op->sym->cipher.data.length % BYTE_LEN) != 0)
- || ((curr_c_op->sym->cipher.data.offset
- % BYTE_LEN) != 0)) {
- /* Process the ops of the previous session. */
- if (prev_sess != NULL) {
- processed_ops = process_ops(c_ops, prev_sess,
- qp, burst_size);
- if (processed_ops < burst_size) {
- burst_size = 0;
- break;
- }
-
- burst_size = 0;
- prev_sess = NULL;
- }
-
- processed_ops = process_op_bit(curr_c_op, curr_sess, qp);
- if (processed_ops != 1)
- break;
-
- continue;
- }
-
- /* Batch ops that share the same session. */
- if (prev_sess == NULL) {
- prev_sess = curr_sess;
- c_ops[burst_size++] = curr_c_op;
- } else if (curr_sess == prev_sess) {
- c_ops[burst_size++] = curr_c_op;
- /*
- * When there are enough ops to process in a batch,
- * process them, and start a new batch.
- */
- if (burst_size == SNOW3G_MAX_BURST) {
- processed_ops = process_ops(c_ops, prev_sess,
- qp, burst_size);
- if (processed_ops < burst_size) {
- burst_size = 0;
- break;
- }
-
- burst_size = 0;
- prev_sess = NULL;
- }
- } else {
- /*
- * Different session, process the ops
- * of the previous session.
- */
- processed_ops = process_ops(c_ops, prev_sess,
- qp, burst_size);
- if (processed_ops < burst_size) {
- burst_size = 0;
- break;
- }
-
- burst_size = 0;
- prev_sess = curr_sess;
-
- c_ops[burst_size++] = curr_c_op;
- }
- }
-
- if (burst_size != 0) {
- /* Process the crypto ops of the last session. */
- processed_ops = process_ops(c_ops, prev_sess,
- qp, burst_size);
- }
-
- qp->stats.dequeued_count += i;
- return i;
-}
+#include "pmd_aesni_mb_priv.h"
struct rte_cryptodev_ops snow3g_pmd_ops = {
.dev_configure = ipsec_mb_config,
@@ -586,7 +54,7 @@ RTE_INIT(ipsec_mb_register_snow3g)
= &ipsec_mb_pmds[IPSEC_MB_PMD_TYPE_SNOW3G];
snow3g_data->caps = snow3g_capabilities;
- snow3g_data->dequeue_burst = snow3g_pmd_dequeue_burst;
+ snow3g_data->dequeue_burst = aesni_mb_dequeue_burst;
snow3g_data->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
RTE_CRYPTODEV_FF_NON_BYTE_ALIGNED_DATA |
@@ -596,6 +64,6 @@ RTE_INIT(ipsec_mb_register_snow3g)
snow3g_data->internals_priv_size = 0;
snow3g_data->ops = &snow3g_pmd_ops;
snow3g_data->qp_priv_size = sizeof(struct snow3g_qp_data);
- snow3g_data->session_configure = snow3g_session_configure;
- snow3g_data->session_priv_size = sizeof(struct snow3g_session);
+ snow3g_data->session_configure = aesni_mb_session_configure;
+ snow3g_data->session_priv_size = sizeof(struct aesni_mb_session);
}
diff --git a/drivers/crypto/ipsec_mb/pmd_zuc.c b/drivers/crypto/ipsec_mb/pmd_zuc.c
index 92fd9d1808..a4eef57d62 100644
--- a/drivers/crypto/ipsec_mb/pmd_zuc.c
+++ b/drivers/crypto/ipsec_mb/pmd_zuc.c
@@ -3,341 +3,7 @@
*/
#include "pmd_zuc_priv.h"
-
-/** Parse crypto xform chain and set private session parameters. */
-static int
-zuc_session_configure(__rte_unused IMB_MGR * mgr, void *zuc_sess,
- const struct rte_crypto_sym_xform *xform)
-{
- struct zuc_session *sess = (struct zuc_session *) zuc_sess;
- const struct rte_crypto_sym_xform *auth_xform = NULL;
- const struct rte_crypto_sym_xform *cipher_xform = NULL;
- enum ipsec_mb_operation mode;
- /* Select Crypto operation - hash then cipher / cipher then hash */
- int ret = ipsec_mb_parse_xform(xform, &mode, &auth_xform,
- &cipher_xform, NULL);
-
- if (ret)
- return ret;
-
- if (cipher_xform) {
- /* Only ZUC EEA3 supported */
- if (cipher_xform->cipher.algo != RTE_CRYPTO_CIPHER_ZUC_EEA3)
- return -ENOTSUP;
-
- if (cipher_xform->cipher.iv.length != ZUC_IV_KEY_LENGTH) {
- IPSEC_MB_LOG(ERR, "Wrong IV length");
- return -EINVAL;
- }
- sess->cipher_iv_offset = cipher_xform->cipher.iv.offset;
-
- /* Copy the key */
- memcpy(sess->pKey_cipher, cipher_xform->cipher.key.data,
- ZUC_IV_KEY_LENGTH);
- }
-
- if (auth_xform) {
- /* Only ZUC EIA3 supported */
- if (auth_xform->auth.algo != RTE_CRYPTO_AUTH_ZUC_EIA3)
- return -ENOTSUP;
-
- if (auth_xform->auth.digest_length != ZUC_DIGEST_LENGTH) {
- IPSEC_MB_LOG(ERR, "Wrong digest length");
- return -EINVAL;
- }
-
- sess->auth_op = auth_xform->auth.op;
-
- if (auth_xform->auth.iv.length != ZUC_IV_KEY_LENGTH) {
- IPSEC_MB_LOG(ERR, "Wrong IV length");
- return -EINVAL;
- }
- sess->auth_iv_offset = auth_xform->auth.iv.offset;
-
- /* Copy the key */
- memcpy(sess->pKey_hash, auth_xform->auth.key.data,
- ZUC_IV_KEY_LENGTH);
- }
-
- sess->op = mode;
- return 0;
-}
-
-/** Encrypt/decrypt mbufs. */
-static uint8_t
-process_zuc_cipher_op(struct ipsec_mb_qp *qp, struct rte_crypto_op **ops,
- struct zuc_session **sessions,
- uint8_t num_ops)
-{
- unsigned int i;
- uint8_t processed_ops = 0;
- const void *src[ZUC_MAX_BURST];
- void *dst[ZUC_MAX_BURST];
- const void *iv[ZUC_MAX_BURST];
- uint32_t num_bytes[ZUC_MAX_BURST];
- const void *cipher_keys[ZUC_MAX_BURST];
- struct zuc_session *sess;
-
- for (i = 0; i < num_ops; i++) {
- if (((ops[i]->sym->cipher.data.length % BYTE_LEN) != 0)
- || ((ops[i]->sym->cipher.data.offset
- % BYTE_LEN) != 0)) {
- ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
- IPSEC_MB_LOG(ERR, "Data Length or offset");
- break;
- }
-
- sess = sessions[i];
-
-#ifdef RTE_LIBRTE_PMD_ZUC_DEBUG
- if (!rte_pktmbuf_is_contiguous(ops[i]->sym->m_src) ||
- (ops[i]->sym->m_dst != NULL &&
- !rte_pktmbuf_is_contiguous(
- ops[i]->sym->m_dst))) {
- IPSEC_MB_LOG(ERR, "PMD supports only "
- " contiguous mbufs, op (%p) "
- "provides noncontiguous mbuf "
- "as source/destination buffer.\n",
- "PMD supports only contiguous mbufs, "
- "op (%p) provides noncontiguous mbuf "
- "as source/destination buffer.\n",
- ops[i]);
- ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
- break;
- }
-#endif
-
- src[i] = rte_pktmbuf_mtod(ops[i]->sym->m_src, uint8_t *) +
- (ops[i]->sym->cipher.data.offset >> 3);
- dst[i] = ops[i]->sym->m_dst ?
- rte_pktmbuf_mtod(ops[i]->sym->m_dst, uint8_t *) +
- (ops[i]->sym->cipher.data.offset >> 3) :
- rte_pktmbuf_mtod(ops[i]->sym->m_src, uint8_t *) +
- (ops[i]->sym->cipher.data.offset >> 3);
- iv[i] = rte_crypto_op_ctod_offset(ops[i], uint8_t *,
- sess->cipher_iv_offset);
- num_bytes[i] = ops[i]->sym->cipher.data.length >> 3;
-
- cipher_keys[i] = sess->pKey_cipher;
-
- processed_ops++;
- }
-
- IMB_ZUC_EEA3_N_BUFFER(qp->mb_mgr, (const void **)cipher_keys,
- (const void **)iv, (const void **)src, (void **)dst,
- num_bytes, processed_ops);
-
- return processed_ops;
-}
-
-/** Generate/verify hash from mbufs. */
-static int
-process_zuc_hash_op(struct ipsec_mb_qp *qp, struct rte_crypto_op **ops,
- struct zuc_session **sessions,
- uint8_t num_ops)
-{
- unsigned int i;
- uint8_t processed_ops = 0;
- uint8_t *src[ZUC_MAX_BURST] = { 0 };
- uint32_t *dst[ZUC_MAX_BURST];
- uint32_t length_in_bits[ZUC_MAX_BURST] = { 0 };
- uint8_t *iv[ZUC_MAX_BURST] = { 0 };
- const void *hash_keys[ZUC_MAX_BURST] = { 0 };
- struct zuc_session *sess;
- struct zuc_qp_data *qp_data = ipsec_mb_get_qp_private_data(qp);
-
-
- for (i = 0; i < num_ops; i++) {
- /* Data must be byte aligned */
- if ((ops[i]->sym->auth.data.offset % BYTE_LEN) != 0) {
- ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
- IPSEC_MB_LOG(ERR, "Offset");
- break;
- }
-
- sess = sessions[i];
-
- length_in_bits[i] = ops[i]->sym->auth.data.length;
-
- src[i] = rte_pktmbuf_mtod(ops[i]->sym->m_src, uint8_t *) +
- (ops[i]->sym->auth.data.offset >> 3);
- iv[i] = rte_crypto_op_ctod_offset(ops[i], uint8_t *,
- sess->auth_iv_offset);
-
- hash_keys[i] = sess->pKey_hash;
- if (sess->auth_op == RTE_CRYPTO_AUTH_OP_VERIFY)
- dst[i] = (uint32_t *)qp_data->temp_digest[i];
- else
- dst[i] = (uint32_t *)ops[i]->sym->auth.digest.data;
-
- processed_ops++;
- }
-
- IMB_ZUC_EIA3_N_BUFFER(qp->mb_mgr, (const void **)hash_keys,
- (const void * const *)iv, (const void * const *)src,
- length_in_bits, dst, processed_ops);
-
- /*
- * If tag needs to be verified, compare generated tag
- * with attached tag
- */
- for (i = 0; i < processed_ops; i++)
- if (sessions[i]->auth_op == RTE_CRYPTO_AUTH_OP_VERIFY)
- if (memcmp(dst[i], ops[i]->sym->auth.digest.data,
- ZUC_DIGEST_LENGTH) != 0)
- ops[i]->status =
- RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
-
- return processed_ops;
-}
-
-/** Process a batch of crypto ops which shares the same operation type. */
-static int
-process_ops(struct rte_crypto_op **ops, enum ipsec_mb_operation op_type,
- struct zuc_session **sessions,
- struct ipsec_mb_qp *qp, uint8_t num_ops)
-{
- unsigned int i;
- unsigned int processed_ops = 0;
-
- switch (op_type) {
- case IPSEC_MB_OP_ENCRYPT_ONLY:
- case IPSEC_MB_OP_DECRYPT_ONLY:
- processed_ops = process_zuc_cipher_op(qp, ops,
- sessions, num_ops);
- break;
- case IPSEC_MB_OP_HASH_GEN_ONLY:
- case IPSEC_MB_OP_HASH_VERIFY_ONLY:
- processed_ops = process_zuc_hash_op(qp, ops, sessions,
- num_ops);
- break;
- case IPSEC_MB_OP_ENCRYPT_THEN_HASH_GEN:
- case IPSEC_MB_OP_DECRYPT_THEN_HASH_VERIFY:
- processed_ops = process_zuc_cipher_op(qp, ops, sessions,
- num_ops);
- process_zuc_hash_op(qp, ops, sessions, processed_ops);
- break;
- case IPSEC_MB_OP_HASH_VERIFY_THEN_DECRYPT:
- case IPSEC_MB_OP_HASH_GEN_THEN_ENCRYPT:
- processed_ops = process_zuc_hash_op(qp, ops, sessions,
- num_ops);
- process_zuc_cipher_op(qp, ops, sessions, processed_ops);
- break;
- default:
- /* Operation not supported. */
- for (i = 0; i < num_ops; i++)
- ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
- }
-
- for (i = 0; i < num_ops; i++) {
- /*
- * If there was no error/authentication failure,
- * change status to successful.
- */
- if (ops[i]->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
- ops[i]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
- /* Free session if a session-less crypto op. */
- if (ops[i]->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
- memset(sessions[i], 0, sizeof(struct zuc_session));
- rte_mempool_put(qp->sess_mp, ops[i]->sym->session);
- ops[i]->sym->session = NULL;
- }
- }
- return processed_ops;
-}
-
-static uint16_t
-zuc_pmd_dequeue_burst(void *queue_pair,
- struct rte_crypto_op **c_ops, uint16_t nb_ops)
-{
-
- struct rte_crypto_op *curr_c_op;
-
- struct zuc_session *curr_sess;
- struct zuc_session *sessions[ZUC_MAX_BURST];
- struct rte_crypto_op *int_c_ops[ZUC_MAX_BURST];
- enum ipsec_mb_operation prev_zuc_op = IPSEC_MB_OP_NOT_SUPPORTED;
- enum ipsec_mb_operation curr_zuc_op;
- struct ipsec_mb_qp *qp = queue_pair;
- unsigned int nb_dequeued;
- unsigned int i;
- uint8_t burst_size = 0;
- uint8_t processed_ops;
-
- nb_dequeued = rte_ring_dequeue_burst(qp->ingress_queue,
- (void **)c_ops, nb_ops, NULL);
-
-
- for (i = 0; i < nb_dequeued; i++) {
- curr_c_op = c_ops[i];
-
- curr_sess = (struct zuc_session *)
- ipsec_mb_get_session_private(qp, curr_c_op);
- if (unlikely(curr_sess == NULL)) {
- curr_c_op->status =
- RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
- break;
- }
-
- curr_zuc_op = curr_sess->op;
-
- /*
- * Batch ops that share the same operation type
- * (cipher only, auth only...).
- */
- if (burst_size == 0) {
- prev_zuc_op = curr_zuc_op;
- int_c_ops[0] = curr_c_op;
- sessions[0] = curr_sess;
- burst_size++;
- } else if (curr_zuc_op == prev_zuc_op) {
- int_c_ops[burst_size] = curr_c_op;
- sessions[burst_size] = curr_sess;
- burst_size++;
- /*
- * When there are enough ops to process in a batch,
- * process them, and start a new batch.
- */
- if (burst_size == ZUC_MAX_BURST) {
- processed_ops = process_ops(int_c_ops, curr_zuc_op,
- sessions, qp, burst_size);
- if (processed_ops < burst_size) {
- burst_size = 0;
- break;
- }
-
- burst_size = 0;
- }
- } else {
- /*
- * Different operation type, process the ops
- * of the previous type.
- */
- processed_ops = process_ops(int_c_ops, prev_zuc_op,
- sessions, qp, burst_size);
- if (processed_ops < burst_size) {
- burst_size = 0;
- break;
- }
-
- burst_size = 0;
- prev_zuc_op = curr_zuc_op;
-
- int_c_ops[0] = curr_c_op;
- sessions[0] = curr_sess;
- burst_size++;
- }
- }
-
- if (burst_size != 0) {
- /* Process the crypto ops of the last operation type. */
- processed_ops = process_ops(int_c_ops, prev_zuc_op,
- sessions, qp, burst_size);
- }
-
- qp->stats.dequeued_count += i;
- return i;
-}
+#include "pmd_aesni_mb_priv.h"
struct rte_cryptodev_ops zuc_pmd_ops = {
.dev_configure = ipsec_mb_config,
@@ -388,7 +54,7 @@ RTE_INIT(ipsec_mb_register_zuc)
= &ipsec_mb_pmds[IPSEC_MB_PMD_TYPE_ZUC];
zuc_data->caps = zuc_capabilities;
- zuc_data->dequeue_burst = zuc_pmd_dequeue_burst;
+ zuc_data->dequeue_burst = aesni_mb_dequeue_burst;
zuc_data->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO
| RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING
| RTE_CRYPTODEV_FF_NON_BYTE_ALIGNED_DATA
@@ -398,6 +64,6 @@ RTE_INIT(ipsec_mb_register_zuc)
zuc_data->internals_priv_size = 0;
zuc_data->ops = &zuc_pmd_ops;
zuc_data->qp_priv_size = sizeof(struct zuc_qp_data);
- zuc_data->session_configure = zuc_session_configure;
- zuc_data->session_priv_size = sizeof(struct zuc_session);
+ zuc_data->session_configure = aesni_mb_session_configure;
+ zuc_data->session_priv_size = sizeof(struct aesni_mb_session);
}
--
2.25.1
^ permalink raw reply [flat|nested] 45+ messages in thread
* [PATCH v3] crypto/ipsec_mb: unified IPsec MB interface
2023-12-12 15:36 [PATCH v1] crypto/ipsec_mb: unified IPsec MB interface Brian Dooley
2023-12-14 15:15 ` [PATCH v2] " Brian Dooley
@ 2024-01-18 12:00 ` Brian Dooley
2024-02-28 11:33 ` [PATCH v4] " Brian Dooley
` (3 subsequent siblings)
5 siblings, 0 replies; 45+ messages in thread
From: Brian Dooley @ 2024-01-18 12:00 UTC (permalink / raw)
To: Kai Ji, Pablo de Lara; +Cc: dev, gakhil, Brian Dooley
Currently IPsec MB provides both the JOB API and direct API.
AESNI_MB PMD is using the JOB API codepath while ZUC, KASUMI, SNOW3G,
CHACHA20_POLY1305 and AESNI_GCM are using the direct API.
Instead of using the direct API for these PMDs, they should now make
use of the JOB API codepath. This would remove all use of the IPsec MB
direct API for these PMDs.
Signed-off-by: Brian Dooley <brian.dooley@intel.com>
---
v2:
- Fix compilation failure
v3:
- Remove session configure pointer for each PMD
---
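Note for reviewers (not part of the patch, illustrative only): the two library
interfaces differ in that the direct API calls an algorithm-specific burst
function per PMD (IMB_SNOW3G_F8_N_BUFFER, IMB_ZUC_EEA3_N_BUFFER, the
gcm128/192/256 handlers, and so on), while the JOB API used by the AESNI_MB
path fills a generic IMB_JOB and lets the multi-buffer manager schedule it.
The sketch below assumes only the intel-ipsec-mb job macros and deliberately
omits the per-algorithm field setup that pmd_aesni_mb.c performs, so it is a
shape of the flow rather than runnable PMD code.

/* Illustrative only: generic JOB API flow followed by the shared AESNI_MB
 * code path. Per-op field setup (cipher_mode, hash_alg, keys, IV, src/dst)
 * is omitted here and is done per algorithm in pmd_aesni_mb.c.
 */
#include <intel-ipsec-mb.h>

static unsigned int
job_api_flow_sketch(IMB_MGR *mb_mgr)
{
	IMB_JOB *job;
	unsigned int completed = 0;

	/* Take a free job slot from the manager. */
	job = IMB_GET_NEXT_JOB(mb_mgr);
	/* ... fill job->cipher_mode, job->hash_alg, keys, IV, src/dst ... */

	/* Submit; the manager may return a finished job now or hold it. */
	job = IMB_SUBMIT_JOB(mb_mgr);
	while (job != NULL) {
		if (job->status == IMB_STATUS_COMPLETED)
			completed++;
		job = IMB_GET_COMPLETED_JOB(mb_mgr);
	}

	/* Drain any jobs still held internally (end of burst). */
	while ((job = IMB_FLUSH_JOB(mb_mgr)) != NULL)
		if (job->status == IMB_STATUS_COMPLETED)
			completed++;

	return completed;
}

Because every converted PMD now funnels into this common flow, the per-PMD
dequeue and session-configure code deleted below becomes dead, and each
registration simply points at aesni_mb_dequeue_burst and
aesni_mb_session_configure.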
drivers/crypto/ipsec_mb/pmd_aesni_gcm.c | 757 +-----------------
drivers/crypto/ipsec_mb/pmd_aesni_gcm_priv.h | 21 -
drivers/crypto/ipsec_mb/pmd_aesni_mb.c | 8 +-
drivers/crypto/ipsec_mb/pmd_aesni_mb_priv.h | 13 +
drivers/crypto/ipsec_mb/pmd_chacha_poly.c | 335 +-------
.../crypto/ipsec_mb/pmd_chacha_poly_priv.h | 19 -
drivers/crypto/ipsec_mb/pmd_kasumi.c | 403 +---------
drivers/crypto/ipsec_mb/pmd_kasumi_priv.h | 12 -
drivers/crypto/ipsec_mb/pmd_snow3g.c | 539 +------------
drivers/crypto/ipsec_mb/pmd_snow3g_priv.h | 13 -
drivers/crypto/ipsec_mb/pmd_zuc.c | 341 +-------
drivers/crypto/ipsec_mb/pmd_zuc_priv.h | 11 -
12 files changed, 34 insertions(+), 2438 deletions(-)
diff --git a/drivers/crypto/ipsec_mb/pmd_aesni_gcm.c b/drivers/crypto/ipsec_mb/pmd_aesni_gcm.c
index 8d40bd9169..50b65749a2 100644
--- a/drivers/crypto/ipsec_mb/pmd_aesni_gcm.c
+++ b/drivers/crypto/ipsec_mb/pmd_aesni_gcm.c
@@ -3,753 +3,7 @@
*/
#include "pmd_aesni_gcm_priv.h"
-
-static void
-aesni_gcm_set_ops(struct aesni_gcm_ops *ops, IMB_MGR *mb_mgr)
-{
- /* Set 128 bit function pointers. */
- ops[GCM_KEY_128].pre = mb_mgr->gcm128_pre;
- ops[GCM_KEY_128].init = mb_mgr->gcm128_init;
-
- ops[GCM_KEY_128].enc = mb_mgr->gcm128_enc;
- ops[GCM_KEY_128].update_enc = mb_mgr->gcm128_enc_update;
- ops[GCM_KEY_128].finalize_enc = mb_mgr->gcm128_enc_finalize;
-
- ops[GCM_KEY_128].dec = mb_mgr->gcm128_dec;
- ops[GCM_KEY_128].update_dec = mb_mgr->gcm128_dec_update;
- ops[GCM_KEY_128].finalize_dec = mb_mgr->gcm128_dec_finalize;
-
- ops[GCM_KEY_128].gmac_init = mb_mgr->gmac128_init;
- ops[GCM_KEY_128].gmac_update = mb_mgr->gmac128_update;
- ops[GCM_KEY_128].gmac_finalize = mb_mgr->gmac128_finalize;
-
- /* Set 192 bit function pointers. */
- ops[GCM_KEY_192].pre = mb_mgr->gcm192_pre;
- ops[GCM_KEY_192].init = mb_mgr->gcm192_init;
-
- ops[GCM_KEY_192].enc = mb_mgr->gcm192_enc;
- ops[GCM_KEY_192].update_enc = mb_mgr->gcm192_enc_update;
- ops[GCM_KEY_192].finalize_enc = mb_mgr->gcm192_enc_finalize;
-
- ops[GCM_KEY_192].dec = mb_mgr->gcm192_dec;
- ops[GCM_KEY_192].update_dec = mb_mgr->gcm192_dec_update;
- ops[GCM_KEY_192].finalize_dec = mb_mgr->gcm192_dec_finalize;
-
- ops[GCM_KEY_192].gmac_init = mb_mgr->gmac192_init;
- ops[GCM_KEY_192].gmac_update = mb_mgr->gmac192_update;
- ops[GCM_KEY_192].gmac_finalize = mb_mgr->gmac192_finalize;
-
- /* Set 256 bit function pointers. */
- ops[GCM_KEY_256].pre = mb_mgr->gcm256_pre;
- ops[GCM_KEY_256].init = mb_mgr->gcm256_init;
-
- ops[GCM_KEY_256].enc = mb_mgr->gcm256_enc;
- ops[GCM_KEY_256].update_enc = mb_mgr->gcm256_enc_update;
- ops[GCM_KEY_256].finalize_enc = mb_mgr->gcm256_enc_finalize;
-
- ops[GCM_KEY_256].dec = mb_mgr->gcm256_dec;
- ops[GCM_KEY_256].update_dec = mb_mgr->gcm256_dec_update;
- ops[GCM_KEY_256].finalize_dec = mb_mgr->gcm256_dec_finalize;
-
- ops[GCM_KEY_256].gmac_init = mb_mgr->gmac256_init;
- ops[GCM_KEY_256].gmac_update = mb_mgr->gmac256_update;
- ops[GCM_KEY_256].gmac_finalize = mb_mgr->gmac256_finalize;
-}
-
-static int
-aesni_gcm_session_configure(IMB_MGR *mb_mgr, void *session,
- const struct rte_crypto_sym_xform *xform)
-{
- struct aesni_gcm_session *sess = session;
- const struct rte_crypto_sym_xform *auth_xform;
- const struct rte_crypto_sym_xform *cipher_xform;
- const struct rte_crypto_sym_xform *aead_xform;
-
- uint8_t key_length;
- const uint8_t *key;
- enum ipsec_mb_operation mode;
- int ret = 0;
-
- ret = ipsec_mb_parse_xform(xform, &mode, &auth_xform,
- &cipher_xform, &aead_xform);
- if (ret)
- return ret;
-
- /**< GCM key type */
-
- sess->op = mode;
-
- switch (sess->op) {
- case IPSEC_MB_OP_HASH_GEN_ONLY:
- case IPSEC_MB_OP_HASH_VERIFY_ONLY:
- /* AES-GMAC
- * auth_xform = xform;
- */
- if (auth_xform->auth.algo != RTE_CRYPTO_AUTH_AES_GMAC) {
- IPSEC_MB_LOG(ERR,
- "Only AES GMAC is supported as an authentication only algorithm");
- ret = -ENOTSUP;
- goto error_exit;
- }
- /* Set IV parameters */
- sess->iv.offset = auth_xform->auth.iv.offset;
- sess->iv.length = auth_xform->auth.iv.length;
- key_length = auth_xform->auth.key.length;
- key = auth_xform->auth.key.data;
- sess->req_digest_length =
- RTE_MIN(auth_xform->auth.digest_length,
- DIGEST_LENGTH_MAX);
- break;
- case IPSEC_MB_OP_AEAD_AUTHENTICATED_ENCRYPT:
- case IPSEC_MB_OP_AEAD_AUTHENTICATED_DECRYPT:
- /* AES-GCM
- * aead_xform = xform;
- */
-
- if (aead_xform->aead.algo != RTE_CRYPTO_AEAD_AES_GCM) {
- IPSEC_MB_LOG(ERR,
- "The only combined operation supported is AES GCM");
- ret = -ENOTSUP;
- goto error_exit;
- }
- /* Set IV parameters */
- sess->iv.offset = aead_xform->aead.iv.offset;
- sess->iv.length = aead_xform->aead.iv.length;
- key_length = aead_xform->aead.key.length;
- key = aead_xform->aead.key.data;
- sess->aad_length = aead_xform->aead.aad_length;
- sess->req_digest_length =
- RTE_MIN(aead_xform->aead.digest_length,
- DIGEST_LENGTH_MAX);
- break;
- default:
- IPSEC_MB_LOG(
- ERR, "Wrong xform type, has to be AEAD or authentication");
- ret = -ENOTSUP;
- goto error_exit;
- }
-
- /* Check key length, and calculate GCM pre-compute. */
- switch (key_length) {
- case 16:
- sess->key_length = GCM_KEY_128;
- mb_mgr->gcm128_pre(key, &sess->gdata_key);
- break;
- case 24:
- sess->key_length = GCM_KEY_192;
- mb_mgr->gcm192_pre(key, &sess->gdata_key);
- break;
- case 32:
- sess->key_length = GCM_KEY_256;
- mb_mgr->gcm256_pre(key, &sess->gdata_key);
- break;
- default:
- IPSEC_MB_LOG(ERR, "Invalid key length");
- ret = -EINVAL;
- goto error_exit;
- }
-
- /* Digest check */
- if (sess->req_digest_length > 16) {
- IPSEC_MB_LOG(ERR, "Invalid digest length");
- ret = -EINVAL;
- goto error_exit;
- }
- /*
- * If size requested is different, generate the full digest
- * (16 bytes) in a temporary location and then memcpy
- * the requested number of bytes.
- */
- if (sess->req_digest_length < 4)
- sess->gen_digest_length = 16;
- else
- sess->gen_digest_length = sess->req_digest_length;
-
-error_exit:
- return ret;
-}
-
-/**
- * Process a completed job and return rte_mbuf which job processed
- *
- * @param job IMB_JOB job to process
- *
- * @return
- * - Returns processed mbuf which is trimmed of output digest used in
- * verification of supplied digest in the case of a HASH_CIPHER operation
- * - Returns NULL on invalid job
- */
-static void
-post_process_gcm_crypto_op(struct ipsec_mb_qp *qp,
- struct rte_crypto_op *op,
- struct aesni_gcm_session *session)
-{
- struct aesni_gcm_qp_data *qp_data = ipsec_mb_get_qp_private_data(qp);
-
- op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
- /* Verify digest if required */
- if (session->op == IPSEC_MB_OP_AEAD_AUTHENTICATED_DECRYPT ||
- session->op == IPSEC_MB_OP_HASH_VERIFY_ONLY) {
- uint8_t *digest;
-
- uint8_t *tag = qp_data->temp_digest;
-
- if (session->op == IPSEC_MB_OP_HASH_VERIFY_ONLY)
- digest = op->sym->auth.digest.data;
- else
- digest = op->sym->aead.digest.data;
-
-#ifdef RTE_LIBRTE_PMD_AESNI_GCM_DEBUG
- rte_hexdump(stdout, "auth tag (orig):",
- digest, session->req_digest_length);
- rte_hexdump(stdout, "auth tag (calc):",
- tag, session->req_digest_length);
-#endif
-
- if (memcmp(tag, digest, session->req_digest_length) != 0)
- op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
- } else {
- if (session->req_digest_length != session->gen_digest_length) {
- if (session->op ==
- IPSEC_MB_OP_AEAD_AUTHENTICATED_ENCRYPT)
- memcpy(op->sym->aead.digest.data,
- qp_data->temp_digest,
- session->req_digest_length);
- else
- memcpy(op->sym->auth.digest.data,
- qp_data->temp_digest,
- session->req_digest_length);
- }
- }
-}
-
-/**
- * Process a completed GCM request
- *
- * @param qp Queue Pair to process
- * @param op Crypto operation
- * @param sess AESNI-GCM session
- *
- */
-static void
-handle_completed_gcm_crypto_op(struct ipsec_mb_qp *qp,
- struct rte_crypto_op *op,
- struct aesni_gcm_session *sess)
-{
- post_process_gcm_crypto_op(qp, op, sess);
-
- /* Free session if a session-less crypto op */
- if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
- memset(sess, 0, sizeof(struct aesni_gcm_session));
- rte_mempool_put(qp->sess_mp, op->sym->session);
- op->sym->session = NULL;
- }
-}
-
-/**
- * Process a crypto operation, calling
- * the GCM API from the multi buffer library.
- *
- * @param qp queue pair
- * @param op symmetric crypto operation
- * @param session GCM session
- *
- * @return
- * 0 on success
- */
-static int
-process_gcm_crypto_op(struct ipsec_mb_qp *qp, struct rte_crypto_op *op,
- struct aesni_gcm_session *session)
-{
- struct aesni_gcm_qp_data *qp_data = ipsec_mb_get_qp_private_data(qp);
- uint8_t *src, *dst;
- uint8_t *iv_ptr;
- struct rte_crypto_sym_op *sym_op = op->sym;
- struct rte_mbuf *m_src = sym_op->m_src;
- uint32_t offset, data_offset, data_length;
- uint32_t part_len, total_len, data_len;
- uint8_t *tag;
- unsigned int oop = 0;
- struct aesni_gcm_ops *ops = &qp_data->ops[session->key_length];
-
- if (session->op == IPSEC_MB_OP_AEAD_AUTHENTICATED_ENCRYPT ||
- session->op == IPSEC_MB_OP_AEAD_AUTHENTICATED_DECRYPT) {
- offset = sym_op->aead.data.offset;
- data_offset = offset;
- data_length = sym_op->aead.data.length;
- } else {
- offset = sym_op->auth.data.offset;
- data_offset = offset;
- data_length = sym_op->auth.data.length;
- }
-
- RTE_ASSERT(m_src != NULL);
-
- while (offset >= m_src->data_len && data_length != 0) {
- offset -= m_src->data_len;
- m_src = m_src->next;
-
- RTE_ASSERT(m_src != NULL);
- }
-
- src = rte_pktmbuf_mtod_offset(m_src, uint8_t *, offset);
-
- data_len = m_src->data_len - offset;
- part_len = (data_len < data_length) ? data_len :
- data_length;
-
- RTE_ASSERT((sym_op->m_dst == NULL) ||
- ((sym_op->m_dst != NULL) &&
- rte_pktmbuf_is_contiguous(sym_op->m_dst)));
-
- /* In-place */
- if (sym_op->m_dst == NULL || (sym_op->m_dst == sym_op->m_src))
- dst = src;
- /* Out-of-place */
- else {
- oop = 1;
- /* Segmented destination buffer is not supported
- * if operation is Out-of-place
- */
- RTE_ASSERT(rte_pktmbuf_is_contiguous(sym_op->m_dst));
- dst = rte_pktmbuf_mtod_offset(sym_op->m_dst, uint8_t *,
- data_offset);
- }
-
- iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
- session->iv.offset);
-
- if (session->op == IPSEC_MB_OP_AEAD_AUTHENTICATED_ENCRYPT) {
- ops->init(&session->gdata_key, &qp_data->gcm_ctx_data, iv_ptr,
- sym_op->aead.aad.data,
- (uint64_t)session->aad_length);
-
- ops->update_enc(&session->gdata_key, &qp_data->gcm_ctx_data,
- dst, src, (uint64_t)part_len);
- total_len = data_length - part_len;
-
- while (total_len) {
- m_src = m_src->next;
-
- RTE_ASSERT(m_src != NULL);
-
- src = rte_pktmbuf_mtod(m_src, uint8_t *);
- if (oop)
- dst += part_len;
- else
- dst = src;
- part_len = (m_src->data_len < total_len) ?
- m_src->data_len : total_len;
-
- ops->update_enc(&session->gdata_key,
- &qp_data->gcm_ctx_data,
- dst, src, (uint64_t)part_len);
- total_len -= part_len;
- }
-
- if (session->req_digest_length != session->gen_digest_length)
- tag = qp_data->temp_digest;
- else
- tag = sym_op->aead.digest.data;
-
- ops->finalize_enc(&session->gdata_key, &qp_data->gcm_ctx_data,
- tag, session->gen_digest_length);
- } else if (session->op == IPSEC_MB_OP_AEAD_AUTHENTICATED_DECRYPT) {
- ops->init(&session->gdata_key, &qp_data->gcm_ctx_data, iv_ptr,
- sym_op->aead.aad.data,
- (uint64_t)session->aad_length);
-
- ops->update_dec(&session->gdata_key, &qp_data->gcm_ctx_data,
- dst, src, (uint64_t)part_len);
- total_len = data_length - part_len;
-
- while (total_len) {
- m_src = m_src->next;
-
- RTE_ASSERT(m_src != NULL);
-
- src = rte_pktmbuf_mtod(m_src, uint8_t *);
- if (oop)
- dst += part_len;
- else
- dst = src;
- part_len = (m_src->data_len < total_len) ?
- m_src->data_len : total_len;
-
- ops->update_dec(&session->gdata_key,
- &qp_data->gcm_ctx_data,
- dst, src, (uint64_t)part_len);
- total_len -= part_len;
- }
-
- tag = qp_data->temp_digest;
- ops->finalize_dec(&session->gdata_key, &qp_data->gcm_ctx_data,
- tag, session->gen_digest_length);
- } else if (session->op == IPSEC_MB_OP_HASH_GEN_ONLY) {
- ops->gmac_init(&session->gdata_key, &qp_data->gcm_ctx_data,
- iv_ptr, session->iv.length);
-
- ops->gmac_update(&session->gdata_key, &qp_data->gcm_ctx_data,
- src, (uint64_t)part_len);
- total_len = data_length - part_len;
-
- while (total_len) {
- m_src = m_src->next;
-
- RTE_ASSERT(m_src != NULL);
-
- src = rte_pktmbuf_mtod(m_src, uint8_t *);
- part_len = (m_src->data_len < total_len) ?
- m_src->data_len : total_len;
-
- ops->gmac_update(&session->gdata_key,
- &qp_data->gcm_ctx_data, src,
- (uint64_t)part_len);
- total_len -= part_len;
- }
-
- if (session->req_digest_length != session->gen_digest_length)
- tag = qp_data->temp_digest;
- else
- tag = sym_op->auth.digest.data;
-
- ops->gmac_finalize(&session->gdata_key, &qp_data->gcm_ctx_data,
- tag, session->gen_digest_length);
- } else { /* IPSEC_MB_OP_HASH_VERIFY_ONLY */
- ops->gmac_init(&session->gdata_key, &qp_data->gcm_ctx_data,
- iv_ptr, session->iv.length);
-
- ops->gmac_update(&session->gdata_key, &qp_data->gcm_ctx_data,
- src, (uint64_t)part_len);
- total_len = data_length - part_len;
-
- while (total_len) {
- m_src = m_src->next;
-
- RTE_ASSERT(m_src != NULL);
-
- src = rte_pktmbuf_mtod(m_src, uint8_t *);
- part_len = (m_src->data_len < total_len) ?
- m_src->data_len : total_len;
-
- ops->gmac_update(&session->gdata_key,
- &qp_data->gcm_ctx_data, src,
- (uint64_t)part_len);
- total_len -= part_len;
- }
-
- tag = qp_data->temp_digest;
-
- ops->gmac_finalize(&session->gdata_key, &qp_data->gcm_ctx_data,
- tag, session->gen_digest_length);
- }
- return 0;
-}
-
-/** Get gcm session */
-static inline struct aesni_gcm_session *
-aesni_gcm_get_session(struct ipsec_mb_qp *qp,
- struct rte_crypto_op *op)
-{
- struct rte_cryptodev_sym_session *sess = NULL;
- struct rte_crypto_sym_op *sym_op = op->sym;
-
- if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
- if (likely(sym_op->session != NULL))
- sess = sym_op->session;
- } else {
- if (rte_mempool_get(qp->sess_mp, (void **)&sess))
- return NULL;
-
- if (unlikely(sess->sess_data_sz <
- sizeof(struct aesni_gcm_session))) {
- rte_mempool_put(qp->sess_mp, sess);
- return NULL;
- }
-
- if (unlikely(aesni_gcm_session_configure(qp->mb_mgr,
- CRYPTODEV_GET_SYM_SESS_PRIV(sess),
- sym_op->xform) != 0)) {
- rte_mempool_put(qp->sess_mp, sess);
- sess = NULL;
- }
- sym_op->session = sess;
- }
-
- if (unlikely(sess == NULL))
- op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
-
- return CRYPTODEV_GET_SYM_SESS_PRIV(sess);
-}
-
-static uint16_t
-aesni_gcm_pmd_dequeue_burst(void *queue_pair,
- struct rte_crypto_op **ops, uint16_t nb_ops)
-{
- struct aesni_gcm_session *sess;
- struct ipsec_mb_qp *qp = queue_pair;
-
- int retval = 0;
- unsigned int i, nb_dequeued;
-
- nb_dequeued = rte_ring_dequeue_burst(qp->ingress_queue,
- (void **)ops, nb_ops, NULL);
-
- for (i = 0; i < nb_dequeued; i++) {
-
- sess = aesni_gcm_get_session(qp, ops[i]);
- if (unlikely(sess == NULL)) {
- ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
- qp->stats.dequeue_err_count++;
- break;
- }
-
- retval = process_gcm_crypto_op(qp, ops[i], sess);
- if (retval < 0) {
- ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
- qp->stats.dequeue_err_count++;
- break;
- }
-
- handle_completed_gcm_crypto_op(qp, ops[i], sess);
- }
-
- qp->stats.dequeued_count += i;
-
- return i;
-}
-
-static inline void
-aesni_gcm_fill_error_code(struct rte_crypto_sym_vec *vec,
- int32_t errnum)
-{
- uint32_t i;
-
- for (i = 0; i < vec->num; i++)
- vec->status[i] = errnum;
-}
-
-static inline int32_t
-aesni_gcm_sgl_op_finalize_encryption(const struct aesni_gcm_session *s,
- struct gcm_context_data *gdata_ctx,
- uint8_t *digest, struct aesni_gcm_ops ops)
-{
- if (s->req_digest_length != s->gen_digest_length) {
- uint8_t tmpdigest[s->gen_digest_length];
-
- ops.finalize_enc(&s->gdata_key, gdata_ctx, tmpdigest,
- s->gen_digest_length);
- memcpy(digest, tmpdigest, s->req_digest_length);
- } else {
- ops.finalize_enc(&s->gdata_key, gdata_ctx, digest,
- s->gen_digest_length);
- }
-
- return 0;
-}
-
-static inline int32_t
-aesni_gcm_sgl_op_finalize_decryption(const struct aesni_gcm_session *s,
- struct gcm_context_data *gdata_ctx,
- uint8_t *digest, struct aesni_gcm_ops ops)
-{
- uint8_t tmpdigest[s->gen_digest_length];
-
- ops.finalize_dec(&s->gdata_key, gdata_ctx, tmpdigest,
- s->gen_digest_length);
-
- return memcmp(digest, tmpdigest, s->req_digest_length) == 0 ? 0
- : EBADMSG;
-}
-
-static inline void
-aesni_gcm_process_gcm_sgl_op(const struct aesni_gcm_session *s,
- struct gcm_context_data *gdata_ctx,
- struct rte_crypto_sgl *sgl, void *iv, void *aad,
- struct aesni_gcm_ops ops)
-{
- uint32_t i;
-
- /* init crypto operation */
- ops.init(&s->gdata_key, gdata_ctx, iv, aad,
- (uint64_t)s->aad_length);
-
- /* update with sgl data */
- for (i = 0; i < sgl->num; i++) {
- struct rte_crypto_vec *vec = &sgl->vec[i];
-
- switch (s->op) {
- case IPSEC_MB_OP_AEAD_AUTHENTICATED_ENCRYPT:
- ops.update_enc(&s->gdata_key, gdata_ctx,
- vec->base, vec->base, vec->len);
- break;
- case IPSEC_MB_OP_AEAD_AUTHENTICATED_DECRYPT:
- ops.update_dec(&s->gdata_key, gdata_ctx,
- vec->base, vec->base, vec->len);
- break;
- default:
- IPSEC_MB_LOG(ERR, "Invalid session op");
- break;
- }
-
- }
-}
-
-static inline void
-aesni_gcm_process_gmac_sgl_op(const struct aesni_gcm_session *s,
- struct gcm_context_data *gdata_ctx,
- struct rte_crypto_sgl *sgl, void *iv,
- struct aesni_gcm_ops ops)
-{
- ops.init(&s->gdata_key, gdata_ctx, iv, sgl->vec[0].base,
- sgl->vec[0].len);
-}
-
-static inline uint32_t
-aesni_gcm_sgl_encrypt(struct aesni_gcm_session *s,
- struct gcm_context_data *gdata_ctx,
- struct rte_crypto_sym_vec *vec,
- struct aesni_gcm_ops ops)
-{
- uint32_t i, processed;
-
- processed = 0;
- for (i = 0; i < vec->num; ++i) {
- aesni_gcm_process_gcm_sgl_op(s, gdata_ctx, &vec->src_sgl[i],
- vec->iv[i].va, vec->aad[i].va,
- ops);
- vec->status[i] = aesni_gcm_sgl_op_finalize_encryption(
- s, gdata_ctx, vec->digest[i].va, ops);
- processed += (vec->status[i] == 0);
- }
-
- return processed;
-}
-
-static inline uint32_t
-aesni_gcm_sgl_decrypt(struct aesni_gcm_session *s,
- struct gcm_context_data *gdata_ctx,
- struct rte_crypto_sym_vec *vec,
- struct aesni_gcm_ops ops)
-{
- uint32_t i, processed;
-
- processed = 0;
- for (i = 0; i < vec->num; ++i) {
- aesni_gcm_process_gcm_sgl_op(s, gdata_ctx, &vec->src_sgl[i],
- vec->iv[i].va, vec->aad[i].va,
- ops);
- vec->status[i] = aesni_gcm_sgl_op_finalize_decryption(
- s, gdata_ctx, vec->digest[i].va, ops);
- processed += (vec->status[i] == 0);
- }
-
- return processed;
-}
-
-static inline uint32_t
-aesni_gmac_sgl_generate(struct aesni_gcm_session *s,
- struct gcm_context_data *gdata_ctx,
- struct rte_crypto_sym_vec *vec,
- struct aesni_gcm_ops ops)
-{
- uint32_t i, processed;
-
- processed = 0;
- for (i = 0; i < vec->num; ++i) {
- if (vec->src_sgl[i].num != 1) {
- vec->status[i] = ENOTSUP;
- continue;
- }
-
- aesni_gcm_process_gmac_sgl_op(s, gdata_ctx, &vec->src_sgl[i],
- vec->iv[i].va, ops);
- vec->status[i] = aesni_gcm_sgl_op_finalize_encryption(
- s, gdata_ctx, vec->digest[i].va, ops);
- processed += (vec->status[i] == 0);
- }
-
- return processed;
-}
-
-static inline uint32_t
-aesni_gmac_sgl_verify(struct aesni_gcm_session *s,
- struct gcm_context_data *gdata_ctx,
- struct rte_crypto_sym_vec *vec,
- struct aesni_gcm_ops ops)
-{
- uint32_t i, processed;
-
- processed = 0;
- for (i = 0; i < vec->num; ++i) {
- if (vec->src_sgl[i].num != 1) {
- vec->status[i] = ENOTSUP;
- continue;
- }
-
- aesni_gcm_process_gmac_sgl_op(s, gdata_ctx, &vec->src_sgl[i],
- vec->iv[i].va, ops);
- vec->status[i] = aesni_gcm_sgl_op_finalize_decryption(
- s, gdata_ctx, vec->digest[i].va, ops);
- processed += (vec->status[i] == 0);
- }
-
- return processed;
-}
-
-/** Process CPU crypto bulk operations */
-static uint32_t
-aesni_gcm_process_bulk(struct rte_cryptodev *dev __rte_unused,
- struct rte_cryptodev_sym_session *sess,
- __rte_unused union rte_crypto_sym_ofs ofs,
- struct rte_crypto_sym_vec *vec)
-{
- struct aesni_gcm_session *s = CRYPTODEV_GET_SYM_SESS_PRIV(sess);
- struct gcm_context_data gdata_ctx;
- IMB_MGR *mb_mgr;
-
- /* get per-thread MB MGR, create one if needed */
- mb_mgr = get_per_thread_mb_mgr();
- if (unlikely(mb_mgr == NULL))
- return 0;
-
- /* Check if function pointers have been set for this thread ops. */
- if (unlikely(RTE_PER_LCORE(gcm_ops)[s->key_length].init == NULL))
- aesni_gcm_set_ops(RTE_PER_LCORE(gcm_ops), mb_mgr);
-
- switch (s->op) {
- case IPSEC_MB_OP_AEAD_AUTHENTICATED_ENCRYPT:
- return aesni_gcm_sgl_encrypt(s, &gdata_ctx, vec,
- RTE_PER_LCORE(gcm_ops)[s->key_length]);
- case IPSEC_MB_OP_AEAD_AUTHENTICATED_DECRYPT:
- return aesni_gcm_sgl_decrypt(s, &gdata_ctx, vec,
- RTE_PER_LCORE(gcm_ops)[s->key_length]);
- case IPSEC_MB_OP_HASH_GEN_ONLY:
- return aesni_gmac_sgl_generate(s, &gdata_ctx, vec,
- RTE_PER_LCORE(gcm_ops)[s->key_length]);
- case IPSEC_MB_OP_HASH_VERIFY_ONLY:
- return aesni_gmac_sgl_verify(s, &gdata_ctx, vec,
- RTE_PER_LCORE(gcm_ops)[s->key_length]);
- default:
- aesni_gcm_fill_error_code(vec, EINVAL);
- return 0;
- }
-}
-
-static int
-aesni_gcm_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
- const struct rte_cryptodev_qp_conf *qp_conf,
- int socket_id)
-{
- int ret = ipsec_mb_qp_setup(dev, qp_id, qp_conf, socket_id);
- if (ret < 0)
- return ret;
-
- struct ipsec_mb_qp *qp = dev->data->queue_pairs[qp_id];
- struct aesni_gcm_qp_data *qp_data = ipsec_mb_get_qp_private_data(qp);
- aesni_gcm_set_ops(qp_data->ops, qp->mb_mgr);
- return 0;
-}
+#include "pmd_aesni_mb_priv.h"
struct rte_cryptodev_ops aesni_gcm_pmd_ops = {
.dev_configure = ipsec_mb_config,
@@ -762,10 +16,10 @@ struct rte_cryptodev_ops aesni_gcm_pmd_ops = {
.dev_infos_get = ipsec_mb_info_get,
- .queue_pair_setup = aesni_gcm_qp_setup,
+ .queue_pair_setup = ipsec_mb_qp_setup,
.queue_pair_release = ipsec_mb_qp_release,
- .sym_cpu_process = aesni_gcm_process_bulk,
+ .sym_cpu_process = aesni_mb_process_bulk,
.sym_session_get_size = ipsec_mb_sym_session_get_size,
.sym_session_configure = ipsec_mb_sym_session_configure,
@@ -801,7 +55,7 @@ RTE_INIT(ipsec_mb_register_aesni_gcm)
&ipsec_mb_pmds[IPSEC_MB_PMD_TYPE_AESNI_GCM];
aesni_gcm_data->caps = aesni_gcm_capabilities;
- aesni_gcm_data->dequeue_burst = aesni_gcm_pmd_dequeue_burst;
+ aesni_gcm_data->dequeue_burst = aesni_mb_dequeue_burst;
aesni_gcm_data->feature_flags =
RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
@@ -814,6 +68,5 @@ RTE_INIT(ipsec_mb_register_aesni_gcm)
aesni_gcm_data->ops = &aesni_gcm_pmd_ops;
aesni_gcm_data->qp_priv_size = sizeof(struct aesni_gcm_qp_data);
aesni_gcm_data->queue_pair_configure = NULL;
- aesni_gcm_data->session_configure = aesni_gcm_session_configure;
- aesni_gcm_data->session_priv_size = sizeof(struct aesni_gcm_session);
+ aesni_gcm_data->session_configure = aesni_mb_session_configure;
}
diff --git a/drivers/crypto/ipsec_mb/pmd_aesni_gcm_priv.h b/drivers/crypto/ipsec_mb/pmd_aesni_gcm_priv.h
index 55a0416030..a40543ad15 100644
--- a/drivers/crypto/ipsec_mb/pmd_aesni_gcm_priv.h
+++ b/drivers/crypto/ipsec_mb/pmd_aesni_gcm_priv.h
@@ -143,25 +143,4 @@ struct aesni_gcm_qp_data {
/**< Operation Handlers */
};
-/** AESNI GCM private session structure */
-struct aesni_gcm_session {
- struct {
- uint16_t length;
- uint16_t offset;
- } iv;
- /**< IV parameters */
- uint16_t aad_length;
- /**< AAD length */
- uint16_t req_digest_length;
- /**< Requested digest length */
- uint16_t gen_digest_length;
- /**< Generated digest length */
- enum ipsec_mb_operation op;
- /**< GCM operation type */
- struct gcm_key_data gdata_key;
- /**< GCM parameters */
- enum aesni_gcm_key_length key_length;
- /** Key Length */
-};
-
#endif /* _PMD_AESNI_GCM_PRIV_H_ */
diff --git a/drivers/crypto/ipsec_mb/pmd_aesni_mb.c b/drivers/crypto/ipsec_mb/pmd_aesni_mb.c
index 4de4866cf3..6f0a1de24d 100644
--- a/drivers/crypto/ipsec_mb/pmd_aesni_mb.c
+++ b/drivers/crypto/ipsec_mb/pmd_aesni_mb.c
@@ -761,7 +761,7 @@ aesni_mb_set_session_aead_parameters(const IMB_MGR *mb_mgr,
}
/** Configure a aesni multi-buffer session from a crypto xform chain */
-static int
+int
aesni_mb_session_configure(IMB_MGR *mb_mgr,
void *priv_sess,
const struct rte_crypto_sym_xform *xform)
@@ -2131,7 +2131,7 @@ set_job_null_op(IMB_JOB *job, struct rte_crypto_op *op)
}
#if IMB_VERSION(1, 2, 0) < IMB_VERSION_NUM
-static uint16_t
+uint16_t
aesni_mb_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
uint16_t nb_ops)
{
@@ -2321,7 +2321,7 @@ flush_mb_mgr(struct ipsec_mb_qp *qp, IMB_MGR *mb_mgr,
return processed_ops;
}
-static uint16_t
+uint16_t
aesni_mb_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
uint16_t nb_ops)
{
@@ -2456,7 +2456,7 @@ verify_sync_dgst(struct rte_crypto_sym_vec *vec,
return k;
}
-static uint32_t
+uint32_t
aesni_mb_process_bulk(struct rte_cryptodev *dev __rte_unused,
struct rte_cryptodev_sym_session *sess, union rte_crypto_sym_ofs sofs,
struct rte_crypto_sym_vec *vec)
diff --git a/drivers/crypto/ipsec_mb/pmd_aesni_mb_priv.h b/drivers/crypto/ipsec_mb/pmd_aesni_mb_priv.h
index 85994fe5a1..9f0a89d20b 100644
--- a/drivers/crypto/ipsec_mb/pmd_aesni_mb_priv.h
+++ b/drivers/crypto/ipsec_mb/pmd_aesni_mb_priv.h
@@ -21,6 +21,19 @@
#define MAX_NUM_SEGS 16
#endif
+int
+aesni_mb_session_configure(IMB_MGR * m __rte_unused, void *priv_sess,
+ const struct rte_crypto_sym_xform *xform);
+
+uint16_t
+aesni_mb_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
+ uint16_t nb_ops);
+
+uint32_t
+aesni_mb_process_bulk(struct rte_cryptodev *dev __rte_unused,
+ struct rte_cryptodev_sym_session *sess, union rte_crypto_sym_ofs sofs,
+ struct rte_crypto_sym_vec *vec);
+
static const struct rte_cryptodev_capabilities aesni_mb_capabilities[] = {
{ /* MD5 HMAC */
.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
diff --git a/drivers/crypto/ipsec_mb/pmd_chacha_poly.c b/drivers/crypto/ipsec_mb/pmd_chacha_poly.c
index 97e7cef233..20bdf1b0c5 100644
--- a/drivers/crypto/ipsec_mb/pmd_chacha_poly.c
+++ b/drivers/crypto/ipsec_mb/pmd_chacha_poly.c
@@ -3,334 +3,7 @@
*/
#include "pmd_chacha_poly_priv.h"
-
-/** Parse crypto xform chain and set private session parameters. */
-static int
-chacha20_poly1305_session_configure(IMB_MGR * mb_mgr __rte_unused,
- void *priv_sess, const struct rte_crypto_sym_xform *xform)
-{
- struct chacha20_poly1305_session *sess = priv_sess;
- const struct rte_crypto_sym_xform *auth_xform;
- const struct rte_crypto_sym_xform *cipher_xform;
- const struct rte_crypto_sym_xform *aead_xform;
-
- uint8_t key_length;
- const uint8_t *key;
- enum ipsec_mb_operation mode;
- int ret = 0;
-
- ret = ipsec_mb_parse_xform(xform, &mode, &auth_xform,
- &cipher_xform, &aead_xform);
- if (ret)
- return ret;
-
- sess->op = mode;
-
- switch (sess->op) {
- case IPSEC_MB_OP_AEAD_AUTHENTICATED_ENCRYPT:
- case IPSEC_MB_OP_AEAD_AUTHENTICATED_DECRYPT:
- if (aead_xform->aead.algo !=
- RTE_CRYPTO_AEAD_CHACHA20_POLY1305) {
- IPSEC_MB_LOG(ERR,
- "The only combined operation supported is CHACHA20 POLY1305");
- ret = -ENOTSUP;
- goto error_exit;
- }
- /* Set IV parameters */
- sess->iv.offset = aead_xform->aead.iv.offset;
- sess->iv.length = aead_xform->aead.iv.length;
- key_length = aead_xform->aead.key.length;
- key = aead_xform->aead.key.data;
- sess->aad_length = aead_xform->aead.aad_length;
- sess->req_digest_length = aead_xform->aead.digest_length;
- break;
- default:
- IPSEC_MB_LOG(
- ERR, "Wrong xform type, has to be AEAD or authentication");
- ret = -ENOTSUP;
- goto error_exit;
- }
-
- /* IV check */
- if (sess->iv.length != CHACHA20_POLY1305_IV_LENGTH &&
- sess->iv.length != 0) {
- IPSEC_MB_LOG(ERR, "Wrong IV length");
- ret = -EINVAL;
- goto error_exit;
- }
-
- /* Check key length */
- if (key_length != CHACHA20_POLY1305_KEY_SIZE) {
- IPSEC_MB_LOG(ERR, "Invalid key length");
- ret = -EINVAL;
- goto error_exit;
- } else {
- memcpy(sess->key, key, CHACHA20_POLY1305_KEY_SIZE);
- }
-
- /* Digest check */
- if (sess->req_digest_length != CHACHA20_POLY1305_DIGEST_LENGTH) {
- IPSEC_MB_LOG(ERR, "Invalid digest length");
- ret = -EINVAL;
- goto error_exit;
- } else {
- sess->gen_digest_length = CHACHA20_POLY1305_DIGEST_LENGTH;
- }
-
-error_exit:
- return ret;
-}
-
-/**
- * Process a crypto operation, calling
- * the direct chacha poly API from the multi buffer library.
- *
- * @param qp queue pair
- * @param op symmetric crypto operation
- * @param session chacha poly session
- *
- * @return
- * - Return 0 if success
- */
-static int
-chacha20_poly1305_crypto_op(struct ipsec_mb_qp *qp, struct rte_crypto_op *op,
- struct chacha20_poly1305_session *session)
-{
- struct chacha20_poly1305_qp_data *qp_data =
- ipsec_mb_get_qp_private_data(qp);
- uint8_t *src, *dst;
- uint8_t *iv_ptr;
- struct rte_crypto_sym_op *sym_op = op->sym;
- struct rte_mbuf *m_src = sym_op->m_src;
- uint32_t offset, data_offset, data_length;
- uint32_t part_len, data_len;
- int total_len;
- uint8_t *tag;
- unsigned int oop = 0;
-
- offset = sym_op->aead.data.offset;
- data_offset = offset;
- data_length = sym_op->aead.data.length;
- RTE_ASSERT(m_src != NULL);
-
- while (offset >= m_src->data_len && data_length != 0) {
- offset -= m_src->data_len;
- m_src = m_src->next;
-
- RTE_ASSERT(m_src != NULL);
- }
-
- src = rte_pktmbuf_mtod_offset(m_src, uint8_t *, offset);
-
- data_len = m_src->data_len - offset;
- part_len = (data_len < data_length) ? data_len :
- data_length;
-
- /* In-place */
- if (sym_op->m_dst == NULL || (sym_op->m_dst == sym_op->m_src))
- dst = src;
- /* Out-of-place */
- else {
- oop = 1;
- /* Segmented destination buffer is not supported
- * if operation is Out-of-place
- */
- RTE_ASSERT(rte_pktmbuf_is_contiguous(sym_op->m_dst));
- dst = rte_pktmbuf_mtod_offset(sym_op->m_dst, uint8_t *,
- data_offset);
- }
-
- iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
- session->iv.offset);
-
- IMB_CHACHA20_POLY1305_INIT(qp->mb_mgr, session->key,
- &qp_data->chacha20_poly1305_ctx_data,
- iv_ptr, sym_op->aead.aad.data,
- (uint64_t)session->aad_length);
-
- if (session->op == IPSEC_MB_OP_AEAD_AUTHENTICATED_ENCRYPT) {
- IMB_CHACHA20_POLY1305_ENC_UPDATE(qp->mb_mgr,
- session->key,
- &qp_data->chacha20_poly1305_ctx_data,
- dst, src, (uint64_t)part_len);
- total_len = data_length - part_len;
-
- while (total_len) {
- m_src = m_src->next;
- RTE_ASSERT(m_src != NULL);
-
- src = rte_pktmbuf_mtod(m_src, uint8_t *);
- if (oop)
- dst += part_len;
- else
- dst = src;
- part_len = (m_src->data_len < total_len) ?
- m_src->data_len : total_len;
-
- if (dst == NULL || src == NULL) {
- IPSEC_MB_LOG(ERR, "Invalid src or dst input");
- return -EINVAL;
- }
- IMB_CHACHA20_POLY1305_ENC_UPDATE(qp->mb_mgr,
- session->key,
- &qp_data->chacha20_poly1305_ctx_data,
- dst, src, (uint64_t)part_len);
- total_len -= part_len;
- if (total_len < 0) {
- IPSEC_MB_LOG(ERR, "Invalid part len");
- return -EINVAL;
- }
- }
-
- tag = sym_op->aead.digest.data;
- IMB_CHACHA20_POLY1305_ENC_FINALIZE(qp->mb_mgr,
- &qp_data->chacha20_poly1305_ctx_data,
- tag, session->gen_digest_length);
-
- } else {
- IMB_CHACHA20_POLY1305_DEC_UPDATE(qp->mb_mgr,
- session->key,
- &qp_data->chacha20_poly1305_ctx_data,
- dst, src, (uint64_t)part_len);
-
- total_len = data_length - part_len;
-
- while (total_len) {
- m_src = m_src->next;
-
- RTE_ASSERT(m_src != NULL);
-
- src = rte_pktmbuf_mtod(m_src, uint8_t *);
- if (oop)
- dst += part_len;
- else
- dst = src;
- part_len = (m_src->data_len < total_len) ?
- m_src->data_len : total_len;
-
- if (dst == NULL || src == NULL) {
- IPSEC_MB_LOG(ERR, "Invalid src or dst input");
- return -EINVAL;
- }
- IMB_CHACHA20_POLY1305_DEC_UPDATE(qp->mb_mgr,
- session->key,
- &qp_data->chacha20_poly1305_ctx_data,
- dst, src, (uint64_t)part_len);
- total_len -= part_len;
- if (total_len < 0) {
- IPSEC_MB_LOG(ERR, "Invalid part len");
- return -EINVAL;
- }
- }
-
- tag = qp_data->temp_digest;
- IMB_CHACHA20_POLY1305_DEC_FINALIZE(qp->mb_mgr,
- &qp_data->chacha20_poly1305_ctx_data,
- tag, session->gen_digest_length);
- }
-
- return 0;
-}
-
-/**
- * Process a completed chacha poly op
- *
- * @param qp Queue Pair to process
- * @param op Crypto operation
- * @param sess Crypto session
- *
- * @return
- * - void
- */
-static void
-post_process_chacha20_poly1305_crypto_op(struct ipsec_mb_qp *qp,
- struct rte_crypto_op *op,
- struct chacha20_poly1305_session *session)
-{
- struct chacha20_poly1305_qp_data *qp_data =
- ipsec_mb_get_qp_private_data(qp);
-
- op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
- /* Verify digest if required */
- if (session->op == IPSEC_MB_OP_AEAD_AUTHENTICATED_DECRYPT ||
- session->op == IPSEC_MB_OP_HASH_VERIFY_ONLY) {
- uint8_t *digest = op->sym->aead.digest.data;
- uint8_t *tag = qp_data->temp_digest;
-
-#ifdef RTE_LIBRTE_PMD_CHACHA20_POLY1305_DEBUG
- rte_hexdump(stdout, "auth tag (orig):",
- digest, session->req_digest_length);
- rte_hexdump(stdout, "auth tag (calc):",
- tag, session->req_digest_length);
-#endif
- if (memcmp(tag, digest, session->req_digest_length) != 0)
- op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
-
- }
-
-}
-
-/**
- * Process a completed Chacha20_poly1305 request
- *
- * @param qp Queue Pair to process
- * @param op Crypto operation
- * @param sess Crypto session
- *
- * @return
- * - void
- */
-static void
-handle_completed_chacha20_poly1305_crypto_op(struct ipsec_mb_qp *qp,
- struct rte_crypto_op *op,
- struct chacha20_poly1305_session *sess)
-{
- post_process_chacha20_poly1305_crypto_op(qp, op, sess);
-
- /* Free session if a session-less crypto op */
- if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
- memset(sess, 0, sizeof(struct chacha20_poly1305_session));
- rte_mempool_put(qp->sess_mp, op->sym->session);
- op->sym->session = NULL;
- }
-}
-
-static uint16_t
-chacha20_poly1305_pmd_dequeue_burst(void *queue_pair,
- struct rte_crypto_op **ops, uint16_t nb_ops)
-{
- struct chacha20_poly1305_session *sess;
- struct ipsec_mb_qp *qp = queue_pair;
-
- int retval = 0;
- unsigned int i = 0, nb_dequeued;
-
- nb_dequeued = rte_ring_dequeue_burst(qp->ingress_queue,
- (void **)ops, nb_ops, NULL);
-
- for (i = 0; i < nb_dequeued; i++) {
-
- sess = ipsec_mb_get_session_private(qp, ops[i]);
- if (unlikely(sess == NULL)) {
- ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
- qp->stats.dequeue_err_count++;
- break;
- }
-
- retval = chacha20_poly1305_crypto_op(qp, ops[i], sess);
- if (retval < 0) {
- ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
- qp->stats.dequeue_err_count++;
- break;
- }
-
- handle_completed_chacha20_poly1305_crypto_op(qp, ops[i], sess);
- }
-
- qp->stats.dequeued_count += i;
-
- return i;
-}
+#include "pmd_aesni_mb_priv.h"
struct rte_cryptodev_ops chacha20_poly1305_pmd_ops = {
.dev_configure = ipsec_mb_config,
@@ -384,7 +57,7 @@ RTE_INIT(ipsec_mb_register_chacha20_poly1305)
= &ipsec_mb_pmds[IPSEC_MB_PMD_TYPE_CHACHA20_POLY1305];
chacha_poly_data->caps = chacha20_poly1305_capabilities;
- chacha_poly_data->dequeue_burst = chacha20_poly1305_pmd_dequeue_burst;
+ chacha_poly_data->dequeue_burst = aesni_mb_dequeue_burst;
chacha_poly_data->feature_flags =
RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
@@ -398,7 +71,5 @@ RTE_INIT(ipsec_mb_register_chacha20_poly1305)
chacha_poly_data->qp_priv_size =
sizeof(struct chacha20_poly1305_qp_data);
chacha_poly_data->session_configure =
- chacha20_poly1305_session_configure;
- chacha_poly_data->session_priv_size =
- sizeof(struct chacha20_poly1305_session);
+ aesni_mb_session_configure;
}
diff --git a/drivers/crypto/ipsec_mb/pmd_chacha_poly_priv.h b/drivers/crypto/ipsec_mb/pmd_chacha_poly_priv.h
index 842f62f5d1..5b04c2edeb 100644
--- a/drivers/crypto/ipsec_mb/pmd_chacha_poly_priv.h
+++ b/drivers/crypto/ipsec_mb/pmd_chacha_poly_priv.h
@@ -7,9 +7,7 @@
#include "ipsec_mb_private.h"
-#define CHACHA20_POLY1305_IV_LENGTH 12
#define CHACHA20_POLY1305_DIGEST_LENGTH 16
-#define CHACHA20_POLY1305_KEY_SIZE 32
static const
struct rte_cryptodev_capabilities chacha20_poly1305_capabilities[] = {
@@ -45,23 +43,6 @@ struct rte_cryptodev_capabilities chacha20_poly1305_capabilities[] = {
uint8_t pmd_driver_id_chacha20_poly1305;
-/** CHACHA20 POLY1305 private session structure */
-struct chacha20_poly1305_session {
- struct {
- uint16_t length;
- uint16_t offset;
- } iv;
- /**< IV parameters */
- uint16_t aad_length;
- /**< AAD length */
- uint16_t req_digest_length;
- /**< Requested digest length */
- uint16_t gen_digest_length;
- /**< Generated digest length */
- uint8_t key[CHACHA20_POLY1305_KEY_SIZE];
- enum ipsec_mb_operation op;
-} __rte_cache_aligned;
-
struct chacha20_poly1305_qp_data {
struct chacha20_poly1305_context_data chacha20_poly1305_ctx_data;
uint8_t temp_digest[CHACHA20_POLY1305_DIGEST_LENGTH];
diff --git a/drivers/crypto/ipsec_mb/pmd_kasumi.c b/drivers/crypto/ipsec_mb/pmd_kasumi.c
index 5db9c523cd..1911ae1a74 100644
--- a/drivers/crypto/ipsec_mb/pmd_kasumi.c
+++ b/drivers/crypto/ipsec_mb/pmd_kasumi.c
@@ -10,403 +10,7 @@
#include <rte_malloc.h>
#include "pmd_kasumi_priv.h"
-
-/** Parse crypto xform chain and set private session parameters. */
-static int
-kasumi_session_configure(IMB_MGR *mgr, void *priv_sess,
- const struct rte_crypto_sym_xform *xform)
-{
- const struct rte_crypto_sym_xform *auth_xform = NULL;
- const struct rte_crypto_sym_xform *cipher_xform = NULL;
- enum ipsec_mb_operation mode;
- struct kasumi_session *sess = (struct kasumi_session *)priv_sess;
- /* Select Crypto operation - hash then cipher / cipher then hash */
- int ret = ipsec_mb_parse_xform(xform, &mode, &auth_xform,
- &cipher_xform, NULL);
-
- if (ret)
- return ret;
-
- if (cipher_xform) {
- /* Only KASUMI F8 supported */
- if (cipher_xform->cipher.algo != RTE_CRYPTO_CIPHER_KASUMI_F8) {
- IPSEC_MB_LOG(ERR, "Unsupported cipher algorithm ");
- return -ENOTSUP;
- }
-
- sess->cipher_iv_offset = cipher_xform->cipher.iv.offset;
- if (cipher_xform->cipher.iv.length != KASUMI_IV_LENGTH) {
- IPSEC_MB_LOG(ERR, "Wrong IV length");
- return -EINVAL;
- }
-
- /* Initialize key */
- IMB_KASUMI_INIT_F8_KEY_SCHED(mgr,
- cipher_xform->cipher.key.data,
- &sess->pKeySched_cipher);
- }
-
- if (auth_xform) {
- /* Only KASUMI F9 supported */
- if (auth_xform->auth.algo != RTE_CRYPTO_AUTH_KASUMI_F9) {
- IPSEC_MB_LOG(ERR, "Unsupported authentication");
- return -ENOTSUP;
- }
-
- if (auth_xform->auth.digest_length != KASUMI_DIGEST_LENGTH) {
- IPSEC_MB_LOG(ERR, "Wrong digest length");
- return -EINVAL;
- }
-
- sess->auth_op = auth_xform->auth.op;
-
- /* Initialize key */
- IMB_KASUMI_INIT_F9_KEY_SCHED(mgr, auth_xform->auth.key.data,
- &sess->pKeySched_hash);
- }
-
- sess->op = mode;
- return ret;
-}
-
-/** Encrypt/decrypt mbufs with same cipher key. */
-static uint8_t
-process_kasumi_cipher_op(struct ipsec_mb_qp *qp, struct rte_crypto_op **ops,
- struct kasumi_session *session, uint8_t num_ops)
-{
- unsigned int i;
- uint8_t processed_ops = 0;
- const void *src[num_ops];
- void *dst[num_ops];
- uint8_t *iv_ptr;
- uint64_t iv[num_ops];
- uint32_t num_bytes[num_ops];
-
- for (i = 0; i < num_ops; i++) {
- src[i] = rte_pktmbuf_mtod(ops[i]->sym->m_src, uint8_t *)
- + (ops[i]->sym->cipher.data.offset >> 3);
- dst[i] = ops[i]->sym->m_dst
- ? rte_pktmbuf_mtod(ops[i]->sym->m_dst, uint8_t *)
- + (ops[i]->sym->cipher.data.offset >> 3)
- : rte_pktmbuf_mtod(ops[i]->sym->m_src, uint8_t *)
- + (ops[i]->sym->cipher.data.offset >> 3);
- iv_ptr = rte_crypto_op_ctod_offset(ops[i], uint8_t *,
- session->cipher_iv_offset);
- iv[i] = *((uint64_t *)(iv_ptr));
- num_bytes[i] = ops[i]->sym->cipher.data.length >> 3;
-
- processed_ops++;
- }
-
- if (processed_ops != 0)
- IMB_KASUMI_F8_N_BUFFER(qp->mb_mgr, &session->pKeySched_cipher,
- iv, src, dst, num_bytes,
- processed_ops);
-
- return processed_ops;
-}
-
-/** Encrypt/decrypt mbuf (bit level function). */
-static uint8_t
-process_kasumi_cipher_op_bit(struct ipsec_mb_qp *qp, struct rte_crypto_op *op,
- struct kasumi_session *session)
-{
- uint8_t *src, *dst;
- uint8_t *iv_ptr;
- uint64_t iv;
- uint32_t length_in_bits, offset_in_bits;
-
- offset_in_bits = op->sym->cipher.data.offset;
- src = rte_pktmbuf_mtod(op->sym->m_src, uint8_t *);
- if (op->sym->m_dst == NULL)
- dst = src;
- else
- dst = rte_pktmbuf_mtod(op->sym->m_dst, uint8_t *);
- iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
- session->cipher_iv_offset);
- iv = *((uint64_t *)(iv_ptr));
- length_in_bits = op->sym->cipher.data.length;
-
- IMB_KASUMI_F8_1_BUFFER_BIT(qp->mb_mgr, &session->pKeySched_cipher, iv,
- src, dst, length_in_bits, offset_in_bits);
-
- return 1;
-}
-
-/** Generate/verify hash from mbufs with same hash key. */
-static int
-process_kasumi_hash_op(struct ipsec_mb_qp *qp, struct rte_crypto_op **ops,
- struct kasumi_session *session, uint8_t num_ops)
-{
- unsigned int i;
- uint8_t processed_ops = 0;
- uint8_t *src, *dst;
- uint32_t length_in_bits;
- uint32_t num_bytes;
- struct kasumi_qp_data *qp_data = ipsec_mb_get_qp_private_data(qp);
-
- for (i = 0; i < num_ops; i++) {
- /* Data must be byte aligned */
- if ((ops[i]->sym->auth.data.offset % BYTE_LEN) != 0) {
- ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
- IPSEC_MB_LOG(ERR, "Invalid Offset");
- break;
- }
-
- length_in_bits = ops[i]->sym->auth.data.length;
-
- src = rte_pktmbuf_mtod(ops[i]->sym->m_src, uint8_t *)
- + (ops[i]->sym->auth.data.offset >> 3);
- /* Direction from next bit after end of message */
- num_bytes = length_in_bits >> 3;
-
- if (session->auth_op == RTE_CRYPTO_AUTH_OP_VERIFY) {
- dst = qp_data->temp_digest;
- IMB_KASUMI_F9_1_BUFFER(qp->mb_mgr,
- &session->pKeySched_hash, src,
- num_bytes, dst);
-
- /* Verify digest. */
- if (memcmp(dst, ops[i]->sym->auth.digest.data,
- KASUMI_DIGEST_LENGTH)
- != 0)
- ops[i]->status
- = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
- } else {
- dst = ops[i]->sym->auth.digest.data;
-
- IMB_KASUMI_F9_1_BUFFER(qp->mb_mgr,
- &session->pKeySched_hash, src,
- num_bytes, dst);
- }
- processed_ops++;
- }
-
- return processed_ops;
-}
-
-/** Process a batch of crypto ops which shares the same session. */
-static int
-process_ops(struct rte_crypto_op **ops, struct kasumi_session *session,
- struct ipsec_mb_qp *qp, uint8_t num_ops)
-{
- unsigned int i;
- unsigned int processed_ops;
-
- switch (session->op) {
- case IPSEC_MB_OP_ENCRYPT_ONLY:
- case IPSEC_MB_OP_DECRYPT_ONLY:
- processed_ops
- = process_kasumi_cipher_op(qp, ops, session, num_ops);
- break;
- case IPSEC_MB_OP_HASH_GEN_ONLY:
- case IPSEC_MB_OP_HASH_VERIFY_ONLY:
- processed_ops
- = process_kasumi_hash_op(qp, ops, session, num_ops);
- break;
- case IPSEC_MB_OP_ENCRYPT_THEN_HASH_GEN:
- case IPSEC_MB_OP_DECRYPT_THEN_HASH_VERIFY:
- processed_ops
- = process_kasumi_cipher_op(qp, ops, session, num_ops);
- process_kasumi_hash_op(qp, ops, session, processed_ops);
- break;
- case IPSEC_MB_OP_HASH_VERIFY_THEN_DECRYPT:
- case IPSEC_MB_OP_HASH_GEN_THEN_ENCRYPT:
- processed_ops
- = process_kasumi_hash_op(qp, ops, session, num_ops);
- process_kasumi_cipher_op(qp, ops, session, processed_ops);
- break;
- default:
- /* Operation not supported. */
- processed_ops = 0;
- }
-
- for (i = 0; i < num_ops; i++) {
- /*
- * If there was no error/authentication failure,
- * change status to successful.
- */
- if (ops[i]->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
- ops[i]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
- /* Free session if a session-less crypto op. */
- if (ops[i]->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
- memset(session, 0, sizeof(struct kasumi_session));
- rte_mempool_put(qp->sess_mp, ops[i]->sym->session);
- ops[i]->sym->session = NULL;
- }
- }
- return processed_ops;
-}
-
-/** Process a crypto op with length/offset in bits. */
-static int
-process_op_bit(struct rte_crypto_op *op, struct kasumi_session *session,
- struct ipsec_mb_qp *qp)
-{
- unsigned int processed_op;
-
- switch (session->op) {
- /* case KASUMI_OP_ONLY_CIPHER: */
- case IPSEC_MB_OP_ENCRYPT_ONLY:
- case IPSEC_MB_OP_DECRYPT_ONLY:
- processed_op = process_kasumi_cipher_op_bit(qp, op, session);
- break;
- /* case KASUMI_OP_ONLY_AUTH: */
- case IPSEC_MB_OP_HASH_GEN_ONLY:
- case IPSEC_MB_OP_HASH_VERIFY_ONLY:
- processed_op = process_kasumi_hash_op(qp, &op, session, 1);
- break;
- /* case KASUMI_OP_CIPHER_AUTH: */
- case IPSEC_MB_OP_ENCRYPT_THEN_HASH_GEN:
- processed_op = process_kasumi_cipher_op_bit(qp, op, session);
- if (processed_op == 1)
- process_kasumi_hash_op(qp, &op, session, 1);
- break;
- /* case KASUMI_OP_AUTH_CIPHER: */
- case IPSEC_MB_OP_HASH_VERIFY_THEN_DECRYPT:
- processed_op = process_kasumi_hash_op(qp, &op, session, 1);
- if (processed_op == 1)
- process_kasumi_cipher_op_bit(qp, op, session);
- break;
- default:
- /* Operation not supported. */
- processed_op = 0;
- }
-
- /*
- * If there was no error/authentication failure,
- * change status to successful.
- */
- if (op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
- op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
-
- /* Free session if a session-less crypto op. */
- if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
- memset(CRYPTODEV_GET_SYM_SESS_PRIV(op->sym->session), 0,
- sizeof(struct kasumi_session));
- rte_mempool_put(qp->sess_mp, (void *)op->sym->session);
- op->sym->session = NULL;
- }
- return processed_op;
-}
-
-static uint16_t
-kasumi_pmd_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
- uint16_t nb_ops)
-{
- struct rte_crypto_op *c_ops[nb_ops];
- struct rte_crypto_op *curr_c_op = NULL;
-
- struct kasumi_session *prev_sess = NULL, *curr_sess = NULL;
- struct ipsec_mb_qp *qp = queue_pair;
- unsigned int i;
- uint8_t burst_size = 0;
- uint8_t processed_ops;
- unsigned int nb_dequeued;
-
- nb_dequeued = rte_ring_dequeue_burst(qp->ingress_queue,
- (void **)ops, nb_ops, NULL);
- for (i = 0; i < nb_dequeued; i++) {
- curr_c_op = ops[i];
-
-#ifdef RTE_LIBRTE_PMD_KASUMI_DEBUG
- if (!rte_pktmbuf_is_contiguous(curr_c_op->sym->m_src)
- || (curr_c_op->sym->m_dst != NULL
- && !rte_pktmbuf_is_contiguous(
- curr_c_op->sym->m_dst))) {
- IPSEC_MB_LOG(ERR,
- "PMD supports only contiguous mbufs, op (%p) provides noncontiguous mbuf as source/destination buffer.",
- curr_c_op);
- curr_c_op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
- break;
- }
-#endif
-
- /* Set status as enqueued (not processed yet) by default. */
- curr_c_op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
-
- curr_sess = (struct kasumi_session *)
- ipsec_mb_get_session_private(qp, curr_c_op);
- if (unlikely(curr_sess == NULL
- || curr_sess->op == IPSEC_MB_OP_NOT_SUPPORTED)) {
- curr_c_op->status
- = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
- break;
- }
-
- /* If length/offset is at bit-level, process this buffer alone.
- */
- if (((curr_c_op->sym->cipher.data.length % BYTE_LEN) != 0)
- || ((ops[i]->sym->cipher.data.offset % BYTE_LEN) != 0)) {
- /* Process the ops of the previous session. */
- if (prev_sess != NULL) {
- processed_ops = process_ops(c_ops, prev_sess,
- qp, burst_size);
- if (processed_ops < burst_size) {
- burst_size = 0;
- break;
- }
-
- burst_size = 0;
- prev_sess = NULL;
- }
-
- processed_ops = process_op_bit(curr_c_op,
- curr_sess, qp);
- if (processed_ops != 1)
- break;
-
- continue;
- }
-
- /* Batch ops that share the same session. */
- if (prev_sess == NULL) {
- prev_sess = curr_sess;
- c_ops[burst_size++] = curr_c_op;
- } else if (curr_sess == prev_sess) {
- c_ops[burst_size++] = curr_c_op;
- /*
- * When there are enough ops to process in a batch,
- * process them, and start a new batch.
- */
- if (burst_size == KASUMI_MAX_BURST) {
- processed_ops = process_ops(c_ops, prev_sess,
- qp, burst_size);
- if (processed_ops < burst_size) {
- burst_size = 0;
- break;
- }
-
- burst_size = 0;
- prev_sess = NULL;
- }
- } else {
- /*
- * Different session, process the ops
- * of the previous session.
- */
- processed_ops = process_ops(c_ops, prev_sess, qp,
- burst_size);
- if (processed_ops < burst_size) {
- burst_size = 0;
- break;
- }
-
- burst_size = 0;
- prev_sess = curr_sess;
-
- c_ops[burst_size++] = curr_c_op;
- }
- }
-
- if (burst_size != 0) {
- /* Process the crypto ops of the last session. */
- processed_ops = process_ops(c_ops, prev_sess, qp, burst_size);
- }
-
- qp->stats.dequeued_count += i;
- return i;
-}
+#include "pmd_aesni_mb_priv.h"
struct rte_cryptodev_ops kasumi_pmd_ops = {
.dev_configure = ipsec_mb_config,
@@ -457,7 +61,7 @@ RTE_INIT(ipsec_mb_register_kasumi)
= &ipsec_mb_pmds[IPSEC_MB_PMD_TYPE_KASUMI];
kasumi_data->caps = kasumi_capabilities;
- kasumi_data->dequeue_burst = kasumi_pmd_dequeue_burst;
+ kasumi_data->dequeue_burst = aesni_mb_dequeue_burst;
kasumi_data->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO
| RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING
| RTE_CRYPTODEV_FF_NON_BYTE_ALIGNED_DATA
@@ -467,6 +71,5 @@ RTE_INIT(ipsec_mb_register_kasumi)
kasumi_data->internals_priv_size = 0;
kasumi_data->ops = &kasumi_pmd_ops;
kasumi_data->qp_priv_size = sizeof(struct kasumi_qp_data);
- kasumi_data->session_configure = kasumi_session_configure;
- kasumi_data->session_priv_size = sizeof(struct kasumi_session);
+ kasumi_data->session_configure = aesni_mb_session_configure;
}
diff --git a/drivers/crypto/ipsec_mb/pmd_kasumi_priv.h b/drivers/crypto/ipsec_mb/pmd_kasumi_priv.h
index 8db1d1cc5b..fc962115ff 100644
--- a/drivers/crypto/ipsec_mb/pmd_kasumi_priv.h
+++ b/drivers/crypto/ipsec_mb/pmd_kasumi_priv.h
@@ -9,8 +9,6 @@
#define KASUMI_KEY_LENGTH 16
#define KASUMI_IV_LENGTH 8
-#define KASUMI_MAX_BURST 4
-#define BYTE_LEN 8
#define KASUMI_DIGEST_LENGTH 4
uint8_t pmd_driver_id_kasumi;
@@ -60,16 +58,6 @@ static const struct rte_cryptodev_capabilities kasumi_capabilities[] = {
RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
};
-/** KASUMI private session structure */
-struct kasumi_session {
- /* Keys have to be 16-byte aligned */
- kasumi_key_sched_t pKeySched_cipher;
- kasumi_key_sched_t pKeySched_hash;
- enum ipsec_mb_operation op;
- enum rte_crypto_auth_operation auth_op;
- uint16_t cipher_iv_offset;
-} __rte_cache_aligned;
-
struct kasumi_qp_data {
uint8_t temp_digest[KASUMI_DIGEST_LENGTH];
/* *< Buffers used to store the digest generated
diff --git a/drivers/crypto/ipsec_mb/pmd_snow3g.c b/drivers/crypto/ipsec_mb/pmd_snow3g.c
index e64df1a462..4adbc25913 100644
--- a/drivers/crypto/ipsec_mb/pmd_snow3g.c
+++ b/drivers/crypto/ipsec_mb/pmd_snow3g.c
@@ -3,539 +3,7 @@
*/
#include "pmd_snow3g_priv.h"
-
-/** Parse crypto xform chain and set private session parameters. */
-static int
-snow3g_session_configure(IMB_MGR *mgr, void *priv_sess,
- const struct rte_crypto_sym_xform *xform)
-{
- struct snow3g_session *sess = (struct snow3g_session *)priv_sess;
- const struct rte_crypto_sym_xform *auth_xform = NULL;
- const struct rte_crypto_sym_xform *cipher_xform = NULL;
- enum ipsec_mb_operation mode;
-
- /* Select Crypto operation - hash then cipher / cipher then hash */
- int ret = ipsec_mb_parse_xform(xform, &mode, &auth_xform,
- &cipher_xform, NULL);
- if (ret)
- return ret;
-
- if (cipher_xform) {
- /* Only SNOW 3G UEA2 supported */
- if (cipher_xform->cipher.algo != RTE_CRYPTO_CIPHER_SNOW3G_UEA2)
- return -ENOTSUP;
-
- if (cipher_xform->cipher.iv.length != SNOW3G_IV_LENGTH) {
- IPSEC_MB_LOG(ERR, "Wrong IV length");
- return -EINVAL;
- }
- if (cipher_xform->cipher.key.length > SNOW3G_MAX_KEY_SIZE) {
- IPSEC_MB_LOG(ERR, "Not enough memory to store the key");
- return -ENOMEM;
- }
-
- sess->cipher_iv_offset = cipher_xform->cipher.iv.offset;
-
- /* Initialize key */
- IMB_SNOW3G_INIT_KEY_SCHED(mgr, cipher_xform->cipher.key.data,
- &sess->pKeySched_cipher);
- }
-
- if (auth_xform) {
- /* Only SNOW 3G UIA2 supported */
- if (auth_xform->auth.algo != RTE_CRYPTO_AUTH_SNOW3G_UIA2)
- return -ENOTSUP;
-
- if (auth_xform->auth.digest_length != SNOW3G_DIGEST_LENGTH) {
- IPSEC_MB_LOG(ERR, "Wrong digest length");
- return -EINVAL;
- }
- if (auth_xform->auth.key.length > SNOW3G_MAX_KEY_SIZE) {
- IPSEC_MB_LOG(ERR, "Not enough memory to store the key");
- return -ENOMEM;
- }
-
- sess->auth_op = auth_xform->auth.op;
-
- if (auth_xform->auth.iv.length != SNOW3G_IV_LENGTH) {
- IPSEC_MB_LOG(ERR, "Wrong IV length");
- return -EINVAL;
- }
- sess->auth_iv_offset = auth_xform->auth.iv.offset;
-
- /* Initialize key */
- IMB_SNOW3G_INIT_KEY_SCHED(mgr, auth_xform->auth.key.data,
- &sess->pKeySched_hash);
- }
-
- sess->op = mode;
-
- return 0;
-}
-
-/** Check if conditions are met for digest-appended operations */
-static uint8_t *
-snow3g_digest_appended_in_src(struct rte_crypto_op *op)
-{
- unsigned int auth_size, cipher_size;
-
- auth_size = (op->sym->auth.data.offset >> 3) +
- (op->sym->auth.data.length >> 3);
- cipher_size = (op->sym->cipher.data.offset >> 3) +
- (op->sym->cipher.data.length >> 3);
-
- if (auth_size < cipher_size)
- return rte_pktmbuf_mtod_offset(op->sym->m_src,
- uint8_t *, auth_size);
-
- return NULL;
-}
-
-/** Encrypt/decrypt mbufs with same cipher key. */
-static uint8_t
-process_snow3g_cipher_op(struct ipsec_mb_qp *qp, struct rte_crypto_op **ops,
- struct snow3g_session *session,
- uint8_t num_ops)
-{
- uint32_t i;
- uint8_t processed_ops = 0;
- const void *src[SNOW3G_MAX_BURST] = {NULL};
- void *dst[SNOW3G_MAX_BURST] = {NULL};
- uint8_t *digest_appended[SNOW3G_MAX_BURST] = {NULL};
- const void *iv[SNOW3G_MAX_BURST] = {NULL};
- uint32_t num_bytes[SNOW3G_MAX_BURST] = {0};
- uint32_t cipher_off, cipher_len;
- int unencrypted_bytes = 0;
-
- for (i = 0; i < num_ops; i++) {
-
- cipher_off = ops[i]->sym->cipher.data.offset >> 3;
- cipher_len = ops[i]->sym->cipher.data.length >> 3;
- src[i] = rte_pktmbuf_mtod_offset(
- ops[i]->sym->m_src, uint8_t *, cipher_off);
-
- /* If out-of-place operation */
- if (ops[i]->sym->m_dst &&
- ops[i]->sym->m_src != ops[i]->sym->m_dst) {
- dst[i] = rte_pktmbuf_mtod_offset(
- ops[i]->sym->m_dst, uint8_t *, cipher_off);
-
- /* In case of out-of-place, auth-cipher operation
- * with partial encryption of the digest, copy
- * the remaining, unencrypted part.
- */
- if (session->op == IPSEC_MB_OP_HASH_VERIFY_THEN_DECRYPT
- || session->op == IPSEC_MB_OP_HASH_GEN_THEN_ENCRYPT)
- unencrypted_bytes =
- (ops[i]->sym->auth.data.offset >> 3) +
- (ops[i]->sym->auth.data.length >> 3) +
- (SNOW3G_DIGEST_LENGTH) -
- cipher_off - cipher_len;
- if (unencrypted_bytes > 0)
- rte_memcpy(
- rte_pktmbuf_mtod_offset(
- ops[i]->sym->m_dst, uint8_t *,
- cipher_off + cipher_len),
- rte_pktmbuf_mtod_offset(
- ops[i]->sym->m_src, uint8_t *,
- cipher_off + cipher_len),
- unencrypted_bytes);
- } else
- dst[i] = rte_pktmbuf_mtod_offset(ops[i]->sym->m_src,
- uint8_t *, cipher_off);
-
- iv[i] = rte_crypto_op_ctod_offset(ops[i], uint8_t *,
- session->cipher_iv_offset);
- num_bytes[i] = cipher_len;
- processed_ops++;
- }
-
- IMB_SNOW3G_F8_N_BUFFER(qp->mb_mgr, &session->pKeySched_cipher, iv,
- src, dst, num_bytes, processed_ops);
-
- /* Take care of the raw digest data in src buffer */
- for (i = 0; i < num_ops; i++) {
- if ((session->op == IPSEC_MB_OP_HASH_VERIFY_THEN_DECRYPT ||
- session->op == IPSEC_MB_OP_HASH_GEN_THEN_ENCRYPT) &&
- ops[i]->sym->m_dst != NULL) {
- digest_appended[i] =
- snow3g_digest_appended_in_src(ops[i]);
- /* Clear unencrypted digest from
- * the src buffer
- */
- if (digest_appended[i] != NULL)
- memset(digest_appended[i],
- 0, SNOW3G_DIGEST_LENGTH);
- }
- }
- return processed_ops;
-}
-
-/** Encrypt/decrypt mbuf (bit level function). */
-static uint8_t
-process_snow3g_cipher_op_bit(struct ipsec_mb_qp *qp,
- struct rte_crypto_op *op,
- struct snow3g_session *session)
-{
- uint8_t *src, *dst;
- uint8_t *iv;
- uint32_t length_in_bits, offset_in_bits;
- int unencrypted_bytes = 0;
-
- offset_in_bits = op->sym->cipher.data.offset;
- src = rte_pktmbuf_mtod(op->sym->m_src, uint8_t *);
- if (op->sym->m_dst == NULL) {
- op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
- IPSEC_MB_LOG(ERR, "bit-level in-place not supported\n");
- return 0;
- }
- length_in_bits = op->sym->cipher.data.length;
- dst = rte_pktmbuf_mtod(op->sym->m_dst, uint8_t *);
- /* In case of out-of-place, auth-cipher operation
- * with partial encryption of the digest, copy
- * the remaining, unencrypted part.
- */
- if (session->op == IPSEC_MB_OP_HASH_VERIFY_THEN_DECRYPT ||
- session->op == IPSEC_MB_OP_HASH_GEN_THEN_ENCRYPT)
- unencrypted_bytes =
- (op->sym->auth.data.offset >> 3) +
- (op->sym->auth.data.length >> 3) +
- (SNOW3G_DIGEST_LENGTH) -
- (offset_in_bits >> 3) -
- (length_in_bits >> 3);
- if (unencrypted_bytes > 0)
- rte_memcpy(
- rte_pktmbuf_mtod_offset(
- op->sym->m_dst, uint8_t *,
- (length_in_bits >> 3)),
- rte_pktmbuf_mtod_offset(
- op->sym->m_src, uint8_t *,
- (length_in_bits >> 3)),
- unencrypted_bytes);
-
- iv = rte_crypto_op_ctod_offset(op, uint8_t *,
- session->cipher_iv_offset);
-
- IMB_SNOW3G_F8_1_BUFFER_BIT(qp->mb_mgr, &session->pKeySched_cipher, iv,
- src, dst, length_in_bits, offset_in_bits);
-
- return 1;
-}
-
-/** Generate/verify hash from mbufs with same hash key. */
-static int
-process_snow3g_hash_op(struct ipsec_mb_qp *qp, struct rte_crypto_op **ops,
- struct snow3g_session *session,
- uint8_t num_ops)
-{
- uint32_t i;
- uint8_t processed_ops = 0;
- uint8_t *src, *dst;
- uint32_t length_in_bits;
- uint8_t *iv;
- uint8_t digest_appended = 0;
- struct snow3g_qp_data *qp_data = ipsec_mb_get_qp_private_data(qp);
-
- for (i = 0; i < num_ops; i++) {
- /* Data must be byte aligned */
- if ((ops[i]->sym->auth.data.offset % BYTE_LEN) != 0) {
- ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
- IPSEC_MB_LOG(ERR, "Offset");
- break;
- }
-
- dst = NULL;
-
- length_in_bits = ops[i]->sym->auth.data.length;
-
- src = rte_pktmbuf_mtod(ops[i]->sym->m_src, uint8_t *) +
- (ops[i]->sym->auth.data.offset >> 3);
- iv = rte_crypto_op_ctod_offset(ops[i], uint8_t *,
- session->auth_iv_offset);
-
- if (session->auth_op == RTE_CRYPTO_AUTH_OP_VERIFY) {
- dst = qp_data->temp_digest;
- /* Handle auth cipher verify oop case*/
- if ((session->op ==
- IPSEC_MB_OP_ENCRYPT_THEN_HASH_GEN ||
- session->op ==
- IPSEC_MB_OP_DECRYPT_THEN_HASH_VERIFY) &&
- ops[i]->sym->m_dst != NULL)
- src = rte_pktmbuf_mtod_offset(
- ops[i]->sym->m_dst, uint8_t *,
- ops[i]->sym->auth.data.offset >> 3);
-
- IMB_SNOW3G_F9_1_BUFFER(qp->mb_mgr,
- &session->pKeySched_hash,
- iv, src, length_in_bits, dst);
- /* Verify digest. */
- if (memcmp(dst, ops[i]->sym->auth.digest.data,
- SNOW3G_DIGEST_LENGTH) != 0)
- ops[i]->status =
- RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
- } else {
- if (session->op ==
- IPSEC_MB_OP_HASH_VERIFY_THEN_DECRYPT ||
- session->op ==
- IPSEC_MB_OP_HASH_GEN_THEN_ENCRYPT)
- dst = snow3g_digest_appended_in_src(ops[i]);
-
- if (dst != NULL)
- digest_appended = 1;
- else
- dst = ops[i]->sym->auth.digest.data;
-
- IMB_SNOW3G_F9_1_BUFFER(qp->mb_mgr,
- &session->pKeySched_hash,
- iv, src, length_in_bits, dst);
-
- /* Copy back digest from src to auth.digest.data */
- if (digest_appended)
- rte_memcpy(ops[i]->sym->auth.digest.data,
- dst, SNOW3G_DIGEST_LENGTH);
- }
- processed_ops++;
- }
-
- return processed_ops;
-}
-
-/** Process a batch of crypto ops which shares the same session. */
-static int
-process_ops(struct rte_crypto_op **ops, struct snow3g_session *session,
- struct ipsec_mb_qp *qp, uint8_t num_ops)
-{
- uint32_t i;
- uint32_t processed_ops;
-
-#ifdef RTE_LIBRTE_PMD_SNOW3G_DEBUG
- for (i = 0; i < num_ops; i++) {
- if (!rte_pktmbuf_is_contiguous(ops[i]->sym->m_src) ||
- (ops[i]->sym->m_dst != NULL &&
- !rte_pktmbuf_is_contiguous(
- ops[i]->sym->m_dst))) {
- IPSEC_MB_LOG(ERR,
- "PMD supports only contiguous mbufs, "
- "op (%p) provides noncontiguous mbuf as "
- "source/destination buffer.\n", ops[i]);
- ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
- return 0;
- }
- }
-#endif
-
- switch (session->op) {
- case IPSEC_MB_OP_ENCRYPT_ONLY:
- case IPSEC_MB_OP_DECRYPT_ONLY:
- processed_ops = process_snow3g_cipher_op(qp, ops,
- session, num_ops);
- break;
- case IPSEC_MB_OP_HASH_GEN_ONLY:
- case IPSEC_MB_OP_HASH_VERIFY_ONLY:
- processed_ops = process_snow3g_hash_op(qp, ops, session,
- num_ops);
- break;
- case IPSEC_MB_OP_ENCRYPT_THEN_HASH_GEN:
- case IPSEC_MB_OP_DECRYPT_THEN_HASH_VERIFY:
- processed_ops = process_snow3g_cipher_op(qp, ops, session,
- num_ops);
- process_snow3g_hash_op(qp, ops, session, processed_ops);
- break;
- case IPSEC_MB_OP_HASH_VERIFY_THEN_DECRYPT:
- case IPSEC_MB_OP_HASH_GEN_THEN_ENCRYPT:
- processed_ops = process_snow3g_hash_op(qp, ops, session,
- num_ops);
- process_snow3g_cipher_op(qp, ops, session, processed_ops);
- break;
- default:
- /* Operation not supported. */
- processed_ops = 0;
- }
-
- for (i = 0; i < num_ops; i++) {
- /*
- * If there was no error/authentication failure,
- * change status to successful.
- */
- if (ops[i]->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
- ops[i]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
- /* Free session if a session-less crypto op. */
- if (ops[i]->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
- memset(session, 0, sizeof(struct snow3g_session));
- rte_mempool_put(qp->sess_mp, ops[i]->sym->session);
- ops[i]->sym->session = NULL;
- }
- }
- return processed_ops;
-}
-
-/** Process a crypto op with length/offset in bits. */
-static int
-process_op_bit(struct rte_crypto_op *op, struct snow3g_session *session,
- struct ipsec_mb_qp *qp)
-{
- unsigned int processed_op;
- int ret;
-
- switch (session->op) {
- case IPSEC_MB_OP_ENCRYPT_ONLY:
- case IPSEC_MB_OP_DECRYPT_ONLY:
-
- processed_op = process_snow3g_cipher_op_bit(qp, op,
- session);
- break;
- case IPSEC_MB_OP_HASH_GEN_ONLY:
- case IPSEC_MB_OP_HASH_VERIFY_ONLY:
- processed_op = process_snow3g_hash_op(qp, &op, session, 1);
- break;
- case IPSEC_MB_OP_ENCRYPT_THEN_HASH_GEN:
- case IPSEC_MB_OP_DECRYPT_THEN_HASH_VERIFY:
- processed_op = process_snow3g_cipher_op_bit(qp, op, session);
- if (processed_op == 1)
- process_snow3g_hash_op(qp, &op, session, 1);
- break;
- case IPSEC_MB_OP_HASH_VERIFY_THEN_DECRYPT:
- case IPSEC_MB_OP_HASH_GEN_THEN_ENCRYPT:
- processed_op = process_snow3g_hash_op(qp, &op, session, 1);
- if (processed_op == 1)
- process_snow3g_cipher_op_bit(qp, op, session);
- break;
- default:
- /* Operation not supported. */
- processed_op = 0;
- }
-
- /*
- * If there was no error/authentication failure,
- * change status to successful.
- */
- if (op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
- op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
-
- /* Free session if a session-less crypto op. */
- if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
- memset(CRYPTODEV_GET_SYM_SESS_PRIV(op->sym->session), 0,
- sizeof(struct snow3g_session));
- rte_mempool_put(qp->sess_mp, (void *)op->sym->session);
- op->sym->session = NULL;
- }
-
- if (unlikely(processed_op != 1))
- return 0;
-
- ret = rte_ring_enqueue(qp->ingress_queue, op);
- if (ret != 0)
- return ret;
-
- return 1;
-}
-
-static uint16_t
-snow3g_pmd_dequeue_burst(void *queue_pair,
- struct rte_crypto_op **ops, uint16_t nb_ops)
-{
- struct ipsec_mb_qp *qp = queue_pair;
- struct rte_crypto_op *c_ops[SNOW3G_MAX_BURST];
- struct rte_crypto_op *curr_c_op;
-
- struct snow3g_session *prev_sess = NULL, *curr_sess = NULL;
- uint32_t i;
- uint8_t burst_size = 0;
- uint8_t processed_ops;
- uint32_t nb_dequeued;
-
- nb_dequeued = rte_ring_dequeue_burst(qp->ingress_queue,
- (void **)ops, nb_ops, NULL);
-
- for (i = 0; i < nb_dequeued; i++) {
- curr_c_op = ops[i];
-
- /* Set status as enqueued (not processed yet) by default. */
- curr_c_op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
-
- curr_sess = ipsec_mb_get_session_private(qp, curr_c_op);
- if (unlikely(curr_sess == NULL ||
- curr_sess->op == IPSEC_MB_OP_NOT_SUPPORTED)) {
- curr_c_op->status =
- RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
- break;
- }
-
- /* If length/offset is at bit-level,
- * process this buffer alone.
- */
- if (((curr_c_op->sym->cipher.data.length % BYTE_LEN) != 0)
- || ((curr_c_op->sym->cipher.data.offset
- % BYTE_LEN) != 0)) {
- /* Process the ops of the previous session. */
- if (prev_sess != NULL) {
- processed_ops = process_ops(c_ops, prev_sess,
- qp, burst_size);
- if (processed_ops < burst_size) {
- burst_size = 0;
- break;
- }
-
- burst_size = 0;
- prev_sess = NULL;
- }
-
- processed_ops = process_op_bit(curr_c_op, curr_sess, qp);
- if (processed_ops != 1)
- break;
-
- continue;
- }
-
- /* Batch ops that share the same session. */
- if (prev_sess == NULL) {
- prev_sess = curr_sess;
- c_ops[burst_size++] = curr_c_op;
- } else if (curr_sess == prev_sess) {
- c_ops[burst_size++] = curr_c_op;
- /*
- * When there are enough ops to process in a batch,
- * process them, and start a new batch.
- */
- if (burst_size == SNOW3G_MAX_BURST) {
- processed_ops = process_ops(c_ops, prev_sess,
- qp, burst_size);
- if (processed_ops < burst_size) {
- burst_size = 0;
- break;
- }
-
- burst_size = 0;
- prev_sess = NULL;
- }
- } else {
- /*
- * Different session, process the ops
- * of the previous session.
- */
- processed_ops = process_ops(c_ops, prev_sess,
- qp, burst_size);
- if (processed_ops < burst_size) {
- burst_size = 0;
- break;
- }
-
- burst_size = 0;
- prev_sess = curr_sess;
-
- c_ops[burst_size++] = curr_c_op;
- }
- }
-
- if (burst_size != 0) {
- /* Process the crypto ops of the last session. */
- processed_ops = process_ops(c_ops, prev_sess,
- qp, burst_size);
- }
-
- qp->stats.dequeued_count += i;
- return i;
-}
+#include "pmd_aesni_mb_priv.h"
struct rte_cryptodev_ops snow3g_pmd_ops = {
.dev_configure = ipsec_mb_config,
@@ -586,7 +54,7 @@ RTE_INIT(ipsec_mb_register_snow3g)
= &ipsec_mb_pmds[IPSEC_MB_PMD_TYPE_SNOW3G];
snow3g_data->caps = snow3g_capabilities;
- snow3g_data->dequeue_burst = snow3g_pmd_dequeue_burst;
+ snow3g_data->dequeue_burst = aesni_mb_dequeue_burst;
snow3g_data->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
RTE_CRYPTODEV_FF_NON_BYTE_ALIGNED_DATA |
@@ -596,6 +64,5 @@ RTE_INIT(ipsec_mb_register_snow3g)
snow3g_data->internals_priv_size = 0;
snow3g_data->ops = &snow3g_pmd_ops;
snow3g_data->qp_priv_size = sizeof(struct snow3g_qp_data);
- snow3g_data->session_configure = snow3g_session_configure;
- snow3g_data->session_priv_size = sizeof(struct snow3g_session);
+ snow3g_data->session_configure = aesni_mb_session_configure;
}
diff --git a/drivers/crypto/ipsec_mb/pmd_snow3g_priv.h b/drivers/crypto/ipsec_mb/pmd_snow3g_priv.h
index ca1ce7f9d6..75c9a8e525 100644
--- a/drivers/crypto/ipsec_mb/pmd_snow3g_priv.h
+++ b/drivers/crypto/ipsec_mb/pmd_snow3g_priv.h
@@ -8,10 +8,7 @@
#include "ipsec_mb_private.h"
#define SNOW3G_IV_LENGTH 16
-#define SNOW3G_MAX_BURST 8
-#define BYTE_LEN 8
#define SNOW3G_DIGEST_LENGTH 4
-#define SNOW3G_MAX_KEY_SIZE 128
uint8_t pmd_driver_id_snow3g;
@@ -64,16 +61,6 @@ static const struct rte_cryptodev_capabilities snow3g_capabilities[] = {
RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
};
-/** SNOW 3G private session structure */
-struct snow3g_session {
- enum ipsec_mb_operation op;
- enum rte_crypto_auth_operation auth_op;
- snow3g_key_schedule_t pKeySched_cipher;
- snow3g_key_schedule_t pKeySched_hash;
- uint16_t cipher_iv_offset;
- uint16_t auth_iv_offset;
-} __rte_cache_aligned;
-
struct snow3g_qp_data {
uint8_t temp_digest[SNOW3G_DIGEST_LENGTH];
/**< Buffer used to store the digest generated
diff --git a/drivers/crypto/ipsec_mb/pmd_zuc.c b/drivers/crypto/ipsec_mb/pmd_zuc.c
index 92fd9d1808..04115b7fd5 100644
--- a/drivers/crypto/ipsec_mb/pmd_zuc.c
+++ b/drivers/crypto/ipsec_mb/pmd_zuc.c
@@ -3,341 +3,7 @@
*/
#include "pmd_zuc_priv.h"
-
-/** Parse crypto xform chain and set private session parameters. */
-static int
-zuc_session_configure(__rte_unused IMB_MGR * mgr, void *zuc_sess,
- const struct rte_crypto_sym_xform *xform)
-{
- struct zuc_session *sess = (struct zuc_session *) zuc_sess;
- const struct rte_crypto_sym_xform *auth_xform = NULL;
- const struct rte_crypto_sym_xform *cipher_xform = NULL;
- enum ipsec_mb_operation mode;
- /* Select Crypto operation - hash then cipher / cipher then hash */
- int ret = ipsec_mb_parse_xform(xform, &mode, &auth_xform,
- &cipher_xform, NULL);
-
- if (ret)
- return ret;
-
- if (cipher_xform) {
- /* Only ZUC EEA3 supported */
- if (cipher_xform->cipher.algo != RTE_CRYPTO_CIPHER_ZUC_EEA3)
- return -ENOTSUP;
-
- if (cipher_xform->cipher.iv.length != ZUC_IV_KEY_LENGTH) {
- IPSEC_MB_LOG(ERR, "Wrong IV length");
- return -EINVAL;
- }
- sess->cipher_iv_offset = cipher_xform->cipher.iv.offset;
-
- /* Copy the key */
- memcpy(sess->pKey_cipher, cipher_xform->cipher.key.data,
- ZUC_IV_KEY_LENGTH);
- }
-
- if (auth_xform) {
- /* Only ZUC EIA3 supported */
- if (auth_xform->auth.algo != RTE_CRYPTO_AUTH_ZUC_EIA3)
- return -ENOTSUP;
-
- if (auth_xform->auth.digest_length != ZUC_DIGEST_LENGTH) {
- IPSEC_MB_LOG(ERR, "Wrong digest length");
- return -EINVAL;
- }
-
- sess->auth_op = auth_xform->auth.op;
-
- if (auth_xform->auth.iv.length != ZUC_IV_KEY_LENGTH) {
- IPSEC_MB_LOG(ERR, "Wrong IV length");
- return -EINVAL;
- }
- sess->auth_iv_offset = auth_xform->auth.iv.offset;
-
- /* Copy the key */
- memcpy(sess->pKey_hash, auth_xform->auth.key.data,
- ZUC_IV_KEY_LENGTH);
- }
-
- sess->op = mode;
- return 0;
-}
-
-/** Encrypt/decrypt mbufs. */
-static uint8_t
-process_zuc_cipher_op(struct ipsec_mb_qp *qp, struct rte_crypto_op **ops,
- struct zuc_session **sessions,
- uint8_t num_ops)
-{
- unsigned int i;
- uint8_t processed_ops = 0;
- const void *src[ZUC_MAX_BURST];
- void *dst[ZUC_MAX_BURST];
- const void *iv[ZUC_MAX_BURST];
- uint32_t num_bytes[ZUC_MAX_BURST];
- const void *cipher_keys[ZUC_MAX_BURST];
- struct zuc_session *sess;
-
- for (i = 0; i < num_ops; i++) {
- if (((ops[i]->sym->cipher.data.length % BYTE_LEN) != 0)
- || ((ops[i]->sym->cipher.data.offset
- % BYTE_LEN) != 0)) {
- ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
- IPSEC_MB_LOG(ERR, "Data Length or offset");
- break;
- }
-
- sess = sessions[i];
-
-#ifdef RTE_LIBRTE_PMD_ZUC_DEBUG
- if (!rte_pktmbuf_is_contiguous(ops[i]->sym->m_src) ||
- (ops[i]->sym->m_dst != NULL &&
- !rte_pktmbuf_is_contiguous(
- ops[i]->sym->m_dst))) {
- IPSEC_MB_LOG(ERR, "PMD supports only "
- " contiguous mbufs, op (%p) "
- "provides noncontiguous mbuf "
- "as source/destination buffer.\n",
- "PMD supports only contiguous mbufs, "
- "op (%p) provides noncontiguous mbuf "
- "as source/destination buffer.\n",
- ops[i]);
- ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
- break;
- }
-#endif
-
- src[i] = rte_pktmbuf_mtod(ops[i]->sym->m_src, uint8_t *) +
- (ops[i]->sym->cipher.data.offset >> 3);
- dst[i] = ops[i]->sym->m_dst ?
- rte_pktmbuf_mtod(ops[i]->sym->m_dst, uint8_t *) +
- (ops[i]->sym->cipher.data.offset >> 3) :
- rte_pktmbuf_mtod(ops[i]->sym->m_src, uint8_t *) +
- (ops[i]->sym->cipher.data.offset >> 3);
- iv[i] = rte_crypto_op_ctod_offset(ops[i], uint8_t *,
- sess->cipher_iv_offset);
- num_bytes[i] = ops[i]->sym->cipher.data.length >> 3;
-
- cipher_keys[i] = sess->pKey_cipher;
-
- processed_ops++;
- }
-
- IMB_ZUC_EEA3_N_BUFFER(qp->mb_mgr, (const void **)cipher_keys,
- (const void **)iv, (const void **)src, (void **)dst,
- num_bytes, processed_ops);
-
- return processed_ops;
-}
-
-/** Generate/verify hash from mbufs. */
-static int
-process_zuc_hash_op(struct ipsec_mb_qp *qp, struct rte_crypto_op **ops,
- struct zuc_session **sessions,
- uint8_t num_ops)
-{
- unsigned int i;
- uint8_t processed_ops = 0;
- uint8_t *src[ZUC_MAX_BURST] = { 0 };
- uint32_t *dst[ZUC_MAX_BURST];
- uint32_t length_in_bits[ZUC_MAX_BURST] = { 0 };
- uint8_t *iv[ZUC_MAX_BURST] = { 0 };
- const void *hash_keys[ZUC_MAX_BURST] = { 0 };
- struct zuc_session *sess;
- struct zuc_qp_data *qp_data = ipsec_mb_get_qp_private_data(qp);
-
-
- for (i = 0; i < num_ops; i++) {
- /* Data must be byte aligned */
- if ((ops[i]->sym->auth.data.offset % BYTE_LEN) != 0) {
- ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
- IPSEC_MB_LOG(ERR, "Offset");
- break;
- }
-
- sess = sessions[i];
-
- length_in_bits[i] = ops[i]->sym->auth.data.length;
-
- src[i] = rte_pktmbuf_mtod(ops[i]->sym->m_src, uint8_t *) +
- (ops[i]->sym->auth.data.offset >> 3);
- iv[i] = rte_crypto_op_ctod_offset(ops[i], uint8_t *,
- sess->auth_iv_offset);
-
- hash_keys[i] = sess->pKey_hash;
- if (sess->auth_op == RTE_CRYPTO_AUTH_OP_VERIFY)
- dst[i] = (uint32_t *)qp_data->temp_digest[i];
- else
- dst[i] = (uint32_t *)ops[i]->sym->auth.digest.data;
-
- processed_ops++;
- }
-
- IMB_ZUC_EIA3_N_BUFFER(qp->mb_mgr, (const void **)hash_keys,
- (const void * const *)iv, (const void * const *)src,
- length_in_bits, dst, processed_ops);
-
- /*
- * If tag needs to be verified, compare generated tag
- * with attached tag
- */
- for (i = 0; i < processed_ops; i++)
- if (sessions[i]->auth_op == RTE_CRYPTO_AUTH_OP_VERIFY)
- if (memcmp(dst[i], ops[i]->sym->auth.digest.data,
- ZUC_DIGEST_LENGTH) != 0)
- ops[i]->status =
- RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
-
- return processed_ops;
-}
-
-/** Process a batch of crypto ops which shares the same operation type. */
-static int
-process_ops(struct rte_crypto_op **ops, enum ipsec_mb_operation op_type,
- struct zuc_session **sessions,
- struct ipsec_mb_qp *qp, uint8_t num_ops)
-{
- unsigned int i;
- unsigned int processed_ops = 0;
-
- switch (op_type) {
- case IPSEC_MB_OP_ENCRYPT_ONLY:
- case IPSEC_MB_OP_DECRYPT_ONLY:
- processed_ops = process_zuc_cipher_op(qp, ops,
- sessions, num_ops);
- break;
- case IPSEC_MB_OP_HASH_GEN_ONLY:
- case IPSEC_MB_OP_HASH_VERIFY_ONLY:
- processed_ops = process_zuc_hash_op(qp, ops, sessions,
- num_ops);
- break;
- case IPSEC_MB_OP_ENCRYPT_THEN_HASH_GEN:
- case IPSEC_MB_OP_DECRYPT_THEN_HASH_VERIFY:
- processed_ops = process_zuc_cipher_op(qp, ops, sessions,
- num_ops);
- process_zuc_hash_op(qp, ops, sessions, processed_ops);
- break;
- case IPSEC_MB_OP_HASH_VERIFY_THEN_DECRYPT:
- case IPSEC_MB_OP_HASH_GEN_THEN_ENCRYPT:
- processed_ops = process_zuc_hash_op(qp, ops, sessions,
- num_ops);
- process_zuc_cipher_op(qp, ops, sessions, processed_ops);
- break;
- default:
- /* Operation not supported. */
- for (i = 0; i < num_ops; i++)
- ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
- }
-
- for (i = 0; i < num_ops; i++) {
- /*
- * If there was no error/authentication failure,
- * change status to successful.
- */
- if (ops[i]->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
- ops[i]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
- /* Free session if a session-less crypto op. */
- if (ops[i]->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
- memset(sessions[i], 0, sizeof(struct zuc_session));
- rte_mempool_put(qp->sess_mp, ops[i]->sym->session);
- ops[i]->sym->session = NULL;
- }
- }
- return processed_ops;
-}
-
-static uint16_t
-zuc_pmd_dequeue_burst(void *queue_pair,
- struct rte_crypto_op **c_ops, uint16_t nb_ops)
-{
-
- struct rte_crypto_op *curr_c_op;
-
- struct zuc_session *curr_sess;
- struct zuc_session *sessions[ZUC_MAX_BURST];
- struct rte_crypto_op *int_c_ops[ZUC_MAX_BURST];
- enum ipsec_mb_operation prev_zuc_op = IPSEC_MB_OP_NOT_SUPPORTED;
- enum ipsec_mb_operation curr_zuc_op;
- struct ipsec_mb_qp *qp = queue_pair;
- unsigned int nb_dequeued;
- unsigned int i;
- uint8_t burst_size = 0;
- uint8_t processed_ops;
-
- nb_dequeued = rte_ring_dequeue_burst(qp->ingress_queue,
- (void **)c_ops, nb_ops, NULL);
-
-
- for (i = 0; i < nb_dequeued; i++) {
- curr_c_op = c_ops[i];
-
- curr_sess = (struct zuc_session *)
- ipsec_mb_get_session_private(qp, curr_c_op);
- if (unlikely(curr_sess == NULL)) {
- curr_c_op->status =
- RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
- break;
- }
-
- curr_zuc_op = curr_sess->op;
-
- /*
- * Batch ops that share the same operation type
- * (cipher only, auth only...).
- */
- if (burst_size == 0) {
- prev_zuc_op = curr_zuc_op;
- int_c_ops[0] = curr_c_op;
- sessions[0] = curr_sess;
- burst_size++;
- } else if (curr_zuc_op == prev_zuc_op) {
- int_c_ops[burst_size] = curr_c_op;
- sessions[burst_size] = curr_sess;
- burst_size++;
- /*
- * When there are enough ops to process in a batch,
- * process them, and start a new batch.
- */
- if (burst_size == ZUC_MAX_BURST) {
- processed_ops = process_ops(int_c_ops, curr_zuc_op,
- sessions, qp, burst_size);
- if (processed_ops < burst_size) {
- burst_size = 0;
- break;
- }
-
- burst_size = 0;
- }
- } else {
- /*
- * Different operation type, process the ops
- * of the previous type.
- */
- processed_ops = process_ops(int_c_ops, prev_zuc_op,
- sessions, qp, burst_size);
- if (processed_ops < burst_size) {
- burst_size = 0;
- break;
- }
-
- burst_size = 0;
- prev_zuc_op = curr_zuc_op;
-
- int_c_ops[0] = curr_c_op;
- sessions[0] = curr_sess;
- burst_size++;
- }
- }
-
- if (burst_size != 0) {
- /* Process the crypto ops of the last operation type. */
- processed_ops = process_ops(int_c_ops, prev_zuc_op,
- sessions, qp, burst_size);
- }
-
- qp->stats.dequeued_count += i;
- return i;
-}
+#include "pmd_aesni_mb_priv.h"
struct rte_cryptodev_ops zuc_pmd_ops = {
.dev_configure = ipsec_mb_config,
@@ -388,7 +54,7 @@ RTE_INIT(ipsec_mb_register_zuc)
= &ipsec_mb_pmds[IPSEC_MB_PMD_TYPE_ZUC];
zuc_data->caps = zuc_capabilities;
- zuc_data->dequeue_burst = zuc_pmd_dequeue_burst;
+ zuc_data->dequeue_burst = aesni_mb_dequeue_burst;
zuc_data->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO
| RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING
| RTE_CRYPTODEV_FF_NON_BYTE_ALIGNED_DATA
@@ -398,6 +64,5 @@ RTE_INIT(ipsec_mb_register_zuc)
zuc_data->internals_priv_size = 0;
zuc_data->ops = &zuc_pmd_ops;
zuc_data->qp_priv_size = sizeof(struct zuc_qp_data);
- zuc_data->session_configure = zuc_session_configure;
- zuc_data->session_priv_size = sizeof(struct zuc_session);
+ zuc_data->session_configure = aesni_mb_session_configure;
}
diff --git a/drivers/crypto/ipsec_mb/pmd_zuc_priv.h b/drivers/crypto/ipsec_mb/pmd_zuc_priv.h
index 76fd6758c2..2e6eebc409 100644
--- a/drivers/crypto/ipsec_mb/pmd_zuc_priv.h
+++ b/drivers/crypto/ipsec_mb/pmd_zuc_priv.h
@@ -10,7 +10,6 @@
#define ZUC_IV_KEY_LENGTH 16
#define ZUC_DIGEST_LENGTH 4
#define ZUC_MAX_BURST 16
-#define BYTE_LEN 8
uint8_t pmd_driver_id_zuc;
@@ -63,16 +62,6 @@ static const struct rte_cryptodev_capabilities zuc_capabilities[] = {
RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
};
-/** ZUC private session structure */
-struct zuc_session {
- enum ipsec_mb_operation op;
- enum rte_crypto_auth_operation auth_op;
- uint8_t pKey_cipher[ZUC_IV_KEY_LENGTH];
- uint8_t pKey_hash[ZUC_IV_KEY_LENGTH];
- uint16_t cipher_iv_offset;
- uint16_t auth_iv_offset;
-} __rte_cache_aligned;
-
struct zuc_qp_data {
uint8_t temp_digest[ZUC_MAX_BURST][ZUC_DIGEST_LENGTH];
--
2.25.1
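The pattern applied across this series is the same for every affected PMD: delete the
per-algorithm direct-API dequeue and session-configure code and point the driver's
registration data at the shared AESNI_MB job-API handlers instead. Below is a minimal,
self-contained C sketch of that delegation idea only; it is not DPDK code, and the names
(fake_qp, pmd_data, shared_job_api_dequeue) are hypothetical stand-ins for the real
ipsec_mb_internals fields touched in the hunks above and below.

/*
 * Illustrative sketch (hypothetical names): each per-algorithm driver used to
 * install its own dequeue handler; after the unification all of them install
 * the one shared job-API handler, so the direct-API paths can be removed.
 */
#include <stdint.h>
#include <stdio.h>

struct fake_qp { const char *name; };

/* Shared handler, standing in for aesni_mb_dequeue_burst(). */
static uint16_t
shared_job_api_dequeue(struct fake_qp *qp, uint16_t nb_ops)
{
	printf("%s: %u ops handled by the shared job-API path\n",
			qp->name, nb_ops);
	return nb_ops;
}

/* Per-algorithm registration data, mirroring the dequeue_burst field. */
struct pmd_data {
	const char *name;
	uint16_t (*dequeue_burst)(struct fake_qp *qp, uint16_t nb_ops);
};

int main(void)
{
	/* ZUC, KASUMI, SNOW3G and CHACHA20_POLY1305 all reuse one handler. */
	struct pmd_data pmds[] = {
		{ "zuc", shared_job_api_dequeue },
		{ "kasumi", shared_job_api_dequeue },
		{ "snow3g", shared_job_api_dequeue },
		{ "chacha20_poly1305", shared_job_api_dequeue },
	};
	unsigned int i;

	for (i = 0; i < 4; i++) {
		struct fake_qp qp = { pmds[i].name };
		pmds[i].dequeue_burst(&qp, 32);
	}
	return 0;
}

The same substitution shows up in each RTE_INIT registration hunk in the diffs, where the
per-PMD dequeue_burst and session_configure pointers are replaced by the AESNI_MB ones.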
* [PATCH v4] crypto/ipsec_mb: unified IPsec MB interface
2023-12-12 15:36 [PATCH v1] crypto/ipsec_mb: unified IPsec MB interface Brian Dooley
2023-12-14 15:15 ` [PATCH v2] " Brian Dooley
2024-01-18 12:00 ` [PATCH v3] " Brian Dooley
@ 2024-02-28 11:33 ` Brian Dooley
2024-02-28 11:50 ` Power, Ciara
` (2 more replies)
2024-03-05 17:42 ` [PATCH v5 1/4] crypto/ipsec_mb: bump minimum IPsec Multi-buffer version Brian Dooley
` (2 subsequent siblings)
5 siblings, 3 replies; 45+ messages in thread
From: Brian Dooley @ 2024-02-28 11:33 UTC (permalink / raw)
To: Kai Ji, Pablo de Lara; +Cc: dev, gakhil, Brian Dooley
Currently IPsec MB provides both the JOB API and direct API.
AESNI_MB PMD is using the JOB API codepath while ZUC, KASUMI, SNOW3G
and CHACHA20_POLY1305 are using the direct API.
Instead of using the direct API for these PMDs, they should now make
use of the JOB API codepath. This would remove all use of the IPsec MB
direct API for these PMDs.
Signed-off-by: Brian Dooley <brian.dooley@intel.com>
---
v2:
- Fix compilation failure
v3:
- Remove session configure pointer for each PMD
v4:
- Keep AES GCM PMD and fix extern issue
---
doc/guides/rel_notes/release_24_03.rst | 6 +
drivers/crypto/ipsec_mb/pmd_aesni_mb.c | 10 +-
drivers/crypto/ipsec_mb/pmd_aesni_mb_priv.h | 15 +-
drivers/crypto/ipsec_mb/pmd_chacha_poly.c | 338 +----------
.../crypto/ipsec_mb/pmd_chacha_poly_priv.h | 28 -
drivers/crypto/ipsec_mb/pmd_kasumi.c | 410 +------------
drivers/crypto/ipsec_mb/pmd_kasumi_priv.h | 20 -
drivers/crypto/ipsec_mb/pmd_snow3g.c | 543 +-----------------
drivers/crypto/ipsec_mb/pmd_snow3g_priv.h | 21 -
drivers/crypto/ipsec_mb/pmd_zuc.c | 347 +----------
drivers/crypto/ipsec_mb/pmd_zuc_priv.h | 20 -
11 files changed, 48 insertions(+), 1710 deletions(-)
diff --git a/doc/guides/rel_notes/release_24_03.rst b/doc/guides/rel_notes/release_24_03.rst
index 879bb4944c..6c5b76cef5 100644
--- a/doc/guides/rel_notes/release_24_03.rst
+++ b/doc/guides/rel_notes/release_24_03.rst
@@ -138,6 +138,12 @@ New Features
to support TLS v1.2, TLS v1.3 and DTLS v1.2.
* Added PMD API to allow raw submission of instructions to CPT.
+* **Updated ipsec_mb crypto driver.**
+
+ * Kasumi, Snow3G, ChaChaPoly and ZUC PMDs now share the job API codepath
+ with AESNI_MB PMD. Depending on the architecture, the performance of ZUC
+ crypto PMD is approximately 10% less for small fixed packet sizes.
+
Removed Items
-------------
diff --git a/drivers/crypto/ipsec_mb/pmd_aesni_mb.c b/drivers/crypto/ipsec_mb/pmd_aesni_mb.c
index 4de4866cf3..7d4dbc91ef 100644
--- a/drivers/crypto/ipsec_mb/pmd_aesni_mb.c
+++ b/drivers/crypto/ipsec_mb/pmd_aesni_mb.c
@@ -8,6 +8,8 @@
RTE_DEFINE_PER_LCORE(pid_t, pid);
+uint8_t pmd_driver_id_aesni_mb;
+
struct aesni_mb_op_buf_data {
struct rte_mbuf *m;
uint32_t offset;
@@ -761,7 +763,7 @@ aesni_mb_set_session_aead_parameters(const IMB_MGR *mb_mgr,
}
/** Configure a aesni multi-buffer session from a crypto xform chain */
-static int
+int
aesni_mb_session_configure(IMB_MGR *mb_mgr,
void *priv_sess,
const struct rte_crypto_sym_xform *xform)
@@ -2131,7 +2133,7 @@ set_job_null_op(IMB_JOB *job, struct rte_crypto_op *op)
}
#if IMB_VERSION(1, 2, 0) < IMB_VERSION_NUM
-static uint16_t
+uint16_t
aesni_mb_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
uint16_t nb_ops)
{
@@ -2321,7 +2323,7 @@ flush_mb_mgr(struct ipsec_mb_qp *qp, IMB_MGR *mb_mgr,
return processed_ops;
}
-static uint16_t
+uint16_t
aesni_mb_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
uint16_t nb_ops)
{
@@ -2456,7 +2458,7 @@ verify_sync_dgst(struct rte_crypto_sym_vec *vec,
return k;
}
-static uint32_t
+uint32_t
aesni_mb_process_bulk(struct rte_cryptodev *dev __rte_unused,
struct rte_cryptodev_sym_session *sess, union rte_crypto_sym_ofs sofs,
struct rte_crypto_sym_vec *vec)
diff --git a/drivers/crypto/ipsec_mb/pmd_aesni_mb_priv.h b/drivers/crypto/ipsec_mb/pmd_aesni_mb_priv.h
index 85994fe5a1..2d462a7f68 100644
--- a/drivers/crypto/ipsec_mb/pmd_aesni_mb_priv.h
+++ b/drivers/crypto/ipsec_mb/pmd_aesni_mb_priv.h
@@ -21,6 +21,19 @@
#define MAX_NUM_SEGS 16
#endif
+int
+aesni_mb_session_configure(IMB_MGR * m __rte_unused, void *priv_sess,
+ const struct rte_crypto_sym_xform *xform);
+
+uint16_t
+aesni_mb_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
+ uint16_t nb_ops);
+
+uint32_t
+aesni_mb_process_bulk(struct rte_cryptodev *dev __rte_unused,
+ struct rte_cryptodev_sym_session *sess, union rte_crypto_sym_ofs sofs,
+ struct rte_crypto_sym_vec *vec);
+
static const struct rte_cryptodev_capabilities aesni_mb_capabilities[] = {
{ /* MD5 HMAC */
.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
@@ -722,8 +735,6 @@ static const struct rte_cryptodev_capabilities aesni_mb_capabilities[] = {
RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
};
-uint8_t pmd_driver_id_aesni_mb;
-
struct aesni_mb_qp_data {
uint8_t temp_digests[IMB_MAX_JOBS][DIGEST_LENGTH_MAX];
/* *< Buffers used to store the digest generated
diff --git a/drivers/crypto/ipsec_mb/pmd_chacha_poly.c b/drivers/crypto/ipsec_mb/pmd_chacha_poly.c
index 97e7cef233..7436353fc2 100644
--- a/drivers/crypto/ipsec_mb/pmd_chacha_poly.c
+++ b/drivers/crypto/ipsec_mb/pmd_chacha_poly.c
@@ -3,334 +3,7 @@
*/
#include "pmd_chacha_poly_priv.h"
-
-/** Parse crypto xform chain and set private session parameters. */
-static int
-chacha20_poly1305_session_configure(IMB_MGR * mb_mgr __rte_unused,
- void *priv_sess, const struct rte_crypto_sym_xform *xform)
-{
- struct chacha20_poly1305_session *sess = priv_sess;
- const struct rte_crypto_sym_xform *auth_xform;
- const struct rte_crypto_sym_xform *cipher_xform;
- const struct rte_crypto_sym_xform *aead_xform;
-
- uint8_t key_length;
- const uint8_t *key;
- enum ipsec_mb_operation mode;
- int ret = 0;
-
- ret = ipsec_mb_parse_xform(xform, &mode, &auth_xform,
- &cipher_xform, &aead_xform);
- if (ret)
- return ret;
-
- sess->op = mode;
-
- switch (sess->op) {
- case IPSEC_MB_OP_AEAD_AUTHENTICATED_ENCRYPT:
- case IPSEC_MB_OP_AEAD_AUTHENTICATED_DECRYPT:
- if (aead_xform->aead.algo !=
- RTE_CRYPTO_AEAD_CHACHA20_POLY1305) {
- IPSEC_MB_LOG(ERR,
- "The only combined operation supported is CHACHA20 POLY1305");
- ret = -ENOTSUP;
- goto error_exit;
- }
- /* Set IV parameters */
- sess->iv.offset = aead_xform->aead.iv.offset;
- sess->iv.length = aead_xform->aead.iv.length;
- key_length = aead_xform->aead.key.length;
- key = aead_xform->aead.key.data;
- sess->aad_length = aead_xform->aead.aad_length;
- sess->req_digest_length = aead_xform->aead.digest_length;
- break;
- default:
- IPSEC_MB_LOG(
- ERR, "Wrong xform type, has to be AEAD or authentication");
- ret = -ENOTSUP;
- goto error_exit;
- }
-
- /* IV check */
- if (sess->iv.length != CHACHA20_POLY1305_IV_LENGTH &&
- sess->iv.length != 0) {
- IPSEC_MB_LOG(ERR, "Wrong IV length");
- ret = -EINVAL;
- goto error_exit;
- }
-
- /* Check key length */
- if (key_length != CHACHA20_POLY1305_KEY_SIZE) {
- IPSEC_MB_LOG(ERR, "Invalid key length");
- ret = -EINVAL;
- goto error_exit;
- } else {
- memcpy(sess->key, key, CHACHA20_POLY1305_KEY_SIZE);
- }
-
- /* Digest check */
- if (sess->req_digest_length != CHACHA20_POLY1305_DIGEST_LENGTH) {
- IPSEC_MB_LOG(ERR, "Invalid digest length");
- ret = -EINVAL;
- goto error_exit;
- } else {
- sess->gen_digest_length = CHACHA20_POLY1305_DIGEST_LENGTH;
- }
-
-error_exit:
- return ret;
-}
-
-/**
- * Process a crypto operation, calling
- * the direct chacha poly API from the multi buffer library.
- *
- * @param qp queue pair
- * @param op symmetric crypto operation
- * @param session chacha poly session
- *
- * @return
- * - Return 0 if success
- */
-static int
-chacha20_poly1305_crypto_op(struct ipsec_mb_qp *qp, struct rte_crypto_op *op,
- struct chacha20_poly1305_session *session)
-{
- struct chacha20_poly1305_qp_data *qp_data =
- ipsec_mb_get_qp_private_data(qp);
- uint8_t *src, *dst;
- uint8_t *iv_ptr;
- struct rte_crypto_sym_op *sym_op = op->sym;
- struct rte_mbuf *m_src = sym_op->m_src;
- uint32_t offset, data_offset, data_length;
- uint32_t part_len, data_len;
- int total_len;
- uint8_t *tag;
- unsigned int oop = 0;
-
- offset = sym_op->aead.data.offset;
- data_offset = offset;
- data_length = sym_op->aead.data.length;
- RTE_ASSERT(m_src != NULL);
-
- while (offset >= m_src->data_len && data_length != 0) {
- offset -= m_src->data_len;
- m_src = m_src->next;
-
- RTE_ASSERT(m_src != NULL);
- }
-
- src = rte_pktmbuf_mtod_offset(m_src, uint8_t *, offset);
-
- data_len = m_src->data_len - offset;
- part_len = (data_len < data_length) ? data_len :
- data_length;
-
- /* In-place */
- if (sym_op->m_dst == NULL || (sym_op->m_dst == sym_op->m_src))
- dst = src;
- /* Out-of-place */
- else {
- oop = 1;
- /* Segmented destination buffer is not supported
- * if operation is Out-of-place
- */
- RTE_ASSERT(rte_pktmbuf_is_contiguous(sym_op->m_dst));
- dst = rte_pktmbuf_mtod_offset(sym_op->m_dst, uint8_t *,
- data_offset);
- }
-
- iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
- session->iv.offset);
-
- IMB_CHACHA20_POLY1305_INIT(qp->mb_mgr, session->key,
- &qp_data->chacha20_poly1305_ctx_data,
- iv_ptr, sym_op->aead.aad.data,
- (uint64_t)session->aad_length);
-
- if (session->op == IPSEC_MB_OP_AEAD_AUTHENTICATED_ENCRYPT) {
- IMB_CHACHA20_POLY1305_ENC_UPDATE(qp->mb_mgr,
- session->key,
- &qp_data->chacha20_poly1305_ctx_data,
- dst, src, (uint64_t)part_len);
- total_len = data_length - part_len;
-
- while (total_len) {
- m_src = m_src->next;
- RTE_ASSERT(m_src != NULL);
-
- src = rte_pktmbuf_mtod(m_src, uint8_t *);
- if (oop)
- dst += part_len;
- else
- dst = src;
- part_len = (m_src->data_len < total_len) ?
- m_src->data_len : total_len;
-
- if (dst == NULL || src == NULL) {
- IPSEC_MB_LOG(ERR, "Invalid src or dst input");
- return -EINVAL;
- }
- IMB_CHACHA20_POLY1305_ENC_UPDATE(qp->mb_mgr,
- session->key,
- &qp_data->chacha20_poly1305_ctx_data,
- dst, src, (uint64_t)part_len);
- total_len -= part_len;
- if (total_len < 0) {
- IPSEC_MB_LOG(ERR, "Invalid part len");
- return -EINVAL;
- }
- }
-
- tag = sym_op->aead.digest.data;
- IMB_CHACHA20_POLY1305_ENC_FINALIZE(qp->mb_mgr,
- &qp_data->chacha20_poly1305_ctx_data,
- tag, session->gen_digest_length);
-
- } else {
- IMB_CHACHA20_POLY1305_DEC_UPDATE(qp->mb_mgr,
- session->key,
- &qp_data->chacha20_poly1305_ctx_data,
- dst, src, (uint64_t)part_len);
-
- total_len = data_length - part_len;
-
- while (total_len) {
- m_src = m_src->next;
-
- RTE_ASSERT(m_src != NULL);
-
- src = rte_pktmbuf_mtod(m_src, uint8_t *);
- if (oop)
- dst += part_len;
- else
- dst = src;
- part_len = (m_src->data_len < total_len) ?
- m_src->data_len : total_len;
-
- if (dst == NULL || src == NULL) {
- IPSEC_MB_LOG(ERR, "Invalid src or dst input");
- return -EINVAL;
- }
- IMB_CHACHA20_POLY1305_DEC_UPDATE(qp->mb_mgr,
- session->key,
- &qp_data->chacha20_poly1305_ctx_data,
- dst, src, (uint64_t)part_len);
- total_len -= part_len;
- if (total_len < 0) {
- IPSEC_MB_LOG(ERR, "Invalid part len");
- return -EINVAL;
- }
- }
-
- tag = qp_data->temp_digest;
- IMB_CHACHA20_POLY1305_DEC_FINALIZE(qp->mb_mgr,
- &qp_data->chacha20_poly1305_ctx_data,
- tag, session->gen_digest_length);
- }
-
- return 0;
-}
-
-/**
- * Process a completed chacha poly op
- *
- * @param qp Queue Pair to process
- * @param op Crypto operation
- * @param sess Crypto session
- *
- * @return
- * - void
- */
-static void
-post_process_chacha20_poly1305_crypto_op(struct ipsec_mb_qp *qp,
- struct rte_crypto_op *op,
- struct chacha20_poly1305_session *session)
-{
- struct chacha20_poly1305_qp_data *qp_data =
- ipsec_mb_get_qp_private_data(qp);
-
- op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
- /* Verify digest if required */
- if (session->op == IPSEC_MB_OP_AEAD_AUTHENTICATED_DECRYPT ||
- session->op == IPSEC_MB_OP_HASH_VERIFY_ONLY) {
- uint8_t *digest = op->sym->aead.digest.data;
- uint8_t *tag = qp_data->temp_digest;
-
-#ifdef RTE_LIBRTE_PMD_CHACHA20_POLY1305_DEBUG
- rte_hexdump(stdout, "auth tag (orig):",
- digest, session->req_digest_length);
- rte_hexdump(stdout, "auth tag (calc):",
- tag, session->req_digest_length);
-#endif
- if (memcmp(tag, digest, session->req_digest_length) != 0)
- op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
-
- }
-
-}
-
-/**
- * Process a completed Chacha20_poly1305 request
- *
- * @param qp Queue Pair to process
- * @param op Crypto operation
- * @param sess Crypto session
- *
- * @return
- * - void
- */
-static void
-handle_completed_chacha20_poly1305_crypto_op(struct ipsec_mb_qp *qp,
- struct rte_crypto_op *op,
- struct chacha20_poly1305_session *sess)
-{
- post_process_chacha20_poly1305_crypto_op(qp, op, sess);
-
- /* Free session if a session-less crypto op */
- if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
- memset(sess, 0, sizeof(struct chacha20_poly1305_session));
- rte_mempool_put(qp->sess_mp, op->sym->session);
- op->sym->session = NULL;
- }
-}
-
-static uint16_t
-chacha20_poly1305_pmd_dequeue_burst(void *queue_pair,
- struct rte_crypto_op **ops, uint16_t nb_ops)
-{
- struct chacha20_poly1305_session *sess;
- struct ipsec_mb_qp *qp = queue_pair;
-
- int retval = 0;
- unsigned int i = 0, nb_dequeued;
-
- nb_dequeued = rte_ring_dequeue_burst(qp->ingress_queue,
- (void **)ops, nb_ops, NULL);
-
- for (i = 0; i < nb_dequeued; i++) {
-
- sess = ipsec_mb_get_session_private(qp, ops[i]);
- if (unlikely(sess == NULL)) {
- ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
- qp->stats.dequeue_err_count++;
- break;
- }
-
- retval = chacha20_poly1305_crypto_op(qp, ops[i], sess);
- if (retval < 0) {
- ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
- qp->stats.dequeue_err_count++;
- break;
- }
-
- handle_completed_chacha20_poly1305_crypto_op(qp, ops[i], sess);
- }
-
- qp->stats.dequeued_count += i;
-
- return i;
-}
+#include "pmd_aesni_mb_priv.h"
struct rte_cryptodev_ops chacha20_poly1305_pmd_ops = {
.dev_configure = ipsec_mb_config,
@@ -384,7 +57,7 @@ RTE_INIT(ipsec_mb_register_chacha20_poly1305)
= &ipsec_mb_pmds[IPSEC_MB_PMD_TYPE_CHACHA20_POLY1305];
chacha_poly_data->caps = chacha20_poly1305_capabilities;
- chacha_poly_data->dequeue_burst = chacha20_poly1305_pmd_dequeue_burst;
+ chacha_poly_data->dequeue_burst = aesni_mb_dequeue_burst;
chacha_poly_data->feature_flags =
RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
@@ -395,10 +68,9 @@ RTE_INIT(ipsec_mb_register_chacha20_poly1305)
RTE_CRYPTODEV_FF_SYM_SESSIONLESS;
chacha_poly_data->internals_priv_size = 0;
chacha_poly_data->ops = &chacha20_poly1305_pmd_ops;
- chacha_poly_data->qp_priv_size =
- sizeof(struct chacha20_poly1305_qp_data);
+ chacha_poly_data->qp_priv_size = sizeof(struct aesni_mb_qp_data);
chacha_poly_data->session_configure =
- chacha20_poly1305_session_configure;
+ aesni_mb_session_configure;
chacha_poly_data->session_priv_size =
- sizeof(struct chacha20_poly1305_session);
+ sizeof(struct aesni_mb_session);
}
diff --git a/drivers/crypto/ipsec_mb/pmd_chacha_poly_priv.h b/drivers/crypto/ipsec_mb/pmd_chacha_poly_priv.h
index 842f62f5d1..e668bfe07f 100644
--- a/drivers/crypto/ipsec_mb/pmd_chacha_poly_priv.h
+++ b/drivers/crypto/ipsec_mb/pmd_chacha_poly_priv.h
@@ -7,9 +7,7 @@
#include "ipsec_mb_private.h"
-#define CHACHA20_POLY1305_IV_LENGTH 12
#define CHACHA20_POLY1305_DIGEST_LENGTH 16
-#define CHACHA20_POLY1305_KEY_SIZE 32
static const
struct rte_cryptodev_capabilities chacha20_poly1305_capabilities[] = {
@@ -45,30 +43,4 @@ struct rte_cryptodev_capabilities chacha20_poly1305_capabilities[] = {
uint8_t pmd_driver_id_chacha20_poly1305;
-/** CHACHA20 POLY1305 private session structure */
-struct chacha20_poly1305_session {
- struct {
- uint16_t length;
- uint16_t offset;
- } iv;
- /**< IV parameters */
- uint16_t aad_length;
- /**< AAD length */
- uint16_t req_digest_length;
- /**< Requested digest length */
- uint16_t gen_digest_length;
- /**< Generated digest length */
- uint8_t key[CHACHA20_POLY1305_KEY_SIZE];
- enum ipsec_mb_operation op;
-} __rte_cache_aligned;
-
-struct chacha20_poly1305_qp_data {
- struct chacha20_poly1305_context_data chacha20_poly1305_ctx_data;
- uint8_t temp_digest[CHACHA20_POLY1305_DIGEST_LENGTH];
- /**< Buffer used to store the digest generated
- * by the driver when verifying a digest provided
- * by the user (using authentication verify operation)
- */
-};
-
#endif /* _PMD_CHACHA_POLY_PRIV_H_ */
diff --git a/drivers/crypto/ipsec_mb/pmd_kasumi.c b/drivers/crypto/ipsec_mb/pmd_kasumi.c
index 70536ec3dc..c3571ec81b 100644
--- a/drivers/crypto/ipsec_mb/pmd_kasumi.c
+++ b/drivers/crypto/ipsec_mb/pmd_kasumi.c
@@ -10,406 +10,7 @@
#include <rte_malloc.h>
#include "pmd_kasumi_priv.h"
-
-/** Parse crypto xform chain and set private session parameters. */
-static int
-kasumi_session_configure(IMB_MGR *mgr, void *priv_sess,
- const struct rte_crypto_sym_xform *xform)
-{
- const struct rte_crypto_sym_xform *auth_xform = NULL;
- const struct rte_crypto_sym_xform *cipher_xform = NULL;
- enum ipsec_mb_operation mode;
- struct kasumi_session *sess = (struct kasumi_session *)priv_sess;
- /* Select Crypto operation - hash then cipher / cipher then hash */
- int ret = ipsec_mb_parse_xform(xform, &mode, &auth_xform,
- &cipher_xform, NULL);
-
- if (ret)
- return ret;
-
- if (cipher_xform) {
- /* Only KASUMI F8 supported */
- if (cipher_xform->cipher.algo != RTE_CRYPTO_CIPHER_KASUMI_F8) {
- IPSEC_MB_LOG(ERR, "Unsupported cipher algorithm ");
- return -ENOTSUP;
- }
-
- sess->cipher_iv_offset = cipher_xform->cipher.iv.offset;
- if (cipher_xform->cipher.iv.length != KASUMI_IV_LENGTH) {
- IPSEC_MB_LOG(ERR, "Wrong IV length");
- return -EINVAL;
- }
-
- /* Initialize key */
- IMB_KASUMI_INIT_F8_KEY_SCHED(mgr,
- cipher_xform->cipher.key.data,
- &sess->pKeySched_cipher);
- }
-
- if (auth_xform) {
- /* Only KASUMI F9 supported */
- if (auth_xform->auth.algo != RTE_CRYPTO_AUTH_KASUMI_F9) {
- IPSEC_MB_LOG(ERR, "Unsupported authentication");
- return -ENOTSUP;
- }
-
- if (auth_xform->auth.digest_length != KASUMI_DIGEST_LENGTH) {
- IPSEC_MB_LOG(ERR, "Wrong digest length");
- return -EINVAL;
- }
-
- sess->auth_op = auth_xform->auth.op;
-
- /* Initialize key */
- IMB_KASUMI_INIT_F9_KEY_SCHED(mgr, auth_xform->auth.key.data,
- &sess->pKeySched_hash);
- }
-
- sess->op = mode;
- return ret;
-}
-
-/** Encrypt/decrypt mbufs with same cipher key. */
-static uint8_t
-process_kasumi_cipher_op(struct ipsec_mb_qp *qp, struct rte_crypto_op **ops,
- struct kasumi_session *session, uint8_t num_ops)
-{
- unsigned int i;
- uint8_t processed_ops = 0;
- const void *src[num_ops];
- void *dst[num_ops];
- uint8_t *iv_ptr;
- uint64_t iv[num_ops];
- uint32_t num_bytes[num_ops];
-
- for (i = 0; i < num_ops; i++) {
- src[i] = rte_pktmbuf_mtod_offset(ops[i]->sym->m_src,
- uint8_t *,
- (ops[i]->sym->cipher.data.offset >> 3));
- dst[i] = ops[i]->sym->m_dst
- ? rte_pktmbuf_mtod_offset(ops[i]->sym->m_dst,
- uint8_t *,
- (ops[i]->sym->cipher.data.offset >> 3))
- : rte_pktmbuf_mtod_offset(ops[i]->sym->m_src,
- uint8_t *,
- (ops[i]->sym->cipher.data.offset >> 3));
- iv_ptr = rte_crypto_op_ctod_offset(ops[i], uint8_t *,
- session->cipher_iv_offset);
- iv[i] = *((uint64_t *)(iv_ptr));
- num_bytes[i] = ops[i]->sym->cipher.data.length >> 3;
-
- processed_ops++;
- }
-
- if (processed_ops != 0)
- IMB_KASUMI_F8_N_BUFFER(qp->mb_mgr, &session->pKeySched_cipher,
- iv, src, dst, num_bytes,
- processed_ops);
-
- return processed_ops;
-}
-
-/** Encrypt/decrypt mbuf (bit level function). */
-static uint8_t
-process_kasumi_cipher_op_bit(struct ipsec_mb_qp *qp, struct rte_crypto_op *op,
- struct kasumi_session *session)
-{
- uint8_t *src, *dst;
- uint8_t *iv_ptr;
- uint64_t iv;
- uint32_t length_in_bits, offset_in_bits;
-
- offset_in_bits = op->sym->cipher.data.offset;
- src = rte_pktmbuf_mtod(op->sym->m_src, uint8_t *);
- if (op->sym->m_dst == NULL)
- dst = src;
- else
- dst = rte_pktmbuf_mtod(op->sym->m_dst, uint8_t *);
- iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
- session->cipher_iv_offset);
- iv = *((uint64_t *)(iv_ptr));
- length_in_bits = op->sym->cipher.data.length;
-
- IMB_KASUMI_F8_1_BUFFER_BIT(qp->mb_mgr, &session->pKeySched_cipher, iv,
- src, dst, length_in_bits, offset_in_bits);
-
- return 1;
-}
-
-/** Generate/verify hash from mbufs with same hash key. */
-static int
-process_kasumi_hash_op(struct ipsec_mb_qp *qp, struct rte_crypto_op **ops,
- struct kasumi_session *session, uint8_t num_ops)
-{
- unsigned int i;
- uint8_t processed_ops = 0;
- uint8_t *src, *dst;
- uint32_t length_in_bits;
- uint32_t num_bytes;
- struct kasumi_qp_data *qp_data = ipsec_mb_get_qp_private_data(qp);
-
- for (i = 0; i < num_ops; i++) {
- /* Data must be byte aligned */
- if ((ops[i]->sym->auth.data.offset % BYTE_LEN) != 0) {
- ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
- IPSEC_MB_LOG(ERR, "Invalid Offset");
- break;
- }
-
- length_in_bits = ops[i]->sym->auth.data.length;
-
- src = rte_pktmbuf_mtod_offset(ops[i]->sym->m_src, uint8_t *,
- (ops[i]->sym->auth.data.offset >> 3));
- /* Direction from next bit after end of message */
- num_bytes = length_in_bits >> 3;
-
- if (session->auth_op == RTE_CRYPTO_AUTH_OP_VERIFY) {
- dst = qp_data->temp_digest;
- IMB_KASUMI_F9_1_BUFFER(qp->mb_mgr,
- &session->pKeySched_hash, src,
- num_bytes, dst);
-
- /* Verify digest. */
- if (memcmp(dst, ops[i]->sym->auth.digest.data,
- KASUMI_DIGEST_LENGTH)
- != 0)
- ops[i]->status
- = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
- } else {
- dst = ops[i]->sym->auth.digest.data;
-
- IMB_KASUMI_F9_1_BUFFER(qp->mb_mgr,
- &session->pKeySched_hash, src,
- num_bytes, dst);
- }
- processed_ops++;
- }
-
- return processed_ops;
-}
-
-/** Process a batch of crypto ops which shares the same session. */
-static int
-process_ops(struct rte_crypto_op **ops, struct kasumi_session *session,
- struct ipsec_mb_qp *qp, uint8_t num_ops)
-{
- unsigned int i;
- unsigned int processed_ops;
-
- switch (session->op) {
- case IPSEC_MB_OP_ENCRYPT_ONLY:
- case IPSEC_MB_OP_DECRYPT_ONLY:
- processed_ops
- = process_kasumi_cipher_op(qp, ops, session, num_ops);
- break;
- case IPSEC_MB_OP_HASH_GEN_ONLY:
- case IPSEC_MB_OP_HASH_VERIFY_ONLY:
- processed_ops
- = process_kasumi_hash_op(qp, ops, session, num_ops);
- break;
- case IPSEC_MB_OP_ENCRYPT_THEN_HASH_GEN:
- case IPSEC_MB_OP_DECRYPT_THEN_HASH_VERIFY:
- processed_ops
- = process_kasumi_cipher_op(qp, ops, session, num_ops);
- process_kasumi_hash_op(qp, ops, session, processed_ops);
- break;
- case IPSEC_MB_OP_HASH_VERIFY_THEN_DECRYPT:
- case IPSEC_MB_OP_HASH_GEN_THEN_ENCRYPT:
- processed_ops
- = process_kasumi_hash_op(qp, ops, session, num_ops);
- process_kasumi_cipher_op(qp, ops, session, processed_ops);
- break;
- default:
- /* Operation not supported. */
- processed_ops = 0;
- }
-
- for (i = 0; i < num_ops; i++) {
- /*
- * If there was no error/authentication failure,
- * change status to successful.
- */
- if (ops[i]->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
- ops[i]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
- /* Free session if a session-less crypto op. */
- if (ops[i]->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
- memset(session, 0, sizeof(struct kasumi_session));
- rte_mempool_put(qp->sess_mp, ops[i]->sym->session);
- ops[i]->sym->session = NULL;
- }
- }
- return processed_ops;
-}
-
-/** Process a crypto op with length/offset in bits. */
-static int
-process_op_bit(struct rte_crypto_op *op, struct kasumi_session *session,
- struct ipsec_mb_qp *qp)
-{
- unsigned int processed_op;
-
- switch (session->op) {
- /* case KASUMI_OP_ONLY_CIPHER: */
- case IPSEC_MB_OP_ENCRYPT_ONLY:
- case IPSEC_MB_OP_DECRYPT_ONLY:
- processed_op = process_kasumi_cipher_op_bit(qp, op, session);
- break;
- /* case KASUMI_OP_ONLY_AUTH: */
- case IPSEC_MB_OP_HASH_GEN_ONLY:
- case IPSEC_MB_OP_HASH_VERIFY_ONLY:
- processed_op = process_kasumi_hash_op(qp, &op, session, 1);
- break;
- /* case KASUMI_OP_CIPHER_AUTH: */
- case IPSEC_MB_OP_ENCRYPT_THEN_HASH_GEN:
- processed_op = process_kasumi_cipher_op_bit(qp, op, session);
- if (processed_op == 1)
- process_kasumi_hash_op(qp, &op, session, 1);
- break;
- /* case KASUMI_OP_AUTH_CIPHER: */
- case IPSEC_MB_OP_HASH_VERIFY_THEN_DECRYPT:
- processed_op = process_kasumi_hash_op(qp, &op, session, 1);
- if (processed_op == 1)
- process_kasumi_cipher_op_bit(qp, op, session);
- break;
- default:
- /* Operation not supported. */
- processed_op = 0;
- }
-
- /*
- * If there was no error/authentication failure,
- * change status to successful.
- */
- if (op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
- op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
-
- /* Free session if a session-less crypto op. */
- if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
- memset(CRYPTODEV_GET_SYM_SESS_PRIV(op->sym->session), 0,
- sizeof(struct kasumi_session));
- rte_mempool_put(qp->sess_mp, (void *)op->sym->session);
- op->sym->session = NULL;
- }
- return processed_op;
-}
-
-static uint16_t
-kasumi_pmd_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
- uint16_t nb_ops)
-{
- struct rte_crypto_op *c_ops[nb_ops];
- struct rte_crypto_op *curr_c_op = NULL;
-
- struct kasumi_session *prev_sess = NULL, *curr_sess = NULL;
- struct ipsec_mb_qp *qp = queue_pair;
- unsigned int i;
- uint8_t burst_size = 0;
- uint8_t processed_ops;
- unsigned int nb_dequeued;
-
- nb_dequeued = rte_ring_dequeue_burst(qp->ingress_queue,
- (void **)ops, nb_ops, NULL);
- for (i = 0; i < nb_dequeued; i++) {
- curr_c_op = ops[i];
-
-#ifdef RTE_LIBRTE_PMD_KASUMI_DEBUG
- if (!rte_pktmbuf_is_contiguous(curr_c_op->sym->m_src)
- || (curr_c_op->sym->m_dst != NULL
- && !rte_pktmbuf_is_contiguous(
- curr_c_op->sym->m_dst))) {
- IPSEC_MB_LOG(ERR,
- "PMD supports only contiguous mbufs, op (%p) provides noncontiguous mbuf as source/destination buffer.",
- curr_c_op);
- curr_c_op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
- break;
- }
-#endif
-
- /* Set status as enqueued (not processed yet) by default. */
- curr_c_op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
-
- curr_sess = (struct kasumi_session *)
- ipsec_mb_get_session_private(qp, curr_c_op);
- if (unlikely(curr_sess == NULL
- || curr_sess->op == IPSEC_MB_OP_NOT_SUPPORTED)) {
- curr_c_op->status
- = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
- break;
- }
-
- /* If length/offset is at bit-level, process this buffer alone.
- */
- if (((curr_c_op->sym->cipher.data.length % BYTE_LEN) != 0)
- || ((ops[i]->sym->cipher.data.offset % BYTE_LEN) != 0)) {
- /* Process the ops of the previous session. */
- if (prev_sess != NULL) {
- processed_ops = process_ops(c_ops, prev_sess,
- qp, burst_size);
- if (processed_ops < burst_size) {
- burst_size = 0;
- break;
- }
-
- burst_size = 0;
- prev_sess = NULL;
- }
-
- processed_ops = process_op_bit(curr_c_op,
- curr_sess, qp);
- if (processed_ops != 1)
- break;
-
- continue;
- }
-
- /* Batch ops that share the same session. */
- if (prev_sess == NULL) {
- prev_sess = curr_sess;
- c_ops[burst_size++] = curr_c_op;
- } else if (curr_sess == prev_sess) {
- c_ops[burst_size++] = curr_c_op;
- /*
- * When there are enough ops to process in a batch,
- * process them, and start a new batch.
- */
- if (burst_size == KASUMI_MAX_BURST) {
- processed_ops = process_ops(c_ops, prev_sess,
- qp, burst_size);
- if (processed_ops < burst_size) {
- burst_size = 0;
- break;
- }
-
- burst_size = 0;
- prev_sess = NULL;
- }
- } else {
- /*
- * Different session, process the ops
- * of the previous session.
- */
- processed_ops = process_ops(c_ops, prev_sess, qp,
- burst_size);
- if (processed_ops < burst_size) {
- burst_size = 0;
- break;
- }
-
- burst_size = 0;
- prev_sess = curr_sess;
-
- c_ops[burst_size++] = curr_c_op;
- }
- }
-
- if (burst_size != 0) {
- /* Process the crypto ops of the last session. */
- processed_ops = process_ops(c_ops, prev_sess, qp, burst_size);
- }
-
- qp->stats.dequeued_count += i;
- return i;
-}
+#include "pmd_aesni_mb_priv.h"
struct rte_cryptodev_ops kasumi_pmd_ops = {
.dev_configure = ipsec_mb_config,
@@ -460,7 +61,7 @@ RTE_INIT(ipsec_mb_register_kasumi)
= &ipsec_mb_pmds[IPSEC_MB_PMD_TYPE_KASUMI];
kasumi_data->caps = kasumi_capabilities;
- kasumi_data->dequeue_burst = kasumi_pmd_dequeue_burst;
+ kasumi_data->dequeue_burst = aesni_mb_dequeue_burst;
kasumi_data->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO
| RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING
| RTE_CRYPTODEV_FF_NON_BYTE_ALIGNED_DATA
@@ -469,7 +70,8 @@ RTE_INIT(ipsec_mb_register_kasumi)
| RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;
kasumi_data->internals_priv_size = 0;
kasumi_data->ops = &kasumi_pmd_ops;
- kasumi_data->qp_priv_size = sizeof(struct kasumi_qp_data);
- kasumi_data->session_configure = kasumi_session_configure;
- kasumi_data->session_priv_size = sizeof(struct kasumi_session);
+ kasumi_data->qp_priv_size = sizeof(struct aesni_mb_qp_data);
+ kasumi_data->session_configure = aesni_mb_session_configure;
+ kasumi_data->session_priv_size =
+ sizeof(struct aesni_mb_session);
}
diff --git a/drivers/crypto/ipsec_mb/pmd_kasumi_priv.h b/drivers/crypto/ipsec_mb/pmd_kasumi_priv.h
index 8db1d1cc5b..3223cf1a14 100644
--- a/drivers/crypto/ipsec_mb/pmd_kasumi_priv.h
+++ b/drivers/crypto/ipsec_mb/pmd_kasumi_priv.h
@@ -9,8 +9,6 @@
#define KASUMI_KEY_LENGTH 16
#define KASUMI_IV_LENGTH 8
-#define KASUMI_MAX_BURST 4
-#define BYTE_LEN 8
#define KASUMI_DIGEST_LENGTH 4
uint8_t pmd_driver_id_kasumi;
@@ -60,22 +58,4 @@ static const struct rte_cryptodev_capabilities kasumi_capabilities[] = {
RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
};
-/** KASUMI private session structure */
-struct kasumi_session {
- /* Keys have to be 16-byte aligned */
- kasumi_key_sched_t pKeySched_cipher;
- kasumi_key_sched_t pKeySched_hash;
- enum ipsec_mb_operation op;
- enum rte_crypto_auth_operation auth_op;
- uint16_t cipher_iv_offset;
-} __rte_cache_aligned;
-
-struct kasumi_qp_data {
- uint8_t temp_digest[KASUMI_DIGEST_LENGTH];
- /* *< Buffers used to store the digest generated
- * by the driver when verifying a digest provided
- * by the user (using authentication verify operation)
- */
-};
-
#endif /* _PMD_KASUMI_PRIV_H_ */
diff --git a/drivers/crypto/ipsec_mb/pmd_snow3g.c b/drivers/crypto/ipsec_mb/pmd_snow3g.c
index a96779f059..957f6aade8 100644
--- a/drivers/crypto/ipsec_mb/pmd_snow3g.c
+++ b/drivers/crypto/ipsec_mb/pmd_snow3g.c
@@ -3,539 +3,7 @@
*/
#include "pmd_snow3g_priv.h"
-
-/** Parse crypto xform chain and set private session parameters. */
-static int
-snow3g_session_configure(IMB_MGR *mgr, void *priv_sess,
- const struct rte_crypto_sym_xform *xform)
-{
- struct snow3g_session *sess = (struct snow3g_session *)priv_sess;
- const struct rte_crypto_sym_xform *auth_xform = NULL;
- const struct rte_crypto_sym_xform *cipher_xform = NULL;
- enum ipsec_mb_operation mode;
-
- /* Select Crypto operation - hash then cipher / cipher then hash */
- int ret = ipsec_mb_parse_xform(xform, &mode, &auth_xform,
- &cipher_xform, NULL);
- if (ret)
- return ret;
-
- if (cipher_xform) {
- /* Only SNOW 3G UEA2 supported */
- if (cipher_xform->cipher.algo != RTE_CRYPTO_CIPHER_SNOW3G_UEA2)
- return -ENOTSUP;
-
- if (cipher_xform->cipher.iv.length != SNOW3G_IV_LENGTH) {
- IPSEC_MB_LOG(ERR, "Wrong IV length");
- return -EINVAL;
- }
- if (cipher_xform->cipher.key.length > SNOW3G_MAX_KEY_SIZE) {
- IPSEC_MB_LOG(ERR, "Not enough memory to store the key");
- return -ENOMEM;
- }
-
- sess->cipher_iv_offset = cipher_xform->cipher.iv.offset;
-
- /* Initialize key */
- IMB_SNOW3G_INIT_KEY_SCHED(mgr, cipher_xform->cipher.key.data,
- &sess->pKeySched_cipher);
- }
-
- if (auth_xform) {
- /* Only SNOW 3G UIA2 supported */
- if (auth_xform->auth.algo != RTE_CRYPTO_AUTH_SNOW3G_UIA2)
- return -ENOTSUP;
-
- if (auth_xform->auth.digest_length != SNOW3G_DIGEST_LENGTH) {
- IPSEC_MB_LOG(ERR, "Wrong digest length");
- return -EINVAL;
- }
- if (auth_xform->auth.key.length > SNOW3G_MAX_KEY_SIZE) {
- IPSEC_MB_LOG(ERR, "Not enough memory to store the key");
- return -ENOMEM;
- }
-
- sess->auth_op = auth_xform->auth.op;
-
- if (auth_xform->auth.iv.length != SNOW3G_IV_LENGTH) {
- IPSEC_MB_LOG(ERR, "Wrong IV length");
- return -EINVAL;
- }
- sess->auth_iv_offset = auth_xform->auth.iv.offset;
-
- /* Initialize key */
- IMB_SNOW3G_INIT_KEY_SCHED(mgr, auth_xform->auth.key.data,
- &sess->pKeySched_hash);
- }
-
- sess->op = mode;
-
- return 0;
-}
-
-/** Check if conditions are met for digest-appended operations */
-static uint8_t *
-snow3g_digest_appended_in_src(struct rte_crypto_op *op)
-{
- unsigned int auth_size, cipher_size;
-
- auth_size = (op->sym->auth.data.offset >> 3) +
- (op->sym->auth.data.length >> 3);
- cipher_size = (op->sym->cipher.data.offset >> 3) +
- (op->sym->cipher.data.length >> 3);
-
- if (auth_size < cipher_size)
- return rte_pktmbuf_mtod_offset(op->sym->m_src,
- uint8_t *, auth_size);
-
- return NULL;
-}
-
-/** Encrypt/decrypt mbufs with same cipher key. */
-static uint8_t
-process_snow3g_cipher_op(struct ipsec_mb_qp *qp, struct rte_crypto_op **ops,
- struct snow3g_session *session,
- uint8_t num_ops)
-{
- uint32_t i;
- uint8_t processed_ops = 0;
- const void *src[SNOW3G_MAX_BURST] = {NULL};
- void *dst[SNOW3G_MAX_BURST] = {NULL};
- uint8_t *digest_appended[SNOW3G_MAX_BURST] = {NULL};
- const void *iv[SNOW3G_MAX_BURST] = {NULL};
- uint32_t num_bytes[SNOW3G_MAX_BURST] = {0};
- uint32_t cipher_off, cipher_len;
- int unencrypted_bytes = 0;
-
- for (i = 0; i < num_ops; i++) {
-
- cipher_off = ops[i]->sym->cipher.data.offset >> 3;
- cipher_len = ops[i]->sym->cipher.data.length >> 3;
- src[i] = rte_pktmbuf_mtod_offset(
- ops[i]->sym->m_src, uint8_t *, cipher_off);
-
- /* If out-of-place operation */
- if (ops[i]->sym->m_dst &&
- ops[i]->sym->m_src != ops[i]->sym->m_dst) {
- dst[i] = rte_pktmbuf_mtod_offset(
- ops[i]->sym->m_dst, uint8_t *, cipher_off);
-
- /* In case of out-of-place, auth-cipher operation
- * with partial encryption of the digest, copy
- * the remaining, unencrypted part.
- */
- if (session->op == IPSEC_MB_OP_HASH_VERIFY_THEN_DECRYPT
- || session->op == IPSEC_MB_OP_HASH_GEN_THEN_ENCRYPT)
- unencrypted_bytes =
- (ops[i]->sym->auth.data.offset >> 3) +
- (ops[i]->sym->auth.data.length >> 3) +
- (SNOW3G_DIGEST_LENGTH) -
- cipher_off - cipher_len;
- if (unencrypted_bytes > 0)
- rte_memcpy(
- rte_pktmbuf_mtod_offset(
- ops[i]->sym->m_dst, uint8_t *,
- cipher_off + cipher_len),
- rte_pktmbuf_mtod_offset(
- ops[i]->sym->m_src, uint8_t *,
- cipher_off + cipher_len),
- unencrypted_bytes);
- } else
- dst[i] = rte_pktmbuf_mtod_offset(ops[i]->sym->m_src,
- uint8_t *, cipher_off);
-
- iv[i] = rte_crypto_op_ctod_offset(ops[i], uint8_t *,
- session->cipher_iv_offset);
- num_bytes[i] = cipher_len;
- processed_ops++;
- }
-
- IMB_SNOW3G_F8_N_BUFFER(qp->mb_mgr, &session->pKeySched_cipher, iv,
- src, dst, num_bytes, processed_ops);
-
- /* Take care of the raw digest data in src buffer */
- for (i = 0; i < num_ops; i++) {
- if ((session->op == IPSEC_MB_OP_HASH_VERIFY_THEN_DECRYPT ||
- session->op == IPSEC_MB_OP_HASH_GEN_THEN_ENCRYPT) &&
- ops[i]->sym->m_dst != NULL) {
- digest_appended[i] =
- snow3g_digest_appended_in_src(ops[i]);
- /* Clear unencrypted digest from
- * the src buffer
- */
- if (digest_appended[i] != NULL)
- memset(digest_appended[i],
- 0, SNOW3G_DIGEST_LENGTH);
- }
- }
- return processed_ops;
-}
-
-/** Encrypt/decrypt mbuf (bit level function). */
-static uint8_t
-process_snow3g_cipher_op_bit(struct ipsec_mb_qp *qp,
- struct rte_crypto_op *op,
- struct snow3g_session *session)
-{
- uint8_t *src, *dst;
- uint8_t *iv;
- uint32_t length_in_bits, offset_in_bits;
- int unencrypted_bytes = 0;
-
- offset_in_bits = op->sym->cipher.data.offset;
- src = rte_pktmbuf_mtod(op->sym->m_src, uint8_t *);
- if (op->sym->m_dst == NULL) {
- op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
- IPSEC_MB_LOG(ERR, "bit-level in-place not supported\n");
- return 0;
- }
- length_in_bits = op->sym->cipher.data.length;
- dst = rte_pktmbuf_mtod(op->sym->m_dst, uint8_t *);
- /* In case of out-of-place, auth-cipher operation
- * with partial encryption of the digest, copy
- * the remaining, unencrypted part.
- */
- if (session->op == IPSEC_MB_OP_HASH_VERIFY_THEN_DECRYPT ||
- session->op == IPSEC_MB_OP_HASH_GEN_THEN_ENCRYPT)
- unencrypted_bytes =
- (op->sym->auth.data.offset >> 3) +
- (op->sym->auth.data.length >> 3) +
- (SNOW3G_DIGEST_LENGTH) -
- (offset_in_bits >> 3) -
- (length_in_bits >> 3);
- if (unencrypted_bytes > 0)
- rte_memcpy(
- rte_pktmbuf_mtod_offset(
- op->sym->m_dst, uint8_t *,
- (length_in_bits >> 3)),
- rte_pktmbuf_mtod_offset(
- op->sym->m_src, uint8_t *,
- (length_in_bits >> 3)),
- unencrypted_bytes);
-
- iv = rte_crypto_op_ctod_offset(op, uint8_t *,
- session->cipher_iv_offset);
-
- IMB_SNOW3G_F8_1_BUFFER_BIT(qp->mb_mgr, &session->pKeySched_cipher, iv,
- src, dst, length_in_bits, offset_in_bits);
-
- return 1;
-}
-
-/** Generate/verify hash from mbufs with same hash key. */
-static int
-process_snow3g_hash_op(struct ipsec_mb_qp *qp, struct rte_crypto_op **ops,
- struct snow3g_session *session,
- uint8_t num_ops)
-{
- uint32_t i;
- uint8_t processed_ops = 0;
- uint8_t *src, *dst;
- uint32_t length_in_bits;
- uint8_t *iv;
- uint8_t digest_appended = 0;
- struct snow3g_qp_data *qp_data = ipsec_mb_get_qp_private_data(qp);
-
- for (i = 0; i < num_ops; i++) {
- /* Data must be byte aligned */
- if ((ops[i]->sym->auth.data.offset % BYTE_LEN) != 0) {
- ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
- IPSEC_MB_LOG(ERR, "Offset");
- break;
- }
-
- dst = NULL;
-
- length_in_bits = ops[i]->sym->auth.data.length;
-
- src = rte_pktmbuf_mtod_offset(ops[i]->sym->m_src, uint8_t *,
- (ops[i]->sym->auth.data.offset >> 3));
- iv = rte_crypto_op_ctod_offset(ops[i], uint8_t *,
- session->auth_iv_offset);
-
- if (session->auth_op == RTE_CRYPTO_AUTH_OP_VERIFY) {
- dst = qp_data->temp_digest;
- /* Handle auth cipher verify oop case*/
- if ((session->op ==
- IPSEC_MB_OP_ENCRYPT_THEN_HASH_GEN ||
- session->op ==
- IPSEC_MB_OP_DECRYPT_THEN_HASH_VERIFY) &&
- ops[i]->sym->m_dst != NULL)
- src = rte_pktmbuf_mtod_offset(
- ops[i]->sym->m_dst, uint8_t *,
- ops[i]->sym->auth.data.offset >> 3);
-
- IMB_SNOW3G_F9_1_BUFFER(qp->mb_mgr,
- &session->pKeySched_hash,
- iv, src, length_in_bits, dst);
- /* Verify digest. */
- if (memcmp(dst, ops[i]->sym->auth.digest.data,
- SNOW3G_DIGEST_LENGTH) != 0)
- ops[i]->status =
- RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
- } else {
- if (session->op ==
- IPSEC_MB_OP_HASH_VERIFY_THEN_DECRYPT ||
- session->op ==
- IPSEC_MB_OP_HASH_GEN_THEN_ENCRYPT)
- dst = snow3g_digest_appended_in_src(ops[i]);
-
- if (dst != NULL)
- digest_appended = 1;
- else
- dst = ops[i]->sym->auth.digest.data;
-
- IMB_SNOW3G_F9_1_BUFFER(qp->mb_mgr,
- &session->pKeySched_hash,
- iv, src, length_in_bits, dst);
-
- /* Copy back digest from src to auth.digest.data */
- if (digest_appended)
- rte_memcpy(ops[i]->sym->auth.digest.data,
- dst, SNOW3G_DIGEST_LENGTH);
- }
- processed_ops++;
- }
-
- return processed_ops;
-}
-
-/** Process a batch of crypto ops which shares the same session. */
-static int
-process_ops(struct rte_crypto_op **ops, struct snow3g_session *session,
- struct ipsec_mb_qp *qp, uint8_t num_ops)
-{
- uint32_t i;
- uint32_t processed_ops;
-
-#ifdef RTE_LIBRTE_PMD_SNOW3G_DEBUG
- for (i = 0; i < num_ops; i++) {
- if (!rte_pktmbuf_is_contiguous(ops[i]->sym->m_src) ||
- (ops[i]->sym->m_dst != NULL &&
- !rte_pktmbuf_is_contiguous(
- ops[i]->sym->m_dst))) {
- IPSEC_MB_LOG(ERR,
- "PMD supports only contiguous mbufs, "
- "op (%p) provides noncontiguous mbuf as "
- "source/destination buffer.\n", ops[i]);
- ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
- return 0;
- }
- }
-#endif
-
- switch (session->op) {
- case IPSEC_MB_OP_ENCRYPT_ONLY:
- case IPSEC_MB_OP_DECRYPT_ONLY:
- processed_ops = process_snow3g_cipher_op(qp, ops,
- session, num_ops);
- break;
- case IPSEC_MB_OP_HASH_GEN_ONLY:
- case IPSEC_MB_OP_HASH_VERIFY_ONLY:
- processed_ops = process_snow3g_hash_op(qp, ops, session,
- num_ops);
- break;
- case IPSEC_MB_OP_ENCRYPT_THEN_HASH_GEN:
- case IPSEC_MB_OP_DECRYPT_THEN_HASH_VERIFY:
- processed_ops = process_snow3g_cipher_op(qp, ops, session,
- num_ops);
- process_snow3g_hash_op(qp, ops, session, processed_ops);
- break;
- case IPSEC_MB_OP_HASH_VERIFY_THEN_DECRYPT:
- case IPSEC_MB_OP_HASH_GEN_THEN_ENCRYPT:
- processed_ops = process_snow3g_hash_op(qp, ops, session,
- num_ops);
- process_snow3g_cipher_op(qp, ops, session, processed_ops);
- break;
- default:
- /* Operation not supported. */
- processed_ops = 0;
- }
-
- for (i = 0; i < num_ops; i++) {
- /*
- * If there was no error/authentication failure,
- * change status to successful.
- */
- if (ops[i]->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
- ops[i]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
- /* Free session if a session-less crypto op. */
- if (ops[i]->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
- memset(session, 0, sizeof(struct snow3g_session));
- rte_mempool_put(qp->sess_mp, ops[i]->sym->session);
- ops[i]->sym->session = NULL;
- }
- }
- return processed_ops;
-}
-
-/** Process a crypto op with length/offset in bits. */
-static int
-process_op_bit(struct rte_crypto_op *op, struct snow3g_session *session,
- struct ipsec_mb_qp *qp)
-{
- unsigned int processed_op;
- int ret;
-
- switch (session->op) {
- case IPSEC_MB_OP_ENCRYPT_ONLY:
- case IPSEC_MB_OP_DECRYPT_ONLY:
-
- processed_op = process_snow3g_cipher_op_bit(qp, op,
- session);
- break;
- case IPSEC_MB_OP_HASH_GEN_ONLY:
- case IPSEC_MB_OP_HASH_VERIFY_ONLY:
- processed_op = process_snow3g_hash_op(qp, &op, session, 1);
- break;
- case IPSEC_MB_OP_ENCRYPT_THEN_HASH_GEN:
- case IPSEC_MB_OP_DECRYPT_THEN_HASH_VERIFY:
- processed_op = process_snow3g_cipher_op_bit(qp, op, session);
- if (processed_op == 1)
- process_snow3g_hash_op(qp, &op, session, 1);
- break;
- case IPSEC_MB_OP_HASH_VERIFY_THEN_DECRYPT:
- case IPSEC_MB_OP_HASH_GEN_THEN_ENCRYPT:
- processed_op = process_snow3g_hash_op(qp, &op, session, 1);
- if (processed_op == 1)
- process_snow3g_cipher_op_bit(qp, op, session);
- break;
- default:
- /* Operation not supported. */
- processed_op = 0;
- }
-
- /*
- * If there was no error/authentication failure,
- * change status to successful.
- */
- if (op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
- op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
-
- /* Free session if a session-less crypto op. */
- if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
- memset(CRYPTODEV_GET_SYM_SESS_PRIV(op->sym->session), 0,
- sizeof(struct snow3g_session));
- rte_mempool_put(qp->sess_mp, (void *)op->sym->session);
- op->sym->session = NULL;
- }
-
- if (unlikely(processed_op != 1))
- return 0;
-
- ret = rte_ring_enqueue(qp->ingress_queue, op);
- if (ret != 0)
- return ret;
-
- return 1;
-}
-
-static uint16_t
-snow3g_pmd_dequeue_burst(void *queue_pair,
- struct rte_crypto_op **ops, uint16_t nb_ops)
-{
- struct ipsec_mb_qp *qp = queue_pair;
- struct rte_crypto_op *c_ops[SNOW3G_MAX_BURST];
- struct rte_crypto_op *curr_c_op;
-
- struct snow3g_session *prev_sess = NULL, *curr_sess = NULL;
- uint32_t i;
- uint8_t burst_size = 0;
- uint8_t processed_ops;
- uint32_t nb_dequeued;
-
- nb_dequeued = rte_ring_dequeue_burst(qp->ingress_queue,
- (void **)ops, nb_ops, NULL);
-
- for (i = 0; i < nb_dequeued; i++) {
- curr_c_op = ops[i];
-
- /* Set status as enqueued (not processed yet) by default. */
- curr_c_op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
-
- curr_sess = ipsec_mb_get_session_private(qp, curr_c_op);
- if (unlikely(curr_sess == NULL ||
- curr_sess->op == IPSEC_MB_OP_NOT_SUPPORTED)) {
- curr_c_op->status =
- RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
- break;
- }
-
- /* If length/offset is at bit-level,
- * process this buffer alone.
- */
- if (((curr_c_op->sym->cipher.data.length % BYTE_LEN) != 0)
- || ((curr_c_op->sym->cipher.data.offset
- % BYTE_LEN) != 0)) {
- /* Process the ops of the previous session. */
- if (prev_sess != NULL) {
- processed_ops = process_ops(c_ops, prev_sess,
- qp, burst_size);
- if (processed_ops < burst_size) {
- burst_size = 0;
- break;
- }
-
- burst_size = 0;
- prev_sess = NULL;
- }
-
- processed_ops = process_op_bit(curr_c_op, curr_sess, qp);
- if (processed_ops != 1)
- break;
-
- continue;
- }
-
- /* Batch ops that share the same session. */
- if (prev_sess == NULL) {
- prev_sess = curr_sess;
- c_ops[burst_size++] = curr_c_op;
- } else if (curr_sess == prev_sess) {
- c_ops[burst_size++] = curr_c_op;
- /*
- * When there are enough ops to process in a batch,
- * process them, and start a new batch.
- */
- if (burst_size == SNOW3G_MAX_BURST) {
- processed_ops = process_ops(c_ops, prev_sess,
- qp, burst_size);
- if (processed_ops < burst_size) {
- burst_size = 0;
- break;
- }
-
- burst_size = 0;
- prev_sess = NULL;
- }
- } else {
- /*
- * Different session, process the ops
- * of the previous session.
- */
- processed_ops = process_ops(c_ops, prev_sess,
- qp, burst_size);
- if (processed_ops < burst_size) {
- burst_size = 0;
- break;
- }
-
- burst_size = 0;
- prev_sess = curr_sess;
-
- c_ops[burst_size++] = curr_c_op;
- }
- }
-
- if (burst_size != 0) {
- /* Process the crypto ops of the last session. */
- processed_ops = process_ops(c_ops, prev_sess,
- qp, burst_size);
- }
-
- qp->stats.dequeued_count += i;
- return i;
-}
+#include "pmd_aesni_mb_priv.h"
struct rte_cryptodev_ops snow3g_pmd_ops = {
.dev_configure = ipsec_mb_config,
@@ -586,7 +54,7 @@ RTE_INIT(ipsec_mb_register_snow3g)
= &ipsec_mb_pmds[IPSEC_MB_PMD_TYPE_SNOW3G];
snow3g_data->caps = snow3g_capabilities;
- snow3g_data->dequeue_burst = snow3g_pmd_dequeue_burst;
+ snow3g_data->dequeue_burst = aesni_mb_dequeue_burst;
snow3g_data->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
RTE_CRYPTODEV_FF_NON_BYTE_ALIGNED_DATA |
@@ -595,7 +63,8 @@ RTE_INIT(ipsec_mb_register_snow3g)
RTE_CRYPTODEV_FF_DIGEST_ENCRYPTED;
snow3g_data->internals_priv_size = 0;
snow3g_data->ops = &snow3g_pmd_ops;
- snow3g_data->qp_priv_size = sizeof(struct snow3g_qp_data);
- snow3g_data->session_configure = snow3g_session_configure;
- snow3g_data->session_priv_size = sizeof(struct snow3g_session);
+ snow3g_data->qp_priv_size = sizeof(struct aesni_mb_qp_data);
+ snow3g_data->session_configure = aesni_mb_session_configure;
+ snow3g_data->session_priv_size =
+ sizeof(struct aesni_mb_session);
}
diff --git a/drivers/crypto/ipsec_mb/pmd_snow3g_priv.h b/drivers/crypto/ipsec_mb/pmd_snow3g_priv.h
index ca1ce7f9d6..3ceb33b602 100644
--- a/drivers/crypto/ipsec_mb/pmd_snow3g_priv.h
+++ b/drivers/crypto/ipsec_mb/pmd_snow3g_priv.h
@@ -8,10 +8,7 @@
#include "ipsec_mb_private.h"
#define SNOW3G_IV_LENGTH 16
-#define SNOW3G_MAX_BURST 8
-#define BYTE_LEN 8
#define SNOW3G_DIGEST_LENGTH 4
-#define SNOW3G_MAX_KEY_SIZE 128
uint8_t pmd_driver_id_snow3g;
@@ -64,22 +61,4 @@ static const struct rte_cryptodev_capabilities snow3g_capabilities[] = {
RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
};
-/** SNOW 3G private session structure */
-struct snow3g_session {
- enum ipsec_mb_operation op;
- enum rte_crypto_auth_operation auth_op;
- snow3g_key_schedule_t pKeySched_cipher;
- snow3g_key_schedule_t pKeySched_hash;
- uint16_t cipher_iv_offset;
- uint16_t auth_iv_offset;
-} __rte_cache_aligned;
-
-struct snow3g_qp_data {
- uint8_t temp_digest[SNOW3G_DIGEST_LENGTH];
- /**< Buffer used to store the digest generated
- * by the driver when verifying a digest provided
- * by the user (using authentication verify operation)
- */
-};
-
#endif /* _PMD_SNOW3G_PRIV_H_ */
diff --git a/drivers/crypto/ipsec_mb/pmd_zuc.c b/drivers/crypto/ipsec_mb/pmd_zuc.c
index 44781be1d1..b72191c7a7 100644
--- a/drivers/crypto/ipsec_mb/pmd_zuc.c
+++ b/drivers/crypto/ipsec_mb/pmd_zuc.c
@@ -3,343 +3,7 @@
*/
#include "pmd_zuc_priv.h"
-
-/** Parse crypto xform chain and set private session parameters. */
-static int
-zuc_session_configure(__rte_unused IMB_MGR * mgr, void *zuc_sess,
- const struct rte_crypto_sym_xform *xform)
-{
- struct zuc_session *sess = (struct zuc_session *) zuc_sess;
- const struct rte_crypto_sym_xform *auth_xform = NULL;
- const struct rte_crypto_sym_xform *cipher_xform = NULL;
- enum ipsec_mb_operation mode;
- /* Select Crypto operation - hash then cipher / cipher then hash */
- int ret = ipsec_mb_parse_xform(xform, &mode, &auth_xform,
- &cipher_xform, NULL);
-
- if (ret)
- return ret;
-
- if (cipher_xform) {
- /* Only ZUC EEA3 supported */
- if (cipher_xform->cipher.algo != RTE_CRYPTO_CIPHER_ZUC_EEA3)
- return -ENOTSUP;
-
- if (cipher_xform->cipher.iv.length != ZUC_IV_KEY_LENGTH) {
- IPSEC_MB_LOG(ERR, "Wrong IV length");
- return -EINVAL;
- }
- sess->cipher_iv_offset = cipher_xform->cipher.iv.offset;
-
- /* Copy the key */
- memcpy(sess->pKey_cipher, cipher_xform->cipher.key.data,
- ZUC_IV_KEY_LENGTH);
- }
-
- if (auth_xform) {
- /* Only ZUC EIA3 supported */
- if (auth_xform->auth.algo != RTE_CRYPTO_AUTH_ZUC_EIA3)
- return -ENOTSUP;
-
- if (auth_xform->auth.digest_length != ZUC_DIGEST_LENGTH) {
- IPSEC_MB_LOG(ERR, "Wrong digest length");
- return -EINVAL;
- }
-
- sess->auth_op = auth_xform->auth.op;
-
- if (auth_xform->auth.iv.length != ZUC_IV_KEY_LENGTH) {
- IPSEC_MB_LOG(ERR, "Wrong IV length");
- return -EINVAL;
- }
- sess->auth_iv_offset = auth_xform->auth.iv.offset;
-
- /* Copy the key */
- memcpy(sess->pKey_hash, auth_xform->auth.key.data,
- ZUC_IV_KEY_LENGTH);
- }
-
- sess->op = mode;
- return 0;
-}
-
-/** Encrypt/decrypt mbufs. */
-static uint8_t
-process_zuc_cipher_op(struct ipsec_mb_qp *qp, struct rte_crypto_op **ops,
- struct zuc_session **sessions,
- uint8_t num_ops)
-{
- unsigned int i;
- uint8_t processed_ops = 0;
- const void *src[ZUC_MAX_BURST];
- void *dst[ZUC_MAX_BURST];
- const void *iv[ZUC_MAX_BURST];
- uint32_t num_bytes[ZUC_MAX_BURST];
- const void *cipher_keys[ZUC_MAX_BURST];
- struct zuc_session *sess;
-
- for (i = 0; i < num_ops; i++) {
- if (((ops[i]->sym->cipher.data.length % BYTE_LEN) != 0)
- || ((ops[i]->sym->cipher.data.offset
- % BYTE_LEN) != 0)) {
- ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
- IPSEC_MB_LOG(ERR, "Data Length or offset");
- break;
- }
-
- sess = sessions[i];
-
-#ifdef RTE_LIBRTE_PMD_ZUC_DEBUG
- if (!rte_pktmbuf_is_contiguous(ops[i]->sym->m_src) ||
- (ops[i]->sym->m_dst != NULL &&
- !rte_pktmbuf_is_contiguous(
- ops[i]->sym->m_dst))) {
- IPSEC_MB_LOG(ERR, "PMD supports only "
- " contiguous mbufs, op (%p) "
- "provides noncontiguous mbuf "
- "as source/destination buffer.\n",
- "PMD supports only contiguous mbufs, "
- "op (%p) provides noncontiguous mbuf "
- "as source/destination buffer.\n",
- ops[i]);
- ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
- break;
- }
-#endif
-
- src[i] = rte_pktmbuf_mtod_offset(ops[i]->sym->m_src,
- uint8_t *,
- (ops[i]->sym->cipher.data.offset >> 3));
- dst[i] = ops[i]->sym->m_dst ?
- rte_pktmbuf_mtod_offset(ops[i]->sym->m_dst, uint8_t *,
- (ops[i]->sym->cipher.data.offset >> 3)) :
- rte_pktmbuf_mtod_offset(ops[i]->sym->m_src, uint8_t *,
- (ops[i]->sym->cipher.data.offset >> 3));
- iv[i] = rte_crypto_op_ctod_offset(ops[i], uint8_t *,
- sess->cipher_iv_offset);
- num_bytes[i] = ops[i]->sym->cipher.data.length >> 3;
-
- cipher_keys[i] = sess->pKey_cipher;
-
- processed_ops++;
- }
-
- IMB_ZUC_EEA3_N_BUFFER(qp->mb_mgr, (const void **)cipher_keys,
- (const void **)iv, (const void **)src, (void **)dst,
- num_bytes, processed_ops);
-
- return processed_ops;
-}
-
-/** Generate/verify hash from mbufs. */
-static int
-process_zuc_hash_op(struct ipsec_mb_qp *qp, struct rte_crypto_op **ops,
- struct zuc_session **sessions,
- uint8_t num_ops)
-{
- unsigned int i;
- uint8_t processed_ops = 0;
- uint8_t *src[ZUC_MAX_BURST] = { 0 };
- uint32_t *dst[ZUC_MAX_BURST];
- uint32_t length_in_bits[ZUC_MAX_BURST] = { 0 };
- uint8_t *iv[ZUC_MAX_BURST] = { 0 };
- const void *hash_keys[ZUC_MAX_BURST] = { 0 };
- struct zuc_session *sess;
- struct zuc_qp_data *qp_data = ipsec_mb_get_qp_private_data(qp);
-
-
- for (i = 0; i < num_ops; i++) {
- /* Data must be byte aligned */
- if ((ops[i]->sym->auth.data.offset % BYTE_LEN) != 0) {
- ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
- IPSEC_MB_LOG(ERR, "Offset");
- break;
- }
-
- sess = sessions[i];
-
- length_in_bits[i] = ops[i]->sym->auth.data.length;
-
- src[i] = rte_pktmbuf_mtod_offset(ops[i]->sym->m_src,
- uint8_t *,
- (ops[i]->sym->auth.data.offset >> 3));
- iv[i] = rte_crypto_op_ctod_offset(ops[i], uint8_t *,
- sess->auth_iv_offset);
-
- hash_keys[i] = sess->pKey_hash;
- if (sess->auth_op == RTE_CRYPTO_AUTH_OP_VERIFY)
- dst[i] = (uint32_t *)qp_data->temp_digest[i];
- else
- dst[i] = (uint32_t *)ops[i]->sym->auth.digest.data;
-
- processed_ops++;
- }
-
- IMB_ZUC_EIA3_N_BUFFER(qp->mb_mgr, (const void **)hash_keys,
- (const void * const *)iv, (const void * const *)src,
- length_in_bits, dst, processed_ops);
-
- /*
- * If tag needs to be verified, compare generated tag
- * with attached tag
- */
- for (i = 0; i < processed_ops; i++)
- if (sessions[i]->auth_op == RTE_CRYPTO_AUTH_OP_VERIFY)
- if (memcmp(dst[i], ops[i]->sym->auth.digest.data,
- ZUC_DIGEST_LENGTH) != 0)
- ops[i]->status =
- RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
-
- return processed_ops;
-}
-
-/** Process a batch of crypto ops which shares the same operation type. */
-static int
-process_ops(struct rte_crypto_op **ops, enum ipsec_mb_operation op_type,
- struct zuc_session **sessions,
- struct ipsec_mb_qp *qp, uint8_t num_ops)
-{
- unsigned int i;
- unsigned int processed_ops = 0;
-
- switch (op_type) {
- case IPSEC_MB_OP_ENCRYPT_ONLY:
- case IPSEC_MB_OP_DECRYPT_ONLY:
- processed_ops = process_zuc_cipher_op(qp, ops,
- sessions, num_ops);
- break;
- case IPSEC_MB_OP_HASH_GEN_ONLY:
- case IPSEC_MB_OP_HASH_VERIFY_ONLY:
- processed_ops = process_zuc_hash_op(qp, ops, sessions,
- num_ops);
- break;
- case IPSEC_MB_OP_ENCRYPT_THEN_HASH_GEN:
- case IPSEC_MB_OP_DECRYPT_THEN_HASH_VERIFY:
- processed_ops = process_zuc_cipher_op(qp, ops, sessions,
- num_ops);
- process_zuc_hash_op(qp, ops, sessions, processed_ops);
- break;
- case IPSEC_MB_OP_HASH_VERIFY_THEN_DECRYPT:
- case IPSEC_MB_OP_HASH_GEN_THEN_ENCRYPT:
- processed_ops = process_zuc_hash_op(qp, ops, sessions,
- num_ops);
- process_zuc_cipher_op(qp, ops, sessions, processed_ops);
- break;
- default:
- /* Operation not supported. */
- for (i = 0; i < num_ops; i++)
- ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
- }
-
- for (i = 0; i < num_ops; i++) {
- /*
- * If there was no error/authentication failure,
- * change status to successful.
- */
- if (ops[i]->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
- ops[i]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
- /* Free session if a session-less crypto op. */
- if (ops[i]->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
- memset(sessions[i], 0, sizeof(struct zuc_session));
- rte_mempool_put(qp->sess_mp, ops[i]->sym->session);
- ops[i]->sym->session = NULL;
- }
- }
- return processed_ops;
-}
-
-static uint16_t
-zuc_pmd_dequeue_burst(void *queue_pair,
- struct rte_crypto_op **c_ops, uint16_t nb_ops)
-{
-
- struct rte_crypto_op *curr_c_op;
-
- struct zuc_session *curr_sess;
- struct zuc_session *sessions[ZUC_MAX_BURST];
- struct rte_crypto_op *int_c_ops[ZUC_MAX_BURST];
- enum ipsec_mb_operation prev_zuc_op = IPSEC_MB_OP_NOT_SUPPORTED;
- enum ipsec_mb_operation curr_zuc_op;
- struct ipsec_mb_qp *qp = queue_pair;
- unsigned int nb_dequeued;
- unsigned int i;
- uint8_t burst_size = 0;
- uint8_t processed_ops;
-
- nb_dequeued = rte_ring_dequeue_burst(qp->ingress_queue,
- (void **)c_ops, nb_ops, NULL);
-
-
- for (i = 0; i < nb_dequeued; i++) {
- curr_c_op = c_ops[i];
-
- curr_sess = (struct zuc_session *)
- ipsec_mb_get_session_private(qp, curr_c_op);
- if (unlikely(curr_sess == NULL)) {
- curr_c_op->status =
- RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
- break;
- }
-
- curr_zuc_op = curr_sess->op;
-
- /*
- * Batch ops that share the same operation type
- * (cipher only, auth only...).
- */
- if (burst_size == 0) {
- prev_zuc_op = curr_zuc_op;
- int_c_ops[0] = curr_c_op;
- sessions[0] = curr_sess;
- burst_size++;
- } else if (curr_zuc_op == prev_zuc_op) {
- int_c_ops[burst_size] = curr_c_op;
- sessions[burst_size] = curr_sess;
- burst_size++;
- /*
- * When there are enough ops to process in a batch,
- * process them, and start a new batch.
- */
- if (burst_size == ZUC_MAX_BURST) {
- processed_ops = process_ops(int_c_ops, curr_zuc_op,
- sessions, qp, burst_size);
- if (processed_ops < burst_size) {
- burst_size = 0;
- break;
- }
-
- burst_size = 0;
- }
- } else {
- /*
- * Different operation type, process the ops
- * of the previous type.
- */
- processed_ops = process_ops(int_c_ops, prev_zuc_op,
- sessions, qp, burst_size);
- if (processed_ops < burst_size) {
- burst_size = 0;
- break;
- }
-
- burst_size = 0;
- prev_zuc_op = curr_zuc_op;
-
- int_c_ops[0] = curr_c_op;
- sessions[0] = curr_sess;
- burst_size++;
- }
- }
-
- if (burst_size != 0) {
- /* Process the crypto ops of the last operation type. */
- processed_ops = process_ops(int_c_ops, prev_zuc_op,
- sessions, qp, burst_size);
- }
-
- qp->stats.dequeued_count += i;
- return i;
-}
+#include "pmd_aesni_mb_priv.h"
struct rte_cryptodev_ops zuc_pmd_ops = {
.dev_configure = ipsec_mb_config,
@@ -390,7 +54,7 @@ RTE_INIT(ipsec_mb_register_zuc)
= &ipsec_mb_pmds[IPSEC_MB_PMD_TYPE_ZUC];
zuc_data->caps = zuc_capabilities;
- zuc_data->dequeue_burst = zuc_pmd_dequeue_burst;
+ zuc_data->dequeue_burst = aesni_mb_dequeue_burst;
zuc_data->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO
| RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING
| RTE_CRYPTODEV_FF_NON_BYTE_ALIGNED_DATA
@@ -399,7 +63,8 @@ RTE_INIT(ipsec_mb_register_zuc)
| RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;
zuc_data->internals_priv_size = 0;
zuc_data->ops = &zuc_pmd_ops;
- zuc_data->qp_priv_size = sizeof(struct zuc_qp_data);
- zuc_data->session_configure = zuc_session_configure;
- zuc_data->session_priv_size = sizeof(struct zuc_session);
+ zuc_data->qp_priv_size = sizeof(struct aesni_mb_qp_data);
+ zuc_data->session_configure = aesni_mb_session_configure;
+ zuc_data->session_priv_size =
+ sizeof(struct aesni_mb_session);
}
diff --git a/drivers/crypto/ipsec_mb/pmd_zuc_priv.h b/drivers/crypto/ipsec_mb/pmd_zuc_priv.h
index 76fd6758c2..a1e8e3aade 100644
--- a/drivers/crypto/ipsec_mb/pmd_zuc_priv.h
+++ b/drivers/crypto/ipsec_mb/pmd_zuc_priv.h
@@ -10,7 +10,6 @@
#define ZUC_IV_KEY_LENGTH 16
#define ZUC_DIGEST_LENGTH 4
#define ZUC_MAX_BURST 16
-#define BYTE_LEN 8
uint8_t pmd_driver_id_zuc;
@@ -63,23 +62,4 @@ static const struct rte_cryptodev_capabilities zuc_capabilities[] = {
RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
};
-/** ZUC private session structure */
-struct zuc_session {
- enum ipsec_mb_operation op;
- enum rte_crypto_auth_operation auth_op;
- uint8_t pKey_cipher[ZUC_IV_KEY_LENGTH];
- uint8_t pKey_hash[ZUC_IV_KEY_LENGTH];
- uint16_t cipher_iv_offset;
- uint16_t auth_iv_offset;
-} __rte_cache_aligned;
-
-struct zuc_qp_data {
-
- uint8_t temp_digest[ZUC_MAX_BURST][ZUC_DIGEST_LENGTH];
- /* *< Buffers used to store the digest generated
- * by the driver when verifying a digest provided
- * by the user (using authentication verify operation)
- */
-};
-
#endif /* _PMD_ZUC_PRIV_H_ */
--
2.25.1
^ permalink raw reply [flat|nested] 45+ messages in thread
* RE: [PATCH v4] crypto/ipsec_mb: unified IPsec MB interface
2024-02-28 11:33 ` [PATCH v4] " Brian Dooley
@ 2024-02-28 11:50 ` Power, Ciara
2024-02-29 16:23 ` Dooley, Brian
2024-03-05 15:21 ` Wathsala Wathawana Vithanage
2 siblings, 0 replies; 45+ messages in thread
From: Power, Ciara @ 2024-02-28 11:50 UTC (permalink / raw)
To: Dooley, Brian, Ji, Kai, De Lara Guarch, Pablo; +Cc: dev, gakhil, Dooley, Brian
> -----Original Message-----
> From: Brian Dooley <brian.dooley@intel.com>
> Sent: Wednesday, February 28, 2024 11:33 AM
> To: Ji, Kai <kai.ji@intel.com>; De Lara Guarch, Pablo
> <pablo.de.lara.guarch@intel.com>
> Cc: dev@dpdk.org; gakhil@marvell.com; Dooley, Brian
> <brian.dooley@intel.com>
> Subject: [PATCH v4] crypto/ipsec_mb: unified IPsec MB interface
>
> Currently IPsec MB provides both the JOB API and direct API.
> AESNI_MB PMD is using the JOB API codepath while ZUC, KASUMI, SNOW3G
> and CHACHA20_POLY1305 are using the direct API.
> Instead of using the direct API for these PMDs, they should now make
> use of the JOB API codepath. This would remove all use of the IPsec MB
> direct API for these PMDs.
>
> Signed-off-by: Brian Dooley <brian.dooley@intel.com>
> ---
> v2:
> - Fix compilation failure
> v3:
> - Remove session configure pointer for each PMD
> v4:
> - Keep AES GCM PMD and fix extern issue
> ---
<snip>
Acked-by: Ciara Power <ciara.power@intel.com>
^ permalink raw reply [flat|nested] 45+ messages in thread
* RE: [PATCH v4] crypto/ipsec_mb: unified IPsec MB interface
2024-02-28 11:33 ` [PATCH v4] " Brian Dooley
2024-02-28 11:50 ` Power, Ciara
@ 2024-02-29 16:23 ` Dooley, Brian
2024-02-29 16:32 ` Akhil Goyal
2024-03-05 15:21 ` Wathsala Wathawana Vithanage
2 siblings, 1 reply; 45+ messages in thread
From: Dooley, Brian @ 2024-02-29 16:23 UTC (permalink / raw)
To: Ji, Kai, De Lara Guarch, Pablo, Power, Ciara
Cc: dev, gakhil, wathsala.vithanage, ruifeng.wang,
honnappa.nagarahalli, Jack.Bond-Preston
Hi folks,
The introduction of a more unified IPsec MB library for DPDK is causing the snow3g tests to fail on ARM. Artifact here: https://lab.dpdk.org/results/dashboard/patchsets/29315/
PMDs that previously used the direct API (KASUMI, CHACHA, ZUC, SNOW3G) will now use the job API from the AESNI MB PMD code.
We have come across a similar issue in the past related to offsets, as SNOW3G uses bits instead of bytes.
commit a501609ea6466ed8526c0dfadedee332a4d4a451
Author: Pablo de Lara pablo.de.lara.guarch@intel.com
Date: Wed Feb 23 16:01:16 2022 +0000
crypto/ipsec_mb: fix length and offset settings
KASUMI, SNOW3G and ZUC require lengths and offsets to
be set in bits or bytes depending on the algorithm.
There were some algorithms that were mixing these two,
so this commit is fixing this issue.
This bug only appeared recently, when the ARM ipsec-mb version was bumped to 1.4. It appears a similar scenario could be happening now, and this is a potential fix that needs to be made in the ARM IPsec-mb repo:
diff --git a/lib/aarch64/mb_mgr_snow3g_submit_flush_common_aarch64.h b/lib/aarch64/mb_mgr_snow3g_submit_flush_common_aarch64.h
index 13bca11b..de284ade 100644
--- a/lib/aarch64/mb_mgr_snow3g_submit_flush_common_aarch64.h
+++ b/lib/aarch64/mb_mgr_snow3g_submit_flush_common_aarch64.h
@@ -94,8 +94,8 @@ static void snow3g_mb_mgr_insert_uea2_job(MB_MGR_SNOW3G_OOO *state, IMB_JOB *job
state->num_lanes_inuse++;
state->args.iv[used_lane_idx] = job->iv;
state->args.keys[used_lane_idx] = job->enc_keys;
- state->args.in[used_lane_idx] = job->src + job->cipher_start_src_offset_in_bytes;
- state->args.out[used_lane_idx] = job->dst;
+ state->args.in[used_lane_idx] = job->src + (job->cipher_start_src_offset_in_bits / 8);
+ state->args.out[used_lane_idx] = job->dst + (job->cipher_start_src_offset_in_bits / 8);
state->args.byte_length[used_lane_idx] = job->msg_len_to_cipher_in_bits / 8;
state->args.INITIALIZED[used_lane_idx] = 0;
state->lens[used_lane_idx] = job->msg_len_to_cipher_in_bits / 8;
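For context, a minimal standalone sketch of the conversion the fix above relies on (the struct and function names below are illustrative only, not the IPsec-mb API): with the JOB API, SNOW3G-UEA2 cipher offsets and lengths arrive in bits, so the submit path has to divide by 8 before doing any pointer arithmetic on src/dst or computing the byte length.

#include <stdint.h>

/* Illustrative stand-in for the job fields involved; not the IMB_JOB layout. */
struct demo_snow3g_job {
        const uint8_t *src;
        uint8_t *dst;
        uint64_t cipher_start_src_offset_in_bits;
        uint64_t msg_len_to_cipher_in_bits;
};

static inline void
demo_insert_uea2_job(const struct demo_snow3g_job *job,
                const uint8_t **in, uint8_t **out, uint32_t *byte_len)
{
        /* Bit offset -> byte offset, applied to both input and output,
         * mirroring the proposed aarch64 change above. */
        *in  = job->src + (job->cipher_start_src_offset_in_bits / 8);
        *out = job->dst + (job->cipher_start_src_offset_in_bits / 8);
        *byte_len = (uint32_t)(job->msg_len_to_cipher_in_bits / 8);
}

Note the same divide-by-8 also has to be applied to the output pointer, which the current aarch64 code leaves at job->dst.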
Thanks,
Brian
> -----Original Message-----
> From: Dooley, Brian <brian.dooley@intel.com>
> Sent: Wednesday, February 28, 2024 11:33 AM
> To: Ji, Kai <kai.ji@intel.com>; De Lara Guarch, Pablo
> <pablo.de.lara.guarch@intel.com>
> Cc: dev@dpdk.org; gakhil@marvell.com; Dooley, Brian
> <brian.dooley@intel.com>
> Subject: [PATCH v4] crypto/ipsec_mb: unified IPsec MB interface
>
> Currently IPsec MB provides both the JOB API and direct API.
> AESNI_MB PMD is using the JOB API codepath while ZUC, KASUMI, SNOW3G
> and CHACHA20_POLY1305 are using the direct API.
> Instead of using the direct API for these PMDs, they should now make
> use of the JOB API codepath. This would remove all use of the IPsec MB
> direct API for these PMDs.
>
> Signed-off-by: Brian Dooley <brian.dooley@intel.com>
> ---
> v2:
> - Fix compilation failure
> v3:
> - Remove session configure pointer for each PMD
> v4:
> - Keep AES GCM PMD and fix extern issue
> ---
> doc/guides/rel_notes/release_24_03.rst | 6 +
> drivers/crypto/ipsec_mb/pmd_aesni_mb.c | 10 +-
> drivers/crypto/ipsec_mb/pmd_aesni_mb_priv.h | 15 +-
> drivers/crypto/ipsec_mb/pmd_chacha_poly.c | 338 +----------
> .../crypto/ipsec_mb/pmd_chacha_poly_priv.h | 28 -
> drivers/crypto/ipsec_mb/pmd_kasumi.c | 410 +------------
> drivers/crypto/ipsec_mb/pmd_kasumi_priv.h | 20 -
> drivers/crypto/ipsec_mb/pmd_snow3g.c | 543 +-----------------
> drivers/crypto/ipsec_mb/pmd_snow3g_priv.h | 21 -
> drivers/crypto/ipsec_mb/pmd_zuc.c | 347 +----------
> drivers/crypto/ipsec_mb/pmd_zuc_priv.h | 20 -
> 11 files changed, 48 insertions(+), 1710 deletions(-)
>
<snip>
^ permalink raw reply [flat|nested] 45+ messages in thread
* RE: [PATCH v4] crypto/ipsec_mb: unified IPsec MB interface
2024-02-29 16:23 ` Dooley, Brian
@ 2024-02-29 16:32 ` Akhil Goyal
2024-03-04 7:33 ` Akhil Goyal
0 siblings, 1 reply; 45+ messages in thread
From: Akhil Goyal @ 2024-02-29 16:32 UTC (permalink / raw)
To: Dooley, Brian, Ji, Kai, De Lara Guarch, Pablo, Power, Ciara
Cc: dev, wathsala.vithanage, ruifeng.wang, honnappa.nagarahalli,
Jack.Bond-Preston
> Hi folks,
>
> The introduction of a more unified IPsec MB library for DPDK is causing the
> snow3g tests to fail on ARM. Artifact here:
> https://lab.dpdk.org/results/dashboard/patchsets/29315/
> PMDs using the direct API (KASUMI, CHACHA, ZUC, SNOW3G) will use the job API,
> from the AESNI MB PMD code.
> We have come across a similar issue in the past that related to an offset issue as
> SNOW3G uses bits instead of bytes.
The above link does not seem to be working.
I believe from now on, since we continue to maintain two separate repos,
it would be better to get an ack from the Arm folks as well
before merging anything onto crypto/ipsec_mb PMD.
Arm folks, could you please get the below change tested/incorporated in the repo?
>
> commit a501609ea6466ed8526c0dfadedee332a4d4a451
> Author: Pablo de Lara pablo.de.lara.guarch@intel.com
> Date: Wed Feb 23 16:01:16 2022 +0000
>
> crypto/ipsec_mb: fix length and offset settings
>
> KASUMI, SNOW3G and ZUC require lengths and offsets to
> be set in bits or bytes depending on the algorithm.
> There were some algorithms that were mixing these two,
> so this commit is fixing this issue.
>
> This bug only appeared recently when the ARM ipsec version was bumped to 1.4.
> It appears there could be a similar scenario happening now and this is a potential
> fix that needs to be made in the ARM IPsec-mb repo:
>
> diff --git a/lib/aarch64/mb_mgr_snow3g_submit_flush_common_aarch64.h
> b/lib/aarch64/mb_mgr_snow3g_submit_flush_common_aarch64.h
> index 13bca11b..de284ade 100644
> --- a/lib/aarch64/mb_mgr_snow3g_submit_flush_common_aarch64.h
> +++ b/lib/aarch64/mb_mgr_snow3g_submit_flush_common_aarch64.h
> @@ -94,8 +94,8 @@ static void
> snow3g_mb_mgr_insert_uea2_job(MB_MGR_SNOW3G_OOO *state, IMB_JOB
> *job
> state->num_lanes_inuse++;
> state->args.iv[used_lane_idx] = job->iv;
> state->args.keys[used_lane_idx] = job->enc_keys;
> - state->args.in[used_lane_idx] = job->src + job-
> >cipher_start_src_offset_in_bytes;
> - state->args.out[used_lane_idx] = job->dst;
> + state->args.in[used_lane_idx] = job->src + (job-
> >cipher_start_src_offset_in_bits / 8);
> + state->args.out[used_lane_idx] = job->dst + (job-
> >cipher_start_src_offset_in_bits / 8);
> state->args.byte_length[used_lane_idx] = job->msg_len_to_cipher_in_bits / 8;
> state->args.INITIALIZED[used_lane_idx] = 0;
> state->lens[used_lane_idx] = job->msg_len_to_cipher_in_bits / 8;
>
> Thanks,
> Brian
>
> > -----Original Message-----
> > From: Dooley, Brian <brian.dooley@intel.com>
> > Sent: Wednesday, February 28, 2024 11:33 AM
> > To: Ji, Kai <kai.ji@intel.com>; De Lara Guarch, Pablo
> > <pablo.de.lara.guarch@intel.com>
> > Cc: dev@dpdk.org; gakhil@marvell.com; Dooley, Brian
> > <brian.dooley@intel.com>
> > Subject: [PATCH v4] crypto/ipsec_mb: unified IPsec MB interface
> >
> > Currently IPsec MB provides both the JOB API and direct API.
> > AESNI_MB PMD is using the JOB API codepath while ZUC, KASUMI, SNOW3G
> > and CHACHA20_POLY1305 are using the direct API.
> > Instead of using the direct API for these PMDs, they should now make
> > use of the JOB API codepath. This would remove all use of the IPsec MB
> > direct API for these PMDs.
> >
> > Signed-off-by: Brian Dooley <brian.dooley@intel.com>
> > ---
> > v2:
> > - Fix compilation failure
> > v3:
> > - Remove session configure pointer for each PMD
> > v4:
> > - Keep AES GCM PMD and fix extern issue
> > ---
> > doc/guides/rel_notes/release_24_03.rst | 6 +
> > drivers/crypto/ipsec_mb/pmd_aesni_mb.c | 10 +-
> > drivers/crypto/ipsec_mb/pmd_aesni_mb_priv.h | 15 +-
> > drivers/crypto/ipsec_mb/pmd_chacha_poly.c | 338 +----------
> > .../crypto/ipsec_mb/pmd_chacha_poly_priv.h | 28 -
> > drivers/crypto/ipsec_mb/pmd_kasumi.c | 410 +------------
> > drivers/crypto/ipsec_mb/pmd_kasumi_priv.h | 20 -
> > drivers/crypto/ipsec_mb/pmd_snow3g.c | 543 +-----------------
> > drivers/crypto/ipsec_mb/pmd_snow3g_priv.h | 21 -
> > drivers/crypto/ipsec_mb/pmd_zuc.c | 347 +----------
> > drivers/crypto/ipsec_mb/pmd_zuc_priv.h | 20 -
> > 11 files changed, 48 insertions(+), 1710 deletions(-)
> >
> <snip>
^ permalink raw reply [flat|nested] 45+ messages in thread
* RE: [PATCH v4] crypto/ipsec_mb: unified IPsec MB interface
2024-02-29 16:32 ` Akhil Goyal
@ 2024-03-04 7:33 ` Akhil Goyal
2024-03-05 5:39 ` Honnappa Nagarahalli
0 siblings, 1 reply; 45+ messages in thread
From: Akhil Goyal @ 2024-03-04 7:33 UTC (permalink / raw)
To: Dooley, Brian, Ji, Kai, De Lara Guarch, Pablo, Power, Ciara
Cc: dev, wathsala.vithanage, ruifeng.wang, honnappa.nagarahalli,
Jack.Bond-Preston
> > Hi folks,
> >
> > The introduction of a more unified IPsec MB library for DPDK is causing the
> > snow3g tests to fail on ARM. Artifact here:
> > https://lab.dpdk.org/results/dashboard/patchsets/29315/
> > PMDs using the direct API (KASUMI, CHACHA, ZUC, SNOW3G) will use the job
> API,
> > from the AESNI MB PMD code.
> > We have come across a similar issue in the past that related to an offset issue as
> > SNOW3G uses bits instead of bytes.
>
> The above link does not seem to be working.
> I believe from now on, since we continue to maintain two separate repos,
> it would be better to get ack from ARM folks as well
> before merging anything onto crypto/ipsec_mb PMD.
>
> Arm folks, Could you please get the below change tested/incorporated in the
> repo.
Hi Arm folks,
Any update on the below fix?
>
>
> >
> > commit a501609ea6466ed8526c0dfadedee332a4d4a451
> > Author: Pablo de Lara pablo.de.lara.guarch@intel.com
> > Date: Wed Feb 23 16:01:16 2022 +0000
> >
> > crypto/ipsec_mb: fix length and offset settings
> >
> > KASUMI, SNOW3G and ZUC require lengths and offsets to
> > be set in bits or bytes depending on the algorithm.
> > There were some algorithms that were mixing these two,
> > so this commit is fixing this issue.
> >
> > This bug only appeared recently when the ARM ipsec version was bumped to
> 1.4.
> > It appears there could be a similar scenario happening now and this is a
> potential
> > fix that needs to be made in the ARM IPsec-mb repo:
> >
> > diff --git a/lib/aarch64/mb_mgr_snow3g_submit_flush_common_aarch64.h
> > b/lib/aarch64/mb_mgr_snow3g_submit_flush_common_aarch64.h
> > index 13bca11b..de284ade 100644
> > --- a/lib/aarch64/mb_mgr_snow3g_submit_flush_common_aarch64.h
> > +++ b/lib/aarch64/mb_mgr_snow3g_submit_flush_common_aarch64.h
> > @@ -94,8 +94,8 @@ static void
> > snow3g_mb_mgr_insert_uea2_job(MB_MGR_SNOW3G_OOO *state, IMB_JOB
> > *job
> > state->num_lanes_inuse++;
> > state->args.iv[used_lane_idx] = job->iv;
> > state->args.keys[used_lane_idx] = job->enc_keys;
> > - state->args.in[used_lane_idx] = job->src + job-
> > >cipher_start_src_offset_in_bytes;
> > - state->args.out[used_lane_idx] = job->dst;
> > + state->args.in[used_lane_idx] = job->src + (job-
> > >cipher_start_src_offset_in_bits / 8);
> > + state->args.out[used_lane_idx] = job->dst + (job-
> > >cipher_start_src_offset_in_bits / 8);
> > state->args.byte_length[used_lane_idx] = job->msg_len_to_cipher_in_bits /
> 8;
> > state->args.INITIALIZED[used_lane_idx] = 0;
> > state->lens[used_lane_idx] = job->msg_len_to_cipher_in_bits / 8;
> >
> > Thanks,
> > Brian
> >
> > > -----Original Message-----
> > > From: Dooley, Brian <brian.dooley@intel.com>
> > > Sent: Wednesday, February 28, 2024 11:33 AM
> > > To: Ji, Kai <kai.ji@intel.com>; De Lara Guarch, Pablo
> > > <pablo.de.lara.guarch@intel.com>
> > > Cc: dev@dpdk.org; gakhil@marvell.com; Dooley, Brian
> > > <brian.dooley@intel.com>
> > > Subject: [PATCH v4] crypto/ipsec_mb: unified IPsec MB interface
> > >
> > > Currently IPsec MB provides both the JOB API and direct API.
> > > AESNI_MB PMD is using the JOB API codepath while ZUC, KASUMI, SNOW3G
> > > and CHACHA20_POLY1305 are using the direct API.
> > > Instead of using the direct API for these PMDs, they should now make
> > > use of the JOB API codepath. This would remove all use of the IPsec MB
> > > direct API for these PMDs.
> > >
> > > Signed-off-by: Brian Dooley <brian.dooley@intel.com>
> > > ---
> > > v2:
> > > - Fix compilation failure
> > > v3:
> > > - Remove session configure pointer for each PMD
> > > v4:
> > > - Keep AES GCM PMD and fix extern issue
> > > ---
> > > doc/guides/rel_notes/release_24_03.rst | 6 +
> > > drivers/crypto/ipsec_mb/pmd_aesni_mb.c | 10 +-
> > > drivers/crypto/ipsec_mb/pmd_aesni_mb_priv.h | 15 +-
> > > drivers/crypto/ipsec_mb/pmd_chacha_poly.c | 338 +----------
> > > .../crypto/ipsec_mb/pmd_chacha_poly_priv.h | 28 -
> > > drivers/crypto/ipsec_mb/pmd_kasumi.c | 410 +------------
> > > drivers/crypto/ipsec_mb/pmd_kasumi_priv.h | 20 -
> > > drivers/crypto/ipsec_mb/pmd_snow3g.c | 543 +-----------------
> > > drivers/crypto/ipsec_mb/pmd_snow3g_priv.h | 21 -
> > > drivers/crypto/ipsec_mb/pmd_zuc.c | 347 +----------
> > > drivers/crypto/ipsec_mb/pmd_zuc_priv.h | 20 -
> > > 11 files changed, 48 insertions(+), 1710 deletions(-)
> > >
> > <snip>
^ permalink raw reply [flat|nested] 45+ messages in thread
* Re: [PATCH v4] crypto/ipsec_mb: unified IPsec MB interface
2024-03-04 7:33 ` Akhil Goyal
@ 2024-03-05 5:39 ` Honnappa Nagarahalli
2024-03-05 17:31 ` Wathsala Wathawana Vithanage
0 siblings, 1 reply; 45+ messages in thread
From: Honnappa Nagarahalli @ 2024-03-05 5:39 UTC (permalink / raw)
To: Akhil Goyal
Cc: Dooley, Brian, Ji, Kai, De Lara Guarch, Pablo, Power, Ciara, dev,
Wathsala Wathawana Vithanage, Ruifeng Wang, Jack Bond-Preston,
nd
> On Mar 4, 2024, at 1:33 AM, Akhil Goyal <gakhil@marvell.com> wrote:
>
>>> Hi folks,
>>>
>>> The introduction of a more unified IPsec MB library for DPDK is causing the
>>> snow3g tests to fail on ARM. Artifact here:
>>> https://lab.dpdk.org/results/dashboard/patchsets/29315/
>>> PMDs using the direct API (KASUMI, CHACHA, ZUC, SNOW3G) will use the job
>> API,
>>> from the AESNI MB PMD code.
>>> We have come across a similar issue in the past that related to an offset issue as
>>> SNOW3G uses bits instead of bytes.
>>
>> The above link does not seem to be working.
>> I believe from now on, since we continue to maintain two separate repos,
>> it would be better to get ack from ARM folks as well
>> before merging anything onto crypto/ipsec_mb PMD.
>>
>> Arm folks, Could you please get the below change tested/incorporated in the
>> repo.
>
> Hi Arm folks,
> Any update on the below fix?
This is being worked on. We are in the process of creating a new tag. We will update soon.
>
>
>>
>>
>>>
>>> commit a501609ea6466ed8526c0dfadedee332a4d4a451
>>> Author: Pablo de Lara pablo.de.lara.guarch@intel.com
>>> Date: Wed Feb 23 16:01:16 2022 +0000
>>>
>>> crypto/ipsec_mb: fix length and offset settings
>>>
>>> KASUMI, SNOW3G and ZUC require lengths and offsets to
>>> be set in bits or bytes depending on the algorithm.
>>> There were some algorithms that were mixing these two,
>>> so this commit is fixing this issue.
>>>
>>> This bug only appeared recently when the ARM ipsec version was bumped to
>> 1.4.
>>> It appears there could be a similar scenario happening now and this is a
>> potential
>>> fix that needs to be made in the ARM IPsec-mb repo:
>>>
>>> diff --git a/lib/aarch64/mb_mgr_snow3g_submit_flush_common_aarch64.h
>>> b/lib/aarch64/mb_mgr_snow3g_submit_flush_common_aarch64.h
>>> index 13bca11b..de284ade 100644
>>> --- a/lib/aarch64/mb_mgr_snow3g_submit_flush_common_aarch64.h
>>> +++ b/lib/aarch64/mb_mgr_snow3g_submit_flush_common_aarch64.h
>>> @@ -94,8 +94,8 @@ static void
>>> snow3g_mb_mgr_insert_uea2_job(MB_MGR_SNOW3G_OOO *state, IMB_JOB
>>> *job
>>> state->num_lanes_inuse++;
>>> state->args.iv[used_lane_idx] = job->iv;
>>> state->args.keys[used_lane_idx] = job->enc_keys;
>>> - state->args.in[used_lane_idx] = job->src + job-
>>>> cipher_start_src_offset_in_bytes;
>>> - state->args.out[used_lane_idx] = job->dst;
>>> + state->args.in[used_lane_idx] = job->src + (job-
>>>> cipher_start_src_offset_in_bits / 8);
>>> + state->args.out[used_lane_idx] = job->dst + (job-
>>>> cipher_start_src_offset_in_bits / 8);
>>> state->args.byte_length[used_lane_idx] = job->msg_len_to_cipher_in_bits /
>> 8;
>>> state->args.INITIALIZED[used_lane_idx] = 0;
>>> state->lens[used_lane_idx] = job->msg_len_to_cipher_in_bits / 8;
>>>
>>> Thanks,
>>> Brian
>>>
>>>> -----Original Message-----
>>>> From: Dooley, Brian <brian.dooley@intel.com>
>>>> Sent: Wednesday, February 28, 2024 11:33 AM
>>>> To: Ji, Kai <kai.ji@intel.com>; De Lara Guarch, Pablo
>>>> <pablo.de.lara.guarch@intel.com>
>>>> Cc: dev@dpdk.org; gakhil@marvell.com; Dooley, Brian
>>>> <brian.dooley@intel.com>
>>>> Subject: [PATCH v4] crypto/ipsec_mb: unified IPsec MB interface
>>>>
>>>> Currently IPsec MB provides both the JOB API and direct API.
>>>> AESNI_MB PMD is using the JOB API codepath while ZUC, KASUMI, SNOW3G
>>>> and CHACHA20_POLY1305 are using the direct API.
>>>> Instead of using the direct API for these PMDs, they should now make
>>>> use of the JOB API codepath. This would remove all use of the IPsec MB
>>>> direct API for these PMDs.
>>>>
>>>> Signed-off-by: Brian Dooley <brian.dooley@intel.com>
>>>> ---
>>>> v2:
>>>> - Fix compilation failure
>>>> v3:
>>>> - Remove session configure pointer for each PMD
>>>> v4:
>>>> - Keep AES GCM PMD and fix extern issue
>>>> ---
>>>> doc/guides/rel_notes/release_24_03.rst | 6 +
>>>> drivers/crypto/ipsec_mb/pmd_aesni_mb.c | 10 +-
>>>> drivers/crypto/ipsec_mb/pmd_aesni_mb_priv.h | 15 +-
>>>> drivers/crypto/ipsec_mb/pmd_chacha_poly.c | 338 +----------
>>>> .../crypto/ipsec_mb/pmd_chacha_poly_priv.h | 28 -
>>>> drivers/crypto/ipsec_mb/pmd_kasumi.c | 410 +------------
>>>> drivers/crypto/ipsec_mb/pmd_kasumi_priv.h | 20 -
>>>> drivers/crypto/ipsec_mb/pmd_snow3g.c | 543 +-----------------
>>>> drivers/crypto/ipsec_mb/pmd_snow3g_priv.h | 21 -
>>>> drivers/crypto/ipsec_mb/pmd_zuc.c | 347 +----------
>>>> drivers/crypto/ipsec_mb/pmd_zuc_priv.h | 20 -
>>>> 11 files changed, 48 insertions(+), 1710 deletions(-)
>>>>
>>> <snip>
^ permalink raw reply [flat|nested] 45+ messages in thread
* RE: [PATCH v4] crypto/ipsec_mb: unified IPsec MB interface
2024-02-28 11:33 ` [PATCH v4] " Brian Dooley
2024-02-28 11:50 ` Power, Ciara
2024-02-29 16:23 ` Dooley, Brian
@ 2024-03-05 15:21 ` Wathsala Wathawana Vithanage
2 siblings, 0 replies; 45+ messages in thread
From: Wathsala Wathawana Vithanage @ 2024-03-05 15:21 UTC (permalink / raw)
To: Brian Dooley, Kai Ji, Pablo de Lara; +Cc: dev, gakhil, nd
> Subject: [PATCH v4] crypto/ipsec_mb: unified IPsec MB interface
>
> Currently IPsec MB provides both the JOB API and direct API.
> AESNI_MB PMD is using the JOB API codepath while ZUC, KASUMI, SNOW3G
> and CHACHA20_POLY1305 are using the direct API.
> Instead of using the direct API for these PMDs, they should now make
> use of the JOB API codepath. This would remove all use of the IPsec MB
> direct API for these PMDs.
>
> Signed-off-by: Brian Dooley <brian.dooley@intel.com>
Acked-by: Wathsala Vithanage <wathsala.vithanage@arm.com>
> ---
> v2:
> - Fix compilation failure
> v3:
> - Remove session configure pointer for each PMD
> v4:
> - Keep AES GCM PMD and fix extern issue
> ---
> doc/guides/rel_notes/release_24_03.rst | 6 +
> drivers/crypto/ipsec_mb/pmd_aesni_mb.c | 10 +-
> drivers/crypto/ipsec_mb/pmd_aesni_mb_priv.h | 15 +-
> drivers/crypto/ipsec_mb/pmd_chacha_poly.c | 338 +----------
> .../crypto/ipsec_mb/pmd_chacha_poly_priv.h | 28 -
> drivers/crypto/ipsec_mb/pmd_kasumi.c | 410 +------------
> drivers/crypto/ipsec_mb/pmd_kasumi_priv.h | 20 -
> drivers/crypto/ipsec_mb/pmd_snow3g.c | 543 +-----------------
> drivers/crypto/ipsec_mb/pmd_snow3g_priv.h | 21 -
> drivers/crypto/ipsec_mb/pmd_zuc.c | 347 +----------
> drivers/crypto/ipsec_mb/pmd_zuc_priv.h | 20 -
> 11 files changed, 48 insertions(+), 1710 deletions(-)
>
> diff --git a/doc/guides/rel_notes/release_24_03.rst
> b/doc/guides/rel_notes/release_24_03.rst
> index 879bb4944c..6c5b76cef5 100644
> --- a/doc/guides/rel_notes/release_24_03.rst
> +++ b/doc/guides/rel_notes/release_24_03.rst
> @@ -138,6 +138,12 @@ New Features
> to support TLS v1.2, TLS v1.3 and DTLS v1.2.
> * Added PMD API to allow raw submission of instructions to CPT.
>
> +* **Updated ipsec_mb crypto driver.**
> +
> + * Kasumi, Snow3G, ChaChaPoly and ZUC PMDs now share the job API
> codepath
> + with AESNI_MB PMD. Depending on the architecture, the performance of
> ZUC
> + crypto PMD is approximately 10% less for small fixed packet sizes.
> +
>
> Removed Items
> -------------
> diff --git a/drivers/crypto/ipsec_mb/pmd_aesni_mb.c
> b/drivers/crypto/ipsec_mb/pmd_aesni_mb.c
> index 4de4866cf3..7d4dbc91ef 100644
> --- a/drivers/crypto/ipsec_mb/pmd_aesni_mb.c
> +++ b/drivers/crypto/ipsec_mb/pmd_aesni_mb.c
> @@ -8,6 +8,8 @@
>
> RTE_DEFINE_PER_LCORE(pid_t, pid);
>
> +uint8_t pmd_driver_id_aesni_mb;
> +
> struct aesni_mb_op_buf_data {
> struct rte_mbuf *m;
> uint32_t offset;
> @@ -761,7 +763,7 @@ aesni_mb_set_session_aead_parameters(const
> IMB_MGR *mb_mgr,
> }
>
> /** Configure a aesni multi-buffer session from a crypto xform chain */
> -static int
> +int
> aesni_mb_session_configure(IMB_MGR *mb_mgr,
> void *priv_sess,
> const struct rte_crypto_sym_xform *xform)
> @@ -2131,7 +2133,7 @@ set_job_null_op(IMB_JOB *job, struct
> rte_crypto_op *op)
> }
>
> #if IMB_VERSION(1, 2, 0) < IMB_VERSION_NUM
> -static uint16_t
> +uint16_t
> aesni_mb_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
> uint16_t nb_ops)
> {
> @@ -2321,7 +2323,7 @@ flush_mb_mgr(struct ipsec_mb_qp *qp, IMB_MGR
> *mb_mgr,
> return processed_ops;
> }
>
> -static uint16_t
> +uint16_t
> aesni_mb_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
> uint16_t nb_ops)
> {
> @@ -2456,7 +2458,7 @@ verify_sync_dgst(struct rte_crypto_sym_vec *vec,
> return k;
> }
>
> -static uint32_t
> +uint32_t
> aesni_mb_process_bulk(struct rte_cryptodev *dev __rte_unused,
> struct rte_cryptodev_sym_session *sess, union rte_crypto_sym_ofs
> sofs,
> struct rte_crypto_sym_vec *vec)
> diff --git a/drivers/crypto/ipsec_mb/pmd_aesni_mb_priv.h
> b/drivers/crypto/ipsec_mb/pmd_aesni_mb_priv.h
> index 85994fe5a1..2d462a7f68 100644
> --- a/drivers/crypto/ipsec_mb/pmd_aesni_mb_priv.h
> +++ b/drivers/crypto/ipsec_mb/pmd_aesni_mb_priv.h
> @@ -21,6 +21,19 @@
> #define MAX_NUM_SEGS 16
> #endif
>
> +int
> +aesni_mb_session_configure(IMB_MGR * m __rte_unused, void *priv_sess,
> + const struct rte_crypto_sym_xform *xform);
> +
> +uint16_t
> +aesni_mb_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
> + uint16_t nb_ops);
> +
> +uint32_t
> +aesni_mb_process_bulk(struct rte_cryptodev *dev __rte_unused,
> + struct rte_cryptodev_sym_session *sess, union rte_crypto_sym_ofs
> sofs,
> + struct rte_crypto_sym_vec *vec);
> +
> static const struct rte_cryptodev_capabilities aesni_mb_capabilities[] = {
> { /* MD5 HMAC */
> .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
> @@ -722,8 +735,6 @@ static const struct rte_cryptodev_capabilities
> aesni_mb_capabilities[] = {
> RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
> };
>
> -uint8_t pmd_driver_id_aesni_mb;
> -
> struct aesni_mb_qp_data {
> uint8_t temp_digests[IMB_MAX_JOBS][DIGEST_LENGTH_MAX];
> /* *< Buffers used to store the digest generated
> diff --git a/drivers/crypto/ipsec_mb/pmd_chacha_poly.c
> b/drivers/crypto/ipsec_mb/pmd_chacha_poly.c
> index 97e7cef233..7436353fc2 100644
> --- a/drivers/crypto/ipsec_mb/pmd_chacha_poly.c
> +++ b/drivers/crypto/ipsec_mb/pmd_chacha_poly.c
> @@ -3,334 +3,7 @@
> */
>
> #include "pmd_chacha_poly_priv.h"
> -
> -/** Parse crypto xform chain and set private session parameters. */
> -static int
> -chacha20_poly1305_session_configure(IMB_MGR * mb_mgr __rte_unused,
> - void *priv_sess, const struct rte_crypto_sym_xform *xform)
> -{
> - struct chacha20_poly1305_session *sess = priv_sess;
> - const struct rte_crypto_sym_xform *auth_xform;
> - const struct rte_crypto_sym_xform *cipher_xform;
> - const struct rte_crypto_sym_xform *aead_xform;
> -
> - uint8_t key_length;
> - const uint8_t *key;
> - enum ipsec_mb_operation mode;
> - int ret = 0;
> -
> - ret = ipsec_mb_parse_xform(xform, &mode, &auth_xform,
> - &cipher_xform, &aead_xform);
> - if (ret)
> - return ret;
> -
> - sess->op = mode;
> -
> - switch (sess->op) {
> - case IPSEC_MB_OP_AEAD_AUTHENTICATED_ENCRYPT:
> - case IPSEC_MB_OP_AEAD_AUTHENTICATED_DECRYPT:
> - if (aead_xform->aead.algo !=
> - RTE_CRYPTO_AEAD_CHACHA20_POLY1305) {
> - IPSEC_MB_LOG(ERR,
> - "The only combined operation supported is
> CHACHA20 POLY1305");
> - ret = -ENOTSUP;
> - goto error_exit;
> - }
> - /* Set IV parameters */
> - sess->iv.offset = aead_xform->aead.iv.offset;
> - sess->iv.length = aead_xform->aead.iv.length;
> - key_length = aead_xform->aead.key.length;
> - key = aead_xform->aead.key.data;
> - sess->aad_length = aead_xform->aead.aad_length;
> - sess->req_digest_length = aead_xform->aead.digest_length;
> - break;
> - default:
> - IPSEC_MB_LOG(
> - ERR, "Wrong xform type, has to be AEAD or
> authentication");
> - ret = -ENOTSUP;
> - goto error_exit;
> - }
> -
> - /* IV check */
> - if (sess->iv.length != CHACHA20_POLY1305_IV_LENGTH &&
> - sess->iv.length != 0) {
> - IPSEC_MB_LOG(ERR, "Wrong IV length");
> - ret = -EINVAL;
> - goto error_exit;
> - }
> -
> - /* Check key length */
> - if (key_length != CHACHA20_POLY1305_KEY_SIZE) {
> - IPSEC_MB_LOG(ERR, "Invalid key length");
> - ret = -EINVAL;
> - goto error_exit;
> - } else {
> - memcpy(sess->key, key, CHACHA20_POLY1305_KEY_SIZE);
> - }
> -
> - /* Digest check */
> - if (sess->req_digest_length !=
> CHACHA20_POLY1305_DIGEST_LENGTH) {
> - IPSEC_MB_LOG(ERR, "Invalid digest length");
> - ret = -EINVAL;
> - goto error_exit;
> - } else {
> - sess->gen_digest_length =
> CHACHA20_POLY1305_DIGEST_LENGTH;
> - }
> -
> -error_exit:
> - return ret;
> -}
> -
> -/**
> - * Process a crypto operation, calling
> - * the direct chacha poly API from the multi buffer library.
> - *
> - * @param qp queue pair
> - * @param op symmetric crypto operation
> - * @param session chacha poly session
> - *
> - * @return
> - * - Return 0 if success
> - */
> -static int
> -chacha20_poly1305_crypto_op(struct ipsec_mb_qp *qp, struct
> rte_crypto_op *op,
> - struct chacha20_poly1305_session *session)
> -{
> - struct chacha20_poly1305_qp_data *qp_data =
> - ipsec_mb_get_qp_private_data(qp);
> - uint8_t *src, *dst;
> - uint8_t *iv_ptr;
> - struct rte_crypto_sym_op *sym_op = op->sym;
> - struct rte_mbuf *m_src = sym_op->m_src;
> - uint32_t offset, data_offset, data_length;
> - uint32_t part_len, data_len;
> - int total_len;
> - uint8_t *tag;
> - unsigned int oop = 0;
> -
> - offset = sym_op->aead.data.offset;
> - data_offset = offset;
> - data_length = sym_op->aead.data.length;
> - RTE_ASSERT(m_src != NULL);
> -
> - while (offset >= m_src->data_len && data_length != 0) {
> - offset -= m_src->data_len;
> - m_src = m_src->next;
> -
> - RTE_ASSERT(m_src != NULL);
> - }
> -
> - src = rte_pktmbuf_mtod_offset(m_src, uint8_t *, offset);
> -
> - data_len = m_src->data_len - offset;
> - part_len = (data_len < data_length) ? data_len :
> - data_length;
> -
> - /* In-place */
> - if (sym_op->m_dst == NULL || (sym_op->m_dst == sym_op->m_src))
> - dst = src;
> - /* Out-of-place */
> - else {
> - oop = 1;
> - /* Segmented destination buffer is not supported
> - * if operation is Out-of-place
> - */
> - RTE_ASSERT(rte_pktmbuf_is_contiguous(sym_op->m_dst));
> - dst = rte_pktmbuf_mtod_offset(sym_op->m_dst, uint8_t *,
> - data_offset);
> - }
> -
> - iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
> - session->iv.offset);
> -
> - IMB_CHACHA20_POLY1305_INIT(qp->mb_mgr, session->key,
> - &qp_data->chacha20_poly1305_ctx_data,
> - iv_ptr, sym_op->aead.aad.data,
> - (uint64_t)session->aad_length);
> -
> - if (session->op == IPSEC_MB_OP_AEAD_AUTHENTICATED_ENCRYPT) {
> - IMB_CHACHA20_POLY1305_ENC_UPDATE(qp->mb_mgr,
> - session->key,
> - &qp_data->chacha20_poly1305_ctx_data,
> - dst, src, (uint64_t)part_len);
> - total_len = data_length - part_len;
> -
> - while (total_len) {
> - m_src = m_src->next;
> - RTE_ASSERT(m_src != NULL);
> -
> - src = rte_pktmbuf_mtod(m_src, uint8_t *);
> - if (oop)
> - dst += part_len;
> - else
> - dst = src;
> - part_len = (m_src->data_len < total_len) ?
> - m_src->data_len : total_len;
> -
> - if (dst == NULL || src == NULL) {
> - IPSEC_MB_LOG(ERR, "Invalid src or dst
> input");
> - return -EINVAL;
> - }
> - IMB_CHACHA20_POLY1305_ENC_UPDATE(qp-
> >mb_mgr,
> - session->key,
> - &qp_data-
> >chacha20_poly1305_ctx_data,
> - dst, src, (uint64_t)part_len);
> - total_len -= part_len;
> - if (total_len < 0) {
> - IPSEC_MB_LOG(ERR, "Invalid part len");
> - return -EINVAL;
> - }
> - }
> -
> - tag = sym_op->aead.digest.data;
> - IMB_CHACHA20_POLY1305_ENC_FINALIZE(qp->mb_mgr,
> - &qp_data-
> >chacha20_poly1305_ctx_data,
> - tag, session->gen_digest_length);
> -
> - } else {
> - IMB_CHACHA20_POLY1305_DEC_UPDATE(qp->mb_mgr,
> - session->key,
> - &qp_data-
> >chacha20_poly1305_ctx_data,
> - dst, src, (uint64_t)part_len);
> -
> - total_len = data_length - part_len;
> -
> - while (total_len) {
> - m_src = m_src->next;
> -
> - RTE_ASSERT(m_src != NULL);
> -
> - src = rte_pktmbuf_mtod(m_src, uint8_t *);
> - if (oop)
> - dst += part_len;
> - else
> - dst = src;
> - part_len = (m_src->data_len < total_len) ?
> - m_src->data_len : total_len;
> -
> - if (dst == NULL || src == NULL) {
> - IPSEC_MB_LOG(ERR, "Invalid src or dst
> input");
> - return -EINVAL;
> - }
> - IMB_CHACHA20_POLY1305_DEC_UPDATE(qp-
> >mb_mgr,
> - session->key,
> - &qp_data-
> >chacha20_poly1305_ctx_data,
> - dst, src, (uint64_t)part_len);
> - total_len -= part_len;
> - if (total_len < 0) {
> - IPSEC_MB_LOG(ERR, "Invalid part len");
> - return -EINVAL;
> - }
> - }
> -
> - tag = qp_data->temp_digest;
> - IMB_CHACHA20_POLY1305_DEC_FINALIZE(qp->mb_mgr,
> - &qp_data-
> >chacha20_poly1305_ctx_data,
> - tag, session->gen_digest_length);
> - }
> -
> - return 0;
> -}
> -
> -/**
> - * Process a completed chacha poly op
> - *
> - * @param qp Queue Pair to process
> - * @param op Crypto operation
> - * @param sess Crypto session
> - *
> - * @return
> - * - void
> - */
> -static void
> -post_process_chacha20_poly1305_crypto_op(struct ipsec_mb_qp *qp,
> - struct rte_crypto_op *op,
> - struct chacha20_poly1305_session *session)
> -{
> - struct chacha20_poly1305_qp_data *qp_data =
> - ipsec_mb_get_qp_private_data(qp);
> -
> - op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
> - /* Verify digest if required */
> - if (session->op == IPSEC_MB_OP_AEAD_AUTHENTICATED_DECRYPT ||
> - session->op == IPSEC_MB_OP_HASH_VERIFY_ONLY) {
> - uint8_t *digest = op->sym->aead.digest.data;
> - uint8_t *tag = qp_data->temp_digest;
> -
> -#ifdef RTE_LIBRTE_PMD_CHACHA20_POLY1305_DEBUG
> - rte_hexdump(stdout, "auth tag (orig):",
> - digest, session->req_digest_length);
> - rte_hexdump(stdout, "auth tag (calc):",
> - tag, session->req_digest_length);
> -#endif
> - if (memcmp(tag, digest, session->req_digest_length)
> != 0)
> - op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
> -
> - }
> -
> -}
> -
> -/**
> - * Process a completed Chacha20_poly1305 request
> - *
> - * @param qp Queue Pair to process
> - * @param op Crypto operation
> - * @param sess Crypto session
> - *
> - * @return
> - * - void
> - */
> -static void
> -handle_completed_chacha20_poly1305_crypto_op(struct ipsec_mb_qp *qp,
> - struct rte_crypto_op *op,
> - struct chacha20_poly1305_session *sess)
> -{
> - post_process_chacha20_poly1305_crypto_op(qp, op, sess);
> -
> - /* Free session if a session-less crypto op */
> - if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
> - memset(sess, 0, sizeof(struct chacha20_poly1305_session));
> - rte_mempool_put(qp->sess_mp, op->sym->session);
> - op->sym->session = NULL;
> - }
> -}
> -
> -static uint16_t
> -chacha20_poly1305_pmd_dequeue_burst(void *queue_pair,
> - struct rte_crypto_op **ops, uint16_t nb_ops)
> -{
> - struct chacha20_poly1305_session *sess;
> - struct ipsec_mb_qp *qp = queue_pair;
> -
> - int retval = 0;
> - unsigned int i = 0, nb_dequeued;
> -
> - nb_dequeued = rte_ring_dequeue_burst(qp->ingress_queue,
> - (void **)ops, nb_ops, NULL);
> -
> - for (i = 0; i < nb_dequeued; i++) {
> -
> - sess = ipsec_mb_get_session_private(qp, ops[i]);
> - if (unlikely(sess == NULL)) {
> - ops[i]->status =
> RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
> - qp->stats.dequeue_err_count++;
> - break;
> - }
> -
> - retval = chacha20_poly1305_crypto_op(qp, ops[i], sess);
> - if (retval < 0) {
> - ops[i]->status =
> RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
> - qp->stats.dequeue_err_count++;
> - break;
> - }
> -
> - handle_completed_chacha20_poly1305_crypto_op(qp,
> ops[i], sess);
> - }
> -
> - qp->stats.dequeued_count += i;
> -
> - return i;
> -}
> +#include "pmd_aesni_mb_priv.h"
>
> struct rte_cryptodev_ops chacha20_poly1305_pmd_ops = {
> .dev_configure = ipsec_mb_config,
> @@ -384,7 +57,7 @@ RTE_INIT(ipsec_mb_register_chacha20_poly1305)
> =
> &ipsec_mb_pmds[IPSEC_MB_PMD_TYPE_CHACHA20_POLY1305];
>
> chacha_poly_data->caps = chacha20_poly1305_capabilities;
> - chacha_poly_data->dequeue_burst =
> chacha20_poly1305_pmd_dequeue_burst;
> + chacha_poly_data->dequeue_burst = aesni_mb_dequeue_burst;
> chacha_poly_data->feature_flags =
> RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
> RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
> @@ -395,10 +68,9 @@ RTE_INIT(ipsec_mb_register_chacha20_poly1305)
> RTE_CRYPTODEV_FF_SYM_SESSIONLESS;
> chacha_poly_data->internals_priv_size = 0;
> chacha_poly_data->ops = &chacha20_poly1305_pmd_ops;
> - chacha_poly_data->qp_priv_size =
> - sizeof(struct chacha20_poly1305_qp_data);
> + chacha_poly_data->qp_priv_size = sizeof(struct aesni_mb_qp_data);
> chacha_poly_data->session_configure =
> - chacha20_poly1305_session_configure;
> + aesni_mb_session_configure;
> chacha_poly_data->session_priv_size =
> - sizeof(struct chacha20_poly1305_session);
> + sizeof(struct aesni_mb_session);
> }
> diff --git a/drivers/crypto/ipsec_mb/pmd_chacha_poly_priv.h
> b/drivers/crypto/ipsec_mb/pmd_chacha_poly_priv.h
> index 842f62f5d1..e668bfe07f 100644
> --- a/drivers/crypto/ipsec_mb/pmd_chacha_poly_priv.h
> +++ b/drivers/crypto/ipsec_mb/pmd_chacha_poly_priv.h
> @@ -7,9 +7,7 @@
>
> #include "ipsec_mb_private.h"
>
> -#define CHACHA20_POLY1305_IV_LENGTH 12
> #define CHACHA20_POLY1305_DIGEST_LENGTH 16
> -#define CHACHA20_POLY1305_KEY_SIZE 32
>
> static const
> struct rte_cryptodev_capabilities chacha20_poly1305_capabilities[] = {
> @@ -45,30 +43,4 @@ struct rte_cryptodev_capabilities
> chacha20_poly1305_capabilities[] = {
>
> uint8_t pmd_driver_id_chacha20_poly1305;
>
> -/** CHACHA20 POLY1305 private session structure */
> -struct chacha20_poly1305_session {
> - struct {
> - uint16_t length;
> - uint16_t offset;
> - } iv;
> - /**< IV parameters */
> - uint16_t aad_length;
> - /**< AAD length */
> - uint16_t req_digest_length;
> - /**< Requested digest length */
> - uint16_t gen_digest_length;
> - /**< Generated digest length */
> - uint8_t key[CHACHA20_POLY1305_KEY_SIZE];
> - enum ipsec_mb_operation op;
> -} __rte_cache_aligned;
> -
> -struct chacha20_poly1305_qp_data {
> - struct chacha20_poly1305_context_data
> chacha20_poly1305_ctx_data;
> - uint8_t temp_digest[CHACHA20_POLY1305_DIGEST_LENGTH];
> - /**< Buffer used to store the digest generated
> - * by the driver when verifying a digest provided
> - * by the user (using authentication verify operation)
> - */
> -};
> -
> #endif /* _PMD_CHACHA_POLY_PRIV_H_ */
> diff --git a/drivers/crypto/ipsec_mb/pmd_kasumi.c
> b/drivers/crypto/ipsec_mb/pmd_kasumi.c
> index 70536ec3dc..c3571ec81b 100644
> --- a/drivers/crypto/ipsec_mb/pmd_kasumi.c
> +++ b/drivers/crypto/ipsec_mb/pmd_kasumi.c
> @@ -10,406 +10,7 @@
> #include <rte_malloc.h>
>
> #include "pmd_kasumi_priv.h"
> -
> -/** Parse crypto xform chain and set private session parameters. */
> -static int
> -kasumi_session_configure(IMB_MGR *mgr, void *priv_sess,
> - const struct rte_crypto_sym_xform *xform)
> -{
> - const struct rte_crypto_sym_xform *auth_xform = NULL;
> - const struct rte_crypto_sym_xform *cipher_xform = NULL;
> - enum ipsec_mb_operation mode;
> - struct kasumi_session *sess = (struct kasumi_session *)priv_sess;
> - /* Select Crypto operation - hash then cipher / cipher then hash */
> - int ret = ipsec_mb_parse_xform(xform, &mode, &auth_xform,
> - &cipher_xform, NULL);
> -
> - if (ret)
> - return ret;
> -
> - if (cipher_xform) {
> - /* Only KASUMI F8 supported */
> - if (cipher_xform->cipher.algo !=
> RTE_CRYPTO_CIPHER_KASUMI_F8) {
> - IPSEC_MB_LOG(ERR, "Unsupported cipher algorithm
> ");
> - return -ENOTSUP;
> - }
> -
> - sess->cipher_iv_offset = cipher_xform->cipher.iv.offset;
> - if (cipher_xform->cipher.iv.length != KASUMI_IV_LENGTH) {
> - IPSEC_MB_LOG(ERR, "Wrong IV length");
> - return -EINVAL;
> - }
> -
> - /* Initialize key */
> - IMB_KASUMI_INIT_F8_KEY_SCHED(mgr,
> - cipher_xform->cipher.key.data,
> - &sess->pKeySched_cipher);
> - }
> -
> - if (auth_xform) {
> - /* Only KASUMI F9 supported */
> - if (auth_xform->auth.algo !=
> RTE_CRYPTO_AUTH_KASUMI_F9) {
> - IPSEC_MB_LOG(ERR, "Unsupported authentication");
> - return -ENOTSUP;
> - }
> -
> - if (auth_xform->auth.digest_length !=
> KASUMI_DIGEST_LENGTH) {
> - IPSEC_MB_LOG(ERR, "Wrong digest length");
> - return -EINVAL;
> - }
> -
> - sess->auth_op = auth_xform->auth.op;
> -
> - /* Initialize key */
> - IMB_KASUMI_INIT_F9_KEY_SCHED(mgr, auth_xform-
> >auth.key.data,
> - &sess->pKeySched_hash);
> - }
> -
> - sess->op = mode;
> - return ret;
> -}
> -
> -/** Encrypt/decrypt mbufs with same cipher key. */
> -static uint8_t
> -process_kasumi_cipher_op(struct ipsec_mb_qp *qp, struct rte_crypto_op
> **ops,
> - struct kasumi_session *session, uint8_t num_ops)
> -{
> - unsigned int i;
> - uint8_t processed_ops = 0;
> - const void *src[num_ops];
> - void *dst[num_ops];
> - uint8_t *iv_ptr;
> - uint64_t iv[num_ops];
> - uint32_t num_bytes[num_ops];
> -
> - for (i = 0; i < num_ops; i++) {
> - src[i] = rte_pktmbuf_mtod_offset(ops[i]->sym->m_src,
> - uint8_t *,
> - (ops[i]->sym-
> >cipher.data.offset >> 3));
> - dst[i] = ops[i]->sym->m_dst
> - ? rte_pktmbuf_mtod_offset(ops[i]->sym->m_dst,
> - uint8_t *,
> - (ops[i]->sym-
> >cipher.data.offset >> 3))
> - : rte_pktmbuf_mtod_offset(ops[i]->sym->m_src,
> - uint8_t *,
> - (ops[i]->sym-
> >cipher.data.offset >> 3));
> - iv_ptr = rte_crypto_op_ctod_offset(ops[i], uint8_t *,
> - session->cipher_iv_offset);
> - iv[i] = *((uint64_t *)(iv_ptr));
> - num_bytes[i] = ops[i]->sym->cipher.data.length >> 3;
> -
> - processed_ops++;
> - }
> -
> - if (processed_ops != 0)
> - IMB_KASUMI_F8_N_BUFFER(qp->mb_mgr, &session-
> >pKeySched_cipher,
> - iv, src, dst, num_bytes,
> - processed_ops);
> -
> - return processed_ops;
> -}
> -
> -/** Encrypt/decrypt mbuf (bit level function). */
> -static uint8_t
> -process_kasumi_cipher_op_bit(struct ipsec_mb_qp *qp, struct
> rte_crypto_op *op,
> - struct kasumi_session *session)
> -{
> - uint8_t *src, *dst;
> - uint8_t *iv_ptr;
> - uint64_t iv;
> - uint32_t length_in_bits, offset_in_bits;
> -
> - offset_in_bits = op->sym->cipher.data.offset;
> - src = rte_pktmbuf_mtod(op->sym->m_src, uint8_t *);
> - if (op->sym->m_dst == NULL)
> - dst = src;
> - else
> - dst = rte_pktmbuf_mtod(op->sym->m_dst, uint8_t *);
> - iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
> - session->cipher_iv_offset);
> - iv = *((uint64_t *)(iv_ptr));
> - length_in_bits = op->sym->cipher.data.length;
> -
> - IMB_KASUMI_F8_1_BUFFER_BIT(qp->mb_mgr, &session-
> >pKeySched_cipher, iv,
> - src, dst, length_in_bits, offset_in_bits);
> -
> - return 1;
> -}
> -
> -/** Generate/verify hash from mbufs with same hash key. */
> -static int
> -process_kasumi_hash_op(struct ipsec_mb_qp *qp, struct rte_crypto_op
> **ops,
> - struct kasumi_session *session, uint8_t num_ops)
> -{
> - unsigned int i;
> - uint8_t processed_ops = 0;
> - uint8_t *src, *dst;
> - uint32_t length_in_bits;
> - uint32_t num_bytes;
> - struct kasumi_qp_data *qp_data =
> ipsec_mb_get_qp_private_data(qp);
> -
> - for (i = 0; i < num_ops; i++) {
> - /* Data must be byte aligned */
> - if ((ops[i]->sym->auth.data.offset % BYTE_LEN) != 0) {
> - ops[i]->status =
> RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
> - IPSEC_MB_LOG(ERR, "Invalid Offset");
> - break;
> - }
> -
> - length_in_bits = ops[i]->sym->auth.data.length;
> -
> - src = rte_pktmbuf_mtod_offset(ops[i]->sym->m_src, uint8_t
> *,
> - (ops[i]->sym->auth.data.offset >>
> 3));
> - /* Direction from next bit after end of message */
> - num_bytes = length_in_bits >> 3;
> -
> - if (session->auth_op == RTE_CRYPTO_AUTH_OP_VERIFY) {
> - dst = qp_data->temp_digest;
> - IMB_KASUMI_F9_1_BUFFER(qp->mb_mgr,
> - &session->pKeySched_hash,
> src,
> - num_bytes, dst);
> -
> - /* Verify digest. */
> - if (memcmp(dst, ops[i]->sym->auth.digest.data,
> - KASUMI_DIGEST_LENGTH)
> - != 0)
> - ops[i]->status
> - = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
> - } else {
> - dst = ops[i]->sym->auth.digest.data;
> -
> - IMB_KASUMI_F9_1_BUFFER(qp->mb_mgr,
> - &session->pKeySched_hash,
> src,
> - num_bytes, dst);
> - }
> - processed_ops++;
> - }
> -
> - return processed_ops;
> -}
> -
> -/** Process a batch of crypto ops which shares the same session. */
> -static int
> -process_ops(struct rte_crypto_op **ops, struct kasumi_session *session,
> - struct ipsec_mb_qp *qp, uint8_t num_ops)
> -{
> - unsigned int i;
> - unsigned int processed_ops;
> -
> - switch (session->op) {
> - case IPSEC_MB_OP_ENCRYPT_ONLY:
> - case IPSEC_MB_OP_DECRYPT_ONLY:
> - processed_ops
> - = process_kasumi_cipher_op(qp, ops, session, num_ops);
> - break;
> - case IPSEC_MB_OP_HASH_GEN_ONLY:
> - case IPSEC_MB_OP_HASH_VERIFY_ONLY:
> - processed_ops
> - = process_kasumi_hash_op(qp, ops, session, num_ops);
> - break;
> - case IPSEC_MB_OP_ENCRYPT_THEN_HASH_GEN:
> - case IPSEC_MB_OP_DECRYPT_THEN_HASH_VERIFY:
> - processed_ops
> - = process_kasumi_cipher_op(qp, ops, session, num_ops);
> - process_kasumi_hash_op(qp, ops, session, processed_ops);
> - break;
> - case IPSEC_MB_OP_HASH_VERIFY_THEN_DECRYPT:
> - case IPSEC_MB_OP_HASH_GEN_THEN_ENCRYPT:
> - processed_ops
> - = process_kasumi_hash_op(qp, ops, session, num_ops);
> - process_kasumi_cipher_op(qp, ops, session, processed_ops);
> - break;
> - default:
> - /* Operation not supported. */
> - processed_ops = 0;
> - }
> -
> - for (i = 0; i < num_ops; i++) {
> - /*
> - * If there was no error/authentication failure,
> - * change status to successful.
> - */
> - if (ops[i]->status ==
> RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
> - ops[i]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
> - /* Free session if a session-less crypto op. */
> - if (ops[i]->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
> - memset(session, 0, sizeof(struct kasumi_session));
> - rte_mempool_put(qp->sess_mp, ops[i]->sym-
> >session);
> - ops[i]->sym->session = NULL;
> - }
> - }
> - return processed_ops;
> -}
> -
> -/** Process a crypto op with length/offset in bits. */
> -static int
> -process_op_bit(struct rte_crypto_op *op, struct kasumi_session *session,
> - struct ipsec_mb_qp *qp)
> -{
> - unsigned int processed_op;
> -
> - switch (session->op) {
> - /* case KASUMI_OP_ONLY_CIPHER: */
> - case IPSEC_MB_OP_ENCRYPT_ONLY:
> - case IPSEC_MB_OP_DECRYPT_ONLY:
> - processed_op = process_kasumi_cipher_op_bit(qp, op,
> session);
> - break;
> - /* case KASUMI_OP_ONLY_AUTH: */
> - case IPSEC_MB_OP_HASH_GEN_ONLY:
> - case IPSEC_MB_OP_HASH_VERIFY_ONLY:
> - processed_op = process_kasumi_hash_op(qp, &op, session,
> 1);
> - break;
> - /* case KASUMI_OP_CIPHER_AUTH: */
> - case IPSEC_MB_OP_ENCRYPT_THEN_HASH_GEN:
> - processed_op = process_kasumi_cipher_op_bit(qp, op,
> session);
> - if (processed_op == 1)
> - process_kasumi_hash_op(qp, &op, session, 1);
> - break;
> - /* case KASUMI_OP_AUTH_CIPHER: */
> - case IPSEC_MB_OP_HASH_VERIFY_THEN_DECRYPT:
> - processed_op = process_kasumi_hash_op(qp, &op, session,
> 1);
> - if (processed_op == 1)
> - process_kasumi_cipher_op_bit(qp, op, session);
> - break;
> - default:
> - /* Operation not supported. */
> - processed_op = 0;
> - }
> -
> - /*
> - * If there was no error/authentication failure,
> - * change status to successful.
> - */
> - if (op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
> - op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
> -
> - /* Free session if a session-less crypto op. */
> - if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
> - memset(CRYPTODEV_GET_SYM_SESS_PRIV(op->sym-
> >session), 0,
> - sizeof(struct kasumi_session));
> - rte_mempool_put(qp->sess_mp, (void *)op->sym->session);
> - op->sym->session = NULL;
> - }
> - return processed_op;
> -}
> -
> -static uint16_t
> -kasumi_pmd_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
> - uint16_t nb_ops)
> -{
> - struct rte_crypto_op *c_ops[nb_ops];
> - struct rte_crypto_op *curr_c_op = NULL;
> -
> - struct kasumi_session *prev_sess = NULL, *curr_sess = NULL;
> - struct ipsec_mb_qp *qp = queue_pair;
> - unsigned int i;
> - uint8_t burst_size = 0;
> - uint8_t processed_ops;
> - unsigned int nb_dequeued;
> -
> - nb_dequeued = rte_ring_dequeue_burst(qp->ingress_queue,
> - (void **)ops, nb_ops, NULL);
> - for (i = 0; i < nb_dequeued; i++) {
> - curr_c_op = ops[i];
> -
> -#ifdef RTE_LIBRTE_PMD_KASUMI_DEBUG
> - if (!rte_pktmbuf_is_contiguous(curr_c_op->sym->m_src)
> - || (curr_c_op->sym->m_dst != NULL
> - && !rte_pktmbuf_is_contiguous(
> - curr_c_op->sym->m_dst))) {
> - IPSEC_MB_LOG(ERR,
> - "PMD supports only contiguous mbufs, op
> (%p) provides noncontiguous mbuf as source/destination buffer.",
> - curr_c_op);
> - curr_c_op->status =
> RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
> - break;
> - }
> -#endif
> -
> - /* Set status as enqueued (not processed yet) by default. */
> - curr_c_op->status =
> RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
> -
> - curr_sess = (struct kasumi_session *)
> - ipsec_mb_get_session_private(qp, curr_c_op);
> - if (unlikely(curr_sess == NULL
> - || curr_sess->op ==
> IPSEC_MB_OP_NOT_SUPPORTED)) {
> - curr_c_op->status
> - = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
> - break;
> - }
> -
> - /* If length/offset is at bit-level, process this buffer alone.
> - */
> - if (((curr_c_op->sym->cipher.data.length % BYTE_LEN) != 0)
> - || ((ops[i]->sym->cipher.data.offset % BYTE_LEN) != 0)) {
> - /* Process the ops of the previous session. */
> - if (prev_sess != NULL) {
> - processed_ops = process_ops(c_ops,
> prev_sess,
> - qp, burst_size);
> - if (processed_ops < burst_size) {
> - burst_size = 0;
> - break;
> - }
> -
> - burst_size = 0;
> - prev_sess = NULL;
> - }
> -
> - processed_ops = process_op_bit(curr_c_op,
> - curr_sess, qp);
> - if (processed_ops != 1)
> - break;
> -
> - continue;
> - }
> -
> - /* Batch ops that share the same session. */
> - if (prev_sess == NULL) {
> - prev_sess = curr_sess;
> - c_ops[burst_size++] = curr_c_op;
> - } else if (curr_sess == prev_sess) {
> - c_ops[burst_size++] = curr_c_op;
> - /*
> - * When there are enough ops to process in a batch,
> - * process them, and start a new batch.
> - */
> - if (burst_size == KASUMI_MAX_BURST) {
> - processed_ops = process_ops(c_ops,
> prev_sess,
> - qp, burst_size);
> - if (processed_ops < burst_size) {
> - burst_size = 0;
> - break;
> - }
> -
> - burst_size = 0;
> - prev_sess = NULL;
> - }
> - } else {
> - /*
> - * Different session, process the ops
> - * of the previous session.
> - */
> - processed_ops = process_ops(c_ops, prev_sess, qp,
> - burst_size);
> - if (processed_ops < burst_size) {
> - burst_size = 0;
> - break;
> - }
> -
> - burst_size = 0;
> - prev_sess = curr_sess;
> -
> - c_ops[burst_size++] = curr_c_op;
> - }
> - }
> -
> - if (burst_size != 0) {
> - /* Process the crypto ops of the last session. */
> - processed_ops = process_ops(c_ops, prev_sess, qp,
> burst_size);
> - }
> -
> - qp->stats.dequeued_count += i;
> - return i;
> -}
> +#include "pmd_aesni_mb_priv.h"
>
> struct rte_cryptodev_ops kasumi_pmd_ops = {
> .dev_configure = ipsec_mb_config,
> @@ -460,7 +61,7 @@ RTE_INIT(ipsec_mb_register_kasumi)
> = &ipsec_mb_pmds[IPSEC_MB_PMD_TYPE_KASUMI];
>
> kasumi_data->caps = kasumi_capabilities;
> - kasumi_data->dequeue_burst = kasumi_pmd_dequeue_burst;
> + kasumi_data->dequeue_burst = aesni_mb_dequeue_burst;
> kasumi_data->feature_flags =
> RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO
> |
> RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING
> |
> RTE_CRYPTODEV_FF_NON_BYTE_ALIGNED_DATA
> @@ -469,7 +70,8 @@ RTE_INIT(ipsec_mb_register_kasumi)
> | RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;
> kasumi_data->internals_priv_size = 0;
> kasumi_data->ops = &kasumi_pmd_ops;
> - kasumi_data->qp_priv_size = sizeof(struct kasumi_qp_data);
> - kasumi_data->session_configure = kasumi_session_configure;
> - kasumi_data->session_priv_size = sizeof(struct kasumi_session);
> + kasumi_data->qp_priv_size = sizeof(struct aesni_mb_qp_data);
> + kasumi_data->session_configure = aesni_mb_session_configure;
> + kasumi_data->session_priv_size =
> + sizeof(struct aesni_mb_session);
> }
> diff --git a/drivers/crypto/ipsec_mb/pmd_kasumi_priv.h
> b/drivers/crypto/ipsec_mb/pmd_kasumi_priv.h
> index 8db1d1cc5b..3223cf1a14 100644
> --- a/drivers/crypto/ipsec_mb/pmd_kasumi_priv.h
> +++ b/drivers/crypto/ipsec_mb/pmd_kasumi_priv.h
> @@ -9,8 +9,6 @@
>
> #define KASUMI_KEY_LENGTH 16
> #define KASUMI_IV_LENGTH 8
> -#define KASUMI_MAX_BURST 4
> -#define BYTE_LEN 8
> #define KASUMI_DIGEST_LENGTH 4
>
> uint8_t pmd_driver_id_kasumi;
> @@ -60,22 +58,4 @@ static const struct rte_cryptodev_capabilities
> kasumi_capabilities[] = {
> RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
> };
>
> -/** KASUMI private session structure */
> -struct kasumi_session {
> - /* Keys have to be 16-byte aligned */
> - kasumi_key_sched_t pKeySched_cipher;
> - kasumi_key_sched_t pKeySched_hash;
> - enum ipsec_mb_operation op;
> - enum rte_crypto_auth_operation auth_op;
> - uint16_t cipher_iv_offset;
> -} __rte_cache_aligned;
> -
> -struct kasumi_qp_data {
> - uint8_t temp_digest[KASUMI_DIGEST_LENGTH];
> - /* *< Buffers used to store the digest generated
> - * by the driver when verifying a digest provided
> - * by the user (using authentication verify operation)
> - */
> -};
> -
> #endif /* _PMD_KASUMI_PRIV_H_ */
> diff --git a/drivers/crypto/ipsec_mb/pmd_snow3g.c
> b/drivers/crypto/ipsec_mb/pmd_snow3g.c
> index a96779f059..957f6aade8 100644
> --- a/drivers/crypto/ipsec_mb/pmd_snow3g.c
> +++ b/drivers/crypto/ipsec_mb/pmd_snow3g.c
> @@ -3,539 +3,7 @@
> */
>
> #include "pmd_snow3g_priv.h"
> -
> -/** Parse crypto xform chain and set private session parameters. */
> -static int
> -snow3g_session_configure(IMB_MGR *mgr, void *priv_sess,
> - const struct rte_crypto_sym_xform *xform)
> -{
> - struct snow3g_session *sess = (struct snow3g_session *)priv_sess;
> - const struct rte_crypto_sym_xform *auth_xform = NULL;
> - const struct rte_crypto_sym_xform *cipher_xform = NULL;
> - enum ipsec_mb_operation mode;
> -
> - /* Select Crypto operation - hash then cipher / cipher then hash */
> - int ret = ipsec_mb_parse_xform(xform, &mode, &auth_xform,
> - &cipher_xform, NULL);
> - if (ret)
> - return ret;
> -
> - if (cipher_xform) {
> - /* Only SNOW 3G UEA2 supported */
> - if (cipher_xform->cipher.algo != RTE_CRYPTO_CIPHER_SNOW3G_UEA2)
> - return -ENOTSUP;
> -
> - if (cipher_xform->cipher.iv.length != SNOW3G_IV_LENGTH) {
> - IPSEC_MB_LOG(ERR, "Wrong IV length");
> - return -EINVAL;
> - }
> - if (cipher_xform->cipher.key.length > SNOW3G_MAX_KEY_SIZE) {
> - IPSEC_MB_LOG(ERR, "Not enough memory to store the key");
> - return -ENOMEM;
> - }
> -
> - sess->cipher_iv_offset = cipher_xform->cipher.iv.offset;
> -
> - /* Initialize key */
> - IMB_SNOW3G_INIT_KEY_SCHED(mgr, cipher_xform->cipher.key.data,
> - &sess->pKeySched_cipher);
> - }
> -
> - if (auth_xform) {
> - /* Only SNOW 3G UIA2 supported */
> - if (auth_xform->auth.algo != RTE_CRYPTO_AUTH_SNOW3G_UIA2)
> - return -ENOTSUP;
> -
> - if (auth_xform->auth.digest_length != SNOW3G_DIGEST_LENGTH) {
> - IPSEC_MB_LOG(ERR, "Wrong digest length");
> - return -EINVAL;
> - }
> - if (auth_xform->auth.key.length > SNOW3G_MAX_KEY_SIZE) {
> - IPSEC_MB_LOG(ERR, "Not enough memory to store
> the key");
> - return -ENOMEM;
> - }
> -
> - sess->auth_op = auth_xform->auth.op;
> -
> - if (auth_xform->auth.iv.length != SNOW3G_IV_LENGTH) {
> - IPSEC_MB_LOG(ERR, "Wrong IV length");
> - return -EINVAL;
> - }
> - sess->auth_iv_offset = auth_xform->auth.iv.offset;
> -
> - /* Initialize key */
> - IMB_SNOW3G_INIT_KEY_SCHED(mgr, auth_xform->auth.key.data,
> - &sess->pKeySched_hash);
> - }
> -
> - sess->op = mode;
> -
> - return 0;
> -}
> -
> -/** Check if conditions are met for digest-appended operations */
> -static uint8_t *
> -snow3g_digest_appended_in_src(struct rte_crypto_op *op)
> -{
> - unsigned int auth_size, cipher_size;
> -
> - auth_size = (op->sym->auth.data.offset >> 3) +
> - (op->sym->auth.data.length >> 3);
> - cipher_size = (op->sym->cipher.data.offset >> 3) +
> - (op->sym->cipher.data.length >> 3);
> -
> - if (auth_size < cipher_size)
> - return rte_pktmbuf_mtod_offset(op->sym->m_src,
> - uint8_t *, auth_size);
> -
> - return NULL;
> -}
> -
> -/** Encrypt/decrypt mbufs with same cipher key. */
> -static uint8_t
> -process_snow3g_cipher_op(struct ipsec_mb_qp *qp, struct rte_crypto_op **ops,
> - struct snow3g_session *session,
> - uint8_t num_ops)
> -{
> - uint32_t i;
> - uint8_t processed_ops = 0;
> - const void *src[SNOW3G_MAX_BURST] = {NULL};
> - void *dst[SNOW3G_MAX_BURST] = {NULL};
> - uint8_t *digest_appended[SNOW3G_MAX_BURST] = {NULL};
> - const void *iv[SNOW3G_MAX_BURST] = {NULL};
> - uint32_t num_bytes[SNOW3G_MAX_BURST] = {0};
> - uint32_t cipher_off, cipher_len;
> - int unencrypted_bytes = 0;
> -
> - for (i = 0; i < num_ops; i++) {
> -
> - cipher_off = ops[i]->sym->cipher.data.offset >> 3;
> - cipher_len = ops[i]->sym->cipher.data.length >> 3;
> - src[i] = rte_pktmbuf_mtod_offset(
> - ops[i]->sym->m_src, uint8_t *, cipher_off);
> -
> - /* If out-of-place operation */
> - if (ops[i]->sym->m_dst &&
> - ops[i]->sym->m_src != ops[i]->sym->m_dst) {
> - dst[i] = rte_pktmbuf_mtod_offset(
> - ops[i]->sym->m_dst, uint8_t *, cipher_off);
> -
> - /* In case of out-of-place, auth-cipher operation
> - * with partial encryption of the digest, copy
> - * the remaining, unencrypted part.
> - */
> - if (session->op == IPSEC_MB_OP_HASH_VERIFY_THEN_DECRYPT
> - || session->op == IPSEC_MB_OP_HASH_GEN_THEN_ENCRYPT)
> - unencrypted_bytes =
> - (ops[i]->sym->auth.data.offset >> 3) +
> - (ops[i]->sym->auth.data.length >> 3) +
> - (SNOW3G_DIGEST_LENGTH) -
> - cipher_off - cipher_len;
> - if (unencrypted_bytes > 0)
> - rte_memcpy(
> - rte_pktmbuf_mtod_offset(
> - ops[i]->sym->m_dst, uint8_t *,
> - cipher_off + cipher_len),
> - rte_pktmbuf_mtod_offset(
> - ops[i]->sym->m_src, uint8_t *,
> - cipher_off + cipher_len),
> - unencrypted_bytes);
> - } else
> - dst[i] = rte_pktmbuf_mtod_offset(ops[i]->sym->m_src,
> - uint8_t *, cipher_off);
> -
> - iv[i] = rte_crypto_op_ctod_offset(ops[i], uint8_t *,
> - session->cipher_iv_offset);
> - num_bytes[i] = cipher_len;
> - processed_ops++;
> - }
> -
> - IMB_SNOW3G_F8_N_BUFFER(qp->mb_mgr, &session->pKeySched_cipher, iv,
> - src, dst, num_bytes, processed_ops);
> -
> - /* Take care of the raw digest data in src buffer */
> - for (i = 0; i < num_ops; i++) {
> - if ((session->op == IPSEC_MB_OP_HASH_VERIFY_THEN_DECRYPT ||
> - session->op == IPSEC_MB_OP_HASH_GEN_THEN_ENCRYPT) &&
> - ops[i]->sym->m_dst != NULL) {
> - digest_appended[i] =
> - snow3g_digest_appended_in_src(ops[i]);
> - /* Clear unencrypted digest from
> - * the src buffer
> - */
> - if (digest_appended[i] != NULL)
> - memset(digest_appended[i],
> - 0, SNOW3G_DIGEST_LENGTH);
> - }
> - }
> - return processed_ops;
> -}
> -
> -/** Encrypt/decrypt mbuf (bit level function). */
> -static uint8_t
> -process_snow3g_cipher_op_bit(struct ipsec_mb_qp *qp,
> - struct rte_crypto_op *op,
> - struct snow3g_session *session)
> -{
> - uint8_t *src, *dst;
> - uint8_t *iv;
> - uint32_t length_in_bits, offset_in_bits;
> - int unencrypted_bytes = 0;
> -
> - offset_in_bits = op->sym->cipher.data.offset;
> - src = rte_pktmbuf_mtod(op->sym->m_src, uint8_t *);
> - if (op->sym->m_dst == NULL) {
> - op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
> - IPSEC_MB_LOG(ERR, "bit-level in-place not supported\n");
> - return 0;
> - }
> - length_in_bits = op->sym->cipher.data.length;
> - dst = rte_pktmbuf_mtod(op->sym->m_dst, uint8_t *);
> - /* In case of out-of-place, auth-cipher operation
> - * with partial encryption of the digest, copy
> - * the remaining, unencrypted part.
> - */
> - if (session->op == IPSEC_MB_OP_HASH_VERIFY_THEN_DECRYPT ||
> - session->op == IPSEC_MB_OP_HASH_GEN_THEN_ENCRYPT)
> - unencrypted_bytes =
> - (op->sym->auth.data.offset >> 3) +
> - (op->sym->auth.data.length >> 3) +
> - (SNOW3G_DIGEST_LENGTH) -
> - (offset_in_bits >> 3) -
> - (length_in_bits >> 3);
> - if (unencrypted_bytes > 0)
> - rte_memcpy(
> - rte_pktmbuf_mtod_offset(
> - op->sym->m_dst, uint8_t *,
> - (length_in_bits >> 3)),
> - rte_pktmbuf_mtod_offset(
> - op->sym->m_src, uint8_t *,
> - (length_in_bits >> 3)),
> - unencrypted_bytes);
> -
> - iv = rte_crypto_op_ctod_offset(op, uint8_t *,
> - session->cipher_iv_offset);
> -
> - IMB_SNOW3G_F8_1_BUFFER_BIT(qp->mb_mgr, &session->pKeySched_cipher, iv,
> - src, dst, length_in_bits, offset_in_bits);
> -
> - return 1;
> -}
> -
> -/** Generate/verify hash from mbufs with same hash key. */
> -static int
> -process_snow3g_hash_op(struct ipsec_mb_qp *qp, struct rte_crypto_op **ops,
> - struct snow3g_session *session,
> - uint8_t num_ops)
> -{
> - uint32_t i;
> - uint8_t processed_ops = 0;
> - uint8_t *src, *dst;
> - uint32_t length_in_bits;
> - uint8_t *iv;
> - uint8_t digest_appended = 0;
> - struct snow3g_qp_data *qp_data = ipsec_mb_get_qp_private_data(qp);
> -
> - for (i = 0; i < num_ops; i++) {
> - /* Data must be byte aligned */
> - if ((ops[i]->sym->auth.data.offset % BYTE_LEN) != 0) {
> - ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
> - IPSEC_MB_LOG(ERR, "Offset");
> - break;
> - }
> -
> - dst = NULL;
> -
> - length_in_bits = ops[i]->sym->auth.data.length;
> -
> - src = rte_pktmbuf_mtod_offset(ops[i]->sym->m_src, uint8_t *,
> - (ops[i]->sym->auth.data.offset >> 3));
> - iv = rte_crypto_op_ctod_offset(ops[i], uint8_t *,
> - session->auth_iv_offset);
> -
> - if (session->auth_op == RTE_CRYPTO_AUTH_OP_VERIFY) {
> - dst = qp_data->temp_digest;
> - /* Handle auth cipher verify oop case*/
> - if ((session->op ==
> - IPSEC_MB_OP_ENCRYPT_THEN_HASH_GEN ||
> - session->op ==
> - IPSEC_MB_OP_DECRYPT_THEN_HASH_VERIFY) &&
> - ops[i]->sym->m_dst != NULL)
> - src = rte_pktmbuf_mtod_offset(
> - ops[i]->sym->m_dst, uint8_t *,
> - ops[i]->sym->auth.data.offset >> 3);
> -
> - IMB_SNOW3G_F9_1_BUFFER(qp->mb_mgr,
> - &session->pKeySched_hash,
> - iv, src, length_in_bits, dst);
> - /* Verify digest. */
> - if (memcmp(dst, ops[i]->sym->auth.digest.data,
> - SNOW3G_DIGEST_LENGTH) != 0)
> - ops[i]->status =
> - RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
> - } else {
> - if (session->op ==
> - IPSEC_MB_OP_HASH_VERIFY_THEN_DECRYPT ||
> - session->op ==
> - IPSEC_MB_OP_HASH_GEN_THEN_ENCRYPT)
> - dst = snow3g_digest_appended_in_src(ops[i]);
> -
> - if (dst != NULL)
> - digest_appended = 1;
> - else
> - dst = ops[i]->sym->auth.digest.data;
> -
> - IMB_SNOW3G_F9_1_BUFFER(qp->mb_mgr,
> - &session->pKeySched_hash,
> - iv, src, length_in_bits, dst);
> -
> - /* Copy back digest from src to auth.digest.data */
> - if (digest_appended)
> - rte_memcpy(ops[i]->sym->auth.digest.data,
> - dst, SNOW3G_DIGEST_LENGTH);
> - }
> - processed_ops++;
> - }
> -
> - return processed_ops;
> -}
> -
> -/** Process a batch of crypto ops which shares the same session. */
> -static int
> -process_ops(struct rte_crypto_op **ops, struct snow3g_session *session,
> - struct ipsec_mb_qp *qp, uint8_t num_ops)
> -{
> - uint32_t i;
> - uint32_t processed_ops;
> -
> -#ifdef RTE_LIBRTE_PMD_SNOW3G_DEBUG
> - for (i = 0; i < num_ops; i++) {
> - if (!rte_pktmbuf_is_contiguous(ops[i]->sym->m_src) ||
> - (ops[i]->sym->m_dst != NULL &&
> - !rte_pktmbuf_is_contiguous(
> - ops[i]->sym->m_dst))) {
> - IPSEC_MB_LOG(ERR,
> - "PMD supports only contiguous mbufs, "
> - "op (%p) provides noncontiguous mbuf as "
> - "source/destination buffer.\n", ops[i]);
> - ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
> - return 0;
> - }
> - }
> -#endif
> -
> - switch (session->op) {
> - case IPSEC_MB_OP_ENCRYPT_ONLY:
> - case IPSEC_MB_OP_DECRYPT_ONLY:
> - processed_ops = process_snow3g_cipher_op(qp, ops,
> - session, num_ops);
> - break;
> - case IPSEC_MB_OP_HASH_GEN_ONLY:
> - case IPSEC_MB_OP_HASH_VERIFY_ONLY:
> - processed_ops = process_snow3g_hash_op(qp, ops, session,
> - num_ops);
> - break;
> - case IPSEC_MB_OP_ENCRYPT_THEN_HASH_GEN:
> - case IPSEC_MB_OP_DECRYPT_THEN_HASH_VERIFY:
> - processed_ops = process_snow3g_cipher_op(qp, ops, session,
> - num_ops);
> - process_snow3g_hash_op(qp, ops, session, processed_ops);
> - break;
> - case IPSEC_MB_OP_HASH_VERIFY_THEN_DECRYPT:
> - case IPSEC_MB_OP_HASH_GEN_THEN_ENCRYPT:
> - processed_ops = process_snow3g_hash_op(qp, ops, session,
> - num_ops);
> - process_snow3g_cipher_op(qp, ops, session, processed_ops);
> - break;
> - default:
> - /* Operation not supported. */
> - processed_ops = 0;
> - }
> -
> - for (i = 0; i < num_ops; i++) {
> - /*
> - * If there was no error/authentication failure,
> - * change status to successful.
> - */
> - if (ops[i]->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
> - ops[i]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
> - /* Free session if a session-less crypto op. */
> - if (ops[i]->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
> - memset(session, 0, sizeof(struct snow3g_session));
> - rte_mempool_put(qp->sess_mp, ops[i]->sym->session);
> - ops[i]->sym->session = NULL;
> - }
> - }
> - return processed_ops;
> -}
> -
> -/** Process a crypto op with length/offset in bits. */
> -static int
> -process_op_bit(struct rte_crypto_op *op, struct snow3g_session *session,
> - struct ipsec_mb_qp *qp)
> -{
> - unsigned int processed_op;
> - int ret;
> -
> - switch (session->op) {
> - case IPSEC_MB_OP_ENCRYPT_ONLY:
> - case IPSEC_MB_OP_DECRYPT_ONLY:
> -
> - processed_op = process_snow3g_cipher_op_bit(qp, op,
> - session);
> - break;
> - case IPSEC_MB_OP_HASH_GEN_ONLY:
> - case IPSEC_MB_OP_HASH_VERIFY_ONLY:
> - processed_op = process_snow3g_hash_op(qp, &op, session, 1);
> - break;
> - case IPSEC_MB_OP_ENCRYPT_THEN_HASH_GEN:
> - case IPSEC_MB_OP_DECRYPT_THEN_HASH_VERIFY:
> - processed_op = process_snow3g_cipher_op_bit(qp, op, session);
> - if (processed_op == 1)
> - process_snow3g_hash_op(qp, &op, session, 1);
> - break;
> - case IPSEC_MB_OP_HASH_VERIFY_THEN_DECRYPT:
> - case IPSEC_MB_OP_HASH_GEN_THEN_ENCRYPT:
> - processed_op = process_snow3g_hash_op(qp, &op, session, 1);
> - if (processed_op == 1)
> - process_snow3g_cipher_op_bit(qp, op, session);
> - break;
> - default:
> - /* Operation not supported. */
> - processed_op = 0;
> - }
> -
> - /*
> - * If there was no error/authentication failure,
> - * change status to successful.
> - */
> - if (op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
> - op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
> -
> - /* Free session if a session-less crypto op. */
> - if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
> - memset(CRYPTODEV_GET_SYM_SESS_PRIV(op->sym->session), 0,
> - sizeof(struct snow3g_session));
> - rte_mempool_put(qp->sess_mp, (void *)op->sym->session);
> - op->sym->session = NULL;
> - }
> -
> - if (unlikely(processed_op != 1))
> - return 0;
> -
> - ret = rte_ring_enqueue(qp->ingress_queue, op);
> - if (ret != 0)
> - return ret;
> -
> - return 1;
> -}
> -
> -static uint16_t
> -snow3g_pmd_dequeue_burst(void *queue_pair,
> - struct rte_crypto_op **ops, uint16_t nb_ops)
> -{
> - struct ipsec_mb_qp *qp = queue_pair;
> - struct rte_crypto_op *c_ops[SNOW3G_MAX_BURST];
> - struct rte_crypto_op *curr_c_op;
> -
> - struct snow3g_session *prev_sess = NULL, *curr_sess = NULL;
> - uint32_t i;
> - uint8_t burst_size = 0;
> - uint8_t processed_ops;
> - uint32_t nb_dequeued;
> -
> - nb_dequeued = rte_ring_dequeue_burst(qp->ingress_queue,
> - (void **)ops, nb_ops, NULL);
> -
> - for (i = 0; i < nb_dequeued; i++) {
> - curr_c_op = ops[i];
> -
> - /* Set status as enqueued (not processed yet) by default. */
> - curr_c_op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
> -
> - curr_sess = ipsec_mb_get_session_private(qp, curr_c_op);
> - if (unlikely(curr_sess == NULL ||
> - curr_sess->op == IPSEC_MB_OP_NOT_SUPPORTED)) {
> - curr_c_op->status =
> - RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
> - break;
> - }
> -
> - /* If length/offset is at bit-level,
> - * process this buffer alone.
> - */
> - if (((curr_c_op->sym->cipher.data.length % BYTE_LEN) != 0)
> - || ((curr_c_op->sym->cipher.data.offset
> - % BYTE_LEN) != 0)) {
> - /* Process the ops of the previous session. */
> - if (prev_sess != NULL) {
> - processed_ops = process_ops(c_ops, prev_sess,
> - qp, burst_size);
> - if (processed_ops < burst_size) {
> - burst_size = 0;
> - break;
> - }
> -
> - burst_size = 0;
> - prev_sess = NULL;
> - }
> -
> - processed_ops = process_op_bit(curr_c_op, curr_sess, qp);
> - if (processed_ops != 1)
> - break;
> -
> - continue;
> - }
> -
> - /* Batch ops that share the same session. */
> - if (prev_sess == NULL) {
> - prev_sess = curr_sess;
> - c_ops[burst_size++] = curr_c_op;
> - } else if (curr_sess == prev_sess) {
> - c_ops[burst_size++] = curr_c_op;
> - /*
> - * When there are enough ops to process in a batch,
> - * process them, and start a new batch.
> - */
> - if (burst_size == SNOW3G_MAX_BURST) {
> - processed_ops = process_ops(c_ops, prev_sess,
> - qp, burst_size);
> - if (processed_ops < burst_size) {
> - burst_size = 0;
> - break;
> - }
> -
> - burst_size = 0;
> - prev_sess = NULL;
> - }
> - } else {
> - /*
> - * Different session, process the ops
> - * of the previous session.
> - */
> - processed_ops = process_ops(c_ops, prev_sess,
> - qp, burst_size);
> - if (processed_ops < burst_size) {
> - burst_size = 0;
> - break;
> - }
> -
> - burst_size = 0;
> - prev_sess = curr_sess;
> -
> - c_ops[burst_size++] = curr_c_op;
> - }
> - }
> -
> - if (burst_size != 0) {
> - /* Process the crypto ops of the last session. */
> - processed_ops = process_ops(c_ops, prev_sess,
> - qp, burst_size);
> - }
> -
> - qp->stats.dequeued_count += i;
> - return i;
> -}
> +#include "pmd_aesni_mb_priv.h"
>
> struct rte_cryptodev_ops snow3g_pmd_ops = {
> .dev_configure = ipsec_mb_config,
> @@ -586,7 +54,7 @@ RTE_INIT(ipsec_mb_register_snow3g)
> = &ipsec_mb_pmds[IPSEC_MB_PMD_TYPE_SNOW3G];
>
> snow3g_data->caps = snow3g_capabilities;
> - snow3g_data->dequeue_burst = snow3g_pmd_dequeue_burst;
> + snow3g_data->dequeue_burst = aesni_mb_dequeue_burst;
> snow3g_data->feature_flags =
> RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
> RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
> RTE_CRYPTODEV_FF_NON_BYTE_ALIGNED_DATA |
> @@ -595,7 +63,8 @@ RTE_INIT(ipsec_mb_register_snow3g)
> RTE_CRYPTODEV_FF_DIGEST_ENCRYPTED;
> snow3g_data->internals_priv_size = 0;
> snow3g_data->ops = &snow3g_pmd_ops;
> - snow3g_data->qp_priv_size = sizeof(struct snow3g_qp_data);
> - snow3g_data->session_configure = snow3g_session_configure;
> - snow3g_data->session_priv_size = sizeof(struct snow3g_session);
> + snow3g_data->qp_priv_size = sizeof(struct aesni_mb_qp_data);
> + snow3g_data->session_configure = aesni_mb_session_configure;
> + snow3g_data->session_priv_size =
> + sizeof(struct aesni_mb_session);
> }
> diff --git a/drivers/crypto/ipsec_mb/pmd_snow3g_priv.h
> b/drivers/crypto/ipsec_mb/pmd_snow3g_priv.h
> index ca1ce7f9d6..3ceb33b602 100644
> --- a/drivers/crypto/ipsec_mb/pmd_snow3g_priv.h
> +++ b/drivers/crypto/ipsec_mb/pmd_snow3g_priv.h
> @@ -8,10 +8,7 @@
> #include "ipsec_mb_private.h"
>
> #define SNOW3G_IV_LENGTH 16
> -#define SNOW3G_MAX_BURST 8
> -#define BYTE_LEN 8
> #define SNOW3G_DIGEST_LENGTH 4
> -#define SNOW3G_MAX_KEY_SIZE 128
>
> uint8_t pmd_driver_id_snow3g;
>
> @@ -64,22 +61,4 @@ static const struct rte_cryptodev_capabilities
> snow3g_capabilities[] = {
> RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
> };
>
> -/** SNOW 3G private session structure */
> -struct snow3g_session {
> - enum ipsec_mb_operation op;
> - enum rte_crypto_auth_operation auth_op;
> - snow3g_key_schedule_t pKeySched_cipher;
> - snow3g_key_schedule_t pKeySched_hash;
> - uint16_t cipher_iv_offset;
> - uint16_t auth_iv_offset;
> -} __rte_cache_aligned;
> -
> -struct snow3g_qp_data {
> - uint8_t temp_digest[SNOW3G_DIGEST_LENGTH];
> - /**< Buffer used to store the digest generated
> - * by the driver when verifying a digest provided
> - * by the user (using authentication verify operation)
> - */
> -};
> -
> #endif /* _PMD_SNOW3G_PRIV_H_ */
> diff --git a/drivers/crypto/ipsec_mb/pmd_zuc.c
> b/drivers/crypto/ipsec_mb/pmd_zuc.c
> index 44781be1d1..b72191c7a7 100644
> --- a/drivers/crypto/ipsec_mb/pmd_zuc.c
> +++ b/drivers/crypto/ipsec_mb/pmd_zuc.c
> @@ -3,343 +3,7 @@
> */
>
> #include "pmd_zuc_priv.h"
> -
> -/** Parse crypto xform chain and set private session parameters. */
> -static int
> -zuc_session_configure(__rte_unused IMB_MGR * mgr, void *zuc_sess,
> - const struct rte_crypto_sym_xform *xform)
> -{
> - struct zuc_session *sess = (struct zuc_session *) zuc_sess;
> - const struct rte_crypto_sym_xform *auth_xform = NULL;
> - const struct rte_crypto_sym_xform *cipher_xform = NULL;
> - enum ipsec_mb_operation mode;
> - /* Select Crypto operation - hash then cipher / cipher then hash */
> - int ret = ipsec_mb_parse_xform(xform, &mode, &auth_xform,
> - &cipher_xform, NULL);
> -
> - if (ret)
> - return ret;
> -
> - if (cipher_xform) {
> - /* Only ZUC EEA3 supported */
> - if (cipher_xform->cipher.algo != RTE_CRYPTO_CIPHER_ZUC_EEA3)
> - return -ENOTSUP;
> -
> - if (cipher_xform->cipher.iv.length != ZUC_IV_KEY_LENGTH) {
> - IPSEC_MB_LOG(ERR, "Wrong IV length");
> - return -EINVAL;
> - }
> - sess->cipher_iv_offset = cipher_xform->cipher.iv.offset;
> -
> - /* Copy the key */
> - memcpy(sess->pKey_cipher, cipher_xform->cipher.key.data,
> - ZUC_IV_KEY_LENGTH);
> - }
> -
> - if (auth_xform) {
> - /* Only ZUC EIA3 supported */
> - if (auth_xform->auth.algo != RTE_CRYPTO_AUTH_ZUC_EIA3)
> - return -ENOTSUP;
> -
> - if (auth_xform->auth.digest_length != ZUC_DIGEST_LENGTH) {
> - IPSEC_MB_LOG(ERR, "Wrong digest length");
> - return -EINVAL;
> - }
> -
> - sess->auth_op = auth_xform->auth.op;
> -
> - if (auth_xform->auth.iv.length != ZUC_IV_KEY_LENGTH) {
> - IPSEC_MB_LOG(ERR, "Wrong IV length");
> - return -EINVAL;
> - }
> - sess->auth_iv_offset = auth_xform->auth.iv.offset;
> -
> - /* Copy the key */
> - memcpy(sess->pKey_hash, auth_xform->auth.key.data,
> - ZUC_IV_KEY_LENGTH);
> - }
> -
> - sess->op = mode;
> - return 0;
> -}
> -
> -/** Encrypt/decrypt mbufs. */
> -static uint8_t
> -process_zuc_cipher_op(struct ipsec_mb_qp *qp, struct rte_crypto_op **ops,
> - struct zuc_session **sessions,
> - uint8_t num_ops)
> -{
> - unsigned int i;
> - uint8_t processed_ops = 0;
> - const void *src[ZUC_MAX_BURST];
> - void *dst[ZUC_MAX_BURST];
> - const void *iv[ZUC_MAX_BURST];
> - uint32_t num_bytes[ZUC_MAX_BURST];
> - const void *cipher_keys[ZUC_MAX_BURST];
> - struct zuc_session *sess;
> -
> - for (i = 0; i < num_ops; i++) {
> - if (((ops[i]->sym->cipher.data.length % BYTE_LEN) != 0)
> - || ((ops[i]->sym->cipher.data.offset
> - % BYTE_LEN) != 0)) {
> - ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
> - IPSEC_MB_LOG(ERR, "Data Length or offset");
> - break;
> - }
> -
> - sess = sessions[i];
> -
> -#ifdef RTE_LIBRTE_PMD_ZUC_DEBUG
> - if (!rte_pktmbuf_is_contiguous(ops[i]->sym->m_src) ||
> - (ops[i]->sym->m_dst != NULL &&
> - !rte_pktmbuf_is_contiguous(
> - ops[i]->sym->m_dst))) {
> - IPSEC_MB_LOG(ERR, "PMD supports only "
> - " contiguous mbufs, op (%p) "
> - "provides noncontiguous mbuf "
> - "as source/destination buffer.\n",
> - "PMD supports only contiguous mbufs, "
> - "op (%p) provides noncontiguous mbuf "
> - "as source/destination buffer.\n",
> - ops[i]);
> - ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
> - break;
> - }
> -#endif
> -
> - src[i] = rte_pktmbuf_mtod_offset(ops[i]->sym->m_src,
> - uint8_t *,
> - (ops[i]->sym->cipher.data.offset >> 3));
> - dst[i] = ops[i]->sym->m_dst ?
> - rte_pktmbuf_mtod_offset(ops[i]->sym->m_dst, uint8_t *,
> - (ops[i]->sym->cipher.data.offset >> 3)) :
> - rte_pktmbuf_mtod_offset(ops[i]->sym->m_src, uint8_t *,
> - (ops[i]->sym->cipher.data.offset >> 3));
> - iv[i] = rte_crypto_op_ctod_offset(ops[i], uint8_t *,
> - sess->cipher_iv_offset);
> - num_bytes[i] = ops[i]->sym->cipher.data.length >> 3;
> -
> - cipher_keys[i] = sess->pKey_cipher;
> -
> - processed_ops++;
> - }
> -
> - IMB_ZUC_EEA3_N_BUFFER(qp->mb_mgr, (const void **)cipher_keys,
> - (const void **)iv, (const void **)src, (void **)dst,
> - num_bytes, processed_ops);
> -
> - return processed_ops;
> -}
> -
> -/** Generate/verify hash from mbufs. */
> -static int
> -process_zuc_hash_op(struct ipsec_mb_qp *qp, struct rte_crypto_op **ops,
> - struct zuc_session **sessions,
> - uint8_t num_ops)
> -{
> - unsigned int i;
> - uint8_t processed_ops = 0;
> - uint8_t *src[ZUC_MAX_BURST] = { 0 };
> - uint32_t *dst[ZUC_MAX_BURST];
> - uint32_t length_in_bits[ZUC_MAX_BURST] = { 0 };
> - uint8_t *iv[ZUC_MAX_BURST] = { 0 };
> - const void *hash_keys[ZUC_MAX_BURST] = { 0 };
> - struct zuc_session *sess;
> - struct zuc_qp_data *qp_data = ipsec_mb_get_qp_private_data(qp);
> -
> -
> - for (i = 0; i < num_ops; i++) {
> - /* Data must be byte aligned */
> - if ((ops[i]->sym->auth.data.offset % BYTE_LEN) != 0) {
> - ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
> - IPSEC_MB_LOG(ERR, "Offset");
> - break;
> - }
> -
> - sess = sessions[i];
> -
> - length_in_bits[i] = ops[i]->sym->auth.data.length;
> -
> - src[i] = rte_pktmbuf_mtod_offset(ops[i]->sym->m_src,
> - uint8_t *,
> - (ops[i]->sym->auth.data.offset >> 3));
> - iv[i] = rte_crypto_op_ctod_offset(ops[i], uint8_t *,
> - sess->auth_iv_offset);
> -
> - hash_keys[i] = sess->pKey_hash;
> - if (sess->auth_op == RTE_CRYPTO_AUTH_OP_VERIFY)
> - dst[i] = (uint32_t *)qp_data->temp_digest[i];
> - else
> - dst[i] = (uint32_t *)ops[i]->sym->auth.digest.data;
> -
> - processed_ops++;
> - }
> -
> - IMB_ZUC_EIA3_N_BUFFER(qp->mb_mgr, (const void **)hash_keys,
> - (const void * const *)iv, (const void * const *)src,
> - length_in_bits, dst, processed_ops);
> -
> - /*
> - * If tag needs to be verified, compare generated tag
> - * with attached tag
> - */
> - for (i = 0; i < processed_ops; i++)
> - if (sessions[i]->auth_op == RTE_CRYPTO_AUTH_OP_VERIFY)
> - if (memcmp(dst[i], ops[i]->sym->auth.digest.data,
> - ZUC_DIGEST_LENGTH) != 0)
> - ops[i]->status =
> - RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
> -
> - return processed_ops;
> -}
> -
> -/** Process a batch of crypto ops which shares the same operation type. */
> -static int
> -process_ops(struct rte_crypto_op **ops, enum ipsec_mb_operation op_type,
> - struct zuc_session **sessions,
> - struct ipsec_mb_qp *qp, uint8_t num_ops)
> -{
> - unsigned int i;
> - unsigned int processed_ops = 0;
> -
> - switch (op_type) {
> - case IPSEC_MB_OP_ENCRYPT_ONLY:
> - case IPSEC_MB_OP_DECRYPT_ONLY:
> - processed_ops = process_zuc_cipher_op(qp, ops,
> - sessions, num_ops);
> - break;
> - case IPSEC_MB_OP_HASH_GEN_ONLY:
> - case IPSEC_MB_OP_HASH_VERIFY_ONLY:
> - processed_ops = process_zuc_hash_op(qp, ops, sessions,
> - num_ops);
> - break;
> - case IPSEC_MB_OP_ENCRYPT_THEN_HASH_GEN:
> - case IPSEC_MB_OP_DECRYPT_THEN_HASH_VERIFY:
> - processed_ops = process_zuc_cipher_op(qp, ops, sessions,
> - num_ops);
> - process_zuc_hash_op(qp, ops, sessions, processed_ops);
> - break;
> - case IPSEC_MB_OP_HASH_VERIFY_THEN_DECRYPT:
> - case IPSEC_MB_OP_HASH_GEN_THEN_ENCRYPT:
> - processed_ops = process_zuc_hash_op(qp, ops, sessions,
> - num_ops);
> - process_zuc_cipher_op(qp, ops, sessions, processed_ops);
> - break;
> - default:
> - /* Operation not supported. */
> - for (i = 0; i < num_ops; i++)
> - ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
> - }
> -
> - for (i = 0; i < num_ops; i++) {
> - /*
> - * If there was no error/authentication failure,
> - * change status to successful.
> - */
> - if (ops[i]->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
> - ops[i]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
> - /* Free session if a session-less crypto op. */
> - if (ops[i]->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
> - memset(sessions[i], 0, sizeof(struct zuc_session));
> - rte_mempool_put(qp->sess_mp, ops[i]->sym->session);
> - ops[i]->sym->session = NULL;
> - }
> - }
> - return processed_ops;
> -}
> -
> -static uint16_t
> -zuc_pmd_dequeue_burst(void *queue_pair,
> - struct rte_crypto_op **c_ops, uint16_t nb_ops)
> -{
> -
> - struct rte_crypto_op *curr_c_op;
> -
> - struct zuc_session *curr_sess;
> - struct zuc_session *sessions[ZUC_MAX_BURST];
> - struct rte_crypto_op *int_c_ops[ZUC_MAX_BURST];
> - enum ipsec_mb_operation prev_zuc_op = IPSEC_MB_OP_NOT_SUPPORTED;
> - enum ipsec_mb_operation curr_zuc_op;
> - struct ipsec_mb_qp *qp = queue_pair;
> - unsigned int nb_dequeued;
> - unsigned int i;
> - uint8_t burst_size = 0;
> - uint8_t processed_ops;
> -
> - nb_dequeued = rte_ring_dequeue_burst(qp->ingress_queue,
> - (void **)c_ops, nb_ops, NULL);
> -
> -
> - for (i = 0; i < nb_dequeued; i++) {
> - curr_c_op = c_ops[i];
> -
> - curr_sess = (struct zuc_session *)
> - ipsec_mb_get_session_private(qp, curr_c_op);
> - if (unlikely(curr_sess == NULL)) {
> - curr_c_op->status =
> - RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
> - break;
> - }
> -
> - curr_zuc_op = curr_sess->op;
> -
> - /*
> - * Batch ops that share the same operation type
> - * (cipher only, auth only...).
> - */
> - if (burst_size == 0) {
> - prev_zuc_op = curr_zuc_op;
> - int_c_ops[0] = curr_c_op;
> - sessions[0] = curr_sess;
> - burst_size++;
> - } else if (curr_zuc_op == prev_zuc_op) {
> - int_c_ops[burst_size] = curr_c_op;
> - sessions[burst_size] = curr_sess;
> - burst_size++;
> - /*
> - * When there are enough ops to process in a batch,
> - * process them, and start a new batch.
> - */
> - if (burst_size == ZUC_MAX_BURST) {
> - processed_ops = process_ops(int_c_ops, curr_zuc_op,
> - sessions, qp, burst_size);
> - if (processed_ops < burst_size) {
> - burst_size = 0;
> - break;
> - }
> -
> - burst_size = 0;
> - }
> - } else {
> - /*
> - * Different operation type, process the ops
> - * of the previous type.
> - */
> - processed_ops = process_ops(int_c_ops, prev_zuc_op,
> - sessions, qp, burst_size);
> - if (processed_ops < burst_size) {
> - burst_size = 0;
> - break;
> - }
> -
> - burst_size = 0;
> - prev_zuc_op = curr_zuc_op;
> -
> - int_c_ops[0] = curr_c_op;
> - sessions[0] = curr_sess;
> - burst_size++;
> - }
> - }
> -
> - if (burst_size != 0) {
> - /* Process the crypto ops of the last operation type. */
> - processed_ops = process_ops(int_c_ops, prev_zuc_op,
> - sessions, qp, burst_size);
> - }
> -
> - qp->stats.dequeued_count += i;
> - return i;
> -}
> +#include "pmd_aesni_mb_priv.h"
>
> struct rte_cryptodev_ops zuc_pmd_ops = {
> .dev_configure = ipsec_mb_config,
> @@ -390,7 +54,7 @@ RTE_INIT(ipsec_mb_register_zuc)
> = &ipsec_mb_pmds[IPSEC_MB_PMD_TYPE_ZUC];
>
> zuc_data->caps = zuc_capabilities;
> - zuc_data->dequeue_burst = zuc_pmd_dequeue_burst;
> + zuc_data->dequeue_burst = aesni_mb_dequeue_burst;
> zuc_data->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO
> | RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING
> | RTE_CRYPTODEV_FF_NON_BYTE_ALIGNED_DATA
> @@ -399,7 +63,8 @@ RTE_INIT(ipsec_mb_register_zuc)
> | RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;
> zuc_data->internals_priv_size = 0;
> zuc_data->ops = &zuc_pmd_ops;
> - zuc_data->qp_priv_size = sizeof(struct zuc_qp_data);
> - zuc_data->session_configure = zuc_session_configure;
> - zuc_data->session_priv_size = sizeof(struct zuc_session);
> + zuc_data->qp_priv_size = sizeof(struct aesni_mb_qp_data);
> + zuc_data->session_configure = aesni_mb_session_configure;
> + zuc_data->session_priv_size =
> + sizeof(struct aesni_mb_session);
> }
> diff --git a/drivers/crypto/ipsec_mb/pmd_zuc_priv.h
> b/drivers/crypto/ipsec_mb/pmd_zuc_priv.h
> index 76fd6758c2..a1e8e3aade 100644
> --- a/drivers/crypto/ipsec_mb/pmd_zuc_priv.h
> +++ b/drivers/crypto/ipsec_mb/pmd_zuc_priv.h
> @@ -10,7 +10,6 @@
> #define ZUC_IV_KEY_LENGTH 16
> #define ZUC_DIGEST_LENGTH 4
> #define ZUC_MAX_BURST 16
> -#define BYTE_LEN 8
>
> uint8_t pmd_driver_id_zuc;
>
> @@ -63,23 +62,4 @@ static const struct rte_cryptodev_capabilities
> zuc_capabilities[] = {
> RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
> };
>
> -/** ZUC private session structure */
> -struct zuc_session {
> - enum ipsec_mb_operation op;
> - enum rte_crypto_auth_operation auth_op;
> - uint8_t pKey_cipher[ZUC_IV_KEY_LENGTH];
> - uint8_t pKey_hash[ZUC_IV_KEY_LENGTH];
> - uint16_t cipher_iv_offset;
> - uint16_t auth_iv_offset;
> -} __rte_cache_aligned;
> -
> -struct zuc_qp_data {
> -
> - uint8_t temp_digest[ZUC_MAX_BURST][ZUC_DIGEST_LENGTH];
> - /* *< Buffers used to store the digest generated
> - * by the driver when verifying a digest provided
> - * by the user (using authentication verify operation)
> - */
> -};
> -
> #endif /* _PMD_ZUC_PRIV_H_ */
> --
> 2.25.1
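As a reading aid for the long quoted diff above: each wrapper PMD loses its direct-API data path and its private session/queue-pair structures, and its registration is repointed at the shared AESNI MB job-API callbacks. A minimal sketch of the resulting KASUMI registration is shown below; the identifiers are taken from the '+' lines of the diff, but the declaration type of kasumi_data is an assumption here, so treat this as an illustration rather than the literal patch. SNOW3G and ZUC follow the same pattern in the later hunks.

    RTE_INIT(ipsec_mb_register_kasumi)
    {
            /* Declaration type assumed; see ipsec_mb_private.h. */
            struct ipsec_mb_internals *kasumi_data
                    = &ipsec_mb_pmds[IPSEC_MB_PMD_TYPE_KASUMI];

            /* Reuse the AESNI MB job-API path instead of the direct API. */
            kasumi_data->dequeue_burst = aesni_mb_dequeue_burst;
            kasumi_data->qp_priv_size = sizeof(struct aesni_mb_qp_data);
            kasumi_data->session_configure = aesni_mb_session_configure;
            kasumi_data->session_priv_size = sizeof(struct aesni_mb_session);
    }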
^ permalink raw reply [flat|nested] 45+ messages in thread
* RE: [PATCH v4] crypto/ipsec_mb: unified IPsec MB interface
2024-03-05 5:39 ` Honnappa Nagarahalli
@ 2024-03-05 17:31 ` Wathsala Wathawana Vithanage
0 siblings, 0 replies; 45+ messages in thread
From: Wathsala Wathawana Vithanage @ 2024-03-05 17:31 UTC (permalink / raw)
To: Honnappa Nagarahalli, Akhil Goyal
Cc: Dooley, Brian, Ji, Kai, De Lara Guarch, Pablo, Power, Ciara, dev,
Ruifeng Wang, Jack Bond-Preston, nd, nd
> This is being worked on. We are in the process of creating a new tag. We will
> update soon.
>
A new tag SECLIB-IPSEC-2024.03.05 has been created. We will be sending out a
patch for the documentation soon.
^ permalink raw reply [flat|nested] 45+ messages in thread
* [PATCH v5 1/4] crypto/ipsec_mb: bump minimum IPsec Multi-buffer version
2023-12-12 15:36 [PATCH v1] crypto/ipsec_mb: unified IPsec MB interface Brian Dooley
` (2 preceding siblings ...)
2024-02-28 11:33 ` [PATCH v4] " Brian Dooley
@ 2024-03-05 17:42 ` Brian Dooley
2024-03-05 17:42 ` [PATCH v5 2/4] doc: remove outdated version details Brian Dooley
` (3 more replies)
2024-03-12 13:50 ` [PATCH v6 1/5] ci: replace IPsec-mb package install Brian Dooley
2024-03-14 10:37 ` [PATCH v7 1/2] doc: remove outdated version details Brian Dooley
5 siblings, 4 replies; 45+ messages in thread
From: Brian Dooley @ 2024-03-05 17:42 UTC (permalink / raw)
To: Kai Ji, Pablo de Lara
Cc: dev, gakhil, Sivaramakrishnan Venkat, Ciara Power, Wathsala Vithanage
From: Sivaramakrishnan Venkat <venkatx.sivaramakrishnan@intel.com>
The SW PMDs bump the minimum supported IPsec Multi-buffer version to 1.4.
IPsec Multi-buffer version 1.4 or greater is now required.
Signed-off-by: Sivaramakrishnan Venkat <venkatx.sivaramakrishnan@intel.com>
Acked-by: Ciara Power <ciara.power@intel.com>
Acked-by: Pablo de Lara <pablo.de.lara.guarch@intel.com>
Acked-by: Wathsala Vithanage <wathsala.vithanage@arm.com>
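As background for this bump, the sketch below (an illustration only, not part of the patch) shows how a build-time floor on intel-ipsec-mb can be expressed with the IMB_VERSION()/IMB_VERSION_NUM macros from <intel-ipsec-mb.h>:

    /* Minimal sketch: refuse to build against intel-ipsec-mb older than 1.4.0. */
    #include <intel-ipsec-mb.h>

    #if IMB_VERSION_NUM < IMB_VERSION(1, 4, 0)
    #error "intel-ipsec-mb 1.4.0 or newer is required"
    #endif

With 1.4 as the floor, the IMB_VERSION(1, 1, 0), IMB_VERSION(1, 2, 0) and IMB_VERSION(1, 3, 0) conditionals in the driver always resolve the same way, which is why the hunks below simply delete them.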
---
v5:
- Rebased and added to patchset
v4:
- 24.03 release notes updated to bump minimum IPSec Multi-buffer
version to 1.4 for SW PMDs.
v2:
- Removed unused macro in ipsec_mb_ops.c
- set_gcm_job() modified correctly to keep multi_sgl_job line
- Updated SW PMDs documentation for minimum IPSec Multi-buffer version
- Updated commit message, and patch title.
---
doc/guides/cryptodevs/aesni_gcm.rst | 3 +-
doc/guides/cryptodevs/aesni_mb.rst | 3 +-
doc/guides/cryptodevs/chacha20_poly1305.rst | 3 +-
doc/guides/cryptodevs/kasumi.rst | 3 +-
doc/guides/cryptodevs/snow3g.rst | 3 +-
doc/guides/cryptodevs/zuc.rst | 3 +-
doc/guides/rel_notes/release_24_03.rst | 4 +
drivers/crypto/ipsec_mb/ipsec_mb_ops.c | 23 ---
drivers/crypto/ipsec_mb/meson.build | 2 +-
drivers/crypto/ipsec_mb/pmd_aesni_mb.c | 164 --------------------
drivers/crypto/ipsec_mb/pmd_aesni_mb_priv.h | 9 --
11 files changed, 17 insertions(+), 203 deletions(-)
diff --git a/doc/guides/cryptodevs/aesni_gcm.rst b/doc/guides/cryptodevs/aesni_gcm.rst
index f5773426ee..dc665e536c 100644
--- a/doc/guides/cryptodevs/aesni_gcm.rst
+++ b/doc/guides/cryptodevs/aesni_gcm.rst
@@ -85,7 +85,8 @@ and the external crypto libraries supported by them:
18.05 - 19.02 Multi-buffer library 0.49 - 0.52
19.05 - 20.08 Multi-buffer library 0.52 - 0.55
20.11 - 21.08 Multi-buffer library 0.53 - 1.3*
- 21.11+ Multi-buffer library 1.0 - 1.5*
+ 21.11 - 23.11 Multi-buffer library 1.0 - 1.5*
+ 24.03+ Multi-buffer library 1.4 - 1.5*
============= ================================
\* Multi-buffer library 1.0 or newer only works for Meson but not Make build system.
diff --git a/doc/guides/cryptodevs/aesni_mb.rst b/doc/guides/cryptodevs/aesni_mb.rst
index b2e74ba417..5d670ee237 100644
--- a/doc/guides/cryptodevs/aesni_mb.rst
+++ b/doc/guides/cryptodevs/aesni_mb.rst
@@ -146,7 +146,8 @@ and the Multi-Buffer library version supported by them:
19.05 - 19.08 0.52
19.11 - 20.08 0.52 - 0.55
20.11 - 21.08 0.53 - 1.3*
- 21.11+ 1.0 - 1.5*
+ 21.11 - 23.11 1.0 - 1.5*
+ 24.03+ 1.4 - 1.5*
============== ============================
\* Multi-buffer library 1.0 or newer only works for Meson but not Make build system.
diff --git a/doc/guides/cryptodevs/chacha20_poly1305.rst b/doc/guides/cryptodevs/chacha20_poly1305.rst
index 9d4bf86cf1..c32866b301 100644
--- a/doc/guides/cryptodevs/chacha20_poly1305.rst
+++ b/doc/guides/cryptodevs/chacha20_poly1305.rst
@@ -72,7 +72,8 @@ and the external crypto libraries supported by them:
============= ================================
DPDK version Crypto library version
============= ================================
- 21.11+ Multi-buffer library 1.0-1.5*
+ 21.11 - 23.11 Multi-buffer library 1.0-1.5*
+ 24.03+ Multi-buffer library 1.4-1.5*
============= ================================
\* Multi-buffer library 1.0 or newer only works for Meson but not Make build system.
diff --git a/doc/guides/cryptodevs/kasumi.rst b/doc/guides/cryptodevs/kasumi.rst
index 0989054875..a8f4e6b204 100644
--- a/doc/guides/cryptodevs/kasumi.rst
+++ b/doc/guides/cryptodevs/kasumi.rst
@@ -87,7 +87,8 @@ and the external crypto libraries supported by them:
============= ================================
16.11 - 19.11 LibSSO KASUMI
20.02 - 21.08 Multi-buffer library 0.53 - 1.3*
- 21.11+ Multi-buffer library 1.0 - 1.5*
+ 21.11 - 23.11 Multi-buffer library 1.0 - 1.5*
+ 24.03+ Multi-buffer library 1.4 - 1.5*
============= ================================
\* Multi-buffer library 1.0 or newer only works for Meson but not Make build system.
diff --git a/doc/guides/cryptodevs/snow3g.rst b/doc/guides/cryptodevs/snow3g.rst
index 3392932653..46863462e5 100644
--- a/doc/guides/cryptodevs/snow3g.rst
+++ b/doc/guides/cryptodevs/snow3g.rst
@@ -96,7 +96,8 @@ and the external crypto libraries supported by them:
============= ================================
16.04 - 19.11 LibSSO SNOW3G
20.02 - 21.08 Multi-buffer library 0.53 - 1.3*
- 21.11+ Multi-buffer library 1.0 - 1.5*
+ 21.11 - 23.11 Multi-buffer library 1.0 - 1.5*
+ 24.03+ Multi-buffer library 1.4 - 1.5*
============= ================================
\* Multi-buffer library 1.0 or newer only works for Meson but not Make build system.
diff --git a/doc/guides/cryptodevs/zuc.rst b/doc/guides/cryptodevs/zuc.rst
index a414b5ad2c..51867e1a16 100644
--- a/doc/guides/cryptodevs/zuc.rst
+++ b/doc/guides/cryptodevs/zuc.rst
@@ -95,7 +95,8 @@ and the external crypto libraries supported by them:
============= ================================
16.11 - 19.11 LibSSO ZUC
20.02 - 21.08 Multi-buffer library 0.53 - 1.3*
- 21.11+ Multi-buffer library 1.0 - 1.5*
+ 21.11 - 23.11 Multi-buffer library 1.0 - 1.5*
+ 24.03+ Multi-buffer library 1.4 - 1.5*
============= ================================
\* Multi-buffer library 1.0 or newer only works for Meson but not Make build system.
diff --git a/doc/guides/rel_notes/release_24_03.rst b/doc/guides/rel_notes/release_24_03.rst
index 78590c047b..8fa8cf1dd6 100644
--- a/doc/guides/rel_notes/release_24_03.rst
+++ b/doc/guides/rel_notes/release_24_03.rst
@@ -144,6 +144,10 @@ New Features
* Added support for GEN LCE (1454) device, for AES-GCM only.
* Enabled support for virtual QAT - vQAT (0da5) devices in QAT crypto driver.
+* **Updated ipsec_mb crypto driver.**
+
+ * Bump minimum IPSec Multi-buffer version to 1.4 for SW PMDs.
+
* **Updated Marvell cnxk crypto driver.**
* Added support for Rx inject in crypto_cn10k.
diff --git a/drivers/crypto/ipsec_mb/ipsec_mb_ops.c b/drivers/crypto/ipsec_mb/ipsec_mb_ops.c
index f21f9cc5a0..d25c671d7d 100644
--- a/drivers/crypto/ipsec_mb/ipsec_mb_ops.c
+++ b/drivers/crypto/ipsec_mb/ipsec_mb_ops.c
@@ -11,7 +11,6 @@
#include "ipsec_mb_private.h"
-#define IMB_MP_REQ_VER_STR "1.1.0"
/** Configure device */
int
@@ -147,15 +146,10 @@ ipsec_mb_qp_release(struct rte_cryptodev *dev, uint16_t qp_id)
if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
rte_ring_free(rte_ring_lookup(qp->name));
-#if IMB_VERSION(1, 1, 0) > IMB_VERSION_NUM
- if (qp->mb_mgr)
- free_mb_mgr(qp->mb_mgr);
-#else
if (qp->mb_mgr_mz) {
rte_memzone_free(qp->mb_mgr_mz);
qp->mb_mgr = NULL;
}
-#endif
rte_free(qp);
dev->data->queue_pairs[qp_id] = NULL;
} else { /* secondary process */
@@ -211,7 +205,6 @@ static struct rte_ring
RING_F_SP_ENQ | RING_F_SC_DEQ);
}
-#if IMB_VERSION(1, 1, 0) <= IMB_VERSION_NUM
static IMB_MGR *
ipsec_mb_alloc_mgr_from_memzone(const struct rte_memzone **mb_mgr_mz,
const char *mb_mgr_mz_name)
@@ -244,7 +237,6 @@ ipsec_mb_alloc_mgr_from_memzone(const struct rte_memzone **mb_mgr_mz,
}
return mb_mgr;
}
-#endif
/** Setup a queue pair */
int
@@ -260,12 +252,6 @@ ipsec_mb_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
int ret;
if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
-#if IMB_VERSION(1, 1, 0) > IMB_VERSION_NUM
- IPSEC_MB_LOG(ERR, "The intel-ipsec-mb version (%s) does not support multiprocess,"
- "the minimum version required for this feature is %s.",
- IMB_VERSION_STR, IMB_MP_REQ_VER_STR);
- return -EINVAL;
-#endif
qp = dev->data->queue_pairs[qp_id];
if (qp == NULL) {
IPSEC_MB_LOG(DEBUG, "Secondary process setting up device qp.");
@@ -285,15 +271,11 @@ ipsec_mb_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
return -ENOMEM;
}
-#if IMB_VERSION(1, 1, 0) > IMB_VERSION_NUM
- qp->mb_mgr = alloc_init_mb_mgr();
-#else
char mz_name[IPSEC_MB_MAX_MZ_NAME];
snprintf(mz_name, sizeof(mz_name), "IMB_MGR_DEV_%d_QP_%d",
dev->data->dev_id, qp_id);
qp->mb_mgr = ipsec_mb_alloc_mgr_from_memzone(&(qp->mb_mgr_mz),
mz_name);
-#endif
if (qp->mb_mgr == NULL) {
ret = -ENOMEM;
goto qp_setup_cleanup;
@@ -330,15 +312,10 @@ ipsec_mb_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
return 0;
qp_setup_cleanup:
-#if IMB_VERSION(1, 1, 0) > IMB_VERSION_NUM
- if (qp->mb_mgr)
- free_mb_mgr(qp->mb_mgr);
-#else
if (rte_eal_process_type() == RTE_PROC_SECONDARY)
return ret;
if (qp->mb_mgr_mz)
rte_memzone_free(qp->mb_mgr_mz);
-#endif
rte_free(qp);
return ret;
}
diff --git a/drivers/crypto/ipsec_mb/meson.build b/drivers/crypto/ipsec_mb/meson.build
index 87bf965554..0c988d7411 100644
--- a/drivers/crypto/ipsec_mb/meson.build
+++ b/drivers/crypto/ipsec_mb/meson.build
@@ -7,7 +7,7 @@ if is_windows
subdir_done()
endif
-IMB_required_ver = '1.0.0'
+IMB_required_ver = '1.4.0'
IMB_header = '#include<intel-ipsec-mb.h>'
if arch_subdir == 'arm'
IMB_header = '#include<ipsec-mb.h>'
diff --git a/drivers/crypto/ipsec_mb/pmd_aesni_mb.c b/drivers/crypto/ipsec_mb/pmd_aesni_mb.c
index 4de4866cf3..2acd229268 100644
--- a/drivers/crypto/ipsec_mb/pmd_aesni_mb.c
+++ b/drivers/crypto/ipsec_mb/pmd_aesni_mb.c
@@ -210,13 +210,9 @@ aesni_mb_set_session_auth_parameters(const IMB_MGR *mb_mgr,
}
} else if (xform->auth.key.length == 32) {
sess->template_job.hash_alg = IMB_AUTH_ZUC256_EIA3_BITLEN;
-#if IMB_VERSION(1, 2, 0) < IMB_VERSION_NUM
if (sess->auth.req_digest_len != 4 &&
sess->auth.req_digest_len != 8 &&
sess->auth.req_digest_len != 16) {
-#else
- if (sess->auth.req_digest_len != 4) {
-#endif
IPSEC_MB_LOG(ERR, "Invalid digest size\n");
return -EINVAL;
}
@@ -845,11 +841,9 @@ aesni_mb_session_configure(IMB_MGR *mb_mgr,
}
}
-#if IMB_VERSION(1, 3, 0) < IMB_VERSION_NUM
sess->session_id = imb_set_session(mb_mgr, &sess->template_job);
sess->pid = getpid();
RTE_PER_LCORE(pid) = sess->pid;
-#endif
return 0;
}
@@ -982,9 +976,7 @@ aesni_mb_set_docsis_sec_session_parameters(
goto error_exit;
}
-#if IMB_VERSION(1, 3, 0) < IMB_VERSION_NUM
ipsec_sess->session_id = imb_set_session(mb_mgr, &ipsec_sess->template_job);
-#endif
error_exit:
free_mb_mgr(mb_mgr);
@@ -1239,7 +1231,6 @@ imb_lib_support_sgl_algo(IMB_CIPHER_MODE alg)
return 0;
}
-#if IMB_VERSION(1, 2, 0) < IMB_VERSION_NUM
static inline int
single_sgl_job(IMB_JOB *job, struct rte_crypto_op *op,
int oop, uint32_t offset, struct rte_mbuf *m_src,
@@ -1324,7 +1315,6 @@ single_sgl_job(IMB_JOB *job, struct rte_crypto_op *op,
job->sgl_io_segs = sgl_segs;
return 0;
}
-#endif
static inline int
multi_sgl_job(IMB_JOB *job, struct rte_crypto_op *op,
@@ -1394,9 +1384,7 @@ set_gcm_job(IMB_MGR *mb_mgr, IMB_JOB *job, const uint8_t sgl,
job->msg_len_to_hash_in_bytes = 0;
job->msg_len_to_cipher_in_bytes = 0;
job->cipher_start_src_offset_in_bytes = 0;
-#if IMB_VERSION(1, 3, 0) < IMB_VERSION_NUM
imb_set_session(mb_mgr, job);
-#endif
} else {
job->hash_start_src_offset_in_bytes =
op->sym->aead.data.offset;
@@ -1424,13 +1412,11 @@ set_gcm_job(IMB_MGR *mb_mgr, IMB_JOB *job, const uint8_t sgl,
job->src = NULL;
job->dst = NULL;
-#if IMB_VERSION(1, 2, 0) < IMB_VERSION_NUM
if (m_src->nb_segs <= MAX_NUM_SEGS)
return single_sgl_job(job, op, oop,
m_offset, m_src, m_dst,
qp_data->sgl_segs);
else
-#endif
return multi_sgl_job(job, op, oop,
m_offset, m_src, m_dst, mb_mgr);
} else {
@@ -1520,10 +1506,6 @@ set_mb_job_params(IMB_JOB *job, struct ipsec_mb_qp *qp,
uint8_t sgl = 0;
uint8_t lb_sgl = 0;
-#if IMB_VERSION(1, 3, 0) >= IMB_VERSION_NUM
- (void) pid;
-#endif
-
session = ipsec_mb_get_session_private(qp, op);
if (session == NULL) {
op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
@@ -1533,12 +1515,10 @@ set_mb_job_params(IMB_JOB *job, struct ipsec_mb_qp *qp,
const IMB_CIPHER_MODE cipher_mode =
session->template_job.cipher_mode;
-#if IMB_VERSION(1, 3, 0) < IMB_VERSION_NUM
if (session->pid != pid) {
memcpy(job, &session->template_job, sizeof(IMB_JOB));
imb_set_session(mb_mgr, job);
} else if (job->session_id != session->session_id)
-#endif
memcpy(job, &session->template_job, sizeof(IMB_JOB));
if (!op->sym->m_dst) {
@@ -1579,9 +1559,7 @@ set_mb_job_params(IMB_JOB *job, struct ipsec_mb_qp *qp,
job->u.GCM.ctx = &qp_data->gcm_sgl_ctx;
job->cipher_mode = IMB_CIPHER_GCM_SGL;
job->hash_alg = IMB_AUTH_GCM_SGL;
-#if IMB_VERSION(1, 3, 0) < IMB_VERSION_NUM
imb_set_session(mb_mgr, job);
-#endif
}
break;
case IMB_AUTH_AES_GMAC_128:
@@ -1606,9 +1584,7 @@ set_mb_job_params(IMB_JOB *job, struct ipsec_mb_qp *qp,
job->u.CHACHA20_POLY1305.ctx = &qp_data->chacha_sgl_ctx;
job->cipher_mode = IMB_CIPHER_CHACHA20_POLY1305_SGL;
job->hash_alg = IMB_AUTH_CHACHA20_POLY1305_SGL;
-#if IMB_VERSION(1, 3, 0) < IMB_VERSION_NUM
imb_set_session(mb_mgr, job);
-#endif
}
break;
default:
@@ -1804,13 +1780,11 @@ set_mb_job_params(IMB_JOB *job, struct ipsec_mb_qp *qp,
if (lb_sgl)
return handle_sgl_linear(job, op, m_offset, session);
-#if IMB_VERSION(1, 2, 0) < IMB_VERSION_NUM
if (m_src->nb_segs <= MAX_NUM_SEGS)
return single_sgl_job(job, op, oop,
m_offset, m_src, m_dst,
qp_data->sgl_segs);
else
-#endif
return multi_sgl_job(job, op, oop,
m_offset, m_src, m_dst, mb_mgr);
}
@@ -2130,7 +2104,6 @@ set_job_null_op(IMB_JOB *job, struct rte_crypto_op *op)
return job;
}
-#if IMB_VERSION(1, 2, 0) < IMB_VERSION_NUM
static uint16_t
aesni_mb_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
uint16_t nb_ops)
@@ -2263,144 +2236,7 @@ aesni_mb_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
return processed_jobs;
}
-#else
-
-/**
- * Process a completed IMB_JOB job and keep processing jobs until
- * get_completed_job return NULL
- *
- * @param qp Queue Pair to process
- * @param mb_mgr IMB_MGR to use
- * @param job IMB_JOB job
- * @param ops crypto ops to fill
- * @param nb_ops number of crypto ops
- *
- * @return
- * - Number of processed jobs
- */
-static unsigned
-handle_completed_jobs(struct ipsec_mb_qp *qp, IMB_MGR *mb_mgr,
- IMB_JOB *job, struct rte_crypto_op **ops,
- uint16_t nb_ops)
-{
- struct rte_crypto_op *op = NULL;
- uint16_t processed_jobs = 0;
-
- while (job != NULL) {
- op = post_process_mb_job(qp, job);
-
- if (op) {
- ops[processed_jobs++] = op;
- qp->stats.dequeued_count++;
- } else {
- qp->stats.dequeue_err_count++;
- break;
- }
- if (processed_jobs == nb_ops)
- break;
-
- job = IMB_GET_COMPLETED_JOB(mb_mgr);
- }
-
- return processed_jobs;
-}
-
-static inline uint16_t
-flush_mb_mgr(struct ipsec_mb_qp *qp, IMB_MGR *mb_mgr,
- struct rte_crypto_op **ops, uint16_t nb_ops)
-{
- int processed_ops = 0;
-
- /* Flush the remaining jobs */
- IMB_JOB *job = IMB_FLUSH_JOB(mb_mgr);
-
- if (job)
- processed_ops += handle_completed_jobs(qp, mb_mgr, job,
- &ops[processed_ops], nb_ops - processed_ops);
-
- return processed_ops;
-}
-static uint16_t
-aesni_mb_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
- uint16_t nb_ops)
-{
- struct ipsec_mb_qp *qp = queue_pair;
- IMB_MGR *mb_mgr = qp->mb_mgr;
- struct rte_crypto_op *op;
- IMB_JOB *job;
- int retval, processed_jobs = 0;
- pid_t pid = 0;
-
- if (unlikely(nb_ops == 0 || mb_mgr == NULL))
- return 0;
-
- uint8_t digest_idx = qp->digest_idx;
-
- do {
- /* Get next free mb job struct from mb manager */
- job = IMB_GET_NEXT_JOB(mb_mgr);
- if (unlikely(job == NULL)) {
- /* if no free mb job structs we need to flush mb_mgr */
- processed_jobs += flush_mb_mgr(qp, mb_mgr,
- &ops[processed_jobs],
- nb_ops - processed_jobs);
-
- if (nb_ops == processed_jobs)
- break;
-
- job = IMB_GET_NEXT_JOB(mb_mgr);
- }
-
- /*
- * Get next operation to process from ingress queue.
- * There is no need to return the job to the IMB_MGR
- * if there are no more operations to process, since the IMB_MGR
- * can use that pointer again in next get_next calls.
- */
- retval = rte_ring_dequeue(qp->ingress_queue, (void **)&op);
- if (retval < 0)
- break;
-
- if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION)
- retval = set_sec_mb_job_params(job, qp, op,
- &digest_idx);
- else
- retval = set_mb_job_params(job, qp, op,
- &digest_idx, mb_mgr, pid);
-
- if (unlikely(retval != 0)) {
- qp->stats.dequeue_err_count++;
- set_job_null_op(job, op);
- }
-
- /* Submit job to multi-buffer for processing */
-#ifdef RTE_LIBRTE_PMD_AESNI_MB_DEBUG
- job = IMB_SUBMIT_JOB(mb_mgr);
-#else
- job = IMB_SUBMIT_JOB_NOCHECK(mb_mgr);
-#endif
- /*
- * If submit returns a processed job then handle it,
- * before submitting subsequent jobs
- */
- if (job)
- processed_jobs += handle_completed_jobs(qp, mb_mgr,
- job, &ops[processed_jobs],
- nb_ops - processed_jobs);
-
- } while (processed_jobs < nb_ops);
-
- qp->digest_idx = digest_idx;
-
- if (processed_jobs < 1)
- processed_jobs += flush_mb_mgr(qp, mb_mgr,
- &ops[processed_jobs],
- nb_ops - processed_jobs);
-
- return processed_jobs;
-}
-#endif
static inline int
check_crypto_sgl(union rte_crypto_sym_ofs so, const struct rte_crypto_sgl *sgl)
{
diff --git a/drivers/crypto/ipsec_mb/pmd_aesni_mb_priv.h b/drivers/crypto/ipsec_mb/pmd_aesni_mb_priv.h
index 85994fe5a1..51cfd7e2aa 100644
--- a/drivers/crypto/ipsec_mb/pmd_aesni_mb_priv.h
+++ b/drivers/crypto/ipsec_mb/pmd_aesni_mb_priv.h
@@ -17,9 +17,7 @@
#define HMAC_IPAD_VALUE (0x36)
#define HMAC_OPAD_VALUE (0x5C)
-#if IMB_VERSION(1, 2, 0) < IMB_VERSION_NUM
#define MAX_NUM_SEGS 16
-#endif
static const struct rte_cryptodev_capabilities aesni_mb_capabilities[] = {
{ /* MD5 HMAC */
@@ -567,13 +565,8 @@ static const struct rte_cryptodev_capabilities aesni_mb_capabilities[] = {
},
.digest_size = {
.min = 4,
-#if IMB_VERSION(1, 2, 0) < IMB_VERSION_NUM
.max = 16,
.increment = 4
-#else
- .max = 4,
- .increment = 0
-#endif
},
.iv_size = {
.min = 16,
@@ -730,9 +723,7 @@ struct aesni_mb_qp_data {
* by the driver when verifying a digest provided
* by the user (using authentication verify operation)
*/
-#if IMB_VERSION(1, 2, 0) < IMB_VERSION_NUM
struct IMB_SGL_IOV sgl_segs[MAX_NUM_SEGS];
-#endif
union {
struct gcm_context_data gcm_sgl_ctx;
struct chacha20_poly1305_context_data chacha_sgl_ctx;
--
2.25.1
^ permalink raw reply [flat|nested] 45+ messages in thread
* [PATCH v5 2/4] doc: remove outdated version details
2024-03-05 17:42 ` [PATCH v5 1/4] crypto/ipsec_mb: bump minimum IPsec Multi-buffer version Brian Dooley
@ 2024-03-05 17:42 ` Brian Dooley
2024-03-05 17:42 ` [PATCH v5 3/4] crypto/ipsec_mb: use new ipad/opad calculation API Brian Dooley
` (2 subsequent siblings)
3 siblings, 0 replies; 45+ messages in thread
From: Brian Dooley @ 2024-03-05 17:42 UTC (permalink / raw)
To: Kai Ji, Pablo de Lara
Cc: dev, gakhil, Sivaramakrishnan Venkat, Wathsala Vithanage
From: Sivaramakrishnan Venkat <venkatx.sivaramakrishnan@intel.com>
The SW PMDs documentation is updated to remove details of unsupported IPsec
Multi-buffer versions. DPDK releases older than 20.11 are end-of-life, so those
older DPDK versions are removed from the Crypto library version tables.
Signed-off-by: Sivaramakrishnan Venkat <venkatx.sivaramakrishnan@intel.com>
Acked-by: Pablo de Lara <pablo.de.lara.guarch@intel.com>
Acked-by: Wathsala Vithanage <wathsala.vithanage@arm.com>
---
v5:
- Rebased and added to patchset
v3:
- added second patch for outdated documentation updates.
---
doc/guides/cryptodevs/aesni_gcm.rst | 19 +++---------------
doc/guides/cryptodevs/aesni_mb.rst | 22 +++------------------
doc/guides/cryptodevs/chacha20_poly1305.rst | 12 ++---------
doc/guides/cryptodevs/kasumi.rst | 14 +++----------
doc/guides/cryptodevs/snow3g.rst | 15 +++-----------
doc/guides/cryptodevs/zuc.rst | 15 +++-----------
6 files changed, 17 insertions(+), 80 deletions(-)
diff --git a/doc/guides/cryptodevs/aesni_gcm.rst b/doc/guides/cryptodevs/aesni_gcm.rst
index dc665e536c..e38a03b78f 100644
--- a/doc/guides/cryptodevs/aesni_gcm.rst
+++ b/doc/guides/cryptodevs/aesni_gcm.rst
@@ -62,12 +62,6 @@ Once it is downloaded, extract it and follow these steps:
make
make install
-.. note::
-
- Compilation of the Multi-Buffer library is broken when GCC < 5.0, if library <= v0.53.
- If a lower GCC version than 5.0, the workaround proposed by the following link
- should be used: `<https://github.com/intel/intel-ipsec-mb/issues/40>`_.
-
As a reference, the following table shows a mapping between the past DPDK versions
and the external crypto libraries supported by them:
@@ -79,18 +73,11 @@ and the external crypto libraries supported by them:
============= ================================
DPDK version Crypto library version
============= ================================
- 16.04 - 16.11 Multi-buffer library 0.43 - 0.44
- 17.02 - 17.05 ISA-L Crypto v2.18
- 17.08 - 18.02 Multi-buffer library 0.46 - 0.48
- 18.05 - 19.02 Multi-buffer library 0.49 - 0.52
- 19.05 - 20.08 Multi-buffer library 0.52 - 0.55
- 20.11 - 21.08 Multi-buffer library 0.53 - 1.3*
- 21.11 - 23.11 Multi-buffer library 1.0 - 1.5*
- 24.03+ Multi-buffer library 1.4 - 1.5*
+ 20.11 - 21.08 Multi-buffer library 0.53 - 1.3
+ 21.11 - 23.11 Multi-buffer library 1.0 - 1.5
+ 24.03+ Multi-buffer library 1.4 - 1.5
============= ================================
-\* Multi-buffer library 1.0 or newer only works for Meson but not Make build system.
-
Initialization
--------------
diff --git a/doc/guides/cryptodevs/aesni_mb.rst b/doc/guides/cryptodevs/aesni_mb.rst
index 5d670ee237..bd7c8de07f 100644
--- a/doc/guides/cryptodevs/aesni_mb.rst
+++ b/doc/guides/cryptodevs/aesni_mb.rst
@@ -121,12 +121,6 @@ Once it is downloaded, extract it and follow these steps:
make
make install
-.. note::
-
- Compilation of the Multi-Buffer library is broken when GCC < 5.0, if library <= v0.53.
- If a lower GCC version than 5.0, the workaround proposed by the following link
- should be used: `<https://github.com/intel/intel-ipsec-mb/issues/40>`_.
-
As a reference, the following table shows a mapping between the past DPDK versions
and the Multi-Buffer library version supported by them:
@@ -137,21 +131,11 @@ and the Multi-Buffer library version supported by them:
============== ============================
DPDK version Multi-buffer library version
============== ============================
- 2.2 - 16.11 0.43 - 0.44
- 17.02 0.44
- 17.05 - 17.08 0.45 - 0.48
- 17.11 0.47 - 0.48
- 18.02 0.48
- 18.05 - 19.02 0.49 - 0.52
- 19.05 - 19.08 0.52
- 19.11 - 20.08 0.52 - 0.55
- 20.11 - 21.08 0.53 - 1.3*
- 21.11 - 23.11 1.0 - 1.5*
- 24.03+ 1.4 - 1.5*
+ 20.11 - 21.08 0.53 - 1.3
+ 21.11 - 23.11 1.0 - 1.5
+ 24.03+ 1.4 - 1.5
============== ============================
-\* Multi-buffer library 1.0 or newer only works for Meson but not Make build system.
-
Initialization
--------------
diff --git a/doc/guides/cryptodevs/chacha20_poly1305.rst b/doc/guides/cryptodevs/chacha20_poly1305.rst
index c32866b301..8e0ee4f835 100644
--- a/doc/guides/cryptodevs/chacha20_poly1305.rst
+++ b/doc/guides/cryptodevs/chacha20_poly1305.rst
@@ -56,12 +56,6 @@ Once it is downloaded, extract it and follow these steps:
make
make install
-.. note::
-
- Compilation of the Multi-Buffer library is broken when GCC < 5.0, if library <= v0.53.
- If a lower GCC version than 5.0, the workaround proposed by the following link
- should be used: `<https://github.com/intel/intel-ipsec-mb/issues/40>`_.
-
As a reference, the following table shows a mapping between the past DPDK versions
and the external crypto libraries supported by them:
@@ -72,12 +66,10 @@ and the external crypto libraries supported by them:
============= ================================
DPDK version Crypto library version
============= ================================
- 21.11 - 23.11 Multi-buffer library 1.0-1.5*
- 24.03+ Multi-buffer library 1.4-1.5*
+ 21.11 - 23.11 Multi-buffer library 1.0-1.5
+ 24.03+ Multi-buffer library 1.4-1.5
============= ================================
-\* Multi-buffer library 1.0 or newer only works for Meson but not Make build system.
-
Initialization
--------------
diff --git a/doc/guides/cryptodevs/kasumi.rst b/doc/guides/cryptodevs/kasumi.rst
index a8f4e6b204..28ac452524 100644
--- a/doc/guides/cryptodevs/kasumi.rst
+++ b/doc/guides/cryptodevs/kasumi.rst
@@ -69,12 +69,6 @@ Once it is downloaded, extract it and follow these steps:
make
make install
-.. note::
-
- Compilation of the Multi-Buffer library is broken when GCC < 5.0, if library <= v0.53.
- If a lower GCC version than 5.0, the workaround proposed by the following link
- should be used: `<https://github.com/intel/intel-ipsec-mb/issues/40>`_.
-
As a reference, the following table shows a mapping between the past DPDK versions
and the external crypto libraries supported by them:
@@ -86,13 +80,11 @@ and the external crypto libraries supported by them:
DPDK version Crypto library version
============= ================================
16.11 - 19.11 LibSSO KASUMI
- 20.02 - 21.08 Multi-buffer library 0.53 - 1.3*
- 21.11 - 23.11 Multi-buffer library 1.0 - 1.5*
- 24.03+ Multi-buffer library 1.4 - 1.5*
+ 20.11 - 21.08 Multi-buffer library 0.53 - 1.3
+ 21.11 - 23.11 Multi-buffer library 1.0 - 1.5
+ 24.03+ Multi-buffer library 1.4 - 1.5
============= ================================
-\* Multi-buffer library 1.0 or newer only works for Meson but not Make build system.
-
Initialization
--------------
diff --git a/doc/guides/cryptodevs/snow3g.rst b/doc/guides/cryptodevs/snow3g.rst
index 46863462e5..0141f62976 100644
--- a/doc/guides/cryptodevs/snow3g.rst
+++ b/doc/guides/cryptodevs/snow3g.rst
@@ -78,12 +78,6 @@ Once it is downloaded, extract it and follow these steps:
make
make install
-.. note::
-
- Compilation of the Multi-Buffer library is broken when GCC < 5.0, if library <= v0.53.
- If a lower GCC version than 5.0, the workaround proposed by the following link
- should be used: `<https://github.com/intel/intel-ipsec-mb/issues/40>`_.
-
As a reference, the following table shows a mapping between the past DPDK versions
and the external crypto libraries supported by them:
@@ -94,14 +88,11 @@ and the external crypto libraries supported by them:
============= ================================
DPDK version Crypto library version
============= ================================
- 16.04 - 19.11 LibSSO SNOW3G
- 20.02 - 21.08 Multi-buffer library 0.53 - 1.3*
- 21.11 - 23.11 Multi-buffer library 1.0 - 1.5*
- 24.03+ Multi-buffer library 1.4 - 1.5*
+ 20.11 - 21.08 Multi-buffer library 0.53 - 1.3
+ 21.11 - 23.11 Multi-buffer library 1.0 - 1.5
+ 24.03+ Multi-buffer library 1.4 - 1.5
============= ================================
-\* Multi-buffer library 1.0 or newer only works for Meson but not Make build system.
-
Initialization
--------------
diff --git a/doc/guides/cryptodevs/zuc.rst b/doc/guides/cryptodevs/zuc.rst
index 51867e1a16..97c14c8c77 100644
--- a/doc/guides/cryptodevs/zuc.rst
+++ b/doc/guides/cryptodevs/zuc.rst
@@ -77,12 +77,6 @@ Once it is downloaded, extract it and follow these steps:
make
make install
-.. note::
-
- Compilation of the Multi-Buffer library is broken when GCC < 5.0, if library <= v0.53.
- If a lower GCC version than 5.0, the workaround proposed by the following link
- should be used: `<https://github.com/intel/intel-ipsec-mb/issues/40>`_.
-
As a reference, the following table shows a mapping between the past DPDK versions
and the external crypto libraries supported by them:
@@ -93,14 +87,11 @@ and the external crypto libraries supported by them:
============= ================================
DPDK version Crypto library version
============= ================================
- 16.11 - 19.11 LibSSO ZUC
- 20.02 - 21.08 Multi-buffer library 0.53 - 1.3*
- 21.11 - 23.11 Multi-buffer library 1.0 - 1.5*
- 24.03+ Multi-buffer library 1.4 - 1.5*
+ 20.11 - 21.08 Multi-buffer library 0.53 - 1.3
+ 21.11 - 23.11 Multi-buffer library 1.0 - 1.5
+ 24.03+ Multi-buffer library 1.4 - 1.5
============= ================================
-\* Multi-buffer library 1.0 or newer only works for Meson but not Make build system.
-
Initialization
--------------
--
2.25.1
* [PATCH v5 3/4] crypto/ipsec_mb: use new ipad/opad calculation API
2024-03-05 17:42 ` [PATCH v5 1/4] crypto/ipsec_mb: bump minimum IPsec Multi-buffer version Brian Dooley
2024-03-05 17:42 ` [PATCH v5 2/4] doc: remove outdated version details Brian Dooley
@ 2024-03-05 17:42 ` Brian Dooley
2024-03-05 17:42 ` [PATCH v5 4/4] crypto/ipsec_mb: unified IPsec MB interface Brian Dooley
2024-03-05 19:11 ` [EXTERNAL] [PATCH v5 1/4] crypto/ipsec_mb: bump minimum IPsec Multi-buffer version Akhil Goyal
3 siblings, 0 replies; 45+ messages in thread
From: Brian Dooley @ 2024-03-05 17:42 UTC (permalink / raw)
To: Kai Ji, Pablo de Lara
Cc: dev, gakhil, Brian Dooley, Ciara Power, Wathsala Vithanage
IPSec Multi-buffer library v1.4 added a new API to
calculate inner/outer padding for HMAC-SHAx/MD5.
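For context, the removed calculate_auth_precomputes() below hand-rolled the
standard RFC 2104 ipad/opad derivation. A minimal sketch of that derivation
(illustrative only, not part of this patch; one_block_hash() is a placeholder
for the per-algorithm one-block digest, and a key longer than the block size
is assumed to have already been hashed down, as the removed per-algorithm
cases did):

#include <stdint.h>
#include <string.h>

#define HMAC_IPAD_VALUE 0x36
#define HMAC_OPAD_VALUE 0x5C

static void
hmac_pads_sketch(void (*one_block_hash)(const uint8_t *in, uint8_t *digest),
        uint8_t *ipad, uint8_t *opad,
        const uint8_t *key, uint16_t key_len, uint16_t blocksize)
{
        uint8_t buf[blocksize];
        uint16_t i, len = key_len > blocksize ? blocksize : key_len;

        /* Inner pad: hash of (key XOR 0x36..36) as a single block. */
        memset(buf, HMAC_IPAD_VALUE, blocksize);
        for (i = 0; i < len; i++)
                buf[i] ^= key[i];
        one_block_hash(buf, ipad);

        /* Outer pad: hash of (key XOR 0x5c..5c) as a single block. */
        memset(buf, HMAC_OPAD_VALUE, blocksize);
        for (i = 0; i < len; i++)
                buf[i] ^= key[i];
        one_block_hash(buf, opad);

        /* Clear the keyed block from the stack. */
        memset(buf, 0, blocksize);
}

With this patch the library performs the same precomputation in one call,
imb_hmac_ipad_opad(mb_mgr, hash_alg, key, key_len, inner, outer), as used in
the hunk below.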
Signed-off-by: Pablo de Lara <pablo.de.lara.guarch@intel.com>
Signed-off-by: Brian Dooley <brian.dooley@intel.com>
Acked-by: Ciara Power <ciara.power@intel.com>
Acked-by: Wathsala Vithanage <wathsala.vithanage@arm.com>
---
v5:
- Rebased and added to patchset
v2:
- Remove ipsec mb version checks
---
drivers/crypto/ipsec_mb/pmd_aesni_mb.c | 75 ++------------------------
1 file changed, 5 insertions(+), 70 deletions(-)
diff --git a/drivers/crypto/ipsec_mb/pmd_aesni_mb.c b/drivers/crypto/ipsec_mb/pmd_aesni_mb.c
index 2acd229268..92703a76f0 100644
--- a/drivers/crypto/ipsec_mb/pmd_aesni_mb.c
+++ b/drivers/crypto/ipsec_mb/pmd_aesni_mb.c
@@ -13,49 +13,6 @@ struct aesni_mb_op_buf_data {
uint32_t offset;
};
-/**
- * Calculate the authentication pre-computes
- *
- * @param one_block_hash Function pointer
- * to calculate digest on ipad/opad
- * @param ipad Inner pad output byte array
- * @param opad Outer pad output byte array
- * @param hkey Authentication key
- * @param hkey_len Authentication key length
- * @param blocksize Block size of selected hash algo
- */
-static void
-calculate_auth_precomputes(hash_one_block_t one_block_hash,
- uint8_t *ipad, uint8_t *opad,
- const uint8_t *hkey, uint16_t hkey_len,
- uint16_t blocksize)
-{
- uint32_t i, length;
-
- uint8_t ipad_buf[blocksize] __rte_aligned(16);
- uint8_t opad_buf[blocksize] __rte_aligned(16);
-
- /* Setup inner and outer pads */
- memset(ipad_buf, HMAC_IPAD_VALUE, blocksize);
- memset(opad_buf, HMAC_OPAD_VALUE, blocksize);
-
- /* XOR hash key with inner and outer pads */
- length = hkey_len > blocksize ? blocksize : hkey_len;
-
- for (i = 0; i < length; i++) {
- ipad_buf[i] ^= hkey[i];
- opad_buf[i] ^= hkey[i];
- }
-
- /* Compute partial hashes */
- (*one_block_hash)(ipad_buf, ipad);
- (*one_block_hash)(opad_buf, opad);
-
- /* Clean up stack */
- memset(ipad_buf, 0, blocksize);
- memset(opad_buf, 0, blocksize);
-}
-
static inline int
is_aead_algo(IMB_HASH_ALG hash_alg, IMB_CIPHER_MODE cipher_mode)
{
@@ -66,12 +23,10 @@ is_aead_algo(IMB_HASH_ALG hash_alg, IMB_CIPHER_MODE cipher_mode)
/** Set session authentication parameters */
static int
-aesni_mb_set_session_auth_parameters(const IMB_MGR *mb_mgr,
+aesni_mb_set_session_auth_parameters(IMB_MGR *mb_mgr,
struct aesni_mb_session *sess,
const struct rte_crypto_sym_xform *xform)
{
- hash_one_block_t hash_oneblock_fn = NULL;
- unsigned int key_larger_block_size = 0;
uint8_t hashed_key[HMAC_MAX_BLOCK_SIZE] = { 0 };
uint32_t auth_precompute = 1;
@@ -263,18 +218,15 @@ aesni_mb_set_session_auth_parameters(const IMB_MGR *mb_mgr,
switch (xform->auth.algo) {
case RTE_CRYPTO_AUTH_MD5_HMAC:
sess->template_job.hash_alg = IMB_AUTH_MD5;
- hash_oneblock_fn = mb_mgr->md5_one_block;
break;
case RTE_CRYPTO_AUTH_SHA1_HMAC:
sess->template_job.hash_alg = IMB_AUTH_HMAC_SHA_1;
- hash_oneblock_fn = mb_mgr->sha1_one_block;
if (xform->auth.key.length > get_auth_algo_blocksize(
IMB_AUTH_HMAC_SHA_1)) {
IMB_SHA1(mb_mgr,
xform->auth.key.data,
xform->auth.key.length,
hashed_key);
- key_larger_block_size = 1;
}
break;
case RTE_CRYPTO_AUTH_SHA1:
@@ -283,14 +235,12 @@ aesni_mb_set_session_auth_parameters(const IMB_MGR *mb_mgr,
break;
case RTE_CRYPTO_AUTH_SHA224_HMAC:
sess->template_job.hash_alg = IMB_AUTH_HMAC_SHA_224;
- hash_oneblock_fn = mb_mgr->sha224_one_block;
if (xform->auth.key.length > get_auth_algo_blocksize(
IMB_AUTH_HMAC_SHA_224)) {
IMB_SHA224(mb_mgr,
xform->auth.key.data,
xform->auth.key.length,
hashed_key);
- key_larger_block_size = 1;
}
break;
case RTE_CRYPTO_AUTH_SHA224:
@@ -299,14 +249,12 @@ aesni_mb_set_session_auth_parameters(const IMB_MGR *mb_mgr,
break;
case RTE_CRYPTO_AUTH_SHA256_HMAC:
sess->template_job.hash_alg = IMB_AUTH_HMAC_SHA_256;
- hash_oneblock_fn = mb_mgr->sha256_one_block;
if (xform->auth.key.length > get_auth_algo_blocksize(
IMB_AUTH_HMAC_SHA_256)) {
IMB_SHA256(mb_mgr,
xform->auth.key.data,
xform->auth.key.length,
hashed_key);
- key_larger_block_size = 1;
}
break;
case RTE_CRYPTO_AUTH_SHA256:
@@ -315,14 +263,12 @@ aesni_mb_set_session_auth_parameters(const IMB_MGR *mb_mgr,
break;
case RTE_CRYPTO_AUTH_SHA384_HMAC:
sess->template_job.hash_alg = IMB_AUTH_HMAC_SHA_384;
- hash_oneblock_fn = mb_mgr->sha384_one_block;
if (xform->auth.key.length > get_auth_algo_blocksize(
IMB_AUTH_HMAC_SHA_384)) {
IMB_SHA384(mb_mgr,
xform->auth.key.data,
xform->auth.key.length,
hashed_key);
- key_larger_block_size = 1;
}
break;
case RTE_CRYPTO_AUTH_SHA384:
@@ -331,14 +277,12 @@ aesni_mb_set_session_auth_parameters(const IMB_MGR *mb_mgr,
break;
case RTE_CRYPTO_AUTH_SHA512_HMAC:
sess->template_job.hash_alg = IMB_AUTH_HMAC_SHA_512;
- hash_oneblock_fn = mb_mgr->sha512_one_block;
if (xform->auth.key.length > get_auth_algo_blocksize(
IMB_AUTH_HMAC_SHA_512)) {
IMB_SHA512(mb_mgr,
xform->auth.key.data,
xform->auth.key.length,
hashed_key);
- key_larger_block_size = 1;
}
break;
case RTE_CRYPTO_AUTH_SHA512:
@@ -372,19 +316,10 @@ aesni_mb_set_session_auth_parameters(const IMB_MGR *mb_mgr,
return 0;
/* Calculate Authentication precomputes */
- if (key_larger_block_size) {
- calculate_auth_precomputes(hash_oneblock_fn,
- sess->auth.pads.inner, sess->auth.pads.outer,
- hashed_key,
- xform->auth.key.length,
- get_auth_algo_blocksize(sess->template_job.hash_alg));
- } else {
- calculate_auth_precomputes(hash_oneblock_fn,
- sess->auth.pads.inner, sess->auth.pads.outer,
- xform->auth.key.data,
- xform->auth.key.length,
- get_auth_algo_blocksize(sess->template_job.hash_alg));
- }
+ imb_hmac_ipad_opad(mb_mgr, sess->template_job.hash_alg,
+ xform->auth.key.data, xform->auth.key.length,
+ sess->auth.pads.inner, sess->auth.pads.outer);
+
sess->template_job.u.HMAC._hashed_auth_key_xor_ipad =
sess->auth.pads.inner;
sess->template_job.u.HMAC._hashed_auth_key_xor_opad =
--
2.25.1
* [PATCH v5 4/4] crypto/ipsec_mb: unified IPsec MB interface
2024-03-05 17:42 ` [PATCH v5 1/4] crypto/ipsec_mb: bump minimum IPsec Multi-buffer version Brian Dooley
2024-03-05 17:42 ` [PATCH v5 2/4] doc: remove outdated version details Brian Dooley
2024-03-05 17:42 ` [PATCH v5 3/4] crypto/ipsec_mb: use new ipad/opad calculation API Brian Dooley
@ 2024-03-05 17:42 ` Brian Dooley
2024-03-15 18:25 ` Patrick Robb
2024-03-05 19:11 ` [EXTERNAL] [PATCH v5 1/4] crypto/ipsec_mb: bump minimum IPsec Multi-buffer version Akhil Goyal
3 siblings, 1 reply; 45+ messages in thread
From: Brian Dooley @ 2024-03-05 17:42 UTC (permalink / raw)
To: Kai Ji, Pablo de Lara
Cc: dev, gakhil, Brian Dooley, Ciara Power, Wathsala Vithanage
Currently IPsec MB provides both the JOB API and direct API.
AESNI_MB PMD is using the JOB API codepath while ZUC, KASUMI, SNOW3G
and CHACHA20_POLY1305 are using the direct API.
Instead of using the direct API for these PMDs, they should now make
use of the JOB API codepath. This would remove all use of the IPsec MB
direct API for these PMDs.
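For reference, a minimal sketch of the multi-buffer JOB API flow that these
PMDs now share (simplified and illustrative only; the real PMD's IMB_MGR
setup, per-op field handling and error paths are omitted). The direct API
being removed below instead calls per-algorithm entry points such as
IMB_SNOW3G_F8_N_BUFFER() or the IMB_CHACHA20_POLY1305 init/update/finalize
sequence:

#include <intel-ipsec-mb.h>

static int
job_api_flow_sketch(IMB_MGR *mgr, const IMB_JOB *template_job)
{
        IMB_JOB *job = IMB_GET_NEXT_JOB(mgr);

        /* Start from the session's pre-filled template (keys, algorithms,
         * digest/IV lengths); per-op fields such as src, dst and offsets
         * would be filled in on top of this. */
        *job = *template_job;

        /* Submit; the manager may hand back an older, now-completed job. */
        job = IMB_SUBMIT_JOB(mgr);
        while (job != NULL) {
                if (job->status != IMB_STATUS_COMPLETED)
                        return -1;
                job = IMB_GET_COMPLETED_JOB(mgr);
        }

        /* Drain any jobs still held inside the manager. */
        while ((job = IMB_FLUSH_JOB(mgr)) != NULL) {
                if (job->status != IMB_STATUS_COMPLETED)
                        return -1;
        }
        return 0;
}

Sharing this single submit/flush path is what lets the ZUC, KASUMI, SNOW3G
and ChaCha20-Poly1305 registrations below simply point at
aesni_mb_dequeue_burst() and aesni_mb_session_configure().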
Signed-off-by: Brian Dooley <brian.dooley@intel.com>
Acked-by: Ciara Power <ciara.power@intel.com>
Acked-by: Wathsala Vithanage <wathsala.vithanage@arm.com>
---
v5:
- Rebased and added to patchset
v4:
- Keep AES GCM PMD and fix extern issue
v3:
- Remove session configure pointer for each PMD
v2:
- Fix compilation failure
---
doc/guides/rel_notes/release_24_03.rst | 3 +
drivers/crypto/ipsec_mb/pmd_aesni_mb.c | 8 +-
drivers/crypto/ipsec_mb/pmd_aesni_mb_priv.h | 15 +-
drivers/crypto/ipsec_mb/pmd_chacha_poly.c | 338 +----------
.../crypto/ipsec_mb/pmd_chacha_poly_priv.h | 28 -
drivers/crypto/ipsec_mb/pmd_kasumi.c | 410 +------------
drivers/crypto/ipsec_mb/pmd_kasumi_priv.h | 20 -
drivers/crypto/ipsec_mb/pmd_snow3g.c | 543 +-----------------
drivers/crypto/ipsec_mb/pmd_snow3g_priv.h | 21 -
drivers/crypto/ipsec_mb/pmd_zuc.c | 347 +----------
drivers/crypto/ipsec_mb/pmd_zuc_priv.h | 20 -
11 files changed, 44 insertions(+), 1709 deletions(-)
diff --git a/doc/guides/rel_notes/release_24_03.rst b/doc/guides/rel_notes/release_24_03.rst
index 8fa8cf1dd6..a4309311d4 100644
--- a/doc/guides/rel_notes/release_24_03.rst
+++ b/doc/guides/rel_notes/release_24_03.rst
@@ -147,6 +147,9 @@ New Features
* **Updated ipsec_mb crypto driver.**
* Bump minimum IPSec Multi-buffer version to 1.4 for SW PMDs.
+ * Kasumi, Snow3G, ChaChaPoly and ZUC PMDs now share the job API codepath
+ with AESNI_MB PMD. Depending on the architecture, the performance of ZUC
+ crypto PMD is approximately 10% less for small fixed packet sizes.
* **Updated Marvell cnxk crypto driver.**
diff --git a/drivers/crypto/ipsec_mb/pmd_aesni_mb.c b/drivers/crypto/ipsec_mb/pmd_aesni_mb.c
index 92703a76f0..35bd7eaa51 100644
--- a/drivers/crypto/ipsec_mb/pmd_aesni_mb.c
+++ b/drivers/crypto/ipsec_mb/pmd_aesni_mb.c
@@ -8,6 +8,8 @@
RTE_DEFINE_PER_LCORE(pid_t, pid);
+uint8_t pmd_driver_id_aesni_mb;
+
struct aesni_mb_op_buf_data {
struct rte_mbuf *m;
uint32_t offset;
@@ -692,7 +694,7 @@ aesni_mb_set_session_aead_parameters(const IMB_MGR *mb_mgr,
}
/** Configure a aesni multi-buffer session from a crypto xform chain */
-static int
+int
aesni_mb_session_configure(IMB_MGR *mb_mgr,
void *priv_sess,
const struct rte_crypto_sym_xform *xform)
@@ -2039,7 +2041,7 @@ set_job_null_op(IMB_JOB *job, struct rte_crypto_op *op)
return job;
}
-static uint16_t
+uint16_t
aesni_mb_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
uint16_t nb_ops)
{
@@ -2227,7 +2229,7 @@ verify_sync_dgst(struct rte_crypto_sym_vec *vec,
return k;
}
-static uint32_t
+uint32_t
aesni_mb_process_bulk(struct rte_cryptodev *dev __rte_unused,
struct rte_cryptodev_sym_session *sess, union rte_crypto_sym_ofs sofs,
struct rte_crypto_sym_vec *vec)
diff --git a/drivers/crypto/ipsec_mb/pmd_aesni_mb_priv.h b/drivers/crypto/ipsec_mb/pmd_aesni_mb_priv.h
index 51cfd7e2aa..4805627679 100644
--- a/drivers/crypto/ipsec_mb/pmd_aesni_mb_priv.h
+++ b/drivers/crypto/ipsec_mb/pmd_aesni_mb_priv.h
@@ -19,6 +19,19 @@
#define MAX_NUM_SEGS 16
+int
+aesni_mb_session_configure(IMB_MGR * m __rte_unused, void *priv_sess,
+ const struct rte_crypto_sym_xform *xform);
+
+uint16_t
+aesni_mb_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
+ uint16_t nb_ops);
+
+uint32_t
+aesni_mb_process_bulk(struct rte_cryptodev *dev __rte_unused,
+ struct rte_cryptodev_sym_session *sess, union rte_crypto_sym_ofs sofs,
+ struct rte_crypto_sym_vec *vec);
+
static const struct rte_cryptodev_capabilities aesni_mb_capabilities[] = {
{ /* MD5 HMAC */
.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
@@ -715,8 +728,6 @@ static const struct rte_cryptodev_capabilities aesni_mb_capabilities[] = {
RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
};
-uint8_t pmd_driver_id_aesni_mb;
-
struct aesni_mb_qp_data {
uint8_t temp_digests[IMB_MAX_JOBS][DIGEST_LENGTH_MAX];
/* *< Buffers used to store the digest generated
diff --git a/drivers/crypto/ipsec_mb/pmd_chacha_poly.c b/drivers/crypto/ipsec_mb/pmd_chacha_poly.c
index 97e7cef233..7436353fc2 100644
--- a/drivers/crypto/ipsec_mb/pmd_chacha_poly.c
+++ b/drivers/crypto/ipsec_mb/pmd_chacha_poly.c
@@ -3,334 +3,7 @@
*/
#include "pmd_chacha_poly_priv.h"
-
-/** Parse crypto xform chain and set private session parameters. */
-static int
-chacha20_poly1305_session_configure(IMB_MGR * mb_mgr __rte_unused,
- void *priv_sess, const struct rte_crypto_sym_xform *xform)
-{
- struct chacha20_poly1305_session *sess = priv_sess;
- const struct rte_crypto_sym_xform *auth_xform;
- const struct rte_crypto_sym_xform *cipher_xform;
- const struct rte_crypto_sym_xform *aead_xform;
-
- uint8_t key_length;
- const uint8_t *key;
- enum ipsec_mb_operation mode;
- int ret = 0;
-
- ret = ipsec_mb_parse_xform(xform, &mode, &auth_xform,
- &cipher_xform, &aead_xform);
- if (ret)
- return ret;
-
- sess->op = mode;
-
- switch (sess->op) {
- case IPSEC_MB_OP_AEAD_AUTHENTICATED_ENCRYPT:
- case IPSEC_MB_OP_AEAD_AUTHENTICATED_DECRYPT:
- if (aead_xform->aead.algo !=
- RTE_CRYPTO_AEAD_CHACHA20_POLY1305) {
- IPSEC_MB_LOG(ERR,
- "The only combined operation supported is CHACHA20 POLY1305");
- ret = -ENOTSUP;
- goto error_exit;
- }
- /* Set IV parameters */
- sess->iv.offset = aead_xform->aead.iv.offset;
- sess->iv.length = aead_xform->aead.iv.length;
- key_length = aead_xform->aead.key.length;
- key = aead_xform->aead.key.data;
- sess->aad_length = aead_xform->aead.aad_length;
- sess->req_digest_length = aead_xform->aead.digest_length;
- break;
- default:
- IPSEC_MB_LOG(
- ERR, "Wrong xform type, has to be AEAD or authentication");
- ret = -ENOTSUP;
- goto error_exit;
- }
-
- /* IV check */
- if (sess->iv.length != CHACHA20_POLY1305_IV_LENGTH &&
- sess->iv.length != 0) {
- IPSEC_MB_LOG(ERR, "Wrong IV length");
- ret = -EINVAL;
- goto error_exit;
- }
-
- /* Check key length */
- if (key_length != CHACHA20_POLY1305_KEY_SIZE) {
- IPSEC_MB_LOG(ERR, "Invalid key length");
- ret = -EINVAL;
- goto error_exit;
- } else {
- memcpy(sess->key, key, CHACHA20_POLY1305_KEY_SIZE);
- }
-
- /* Digest check */
- if (sess->req_digest_length != CHACHA20_POLY1305_DIGEST_LENGTH) {
- IPSEC_MB_LOG(ERR, "Invalid digest length");
- ret = -EINVAL;
- goto error_exit;
- } else {
- sess->gen_digest_length = CHACHA20_POLY1305_DIGEST_LENGTH;
- }
-
-error_exit:
- return ret;
-}
-
-/**
- * Process a crypto operation, calling
- * the direct chacha poly API from the multi buffer library.
- *
- * @param qp queue pair
- * @param op symmetric crypto operation
- * @param session chacha poly session
- *
- * @return
- * - Return 0 if success
- */
-static int
-chacha20_poly1305_crypto_op(struct ipsec_mb_qp *qp, struct rte_crypto_op *op,
- struct chacha20_poly1305_session *session)
-{
- struct chacha20_poly1305_qp_data *qp_data =
- ipsec_mb_get_qp_private_data(qp);
- uint8_t *src, *dst;
- uint8_t *iv_ptr;
- struct rte_crypto_sym_op *sym_op = op->sym;
- struct rte_mbuf *m_src = sym_op->m_src;
- uint32_t offset, data_offset, data_length;
- uint32_t part_len, data_len;
- int total_len;
- uint8_t *tag;
- unsigned int oop = 0;
-
- offset = sym_op->aead.data.offset;
- data_offset = offset;
- data_length = sym_op->aead.data.length;
- RTE_ASSERT(m_src != NULL);
-
- while (offset >= m_src->data_len && data_length != 0) {
- offset -= m_src->data_len;
- m_src = m_src->next;
-
- RTE_ASSERT(m_src != NULL);
- }
-
- src = rte_pktmbuf_mtod_offset(m_src, uint8_t *, offset);
-
- data_len = m_src->data_len - offset;
- part_len = (data_len < data_length) ? data_len :
- data_length;
-
- /* In-place */
- if (sym_op->m_dst == NULL || (sym_op->m_dst == sym_op->m_src))
- dst = src;
- /* Out-of-place */
- else {
- oop = 1;
- /* Segmented destination buffer is not supported
- * if operation is Out-of-place
- */
- RTE_ASSERT(rte_pktmbuf_is_contiguous(sym_op->m_dst));
- dst = rte_pktmbuf_mtod_offset(sym_op->m_dst, uint8_t *,
- data_offset);
- }
-
- iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
- session->iv.offset);
-
- IMB_CHACHA20_POLY1305_INIT(qp->mb_mgr, session->key,
- &qp_data->chacha20_poly1305_ctx_data,
- iv_ptr, sym_op->aead.aad.data,
- (uint64_t)session->aad_length);
-
- if (session->op == IPSEC_MB_OP_AEAD_AUTHENTICATED_ENCRYPT) {
- IMB_CHACHA20_POLY1305_ENC_UPDATE(qp->mb_mgr,
- session->key,
- &qp_data->chacha20_poly1305_ctx_data,
- dst, src, (uint64_t)part_len);
- total_len = data_length - part_len;
-
- while (total_len) {
- m_src = m_src->next;
- RTE_ASSERT(m_src != NULL);
-
- src = rte_pktmbuf_mtod(m_src, uint8_t *);
- if (oop)
- dst += part_len;
- else
- dst = src;
- part_len = (m_src->data_len < total_len) ?
- m_src->data_len : total_len;
-
- if (dst == NULL || src == NULL) {
- IPSEC_MB_LOG(ERR, "Invalid src or dst input");
- return -EINVAL;
- }
- IMB_CHACHA20_POLY1305_ENC_UPDATE(qp->mb_mgr,
- session->key,
- &qp_data->chacha20_poly1305_ctx_data,
- dst, src, (uint64_t)part_len);
- total_len -= part_len;
- if (total_len < 0) {
- IPSEC_MB_LOG(ERR, "Invalid part len");
- return -EINVAL;
- }
- }
-
- tag = sym_op->aead.digest.data;
- IMB_CHACHA20_POLY1305_ENC_FINALIZE(qp->mb_mgr,
- &qp_data->chacha20_poly1305_ctx_data,
- tag, session->gen_digest_length);
-
- } else {
- IMB_CHACHA20_POLY1305_DEC_UPDATE(qp->mb_mgr,
- session->key,
- &qp_data->chacha20_poly1305_ctx_data,
- dst, src, (uint64_t)part_len);
-
- total_len = data_length - part_len;
-
- while (total_len) {
- m_src = m_src->next;
-
- RTE_ASSERT(m_src != NULL);
-
- src = rte_pktmbuf_mtod(m_src, uint8_t *);
- if (oop)
- dst += part_len;
- else
- dst = src;
- part_len = (m_src->data_len < total_len) ?
- m_src->data_len : total_len;
-
- if (dst == NULL || src == NULL) {
- IPSEC_MB_LOG(ERR, "Invalid src or dst input");
- return -EINVAL;
- }
- IMB_CHACHA20_POLY1305_DEC_UPDATE(qp->mb_mgr,
- session->key,
- &qp_data->chacha20_poly1305_ctx_data,
- dst, src, (uint64_t)part_len);
- total_len -= part_len;
- if (total_len < 0) {
- IPSEC_MB_LOG(ERR, "Invalid part len");
- return -EINVAL;
- }
- }
-
- tag = qp_data->temp_digest;
- IMB_CHACHA20_POLY1305_DEC_FINALIZE(qp->mb_mgr,
- &qp_data->chacha20_poly1305_ctx_data,
- tag, session->gen_digest_length);
- }
-
- return 0;
-}
-
-/**
- * Process a completed chacha poly op
- *
- * @param qp Queue Pair to process
- * @param op Crypto operation
- * @param sess Crypto session
- *
- * @return
- * - void
- */
-static void
-post_process_chacha20_poly1305_crypto_op(struct ipsec_mb_qp *qp,
- struct rte_crypto_op *op,
- struct chacha20_poly1305_session *session)
-{
- struct chacha20_poly1305_qp_data *qp_data =
- ipsec_mb_get_qp_private_data(qp);
-
- op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
- /* Verify digest if required */
- if (session->op == IPSEC_MB_OP_AEAD_AUTHENTICATED_DECRYPT ||
- session->op == IPSEC_MB_OP_HASH_VERIFY_ONLY) {
- uint8_t *digest = op->sym->aead.digest.data;
- uint8_t *tag = qp_data->temp_digest;
-
-#ifdef RTE_LIBRTE_PMD_CHACHA20_POLY1305_DEBUG
- rte_hexdump(stdout, "auth tag (orig):",
- digest, session->req_digest_length);
- rte_hexdump(stdout, "auth tag (calc):",
- tag, session->req_digest_length);
-#endif
- if (memcmp(tag, digest, session->req_digest_length) != 0)
- op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
-
- }
-
-}
-
-/**
- * Process a completed Chacha20_poly1305 request
- *
- * @param qp Queue Pair to process
- * @param op Crypto operation
- * @param sess Crypto session
- *
- * @return
- * - void
- */
-static void
-handle_completed_chacha20_poly1305_crypto_op(struct ipsec_mb_qp *qp,
- struct rte_crypto_op *op,
- struct chacha20_poly1305_session *sess)
-{
- post_process_chacha20_poly1305_crypto_op(qp, op, sess);
-
- /* Free session if a session-less crypto op */
- if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
- memset(sess, 0, sizeof(struct chacha20_poly1305_session));
- rte_mempool_put(qp->sess_mp, op->sym->session);
- op->sym->session = NULL;
- }
-}
-
-static uint16_t
-chacha20_poly1305_pmd_dequeue_burst(void *queue_pair,
- struct rte_crypto_op **ops, uint16_t nb_ops)
-{
- struct chacha20_poly1305_session *sess;
- struct ipsec_mb_qp *qp = queue_pair;
-
- int retval = 0;
- unsigned int i = 0, nb_dequeued;
-
- nb_dequeued = rte_ring_dequeue_burst(qp->ingress_queue,
- (void **)ops, nb_ops, NULL);
-
- for (i = 0; i < nb_dequeued; i++) {
-
- sess = ipsec_mb_get_session_private(qp, ops[i]);
- if (unlikely(sess == NULL)) {
- ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
- qp->stats.dequeue_err_count++;
- break;
- }
-
- retval = chacha20_poly1305_crypto_op(qp, ops[i], sess);
- if (retval < 0) {
- ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
- qp->stats.dequeue_err_count++;
- break;
- }
-
- handle_completed_chacha20_poly1305_crypto_op(qp, ops[i], sess);
- }
-
- qp->stats.dequeued_count += i;
-
- return i;
-}
+#include "pmd_aesni_mb_priv.h"
struct rte_cryptodev_ops chacha20_poly1305_pmd_ops = {
.dev_configure = ipsec_mb_config,
@@ -384,7 +57,7 @@ RTE_INIT(ipsec_mb_register_chacha20_poly1305)
= &ipsec_mb_pmds[IPSEC_MB_PMD_TYPE_CHACHA20_POLY1305];
chacha_poly_data->caps = chacha20_poly1305_capabilities;
- chacha_poly_data->dequeue_burst = chacha20_poly1305_pmd_dequeue_burst;
+ chacha_poly_data->dequeue_burst = aesni_mb_dequeue_burst;
chacha_poly_data->feature_flags =
RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
@@ -395,10 +68,9 @@ RTE_INIT(ipsec_mb_register_chacha20_poly1305)
RTE_CRYPTODEV_FF_SYM_SESSIONLESS;
chacha_poly_data->internals_priv_size = 0;
chacha_poly_data->ops = &chacha20_poly1305_pmd_ops;
- chacha_poly_data->qp_priv_size =
- sizeof(struct chacha20_poly1305_qp_data);
+ chacha_poly_data->qp_priv_size = sizeof(struct aesni_mb_qp_data);
chacha_poly_data->session_configure =
- chacha20_poly1305_session_configure;
+ aesni_mb_session_configure;
chacha_poly_data->session_priv_size =
- sizeof(struct chacha20_poly1305_session);
+ sizeof(struct aesni_mb_session);
}
diff --git a/drivers/crypto/ipsec_mb/pmd_chacha_poly_priv.h b/drivers/crypto/ipsec_mb/pmd_chacha_poly_priv.h
index 842f62f5d1..e668bfe07f 100644
--- a/drivers/crypto/ipsec_mb/pmd_chacha_poly_priv.h
+++ b/drivers/crypto/ipsec_mb/pmd_chacha_poly_priv.h
@@ -7,9 +7,7 @@
#include "ipsec_mb_private.h"
-#define CHACHA20_POLY1305_IV_LENGTH 12
#define CHACHA20_POLY1305_DIGEST_LENGTH 16
-#define CHACHA20_POLY1305_KEY_SIZE 32
static const
struct rte_cryptodev_capabilities chacha20_poly1305_capabilities[] = {
@@ -45,30 +43,4 @@ struct rte_cryptodev_capabilities chacha20_poly1305_capabilities[] = {
uint8_t pmd_driver_id_chacha20_poly1305;
-/** CHACHA20 POLY1305 private session structure */
-struct chacha20_poly1305_session {
- struct {
- uint16_t length;
- uint16_t offset;
- } iv;
- /**< IV parameters */
- uint16_t aad_length;
- /**< AAD length */
- uint16_t req_digest_length;
- /**< Requested digest length */
- uint16_t gen_digest_length;
- /**< Generated digest length */
- uint8_t key[CHACHA20_POLY1305_KEY_SIZE];
- enum ipsec_mb_operation op;
-} __rte_cache_aligned;
-
-struct chacha20_poly1305_qp_data {
- struct chacha20_poly1305_context_data chacha20_poly1305_ctx_data;
- uint8_t temp_digest[CHACHA20_POLY1305_DIGEST_LENGTH];
- /**< Buffer used to store the digest generated
- * by the driver when verifying a digest provided
- * by the user (using authentication verify operation)
- */
-};
-
#endif /* _PMD_CHACHA_POLY_PRIV_H_ */
diff --git a/drivers/crypto/ipsec_mb/pmd_kasumi.c b/drivers/crypto/ipsec_mb/pmd_kasumi.c
index 70536ec3dc..c3571ec81b 100644
--- a/drivers/crypto/ipsec_mb/pmd_kasumi.c
+++ b/drivers/crypto/ipsec_mb/pmd_kasumi.c
@@ -10,406 +10,7 @@
#include <rte_malloc.h>
#include "pmd_kasumi_priv.h"
-
-/** Parse crypto xform chain and set private session parameters. */
-static int
-kasumi_session_configure(IMB_MGR *mgr, void *priv_sess,
- const struct rte_crypto_sym_xform *xform)
-{
- const struct rte_crypto_sym_xform *auth_xform = NULL;
- const struct rte_crypto_sym_xform *cipher_xform = NULL;
- enum ipsec_mb_operation mode;
- struct kasumi_session *sess = (struct kasumi_session *)priv_sess;
- /* Select Crypto operation - hash then cipher / cipher then hash */
- int ret = ipsec_mb_parse_xform(xform, &mode, &auth_xform,
- &cipher_xform, NULL);
-
- if (ret)
- return ret;
-
- if (cipher_xform) {
- /* Only KASUMI F8 supported */
- if (cipher_xform->cipher.algo != RTE_CRYPTO_CIPHER_KASUMI_F8) {
- IPSEC_MB_LOG(ERR, "Unsupported cipher algorithm ");
- return -ENOTSUP;
- }
-
- sess->cipher_iv_offset = cipher_xform->cipher.iv.offset;
- if (cipher_xform->cipher.iv.length != KASUMI_IV_LENGTH) {
- IPSEC_MB_LOG(ERR, "Wrong IV length");
- return -EINVAL;
- }
-
- /* Initialize key */
- IMB_KASUMI_INIT_F8_KEY_SCHED(mgr,
- cipher_xform->cipher.key.data,
- &sess->pKeySched_cipher);
- }
-
- if (auth_xform) {
- /* Only KASUMI F9 supported */
- if (auth_xform->auth.algo != RTE_CRYPTO_AUTH_KASUMI_F9) {
- IPSEC_MB_LOG(ERR, "Unsupported authentication");
- return -ENOTSUP;
- }
-
- if (auth_xform->auth.digest_length != KASUMI_DIGEST_LENGTH) {
- IPSEC_MB_LOG(ERR, "Wrong digest length");
- return -EINVAL;
- }
-
- sess->auth_op = auth_xform->auth.op;
-
- /* Initialize key */
- IMB_KASUMI_INIT_F9_KEY_SCHED(mgr, auth_xform->auth.key.data,
- &sess->pKeySched_hash);
- }
-
- sess->op = mode;
- return ret;
-}
-
-/** Encrypt/decrypt mbufs with same cipher key. */
-static uint8_t
-process_kasumi_cipher_op(struct ipsec_mb_qp *qp, struct rte_crypto_op **ops,
- struct kasumi_session *session, uint8_t num_ops)
-{
- unsigned int i;
- uint8_t processed_ops = 0;
- const void *src[num_ops];
- void *dst[num_ops];
- uint8_t *iv_ptr;
- uint64_t iv[num_ops];
- uint32_t num_bytes[num_ops];
-
- for (i = 0; i < num_ops; i++) {
- src[i] = rte_pktmbuf_mtod_offset(ops[i]->sym->m_src,
- uint8_t *,
- (ops[i]->sym->cipher.data.offset >> 3));
- dst[i] = ops[i]->sym->m_dst
- ? rte_pktmbuf_mtod_offset(ops[i]->sym->m_dst,
- uint8_t *,
- (ops[i]->sym->cipher.data.offset >> 3))
- : rte_pktmbuf_mtod_offset(ops[i]->sym->m_src,
- uint8_t *,
- (ops[i]->sym->cipher.data.offset >> 3));
- iv_ptr = rte_crypto_op_ctod_offset(ops[i], uint8_t *,
- session->cipher_iv_offset);
- iv[i] = *((uint64_t *)(iv_ptr));
- num_bytes[i] = ops[i]->sym->cipher.data.length >> 3;
-
- processed_ops++;
- }
-
- if (processed_ops != 0)
- IMB_KASUMI_F8_N_BUFFER(qp->mb_mgr, &session->pKeySched_cipher,
- iv, src, dst, num_bytes,
- processed_ops);
-
- return processed_ops;
-}
-
-/** Encrypt/decrypt mbuf (bit level function). */
-static uint8_t
-process_kasumi_cipher_op_bit(struct ipsec_mb_qp *qp, struct rte_crypto_op *op,
- struct kasumi_session *session)
-{
- uint8_t *src, *dst;
- uint8_t *iv_ptr;
- uint64_t iv;
- uint32_t length_in_bits, offset_in_bits;
-
- offset_in_bits = op->sym->cipher.data.offset;
- src = rte_pktmbuf_mtod(op->sym->m_src, uint8_t *);
- if (op->sym->m_dst == NULL)
- dst = src;
- else
- dst = rte_pktmbuf_mtod(op->sym->m_dst, uint8_t *);
- iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
- session->cipher_iv_offset);
- iv = *((uint64_t *)(iv_ptr));
- length_in_bits = op->sym->cipher.data.length;
-
- IMB_KASUMI_F8_1_BUFFER_BIT(qp->mb_mgr, &session->pKeySched_cipher, iv,
- src, dst, length_in_bits, offset_in_bits);
-
- return 1;
-}
-
-/** Generate/verify hash from mbufs with same hash key. */
-static int
-process_kasumi_hash_op(struct ipsec_mb_qp *qp, struct rte_crypto_op **ops,
- struct kasumi_session *session, uint8_t num_ops)
-{
- unsigned int i;
- uint8_t processed_ops = 0;
- uint8_t *src, *dst;
- uint32_t length_in_bits;
- uint32_t num_bytes;
- struct kasumi_qp_data *qp_data = ipsec_mb_get_qp_private_data(qp);
-
- for (i = 0; i < num_ops; i++) {
- /* Data must be byte aligned */
- if ((ops[i]->sym->auth.data.offset % BYTE_LEN) != 0) {
- ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
- IPSEC_MB_LOG(ERR, "Invalid Offset");
- break;
- }
-
- length_in_bits = ops[i]->sym->auth.data.length;
-
- src = rte_pktmbuf_mtod_offset(ops[i]->sym->m_src, uint8_t *,
- (ops[i]->sym->auth.data.offset >> 3));
- /* Direction from next bit after end of message */
- num_bytes = length_in_bits >> 3;
-
- if (session->auth_op == RTE_CRYPTO_AUTH_OP_VERIFY) {
- dst = qp_data->temp_digest;
- IMB_KASUMI_F9_1_BUFFER(qp->mb_mgr,
- &session->pKeySched_hash, src,
- num_bytes, dst);
-
- /* Verify digest. */
- if (memcmp(dst, ops[i]->sym->auth.digest.data,
- KASUMI_DIGEST_LENGTH)
- != 0)
- ops[i]->status
- = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
- } else {
- dst = ops[i]->sym->auth.digest.data;
-
- IMB_KASUMI_F9_1_BUFFER(qp->mb_mgr,
- &session->pKeySched_hash, src,
- num_bytes, dst);
- }
- processed_ops++;
- }
-
- return processed_ops;
-}
-
-/** Process a batch of crypto ops which shares the same session. */
-static int
-process_ops(struct rte_crypto_op **ops, struct kasumi_session *session,
- struct ipsec_mb_qp *qp, uint8_t num_ops)
-{
- unsigned int i;
- unsigned int processed_ops;
-
- switch (session->op) {
- case IPSEC_MB_OP_ENCRYPT_ONLY:
- case IPSEC_MB_OP_DECRYPT_ONLY:
- processed_ops
- = process_kasumi_cipher_op(qp, ops, session, num_ops);
- break;
- case IPSEC_MB_OP_HASH_GEN_ONLY:
- case IPSEC_MB_OP_HASH_VERIFY_ONLY:
- processed_ops
- = process_kasumi_hash_op(qp, ops, session, num_ops);
- break;
- case IPSEC_MB_OP_ENCRYPT_THEN_HASH_GEN:
- case IPSEC_MB_OP_DECRYPT_THEN_HASH_VERIFY:
- processed_ops
- = process_kasumi_cipher_op(qp, ops, session, num_ops);
- process_kasumi_hash_op(qp, ops, session, processed_ops);
- break;
- case IPSEC_MB_OP_HASH_VERIFY_THEN_DECRYPT:
- case IPSEC_MB_OP_HASH_GEN_THEN_ENCRYPT:
- processed_ops
- = process_kasumi_hash_op(qp, ops, session, num_ops);
- process_kasumi_cipher_op(qp, ops, session, processed_ops);
- break;
- default:
- /* Operation not supported. */
- processed_ops = 0;
- }
-
- for (i = 0; i < num_ops; i++) {
- /*
- * If there was no error/authentication failure,
- * change status to successful.
- */
- if (ops[i]->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
- ops[i]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
- /* Free session if a session-less crypto op. */
- if (ops[i]->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
- memset(session, 0, sizeof(struct kasumi_session));
- rte_mempool_put(qp->sess_mp, ops[i]->sym->session);
- ops[i]->sym->session = NULL;
- }
- }
- return processed_ops;
-}
-
-/** Process a crypto op with length/offset in bits. */
-static int
-process_op_bit(struct rte_crypto_op *op, struct kasumi_session *session,
- struct ipsec_mb_qp *qp)
-{
- unsigned int processed_op;
-
- switch (session->op) {
- /* case KASUMI_OP_ONLY_CIPHER: */
- case IPSEC_MB_OP_ENCRYPT_ONLY:
- case IPSEC_MB_OP_DECRYPT_ONLY:
- processed_op = process_kasumi_cipher_op_bit(qp, op, session);
- break;
- /* case KASUMI_OP_ONLY_AUTH: */
- case IPSEC_MB_OP_HASH_GEN_ONLY:
- case IPSEC_MB_OP_HASH_VERIFY_ONLY:
- processed_op = process_kasumi_hash_op(qp, &op, session, 1);
- break;
- /* case KASUMI_OP_CIPHER_AUTH: */
- case IPSEC_MB_OP_ENCRYPT_THEN_HASH_GEN:
- processed_op = process_kasumi_cipher_op_bit(qp, op, session);
- if (processed_op == 1)
- process_kasumi_hash_op(qp, &op, session, 1);
- break;
- /* case KASUMI_OP_AUTH_CIPHER: */
- case IPSEC_MB_OP_HASH_VERIFY_THEN_DECRYPT:
- processed_op = process_kasumi_hash_op(qp, &op, session, 1);
- if (processed_op == 1)
- process_kasumi_cipher_op_bit(qp, op, session);
- break;
- default:
- /* Operation not supported. */
- processed_op = 0;
- }
-
- /*
- * If there was no error/authentication failure,
- * change status to successful.
- */
- if (op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
- op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
-
- /* Free session if a session-less crypto op. */
- if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
- memset(CRYPTODEV_GET_SYM_SESS_PRIV(op->sym->session), 0,
- sizeof(struct kasumi_session));
- rte_mempool_put(qp->sess_mp, (void *)op->sym->session);
- op->sym->session = NULL;
- }
- return processed_op;
-}
-
-static uint16_t
-kasumi_pmd_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
- uint16_t nb_ops)
-{
- struct rte_crypto_op *c_ops[nb_ops];
- struct rte_crypto_op *curr_c_op = NULL;
-
- struct kasumi_session *prev_sess = NULL, *curr_sess = NULL;
- struct ipsec_mb_qp *qp = queue_pair;
- unsigned int i;
- uint8_t burst_size = 0;
- uint8_t processed_ops;
- unsigned int nb_dequeued;
-
- nb_dequeued = rte_ring_dequeue_burst(qp->ingress_queue,
- (void **)ops, nb_ops, NULL);
- for (i = 0; i < nb_dequeued; i++) {
- curr_c_op = ops[i];
-
-#ifdef RTE_LIBRTE_PMD_KASUMI_DEBUG
- if (!rte_pktmbuf_is_contiguous(curr_c_op->sym->m_src)
- || (curr_c_op->sym->m_dst != NULL
- && !rte_pktmbuf_is_contiguous(
- curr_c_op->sym->m_dst))) {
- IPSEC_MB_LOG(ERR,
- "PMD supports only contiguous mbufs, op (%p) provides noncontiguous mbuf as source/destination buffer.",
- curr_c_op);
- curr_c_op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
- break;
- }
-#endif
-
- /* Set status as enqueued (not processed yet) by default. */
- curr_c_op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
-
- curr_sess = (struct kasumi_session *)
- ipsec_mb_get_session_private(qp, curr_c_op);
- if (unlikely(curr_sess == NULL
- || curr_sess->op == IPSEC_MB_OP_NOT_SUPPORTED)) {
- curr_c_op->status
- = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
- break;
- }
-
- /* If length/offset is at bit-level, process this buffer alone.
- */
- if (((curr_c_op->sym->cipher.data.length % BYTE_LEN) != 0)
- || ((ops[i]->sym->cipher.data.offset % BYTE_LEN) != 0)) {
- /* Process the ops of the previous session. */
- if (prev_sess != NULL) {
- processed_ops = process_ops(c_ops, prev_sess,
- qp, burst_size);
- if (processed_ops < burst_size) {
- burst_size = 0;
- break;
- }
-
- burst_size = 0;
- prev_sess = NULL;
- }
-
- processed_ops = process_op_bit(curr_c_op,
- curr_sess, qp);
- if (processed_ops != 1)
- break;
-
- continue;
- }
-
- /* Batch ops that share the same session. */
- if (prev_sess == NULL) {
- prev_sess = curr_sess;
- c_ops[burst_size++] = curr_c_op;
- } else if (curr_sess == prev_sess) {
- c_ops[burst_size++] = curr_c_op;
- /*
- * When there are enough ops to process in a batch,
- * process them, and start a new batch.
- */
- if (burst_size == KASUMI_MAX_BURST) {
- processed_ops = process_ops(c_ops, prev_sess,
- qp, burst_size);
- if (processed_ops < burst_size) {
- burst_size = 0;
- break;
- }
-
- burst_size = 0;
- prev_sess = NULL;
- }
- } else {
- /*
- * Different session, process the ops
- * of the previous session.
- */
- processed_ops = process_ops(c_ops, prev_sess, qp,
- burst_size);
- if (processed_ops < burst_size) {
- burst_size = 0;
- break;
- }
-
- burst_size = 0;
- prev_sess = curr_sess;
-
- c_ops[burst_size++] = curr_c_op;
- }
- }
-
- if (burst_size != 0) {
- /* Process the crypto ops of the last session. */
- processed_ops = process_ops(c_ops, prev_sess, qp, burst_size);
- }
-
- qp->stats.dequeued_count += i;
- return i;
-}
+#include "pmd_aesni_mb_priv.h"
struct rte_cryptodev_ops kasumi_pmd_ops = {
.dev_configure = ipsec_mb_config,
@@ -460,7 +61,7 @@ RTE_INIT(ipsec_mb_register_kasumi)
= &ipsec_mb_pmds[IPSEC_MB_PMD_TYPE_KASUMI];
kasumi_data->caps = kasumi_capabilities;
- kasumi_data->dequeue_burst = kasumi_pmd_dequeue_burst;
+ kasumi_data->dequeue_burst = aesni_mb_dequeue_burst;
kasumi_data->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO
| RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING
| RTE_CRYPTODEV_FF_NON_BYTE_ALIGNED_DATA
@@ -469,7 +70,8 @@ RTE_INIT(ipsec_mb_register_kasumi)
| RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;
kasumi_data->internals_priv_size = 0;
kasumi_data->ops = &kasumi_pmd_ops;
- kasumi_data->qp_priv_size = sizeof(struct kasumi_qp_data);
- kasumi_data->session_configure = kasumi_session_configure;
- kasumi_data->session_priv_size = sizeof(struct kasumi_session);
+ kasumi_data->qp_priv_size = sizeof(struct aesni_mb_qp_data);
+ kasumi_data->session_configure = aesni_mb_session_configure;
+ kasumi_data->session_priv_size =
+ sizeof(struct aesni_mb_session);
}
diff --git a/drivers/crypto/ipsec_mb/pmd_kasumi_priv.h b/drivers/crypto/ipsec_mb/pmd_kasumi_priv.h
index 8db1d1cc5b..3223cf1a14 100644
--- a/drivers/crypto/ipsec_mb/pmd_kasumi_priv.h
+++ b/drivers/crypto/ipsec_mb/pmd_kasumi_priv.h
@@ -9,8 +9,6 @@
#define KASUMI_KEY_LENGTH 16
#define KASUMI_IV_LENGTH 8
-#define KASUMI_MAX_BURST 4
-#define BYTE_LEN 8
#define KASUMI_DIGEST_LENGTH 4
uint8_t pmd_driver_id_kasumi;
@@ -60,22 +58,4 @@ static const struct rte_cryptodev_capabilities kasumi_capabilities[] = {
RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
};
-/** KASUMI private session structure */
-struct kasumi_session {
- /* Keys have to be 16-byte aligned */
- kasumi_key_sched_t pKeySched_cipher;
- kasumi_key_sched_t pKeySched_hash;
- enum ipsec_mb_operation op;
- enum rte_crypto_auth_operation auth_op;
- uint16_t cipher_iv_offset;
-} __rte_cache_aligned;
-
-struct kasumi_qp_data {
- uint8_t temp_digest[KASUMI_DIGEST_LENGTH];
- /* *< Buffers used to store the digest generated
- * by the driver when verifying a digest provided
- * by the user (using authentication verify operation)
- */
-};
-
#endif /* _PMD_KASUMI_PRIV_H_ */
diff --git a/drivers/crypto/ipsec_mb/pmd_snow3g.c b/drivers/crypto/ipsec_mb/pmd_snow3g.c
index a96779f059..957f6aade8 100644
--- a/drivers/crypto/ipsec_mb/pmd_snow3g.c
+++ b/drivers/crypto/ipsec_mb/pmd_snow3g.c
@@ -3,539 +3,7 @@
*/
#include "pmd_snow3g_priv.h"
-
-/** Parse crypto xform chain and set private session parameters. */
-static int
-snow3g_session_configure(IMB_MGR *mgr, void *priv_sess,
- const struct rte_crypto_sym_xform *xform)
-{
- struct snow3g_session *sess = (struct snow3g_session *)priv_sess;
- const struct rte_crypto_sym_xform *auth_xform = NULL;
- const struct rte_crypto_sym_xform *cipher_xform = NULL;
- enum ipsec_mb_operation mode;
-
- /* Select Crypto operation - hash then cipher / cipher then hash */
- int ret = ipsec_mb_parse_xform(xform, &mode, &auth_xform,
- &cipher_xform, NULL);
- if (ret)
- return ret;
-
- if (cipher_xform) {
- /* Only SNOW 3G UEA2 supported */
- if (cipher_xform->cipher.algo != RTE_CRYPTO_CIPHER_SNOW3G_UEA2)
- return -ENOTSUP;
-
- if (cipher_xform->cipher.iv.length != SNOW3G_IV_LENGTH) {
- IPSEC_MB_LOG(ERR, "Wrong IV length");
- return -EINVAL;
- }
- if (cipher_xform->cipher.key.length > SNOW3G_MAX_KEY_SIZE) {
- IPSEC_MB_LOG(ERR, "Not enough memory to store the key");
- return -ENOMEM;
- }
-
- sess->cipher_iv_offset = cipher_xform->cipher.iv.offset;
-
- /* Initialize key */
- IMB_SNOW3G_INIT_KEY_SCHED(mgr, cipher_xform->cipher.key.data,
- &sess->pKeySched_cipher);
- }
-
- if (auth_xform) {
- /* Only SNOW 3G UIA2 supported */
- if (auth_xform->auth.algo != RTE_CRYPTO_AUTH_SNOW3G_UIA2)
- return -ENOTSUP;
-
- if (auth_xform->auth.digest_length != SNOW3G_DIGEST_LENGTH) {
- IPSEC_MB_LOG(ERR, "Wrong digest length");
- return -EINVAL;
- }
- if (auth_xform->auth.key.length > SNOW3G_MAX_KEY_SIZE) {
- IPSEC_MB_LOG(ERR, "Not enough memory to store the key");
- return -ENOMEM;
- }
-
- sess->auth_op = auth_xform->auth.op;
-
- if (auth_xform->auth.iv.length != SNOW3G_IV_LENGTH) {
- IPSEC_MB_LOG(ERR, "Wrong IV length");
- return -EINVAL;
- }
- sess->auth_iv_offset = auth_xform->auth.iv.offset;
-
- /* Initialize key */
- IMB_SNOW3G_INIT_KEY_SCHED(mgr, auth_xform->auth.key.data,
- &sess->pKeySched_hash);
- }
-
- sess->op = mode;
-
- return 0;
-}
-
-/** Check if conditions are met for digest-appended operations */
-static uint8_t *
-snow3g_digest_appended_in_src(struct rte_crypto_op *op)
-{
- unsigned int auth_size, cipher_size;
-
- auth_size = (op->sym->auth.data.offset >> 3) +
- (op->sym->auth.data.length >> 3);
- cipher_size = (op->sym->cipher.data.offset >> 3) +
- (op->sym->cipher.data.length >> 3);
-
- if (auth_size < cipher_size)
- return rte_pktmbuf_mtod_offset(op->sym->m_src,
- uint8_t *, auth_size);
-
- return NULL;
-}
-
-/** Encrypt/decrypt mbufs with same cipher key. */
-static uint8_t
-process_snow3g_cipher_op(struct ipsec_mb_qp *qp, struct rte_crypto_op **ops,
- struct snow3g_session *session,
- uint8_t num_ops)
-{
- uint32_t i;
- uint8_t processed_ops = 0;
- const void *src[SNOW3G_MAX_BURST] = {NULL};
- void *dst[SNOW3G_MAX_BURST] = {NULL};
- uint8_t *digest_appended[SNOW3G_MAX_BURST] = {NULL};
- const void *iv[SNOW3G_MAX_BURST] = {NULL};
- uint32_t num_bytes[SNOW3G_MAX_BURST] = {0};
- uint32_t cipher_off, cipher_len;
- int unencrypted_bytes = 0;
-
- for (i = 0; i < num_ops; i++) {
-
- cipher_off = ops[i]->sym->cipher.data.offset >> 3;
- cipher_len = ops[i]->sym->cipher.data.length >> 3;
- src[i] = rte_pktmbuf_mtod_offset(
- ops[i]->sym->m_src, uint8_t *, cipher_off);
-
- /* If out-of-place operation */
- if (ops[i]->sym->m_dst &&
- ops[i]->sym->m_src != ops[i]->sym->m_dst) {
- dst[i] = rte_pktmbuf_mtod_offset(
- ops[i]->sym->m_dst, uint8_t *, cipher_off);
-
- /* In case of out-of-place, auth-cipher operation
- * with partial encryption of the digest, copy
- * the remaining, unencrypted part.
- */
- if (session->op == IPSEC_MB_OP_HASH_VERIFY_THEN_DECRYPT
- || session->op == IPSEC_MB_OP_HASH_GEN_THEN_ENCRYPT)
- unencrypted_bytes =
- (ops[i]->sym->auth.data.offset >> 3) +
- (ops[i]->sym->auth.data.length >> 3) +
- (SNOW3G_DIGEST_LENGTH) -
- cipher_off - cipher_len;
- if (unencrypted_bytes > 0)
- rte_memcpy(
- rte_pktmbuf_mtod_offset(
- ops[i]->sym->m_dst, uint8_t *,
- cipher_off + cipher_len),
- rte_pktmbuf_mtod_offset(
- ops[i]->sym->m_src, uint8_t *,
- cipher_off + cipher_len),
- unencrypted_bytes);
- } else
- dst[i] = rte_pktmbuf_mtod_offset(ops[i]->sym->m_src,
- uint8_t *, cipher_off);
-
- iv[i] = rte_crypto_op_ctod_offset(ops[i], uint8_t *,
- session->cipher_iv_offset);
- num_bytes[i] = cipher_len;
- processed_ops++;
- }
-
- IMB_SNOW3G_F8_N_BUFFER(qp->mb_mgr, &session->pKeySched_cipher, iv,
- src, dst, num_bytes, processed_ops);
-
- /* Take care of the raw digest data in src buffer */
- for (i = 0; i < num_ops; i++) {
- if ((session->op == IPSEC_MB_OP_HASH_VERIFY_THEN_DECRYPT ||
- session->op == IPSEC_MB_OP_HASH_GEN_THEN_ENCRYPT) &&
- ops[i]->sym->m_dst != NULL) {
- digest_appended[i] =
- snow3g_digest_appended_in_src(ops[i]);
- /* Clear unencrypted digest from
- * the src buffer
- */
- if (digest_appended[i] != NULL)
- memset(digest_appended[i],
- 0, SNOW3G_DIGEST_LENGTH);
- }
- }
- return processed_ops;
-}
-
-/** Encrypt/decrypt mbuf (bit level function). */
-static uint8_t
-process_snow3g_cipher_op_bit(struct ipsec_mb_qp *qp,
- struct rte_crypto_op *op,
- struct snow3g_session *session)
-{
- uint8_t *src, *dst;
- uint8_t *iv;
- uint32_t length_in_bits, offset_in_bits;
- int unencrypted_bytes = 0;
-
- offset_in_bits = op->sym->cipher.data.offset;
- src = rte_pktmbuf_mtod(op->sym->m_src, uint8_t *);
- if (op->sym->m_dst == NULL) {
- op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
- IPSEC_MB_LOG(ERR, "bit-level in-place not supported\n");
- return 0;
- }
- length_in_bits = op->sym->cipher.data.length;
- dst = rte_pktmbuf_mtod(op->sym->m_dst, uint8_t *);
- /* In case of out-of-place, auth-cipher operation
- * with partial encryption of the digest, copy
- * the remaining, unencrypted part.
- */
- if (session->op == IPSEC_MB_OP_HASH_VERIFY_THEN_DECRYPT ||
- session->op == IPSEC_MB_OP_HASH_GEN_THEN_ENCRYPT)
- unencrypted_bytes =
- (op->sym->auth.data.offset >> 3) +
- (op->sym->auth.data.length >> 3) +
- (SNOW3G_DIGEST_LENGTH) -
- (offset_in_bits >> 3) -
- (length_in_bits >> 3);
- if (unencrypted_bytes > 0)
- rte_memcpy(
- rte_pktmbuf_mtod_offset(
- op->sym->m_dst, uint8_t *,
- (length_in_bits >> 3)),
- rte_pktmbuf_mtod_offset(
- op->sym->m_src, uint8_t *,
- (length_in_bits >> 3)),
- unencrypted_bytes);
-
- iv = rte_crypto_op_ctod_offset(op, uint8_t *,
- session->cipher_iv_offset);
-
- IMB_SNOW3G_F8_1_BUFFER_BIT(qp->mb_mgr, &session->pKeySched_cipher, iv,
- src, dst, length_in_bits, offset_in_bits);
-
- return 1;
-}
-
-/** Generate/verify hash from mbufs with same hash key. */
-static int
-process_snow3g_hash_op(struct ipsec_mb_qp *qp, struct rte_crypto_op **ops,
- struct snow3g_session *session,
- uint8_t num_ops)
-{
- uint32_t i;
- uint8_t processed_ops = 0;
- uint8_t *src, *dst;
- uint32_t length_in_bits;
- uint8_t *iv;
- uint8_t digest_appended = 0;
- struct snow3g_qp_data *qp_data = ipsec_mb_get_qp_private_data(qp);
-
- for (i = 0; i < num_ops; i++) {
- /* Data must be byte aligned */
- if ((ops[i]->sym->auth.data.offset % BYTE_LEN) != 0) {
- ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
- IPSEC_MB_LOG(ERR, "Offset");
- break;
- }
-
- dst = NULL;
-
- length_in_bits = ops[i]->sym->auth.data.length;
-
- src = rte_pktmbuf_mtod_offset(ops[i]->sym->m_src, uint8_t *,
- (ops[i]->sym->auth.data.offset >> 3));
- iv = rte_crypto_op_ctod_offset(ops[i], uint8_t *,
- session->auth_iv_offset);
-
- if (session->auth_op == RTE_CRYPTO_AUTH_OP_VERIFY) {
- dst = qp_data->temp_digest;
- /* Handle auth cipher verify oop case*/
- if ((session->op ==
- IPSEC_MB_OP_ENCRYPT_THEN_HASH_GEN ||
- session->op ==
- IPSEC_MB_OP_DECRYPT_THEN_HASH_VERIFY) &&
- ops[i]->sym->m_dst != NULL)
- src = rte_pktmbuf_mtod_offset(
- ops[i]->sym->m_dst, uint8_t *,
- ops[i]->sym->auth.data.offset >> 3);
-
- IMB_SNOW3G_F9_1_BUFFER(qp->mb_mgr,
- &session->pKeySched_hash,
- iv, src, length_in_bits, dst);
- /* Verify digest. */
- if (memcmp(dst, ops[i]->sym->auth.digest.data,
- SNOW3G_DIGEST_LENGTH) != 0)
- ops[i]->status =
- RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
- } else {
- if (session->op ==
- IPSEC_MB_OP_HASH_VERIFY_THEN_DECRYPT ||
- session->op ==
- IPSEC_MB_OP_HASH_GEN_THEN_ENCRYPT)
- dst = snow3g_digest_appended_in_src(ops[i]);
-
- if (dst != NULL)
- digest_appended = 1;
- else
- dst = ops[i]->sym->auth.digest.data;
-
- IMB_SNOW3G_F9_1_BUFFER(qp->mb_mgr,
- &session->pKeySched_hash,
- iv, src, length_in_bits, dst);
-
- /* Copy back digest from src to auth.digest.data */
- if (digest_appended)
- rte_memcpy(ops[i]->sym->auth.digest.data,
- dst, SNOW3G_DIGEST_LENGTH);
- }
- processed_ops++;
- }
-
- return processed_ops;
-}
-
-/** Process a batch of crypto ops which shares the same session. */
-static int
-process_ops(struct rte_crypto_op **ops, struct snow3g_session *session,
- struct ipsec_mb_qp *qp, uint8_t num_ops)
-{
- uint32_t i;
- uint32_t processed_ops;
-
-#ifdef RTE_LIBRTE_PMD_SNOW3G_DEBUG
- for (i = 0; i < num_ops; i++) {
- if (!rte_pktmbuf_is_contiguous(ops[i]->sym->m_src) ||
- (ops[i]->sym->m_dst != NULL &&
- !rte_pktmbuf_is_contiguous(
- ops[i]->sym->m_dst))) {
- IPSEC_MB_LOG(ERR,
- "PMD supports only contiguous mbufs, "
- "op (%p) provides noncontiguous mbuf as "
- "source/destination buffer.\n", ops[i]);
- ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
- return 0;
- }
- }
-#endif
-
- switch (session->op) {
- case IPSEC_MB_OP_ENCRYPT_ONLY:
- case IPSEC_MB_OP_DECRYPT_ONLY:
- processed_ops = process_snow3g_cipher_op(qp, ops,
- session, num_ops);
- break;
- case IPSEC_MB_OP_HASH_GEN_ONLY:
- case IPSEC_MB_OP_HASH_VERIFY_ONLY:
- processed_ops = process_snow3g_hash_op(qp, ops, session,
- num_ops);
- break;
- case IPSEC_MB_OP_ENCRYPT_THEN_HASH_GEN:
- case IPSEC_MB_OP_DECRYPT_THEN_HASH_VERIFY:
- processed_ops = process_snow3g_cipher_op(qp, ops, session,
- num_ops);
- process_snow3g_hash_op(qp, ops, session, processed_ops);
- break;
- case IPSEC_MB_OP_HASH_VERIFY_THEN_DECRYPT:
- case IPSEC_MB_OP_HASH_GEN_THEN_ENCRYPT:
- processed_ops = process_snow3g_hash_op(qp, ops, session,
- num_ops);
- process_snow3g_cipher_op(qp, ops, session, processed_ops);
- break;
- default:
- /* Operation not supported. */
- processed_ops = 0;
- }
-
- for (i = 0; i < num_ops; i++) {
- /*
- * If there was no error/authentication failure,
- * change status to successful.
- */
- if (ops[i]->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
- ops[i]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
- /* Free session if a session-less crypto op. */
- if (ops[i]->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
- memset(session, 0, sizeof(struct snow3g_session));
- rte_mempool_put(qp->sess_mp, ops[i]->sym->session);
- ops[i]->sym->session = NULL;
- }
- }
- return processed_ops;
-}
-
-/** Process a crypto op with length/offset in bits. */
-static int
-process_op_bit(struct rte_crypto_op *op, struct snow3g_session *session,
- struct ipsec_mb_qp *qp)
-{
- unsigned int processed_op;
- int ret;
-
- switch (session->op) {
- case IPSEC_MB_OP_ENCRYPT_ONLY:
- case IPSEC_MB_OP_DECRYPT_ONLY:
-
- processed_op = process_snow3g_cipher_op_bit(qp, op,
- session);
- break;
- case IPSEC_MB_OP_HASH_GEN_ONLY:
- case IPSEC_MB_OP_HASH_VERIFY_ONLY:
- processed_op = process_snow3g_hash_op(qp, &op, session, 1);
- break;
- case IPSEC_MB_OP_ENCRYPT_THEN_HASH_GEN:
- case IPSEC_MB_OP_DECRYPT_THEN_HASH_VERIFY:
- processed_op = process_snow3g_cipher_op_bit(qp, op, session);
- if (processed_op == 1)
- process_snow3g_hash_op(qp, &op, session, 1);
- break;
- case IPSEC_MB_OP_HASH_VERIFY_THEN_DECRYPT:
- case IPSEC_MB_OP_HASH_GEN_THEN_ENCRYPT:
- processed_op = process_snow3g_hash_op(qp, &op, session, 1);
- if (processed_op == 1)
- process_snow3g_cipher_op_bit(qp, op, session);
- break;
- default:
- /* Operation not supported. */
- processed_op = 0;
- }
-
- /*
- * If there was no error/authentication failure,
- * change status to successful.
- */
- if (op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
- op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
-
- /* Free session if a session-less crypto op. */
- if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
- memset(CRYPTODEV_GET_SYM_SESS_PRIV(op->sym->session), 0,
- sizeof(struct snow3g_session));
- rte_mempool_put(qp->sess_mp, (void *)op->sym->session);
- op->sym->session = NULL;
- }
-
- if (unlikely(processed_op != 1))
- return 0;
-
- ret = rte_ring_enqueue(qp->ingress_queue, op);
- if (ret != 0)
- return ret;
-
- return 1;
-}
-
-static uint16_t
-snow3g_pmd_dequeue_burst(void *queue_pair,
- struct rte_crypto_op **ops, uint16_t nb_ops)
-{
- struct ipsec_mb_qp *qp = queue_pair;
- struct rte_crypto_op *c_ops[SNOW3G_MAX_BURST];
- struct rte_crypto_op *curr_c_op;
-
- struct snow3g_session *prev_sess = NULL, *curr_sess = NULL;
- uint32_t i;
- uint8_t burst_size = 0;
- uint8_t processed_ops;
- uint32_t nb_dequeued;
-
- nb_dequeued = rte_ring_dequeue_burst(qp->ingress_queue,
- (void **)ops, nb_ops, NULL);
-
- for (i = 0; i < nb_dequeued; i++) {
- curr_c_op = ops[i];
-
- /* Set status as enqueued (not processed yet) by default. */
- curr_c_op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
-
- curr_sess = ipsec_mb_get_session_private(qp, curr_c_op);
- if (unlikely(curr_sess == NULL ||
- curr_sess->op == IPSEC_MB_OP_NOT_SUPPORTED)) {
- curr_c_op->status =
- RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
- break;
- }
-
- /* If length/offset is at bit-level,
- * process this buffer alone.
- */
- if (((curr_c_op->sym->cipher.data.length % BYTE_LEN) != 0)
- || ((curr_c_op->sym->cipher.data.offset
- % BYTE_LEN) != 0)) {
- /* Process the ops of the previous session. */
- if (prev_sess != NULL) {
- processed_ops = process_ops(c_ops, prev_sess,
- qp, burst_size);
- if (processed_ops < burst_size) {
- burst_size = 0;
- break;
- }
-
- burst_size = 0;
- prev_sess = NULL;
- }
-
- processed_ops = process_op_bit(curr_c_op, curr_sess, qp);
- if (processed_ops != 1)
- break;
-
- continue;
- }
-
- /* Batch ops that share the same session. */
- if (prev_sess == NULL) {
- prev_sess = curr_sess;
- c_ops[burst_size++] = curr_c_op;
- } else if (curr_sess == prev_sess) {
- c_ops[burst_size++] = curr_c_op;
- /*
- * When there are enough ops to process in a batch,
- * process them, and start a new batch.
- */
- if (burst_size == SNOW3G_MAX_BURST) {
- processed_ops = process_ops(c_ops, prev_sess,
- qp, burst_size);
- if (processed_ops < burst_size) {
- burst_size = 0;
- break;
- }
-
- burst_size = 0;
- prev_sess = NULL;
- }
- } else {
- /*
- * Different session, process the ops
- * of the previous session.
- */
- processed_ops = process_ops(c_ops, prev_sess,
- qp, burst_size);
- if (processed_ops < burst_size) {
- burst_size = 0;
- break;
- }
-
- burst_size = 0;
- prev_sess = curr_sess;
-
- c_ops[burst_size++] = curr_c_op;
- }
- }
-
- if (burst_size != 0) {
- /* Process the crypto ops of the last session. */
- processed_ops = process_ops(c_ops, prev_sess,
- qp, burst_size);
- }
-
- qp->stats.dequeued_count += i;
- return i;
-}
+#include "pmd_aesni_mb_priv.h"
struct rte_cryptodev_ops snow3g_pmd_ops = {
.dev_configure = ipsec_mb_config,
@@ -586,7 +54,7 @@ RTE_INIT(ipsec_mb_register_snow3g)
= &ipsec_mb_pmds[IPSEC_MB_PMD_TYPE_SNOW3G];
snow3g_data->caps = snow3g_capabilities;
- snow3g_data->dequeue_burst = snow3g_pmd_dequeue_burst;
+ snow3g_data->dequeue_burst = aesni_mb_dequeue_burst;
snow3g_data->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
RTE_CRYPTODEV_FF_NON_BYTE_ALIGNED_DATA |
@@ -595,7 +63,8 @@ RTE_INIT(ipsec_mb_register_snow3g)
RTE_CRYPTODEV_FF_DIGEST_ENCRYPTED;
snow3g_data->internals_priv_size = 0;
snow3g_data->ops = &snow3g_pmd_ops;
- snow3g_data->qp_priv_size = sizeof(struct snow3g_qp_data);
- snow3g_data->session_configure = snow3g_session_configure;
- snow3g_data->session_priv_size = sizeof(struct snow3g_session);
+ snow3g_data->qp_priv_size = sizeof(struct aesni_mb_qp_data);
+ snow3g_data->session_configure = aesni_mb_session_configure;
+ snow3g_data->session_priv_size =
+ sizeof(struct aesni_mb_session);
}
diff --git a/drivers/crypto/ipsec_mb/pmd_snow3g_priv.h b/drivers/crypto/ipsec_mb/pmd_snow3g_priv.h
index ca1ce7f9d6..3ceb33b602 100644
--- a/drivers/crypto/ipsec_mb/pmd_snow3g_priv.h
+++ b/drivers/crypto/ipsec_mb/pmd_snow3g_priv.h
@@ -8,10 +8,7 @@
#include "ipsec_mb_private.h"
#define SNOW3G_IV_LENGTH 16
-#define SNOW3G_MAX_BURST 8
-#define BYTE_LEN 8
#define SNOW3G_DIGEST_LENGTH 4
-#define SNOW3G_MAX_KEY_SIZE 128
uint8_t pmd_driver_id_snow3g;
@@ -64,22 +61,4 @@ static const struct rte_cryptodev_capabilities snow3g_capabilities[] = {
RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
};
-/** SNOW 3G private session structure */
-struct snow3g_session {
- enum ipsec_mb_operation op;
- enum rte_crypto_auth_operation auth_op;
- snow3g_key_schedule_t pKeySched_cipher;
- snow3g_key_schedule_t pKeySched_hash;
- uint16_t cipher_iv_offset;
- uint16_t auth_iv_offset;
-} __rte_cache_aligned;
-
-struct snow3g_qp_data {
- uint8_t temp_digest[SNOW3G_DIGEST_LENGTH];
- /**< Buffer used to store the digest generated
- * by the driver when verifying a digest provided
- * by the user (using authentication verify operation)
- */
-};
-
#endif /* _PMD_SNOW3G_PRIV_H_ */
diff --git a/drivers/crypto/ipsec_mb/pmd_zuc.c b/drivers/crypto/ipsec_mb/pmd_zuc.c
index 44781be1d1..b72191c7a7 100644
--- a/drivers/crypto/ipsec_mb/pmd_zuc.c
+++ b/drivers/crypto/ipsec_mb/pmd_zuc.c
@@ -3,343 +3,7 @@
*/
#include "pmd_zuc_priv.h"
-
-/** Parse crypto xform chain and set private session parameters. */
-static int
-zuc_session_configure(__rte_unused IMB_MGR * mgr, void *zuc_sess,
- const struct rte_crypto_sym_xform *xform)
-{
- struct zuc_session *sess = (struct zuc_session *) zuc_sess;
- const struct rte_crypto_sym_xform *auth_xform = NULL;
- const struct rte_crypto_sym_xform *cipher_xform = NULL;
- enum ipsec_mb_operation mode;
- /* Select Crypto operation - hash then cipher / cipher then hash */
- int ret = ipsec_mb_parse_xform(xform, &mode, &auth_xform,
- &cipher_xform, NULL);
-
- if (ret)
- return ret;
-
- if (cipher_xform) {
- /* Only ZUC EEA3 supported */
- if (cipher_xform->cipher.algo != RTE_CRYPTO_CIPHER_ZUC_EEA3)
- return -ENOTSUP;
-
- if (cipher_xform->cipher.iv.length != ZUC_IV_KEY_LENGTH) {
- IPSEC_MB_LOG(ERR, "Wrong IV length");
- return -EINVAL;
- }
- sess->cipher_iv_offset = cipher_xform->cipher.iv.offset;
-
- /* Copy the key */
- memcpy(sess->pKey_cipher, cipher_xform->cipher.key.data,
- ZUC_IV_KEY_LENGTH);
- }
-
- if (auth_xform) {
- /* Only ZUC EIA3 supported */
- if (auth_xform->auth.algo != RTE_CRYPTO_AUTH_ZUC_EIA3)
- return -ENOTSUP;
-
- if (auth_xform->auth.digest_length != ZUC_DIGEST_LENGTH) {
- IPSEC_MB_LOG(ERR, "Wrong digest length");
- return -EINVAL;
- }
-
- sess->auth_op = auth_xform->auth.op;
-
- if (auth_xform->auth.iv.length != ZUC_IV_KEY_LENGTH) {
- IPSEC_MB_LOG(ERR, "Wrong IV length");
- return -EINVAL;
- }
- sess->auth_iv_offset = auth_xform->auth.iv.offset;
-
- /* Copy the key */
- memcpy(sess->pKey_hash, auth_xform->auth.key.data,
- ZUC_IV_KEY_LENGTH);
- }
-
- sess->op = mode;
- return 0;
-}
-
-/** Encrypt/decrypt mbufs. */
-static uint8_t
-process_zuc_cipher_op(struct ipsec_mb_qp *qp, struct rte_crypto_op **ops,
- struct zuc_session **sessions,
- uint8_t num_ops)
-{
- unsigned int i;
- uint8_t processed_ops = 0;
- const void *src[ZUC_MAX_BURST];
- void *dst[ZUC_MAX_BURST];
- const void *iv[ZUC_MAX_BURST];
- uint32_t num_bytes[ZUC_MAX_BURST];
- const void *cipher_keys[ZUC_MAX_BURST];
- struct zuc_session *sess;
-
- for (i = 0; i < num_ops; i++) {
- if (((ops[i]->sym->cipher.data.length % BYTE_LEN) != 0)
- || ((ops[i]->sym->cipher.data.offset
- % BYTE_LEN) != 0)) {
- ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
- IPSEC_MB_LOG(ERR, "Data Length or offset");
- break;
- }
-
- sess = sessions[i];
-
-#ifdef RTE_LIBRTE_PMD_ZUC_DEBUG
- if (!rte_pktmbuf_is_contiguous(ops[i]->sym->m_src) ||
- (ops[i]->sym->m_dst != NULL &&
- !rte_pktmbuf_is_contiguous(
- ops[i]->sym->m_dst))) {
- IPSEC_MB_LOG(ERR, "PMD supports only "
- " contiguous mbufs, op (%p) "
- "provides noncontiguous mbuf "
- "as source/destination buffer.\n",
- "PMD supports only contiguous mbufs, "
- "op (%p) provides noncontiguous mbuf "
- "as source/destination buffer.\n",
- ops[i]);
- ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
- break;
- }
-#endif
-
- src[i] = rte_pktmbuf_mtod_offset(ops[i]->sym->m_src,
- uint8_t *,
- (ops[i]->sym->cipher.data.offset >> 3));
- dst[i] = ops[i]->sym->m_dst ?
- rte_pktmbuf_mtod_offset(ops[i]->sym->m_dst, uint8_t *,
- (ops[i]->sym->cipher.data.offset >> 3)) :
- rte_pktmbuf_mtod_offset(ops[i]->sym->m_src, uint8_t *,
- (ops[i]->sym->cipher.data.offset >> 3));
- iv[i] = rte_crypto_op_ctod_offset(ops[i], uint8_t *,
- sess->cipher_iv_offset);
- num_bytes[i] = ops[i]->sym->cipher.data.length >> 3;
-
- cipher_keys[i] = sess->pKey_cipher;
-
- processed_ops++;
- }
-
- IMB_ZUC_EEA3_N_BUFFER(qp->mb_mgr, (const void **)cipher_keys,
- (const void **)iv, (const void **)src, (void **)dst,
- num_bytes, processed_ops);
-
- return processed_ops;
-}
-
-/** Generate/verify hash from mbufs. */
-static int
-process_zuc_hash_op(struct ipsec_mb_qp *qp, struct rte_crypto_op **ops,
- struct zuc_session **sessions,
- uint8_t num_ops)
-{
- unsigned int i;
- uint8_t processed_ops = 0;
- uint8_t *src[ZUC_MAX_BURST] = { 0 };
- uint32_t *dst[ZUC_MAX_BURST];
- uint32_t length_in_bits[ZUC_MAX_BURST] = { 0 };
- uint8_t *iv[ZUC_MAX_BURST] = { 0 };
- const void *hash_keys[ZUC_MAX_BURST] = { 0 };
- struct zuc_session *sess;
- struct zuc_qp_data *qp_data = ipsec_mb_get_qp_private_data(qp);
-
-
- for (i = 0; i < num_ops; i++) {
- /* Data must be byte aligned */
- if ((ops[i]->sym->auth.data.offset % BYTE_LEN) != 0) {
- ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
- IPSEC_MB_LOG(ERR, "Offset");
- break;
- }
-
- sess = sessions[i];
-
- length_in_bits[i] = ops[i]->sym->auth.data.length;
-
- src[i] = rte_pktmbuf_mtod_offset(ops[i]->sym->m_src,
- uint8_t *,
- (ops[i]->sym->auth.data.offset >> 3));
- iv[i] = rte_crypto_op_ctod_offset(ops[i], uint8_t *,
- sess->auth_iv_offset);
-
- hash_keys[i] = sess->pKey_hash;
- if (sess->auth_op == RTE_CRYPTO_AUTH_OP_VERIFY)
- dst[i] = (uint32_t *)qp_data->temp_digest[i];
- else
- dst[i] = (uint32_t *)ops[i]->sym->auth.digest.data;
-
- processed_ops++;
- }
-
- IMB_ZUC_EIA3_N_BUFFER(qp->mb_mgr, (const void **)hash_keys,
- (const void * const *)iv, (const void * const *)src,
- length_in_bits, dst, processed_ops);
-
- /*
- * If tag needs to be verified, compare generated tag
- * with attached tag
- */
- for (i = 0; i < processed_ops; i++)
- if (sessions[i]->auth_op == RTE_CRYPTO_AUTH_OP_VERIFY)
- if (memcmp(dst[i], ops[i]->sym->auth.digest.data,
- ZUC_DIGEST_LENGTH) != 0)
- ops[i]->status =
- RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
-
- return processed_ops;
-}
-
-/** Process a batch of crypto ops which shares the same operation type. */
-static int
-process_ops(struct rte_crypto_op **ops, enum ipsec_mb_operation op_type,
- struct zuc_session **sessions,
- struct ipsec_mb_qp *qp, uint8_t num_ops)
-{
- unsigned int i;
- unsigned int processed_ops = 0;
-
- switch (op_type) {
- case IPSEC_MB_OP_ENCRYPT_ONLY:
- case IPSEC_MB_OP_DECRYPT_ONLY:
- processed_ops = process_zuc_cipher_op(qp, ops,
- sessions, num_ops);
- break;
- case IPSEC_MB_OP_HASH_GEN_ONLY:
- case IPSEC_MB_OP_HASH_VERIFY_ONLY:
- processed_ops = process_zuc_hash_op(qp, ops, sessions,
- num_ops);
- break;
- case IPSEC_MB_OP_ENCRYPT_THEN_HASH_GEN:
- case IPSEC_MB_OP_DECRYPT_THEN_HASH_VERIFY:
- processed_ops = process_zuc_cipher_op(qp, ops, sessions,
- num_ops);
- process_zuc_hash_op(qp, ops, sessions, processed_ops);
- break;
- case IPSEC_MB_OP_HASH_VERIFY_THEN_DECRYPT:
- case IPSEC_MB_OP_HASH_GEN_THEN_ENCRYPT:
- processed_ops = process_zuc_hash_op(qp, ops, sessions,
- num_ops);
- process_zuc_cipher_op(qp, ops, sessions, processed_ops);
- break;
- default:
- /* Operation not supported. */
- for (i = 0; i < num_ops; i++)
- ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
- }
-
- for (i = 0; i < num_ops; i++) {
- /*
- * If there was no error/authentication failure,
- * change status to successful.
- */
- if (ops[i]->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
- ops[i]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
- /* Free session if a session-less crypto op. */
- if (ops[i]->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
- memset(sessions[i], 0, sizeof(struct zuc_session));
- rte_mempool_put(qp->sess_mp, ops[i]->sym->session);
- ops[i]->sym->session = NULL;
- }
- }
- return processed_ops;
-}
-
-static uint16_t
-zuc_pmd_dequeue_burst(void *queue_pair,
- struct rte_crypto_op **c_ops, uint16_t nb_ops)
-{
-
- struct rte_crypto_op *curr_c_op;
-
- struct zuc_session *curr_sess;
- struct zuc_session *sessions[ZUC_MAX_BURST];
- struct rte_crypto_op *int_c_ops[ZUC_MAX_BURST];
- enum ipsec_mb_operation prev_zuc_op = IPSEC_MB_OP_NOT_SUPPORTED;
- enum ipsec_mb_operation curr_zuc_op;
- struct ipsec_mb_qp *qp = queue_pair;
- unsigned int nb_dequeued;
- unsigned int i;
- uint8_t burst_size = 0;
- uint8_t processed_ops;
-
- nb_dequeued = rte_ring_dequeue_burst(qp->ingress_queue,
- (void **)c_ops, nb_ops, NULL);
-
-
- for (i = 0; i < nb_dequeued; i++) {
- curr_c_op = c_ops[i];
-
- curr_sess = (struct zuc_session *)
- ipsec_mb_get_session_private(qp, curr_c_op);
- if (unlikely(curr_sess == NULL)) {
- curr_c_op->status =
- RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
- break;
- }
-
- curr_zuc_op = curr_sess->op;
-
- /*
- * Batch ops that share the same operation type
- * (cipher only, auth only...).
- */
- if (burst_size == 0) {
- prev_zuc_op = curr_zuc_op;
- int_c_ops[0] = curr_c_op;
- sessions[0] = curr_sess;
- burst_size++;
- } else if (curr_zuc_op == prev_zuc_op) {
- int_c_ops[burst_size] = curr_c_op;
- sessions[burst_size] = curr_sess;
- burst_size++;
- /*
- * When there are enough ops to process in a batch,
- * process them, and start a new batch.
- */
- if (burst_size == ZUC_MAX_BURST) {
- processed_ops = process_ops(int_c_ops, curr_zuc_op,
- sessions, qp, burst_size);
- if (processed_ops < burst_size) {
- burst_size = 0;
- break;
- }
-
- burst_size = 0;
- }
- } else {
- /*
- * Different operation type, process the ops
- * of the previous type.
- */
- processed_ops = process_ops(int_c_ops, prev_zuc_op,
- sessions, qp, burst_size);
- if (processed_ops < burst_size) {
- burst_size = 0;
- break;
- }
-
- burst_size = 0;
- prev_zuc_op = curr_zuc_op;
-
- int_c_ops[0] = curr_c_op;
- sessions[0] = curr_sess;
- burst_size++;
- }
- }
-
- if (burst_size != 0) {
- /* Process the crypto ops of the last operation type. */
- processed_ops = process_ops(int_c_ops, prev_zuc_op,
- sessions, qp, burst_size);
- }
-
- qp->stats.dequeued_count += i;
- return i;
-}
+#include "pmd_aesni_mb_priv.h"
struct rte_cryptodev_ops zuc_pmd_ops = {
.dev_configure = ipsec_mb_config,
@@ -390,7 +54,7 @@ RTE_INIT(ipsec_mb_register_zuc)
= &ipsec_mb_pmds[IPSEC_MB_PMD_TYPE_ZUC];
zuc_data->caps = zuc_capabilities;
- zuc_data->dequeue_burst = zuc_pmd_dequeue_burst;
+ zuc_data->dequeue_burst = aesni_mb_dequeue_burst;
zuc_data->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO
| RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING
| RTE_CRYPTODEV_FF_NON_BYTE_ALIGNED_DATA
@@ -399,7 +63,8 @@ RTE_INIT(ipsec_mb_register_zuc)
| RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;
zuc_data->internals_priv_size = 0;
zuc_data->ops = &zuc_pmd_ops;
- zuc_data->qp_priv_size = sizeof(struct zuc_qp_data);
- zuc_data->session_configure = zuc_session_configure;
- zuc_data->session_priv_size = sizeof(struct zuc_session);
+ zuc_data->qp_priv_size = sizeof(struct aesni_mb_qp_data);
+ zuc_data->session_configure = aesni_mb_session_configure;
+ zuc_data->session_priv_size =
+ sizeof(struct aesni_mb_session);
}
diff --git a/drivers/crypto/ipsec_mb/pmd_zuc_priv.h b/drivers/crypto/ipsec_mb/pmd_zuc_priv.h
index 76fd6758c2..a1e8e3aade 100644
--- a/drivers/crypto/ipsec_mb/pmd_zuc_priv.h
+++ b/drivers/crypto/ipsec_mb/pmd_zuc_priv.h
@@ -10,7 +10,6 @@
#define ZUC_IV_KEY_LENGTH 16
#define ZUC_DIGEST_LENGTH 4
#define ZUC_MAX_BURST 16
-#define BYTE_LEN 8
uint8_t pmd_driver_id_zuc;
@@ -63,23 +62,4 @@ static const struct rte_cryptodev_capabilities zuc_capabilities[] = {
RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
};
-/** ZUC private session structure */
-struct zuc_session {
- enum ipsec_mb_operation op;
- enum rte_crypto_auth_operation auth_op;
- uint8_t pKey_cipher[ZUC_IV_KEY_LENGTH];
- uint8_t pKey_hash[ZUC_IV_KEY_LENGTH];
- uint16_t cipher_iv_offset;
- uint16_t auth_iv_offset;
-} __rte_cache_aligned;
-
-struct zuc_qp_data {
-
- uint8_t temp_digest[ZUC_MAX_BURST][ZUC_DIGEST_LENGTH];
- /* *< Buffers used to store the digest generated
- * by the driver when verifying a digest provided
- * by the user (using authentication verify operation)
- */
-};
-
#endif /* _PMD_ZUC_PRIV_H_ */
--
2.25.1
^ permalink raw reply [flat|nested] 45+ messages in thread
* RE: [EXTERNAL] [PATCH v5 1/4] crypto/ipsec_mb: bump minimum IPsec Multi-buffer version
2024-03-05 17:42 ` [PATCH v5 1/4] crypto/ipsec_mb: bump minimum IPsec Multi-buffer version Brian Dooley
` (2 preceding siblings ...)
2024-03-05 17:42 ` [PATCH v5 4/4] crypto/ipsec_mb: unified IPsec MB interface Brian Dooley
@ 2024-03-05 19:11 ` Akhil Goyal
2024-03-05 19:50 ` Patrick Robb
2024-03-06 11:12 ` Power, Ciara
3 siblings, 2 replies; 45+ messages in thread
From: Akhil Goyal @ 2024-03-05 19:11 UTC (permalink / raw)
To: Brian Dooley, Kai Ji, Pablo de Lara, Patrick Robb, Aaron Conole
Cc: dev, Sivaramakrishnan Venkat, Ciara Power, Wathsala Vithanage,
thomas, David Marchand
> Subject: [EXTERNAL] [PATCH v5 1/4] crypto/ipsec_mb: bump minimum IPsec
> Multi-buffer version
>
> From: Sivaramakrishnan Venkat <venkatx.sivaramakrishnan@intel.com>
>
> SW PMDs increment IPsec Multi-buffer version to 1.4.
> A minimum IPsec Multi-buffer version of 1.4 or greater is now required.
>
> Signed-off-by: Sivaramakrishnan Venkat <venkatx.sivaramakrishnan@intel.com>
> Acked-by: Ciara Power <ciara.power@intel.com>
> Acked-by: Pablo de Lara <pablo.de.lara.guarch@intel.com>
> Acked-by: Wathsala Vithanage <wathsala.vithanage@arm.com>
please check these:
https://github.com/ovsrobot/dpdk/actions/runs/8160942783/job/22308639670#step:19:19411
Error: cannot find librte_crypto_ipsec_mb.so.24.0 in install
You need to get this fixed or else CI would fail for every patch once this series is applied.
And this is also failing http://mails.dpdk.org/archives/test-report/2024-March/601301.html
These need to be fixed in CI infra.
^ permalink raw reply [flat|nested] 45+ messages in thread
* Re: [EXTERNAL] [PATCH v5 1/4] crypto/ipsec_mb: bump minimum IPsec Multi-buffer version
2024-03-05 19:11 ` [EXTERNAL] [PATCH v5 1/4] crypto/ipsec_mb: bump minimum IPsec Multi-buffer version Akhil Goyal
@ 2024-03-05 19:50 ` Patrick Robb
2024-03-05 23:30 ` Patrick Robb
2024-03-06 11:12 ` Power, Ciara
1 sibling, 1 reply; 45+ messages in thread
From: Patrick Robb @ 2024-03-05 19:50 UTC (permalink / raw)
To: Akhil Goyal
Cc: Brian Dooley, Kai Ji, Pablo de Lara, Aaron Conole, dev,
Sivaramakrishnan Venkat, Ciara Power, Wathsala Vithanage, thomas,
David Marchand
On Tue, Mar 5, 2024 at 2:11 PM Akhil Goyal <gakhil@marvell.com> wrote:
>
> > Subject: [EXTERNAL] [PATCH v5 1/4] crypto/ipsec_mb: bump minimum IPsec
> > Multi-buffer version
> >
> > From: Sivaramakrishnan Venkat <venkatx.sivaramakrishnan@intel.com>
> >
> > SW PMDs increment IPsec Multi-buffer version to 1.4.
> > A minimum IPsec Multi-buffer version of 1.4 or greater is now required.
> >
> > Signed-off-by: Sivaramakrishnan Venkat <venkatx.sivaramakrishnan@intel.com>
> > Acked-by: Ciara Power <ciara.power@intel.com>
> > Acked-by: Pablo de Lara <pablo.de.lara.guarch@intel.com>
> > Acked-by: Wathsala Vithanage <wathsala.vithanage@arm.com>
> please check these:
> https://github.com/ovsrobot/dpdk/actions/runs/8160942783/job/22308639670#step:19:19411
> Error: cannot find librte_crypto_ipsec_mb.so.24.0 in install
Aaron has some questions about whether the upgrade is appropriate or
not in another thread. If/when those are resolved, I think he will be
able to upgrade the robot to 1.4.
> You need to get this fixed or else CI would fail for every patch once this series is applied.
> And this is also failing http://mails.dpdk.org/archives/test-report/2024-March/601301.html
> These need to be fixed in CI infra.
For context, we had upgraded a couple weeks ago to tip of main on the
arm ipsec-mb repo, and the v4 of this series was passing at that
point.
https://patchwork.dpdk.org/project/dpdk/list/?series=31200&state=%2A&archive=both
I see that there have been some subsequent commits since then and I
see in the other thread Wathsala created a SECLIB-IPSEC-2024.03.05 tag
today. We can rebuild from that tag right now and issue reruns. If the
updated arm repo resolves the issues seen here, you should see the IOL
CI results go green tonight.
^ permalink raw reply [flat|nested] 45+ messages in thread
* Re: [EXTERNAL] [PATCH v5 1/4] crypto/ipsec_mb: bump minimum IPsec Multi-buffer version
2024-03-05 19:50 ` Patrick Robb
@ 2024-03-05 23:30 ` Patrick Robb
2024-03-06 3:57 ` Patrick Robb
0 siblings, 1 reply; 45+ messages in thread
From: Patrick Robb @ 2024-03-05 23:30 UTC (permalink / raw)
To: Akhil Goyal
Cc: Brian Dooley, Kai Ji, Pablo de Lara, Aaron Conole, dev,
Sivaramakrishnan Venkat, Ciara Power, Wathsala Vithanage, thomas,
David Marchand
Recheck-request: iol-unit-arm64-testing
^ permalink raw reply [flat|nested] 45+ messages in thread
* Re: [EXTERNAL] [PATCH v5 1/4] crypto/ipsec_mb: bump minimum IPsec Multi-buffer version
2024-03-05 23:30 ` Patrick Robb
@ 2024-03-06 3:57 ` Patrick Robb
0 siblings, 0 replies; 45+ messages in thread
From: Patrick Robb @ 2024-03-06 3:57 UTC (permalink / raw)
To: Akhil Goyal
Cc: Brian Dooley, Kai Ji, Pablo de Lara, Aaron Conole, dev,
Sivaramakrishnan Venkat, Ciara Power, Wathsala Vithanage, thomas,
David Marchand
On Tue, Mar 5, 2024 at 6:30 PM Patrick Robb <probb@iol.unh.edu> wrote:
>
> Recheck-request: iol-unit-arm64-testing
https://mails.dpdk.org/archives/test-report/2024-March/601582.html
Hello. I wanted to flag this as still failing on Arm after rerunning the
testing with the new tag Wathsala published today. We did it via a
Debian 12 container image, rebuilt today with all the ipsec dependencies
and the Arm ipsec-mb install. We simply:
git clone --depth 1 --branch SECLIB-IPSEC-2024.03.05 https://git.gitlab.arm.com/arm-reference-solutions/ipsec-mb.git
make -j $(nproc)
make install
then build DPDK.
Wathsala, what do you think? Might I be missing something here?
^ permalink raw reply [flat|nested] 45+ messages in thread
* RE: [EXTERNAL] [PATCH v5 1/4] crypto/ipsec_mb: bump minimum IPsec Multi-buffer version
2024-03-05 19:11 ` [EXTERNAL] [PATCH v5 1/4] crypto/ipsec_mb: bump minimum IPsec Multi-buffer version Akhil Goyal
2024-03-05 19:50 ` Patrick Robb
@ 2024-03-06 11:12 ` Power, Ciara
2024-03-06 14:59 ` Patrick Robb
1 sibling, 1 reply; 45+ messages in thread
From: Power, Ciara @ 2024-03-06 11:12 UTC (permalink / raw)
To: Akhil Goyal, Dooley, Brian, Ji, Kai, De Lara Guarch, Pablo,
Patrick Robb, Aaron Conole
Cc: dev, Sivaramakrishnan, VenkatX, Wathsala Vithanage, thomas,
Marchand, David
> -----Original Message-----
> From: Akhil Goyal <gakhil@marvell.com>
> Sent: Tuesday, March 5, 2024 7:12 PM
> To: Dooley, Brian <brian.dooley@intel.com>; Ji, Kai <kai.ji@intel.com>; De Lara
> Guarch, Pablo <pablo.de.lara.guarch@intel.com>; Patrick Robb
> <probb@iol.unh.edu>; Aaron Conole <aconole@redhat.com>
> Cc: dev@dpdk.org; Sivaramakrishnan, VenkatX
> <venkatx.sivaramakrishnan@intel.com>; Power, Ciara <ciara.power@intel.com>;
> Wathsala Vithanage <wathsala.vithanage@arm.com>; thomas@monjalon.net;
> Marchand, David <david.marchand@redhat.com>
> Subject: RE: [EXTERNAL] [PATCH v5 1/4] crypto/ipsec_mb: bump minimum IPsec
> Multi-buffer version
>
> > Subject: [EXTERNAL] [PATCH v5 1/4] crypto/ipsec_mb: bump minimum IPsec
> > Multi-buffer version
> >
> > From: Sivaramakrishnan Venkat <venkatx.sivaramakrishnan@intel.com>
> >
> > SW PMDs increment IPsec Multi-buffer version to 1.4.
> > A minimum IPsec Multi-buffer version of 1.4 or greater is now required.
> >
> > Signed-off-by: Sivaramakrishnan Venkat
> > <venkatx.sivaramakrishnan@intel.com>
> > Acked-by: Ciara Power <ciara.power@intel.com>
> > Acked-by: Pablo de Lara <pablo.de.lara.guarch@intel.com>
> > Acked-by: Wathsala Vithanage <wathsala.vithanage@arm.com>
> please check these:
> https://github.com/ovsrobot/dpdk/actions/runs/8160942783/job/223086396
> 70#step:19:19411
> Error: cannot find librte_crypto_ipsec_mb.so.24.0 in install You need to get this
> fixed or else CI would fail for every patch once this series is applied.
I am having trouble reproducing this one.
I have run the commands below, taken from the CI log, both before and after applying the patches, with ipsec-mb v1.2 on the system as in CI.
meson configure build -Denable_docs=true -Dexamples=all -Dplatform=generic -Ddefault_library=shared -Dbuildtype=debug -Dcheck_includes=true -Dlibdir=lib -Dwerror=true
meson install -C build
ninja -C build
It compiles OK both times: before the patches it compiles the ipsec-mb PMDs, and after the patches are applied it skips compiling the PMDs, as expected.
I am wondering, could this error be due to the ABI reference/install comparison?
Maybe the reference has the ipsec_mb .so file from a build that supported it, and the check can't find the equivalent in the new install because the PMD is no longer compiled:
+ devtools/check-abi.sh reference install
Error: cannot find librte_crypto_ipsec_mb.so.24.0 in install
Aaron, could that be the case?
Or, maybe my steps to reproduce the build setup are incorrect?
> And this is also failing http://mails.dpdk.org/archives/test-report/2024-
> March/601301.html
> These need to be fixed in CI infra.
This function that throws the error is available in the recently tagged 1.4 equivalent Arm repo, so I am unsure why it can't find it.
Could there be some old installed ipsec-mb version in the environment that is being picked up by DPDK?
Sometimes the meson configure step will pick up the correct ipsec-mb version, but then ninja step links to an older version that still exists and hadn't been uninstalled previously.
Not sure if that could be the case for the CI container though - Patrick maybe you can verify there are no 1.3 or less versions on system that could be being picked up:
I usually use something like: find /usr -name libIPSec_MB.so\*
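If it helps, a quick standalone check like the one below (just a sketch, not part of the series; it assumes imb_get_version_str() is available in the build being linked, and on the Arm fork the header is <ipsec-mb.h> rather than <intel-ipsec-mb.h>) will show whether the header the compiler sees and the library that actually gets linked agree:

/* version_check.c - spot a header/library version mismatch.
 * Build with something like: gcc version_check.c -lIPSec_MB -o version_check
 */
#include <stdio.h>
#include <intel-ipsec-mb.h>

int main(void)
{
	/* Version the compiler saw at build time (from the header). */
	printf("header version:  %s\n", IMB_VERSION_STR);
	/* Version reported by the library picked up at link/run time. */
	printf("library version: %s\n", imb_get_version_str());
	return 0;
}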
Thanks for the help,
Ciara
^ permalink raw reply [flat|nested] 45+ messages in thread
* Re: [EXTERNAL] [PATCH v5 1/4] crypto/ipsec_mb: bump minimum IPsec Multi-buffer version
2024-03-06 11:12 ` Power, Ciara
@ 2024-03-06 14:59 ` Patrick Robb
2024-03-06 15:29 ` Power, Ciara
0 siblings, 1 reply; 45+ messages in thread
From: Patrick Robb @ 2024-03-06 14:59 UTC (permalink / raw)
To: Power, Ciara
Cc: Akhil Goyal, Dooley, Brian, Ji, Kai, De Lara Guarch, Pablo,
Aaron Conole, dev, Sivaramakrishnan, VenkatX, Wathsala Vithanage,
thomas, Marchand, David
On Wed, Mar 6, 2024 at 6:12 AM Power, Ciara <ciara.power@intel.com> wrote:
>
>
> > And this is also failing http://mails.dpdk.org/archives/test-report/2024-
> > March/601301.html
> > These need to be fixed in CI infra.
>
> This function that throws the error is available in the recently tagged 1.4 equivalent Arm repo, so I am unsure why it can't find it.
> Could there be some old installed ipsec-mb version in the environment that is being picked up by DPDK?
> Sometimes the meson configure step will pick up the correct ipsec-mb version, but then ninja step links to an older version that still exists and hadn't been uninstalled previously.
> Not sure if that could be the case for the CI container though - Patrick maybe you can verify there are no 1.3 or less versions on system that could be being picked up:
> I usually use something like: find /usr -name libIPSec_MB.so\*
We are building the arm ipsec environment from a base debian 12 image.
1. Install NASM
2. Install ipsec 1.4 from the new ARM tag.
3. run DPDK build
Thank you for providing the find command to check the ipsec-mb version.
root@f968452d8612:/# find /usr -name libIPSec_MB.so\*
/usr/lib/libIPSec_MB.so
/usr/lib/libIPSec_MB.so.1.4.0
/usr/lib/libIPSec_MB.so.1
I assume my reading of the above is correct that only 1.4 is
installed. Let me know if it's otherwise though. Thanks!
>
>
> Thanks for the help,
> Ciara
>
^ permalink raw reply [flat|nested] 45+ messages in thread
* RE: [EXTERNAL] [PATCH v5 1/4] crypto/ipsec_mb: bump minimum IPsec Multi-buffer version
2024-03-06 14:59 ` Patrick Robb
@ 2024-03-06 15:29 ` Power, Ciara
2024-03-07 16:21 ` Wathsala Wathawana Vithanage
0 siblings, 1 reply; 45+ messages in thread
From: Power, Ciara @ 2024-03-06 15:29 UTC (permalink / raw)
To: Patrick Robb
Cc: Akhil Goyal, Dooley, Brian, Ji, Kai, De Lara Guarch, Pablo,
Aaron Conole, dev, Sivaramakrishnan, VenkatX, Wathsala Vithanage,
thomas, Marchand, David
> -----Original Message-----
> From: Patrick Robb <probb@iol.unh.edu>
> Sent: Wednesday, March 6, 2024 2:59 PM
> To: Power, Ciara <ciara.power@intel.com>
> Cc: Akhil Goyal <gakhil@marvell.com>; Dooley, Brian <brian.dooley@intel.com>;
> Ji, Kai <kai.ji@intel.com>; De Lara Guarch, Pablo
> <pablo.de.lara.guarch@intel.com>; Aaron Conole <aconole@redhat.com>;
> dev@dpdk.org; Sivaramakrishnan, VenkatX
> <venkatx.sivaramakrishnan@intel.com>; Wathsala Vithanage
> <wathsala.vithanage@arm.com>; thomas@monjalon.net; Marchand, David
> <david.marchand@redhat.com>
> Subject: Re: [EXTERNAL] [PATCH v5 1/4] crypto/ipsec_mb: bump minimum IPsec
> Multi-buffer version
>
> On Wed, Mar 6, 2024 at 6:12 AM Power, Ciara <ciara.power@intel.com> wrote:
> >
> >
> > > And this is also failing
> > > http://mails.dpdk.org/archives/test-report/2024-
> > > March/601301.html
> > > These need to be fixed in CI infra.
> >
> > This function that throws the error is available in the recently tagged 1.4
> equivalent Arm repo, so I am unsure why it can't find it.
> > Could there be some old installed ipsec-mb version in the environment that is
> being picked up by DPDK?
> > Sometimes the meson configure step will pick up the correct ipsec-mb version,
> but then ninja step links to an older version that still exists and hadn't been
> uninstalled previously.
> > Not sure if that could be the case for the CI container though - Patrick maybe
> you can verify there are no 1.3 or less versions on system that could be being
> picked up:
> > I usually use something like: find /usr -name libIPSec_MB.so\*
> We are building the arm ipsec environment from a base debian 12 image.
> 1. Install NASM
> 2. Install ipsec 1.4 from the new ARM tag.
> 3. run DPDK build
>
> Thank you for providing the find command to check ipsec version.
>
> root@f968452d8612:/# find /usr -name libIPSec_MB.so\*
> /usr/lib/libIPSec_MB.so
> /usr/lib/libIPSec_MB.so.1.4.0
> /usr/lib/libIPSec_MB.so.1
>
> I assume my reading of the above is correct that only 1.4 is installed. Let me
> know if it's otherwise though. Thanks!
Thanks for checking that Patrick - looks fine to me, only 1.4 is there.
If the correct version is being installed and picked up, maybe there is something missing for that function definition in arm-ipsec-mb repo.
Wathsala, can you check that please?
For x86, we define the function in the header: lib/intel-ipsec-mb.h
And the implementation is in the C file: lib/x86_64/hmac_ipad_opad.c
Thanks,
Ciara
>
> >
> >
> > Thanks for the help,
> > Ciara
> >
^ permalink raw reply [flat|nested] 45+ messages in thread
* RE: [EXTERNAL] [PATCH v5 1/4] crypto/ipsec_mb: bump minimum IPsec Multi-buffer version
2024-03-06 15:29 ` Power, Ciara
@ 2024-03-07 16:21 ` Wathsala Wathawana Vithanage
2024-03-08 16:05 ` Power, Ciara
2024-03-12 16:26 ` Wathsala Wathawana Vithanage
0 siblings, 2 replies; 45+ messages in thread
From: Wathsala Wathawana Vithanage @ 2024-03-07 16:21 UTC (permalink / raw)
To: Power, Ciara, Patrick Robb
Cc: Akhil Goyal, Dooley, Brian, Ji, Kai, De Lara Guarch, Pablo,
Aaron Conole, dev, Sivaramakrishnan, VenkatX, thomas, Marchand,
David, nd
>
> If the correct version is being installed and picked up, maybe there is
> something missing for that function definition in arm-ipsec-mb repo.
> Wathsala, can you check that please?
We are working on reproducing this issue. Will update asap.
^ permalink raw reply [flat|nested] 45+ messages in thread
* RE: [EXTERNAL] [PATCH v5 1/4] crypto/ipsec_mb: bump minimum IPsec Multi-buffer version
2024-03-07 16:21 ` Wathsala Wathawana Vithanage
@ 2024-03-08 16:05 ` Power, Ciara
2024-03-12 16:26 ` Wathsala Wathawana Vithanage
1 sibling, 0 replies; 45+ messages in thread
From: Power, Ciara @ 2024-03-08 16:05 UTC (permalink / raw)
To: Wathsala Wathawana Vithanage, Patrick Robb
Cc: Akhil Goyal, Dooley, Brian, Ji, Kai, De Lara Guarch, Pablo,
Aaron Conole, dev, Sivaramakrishnan, VenkatX, thomas, Marchand,
David, nd, Kantecki, Tomasz
> -----Original Message-----
> From: Wathsala Wathawana Vithanage <wathsala.vithanage@arm.com>
> Sent: Thursday, March 7, 2024 4:21 PM
> To: Power, Ciara <ciara.power@intel.com>; Patrick Robb <probb@iol.unh.edu>
> Cc: Akhil Goyal <gakhil@marvell.com>; Dooley, Brian <brian.dooley@intel.com>;
> Ji, Kai <kai.ji@intel.com>; De Lara Guarch, Pablo
> <pablo.de.lara.guarch@intel.com>; Aaron Conole <aconole@redhat.com>;
> dev@dpdk.org; Sivaramakrishnan, VenkatX
> <venkatx.sivaramakrishnan@intel.com>; thomas@monjalon.net; Marchand,
> David <david.marchand@redhat.com>; nd <nd@arm.com>
> Subject: RE: [EXTERNAL] [PATCH v5 1/4] crypto/ipsec_mb: bump minimum IPsec
> Multi-buffer version
>
> >
> > If the correct version is being installed and picked up, maybe there
> > is something missing for that function definition in arm-ipsec-mb repo.
> > Wathsala, can you check that please?
>
> We are working on reproducing this issue. Will update asap.
Thanks Wathsala.
I think, for a basic compile to work against both repos, any function declared in the ipsec-mb header needs to be defined in a C file to avoid link issues.
But I understand only SNOW3G + ZUC are the focus for the Arm ipsec-mb repo, so an HMAC-related function like the one throwing the error isn't used.
Perhaps having empty stubs for all other functions, such as imb_hmac_ipad_opad, would be sufficient to allow compiling, because the PMD expects the symbol to exist somewhere in the library - something along the lines of the sketch below.
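As an illustration only (the exact prototype should be copied from the ipsec-mb header; the signature here is approximate and the placement is hypothetical):

/* Hypothetical empty stub for the Arm fork, so the AESNI_MB PMD can link.
 * The prototype must match the one declared in the ipsec-mb header.
 */
#include <ipsec-mb.h>

void
imb_hmac_ipad_opad(IMB_MGR *mb_mgr, const IMB_HASH_ALG sha_type,
		   const void *pkey, const size_t key_len,
		   void *ipad_hash, void *opad_hash)
{
	/* Deliberately empty: HMAC is not exercised on this fork,
	 * the symbol only needs to exist for linking. */
	(void)mb_mgr;
	(void)sha_type;
	(void)pkey;
	(void)key_len;
	(void)ipad_hash;
	(void)opad_hash;
}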
Thanks,
Ciara
^ permalink raw reply [flat|nested] 45+ messages in thread
* [PATCH v6 1/5] ci: replace IPsec-mb package install
2023-12-12 15:36 [PATCH v1] crypto/ipsec_mb: unified IPsec MB interface Brian Dooley
` (3 preceding siblings ...)
2024-03-05 17:42 ` [PATCH v5 1/4] crypto/ipsec_mb: bump minimum IPsec Multi-buffer version Brian Dooley
@ 2024-03-12 13:50 ` Brian Dooley
2024-03-12 13:50 ` [PATCH v6 2/5] crypto/ipsec_mb: bump minimum IPsec Multi-buffer version Brian Dooley
` (6 more replies)
2024-03-14 10:37 ` [PATCH v7 1/2] doc: remove outdated version details Brian Dooley
5 siblings, 7 replies; 45+ messages in thread
From: Brian Dooley @ 2024-03-12 13:50 UTC (permalink / raw)
To: Aaron Conole, Michael Santana
Cc: dev, gakhil, pablo.de.lara.guarch, probb, wathsala.vithanage,
Ciara Power
From: Ciara Power <ciara.power@intel.com>
The IPsec-mb version that is available through current package
managers is 1.2.
This release moves the minimum required IPsec-mb version for IPsec-mb
based SW PMDs to 1.4.
To compile these PMDs, a manual step is added to install IPsec-mb v1.4
using dpkg.
Signed-off-by: Ciara Power <ciara.power@intel.com>
---
.github/workflows/build.yml | 25 ++++++++++++++++++++++---
1 file changed, 22 insertions(+), 3 deletions(-)
diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml
index 776fbf6f30..ed44b1f730 100644
--- a/.github/workflows/build.yml
+++ b/.github/workflows/build.yml
@@ -106,9 +106,15 @@ jobs:
run: sudo apt update || true
- name: Install packages
run: sudo apt install -y ccache libarchive-dev libbsd-dev libbpf-dev
- libfdt-dev libibverbs-dev libipsec-mb-dev libisal-dev libjansson-dev
+ libfdt-dev libibverbs-dev libisal-dev libjansson-dev
libnuma-dev libpcap-dev libssl-dev ninja-build pkg-config python3-pip
python3-pyelftools python3-setuptools python3-wheel zlib1g-dev
+ - name: Install ipsec-mb library
+ run: |
+ wget "https://launchpad.net/ubuntu/+archive/primary/+files/libipsec-mb-dev_1.4-3_amd64.deb"
+ wget "https://launchpad.net/ubuntu/+archive/primary/+files/libipsec-mb1_1.4-3_amd64.deb"
+ sudo dpkg -i libipsec-mb1_1.4-3_amd64.deb
+ sudo dpkg -i libipsec-mb-dev_1.4-3_amd64.deb
- name: Install libabigail build dependencies if no cache is available
if: env.ABI_CHECKS == 'true' && steps.libabigail-cache.outputs.cache-hit != 'true'
run: sudo apt install -y autoconf automake libdw-dev libtool libxml2-dev
@@ -187,11 +193,18 @@ jobs:
run: docker exec -i dpdk dnf update -y
- name: Install packages
if: steps.image_cache.outputs.cache-hit != 'true'
- run: docker exec -i dpdk dnf install -y ccache intel-ipsec-mb-devel
+ run: docker exec -i dpdk dnf install -y ccache
isa-l-devel jansson-devel libarchive-devel libatomic libbsd-devel
libbpf-devel libfdt-devel libpcap-devel libxdp-devel ninja-build
numactl-devel openssl-devel python3-pip python3-pyelftools
python3-setuptools python3-wheel rdma-core-devel zlib-devel
+ - name: Install ipsec-mb library
+ if: steps.image_cache.outputs.cache-hit != 'true'
+ run: |
+ wget "https://launchpad.net/ubuntu/+archive/primary/+files/libipsec-mb-dev_1.4-3_amd64.deb"
+ wget "https://launchpad.net/ubuntu/+archive/primary/+files/libipsec-mb1_1.4-3_amd64.deb"
+ sudo dpkg -i libipsec-mb1_1.4-3_amd64.deb
+ sudo dpkg -i libipsec-mb-dev_1.4-3_amd64.deb
- name: Save image in cache
if: steps.image_cache.outputs.cache-hit != 'true'
run: |
@@ -262,12 +275,18 @@ jobs:
- name: Update
run: docker exec -i dpdk dnf update -y || true
- name: Install packages
- run: docker exec -i dpdk dnf install -y ccache intel-ipsec-mb-devel
+ run: docker exec -i dpdk dnf install -y ccache
isa-l-devel jansson-devel libarchive-devel libatomic libbsd-devel
libbpf-devel libfdt-devel libpcap-devel libxdp-devel ninja-build
numactl-devel openssl-devel python3-pip python3-pyelftools
python3-setuptools python3-wheel rdma-core-devel zlib-devel
${{ matrix.config.compiler }}
+ - name: Install ipsec-mb library
+ run: |
+ wget "https://launchpad.net/ubuntu/+archive/primary/+files/libipsec-mb-dev_1.4-3_amd64.deb"
+ wget "https://launchpad.net/ubuntu/+archive/primary/+files/libipsec-mb1_1.4-3_amd64.deb"
+ sudo dpkg -i libipsec-mb1_1.4-3_amd64.deb
+ sudo dpkg -i libipsec-mb-dev_1.4-3_amd64.deb
- name: Run setup
run: docker exec -i dpdk .ci/linux-setup.sh
- name: Build
--
2.25.1
^ permalink raw reply [flat|nested] 45+ messages in thread
* [PATCH v6 2/5] crypto/ipsec_mb: bump minimum IPsec Multi-buffer version
2024-03-12 13:50 ` [PATCH v6 1/5] ci: replace IPsec-mb package install Brian Dooley
@ 2024-03-12 13:50 ` Brian Dooley
2024-03-12 13:50 ` [PATCH v6 3/5] doc: remove outdated version details Brian Dooley
` (5 subsequent siblings)
6 siblings, 0 replies; 45+ messages in thread
From: Brian Dooley @ 2024-03-12 13:50 UTC (permalink / raw)
To: Kai Ji, Pablo de Lara
Cc: dev, gakhil, aconole, probb, wathsala.vithanage,
Sivaramakrishnan Venkat, Ciara Power
From: Sivaramakrishnan Venkat <venkatx.sivaramakrishnan@intel.com>
SW PMDs increment IPsec Multi-buffer version to 1.4.
A minimum IPsec Multi-buffer version of 1.4 or greater is now required.
Signed-off-by: Sivaramakrishnan Venkat <venkatx.sivaramakrishnan@intel.com>
Acked-by: Ciara Power <ciara.power@intel.com>
Acked-by: Pablo de Lara <pablo.de.lara.guarch@intel.com>
Acked-by: Wathsala Vithanage <wathsala.vithanage@arm.com>
---
v5:
- Rebased and added to patchset
v4:
- 24.03 release notes updated to bump minimum IPSec Multi-buffer
version to 1.4 for SW PMDs.
v2:
- Removed unused macro in ipsec_mb_ops.c
- set_gcm_job() modified correctly to keep multi_sgl_job line
- Updated SW PMDs documentation for minimum IPSec Multi-buffer version
- Updated commit message, and patch title.
---
doc/guides/cryptodevs/aesni_gcm.rst | 3 +-
doc/guides/cryptodevs/aesni_mb.rst | 3 +-
doc/guides/cryptodevs/chacha20_poly1305.rst | 3 +-
doc/guides/cryptodevs/kasumi.rst | 3 +-
doc/guides/cryptodevs/snow3g.rst | 3 +-
doc/guides/cryptodevs/zuc.rst | 3 +-
doc/guides/rel_notes/release_24_03.rst | 4 +
drivers/crypto/ipsec_mb/ipsec_mb_ops.c | 23 ---
drivers/crypto/ipsec_mb/meson.build | 2 +-
drivers/crypto/ipsec_mb/pmd_aesni_mb.c | 165 --------------------
drivers/crypto/ipsec_mb/pmd_aesni_mb_priv.h | 9 --
11 files changed, 17 insertions(+), 204 deletions(-)
diff --git a/doc/guides/cryptodevs/aesni_gcm.rst b/doc/guides/cryptodevs/aesni_gcm.rst
index f5773426ee..dc665e536c 100644
--- a/doc/guides/cryptodevs/aesni_gcm.rst
+++ b/doc/guides/cryptodevs/aesni_gcm.rst
@@ -85,7 +85,8 @@ and the external crypto libraries supported by them:
18.05 - 19.02 Multi-buffer library 0.49 - 0.52
19.05 - 20.08 Multi-buffer library 0.52 - 0.55
20.11 - 21.08 Multi-buffer library 0.53 - 1.3*
- 21.11+ Multi-buffer library 1.0 - 1.5*
+ 21.11 - 23.11 Multi-buffer library 1.0 - 1.5*
+ 24.03+ Multi-buffer library 1.4 - 1.5*
============= ================================
\* Multi-buffer library 1.0 or newer only works for Meson but not Make build system.
diff --git a/doc/guides/cryptodevs/aesni_mb.rst b/doc/guides/cryptodevs/aesni_mb.rst
index b2e74ba417..5d670ee237 100644
--- a/doc/guides/cryptodevs/aesni_mb.rst
+++ b/doc/guides/cryptodevs/aesni_mb.rst
@@ -146,7 +146,8 @@ and the Multi-Buffer library version supported by them:
19.05 - 19.08 0.52
19.11 - 20.08 0.52 - 0.55
20.11 - 21.08 0.53 - 1.3*
- 21.11+ 1.0 - 1.5*
+ 21.11 - 23.11 1.0 - 1.5*
+ 24.03+ 1.4 - 1.5*
============== ============================
\* Multi-buffer library 1.0 or newer only works for Meson but not Make build system.
diff --git a/doc/guides/cryptodevs/chacha20_poly1305.rst b/doc/guides/cryptodevs/chacha20_poly1305.rst
index 9d4bf86cf1..c32866b301 100644
--- a/doc/guides/cryptodevs/chacha20_poly1305.rst
+++ b/doc/guides/cryptodevs/chacha20_poly1305.rst
@@ -72,7 +72,8 @@ and the external crypto libraries supported by them:
============= ================================
DPDK version Crypto library version
============= ================================
- 21.11+ Multi-buffer library 1.0-1.5*
+ 21.11 - 23.11 Multi-buffer library 1.0-1.5*
+ 24.03+ Multi-buffer library 1.4-1.5*
============= ================================
\* Multi-buffer library 1.0 or newer only works for Meson but not Make build system.
diff --git a/doc/guides/cryptodevs/kasumi.rst b/doc/guides/cryptodevs/kasumi.rst
index 0989054875..a8f4e6b204 100644
--- a/doc/guides/cryptodevs/kasumi.rst
+++ b/doc/guides/cryptodevs/kasumi.rst
@@ -87,7 +87,8 @@ and the external crypto libraries supported by them:
============= ================================
16.11 - 19.11 LibSSO KASUMI
20.02 - 21.08 Multi-buffer library 0.53 - 1.3*
- 21.11+ Multi-buffer library 1.0 - 1.5*
+ 21.11 - 23.11 Multi-buffer library 1.0 - 1.5*
+ 24.03+ Multi-buffer library 1.4 - 1.5*
============= ================================
\* Multi-buffer library 1.0 or newer only works for Meson but not Make build system.
diff --git a/doc/guides/cryptodevs/snow3g.rst b/doc/guides/cryptodevs/snow3g.rst
index 3392932653..46863462e5 100644
--- a/doc/guides/cryptodevs/snow3g.rst
+++ b/doc/guides/cryptodevs/snow3g.rst
@@ -96,7 +96,8 @@ and the external crypto libraries supported by them:
============= ================================
16.04 - 19.11 LibSSO SNOW3G
20.02 - 21.08 Multi-buffer library 0.53 - 1.3*
- 21.11+ Multi-buffer library 1.0 - 1.5*
+ 21.11 - 23.11 Multi-buffer library 1.0 - 1.5*
+ 24.03+ Multi-buffer library 1.4 - 1.5*
============= ================================
\* Multi-buffer library 1.0 or newer only works for Meson but not Make build system.
diff --git a/doc/guides/cryptodevs/zuc.rst b/doc/guides/cryptodevs/zuc.rst
index a414b5ad2c..51867e1a16 100644
--- a/doc/guides/cryptodevs/zuc.rst
+++ b/doc/guides/cryptodevs/zuc.rst
@@ -95,7 +95,8 @@ and the external crypto libraries supported by them:
============= ================================
16.11 - 19.11 LibSSO ZUC
20.02 - 21.08 Multi-buffer library 0.53 - 1.3*
- 21.11+ Multi-buffer library 1.0 - 1.5*
+ 21.11 - 23.11 Multi-buffer library 1.0 - 1.5*
+ 24.03+ Multi-buffer library 1.4 - 1.5*
============= ================================
\* Multi-buffer library 1.0 or newer only works for Meson but not Make build system.
diff --git a/doc/guides/rel_notes/release_24_03.rst b/doc/guides/rel_notes/release_24_03.rst
index 78590c047b..8fa8cf1dd6 100644
--- a/doc/guides/rel_notes/release_24_03.rst
+++ b/doc/guides/rel_notes/release_24_03.rst
@@ -144,6 +144,10 @@ New Features
* Added support for GEN LCE (1454) device, for AES-GCM only.
* Enabled support for virtual QAT - vQAT (0da5) devices in QAT crypto driver.
+* **Updated ipsec_mb crypto driver.**
+
+ * Bump minimum IPSec Multi-buffer version to 1.4 for SW PMDs.
+
* **Updated Marvell cnxk crypto driver.**
* Added support for Rx inject in crypto_cn10k.
diff --git a/drivers/crypto/ipsec_mb/ipsec_mb_ops.c b/drivers/crypto/ipsec_mb/ipsec_mb_ops.c
index f21f9cc5a0..d25c671d7d 100644
--- a/drivers/crypto/ipsec_mb/ipsec_mb_ops.c
+++ b/drivers/crypto/ipsec_mb/ipsec_mb_ops.c
@@ -11,7 +11,6 @@
#include "ipsec_mb_private.h"
-#define IMB_MP_REQ_VER_STR "1.1.0"
/** Configure device */
int
@@ -147,15 +146,10 @@ ipsec_mb_qp_release(struct rte_cryptodev *dev, uint16_t qp_id)
if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
rte_ring_free(rte_ring_lookup(qp->name));
-#if IMB_VERSION(1, 1, 0) > IMB_VERSION_NUM
- if (qp->mb_mgr)
- free_mb_mgr(qp->mb_mgr);
-#else
if (qp->mb_mgr_mz) {
rte_memzone_free(qp->mb_mgr_mz);
qp->mb_mgr = NULL;
}
-#endif
rte_free(qp);
dev->data->queue_pairs[qp_id] = NULL;
} else { /* secondary process */
@@ -211,7 +205,6 @@ static struct rte_ring
RING_F_SP_ENQ | RING_F_SC_DEQ);
}
-#if IMB_VERSION(1, 1, 0) <= IMB_VERSION_NUM
static IMB_MGR *
ipsec_mb_alloc_mgr_from_memzone(const struct rte_memzone **mb_mgr_mz,
const char *mb_mgr_mz_name)
@@ -244,7 +237,6 @@ ipsec_mb_alloc_mgr_from_memzone(const struct rte_memzone **mb_mgr_mz,
}
return mb_mgr;
}
-#endif
/** Setup a queue pair */
int
@@ -260,12 +252,6 @@ ipsec_mb_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
int ret;
if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
-#if IMB_VERSION(1, 1, 0) > IMB_VERSION_NUM
- IPSEC_MB_LOG(ERR, "The intel-ipsec-mb version (%s) does not support multiprocess,"
- "the minimum version required for this feature is %s.",
- IMB_VERSION_STR, IMB_MP_REQ_VER_STR);
- return -EINVAL;
-#endif
qp = dev->data->queue_pairs[qp_id];
if (qp == NULL) {
IPSEC_MB_LOG(DEBUG, "Secondary process setting up device qp.");
@@ -285,15 +271,11 @@ ipsec_mb_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
return -ENOMEM;
}
-#if IMB_VERSION(1, 1, 0) > IMB_VERSION_NUM
- qp->mb_mgr = alloc_init_mb_mgr();
-#else
char mz_name[IPSEC_MB_MAX_MZ_NAME];
snprintf(mz_name, sizeof(mz_name), "IMB_MGR_DEV_%d_QP_%d",
dev->data->dev_id, qp_id);
qp->mb_mgr = ipsec_mb_alloc_mgr_from_memzone(&(qp->mb_mgr_mz),
mz_name);
-#endif
if (qp->mb_mgr == NULL) {
ret = -ENOMEM;
goto qp_setup_cleanup;
@@ -330,15 +312,10 @@ ipsec_mb_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
return 0;
qp_setup_cleanup:
-#if IMB_VERSION(1, 1, 0) > IMB_VERSION_NUM
- if (qp->mb_mgr)
- free_mb_mgr(qp->mb_mgr);
-#else
if (rte_eal_process_type() == RTE_PROC_SECONDARY)
return ret;
if (qp->mb_mgr_mz)
rte_memzone_free(qp->mb_mgr_mz);
-#endif
rte_free(qp);
return ret;
}
diff --git a/drivers/crypto/ipsec_mb/meson.build b/drivers/crypto/ipsec_mb/meson.build
index 87bf965554..0c988d7411 100644
--- a/drivers/crypto/ipsec_mb/meson.build
+++ b/drivers/crypto/ipsec_mb/meson.build
@@ -7,7 +7,7 @@ if is_windows
subdir_done()
endif
-IMB_required_ver = '1.0.0'
+IMB_required_ver = '1.4.0'
IMB_header = '#include<intel-ipsec-mb.h>'
if arch_subdir == 'arm'
IMB_header = '#include<ipsec-mb.h>'
diff --git a/drivers/crypto/ipsec_mb/pmd_aesni_mb.c b/drivers/crypto/ipsec_mb/pmd_aesni_mb.c
index 4de4866cf3..b93267f1c3 100644
--- a/drivers/crypto/ipsec_mb/pmd_aesni_mb.c
+++ b/drivers/crypto/ipsec_mb/pmd_aesni_mb.c
@@ -210,13 +210,9 @@ aesni_mb_set_session_auth_parameters(const IMB_MGR *mb_mgr,
}
} else if (xform->auth.key.length == 32) {
sess->template_job.hash_alg = IMB_AUTH_ZUC256_EIA3_BITLEN;
-#if IMB_VERSION(1, 2, 0) < IMB_VERSION_NUM
if (sess->auth.req_digest_len != 4 &&
sess->auth.req_digest_len != 8 &&
sess->auth.req_digest_len != 16) {
-#else
- if (sess->auth.req_digest_len != 4) {
-#endif
IPSEC_MB_LOG(ERR, "Invalid digest size\n");
return -EINVAL;
}
@@ -845,11 +841,9 @@ aesni_mb_session_configure(IMB_MGR *mb_mgr,
}
}
-#if IMB_VERSION(1, 3, 0) < IMB_VERSION_NUM
sess->session_id = imb_set_session(mb_mgr, &sess->template_job);
sess->pid = getpid();
RTE_PER_LCORE(pid) = sess->pid;
-#endif
return 0;
}
@@ -982,9 +976,7 @@ aesni_mb_set_docsis_sec_session_parameters(
goto error_exit;
}
-#if IMB_VERSION(1, 3, 0) < IMB_VERSION_NUM
ipsec_sess->session_id = imb_set_session(mb_mgr, &ipsec_sess->template_job);
-#endif
error_exit:
free_mb_mgr(mb_mgr);
@@ -1239,7 +1231,6 @@ imb_lib_support_sgl_algo(IMB_CIPHER_MODE alg)
return 0;
}
-#if IMB_VERSION(1, 2, 0) < IMB_VERSION_NUM
static inline int
single_sgl_job(IMB_JOB *job, struct rte_crypto_op *op,
int oop, uint32_t offset, struct rte_mbuf *m_src,
@@ -1324,7 +1315,6 @@ single_sgl_job(IMB_JOB *job, struct rte_crypto_op *op,
job->sgl_io_segs = sgl_segs;
return 0;
}
-#endif
static inline int
multi_sgl_job(IMB_JOB *job, struct rte_crypto_op *op,
@@ -1394,9 +1384,7 @@ set_gcm_job(IMB_MGR *mb_mgr, IMB_JOB *job, const uint8_t sgl,
job->msg_len_to_hash_in_bytes = 0;
job->msg_len_to_cipher_in_bytes = 0;
job->cipher_start_src_offset_in_bytes = 0;
-#if IMB_VERSION(1, 3, 0) < IMB_VERSION_NUM
imb_set_session(mb_mgr, job);
-#endif
} else {
job->hash_start_src_offset_in_bytes =
op->sym->aead.data.offset;
@@ -1424,13 +1412,11 @@ set_gcm_job(IMB_MGR *mb_mgr, IMB_JOB *job, const uint8_t sgl,
job->src = NULL;
job->dst = NULL;
-#if IMB_VERSION(1, 2, 0) < IMB_VERSION_NUM
if (m_src->nb_segs <= MAX_NUM_SEGS)
return single_sgl_job(job, op, oop,
m_offset, m_src, m_dst,
qp_data->sgl_segs);
else
-#endif
return multi_sgl_job(job, op, oop,
m_offset, m_src, m_dst, mb_mgr);
} else {
@@ -1520,10 +1506,6 @@ set_mb_job_params(IMB_JOB *job, struct ipsec_mb_qp *qp,
uint8_t sgl = 0;
uint8_t lb_sgl = 0;
-#if IMB_VERSION(1, 3, 0) >= IMB_VERSION_NUM
- (void) pid;
-#endif
-
session = ipsec_mb_get_session_private(qp, op);
if (session == NULL) {
op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
@@ -1533,12 +1515,10 @@ set_mb_job_params(IMB_JOB *job, struct ipsec_mb_qp *qp,
const IMB_CIPHER_MODE cipher_mode =
session->template_job.cipher_mode;
-#if IMB_VERSION(1, 3, 0) < IMB_VERSION_NUM
if (session->pid != pid) {
memcpy(job, &session->template_job, sizeof(IMB_JOB));
imb_set_session(mb_mgr, job);
} else if (job->session_id != session->session_id)
-#endif
memcpy(job, &session->template_job, sizeof(IMB_JOB));
if (!op->sym->m_dst) {
@@ -1579,9 +1559,7 @@ set_mb_job_params(IMB_JOB *job, struct ipsec_mb_qp *qp,
job->u.GCM.ctx = &qp_data->gcm_sgl_ctx;
job->cipher_mode = IMB_CIPHER_GCM_SGL;
job->hash_alg = IMB_AUTH_GCM_SGL;
-#if IMB_VERSION(1, 3, 0) < IMB_VERSION_NUM
imb_set_session(mb_mgr, job);
-#endif
}
break;
case IMB_AUTH_AES_GMAC_128:
@@ -1606,9 +1584,7 @@ set_mb_job_params(IMB_JOB *job, struct ipsec_mb_qp *qp,
job->u.CHACHA20_POLY1305.ctx = &qp_data->chacha_sgl_ctx;
job->cipher_mode = IMB_CIPHER_CHACHA20_POLY1305_SGL;
job->hash_alg = IMB_AUTH_CHACHA20_POLY1305_SGL;
-#if IMB_VERSION(1, 3, 0) < IMB_VERSION_NUM
imb_set_session(mb_mgr, job);
-#endif
}
break;
default:
@@ -1804,13 +1780,11 @@ set_mb_job_params(IMB_JOB *job, struct ipsec_mb_qp *qp,
if (lb_sgl)
return handle_sgl_linear(job, op, m_offset, session);
-#if IMB_VERSION(1, 2, 0) < IMB_VERSION_NUM
if (m_src->nb_segs <= MAX_NUM_SEGS)
return single_sgl_job(job, op, oop,
m_offset, m_src, m_dst,
qp_data->sgl_segs);
else
-#endif
return multi_sgl_job(job, op, oop,
m_offset, m_src, m_dst, mb_mgr);
}
@@ -2130,7 +2104,6 @@ set_job_null_op(IMB_JOB *job, struct rte_crypto_op *op)
return job;
}
-#if IMB_VERSION(1, 2, 0) < IMB_VERSION_NUM
static uint16_t
aesni_mb_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
uint16_t nb_ops)
@@ -2263,144 +2236,6 @@ aesni_mb_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
return processed_jobs;
}
-#else
-
-/**
- * Process a completed IMB_JOB job and keep processing jobs until
- * get_completed_job return NULL
- *
- * @param qp Queue Pair to process
- * @param mb_mgr IMB_MGR to use
- * @param job IMB_JOB job
- * @param ops crypto ops to fill
- * @param nb_ops number of crypto ops
- *
- * @return
- * - Number of processed jobs
- */
-static unsigned
-handle_completed_jobs(struct ipsec_mb_qp *qp, IMB_MGR *mb_mgr,
- IMB_JOB *job, struct rte_crypto_op **ops,
- uint16_t nb_ops)
-{
- struct rte_crypto_op *op = NULL;
- uint16_t processed_jobs = 0;
-
- while (job != NULL) {
- op = post_process_mb_job(qp, job);
-
- if (op) {
- ops[processed_jobs++] = op;
- qp->stats.dequeued_count++;
- } else {
- qp->stats.dequeue_err_count++;
- break;
- }
- if (processed_jobs == nb_ops)
- break;
-
- job = IMB_GET_COMPLETED_JOB(mb_mgr);
- }
-
- return processed_jobs;
-}
-
-static inline uint16_t
-flush_mb_mgr(struct ipsec_mb_qp *qp, IMB_MGR *mb_mgr,
- struct rte_crypto_op **ops, uint16_t nb_ops)
-{
- int processed_ops = 0;
-
- /* Flush the remaining jobs */
- IMB_JOB *job = IMB_FLUSH_JOB(mb_mgr);
-
- if (job)
- processed_ops += handle_completed_jobs(qp, mb_mgr, job,
- &ops[processed_ops], nb_ops - processed_ops);
-
- return processed_ops;
-}
-
-static uint16_t
-aesni_mb_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
- uint16_t nb_ops)
-{
- struct ipsec_mb_qp *qp = queue_pair;
- IMB_MGR *mb_mgr = qp->mb_mgr;
- struct rte_crypto_op *op;
- IMB_JOB *job;
- int retval, processed_jobs = 0;
- pid_t pid = 0;
-
- if (unlikely(nb_ops == 0 || mb_mgr == NULL))
- return 0;
-
- uint8_t digest_idx = qp->digest_idx;
-
- do {
- /* Get next free mb job struct from mb manager */
- job = IMB_GET_NEXT_JOB(mb_mgr);
- if (unlikely(job == NULL)) {
- /* if no free mb job structs we need to flush mb_mgr */
- processed_jobs += flush_mb_mgr(qp, mb_mgr,
- &ops[processed_jobs],
- nb_ops - processed_jobs);
-
- if (nb_ops == processed_jobs)
- break;
-
- job = IMB_GET_NEXT_JOB(mb_mgr);
- }
-
- /*
- * Get next operation to process from ingress queue.
- * There is no need to return the job to the IMB_MGR
- * if there are no more operations to process, since the IMB_MGR
- * can use that pointer again in next get_next calls.
- */
- retval = rte_ring_dequeue(qp->ingress_queue, (void **)&op);
- if (retval < 0)
- break;
-
- if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION)
- retval = set_sec_mb_job_params(job, qp, op,
- &digest_idx);
- else
- retval = set_mb_job_params(job, qp, op,
- &digest_idx, mb_mgr, pid);
-
- if (unlikely(retval != 0)) {
- qp->stats.dequeue_err_count++;
- set_job_null_op(job, op);
- }
-
- /* Submit job to multi-buffer for processing */
-#ifdef RTE_LIBRTE_PMD_AESNI_MB_DEBUG
- job = IMB_SUBMIT_JOB(mb_mgr);
-#else
- job = IMB_SUBMIT_JOB_NOCHECK(mb_mgr);
-#endif
- /*
- * If submit returns a processed job then handle it,
- * before submitting subsequent jobs
- */
- if (job)
- processed_jobs += handle_completed_jobs(qp, mb_mgr,
- job, &ops[processed_jobs],
- nb_ops - processed_jobs);
-
- } while (processed_jobs < nb_ops);
-
- qp->digest_idx = digest_idx;
-
- if (processed_jobs < 1)
- processed_jobs += flush_mb_mgr(qp, mb_mgr,
- &ops[processed_jobs],
- nb_ops - processed_jobs);
-
- return processed_jobs;
-}
-#endif
static inline int
check_crypto_sgl(union rte_crypto_sym_ofs so, const struct rte_crypto_sgl *sgl)
{
diff --git a/drivers/crypto/ipsec_mb/pmd_aesni_mb_priv.h b/drivers/crypto/ipsec_mb/pmd_aesni_mb_priv.h
index 85994fe5a1..51cfd7e2aa 100644
--- a/drivers/crypto/ipsec_mb/pmd_aesni_mb_priv.h
+++ b/drivers/crypto/ipsec_mb/pmd_aesni_mb_priv.h
@@ -17,9 +17,7 @@
#define HMAC_IPAD_VALUE (0x36)
#define HMAC_OPAD_VALUE (0x5C)
-#if IMB_VERSION(1, 2, 0) < IMB_VERSION_NUM
#define MAX_NUM_SEGS 16
-#endif
static const struct rte_cryptodev_capabilities aesni_mb_capabilities[] = {
{ /* MD5 HMAC */
@@ -567,13 +565,8 @@ static const struct rte_cryptodev_capabilities aesni_mb_capabilities[] = {
},
.digest_size = {
.min = 4,
-#if IMB_VERSION(1, 2, 0) < IMB_VERSION_NUM
.max = 16,
.increment = 4
-#else
- .max = 4,
- .increment = 0
-#endif
},
.iv_size = {
.min = 16,
@@ -730,9 +723,7 @@ struct aesni_mb_qp_data {
* by the driver when verifying a digest provided
* by the user (using authentication verify operation)
*/
-#if IMB_VERSION(1, 2, 0) < IMB_VERSION_NUM
struct IMB_SGL_IOV sgl_segs[MAX_NUM_SEGS];
-#endif
union {
struct gcm_context_data gcm_sgl_ctx;
struct chacha20_poly1305_context_data chacha_sgl_ctx;
--
2.25.1
^ permalink raw reply [flat|nested] 45+ messages in thread
* [PATCH v6 3/5] doc: remove outdated version details
2024-03-12 13:50 ` [PATCH v6 1/5] ci: replace IPsec-mb package install Brian Dooley
2024-03-12 13:50 ` [PATCH v6 2/5] crypto/ipsec_mb: bump minimum IPsec Multi-buffer version Brian Dooley
@ 2024-03-12 13:50 ` Brian Dooley
2024-03-12 13:50 ` [PATCH v6 4/5] crypto/ipsec_mb: use new ipad/opad calculation API Brian Dooley
` (4 subsequent siblings)
6 siblings, 0 replies; 45+ messages in thread
From: Brian Dooley @ 2024-03-12 13:50 UTC (permalink / raw)
To: Kai Ji, Pablo de Lara
Cc: dev, gakhil, aconole, probb, wathsala.vithanage, Sivaramakrishnan Venkat
From: Sivaramakrishnan Venkat <venkatx.sivaramakrishnan@intel.com>
SW PMD documentation is updated to remove details of unsupported IPsec
Multi-buffer versions. DPDK releases older than 20.11 are end of life,
so those DPDK versions are removed from the Crypto library version table.
Signed-off-by: Sivaramakrishnan Venkat <venkatx.sivaramakrishnan@intel.com>
Acked-by: Pablo de Lara <pablo.de.lara.guarch@intel.com>
Acked-by: Wathsala Vithanage <wathsala.vithanage@arm.com>
---
v5:
- Rebased and added to patchset
v3:
- added second patch for outdated documentation updates.
---
doc/guides/cryptodevs/aesni_gcm.rst | 19 +++---------------
doc/guides/cryptodevs/aesni_mb.rst | 22 +++------------------
doc/guides/cryptodevs/chacha20_poly1305.rst | 12 ++---------
doc/guides/cryptodevs/kasumi.rst | 14 +++----------
doc/guides/cryptodevs/snow3g.rst | 15 +++-----------
doc/guides/cryptodevs/zuc.rst | 15 +++-----------
6 files changed, 17 insertions(+), 80 deletions(-)
diff --git a/doc/guides/cryptodevs/aesni_gcm.rst b/doc/guides/cryptodevs/aesni_gcm.rst
index dc665e536c..e38a03b78f 100644
--- a/doc/guides/cryptodevs/aesni_gcm.rst
+++ b/doc/guides/cryptodevs/aesni_gcm.rst
@@ -62,12 +62,6 @@ Once it is downloaded, extract it and follow these steps:
make
make install
-.. note::
-
- Compilation of the Multi-Buffer library is broken when GCC < 5.0, if library <= v0.53.
- If a lower GCC version than 5.0, the workaround proposed by the following link
- should be used: `<https://github.com/intel/intel-ipsec-mb/issues/40>`_.
-
As a reference, the following table shows a mapping between the past DPDK versions
and the external crypto libraries supported by them:
@@ -79,18 +73,11 @@ and the external crypto libraries supported by them:
============= ================================
DPDK version Crypto library version
============= ================================
- 16.04 - 16.11 Multi-buffer library 0.43 - 0.44
- 17.02 - 17.05 ISA-L Crypto v2.18
- 17.08 - 18.02 Multi-buffer library 0.46 - 0.48
- 18.05 - 19.02 Multi-buffer library 0.49 - 0.52
- 19.05 - 20.08 Multi-buffer library 0.52 - 0.55
- 20.11 - 21.08 Multi-buffer library 0.53 - 1.3*
- 21.11 - 23.11 Multi-buffer library 1.0 - 1.5*
- 24.03+ Multi-buffer library 1.4 - 1.5*
+ 20.11 - 21.08 Multi-buffer library 0.53 - 1.3
+ 21.11 - 23.11 Multi-buffer library 1.0 - 1.5
+ 24.03+ Multi-buffer library 1.4 - 1.5
============= ================================
-\* Multi-buffer library 1.0 or newer only works for Meson but not Make build system.
-
Initialization
--------------
diff --git a/doc/guides/cryptodevs/aesni_mb.rst b/doc/guides/cryptodevs/aesni_mb.rst
index 5d670ee237..bd7c8de07f 100644
--- a/doc/guides/cryptodevs/aesni_mb.rst
+++ b/doc/guides/cryptodevs/aesni_mb.rst
@@ -121,12 +121,6 @@ Once it is downloaded, extract it and follow these steps:
make
make install
-.. note::
-
- Compilation of the Multi-Buffer library is broken when GCC < 5.0, if library <= v0.53.
- If a lower GCC version than 5.0, the workaround proposed by the following link
- should be used: `<https://github.com/intel/intel-ipsec-mb/issues/40>`_.
-
As a reference, the following table shows a mapping between the past DPDK versions
and the Multi-Buffer library version supported by them:
@@ -137,21 +131,11 @@ and the Multi-Buffer library version supported by them:
============== ============================
DPDK version Multi-buffer library version
============== ============================
- 2.2 - 16.11 0.43 - 0.44
- 17.02 0.44
- 17.05 - 17.08 0.45 - 0.48
- 17.11 0.47 - 0.48
- 18.02 0.48
- 18.05 - 19.02 0.49 - 0.52
- 19.05 - 19.08 0.52
- 19.11 - 20.08 0.52 - 0.55
- 20.11 - 21.08 0.53 - 1.3*
- 21.11 - 23.11 1.0 - 1.5*
- 24.03+ 1.4 - 1.5*
+ 20.11 - 21.08 0.53 - 1.3
+ 21.11 - 23.11 1.0 - 1.5
+ 24.03+ 1.4 - 1.5
============== ============================
-\* Multi-buffer library 1.0 or newer only works for Meson but not Make build system.
-
Initialization
--------------
diff --git a/doc/guides/cryptodevs/chacha20_poly1305.rst b/doc/guides/cryptodevs/chacha20_poly1305.rst
index c32866b301..8e0ee4f835 100644
--- a/doc/guides/cryptodevs/chacha20_poly1305.rst
+++ b/doc/guides/cryptodevs/chacha20_poly1305.rst
@@ -56,12 +56,6 @@ Once it is downloaded, extract it and follow these steps:
make
make install
-.. note::
-
- Compilation of the Multi-Buffer library is broken when GCC < 5.0, if library <= v0.53.
- If a lower GCC version than 5.0, the workaround proposed by the following link
- should be used: `<https://github.com/intel/intel-ipsec-mb/issues/40>`_.
-
As a reference, the following table shows a mapping between the past DPDK versions
and the external crypto libraries supported by them:
@@ -72,12 +66,10 @@ and the external crypto libraries supported by them:
============= ================================
DPDK version Crypto library version
============= ================================
- 21.11 - 23.11 Multi-buffer library 1.0-1.5*
- 24.03+ Multi-buffer library 1.4-1.5*
+ 21.11 - 23.11 Multi-buffer library 1.0-1.5
+ 24.03+ Multi-buffer library 1.4-1.5
============= ================================
-\* Multi-buffer library 1.0 or newer only works for Meson but not Make build system.
-
Initialization
--------------
diff --git a/doc/guides/cryptodevs/kasumi.rst b/doc/guides/cryptodevs/kasumi.rst
index a8f4e6b204..28ac452524 100644
--- a/doc/guides/cryptodevs/kasumi.rst
+++ b/doc/guides/cryptodevs/kasumi.rst
@@ -69,12 +69,6 @@ Once it is downloaded, extract it and follow these steps:
make
make install
-.. note::
-
- Compilation of the Multi-Buffer library is broken when GCC < 5.0, if library <= v0.53.
- If a lower GCC version than 5.0, the workaround proposed by the following link
- should be used: `<https://github.com/intel/intel-ipsec-mb/issues/40>`_.
-
As a reference, the following table shows a mapping between the past DPDK versions
and the external crypto libraries supported by them:
@@ -86,13 +80,11 @@ and the external crypto libraries supported by them:
DPDK version Crypto library version
============= ================================
16.11 - 19.11 LibSSO KASUMI
- 20.02 - 21.08 Multi-buffer library 0.53 - 1.3*
- 21.11 - 23.11 Multi-buffer library 1.0 - 1.5*
- 24.03+ Multi-buffer library 1.4 - 1.5*
+ 20.11 - 21.08 Multi-buffer library 0.53 - 1.3
+ 21.11 - 23.11 Multi-buffer library 1.0 - 1.5
+ 24.03+ Multi-buffer library 1.4 - 1.5
============= ================================
-\* Multi-buffer library 1.0 or newer only works for Meson but not Make build system.
-
Initialization
--------------
diff --git a/doc/guides/cryptodevs/snow3g.rst b/doc/guides/cryptodevs/snow3g.rst
index 46863462e5..0141f62976 100644
--- a/doc/guides/cryptodevs/snow3g.rst
+++ b/doc/guides/cryptodevs/snow3g.rst
@@ -78,12 +78,6 @@ Once it is downloaded, extract it and follow these steps:
make
make install
-.. note::
-
- Compilation of the Multi-Buffer library is broken when GCC < 5.0, if library <= v0.53.
- If a lower GCC version than 5.0, the workaround proposed by the following link
- should be used: `<https://github.com/intel/intel-ipsec-mb/issues/40>`_.
-
As a reference, the following table shows a mapping between the past DPDK versions
and the external crypto libraries supported by them:
@@ -94,14 +88,11 @@ and the external crypto libraries supported by them:
============= ================================
DPDK version Crypto library version
============= ================================
- 16.04 - 19.11 LibSSO SNOW3G
- 20.02 - 21.08 Multi-buffer library 0.53 - 1.3*
- 21.11 - 23.11 Multi-buffer library 1.0 - 1.5*
- 24.03+ Multi-buffer library 1.4 - 1.5*
+ 20.11 - 21.08 Multi-buffer library 0.53 - 1.3
+ 21.11 - 23.11 Multi-buffer library 1.0 - 1.5
+ 24.03+ Multi-buffer library 1.4 - 1.5
============= ================================
-\* Multi-buffer library 1.0 or newer only works for Meson but not Make build system.
-
Initialization
--------------
diff --git a/doc/guides/cryptodevs/zuc.rst b/doc/guides/cryptodevs/zuc.rst
index 51867e1a16..97c14c8c77 100644
--- a/doc/guides/cryptodevs/zuc.rst
+++ b/doc/guides/cryptodevs/zuc.rst
@@ -77,12 +77,6 @@ Once it is downloaded, extract it and follow these steps:
make
make install
-.. note::
-
- Compilation of the Multi-Buffer library is broken when GCC < 5.0, if library <= v0.53.
- If a lower GCC version than 5.0, the workaround proposed by the following link
- should be used: `<https://github.com/intel/intel-ipsec-mb/issues/40>`_.
-
As a reference, the following table shows a mapping between the past DPDK versions
and the external crypto libraries supported by them:
@@ -93,14 +87,11 @@ and the external crypto libraries supported by them:
============= ================================
DPDK version Crypto library version
============= ================================
- 16.11 - 19.11 LibSSO ZUC
- 20.02 - 21.08 Multi-buffer library 0.53 - 1.3*
- 21.11 - 23.11 Multi-buffer library 1.0 - 1.5*
- 24.03+ Multi-buffer library 1.4 - 1.5*
+ 20.11 - 21.08 Multi-buffer library 0.53 - 1.3
+ 21.11 - 23.11 Multi-buffer library 1.0 - 1.5
+ 24.03+ Multi-buffer library 1.4 - 1.5
============= ================================
-\* Multi-buffer library 1.0 or newer only works for Meson but not Make build system.
-
Initialization
--------------
--
2.25.1
^ permalink raw reply [flat|nested] 45+ messages in thread
* [PATCH v6 4/5] crypto/ipsec_mb: use new ipad/opad calculation API
2024-03-12 13:50 ` [PATCH v6 1/5] ci: replace IPsec-mb package install Brian Dooley
2024-03-12 13:50 ` [PATCH v6 2/5] crypto/ipsec_mb: bump minimum IPsec Multi-buffer version Brian Dooley
2024-03-12 13:50 ` [PATCH v6 3/5] doc: remove outdated version details Brian Dooley
@ 2024-03-12 13:50 ` Brian Dooley
2024-03-12 13:50 ` [PATCH v6 5/5] crypto/ipsec_mb: unify some IPsec MB PMDs Brian Dooley
` (3 subsequent siblings)
6 siblings, 0 replies; 45+ messages in thread
From: Brian Dooley @ 2024-03-12 13:50 UTC (permalink / raw)
To: Kai Ji, Pablo de Lara
Cc: dev, gakhil, aconole, probb, wathsala.vithanage, Brian Dooley,
Ciara Power
IPSec Multi-buffer library v1.4 added a new API to
calculate inner/outer padding for HMAC-SHAx/MD5.
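As an illustration, a minimal sketch of the replacement (the helper name and
argument order follow the call added in the diff below; the wrapper function
and its parameters are hypothetical):

    #include <intel-ipsec-mb.h>

    /*
     * Sketch only: derive the HMAC inner/outer pads with the v1.4 helper
     * instead of hand-built ipad/opad buffers and per-algorithm one-block
     * hashes. mb_mgr must be an initialised IMB_MGR.
     */
    static void
    set_hmac_pads(IMB_MGR *mb_mgr, IMB_HASH_ALG hash_alg,
                  const uint8_t *key, size_t key_len,
                  uint8_t *ipad, uint8_t *opad)
    {
            imb_hmac_ipad_opad(mb_mgr, hash_alg, key, key_len, ipad, opad);
    }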
Signed-off-by: Pablo de Lara <pablo.de.lara.guarch@intel.com>
Signed-off-by: Brian Dooley <brian.dooley@intel.com>
Acked-by: Ciara Power <ciara.power@intel.com>
Acked-by: Wathsala Vithanage <wathsala.vithanage@arm.com>
---
v5:
- Rebased and added to patchset
v2:
- Remove ipsec mb version checks
---
drivers/crypto/ipsec_mb/pmd_aesni_mb.c | 75 ++------------------------
1 file changed, 5 insertions(+), 70 deletions(-)
diff --git a/drivers/crypto/ipsec_mb/pmd_aesni_mb.c b/drivers/crypto/ipsec_mb/pmd_aesni_mb.c
index b93267f1c3..80ced1e4fe 100644
--- a/drivers/crypto/ipsec_mb/pmd_aesni_mb.c
+++ b/drivers/crypto/ipsec_mb/pmd_aesni_mb.c
@@ -13,49 +13,6 @@ struct aesni_mb_op_buf_data {
uint32_t offset;
};
-/**
- * Calculate the authentication pre-computes
- *
- * @param one_block_hash Function pointer
- * to calculate digest on ipad/opad
- * @param ipad Inner pad output byte array
- * @param opad Outer pad output byte array
- * @param hkey Authentication key
- * @param hkey_len Authentication key length
- * @param blocksize Block size of selected hash algo
- */
-static void
-calculate_auth_precomputes(hash_one_block_t one_block_hash,
- uint8_t *ipad, uint8_t *opad,
- const uint8_t *hkey, uint16_t hkey_len,
- uint16_t blocksize)
-{
- uint32_t i, length;
-
- uint8_t ipad_buf[blocksize] __rte_aligned(16);
- uint8_t opad_buf[blocksize] __rte_aligned(16);
-
- /* Setup inner and outer pads */
- memset(ipad_buf, HMAC_IPAD_VALUE, blocksize);
- memset(opad_buf, HMAC_OPAD_VALUE, blocksize);
-
- /* XOR hash key with inner and outer pads */
- length = hkey_len > blocksize ? blocksize : hkey_len;
-
- for (i = 0; i < length; i++) {
- ipad_buf[i] ^= hkey[i];
- opad_buf[i] ^= hkey[i];
- }
-
- /* Compute partial hashes */
- (*one_block_hash)(ipad_buf, ipad);
- (*one_block_hash)(opad_buf, opad);
-
- /* Clean up stack */
- memset(ipad_buf, 0, blocksize);
- memset(opad_buf, 0, blocksize);
-}
-
static inline int
is_aead_algo(IMB_HASH_ALG hash_alg, IMB_CIPHER_MODE cipher_mode)
{
@@ -66,12 +23,10 @@ is_aead_algo(IMB_HASH_ALG hash_alg, IMB_CIPHER_MODE cipher_mode)
/** Set session authentication parameters */
static int
-aesni_mb_set_session_auth_parameters(const IMB_MGR *mb_mgr,
+aesni_mb_set_session_auth_parameters(IMB_MGR *mb_mgr,
struct aesni_mb_session *sess,
const struct rte_crypto_sym_xform *xform)
{
- hash_one_block_t hash_oneblock_fn = NULL;
- unsigned int key_larger_block_size = 0;
uint8_t hashed_key[HMAC_MAX_BLOCK_SIZE] = { 0 };
uint32_t auth_precompute = 1;
@@ -263,18 +218,15 @@ aesni_mb_set_session_auth_parameters(const IMB_MGR *mb_mgr,
switch (xform->auth.algo) {
case RTE_CRYPTO_AUTH_MD5_HMAC:
sess->template_job.hash_alg = IMB_AUTH_MD5;
- hash_oneblock_fn = mb_mgr->md5_one_block;
break;
case RTE_CRYPTO_AUTH_SHA1_HMAC:
sess->template_job.hash_alg = IMB_AUTH_HMAC_SHA_1;
- hash_oneblock_fn = mb_mgr->sha1_one_block;
if (xform->auth.key.length > get_auth_algo_blocksize(
IMB_AUTH_HMAC_SHA_1)) {
IMB_SHA1(mb_mgr,
xform->auth.key.data,
xform->auth.key.length,
hashed_key);
- key_larger_block_size = 1;
}
break;
case RTE_CRYPTO_AUTH_SHA1:
@@ -283,14 +235,12 @@ aesni_mb_set_session_auth_parameters(const IMB_MGR *mb_mgr,
break;
case RTE_CRYPTO_AUTH_SHA224_HMAC:
sess->template_job.hash_alg = IMB_AUTH_HMAC_SHA_224;
- hash_oneblock_fn = mb_mgr->sha224_one_block;
if (xform->auth.key.length > get_auth_algo_blocksize(
IMB_AUTH_HMAC_SHA_224)) {
IMB_SHA224(mb_mgr,
xform->auth.key.data,
xform->auth.key.length,
hashed_key);
- key_larger_block_size = 1;
}
break;
case RTE_CRYPTO_AUTH_SHA224:
@@ -299,14 +249,12 @@ aesni_mb_set_session_auth_parameters(const IMB_MGR *mb_mgr,
break;
case RTE_CRYPTO_AUTH_SHA256_HMAC:
sess->template_job.hash_alg = IMB_AUTH_HMAC_SHA_256;
- hash_oneblock_fn = mb_mgr->sha256_one_block;
if (xform->auth.key.length > get_auth_algo_blocksize(
IMB_AUTH_HMAC_SHA_256)) {
IMB_SHA256(mb_mgr,
xform->auth.key.data,
xform->auth.key.length,
hashed_key);
- key_larger_block_size = 1;
}
break;
case RTE_CRYPTO_AUTH_SHA256:
@@ -315,14 +263,12 @@ aesni_mb_set_session_auth_parameters(const IMB_MGR *mb_mgr,
break;
case RTE_CRYPTO_AUTH_SHA384_HMAC:
sess->template_job.hash_alg = IMB_AUTH_HMAC_SHA_384;
- hash_oneblock_fn = mb_mgr->sha384_one_block;
if (xform->auth.key.length > get_auth_algo_blocksize(
IMB_AUTH_HMAC_SHA_384)) {
IMB_SHA384(mb_mgr,
xform->auth.key.data,
xform->auth.key.length,
hashed_key);
- key_larger_block_size = 1;
}
break;
case RTE_CRYPTO_AUTH_SHA384:
@@ -331,14 +277,12 @@ aesni_mb_set_session_auth_parameters(const IMB_MGR *mb_mgr,
break;
case RTE_CRYPTO_AUTH_SHA512_HMAC:
sess->template_job.hash_alg = IMB_AUTH_HMAC_SHA_512;
- hash_oneblock_fn = mb_mgr->sha512_one_block;
if (xform->auth.key.length > get_auth_algo_blocksize(
IMB_AUTH_HMAC_SHA_512)) {
IMB_SHA512(mb_mgr,
xform->auth.key.data,
xform->auth.key.length,
hashed_key);
- key_larger_block_size = 1;
}
break;
case RTE_CRYPTO_AUTH_SHA512:
@@ -372,19 +316,10 @@ aesni_mb_set_session_auth_parameters(const IMB_MGR *mb_mgr,
return 0;
/* Calculate Authentication precomputes */
- if (key_larger_block_size) {
- calculate_auth_precomputes(hash_oneblock_fn,
- sess->auth.pads.inner, sess->auth.pads.outer,
- hashed_key,
- xform->auth.key.length,
- get_auth_algo_blocksize(sess->template_job.hash_alg));
- } else {
- calculate_auth_precomputes(hash_oneblock_fn,
- sess->auth.pads.inner, sess->auth.pads.outer,
- xform->auth.key.data,
- xform->auth.key.length,
- get_auth_algo_blocksize(sess->template_job.hash_alg));
- }
+ imb_hmac_ipad_opad(mb_mgr, sess->template_job.hash_alg,
+ xform->auth.key.data, xform->auth.key.length,
+ sess->auth.pads.inner, sess->auth.pads.outer);
+
sess->template_job.u.HMAC._hashed_auth_key_xor_ipad =
sess->auth.pads.inner;
sess->template_job.u.HMAC._hashed_auth_key_xor_opad =
--
2.25.1
^ permalink raw reply [flat|nested] 45+ messages in thread
* [PATCH v6 5/5] crypto/ipsec_mb: unify some IPsec MB PMDs
2024-03-12 13:50 ` [PATCH v6 1/5] ci: replace IPsec-mb package install Brian Dooley
` (2 preceding siblings ...)
2024-03-12 13:50 ` [PATCH v6 4/5] crypto/ipsec_mb: use new ipad/opad calculation API Brian Dooley
@ 2024-03-12 13:50 ` Brian Dooley
2024-03-12 13:54 ` [PATCH v6 1/5] ci: replace IPsec-mb package install David Marchand
` (2 subsequent siblings)
6 siblings, 0 replies; 45+ messages in thread
From: Brian Dooley @ 2024-03-12 13:50 UTC (permalink / raw)
To: Kai Ji, Pablo de Lara
Cc: dev, gakhil, aconole, probb, wathsala.vithanage, Brian Dooley,
Ciara Power
Currently IPsec MB provides both the JOB API and direct API.
AESNI_MB PMD is using the JOB API codepath while KASUMI and
CHACHA20_POLY1305 are using the direct API.
Instead of using the direct API for these PMDs, they should now make
use of the JOB API codepath. This would remove all use of the IPsec MB
direct API for these PMDs.
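In practice the change is a re-registration: each per-algorithm PMD stops
providing its own direct-API handlers and points at the shared AESNI_MB
job-API entry points instead. A condensed sketch follows (the helper function
is hypothetical, the ipsec_mb_internals type is assumed from the driver's
private header, and the real RTE_INIT code in the diff below also sets
capabilities and feature flags):

    #include "ipsec_mb_private.h"
    #include "pmd_aesni_mb_priv.h"

    /* Point a PMD's entry points at the shared AESNI_MB job-API codepath. */
    static void
    use_job_api_codepath(struct ipsec_mb_internals *pmd_data)
    {
            pmd_data->dequeue_burst     = aesni_mb_dequeue_burst;
            pmd_data->session_configure = aesni_mb_session_configure;
            pmd_data->qp_priv_size      = sizeof(struct aesni_mb_qp_data);
            pmd_data->session_priv_size = sizeof(struct aesni_mb_session);
    }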
Signed-off-by: Brian Dooley <brian.dooley@intel.com>
Acked-by: Ciara Power <ciara.power@intel.com>
Acked-by: Wathsala Vithanage <wathsala.vithanage@arm.com>
---
v6:
- Reintroduce SNOW3G and ZUC PMDs
v5:
- Rebased and added to patchset
v4:
- Keep AES GCM PMD and fix extern issue
v3:
- Remove session configure pointer for each PMD
v2:
- Fix compilation failure
---
doc/guides/rel_notes/release_24_03.rst | 2 +
drivers/crypto/ipsec_mb/pmd_aesni_mb.c | 9 +-
drivers/crypto/ipsec_mb/pmd_aesni_mb_priv.h | 15 +-
drivers/crypto/ipsec_mb/pmd_chacha_poly.c | 338 +--------------
.../crypto/ipsec_mb/pmd_chacha_poly_priv.h | 28 --
drivers/crypto/ipsec_mb/pmd_kasumi.c | 410 +-----------------
drivers/crypto/ipsec_mb/pmd_kasumi_priv.h | 20 -
7 files changed, 32 insertions(+), 790 deletions(-)
diff --git a/doc/guides/rel_notes/release_24_03.rst b/doc/guides/rel_notes/release_24_03.rst
index 8fa8cf1dd6..c9ddd120de 100644
--- a/doc/guides/rel_notes/release_24_03.rst
+++ b/doc/guides/rel_notes/release_24_03.rst
@@ -147,6 +147,8 @@ New Features
* **Updated ipsec_mb crypto driver.**
* Bump minimum IPSec Multi-buffer version to 1.4 for SW PMDs.
+ * Kasumi and ChaChaPoly PMDs now share the job API codepath
+ with AESNI_MB PMD.
* **Updated Marvell cnxk crypto driver.**
diff --git a/drivers/crypto/ipsec_mb/pmd_aesni_mb.c b/drivers/crypto/ipsec_mb/pmd_aesni_mb.c
index 80ced1e4fe..35bd7eaa51 100644
--- a/drivers/crypto/ipsec_mb/pmd_aesni_mb.c
+++ b/drivers/crypto/ipsec_mb/pmd_aesni_mb.c
@@ -8,6 +8,8 @@
RTE_DEFINE_PER_LCORE(pid_t, pid);
+uint8_t pmd_driver_id_aesni_mb;
+
struct aesni_mb_op_buf_data {
struct rte_mbuf *m;
uint32_t offset;
@@ -692,7 +694,7 @@ aesni_mb_set_session_aead_parameters(const IMB_MGR *mb_mgr,
}
/** Configure a aesni multi-buffer session from a crypto xform chain */
-static int
+int
aesni_mb_session_configure(IMB_MGR *mb_mgr,
void *priv_sess,
const struct rte_crypto_sym_xform *xform)
@@ -2039,7 +2041,7 @@ set_job_null_op(IMB_JOB *job, struct rte_crypto_op *op)
return job;
}
-static uint16_t
+uint16_t
aesni_mb_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
uint16_t nb_ops)
{
@@ -2171,6 +2173,7 @@ aesni_mb_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
return processed_jobs;
}
+
static inline int
check_crypto_sgl(union rte_crypto_sym_ofs so, const struct rte_crypto_sgl *sgl)
{
@@ -2226,7 +2229,7 @@ verify_sync_dgst(struct rte_crypto_sym_vec *vec,
return k;
}
-static uint32_t
+uint32_t
aesni_mb_process_bulk(struct rte_cryptodev *dev __rte_unused,
struct rte_cryptodev_sym_session *sess, union rte_crypto_sym_ofs sofs,
struct rte_crypto_sym_vec *vec)
diff --git a/drivers/crypto/ipsec_mb/pmd_aesni_mb_priv.h b/drivers/crypto/ipsec_mb/pmd_aesni_mb_priv.h
index 51cfd7e2aa..4805627679 100644
--- a/drivers/crypto/ipsec_mb/pmd_aesni_mb_priv.h
+++ b/drivers/crypto/ipsec_mb/pmd_aesni_mb_priv.h
@@ -19,6 +19,19 @@
#define MAX_NUM_SEGS 16
+int
+aesni_mb_session_configure(IMB_MGR * m __rte_unused, void *priv_sess,
+ const struct rte_crypto_sym_xform *xform);
+
+uint16_t
+aesni_mb_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
+ uint16_t nb_ops);
+
+uint32_t
+aesni_mb_process_bulk(struct rte_cryptodev *dev __rte_unused,
+ struct rte_cryptodev_sym_session *sess, union rte_crypto_sym_ofs sofs,
+ struct rte_crypto_sym_vec *vec);
+
static const struct rte_cryptodev_capabilities aesni_mb_capabilities[] = {
{ /* MD5 HMAC */
.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
@@ -715,8 +728,6 @@ static const struct rte_cryptodev_capabilities aesni_mb_capabilities[] = {
RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
};
-uint8_t pmd_driver_id_aesni_mb;
-
struct aesni_mb_qp_data {
uint8_t temp_digests[IMB_MAX_JOBS][DIGEST_LENGTH_MAX];
/* *< Buffers used to store the digest generated
diff --git a/drivers/crypto/ipsec_mb/pmd_chacha_poly.c b/drivers/crypto/ipsec_mb/pmd_chacha_poly.c
index 97e7cef233..7436353fc2 100644
--- a/drivers/crypto/ipsec_mb/pmd_chacha_poly.c
+++ b/drivers/crypto/ipsec_mb/pmd_chacha_poly.c
@@ -3,334 +3,7 @@
*/
#include "pmd_chacha_poly_priv.h"
-
-/** Parse crypto xform chain and set private session parameters. */
-static int
-chacha20_poly1305_session_configure(IMB_MGR * mb_mgr __rte_unused,
- void *priv_sess, const struct rte_crypto_sym_xform *xform)
-{
- struct chacha20_poly1305_session *sess = priv_sess;
- const struct rte_crypto_sym_xform *auth_xform;
- const struct rte_crypto_sym_xform *cipher_xform;
- const struct rte_crypto_sym_xform *aead_xform;
-
- uint8_t key_length;
- const uint8_t *key;
- enum ipsec_mb_operation mode;
- int ret = 0;
-
- ret = ipsec_mb_parse_xform(xform, &mode, &auth_xform,
- &cipher_xform, &aead_xform);
- if (ret)
- return ret;
-
- sess->op = mode;
-
- switch (sess->op) {
- case IPSEC_MB_OP_AEAD_AUTHENTICATED_ENCRYPT:
- case IPSEC_MB_OP_AEAD_AUTHENTICATED_DECRYPT:
- if (aead_xform->aead.algo !=
- RTE_CRYPTO_AEAD_CHACHA20_POLY1305) {
- IPSEC_MB_LOG(ERR,
- "The only combined operation supported is CHACHA20 POLY1305");
- ret = -ENOTSUP;
- goto error_exit;
- }
- /* Set IV parameters */
- sess->iv.offset = aead_xform->aead.iv.offset;
- sess->iv.length = aead_xform->aead.iv.length;
- key_length = aead_xform->aead.key.length;
- key = aead_xform->aead.key.data;
- sess->aad_length = aead_xform->aead.aad_length;
- sess->req_digest_length = aead_xform->aead.digest_length;
- break;
- default:
- IPSEC_MB_LOG(
- ERR, "Wrong xform type, has to be AEAD or authentication");
- ret = -ENOTSUP;
- goto error_exit;
- }
-
- /* IV check */
- if (sess->iv.length != CHACHA20_POLY1305_IV_LENGTH &&
- sess->iv.length != 0) {
- IPSEC_MB_LOG(ERR, "Wrong IV length");
- ret = -EINVAL;
- goto error_exit;
- }
-
- /* Check key length */
- if (key_length != CHACHA20_POLY1305_KEY_SIZE) {
- IPSEC_MB_LOG(ERR, "Invalid key length");
- ret = -EINVAL;
- goto error_exit;
- } else {
- memcpy(sess->key, key, CHACHA20_POLY1305_KEY_SIZE);
- }
-
- /* Digest check */
- if (sess->req_digest_length != CHACHA20_POLY1305_DIGEST_LENGTH) {
- IPSEC_MB_LOG(ERR, "Invalid digest length");
- ret = -EINVAL;
- goto error_exit;
- } else {
- sess->gen_digest_length = CHACHA20_POLY1305_DIGEST_LENGTH;
- }
-
-error_exit:
- return ret;
-}
-
-/**
- * Process a crypto operation, calling
- * the direct chacha poly API from the multi buffer library.
- *
- * @param qp queue pair
- * @param op symmetric crypto operation
- * @param session chacha poly session
- *
- * @return
- * - Return 0 if success
- */
-static int
-chacha20_poly1305_crypto_op(struct ipsec_mb_qp *qp, struct rte_crypto_op *op,
- struct chacha20_poly1305_session *session)
-{
- struct chacha20_poly1305_qp_data *qp_data =
- ipsec_mb_get_qp_private_data(qp);
- uint8_t *src, *dst;
- uint8_t *iv_ptr;
- struct rte_crypto_sym_op *sym_op = op->sym;
- struct rte_mbuf *m_src = sym_op->m_src;
- uint32_t offset, data_offset, data_length;
- uint32_t part_len, data_len;
- int total_len;
- uint8_t *tag;
- unsigned int oop = 0;
-
- offset = sym_op->aead.data.offset;
- data_offset = offset;
- data_length = sym_op->aead.data.length;
- RTE_ASSERT(m_src != NULL);
-
- while (offset >= m_src->data_len && data_length != 0) {
- offset -= m_src->data_len;
- m_src = m_src->next;
-
- RTE_ASSERT(m_src != NULL);
- }
-
- src = rte_pktmbuf_mtod_offset(m_src, uint8_t *, offset);
-
- data_len = m_src->data_len - offset;
- part_len = (data_len < data_length) ? data_len :
- data_length;
-
- /* In-place */
- if (sym_op->m_dst == NULL || (sym_op->m_dst == sym_op->m_src))
- dst = src;
- /* Out-of-place */
- else {
- oop = 1;
- /* Segmented destination buffer is not supported
- * if operation is Out-of-place
- */
- RTE_ASSERT(rte_pktmbuf_is_contiguous(sym_op->m_dst));
- dst = rte_pktmbuf_mtod_offset(sym_op->m_dst, uint8_t *,
- data_offset);
- }
-
- iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
- session->iv.offset);
-
- IMB_CHACHA20_POLY1305_INIT(qp->mb_mgr, session->key,
- &qp_data->chacha20_poly1305_ctx_data,
- iv_ptr, sym_op->aead.aad.data,
- (uint64_t)session->aad_length);
-
- if (session->op == IPSEC_MB_OP_AEAD_AUTHENTICATED_ENCRYPT) {
- IMB_CHACHA20_POLY1305_ENC_UPDATE(qp->mb_mgr,
- session->key,
- &qp_data->chacha20_poly1305_ctx_data,
- dst, src, (uint64_t)part_len);
- total_len = data_length - part_len;
-
- while (total_len) {
- m_src = m_src->next;
- RTE_ASSERT(m_src != NULL);
-
- src = rte_pktmbuf_mtod(m_src, uint8_t *);
- if (oop)
- dst += part_len;
- else
- dst = src;
- part_len = (m_src->data_len < total_len) ?
- m_src->data_len : total_len;
-
- if (dst == NULL || src == NULL) {
- IPSEC_MB_LOG(ERR, "Invalid src or dst input");
- return -EINVAL;
- }
- IMB_CHACHA20_POLY1305_ENC_UPDATE(qp->mb_mgr,
- session->key,
- &qp_data->chacha20_poly1305_ctx_data,
- dst, src, (uint64_t)part_len);
- total_len -= part_len;
- if (total_len < 0) {
- IPSEC_MB_LOG(ERR, "Invalid part len");
- return -EINVAL;
- }
- }
-
- tag = sym_op->aead.digest.data;
- IMB_CHACHA20_POLY1305_ENC_FINALIZE(qp->mb_mgr,
- &qp_data->chacha20_poly1305_ctx_data,
- tag, session->gen_digest_length);
-
- } else {
- IMB_CHACHA20_POLY1305_DEC_UPDATE(qp->mb_mgr,
- session->key,
- &qp_data->chacha20_poly1305_ctx_data,
- dst, src, (uint64_t)part_len);
-
- total_len = data_length - part_len;
-
- while (total_len) {
- m_src = m_src->next;
-
- RTE_ASSERT(m_src != NULL);
-
- src = rte_pktmbuf_mtod(m_src, uint8_t *);
- if (oop)
- dst += part_len;
- else
- dst = src;
- part_len = (m_src->data_len < total_len) ?
- m_src->data_len : total_len;
-
- if (dst == NULL || src == NULL) {
- IPSEC_MB_LOG(ERR, "Invalid src or dst input");
- return -EINVAL;
- }
- IMB_CHACHA20_POLY1305_DEC_UPDATE(qp->mb_mgr,
- session->key,
- &qp_data->chacha20_poly1305_ctx_data,
- dst, src, (uint64_t)part_len);
- total_len -= part_len;
- if (total_len < 0) {
- IPSEC_MB_LOG(ERR, "Invalid part len");
- return -EINVAL;
- }
- }
-
- tag = qp_data->temp_digest;
- IMB_CHACHA20_POLY1305_DEC_FINALIZE(qp->mb_mgr,
- &qp_data->chacha20_poly1305_ctx_data,
- tag, session->gen_digest_length);
- }
-
- return 0;
-}
-
-/**
- * Process a completed chacha poly op
- *
- * @param qp Queue Pair to process
- * @param op Crypto operation
- * @param sess Crypto session
- *
- * @return
- * - void
- */
-static void
-post_process_chacha20_poly1305_crypto_op(struct ipsec_mb_qp *qp,
- struct rte_crypto_op *op,
- struct chacha20_poly1305_session *session)
-{
- struct chacha20_poly1305_qp_data *qp_data =
- ipsec_mb_get_qp_private_data(qp);
-
- op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
- /* Verify digest if required */
- if (session->op == IPSEC_MB_OP_AEAD_AUTHENTICATED_DECRYPT ||
- session->op == IPSEC_MB_OP_HASH_VERIFY_ONLY) {
- uint8_t *digest = op->sym->aead.digest.data;
- uint8_t *tag = qp_data->temp_digest;
-
-#ifdef RTE_LIBRTE_PMD_CHACHA20_POLY1305_DEBUG
- rte_hexdump(stdout, "auth tag (orig):",
- digest, session->req_digest_length);
- rte_hexdump(stdout, "auth tag (calc):",
- tag, session->req_digest_length);
-#endif
- if (memcmp(tag, digest, session->req_digest_length) != 0)
- op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
-
- }
-
-}
-
-/**
- * Process a completed Chacha20_poly1305 request
- *
- * @param qp Queue Pair to process
- * @param op Crypto operation
- * @param sess Crypto session
- *
- * @return
- * - void
- */
-static void
-handle_completed_chacha20_poly1305_crypto_op(struct ipsec_mb_qp *qp,
- struct rte_crypto_op *op,
- struct chacha20_poly1305_session *sess)
-{
- post_process_chacha20_poly1305_crypto_op(qp, op, sess);
-
- /* Free session if a session-less crypto op */
- if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
- memset(sess, 0, sizeof(struct chacha20_poly1305_session));
- rte_mempool_put(qp->sess_mp, op->sym->session);
- op->sym->session = NULL;
- }
-}
-
-static uint16_t
-chacha20_poly1305_pmd_dequeue_burst(void *queue_pair,
- struct rte_crypto_op **ops, uint16_t nb_ops)
-{
- struct chacha20_poly1305_session *sess;
- struct ipsec_mb_qp *qp = queue_pair;
-
- int retval = 0;
- unsigned int i = 0, nb_dequeued;
-
- nb_dequeued = rte_ring_dequeue_burst(qp->ingress_queue,
- (void **)ops, nb_ops, NULL);
-
- for (i = 0; i < nb_dequeued; i++) {
-
- sess = ipsec_mb_get_session_private(qp, ops[i]);
- if (unlikely(sess == NULL)) {
- ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
- qp->stats.dequeue_err_count++;
- break;
- }
-
- retval = chacha20_poly1305_crypto_op(qp, ops[i], sess);
- if (retval < 0) {
- ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
- qp->stats.dequeue_err_count++;
- break;
- }
-
- handle_completed_chacha20_poly1305_crypto_op(qp, ops[i], sess);
- }
-
- qp->stats.dequeued_count += i;
-
- return i;
-}
+#include "pmd_aesni_mb_priv.h"
struct rte_cryptodev_ops chacha20_poly1305_pmd_ops = {
.dev_configure = ipsec_mb_config,
@@ -384,7 +57,7 @@ RTE_INIT(ipsec_mb_register_chacha20_poly1305)
= &ipsec_mb_pmds[IPSEC_MB_PMD_TYPE_CHACHA20_POLY1305];
chacha_poly_data->caps = chacha20_poly1305_capabilities;
- chacha_poly_data->dequeue_burst = chacha20_poly1305_pmd_dequeue_burst;
+ chacha_poly_data->dequeue_burst = aesni_mb_dequeue_burst;
chacha_poly_data->feature_flags =
RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
@@ -395,10 +68,9 @@ RTE_INIT(ipsec_mb_register_chacha20_poly1305)
RTE_CRYPTODEV_FF_SYM_SESSIONLESS;
chacha_poly_data->internals_priv_size = 0;
chacha_poly_data->ops = &chacha20_poly1305_pmd_ops;
- chacha_poly_data->qp_priv_size =
- sizeof(struct chacha20_poly1305_qp_data);
+ chacha_poly_data->qp_priv_size = sizeof(struct aesni_mb_qp_data);
chacha_poly_data->session_configure =
- chacha20_poly1305_session_configure;
+ aesni_mb_session_configure;
chacha_poly_data->session_priv_size =
- sizeof(struct chacha20_poly1305_session);
+ sizeof(struct aesni_mb_session);
}
diff --git a/drivers/crypto/ipsec_mb/pmd_chacha_poly_priv.h b/drivers/crypto/ipsec_mb/pmd_chacha_poly_priv.h
index 842f62f5d1..e668bfe07f 100644
--- a/drivers/crypto/ipsec_mb/pmd_chacha_poly_priv.h
+++ b/drivers/crypto/ipsec_mb/pmd_chacha_poly_priv.h
@@ -7,9 +7,7 @@
#include "ipsec_mb_private.h"
-#define CHACHA20_POLY1305_IV_LENGTH 12
#define CHACHA20_POLY1305_DIGEST_LENGTH 16
-#define CHACHA20_POLY1305_KEY_SIZE 32
static const
struct rte_cryptodev_capabilities chacha20_poly1305_capabilities[] = {
@@ -45,30 +43,4 @@ struct rte_cryptodev_capabilities chacha20_poly1305_capabilities[] = {
uint8_t pmd_driver_id_chacha20_poly1305;
-/** CHACHA20 POLY1305 private session structure */
-struct chacha20_poly1305_session {
- struct {
- uint16_t length;
- uint16_t offset;
- } iv;
- /**< IV parameters */
- uint16_t aad_length;
- /**< AAD length */
- uint16_t req_digest_length;
- /**< Requested digest length */
- uint16_t gen_digest_length;
- /**< Generated digest length */
- uint8_t key[CHACHA20_POLY1305_KEY_SIZE];
- enum ipsec_mb_operation op;
-} __rte_cache_aligned;
-
-struct chacha20_poly1305_qp_data {
- struct chacha20_poly1305_context_data chacha20_poly1305_ctx_data;
- uint8_t temp_digest[CHACHA20_POLY1305_DIGEST_LENGTH];
- /**< Buffer used to store the digest generated
- * by the driver when verifying a digest provided
- * by the user (using authentication verify operation)
- */
-};
-
#endif /* _PMD_CHACHA_POLY_PRIV_H_ */
diff --git a/drivers/crypto/ipsec_mb/pmd_kasumi.c b/drivers/crypto/ipsec_mb/pmd_kasumi.c
index 70536ec3dc..c3571ec81b 100644
--- a/drivers/crypto/ipsec_mb/pmd_kasumi.c
+++ b/drivers/crypto/ipsec_mb/pmd_kasumi.c
@@ -10,406 +10,7 @@
#include <rte_malloc.h>
#include "pmd_kasumi_priv.h"
-
-/** Parse crypto xform chain and set private session parameters. */
-static int
-kasumi_session_configure(IMB_MGR *mgr, void *priv_sess,
- const struct rte_crypto_sym_xform *xform)
-{
- const struct rte_crypto_sym_xform *auth_xform = NULL;
- const struct rte_crypto_sym_xform *cipher_xform = NULL;
- enum ipsec_mb_operation mode;
- struct kasumi_session *sess = (struct kasumi_session *)priv_sess;
- /* Select Crypto operation - hash then cipher / cipher then hash */
- int ret = ipsec_mb_parse_xform(xform, &mode, &auth_xform,
- &cipher_xform, NULL);
-
- if (ret)
- return ret;
-
- if (cipher_xform) {
- /* Only KASUMI F8 supported */
- if (cipher_xform->cipher.algo != RTE_CRYPTO_CIPHER_KASUMI_F8) {
- IPSEC_MB_LOG(ERR, "Unsupported cipher algorithm ");
- return -ENOTSUP;
- }
-
- sess->cipher_iv_offset = cipher_xform->cipher.iv.offset;
- if (cipher_xform->cipher.iv.length != KASUMI_IV_LENGTH) {
- IPSEC_MB_LOG(ERR, "Wrong IV length");
- return -EINVAL;
- }
-
- /* Initialize key */
- IMB_KASUMI_INIT_F8_KEY_SCHED(mgr,
- cipher_xform->cipher.key.data,
- &sess->pKeySched_cipher);
- }
-
- if (auth_xform) {
- /* Only KASUMI F9 supported */
- if (auth_xform->auth.algo != RTE_CRYPTO_AUTH_KASUMI_F9) {
- IPSEC_MB_LOG(ERR, "Unsupported authentication");
- return -ENOTSUP;
- }
-
- if (auth_xform->auth.digest_length != KASUMI_DIGEST_LENGTH) {
- IPSEC_MB_LOG(ERR, "Wrong digest length");
- return -EINVAL;
- }
-
- sess->auth_op = auth_xform->auth.op;
-
- /* Initialize key */
- IMB_KASUMI_INIT_F9_KEY_SCHED(mgr, auth_xform->auth.key.data,
- &sess->pKeySched_hash);
- }
-
- sess->op = mode;
- return ret;
-}
-
-/** Encrypt/decrypt mbufs with same cipher key. */
-static uint8_t
-process_kasumi_cipher_op(struct ipsec_mb_qp *qp, struct rte_crypto_op **ops,
- struct kasumi_session *session, uint8_t num_ops)
-{
- unsigned int i;
- uint8_t processed_ops = 0;
- const void *src[num_ops];
- void *dst[num_ops];
- uint8_t *iv_ptr;
- uint64_t iv[num_ops];
- uint32_t num_bytes[num_ops];
-
- for (i = 0; i < num_ops; i++) {
- src[i] = rte_pktmbuf_mtod_offset(ops[i]->sym->m_src,
- uint8_t *,
- (ops[i]->sym->cipher.data.offset >> 3));
- dst[i] = ops[i]->sym->m_dst
- ? rte_pktmbuf_mtod_offset(ops[i]->sym->m_dst,
- uint8_t *,
- (ops[i]->sym->cipher.data.offset >> 3))
- : rte_pktmbuf_mtod_offset(ops[i]->sym->m_src,
- uint8_t *,
- (ops[i]->sym->cipher.data.offset >> 3));
- iv_ptr = rte_crypto_op_ctod_offset(ops[i], uint8_t *,
- session->cipher_iv_offset);
- iv[i] = *((uint64_t *)(iv_ptr));
- num_bytes[i] = ops[i]->sym->cipher.data.length >> 3;
-
- processed_ops++;
- }
-
- if (processed_ops != 0)
- IMB_KASUMI_F8_N_BUFFER(qp->mb_mgr, &session->pKeySched_cipher,
- iv, src, dst, num_bytes,
- processed_ops);
-
- return processed_ops;
-}
-
-/** Encrypt/decrypt mbuf (bit level function). */
-static uint8_t
-process_kasumi_cipher_op_bit(struct ipsec_mb_qp *qp, struct rte_crypto_op *op,
- struct kasumi_session *session)
-{
- uint8_t *src, *dst;
- uint8_t *iv_ptr;
- uint64_t iv;
- uint32_t length_in_bits, offset_in_bits;
-
- offset_in_bits = op->sym->cipher.data.offset;
- src = rte_pktmbuf_mtod(op->sym->m_src, uint8_t *);
- if (op->sym->m_dst == NULL)
- dst = src;
- else
- dst = rte_pktmbuf_mtod(op->sym->m_dst, uint8_t *);
- iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
- session->cipher_iv_offset);
- iv = *((uint64_t *)(iv_ptr));
- length_in_bits = op->sym->cipher.data.length;
-
- IMB_KASUMI_F8_1_BUFFER_BIT(qp->mb_mgr, &session->pKeySched_cipher, iv,
- src, dst, length_in_bits, offset_in_bits);
-
- return 1;
-}
-
-/** Generate/verify hash from mbufs with same hash key. */
-static int
-process_kasumi_hash_op(struct ipsec_mb_qp *qp, struct rte_crypto_op **ops,
- struct kasumi_session *session, uint8_t num_ops)
-{
- unsigned int i;
- uint8_t processed_ops = 0;
- uint8_t *src, *dst;
- uint32_t length_in_bits;
- uint32_t num_bytes;
- struct kasumi_qp_data *qp_data = ipsec_mb_get_qp_private_data(qp);
-
- for (i = 0; i < num_ops; i++) {
- /* Data must be byte aligned */
- if ((ops[i]->sym->auth.data.offset % BYTE_LEN) != 0) {
- ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
- IPSEC_MB_LOG(ERR, "Invalid Offset");
- break;
- }
-
- length_in_bits = ops[i]->sym->auth.data.length;
-
- src = rte_pktmbuf_mtod_offset(ops[i]->sym->m_src, uint8_t *,
- (ops[i]->sym->auth.data.offset >> 3));
- /* Direction from next bit after end of message */
- num_bytes = length_in_bits >> 3;
-
- if (session->auth_op == RTE_CRYPTO_AUTH_OP_VERIFY) {
- dst = qp_data->temp_digest;
- IMB_KASUMI_F9_1_BUFFER(qp->mb_mgr,
- &session->pKeySched_hash, src,
- num_bytes, dst);
-
- /* Verify digest. */
- if (memcmp(dst, ops[i]->sym->auth.digest.data,
- KASUMI_DIGEST_LENGTH)
- != 0)
- ops[i]->status
- = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
- } else {
- dst = ops[i]->sym->auth.digest.data;
-
- IMB_KASUMI_F9_1_BUFFER(qp->mb_mgr,
- &session->pKeySched_hash, src,
- num_bytes, dst);
- }
- processed_ops++;
- }
-
- return processed_ops;
-}
-
-/** Process a batch of crypto ops which shares the same session. */
-static int
-process_ops(struct rte_crypto_op **ops, struct kasumi_session *session,
- struct ipsec_mb_qp *qp, uint8_t num_ops)
-{
- unsigned int i;
- unsigned int processed_ops;
-
- switch (session->op) {
- case IPSEC_MB_OP_ENCRYPT_ONLY:
- case IPSEC_MB_OP_DECRYPT_ONLY:
- processed_ops
- = process_kasumi_cipher_op(qp, ops, session, num_ops);
- break;
- case IPSEC_MB_OP_HASH_GEN_ONLY:
- case IPSEC_MB_OP_HASH_VERIFY_ONLY:
- processed_ops
- = process_kasumi_hash_op(qp, ops, session, num_ops);
- break;
- case IPSEC_MB_OP_ENCRYPT_THEN_HASH_GEN:
- case IPSEC_MB_OP_DECRYPT_THEN_HASH_VERIFY:
- processed_ops
- = process_kasumi_cipher_op(qp, ops, session, num_ops);
- process_kasumi_hash_op(qp, ops, session, processed_ops);
- break;
- case IPSEC_MB_OP_HASH_VERIFY_THEN_DECRYPT:
- case IPSEC_MB_OP_HASH_GEN_THEN_ENCRYPT:
- processed_ops
- = process_kasumi_hash_op(qp, ops, session, num_ops);
- process_kasumi_cipher_op(qp, ops, session, processed_ops);
- break;
- default:
- /* Operation not supported. */
- processed_ops = 0;
- }
-
- for (i = 0; i < num_ops; i++) {
- /*
- * If there was no error/authentication failure,
- * change status to successful.
- */
- if (ops[i]->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
- ops[i]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
- /* Free session if a session-less crypto op. */
- if (ops[i]->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
- memset(session, 0, sizeof(struct kasumi_session));
- rte_mempool_put(qp->sess_mp, ops[i]->sym->session);
- ops[i]->sym->session = NULL;
- }
- }
- return processed_ops;
-}
-
-/** Process a crypto op with length/offset in bits. */
-static int
-process_op_bit(struct rte_crypto_op *op, struct kasumi_session *session,
- struct ipsec_mb_qp *qp)
-{
- unsigned int processed_op;
-
- switch (session->op) {
- /* case KASUMI_OP_ONLY_CIPHER: */
- case IPSEC_MB_OP_ENCRYPT_ONLY:
- case IPSEC_MB_OP_DECRYPT_ONLY:
- processed_op = process_kasumi_cipher_op_bit(qp, op, session);
- break;
- /* case KASUMI_OP_ONLY_AUTH: */
- case IPSEC_MB_OP_HASH_GEN_ONLY:
- case IPSEC_MB_OP_HASH_VERIFY_ONLY:
- processed_op = process_kasumi_hash_op(qp, &op, session, 1);
- break;
- /* case KASUMI_OP_CIPHER_AUTH: */
- case IPSEC_MB_OP_ENCRYPT_THEN_HASH_GEN:
- processed_op = process_kasumi_cipher_op_bit(qp, op, session);
- if (processed_op == 1)
- process_kasumi_hash_op(qp, &op, session, 1);
- break;
- /* case KASUMI_OP_AUTH_CIPHER: */
- case IPSEC_MB_OP_HASH_VERIFY_THEN_DECRYPT:
- processed_op = process_kasumi_hash_op(qp, &op, session, 1);
- if (processed_op == 1)
- process_kasumi_cipher_op_bit(qp, op, session);
- break;
- default:
- /* Operation not supported. */
- processed_op = 0;
- }
-
- /*
- * If there was no error/authentication failure,
- * change status to successful.
- */
- if (op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
- op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
-
- /* Free session if a session-less crypto op. */
- if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
- memset(CRYPTODEV_GET_SYM_SESS_PRIV(op->sym->session), 0,
- sizeof(struct kasumi_session));
- rte_mempool_put(qp->sess_mp, (void *)op->sym->session);
- op->sym->session = NULL;
- }
- return processed_op;
-}
-
-static uint16_t
-kasumi_pmd_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
- uint16_t nb_ops)
-{
- struct rte_crypto_op *c_ops[nb_ops];
- struct rte_crypto_op *curr_c_op = NULL;
-
- struct kasumi_session *prev_sess = NULL, *curr_sess = NULL;
- struct ipsec_mb_qp *qp = queue_pair;
- unsigned int i;
- uint8_t burst_size = 0;
- uint8_t processed_ops;
- unsigned int nb_dequeued;
-
- nb_dequeued = rte_ring_dequeue_burst(qp->ingress_queue,
- (void **)ops, nb_ops, NULL);
- for (i = 0; i < nb_dequeued; i++) {
- curr_c_op = ops[i];
-
-#ifdef RTE_LIBRTE_PMD_KASUMI_DEBUG
- if (!rte_pktmbuf_is_contiguous(curr_c_op->sym->m_src)
- || (curr_c_op->sym->m_dst != NULL
- && !rte_pktmbuf_is_contiguous(
- curr_c_op->sym->m_dst))) {
- IPSEC_MB_LOG(ERR,
- "PMD supports only contiguous mbufs, op (%p) provides noncontiguous mbuf as source/destination buffer.",
- curr_c_op);
- curr_c_op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
- break;
- }
-#endif
-
- /* Set status as enqueued (not processed yet) by default. */
- curr_c_op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
-
- curr_sess = (struct kasumi_session *)
- ipsec_mb_get_session_private(qp, curr_c_op);
- if (unlikely(curr_sess == NULL
- || curr_sess->op == IPSEC_MB_OP_NOT_SUPPORTED)) {
- curr_c_op->status
- = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
- break;
- }
-
- /* If length/offset is at bit-level, process this buffer alone.
- */
- if (((curr_c_op->sym->cipher.data.length % BYTE_LEN) != 0)
- || ((ops[i]->sym->cipher.data.offset % BYTE_LEN) != 0)) {
- /* Process the ops of the previous session. */
- if (prev_sess != NULL) {
- processed_ops = process_ops(c_ops, prev_sess,
- qp, burst_size);
- if (processed_ops < burst_size) {
- burst_size = 0;
- break;
- }
-
- burst_size = 0;
- prev_sess = NULL;
- }
-
- processed_ops = process_op_bit(curr_c_op,
- curr_sess, qp);
- if (processed_ops != 1)
- break;
-
- continue;
- }
-
- /* Batch ops that share the same session. */
- if (prev_sess == NULL) {
- prev_sess = curr_sess;
- c_ops[burst_size++] = curr_c_op;
- } else if (curr_sess == prev_sess) {
- c_ops[burst_size++] = curr_c_op;
- /*
- * When there are enough ops to process in a batch,
- * process them, and start a new batch.
- */
- if (burst_size == KASUMI_MAX_BURST) {
- processed_ops = process_ops(c_ops, prev_sess,
- qp, burst_size);
- if (processed_ops < burst_size) {
- burst_size = 0;
- break;
- }
-
- burst_size = 0;
- prev_sess = NULL;
- }
- } else {
- /*
- * Different session, process the ops
- * of the previous session.
- */
- processed_ops = process_ops(c_ops, prev_sess, qp,
- burst_size);
- if (processed_ops < burst_size) {
- burst_size = 0;
- break;
- }
-
- burst_size = 0;
- prev_sess = curr_sess;
-
- c_ops[burst_size++] = curr_c_op;
- }
- }
-
- if (burst_size != 0) {
- /* Process the crypto ops of the last session. */
- processed_ops = process_ops(c_ops, prev_sess, qp, burst_size);
- }
-
- qp->stats.dequeued_count += i;
- return i;
-}
+#include "pmd_aesni_mb_priv.h"
struct rte_cryptodev_ops kasumi_pmd_ops = {
.dev_configure = ipsec_mb_config,
@@ -460,7 +61,7 @@ RTE_INIT(ipsec_mb_register_kasumi)
= &ipsec_mb_pmds[IPSEC_MB_PMD_TYPE_KASUMI];
kasumi_data->caps = kasumi_capabilities;
- kasumi_data->dequeue_burst = kasumi_pmd_dequeue_burst;
+ kasumi_data->dequeue_burst = aesni_mb_dequeue_burst;
kasumi_data->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO
| RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING
| RTE_CRYPTODEV_FF_NON_BYTE_ALIGNED_DATA
@@ -469,7 +70,8 @@ RTE_INIT(ipsec_mb_register_kasumi)
| RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;
kasumi_data->internals_priv_size = 0;
kasumi_data->ops = &kasumi_pmd_ops;
- kasumi_data->qp_priv_size = sizeof(struct kasumi_qp_data);
- kasumi_data->session_configure = kasumi_session_configure;
- kasumi_data->session_priv_size = sizeof(struct kasumi_session);
+ kasumi_data->qp_priv_size = sizeof(struct aesni_mb_qp_data);
+ kasumi_data->session_configure = aesni_mb_session_configure;
+ kasumi_data->session_priv_size =
+ sizeof(struct aesni_mb_session);
}
diff --git a/drivers/crypto/ipsec_mb/pmd_kasumi_priv.h b/drivers/crypto/ipsec_mb/pmd_kasumi_priv.h
index 8db1d1cc5b..3223cf1a14 100644
--- a/drivers/crypto/ipsec_mb/pmd_kasumi_priv.h
+++ b/drivers/crypto/ipsec_mb/pmd_kasumi_priv.h
@@ -9,8 +9,6 @@
#define KASUMI_KEY_LENGTH 16
#define KASUMI_IV_LENGTH 8
-#define KASUMI_MAX_BURST 4
-#define BYTE_LEN 8
#define KASUMI_DIGEST_LENGTH 4
uint8_t pmd_driver_id_kasumi;
@@ -60,22 +58,4 @@ static const struct rte_cryptodev_capabilities kasumi_capabilities[] = {
RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
};
-/** KASUMI private session structure */
-struct kasumi_session {
- /* Keys have to be 16-byte aligned */
- kasumi_key_sched_t pKeySched_cipher;
- kasumi_key_sched_t pKeySched_hash;
- enum ipsec_mb_operation op;
- enum rte_crypto_auth_operation auth_op;
- uint16_t cipher_iv_offset;
-} __rte_cache_aligned;
-
-struct kasumi_qp_data {
- uint8_t temp_digest[KASUMI_DIGEST_LENGTH];
- /* *< Buffers used to store the digest generated
- * by the driver when verifying a digest provided
- * by the user (using authentication verify operation)
- */
-};
-
#endif /* _PMD_KASUMI_PRIV_H_ */
--
2.25.1
^ permalink raw reply [flat|nested] 45+ messages in thread
* Re: [PATCH v6 1/5] ci: replace IPsec-mb package install
2024-03-12 13:50 ` [PATCH v6 1/5] ci: replace IPsec-mb package install Brian Dooley
` (3 preceding siblings ...)
2024-03-12 13:50 ` [PATCH v6 5/5] crypto/ipsec_mb: unify some IPsec MB PMDs Brian Dooley
@ 2024-03-12 13:54 ` David Marchand
2024-03-12 15:26 ` Power, Ciara
2024-03-12 16:05 ` David Marchand
2024-03-12 18:04 ` Power, Ciara
6 siblings, 1 reply; 45+ messages in thread
From: David Marchand @ 2024-03-12 13:54 UTC (permalink / raw)
To: Brian Dooley
Cc: Aaron Conole, Michael Santana, dev, gakhil, pablo.de.lara.guarch,
probb, wathsala.vithanage, Ciara Power
Hello,
On Tue, Mar 12, 2024 at 2:50 PM Brian Dooley <brian.dooley@intel.com> wrote:
>
> From: Ciara Power <ciara.power@intel.com>
>
> The IPsec-mb version that is available through current package
> managers is 1.2.
> This release moves the minimum required IPsec-mb version for IPsec-mb
> based SW PMDs to 1.4.
> To compile these PMDs, a manual step is added to install IPsec-mb v1.4
> using dpkg.
>
> Signed-off-by: Ciara Power <ciara.power@intel.com>
> ---
> .github/workflows/build.yml | 25 ++++++++++++++++++++++---
> 1 file changed, 22 insertions(+), 3 deletions(-)
>
> diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml
> index 776fbf6f30..ed44b1f730 100644
> --- a/.github/workflows/build.yml
> +++ b/.github/workflows/build.yml
> @@ -106,9 +106,15 @@ jobs:
> run: sudo apt update || true
> - name: Install packages
> run: sudo apt install -y ccache libarchive-dev libbsd-dev libbpf-dev
> - libfdt-dev libibverbs-dev libipsec-mb-dev libisal-dev libjansson-dev
> + libfdt-dev libibverbs-dev libisal-dev libjansson-dev
> libnuma-dev libpcap-dev libssl-dev ninja-build pkg-config python3-pip
> python3-pyelftools python3-setuptools python3-wheel zlib1g-dev
> + - name: Install ipsec-mb library
> + run: |
> + wget "https://launchpad.net/ubuntu/+archive/primary/+files/libipsec-mb-dev_1.4-3_amd64.deb"
> + wget "https://launchpad.net/ubuntu/+archive/primary/+files/libipsec-mb1_1.4-3_amd64.deb"
> + sudo dpkg -i libipsec-mb1_1.4-3_amd64.deb
> + sudo dpkg -i libipsec-mb-dev_1.4-3_amd64.deb
I am not enthusiastic about advertising a kind of out-of-tree approach.
That's a bit like if NVIDIA asked us to stop testing distribution
rdma-core packages and instead rely on MOFED.
Why are we removing support for versions that are packaged by the main
distributions?
--
David Marchand
^ permalink raw reply [flat|nested] 45+ messages in thread
* RE: [PATCH v6 1/5] ci: replace IPsec-mb package install
2024-03-12 13:54 ` [PATCH v6 1/5] ci: replace IPsec-mb package install David Marchand
@ 2024-03-12 15:26 ` Power, Ciara
2024-03-12 16:13 ` David Marchand
0 siblings, 1 reply; 45+ messages in thread
From: Power, Ciara @ 2024-03-12 15:26 UTC (permalink / raw)
To: Marchand, David, Dooley, Brian
Cc: Aaron Conole, Michael Santana, dev, gakhil, De Lara Guarch,
Pablo, probb, wathsala.vithanage
Hi David,
> -----Original Message-----
> From: David Marchand <david.marchand@redhat.com>
> Sent: Tuesday, March 12, 2024 1:54 PM
> To: Dooley, Brian <brian.dooley@intel.com>
> Cc: Aaron Conole <aconole@redhat.com>; Michael Santana
> <maicolgabriel@hotmail.com>; dev@dpdk.org; gakhil@marvell.com; De Lara
> Guarch, Pablo <pablo.de.lara.guarch@intel.com>; probb@iol.unh.edu;
> wathsala.vithanage@arm.com; Power, Ciara <ciara.power@intel.com>
> Subject: Re: [PATCH v6 1/5] ci: replace IPsec-mb package install
>
> Hello,
>
> On Tue, Mar 12, 2024 at 2:50 PM Brian Dooley <brian.dooley@intel.com>
> wrote:
> >
> > From: Ciara Power <ciara.power@intel.com>
> >
> > The IPsec-mb version that is available through current package
> > managers is 1.2.
> > This release moves the minimum required IPsec-mb version for IPsec-mb
> > based SW PMDs to 1.4.
> > To compile these PMDs, a manual step is added to install IPsec-mb v1.4
> > using dpkg.
> >
> > Signed-off-by: Ciara Power <ciara.power@intel.com>
> > ---
> > .github/workflows/build.yml | 25 ++++++++++++++++++++++---
> > 1 file changed, 22 insertions(+), 3 deletions(-)
> >
> > diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml
> > index 776fbf6f30..ed44b1f730 100644
> > --- a/.github/workflows/build.yml
> > +++ b/.github/workflows/build.yml
> > @@ -106,9 +106,15 @@ jobs:
> > run: sudo apt update || true
> > - name: Install packages
> > run: sudo apt install -y ccache libarchive-dev libbsd-dev libbpf-dev
> > - libfdt-dev libibverbs-dev libipsec-mb-dev libisal-dev libjansson-dev
> > + libfdt-dev libibverbs-dev libisal-dev libjansson-dev
> > libnuma-dev libpcap-dev libssl-dev ninja-build pkg-config python3-pip
> > python3-pyelftools python3-setuptools python3-wheel
> > zlib1g-dev
> > + - name: Install ipsec-mb library
> > + run: |
> > + wget "https://launchpad.net/ubuntu/+archive/primary/+files/libipsec-
> mb-dev_1.4-3_amd64.deb"
> > + wget "https://launchpad.net/ubuntu/+archive/primary/+files/libipsec-
> mb1_1.4-3_amd64.deb"
> > + sudo dpkg -i libipsec-mb1_1.4-3_amd64.deb
> > + sudo dpkg -i libipsec-mb-dev_1.4-3_amd64.deb
>
> I am not enthusiastic about advertising a kind of out-of-tree approach.
> That's a bit like if NVIDIA asked us to stop testing distribution rdma-core
> packages and instead rely on MOFED.
>
> Why are we removing support for versions that are packaged by the main
> distributions?
With Ubuntu 22.04, ipsec-mb v1.2 is the version available through the package manager.
We were aiming to make v1.4 the minimum version for ipsec-mb PMDs from this release onwards,
removing the many ifdef codepaths in the PMDs for older versions. (patch included in this patchset)
Some of the other CI environments were updated to install v1.4 already to support this change,
but we found the GitHub CI robot was limited in the ipsec-mb versions available through the package manager.
It had some failures comparing ABI with v1.2 installed (the SW PMDs were compiled in the reference build, but not after the patch).
To support the new minimum SW PMD ipsec-mb version for this CI, we thought installing v1.4 like this would suffice.
Thanks,
Ciara
^ permalink raw reply [flat|nested] 45+ messages in thread
* Re: [PATCH v6 1/5] ci: replace IPsec-mb package install
2024-03-12 13:50 ` [PATCH v6 1/5] ci: replace IPsec-mb package install Brian Dooley
` (4 preceding siblings ...)
2024-03-12 13:54 ` [PATCH v6 1/5] ci: replace IPsec-mb package install David Marchand
@ 2024-03-12 16:05 ` David Marchand
2024-03-12 16:16 ` Jack Bond-Preston
2024-03-12 17:08 ` Power, Ciara
2024-03-12 18:04 ` Power, Ciara
6 siblings, 2 replies; 45+ messages in thread
From: David Marchand @ 2024-03-12 16:05 UTC (permalink / raw)
To: Brian Dooley
Cc: Aaron Conole, Michael Santana, dev, gakhil, pablo.de.lara.guarch,
probb, wathsala.vithanage, Ciara Power
On Tue, Mar 12, 2024 at 2:50 PM Brian Dooley <brian.dooley@intel.com> wrote:
> @@ -187,11 +193,18 @@ jobs:
> run: docker exec -i dpdk dnf update -y
> - name: Install packages
> if: steps.image_cache.outputs.cache-hit != 'true'
> - run: docker exec -i dpdk dnf install -y ccache intel-ipsec-mb-devel
> + run: docker exec -i dpdk dnf install -y ccache
Removing ipsec-mb means we lose build coverage for those crypto
drivers on Fedora.
> isa-l-devel jansson-devel libarchive-devel libatomic libbsd-devel
> libbpf-devel libfdt-devel libpcap-devel libxdp-devel ninja-build
> numactl-devel openssl-devel python3-pip python3-pyelftools
> python3-setuptools python3-wheel rdma-core-devel zlib-devel
> + - name: Install ipsec-mb library
> + if: steps.image_cache.outputs.cache-hit != 'true'
> + run: |
> + wget "https://launchpad.net/ubuntu/+archive/primary/+files/libipsec-mb-dev_1.4-3_amd64.deb"
> + wget "https://launchpad.net/ubuntu/+archive/primary/+files/libipsec-mb1_1.4-3_amd64.deb"
> + sudo dpkg -i libipsec-mb1_1.4-3_amd64.deb
> + sudo dpkg -i libipsec-mb-dev_1.4-3_amd64.deb
And this hunk is useless.
This installs the deb in the Ubuntu "host", not in the Fedora container.
--
David Marchand
^ permalink raw reply [flat|nested] 45+ messages in thread
* Re: [PATCH v6 1/5] ci: replace IPsec-mb package install
2024-03-12 15:26 ` Power, Ciara
@ 2024-03-12 16:13 ` David Marchand
2024-03-12 17:07 ` Power, Ciara
0 siblings, 1 reply; 45+ messages in thread
From: David Marchand @ 2024-03-12 16:13 UTC (permalink / raw)
To: Power, Ciara
Cc: Dooley, Brian, Aaron Conole, Michael Santana, dev, gakhil,
De Lara Guarch, Pablo, probb, wathsala.vithanage,
Thomas Monjalon, Bruce Richardson
On Tue, Mar 12, 2024 at 4:26 PM Power, Ciara <ciara.power@intel.com> wrote:
> > From: David Marchand <david.marchand@redhat.com>
> > On Tue, Mar 12, 2024 at 2:50 PM Brian Dooley <brian.dooley@intel.com>
> > wrote:
> > >
> > > From: Ciara Power <ciara.power@intel.com>
> > >
> > > The IPsec-mb version that is available through current package
> > > managers is 1.2.
> > > This release moves the minimum required IPsec-mb version for IPsec-mb
> > > based SW PMDs to 1.4.
> > > To compile these PMDs, a manual step is added to install IPsec-mb v1.4
> > > using dpkg.
> > >
> > > Signed-off-by: Ciara Power <ciara.power@intel.com>
> > > ---
> > > .github/workflows/build.yml | 25 ++++++++++++++++++++++---
> > > 1 file changed, 22 insertions(+), 3 deletions(-)
> > >
> > > diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml
> > > index 776fbf6f30..ed44b1f730 100644
> > > --- a/.github/workflows/build.yml
> > > +++ b/.github/workflows/build.yml
> > > @@ -106,9 +106,15 @@ jobs:
> > > run: sudo apt update || true
> > > - name: Install packages
> > > run: sudo apt install -y ccache libarchive-dev libbsd-dev libbpf-dev
> > > - libfdt-dev libibverbs-dev libipsec-mb-dev libisal-dev libjansson-dev
> > > + libfdt-dev libibverbs-dev libisal-dev libjansson-dev
> > > libnuma-dev libpcap-dev libssl-dev ninja-build pkg-config python3-pip
> > > python3-pyelftools python3-setuptools python3-wheel
> > > zlib1g-dev
> > > + - name: Install ipsec-mb library
> > > + run: |
> > > + wget "https://launchpad.net/ubuntu/+archive/primary/+files/libipsec-
> > mb-dev_1.4-3_amd64.deb"
> > > + wget "https://launchpad.net/ubuntu/+archive/primary/+files/libipsec-
> > mb1_1.4-3_amd64.deb"
> > > + sudo dpkg -i libipsec-mb1_1.4-3_amd64.deb
> > > + sudo dpkg -i libipsec-mb-dev_1.4-3_amd64.deb
> >
> > I am not enthusiastic about advertising a kind of out-of-tree approach.
> > That's a bit like if NVIDIA asked us to stop testing distribution rdma-core
> > packages and instead rely on MOFED.
> >
> > Why are we removing support for versions that are packaged by the main
> > distributions?
>
> With Ubuntu 22.04, ipsec-mb v1.2 is the version available through the package manager.
> We were aiming to make v1.4 the minimum version for ipsec-mb PMDs from this release onwards,
> removing the many ifdef codepaths in the PMDs for older versions. (patch included in this patchset)
>
> Some of the other CI environments were updated to install v1.4 already to support this change,
> but we found the github CI robot was limited for ipsec-mb versions when using the package manager.
> It had some failures comparing ABI with v1.2 installed (SW PMDs compiled in reference build, but not compiled after patch).
Such a change means that users of the Ubuntu/Fedora dpdk package lose
access to those drivers hypothetically.
"Hypothetically", because in reality, Ubuntu and others distributions
won't update to non LTS versions.
On the other hand, if a user was building DPDK (and not the one
provided by the distribution), the user now has to stop using the
ipsec-mb provided by the distribution: building/packaging/maintaining
the ipsec-mb library is now forced onto the user's plate.
I am unclear whether this qualifies as an ABI breakage, but I am not
comfortable with this change.
--
David Marchand
^ permalink raw reply [flat|nested] 45+ messages in thread
* Re: [PATCH v6 1/5] ci: replace IPsec-mb package install
2024-03-12 16:05 ` David Marchand
@ 2024-03-12 16:16 ` Jack Bond-Preston
2024-03-12 17:08 ` Power, Ciara
1 sibling, 0 replies; 45+ messages in thread
From: Jack Bond-Preston @ 2024-03-12 16:16 UTC (permalink / raw)
To: David Marchand, Brian Dooley
Cc: Aaron Conole, Michael Santana, dev, gakhil, pablo.de.lara.guarch,
probb, wathsala.vithanage, Ciara Power
Hi,
On 08/03/2024 16:05, Power, Ciara wrote:
> I think, for basic compile to work for both repos, any functions in the
> ipsec_mb.h file need to be defined in a C file to avoid issues.
> But, I understand only SNOW3G + ZUC are the focus for the Arm ipsec-mb
> repo, so a HMAC related function like the one throwing the error isn't
> used.
>
> Perhaps having empty stubs for all other functions such as the
> imb_hmac_ipad_opad would be sufficient to allow compiling, because PMD
> expects it to exist somewhere in the library.
We've stubbed the method hmac_ipad_opad in the Arm ipsec-mb repo. This
is now tagged as SECLIB-IPSEC-2024.03.12. See here:
https://gitlab.arm.com/arm-reference-solutions/ipsec-mb/-/tree/SECLIB-IPSEC-2024.03.12
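A minimal no-op stub of that shape (the prototype is assumed to match the
intel-ipsec-mb v1.4 declaration; the actual change in the Arm tree may
differ) would look roughly like:

    /* Link-time stub only: the AESNI_MB codepath expects the symbol to
     * exist even when HMAC precomputes are never requested. */
    void
    imb_hmac_ipad_opad(IMB_MGR *mb_mgr, const IMB_HASH_ALG sha_type,
                       const void *pkey, const size_t key_len,
                       void *ipad_hash, void *opad_hash)
    {
            (void) mb_mgr;
            (void) sha_type;
            (void) pkey;
            (void) key_len;
            (void) ipad_hash;
            (void) opad_hash;
    }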
Thanks,
Jack
^ permalink raw reply [flat|nested] 45+ messages in thread
* RE: [EXTERNAL] [PATCH v5 1/4] crypto/ipsec_mb: bump minimum IPsec Multi-buffer version
2024-03-07 16:21 ` Wathsala Wathawana Vithanage
2024-03-08 16:05 ` Power, Ciara
@ 2024-03-12 16:26 ` Wathsala Wathawana Vithanage
2024-03-15 18:24 ` Patrick Robb
1 sibling, 1 reply; 45+ messages in thread
From: Wathsala Wathawana Vithanage @ 2024-03-12 16:26 UTC (permalink / raw)
To: Wathsala Wathawana Vithanage, Power, Ciara, Patrick Robb
Cc: Akhil Goyal, Dooley, Brian, Ji, Kai, De Lara Guarch, Pablo,
Aaron Conole, dev, Sivaramakrishnan, VenkatX, thomas, Marchand,
David, nd, nd
> >
> > If the correct version is being installed and picked up, maybe there
> > is something missing for that function definition in arm-ipsec-mb repo.
> > Wathsala, can you check that please?
>
> We are working on reproducing this issue. Will update asap.
A stub has been added in the Arm ipsec-mb library to fix this issue. The fix is in the latest tag SECLIB-IPSEC-2024.03.12.
^ permalink raw reply [flat|nested] 45+ messages in thread
* RE: [PATCH v6 1/5] ci: replace IPsec-mb package install
2024-03-12 16:13 ` David Marchand
@ 2024-03-12 17:07 ` Power, Ciara
0 siblings, 0 replies; 45+ messages in thread
From: Power, Ciara @ 2024-03-12 17:07 UTC (permalink / raw)
To: Marchand, David
Cc: Dooley, Brian, Aaron Conole, Michael Santana, dev, gakhil,
De Lara Guarch, Pablo, probb, wathsala.vithanage,
Thomas Monjalon, Richardson, Bruce
> -----Original Message-----
> From: David Marchand <david.marchand@redhat.com>
> Sent: Tuesday, March 12, 2024 4:14 PM
> To: Power, Ciara <ciara.power@intel.com>
> Cc: Dooley, Brian <brian.dooley@intel.com>; Aaron Conole
> <aconole@redhat.com>; Michael Santana <maicolgabriel@hotmail.com>;
> dev@dpdk.org; gakhil@marvell.com; De Lara Guarch, Pablo
> <pablo.de.lara.guarch@intel.com>; probb@iol.unh.edu;
> wathsala.vithanage@arm.com; Thomas Monjalon <thomas@monjalon.net>;
> Richardson, Bruce <bruce.richardson@intel.com>
> Subject: Re: [PATCH v6 1/5] ci: replace IPsec-mb package install
>
> On Tue, Mar 12, 2024 at 4:26 PM Power, Ciara <ciara.power@intel.com> wrote:
> > > From: David Marchand <david.marchand@redhat.com> On Tue, Mar 12,
> > > 2024 at 2:50 PM Brian Dooley <brian.dooley@intel.com>
> > > wrote:
> > > >
> > > > From: Ciara Power <ciara.power@intel.com>
> > > >
> > > > The IPsec-mb version that is available through current package
> > > > managers is 1.2.
> > > > This release moves the minimum required IPsec-mb version for
> > > > IPsec-mb based SW PMDs to 1.4.
> > > > To compile these PMDs, a manual step is added to install IPsec-mb
> > > > v1.4 using dpkg.
> > > >
> > > > Signed-off-by: Ciara Power <ciara.power@intel.com>
> > > > ---
> > > > .github/workflows/build.yml | 25 ++++++++++++++++++++++---
> > > > 1 file changed, 22 insertions(+), 3 deletions(-)
> > > >
> > > > diff --git a/.github/workflows/build.yml
> > > > b/.github/workflows/build.yml index 776fbf6f30..ed44b1f730 100644
> > > > --- a/.github/workflows/build.yml
> > > > +++ b/.github/workflows/build.yml
> > > > @@ -106,9 +106,15 @@ jobs:
> > > > run: sudo apt update || true
> > > > - name: Install packages
> > > > run: sudo apt install -y ccache libarchive-dev libbsd-dev libbpf-dev
> > > > - libfdt-dev libibverbs-dev libipsec-mb-dev libisal-dev libjansson-dev
> > > > + libfdt-dev libibverbs-dev libisal-dev libjansson-dev
> > > > libnuma-dev libpcap-dev libssl-dev ninja-build pkg-config python3-
> pip
> > > > python3-pyelftools python3-setuptools python3-wheel
> > > > zlib1g-dev
> > > > + - name: Install ipsec-mb library
> > > > + run: |
> > > > + wget
> > > > + "https://launchpad.net/ubuntu/+archive/primary/+files/libipsec-
> > > mb-dev_1.4-3_amd64.deb"
> > > > + wget
> > > > + "https://launchpad.net/ubuntu/+archive/primary/+files/libipsec-
> > > mb1_1.4-3_amd64.deb"
> > > > + sudo dpkg -i libipsec-mb1_1.4-3_amd64.deb
> > > > + sudo dpkg -i libipsec-mb-dev_1.4-3_amd64.deb
> > >
> > > I am not enthusiastic about advertising a kind of out-of-tree approach.
> > > That's a bit like if NVIDIA asked us to stop testing distribution
> > > rdma-core packages and instead rely on MOFED.
> > >
> > > Why are we removing support for versions that are packaged by the
> > > main distributions?
> >
> > With Ubuntu 22.04, ipsec-mb v1.2 is the version available through the
> > package manager.
> > We were aiming to make v1.4 the minimum version for ipsec-mb PMDs from
> > this release onwards, removing the many ifdef codepaths in the PMDs
> > for older versions. (patch included in this patchset)
> >
> > Some of the other CI environments were updated to install v1.4 already
> > to support this change, but we found the github CI robot was limited for
> > ipsec-mb versions when using the package manager.
> > It had some failures comparing ABI with v1.2 installed (SW PMDs compiled in
> > reference build, but not compiled after patch).
>
> Such a change means that users of the Ubuntu/Fedora dpdk package lose access
> to those drivers hypothetically.
> "Hypothetically", because in reality, Ubuntu and other distributions won't
> update to non-LTS versions.
>
> On the other hand, if a user was building DPDK (and not the one provided by
> the distribution), now the user has to stop using the ipsec mb provided by the
> distribution: building/packaging/maintaining the ipsec mb library is now forced
> onto the user's plate.
>
> I am unclear if this qualifies as an ABI breakage, but I am not comfortable with this
> change.
Hi David,
Ah, okay - thanks for the explanation.
Those are points I had missed, but it makes sense.
We will drop the version bump to v1.4 for this release, and revisit in a later release when suitable.
Thanks,
Ciara
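The "ifdef codepaths" mentioned above follow a compile-time version-guard pattern along the lines below. This is a minimal sketch built on the IMB_VERSION() and IMB_VERSION_NUM macros from intel-ipsec-mb.h; the setup_session helpers are hypothetical placeholders, not code copied from the PMD sources.

    /* sketch.c - illustrative only, not taken from the DPDK PMD sources */
    #include <intel-ipsec-mb.h>

    /* Hypothetical helpers standing in for the two codepaths a PMD carries. */
    static void setup_session_new_api(IMB_MGR *mgr) { (void)mgr; /* >= v1.4 path */ }
    static void setup_session_legacy(IMB_MGR *mgr)  { (void)mgr; /* pre-v1.4 fallback */ }

    static void
    setup_session(IMB_MGR *mgr)
    {
    /* IMB_VERSION() and IMB_VERSION_NUM come from intel-ipsec-mb.h. */
    #if IMB_VERSION_NUM >= IMB_VERSION(1, 4, 0)
        setup_session_new_api(mgr);
    #else
        setup_session_legacy(mgr);
    #endif
    }

    int main(void)
    {
        IMB_MGR *mgr = alloc_mb_mgr(0);  /* 0 = default manager flags */
        if (mgr == NULL)
            return 1;
        setup_session(mgr);
        free_mb_mgr(mgr);
        return 0;
    }

With the minimum raised to v1.4, the guards and the legacy branches can be deleted outright, which is what the postponed unification patches set out to do.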
^ permalink raw reply [flat|nested] 45+ messages in thread
* RE: [PATCH v6 1/5] ci: replace IPsec-mb package install
2024-03-12 16:05 ` David Marchand
2024-03-12 16:16 ` Jack Bond-Preston
@ 2024-03-12 17:08 ` Power, Ciara
1 sibling, 0 replies; 45+ messages in thread
From: Power, Ciara @ 2024-03-12 17:08 UTC (permalink / raw)
To: Marchand, David, Dooley, Brian
Cc: Aaron Conole, Michael Santana, dev, gakhil, De Lara Guarch,
Pablo, probb, wathsala.vithanage
> -----Original Message-----
> From: David Marchand <david.marchand@redhat.com>
> Sent: Tuesday, March 12, 2024 4:05 PM
> To: Dooley, Brian <brian.dooley@intel.com>
> Cc: Aaron Conole <aconole@redhat.com>; Michael Santana
> <maicolgabriel@hotmail.com>; dev@dpdk.org; gakhil@marvell.com; De Lara
> Guarch, Pablo <pablo.de.lara.guarch@intel.com>; probb@iol.unh.edu;
> wathsala.vithanage@arm.com; Power, Ciara <ciara.power@intel.com>
> Subject: Re: [PATCH v6 1/5] ci: replace IPsec-mb package install
>
> On Tue, Mar 12, 2024 at 2:50 PM Brian Dooley <brian.dooley@intel.com>
> wrote:
> > @@ -187,11 +193,18 @@ jobs:
> > run: docker exec -i dpdk dnf update -y
> > - name: Install packages
> > if: steps.image_cache.outputs.cache-hit != 'true'
> > - run: docker exec -i dpdk dnf install -y ccache intel-ipsec-mb-devel
> > + run: docker exec -i dpdk dnf install -y ccache
>
> Removing ipsec-mb means we lose build coverage for those crypto drivers on
> Fedora.
>
>
> > isa-l-devel jansson-devel libarchive-devel libatomic libbsd-devel
> > libbpf-devel libfdt-devel libpcap-devel libxdp-devel ninja-build
> > numactl-devel openssl-devel python3-pip python3-pyelftools
> > python3-setuptools python3-wheel rdma-core-devel zlib-devel
> > + - name: Install ipsec-mb library
> > + if: steps.image_cache.outputs.cache-hit != 'true'
> > + run: |
> > + wget "https://launchpad.net/ubuntu/+archive/primary/+files/libipsec-
> mb-dev_1.4-3_amd64.deb"
> > + wget "https://launchpad.net/ubuntu/+archive/primary/+files/libipsec-
> mb1_1.4-3_amd64.deb"
> > + sudo dpkg -i libipsec-mb1_1.4-3_amd64.deb
> > + sudo dpkg -i libipsec-mb-dev_1.4-3_amd64.deb
>
> And this hunk is useless.
> This installs the deb in the Ubuntu "host", not in the Fedora container.
Thanks, yes, good catch.
This patch isn't needed anymore as per the discussion on the other patch; no version bump for 24.03.
Thanks,
Ciara
^ permalink raw reply [flat|nested] 45+ messages in thread
* RE: [PATCH v6 1/5] ci: replace IPsec-mb package install
2024-03-12 13:50 ` [PATCH v6 1/5] ci: replace IPsec-mb package install Brian Dooley
` (5 preceding siblings ...)
2024-03-12 16:05 ` David Marchand
@ 2024-03-12 18:04 ` Power, Ciara
2024-03-15 18:26 ` Patrick Robb
6 siblings, 1 reply; 45+ messages in thread
From: Power, Ciara @ 2024-03-12 18:04 UTC (permalink / raw)
To: Dooley, Brian, Aaron Conole, Michael Santana
Cc: dev, gakhil, De Lara Guarch, Pablo, probb, wathsala.vithanage
> -----Original Message-----
> From: Dooley, Brian <brian.dooley@intel.com>
> Sent: Tuesday, March 12, 2024 1:50 PM
> To: Aaron Conole <aconole@redhat.com>; Michael Santana
> <maicolgabriel@hotmail.com>
> Cc: dev@dpdk.org; gakhil@marvell.com; De Lara Guarch, Pablo
> <pablo.de.lara.guarch@intel.com>; probb@iol.unh.edu;
> wathsala.vithanage@arm.com; Power, Ciara <ciara.power@intel.com>
> Subject: [PATCH v6 1/5] ci: replace IPsec-mb package install
>
> From: Ciara Power <ciara.power@intel.com>
>
> The IPsec-mb version that is available through current package managers is 1.2.
> This release moves the minimum required IPsec-mb version for IPsec-mb based
> SW PMDs to 1.4.
> To compile these PMDs, a manual step is added to install IPsec-mb v1.4 using
> dpkg.
>
> Signed-off-by: Ciara Power <ciara.power@intel.com>
> ---
Hi folks,
Due to the timing of the release and the churn of this patchset, we will hold off the changes until a later release.
This includes nearly all of the patches in this set (unifying SW PMDs, HMAC_ipad_opad API, version bump).
The doc update patch is still applicable and a small change, so we can send that along for 24.03 rc3/rc4.
Thanks,
Ciara
^ permalink raw reply [flat|nested] 45+ messages in thread
* [PATCH v7 1/2] doc: remove outdated version details
2023-12-12 15:36 [PATCH v1] crypto/ipsec_mb: unified IPsec MB interface Brian Dooley
` (4 preceding siblings ...)
2024-03-12 13:50 ` [PATCH v6 1/5] ci: replace IPsec-mb package install Brian Dooley
@ 2024-03-14 10:37 ` Brian Dooley
2024-03-14 10:37 ` [PATCH v7 2/2] doc: announce Intel IPsec MB version bump Brian Dooley
2024-03-22 19:33 ` [EXTERNAL] [PATCH v7 1/2] doc: remove outdated version details Akhil Goyal
5 siblings, 2 replies; 45+ messages in thread
From: Brian Dooley @ 2024-03-14 10:37 UTC (permalink / raw)
To: Kai Ji, Pablo de Lara
Cc: dev, gakhil, Brian Dooley, Sivaramakrishnan Venkat, Wathsala Vithanage
SW PMDs documentation is updated to remove details of unsupported IPsec
Multi-buffer versions. DPDK older than 20.11 is end of life. So, older
DPDK versions are removed from the Crypto library version table.
Signed-off-by: Sivaramakrishnan Venkat <venkatx.sivaramakrishnan@intel.com>
Signed-off-by: Brian Dooley <brian.dooley@intel.com>
Acked-by: Pablo de Lara <pablo.de.lara.guarch@intel.com>
Acked-by: Wathsala Vithanage <wathsala.vithanage@arm.com>
---
v7:
- Corrected versions
v5:
- Rebased and added to patchset
v3:
- added second patch for outdated documentation updates.
---
doc/guides/cryptodevs/aesni_gcm.rst | 17 ++---------------
doc/guides/cryptodevs/aesni_mb.rst | 20 ++------------------
doc/guides/cryptodevs/chacha20_poly1305.rst | 10 +---------
doc/guides/cryptodevs/kasumi.rst | 13 ++-----------
doc/guides/cryptodevs/snow3g.rst | 13 ++-----------
doc/guides/cryptodevs/zuc.rst | 13 ++-----------
6 files changed, 11 insertions(+), 75 deletions(-)
diff --git a/doc/guides/cryptodevs/aesni_gcm.rst b/doc/guides/cryptodevs/aesni_gcm.rst
index f5773426ee..3af1486553 100644
--- a/doc/guides/cryptodevs/aesni_gcm.rst
+++ b/doc/guides/cryptodevs/aesni_gcm.rst
@@ -62,12 +62,6 @@ Once it is downloaded, extract it and follow these steps:
make
make install
-.. note::
-
- Compilation of the Multi-Buffer library is broken when GCC < 5.0, if library <= v0.53.
- If a lower GCC version than 5.0, the workaround proposed by the following link
- should be used: `<https://github.com/intel/intel-ipsec-mb/issues/40>`_.
-
As a reference, the following table shows a mapping between the past DPDK versions
and the external crypto libraries supported by them:
@@ -79,17 +73,10 @@ and the external crypto libraries supported by them:
============= ================================
DPDK version Crypto library version
============= ================================
- 16.04 - 16.11 Multi-buffer library 0.43 - 0.44
- 17.02 - 17.05 ISA-L Crypto v2.18
- 17.08 - 18.02 Multi-buffer library 0.46 - 0.48
- 18.05 - 19.02 Multi-buffer library 0.49 - 0.52
- 19.05 - 20.08 Multi-buffer library 0.52 - 0.55
- 20.11 - 21.08 Multi-buffer library 0.53 - 1.3*
- 21.11+ Multi-buffer library 1.0 - 1.5*
+ 20.11 - 21.08 Multi-buffer library 0.53 - 1.3
+ 21.11+ Multi-buffer library 1.0 - 1.5
============= ================================
-\* Multi-buffer library 1.0 or newer only works for Meson but not Make build system.
-
Initialization
--------------
diff --git a/doc/guides/cryptodevs/aesni_mb.rst b/doc/guides/cryptodevs/aesni_mb.rst
index b2e74ba417..3c77d0f463 100644
--- a/doc/guides/cryptodevs/aesni_mb.rst
+++ b/doc/guides/cryptodevs/aesni_mb.rst
@@ -121,12 +121,6 @@ Once it is downloaded, extract it and follow these steps:
make
make install
-.. note::
-
- Compilation of the Multi-Buffer library is broken when GCC < 5.0, if library <= v0.53.
- If a lower GCC version than 5.0, the workaround proposed by the following link
- should be used: `<https://github.com/intel/intel-ipsec-mb/issues/40>`_.
-
As a reference, the following table shows a mapping between the past DPDK versions
and the Multi-Buffer library version supported by them:
@@ -137,20 +131,10 @@ and the Multi-Buffer library version supported by them:
============== ============================
DPDK version Multi-buffer library version
============== ============================
- 2.2 - 16.11 0.43 - 0.44
- 17.02 0.44
- 17.05 - 17.08 0.45 - 0.48
- 17.11 0.47 - 0.48
- 18.02 0.48
- 18.05 - 19.02 0.49 - 0.52
- 19.05 - 19.08 0.52
- 19.11 - 20.08 0.52 - 0.55
- 20.11 - 21.08 0.53 - 1.3*
- 21.11+ 1.0 - 1.5*
+ 20.11 - 21.08 0.53 - 1.3
+ 21.11+ 1.0 - 1.5
============== ============================
-\* Multi-buffer library 1.0 or newer only works for Meson but not Make build system.
-
Initialization
--------------
diff --git a/doc/guides/cryptodevs/chacha20_poly1305.rst b/doc/guides/cryptodevs/chacha20_poly1305.rst
index 9d4bf86cf1..44cff85918 100644
--- a/doc/guides/cryptodevs/chacha20_poly1305.rst
+++ b/doc/guides/cryptodevs/chacha20_poly1305.rst
@@ -56,12 +56,6 @@ Once it is downloaded, extract it and follow these steps:
make
make install
-.. note::
-
- Compilation of the Multi-Buffer library is broken when GCC < 5.0, if library <= v0.53.
- If a lower GCC version than 5.0, the workaround proposed by the following link
- should be used: `<https://github.com/intel/intel-ipsec-mb/issues/40>`_.
-
As a reference, the following table shows a mapping between the past DPDK versions
and the external crypto libraries supported by them:
@@ -72,11 +66,9 @@ and the external crypto libraries supported by them:
============= ================================
DPDK version Crypto library version
============= ================================
- 21.11+ Multi-buffer library 1.0-1.5*
+ 21.11+ Multi-buffer library 1.0-1.5
============= ================================
-\* Multi-buffer library 1.0 or newer only works for Meson but not Make build system.
-
Initialization
--------------
diff --git a/doc/guides/cryptodevs/kasumi.rst b/doc/guides/cryptodevs/kasumi.rst
index 0989054875..4070f025e1 100644
--- a/doc/guides/cryptodevs/kasumi.rst
+++ b/doc/guides/cryptodevs/kasumi.rst
@@ -69,12 +69,6 @@ Once it is downloaded, extract it and follow these steps:
make
make install
-.. note::
-
- Compilation of the Multi-Buffer library is broken when GCC < 5.0, if library <= v0.53.
- If a lower GCC version than 5.0, the workaround proposed by the following link
- should be used: `<https://github.com/intel/intel-ipsec-mb/issues/40>`_.
-
As a reference, the following table shows a mapping between the past DPDK versions
and the external crypto libraries supported by them:
@@ -85,13 +79,10 @@ and the external crypto libraries supported by them:
============= ================================
DPDK version Crypto library version
============= ================================
- 16.11 - 19.11 LibSSO KASUMI
- 20.02 - 21.08 Multi-buffer library 0.53 - 1.3*
- 21.11+ Multi-buffer library 1.0 - 1.5*
+ 20.02 - 21.08 Multi-buffer library 0.53 - 1.3
+ 21.11+ Multi-buffer library 1.0 - 1.5
============= ================================
-\* Multi-buffer library 1.0 or newer only works for Meson but not Make build system.
-
Initialization
--------------
diff --git a/doc/guides/cryptodevs/snow3g.rst b/doc/guides/cryptodevs/snow3g.rst
index 3392932653..2a04a027be 100644
--- a/doc/guides/cryptodevs/snow3g.rst
+++ b/doc/guides/cryptodevs/snow3g.rst
@@ -78,12 +78,6 @@ Once it is downloaded, extract it and follow these steps:
make
make install
-.. note::
-
- Compilation of the Multi-Buffer library is broken when GCC < 5.0, if library <= v0.53.
- If a lower GCC version than 5.0, the workaround proposed by the following link
- should be used: `<https://github.com/intel/intel-ipsec-mb/issues/40>`_.
-
As a reference, the following table shows a mapping between the past DPDK versions
and the external crypto libraries supported by them:
@@ -94,13 +88,10 @@ and the external crypto libraries supported by them:
============= ================================
DPDK version Crypto library version
============= ================================
- 16.04 - 19.11 LibSSO SNOW3G
- 20.02 - 21.08 Multi-buffer library 0.53 - 1.3*
- 21.11+ Multi-buffer library 1.0 - 1.5*
+ 20.02 - 21.08 Multi-buffer library 0.53 - 1.3
+ 21.11+ Multi-buffer library 1.0 - 1.5
============= ================================
-\* Multi-buffer library 1.0 or newer only works for Meson but not Make build system.
-
Initialization
--------------
diff --git a/doc/guides/cryptodevs/zuc.rst b/doc/guides/cryptodevs/zuc.rst
index a414b5ad2c..3084646099 100644
--- a/doc/guides/cryptodevs/zuc.rst
+++ b/doc/guides/cryptodevs/zuc.rst
@@ -77,12 +77,6 @@ Once it is downloaded, extract it and follow these steps:
make
make install
-.. note::
-
- Compilation of the Multi-Buffer library is broken when GCC < 5.0, if library <= v0.53.
- If a lower GCC version than 5.0, the workaround proposed by the following link
- should be used: `<https://github.com/intel/intel-ipsec-mb/issues/40>`_.
-
As a reference, the following table shows a mapping between the past DPDK versions
and the external crypto libraries supported by them:
@@ -93,13 +87,10 @@ and the external crypto libraries supported by them:
============= ================================
DPDK version Crypto library version
============= ================================
- 16.11 - 19.11 LibSSO ZUC
- 20.02 - 21.08 Multi-buffer library 0.53 - 1.3*
- 21.11+ Multi-buffer library 1.0 - 1.5*
+ 20.02 - 21.08 Multi-buffer library 0.53 - 1.3
+ 21.11+ Multi-buffer library 1.0 - 1.5
============= ================================
-\* Multi-buffer library 1.0 or newer only works for Meson but not Make build system.
-
Initialization
--------------
--
2.25.1
^ permalink raw reply [flat|nested] 45+ messages in thread
* [PATCH v7 2/2] doc: announce Intel IPsec MB version bump
2024-03-14 10:37 ` [PATCH v7 1/2] doc: remove outdated version details Brian Dooley
@ 2024-03-14 10:37 ` Brian Dooley
2024-03-14 12:04 ` Power, Ciara
2024-03-22 19:33 ` [EXTERNAL] [PATCH v7 1/2] doc: remove outdated version details Akhil Goyal
1 sibling, 1 reply; 45+ messages in thread
From: Brian Dooley @ 2024-03-14 10:37 UTC (permalink / raw)
Cc: dev, gakhil, pablo.de.lara.guarch, Brian Dooley
The Intel IPsec Multi-buffer version is set to be bumped to a minimum
version of 1.4 for the 24.11 LTS release.
Signed-off-by: Brian Dooley <brian.dooley@intel.com>
---
doc/guides/rel_notes/deprecation.rst | 4 ++++
1 file changed, 4 insertions(+)
diff --git a/doc/guides/rel_notes/deprecation.rst b/doc/guides/rel_notes/deprecation.rst
index 10630ba255..a3d48dfcc1 100644
--- a/doc/guides/rel_notes/deprecation.rst
+++ b/doc/guides/rel_notes/deprecation.rst
@@ -120,6 +120,10 @@ Deprecation Notices
which got error interrupt to the application,
so that application can reset that particular queue pair.
+* cryptodev: In 24.11 LTS release the Intel IPsec Multi-buffer version will be
+ bumped to a minimum version of v1.4. This will affect the KASUMI, SNOW3G, ZUC,
+ AESNI GCM, AESNI MB and CHACHAPOLY SW PMDs.
+
* eventdev: The single-event (non-burst) enqueue and dequeue operations,
used by static inline burst enqueue and dequeue functions in ``rte_eventdev.h``,
will be removed in DPDK 23.11.
--
2.25.1
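Once this bump lands, anyone building DPDK against a distribution-provided library can confirm what is actually installed using the library's own version helpers. Below is a standalone sketch, assuming imb_get_version() and imb_get_version_str() from intel-ipsec-mb.h; it is illustrative only and not part of the patch.

    /* check_imb.c - hedged sketch, not part of the patchset.
     * Build against the installed library, e.g.: cc check_imb.c -lIPSec_MB */
    #include <stdio.h>
    #include <intel-ipsec-mb.h>

    int main(void)
    {
        /* imb_get_version()/imb_get_version_str() report the library version
         * linked at run time, which may differ from the build-time headers. */
        if (imb_get_version() < IMB_VERSION(1, 4, 0)) {
            fprintf(stderr, "intel-ipsec-mb %s found, but >= v1.4 will be required\n",
                    imb_get_version_str());
            return 1;
        }
        printf("intel-ipsec-mb %s satisfies the planned v1.4 minimum\n",
               imb_get_version_str());
        return 0;
    }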
^ permalink raw reply [flat|nested] 45+ messages in thread
* RE: [PATCH v7 2/2] doc: announce Intel IPsec MB version bump
2024-03-14 10:37 ` [PATCH v7 2/2] doc: announce Intel IPsec MB version bump Brian Dooley
@ 2024-03-14 12:04 ` Power, Ciara
0 siblings, 0 replies; 45+ messages in thread
From: Power, Ciara @ 2024-03-14 12:04 UTC (permalink / raw)
To: Dooley, Brian; +Cc: dev, gakhil, De Lara Guarch, Pablo, Dooley, Brian
> -----Original Message-----
> From: Brian Dooley <brian.dooley@intel.com>
> Sent: Thursday, March 14, 2024 10:38 AM
> Cc: dev@dpdk.org; gakhil@marvell.com; De Lara Guarch, Pablo
> <pablo.de.lara.guarch@intel.com>; Dooley, Brian <brian.dooley@intel.com>
> Subject: [PATCH v7 2/2] doc: announce Intel IPsec MB version bump
>
> The Intel IPsec Multi-buffer version is set to be bumped to a minimum version of
> 1.4 for the 24.11 LTS release.
>
> Signed-off-by: Brian Dooley <brian.dooley@intel.com>
> ---
> doc/guides/rel_notes/deprecation.rst | 4 ++++
> 1 file changed, 4 insertions(+)
>
> diff --git a/doc/guides/rel_notes/deprecation.rst
> b/doc/guides/rel_notes/deprecation.rst
> index 10630ba255..a3d48dfcc1 100644
> --- a/doc/guides/rel_notes/deprecation.rst
> +++ b/doc/guides/rel_notes/deprecation.rst
> @@ -120,6 +120,10 @@ Deprecation Notices
> which got error interrupt to the application,
> so that application can reset that particular queue pair.
>
> +* cryptodev: In 24.11 LTS release the Intel IPsec Multi-buffer version
> +will be
> + bumped to a minimum version of v1.4. This will affect the KASUMI,
> +SNOW3G, ZUC,
> + AESNI GCM, AESNI MB and CHACHAPOLY SW PMDs.
> +
> * eventdev: The single-event (non-burst) enqueue and dequeue operations,
> used by static inline burst enqueue and dequeue functions in
> ``rte_eventdev.h``,
> will be removed in DPDK 23.11.
> --
> 2.25.1
Acked-by: Ciara Power <ciara.power@intel.com>
^ permalink raw reply [flat|nested] 45+ messages in thread
* Re: [EXTERNAL] [PATCH v5 1/4] crypto/ipsec_mb: bump minimum IPsec Multi-buffer version
2024-03-12 16:26 ` Wathsala Wathawana Vithanage
@ 2024-03-15 18:24 ` Patrick Robb
0 siblings, 0 replies; 45+ messages in thread
From: Patrick Robb @ 2024-03-15 18:24 UTC (permalink / raw)
To: Wathsala Wathawana Vithanage
Cc: Power, Ciara, Akhil Goyal, Dooley, Brian, Ji, Kai,
De Lara Guarch, Pablo, Aaron Conole, dev, Sivaramakrishnan,
VenkatX, thomas, Marchand, David, nd
Recheck-request: iol-unit-arm64-testing
Even though the ipsec update is postponed to a later release, I'm putting
in rechecks for all series that have failures for the arm crypto tests
now that we are building from SECLIB-IPSEC-2024.03.12.
^ permalink raw reply [flat|nested] 45+ messages in thread
* Re: [PATCH v5 4/4] crypto/ipsec_mb: unified IPsec MB interface
2024-03-05 17:42 ` [PATCH v5 4/4] crypto/ipsec_mb: unified IPsec MB interface Brian Dooley
@ 2024-03-15 18:25 ` Patrick Robb
0 siblings, 0 replies; 45+ messages in thread
From: Patrick Robb @ 2024-03-15 18:25 UTC (permalink / raw)
To: Brian Dooley
Cc: Kai Ji, Pablo de Lara, dev, gakhil, Ciara Power, Wathsala Vithanage
Recheck-request: iol-unit-arm64-testing
Even though the ipsec update is postponed to a later release, I'm putting
in rechecks for all series that have failures for the arm crypto tests
now that we are building from SECLIB-IPSEC-2024.03.12.
^ permalink raw reply [flat|nested] 45+ messages in thread
* Re: [PATCH v6 1/5] ci: replace IPsec-mb package install
2024-03-12 18:04 ` Power, Ciara
@ 2024-03-15 18:26 ` Patrick Robb
0 siblings, 0 replies; 45+ messages in thread
From: Patrick Robb @ 2024-03-15 18:26 UTC (permalink / raw)
To: Power, Ciara
Cc: Dooley, Brian, Aaron Conole, Michael Santana, dev, gakhil,
De Lara Guarch, Pablo, wathsala.vithanage
Recheck-request: iol-unit-arm64-testing
Even though the ipsec update is postponed to a later release, I'm putting
in rechecks for all series that have failures for the arm crypto tests
now that we are building from SECLIB-IPSEC-2024.03.12.
^ permalink raw reply [flat|nested] 45+ messages in thread
* RE: [EXTERNAL] [PATCH v7 1/2] doc: remove outdated version details
2024-03-14 10:37 ` [PATCH v7 1/2] doc: remove outdated version details Brian Dooley
2024-03-14 10:37 ` [PATCH v7 2/2] doc: announce Intel IPsec MB version bump Brian Dooley
@ 2024-03-22 19:33 ` Akhil Goyal
1 sibling, 0 replies; 45+ messages in thread
From: Akhil Goyal @ 2024-03-22 19:33 UTC (permalink / raw)
To: Brian Dooley, Kai Ji, Pablo de Lara
Cc: dev, Sivaramakrishnan Venkat, Wathsala Vithanage
> SW PMDs documentation is updated to remove details of unsupported IPsec
> Multi-buffer versions. DPDK older than 20.11 is end of life. So, older
> DPDK versions are removed from the Crypto library version table.
>
> Signed-off-by: Sivaramakrishnan Venkat <venkatx.sivaramakrishnan@intel.com>
> Signed-off-by: Brian Dooley <brian.dooley@intel.com>
> Acked-by: Pablo de Lara <pablo.de.lara.guarch@intel.com>
> Acked-by: Wathsala Vithanage <wathsala.vithanage@arm.com>
> ---
Series applied to dpdk-next-crypto
^ permalink raw reply [flat|nested] 45+ messages in thread
end of thread, other threads:[~2024-03-22 19:33 UTC | newest]
Thread overview: 45+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2023-12-12 15:36 [PATCH v1] crypto/ipsec_mb: unified IPsec MB interface Brian Dooley
2023-12-14 15:15 ` [PATCH v2] " Brian Dooley
2024-01-18 12:00 ` [PATCH v3] " Brian Dooley
2024-02-28 11:33 ` [PATCH v4] " Brian Dooley
2024-02-28 11:50 ` Power, Ciara
2024-02-29 16:23 ` Dooley, Brian
2024-02-29 16:32 ` Akhil Goyal
2024-03-04 7:33 ` Akhil Goyal
2024-03-05 5:39 ` Honnappa Nagarahalli
2024-03-05 17:31 ` Wathsala Wathawana Vithanage
2024-03-05 15:21 ` Wathsala Wathawana Vithanage
2024-03-05 17:42 ` [PATCH v5 1/4] crypto/ipsec_mb: bump minimum IPsec Multi-buffer version Brian Dooley
2024-03-05 17:42 ` [PATCH v5 2/4] doc: remove outdated version details Brian Dooley
2024-03-05 17:42 ` [PATCH v5 3/4] crypto/ipsec_mb: use new ipad/opad calculation API Brian Dooley
2024-03-05 17:42 ` [PATCH v5 4/4] crypto/ipsec_mb: unified IPsec MB interface Brian Dooley
2024-03-15 18:25 ` Patrick Robb
2024-03-05 19:11 ` [EXTERNAL] [PATCH v5 1/4] crypto/ipsec_mb: bump minimum IPsec Multi-buffer version Akhil Goyal
2024-03-05 19:50 ` Patrick Robb
2024-03-05 23:30 ` Patrick Robb
2024-03-06 3:57 ` Patrick Robb
2024-03-06 11:12 ` Power, Ciara
2024-03-06 14:59 ` Patrick Robb
2024-03-06 15:29 ` Power, Ciara
2024-03-07 16:21 ` Wathsala Wathawana Vithanage
2024-03-08 16:05 ` Power, Ciara
2024-03-12 16:26 ` Wathsala Wathawana Vithanage
2024-03-15 18:24 ` Patrick Robb
2024-03-12 13:50 ` [PATCH v6 1/5] ci: replace IPsec-mb package install Brian Dooley
2024-03-12 13:50 ` [PATCH v6 2/5] crypto/ipsec_mb: bump minimum IPsec Multi-buffer version Brian Dooley
2024-03-12 13:50 ` [PATCH v6 3/5] doc: remove outdated version details Brian Dooley
2024-03-12 13:50 ` [PATCH v6 4/5] crypto/ipsec_mb: use new ipad/opad calculation API Brian Dooley
2024-03-12 13:50 ` [PATCH v6 5/5] crypto/ipsec_mb: unify some IPsec MB PMDs Brian Dooley
2024-03-12 13:54 ` [PATCH v6 1/5] ci: replace IPsec-mb package install David Marchand
2024-03-12 15:26 ` Power, Ciara
2024-03-12 16:13 ` David Marchand
2024-03-12 17:07 ` Power, Ciara
2024-03-12 16:05 ` David Marchand
2024-03-12 16:16 ` Jack Bond-Preston
2024-03-12 17:08 ` Power, Ciara
2024-03-12 18:04 ` Power, Ciara
2024-03-15 18:26 ` Patrick Robb
2024-03-14 10:37 ` [PATCH v7 1/2] doc: remove outdated version details Brian Dooley
2024-03-14 10:37 ` [PATCH v7 2/2] doc: announce Intel IPsec MB version bump Brian Dooley
2024-03-14 12:04 ` Power, Ciara
2024-03-22 19:33 ` [EXTERNAL] [PATCH v7 1/2] doc: remove outdated version details Akhil Goyal