From: Pablo de Lara <pablo.de.lara.guarch@intel.com>
To: dev@dpdk.org
Cc: Pablo de Lara <pablo.de.lara.guarch@intel.com>
Date: Thu, 5 Mar 2020 15:34:54 +0000
Message-Id: <20200305153454.724874-6-pablo.de.lara.guarch@intel.com>
X-Mailer: git-send-email 2.24.1
In-Reply-To: <20200305153454.724874-1-pablo.de.lara.guarch@intel.com>
References: <20200305153454.724874-1-pablo.de.lara.guarch@intel.com>
MIME-Version: 1.0
Content-Transfer-Encoding: 8bit
Subject: [dpdk-dev] [RFC PATCH 5/5] crypto/aesni_gcm: support IPSec MB library v0.53

Add support for underlying Intel IPSec Multi-buffer library v0.53.

In v0.53, the GCM API is exposed through the library's MB_MGR structure,
so the static per-architecture gcm_ops table is replaced with a per-device
table of function pointers taken from an MB_MGR instance allocated at
device creation time. This also adds an AVX512 vector mode, splits the
finalize function pointer into separate encrypt/decrypt variants and adds
in-place scatter-gather list support.

Signed-off-by: Pablo de Lara <pablo.de.lara.guarch@intel.com>
---
 drivers/crypto/aesni_gcm/aesni_gcm_ops.h      |  65 ++-------
 drivers/crypto/aesni_gcm/aesni_gcm_pmd.c      | 130 +++++++++++++-----
 drivers/crypto/aesni_gcm/aesni_gcm_pmd_ops.c  |   4 +-
 .../crypto/aesni_gcm/aesni_gcm_pmd_private.h  |   4 +
 4 files changed, 114 insertions(+), 89 deletions(-)

diff --git a/drivers/crypto/aesni_gcm/aesni_gcm_ops.h b/drivers/crypto/aesni_gcm/aesni_gcm_ops.h
index 450616698..b2cc4002e 100644
--- a/drivers/crypto/aesni_gcm/aesni_gcm_ops.h
+++ b/drivers/crypto/aesni_gcm/aesni_gcm_ops.h
@@ -17,14 +17,15 @@ enum aesni_gcm_vector_mode {
 	RTE_AESNI_GCM_SSE,
 	RTE_AESNI_GCM_AVX,
 	RTE_AESNI_GCM_AVX2,
+	RTE_AESNI_GCM_AVX512,
 	RTE_AESNI_GCM_VECTOR_NUM
 };
 
 enum aesni_gcm_key {
-	AESNI_GCM_KEY_128,
-	AESNI_GCM_KEY_192,
-	AESNI_GCM_KEY_256,
-	AESNI_GCM_KEY_NUM
+	GCM_KEY_128 = 0,
+	GCM_KEY_192,
+	GCM_KEY_256,
+	GCM_KEY_NUM
 };
 
@@ -34,7 +35,7 @@ typedef void (*aesni_gcm_t)(const struct gcm_key_data *gcm_key_data,
 		const uint8_t *aad, uint64_t aad_len,
 		uint8_t *auth_tag, uint64_t auth_tag_len);
 
-typedef void (*aesni_gcm_precomp_t)(const void *key, struct gcm_key_data *gcm_data);
+typedef void (*aesni_gcm_pre_t)(const void *key, struct gcm_key_data *gcm_data);
 
 typedef void (*aesni_gcm_init_t)(const struct gcm_key_data *gcm_key_data,
 		struct gcm_context_data *gcm_ctx_data,
@@ -57,60 +58,12 @@ typedef void (*aesni_gcm_finalize_t)(const struct gcm_key_data *gcm_key_data,
 struct aesni_gcm_ops {
 	aesni_gcm_t enc;	/**< GCM encode function pointer */
 	aesni_gcm_t dec;	/**< GCM decode function pointer */
-	aesni_gcm_precomp_t precomp;	/**< GCM pre-compute */
+	aesni_gcm_pre_t pre;	/**< GCM pre-compute */
 	aesni_gcm_init_t init;
 	aesni_gcm_update_t update_enc;
 	aesni_gcm_update_t update_dec;
-	aesni_gcm_finalize_t finalize;
+	aesni_gcm_finalize_t finalize_enc;
+	aesni_gcm_finalize_t finalize_dec;
 };
 
-#define AES_GCM_FN(keylen, arch) \
-aes_gcm_enc_##keylen##_##arch,\
-aes_gcm_dec_##keylen##_##arch,\
-aes_gcm_pre_##keylen##_##arch,\
-aes_gcm_init_##keylen##_##arch,\
-aes_gcm_enc_##keylen##_update_##arch,\
-aes_gcm_dec_##keylen##_update_##arch,\
-aes_gcm_enc_##keylen##_finalize_##arch,
-
-static const struct aesni_gcm_ops gcm_ops[RTE_AESNI_GCM_VECTOR_NUM][AESNI_GCM_KEY_NUM] = {
-	[RTE_AESNI_GCM_NOT_SUPPORTED] = {
-		[AESNI_GCM_KEY_128] = {NULL},
-		[AESNI_GCM_KEY_192] = {NULL},
-		[AESNI_GCM_KEY_256] = {NULL}
-	},
-	[RTE_AESNI_GCM_SSE] = {
-		[AESNI_GCM_KEY_128] = {
-			AES_GCM_FN(128, sse)
-		},
-		[AESNI_GCM_KEY_192] = {
-			AES_GCM_FN(192, sse)
-		},
-		[AESNI_GCM_KEY_256] = {
-			AES_GCM_FN(256, sse)
-		}
-	},
-	[RTE_AESNI_GCM_AVX] = {
-		[AESNI_GCM_KEY_128] = {
-			AES_GCM_FN(128, avx_gen2)
-		},
-		[AESNI_GCM_KEY_192] = {
-			AES_GCM_FN(192, avx_gen2)
-		},
-		[AESNI_GCM_KEY_256] = {
-			AES_GCM_FN(256, avx_gen2)
-		}
-	},
-	[RTE_AESNI_GCM_AVX2] = {
-		[AESNI_GCM_KEY_128] = {
-			AES_GCM_FN(128, avx_gen4)
-		},
-		[AESNI_GCM_KEY_192] = {
-			AES_GCM_FN(192, avx_gen4)
-		},
-		[AESNI_GCM_KEY_256] = {
-			AES_GCM_FN(256, avx_gen4)
-		}
-	}
-};
 
 #endif /* _AESNI_GCM_OPS_H_ */
diff --git a/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c b/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c
index ebdf7c35a..2bda5a560 100644
--- a/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c
+++ b/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c
@@ -24,7 +24,7 @@ aesni_gcm_set_session_parameters(const struct aesni_gcm_ops *gcm_ops,
 	const struct rte_crypto_sym_xform *auth_xform;
 	const struct rte_crypto_sym_xform *aead_xform;
 	uint8_t key_length;
-	uint8_t *key;
+	const uint8_t *key;
 
 	/* AES-GMAC */
 	if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
@@ -89,20 +89,20 @@ aesni_gcm_set_session_parameters(const struct aesni_gcm_ops *gcm_ops,
 	/* Check key length and calculate GCM pre-compute. */
 	switch (key_length) {
 	case 16:
-		sess->key = AESNI_GCM_KEY_128;
+		sess->key = GCM_KEY_128;
 		break;
 	case 24:
-		sess->key = AESNI_GCM_KEY_192;
+		sess->key = GCM_KEY_192;
 		break;
 	case 32:
-		sess->key = AESNI_GCM_KEY_256;
+		sess->key = GCM_KEY_256;
 		break;
 	default:
 		AESNI_GCM_LOG(ERR, "Invalid key length");
 		return -EINVAL;
 	}
 
-	gcm_ops[sess->key].precomp(key, &sess->gdata_key);
+	gcm_ops[sess->key].pre(key, &sess->gdata_key);
 
 	/* Digest check */
 	if (sess->req_digest_length > 16) {
@@ -195,6 +195,7 @@ process_gcm_crypto_op(struct aesni_gcm_qp *qp, struct rte_crypto_op *op,
 	uint32_t offset, data_offset, data_length;
 	uint32_t part_len, total_len, data_len;
 	uint8_t *tag;
+	unsigned int oop = 0;
 
 	if (session->op == AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION ||
 			session->op == AESNI_GCM_OP_AUTHENTICATED_DECRYPTION) {
@@ -216,27 +217,30 @@ process_gcm_crypto_op(struct aesni_gcm_qp *qp, struct rte_crypto_op *op,
 		RTE_ASSERT(m_src != NULL);
 	}
 
+	src = rte_pktmbuf_mtod_offset(m_src, uint8_t *, offset);
+
 	data_len = m_src->data_len - offset;
 	part_len = (data_len < data_length) ? data_len :
 			data_length;
 
-	/* Destination buffer is required when segmented source buffer */
-	RTE_ASSERT((part_len == data_length) ||
-			((part_len != data_length) &&
-					(sym_op->m_dst != NULL)));
-
 	/* Segmented destination buffer is not supported */
 	RTE_ASSERT((sym_op->m_dst == NULL) ||
 			((sym_op->m_dst != NULL) &&
					rte_pktmbuf_is_contiguous(sym_op->m_dst)));
-
-	dst = sym_op->m_dst ?
-			rte_pktmbuf_mtod_offset(sym_op->m_dst, uint8_t *,
-					data_offset) :
-			rte_pktmbuf_mtod_offset(sym_op->m_src, uint8_t *,
+	/* In-place */
+	if (sym_op->m_dst == NULL || (sym_op->m_dst == sym_op->m_src))
+		dst = src;
+	/* Out-of-place */
+	else {
+		oop = 1;
+		/*
+		 * Segmented destination buffer is not supported if operation is
+		 * Out-of-place
+		 */
+		RTE_ASSERT(rte_pktmbuf_is_contiguous(sym_op->m_dst));
+		dst = rte_pktmbuf_mtod_offset(sym_op->m_dst, uint8_t *,
 				data_offset);
-
-	src = rte_pktmbuf_mtod_offset(m_src, uint8_t *, offset);
+	}
 
 	iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
 			session->iv.offset);
@@ -254,12 +258,15 @@ process_gcm_crypto_op(struct aesni_gcm_qp *qp, struct rte_crypto_op *op,
 		total_len = data_length - part_len;
 
 		while (total_len) {
-			dst += part_len;
 			m_src = m_src->next;
 
 			RTE_ASSERT(m_src != NULL);
 
 			src = rte_pktmbuf_mtod(m_src, uint8_t *);
+			if (oop)
+				dst += part_len;
+			else
+				dst = src;
 			part_len = (m_src->data_len < total_len) ?
 					m_src->data_len : total_len;
 
@@ -274,7 +281,7 @@ process_gcm_crypto_op(struct aesni_gcm_qp *qp, struct rte_crypto_op *op,
 	else
 		tag = sym_op->aead.digest.data;
 
-	qp->ops[session->key].finalize(&session->gdata_key,
+	qp->ops[session->key].finalize_enc(&session->gdata_key,
 			&qp->gdata_ctx,
 			tag,
 			session->gen_digest_length);
@@ -291,12 +298,15 @@ process_gcm_crypto_op(struct aesni_gcm_qp *qp, struct rte_crypto_op *op,
 		total_len = data_length - part_len;
 
 		while (total_len) {
-			dst += part_len;
 			m_src = m_src->next;
 
 			RTE_ASSERT(m_src != NULL);
 
 			src = rte_pktmbuf_mtod(m_src, uint8_t *);
+			if (oop)
+				dst += part_len;
+			else
+				dst = src;
 			part_len = (m_src->data_len < total_len) ?
 					m_src->data_len : total_len;
 
@@ -308,7 +318,7 @@ process_gcm_crypto_op(struct aesni_gcm_qp *qp, struct rte_crypto_op *op,
 	}
 
 	tag = qp->temp_digest;
-	qp->ops[session->key].finalize(&session->gdata_key,
+	qp->ops[session->key].finalize_dec(&session->gdata_key,
 			&qp->gdata_ctx,
 			tag,
 			session->gen_digest_length);
@@ -322,7 +332,7 @@ process_gcm_crypto_op(struct aesni_gcm_qp *qp, struct rte_crypto_op *op,
 		tag = qp->temp_digest;
 	else
 		tag = sym_op->auth.digest.data;
-	qp->ops[session->key].finalize(&session->gdata_key,
+	qp->ops[session->key].finalize_enc(&session->gdata_key,
 			&qp->gdata_ctx,
 			tag,
 			session->gen_digest_length);
@@ -338,7 +348,7 @@ process_gcm_crypto_op(struct aesni_gcm_qp *qp, struct rte_crypto_op *op,
 	 * the bytes passed.
	 */
 	tag = qp->temp_digest;
-	qp->ops[session->key].finalize(&session->gdata_key,
+	qp->ops[session->key].finalize_enc(&session->gdata_key,
 			&qp->gdata_ctx,
 			tag,
 			session->gen_digest_length);
@@ -487,12 +497,8 @@ aesni_gcm_create(const char *name,
 	struct rte_cryptodev *dev;
 	struct aesni_gcm_private *internals;
 	enum aesni_gcm_vector_mode vector_mode;
+	MB_MGR *mb_mgr;
 
-	/* Check CPU for support for AES instruction set */
-	if (!rte_cpu_get_flag_enabled(RTE_CPUFLAG_AES)) {
-		AESNI_GCM_LOG(ERR, "AES instructions not supported by CPU");
-		return -EFAULT;
-	}
 	dev = rte_cryptodev_pmd_create(name, &vdev->device, init_params);
 	if (dev == NULL) {
 		AESNI_GCM_LOG(ERR, "driver %s: create failed",
@@ -501,7 +507,9 @@ aesni_gcm_create(const char *name,
 	}
 
 	/* Check CPU for supported vector instruction set */
-	if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2))
+	if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F))
+		vector_mode = RTE_AESNI_GCM_AVX512;
+	else if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2))
 		vector_mode = RTE_AESNI_GCM_AVX2;
 	else if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX))
 		vector_mode = RTE_AESNI_GCM_AVX;
@@ -517,27 +525,74 @@ aesni_gcm_create(const char *name,
 
 	dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
 			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
-			RTE_CRYPTODEV_FF_CPU_AESNI |
+			RTE_CRYPTODEV_FF_IN_PLACE_SGL |
 			RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
 			RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;
 
+	/* Check CPU for support for AES instruction set */
+	if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AES))
+		dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AESNI;
+	else
+		AESNI_GCM_LOG(WARNING, "AES instructions not supported by CPU");
+
+	mb_mgr = alloc_mb_mgr(0);
+	if (mb_mgr == NULL)
+		return -ENOMEM;
+
 	switch (vector_mode) {
 	case RTE_AESNI_GCM_SSE:
 		dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_SSE;
+		init_mb_mgr_sse(mb_mgr);
 		break;
 	case RTE_AESNI_GCM_AVX:
 		dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX;
+		init_mb_mgr_avx(mb_mgr);
 		break;
 	case RTE_AESNI_GCM_AVX2:
 		dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX2;
+		init_mb_mgr_avx2(mb_mgr);
 		break;
-	default:
+	case RTE_AESNI_GCM_AVX512:
+		dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX512;
+		init_mb_mgr_avx512(mb_mgr);
 		break;
+	default:
+		AESNI_GCM_LOG(ERR, "Unsupported vector mode %u\n", vector_mode);
+		goto error_exit;
 	}
 
 	internals = dev->data->dev_private;
 
 	internals->vector_mode = vector_mode;
+	internals->mb_mgr = mb_mgr;
+
+	/* Set arch independent function pointers, based on key size */
+	internals->ops[GCM_KEY_128].enc = mb_mgr->gcm128_enc;
+	internals->ops[GCM_KEY_128].dec = mb_mgr->gcm128_dec;
+	internals->ops[GCM_KEY_128].pre = mb_mgr->gcm128_pre;
+	internals->ops[GCM_KEY_128].init = mb_mgr->gcm128_init;
+	internals->ops[GCM_KEY_128].update_enc = mb_mgr->gcm128_enc_update;
+	internals->ops[GCM_KEY_128].update_dec = mb_mgr->gcm128_dec_update;
+	internals->ops[GCM_KEY_128].finalize_enc = mb_mgr->gcm128_enc_finalize;
+	internals->ops[GCM_KEY_128].finalize_dec = mb_mgr->gcm128_dec_finalize;
+
+	internals->ops[GCM_KEY_192].enc = mb_mgr->gcm192_enc;
+	internals->ops[GCM_KEY_192].dec = mb_mgr->gcm192_dec;
+	internals->ops[GCM_KEY_192].pre = mb_mgr->gcm192_pre;
+	internals->ops[GCM_KEY_192].init = mb_mgr->gcm192_init;
+	internals->ops[GCM_KEY_192].update_enc = mb_mgr->gcm192_enc_update;
+	internals->ops[GCM_KEY_192].update_dec = mb_mgr->gcm192_dec_update;
+	internals->ops[GCM_KEY_192].finalize_enc = mb_mgr->gcm192_enc_finalize;
+	internals->ops[GCM_KEY_192].finalize_dec = mb_mgr->gcm192_dec_finalize;
+
+	internals->ops[GCM_KEY_256].enc = mb_mgr->gcm256_enc;
+	internals->ops[GCM_KEY_256].dec = mb_mgr->gcm256_dec;
+	internals->ops[GCM_KEY_256].pre = mb_mgr->gcm256_pre;
+	internals->ops[GCM_KEY_256].init = mb_mgr->gcm256_init;
+	internals->ops[GCM_KEY_256].update_enc = mb_mgr->gcm256_enc_update;
+	internals->ops[GCM_KEY_256].update_dec = mb_mgr->gcm256_dec_update;
+	internals->ops[GCM_KEY_256].finalize_enc = mb_mgr->gcm256_enc_finalize;
+	internals->ops[GCM_KEY_256].finalize_dec = mb_mgr->gcm256_dec_finalize;
 
 	internals->max_nb_queue_pairs = init_params->max_nb_queue_pairs;
 
@@ -549,6 +604,14 @@ aesni_gcm_create(const char *name,
 #endif
 
 	return 0;
+
+error_exit:
+	if (mb_mgr)
+		free_mb_mgr(mb_mgr);
+
+	rte_cryptodev_pmd_destroy(dev);
+
+	return -1;
 }
 
 static int
@@ -576,6 +639,7 @@ static int
 aesni_gcm_remove(struct rte_vdev_device *vdev)
 {
 	struct rte_cryptodev *cryptodev;
+	struct aesni_gcm_private *internals;
 	const char *name;
 
 	name = rte_vdev_device_name(vdev);
@@ -586,6 +650,10 @@ aesni_gcm_remove(struct rte_vdev_device *vdev)
 	if (cryptodev == NULL)
 		return -ENODEV;
 
+	internals = cryptodev->data->dev_private;
+
+	free_mb_mgr(internals->mb_mgr);
+
 	return rte_cryptodev_pmd_destroy(cryptodev);
 }
 
diff --git a/drivers/crypto/aesni_gcm/aesni_gcm_pmd_ops.c b/drivers/crypto/aesni_gcm/aesni_gcm_pmd_ops.c
index c343a393f..f599fc3f7 100644
--- a/drivers/crypto/aesni_gcm/aesni_gcm_pmd_ops.c
+++ b/drivers/crypto/aesni_gcm/aesni_gcm_pmd_ops.c
@@ -222,7 +222,7 @@ aesni_gcm_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
 	if (aesni_gcm_pmd_qp_set_unique_name(dev, qp))
 		goto qp_setup_cleanup;
 
-	qp->ops = (const struct aesni_gcm_ops *)gcm_ops[internals->vector_mode];
+	qp->ops = (const struct aesni_gcm_ops *)internals->ops;
 
 	qp->processed_pkts = aesni_gcm_pmd_qp_create_processed_pkts_ring(qp,
 			qp_conf->nb_descriptors, socket_id);
@@ -277,7 +277,7 @@ aesni_gcm_pmd_sym_session_configure(struct rte_cryptodev *dev __rte_unused,
 				"Couldn't get object from session mempool");
 		return -ENOMEM;
 	}
-	ret = aesni_gcm_set_session_parameters(gcm_ops[internals->vector_mode],
+	ret = aesni_gcm_set_session_parameters(internals->ops,
 			sess_private_data, xform);
 	if (ret != 0) {
 		AESNI_GCM_LOG(ERR, "failed configure session parameters");
diff --git a/drivers/crypto/aesni_gcm/aesni_gcm_pmd_private.h b/drivers/crypto/aesni_gcm/aesni_gcm_pmd_private.h
index 92b041354..fd43df3cf 100644
--- a/drivers/crypto/aesni_gcm/aesni_gcm_pmd_private.h
+++ b/drivers/crypto/aesni_gcm/aesni_gcm_pmd_private.h
@@ -35,6 +35,10 @@ struct aesni_gcm_private {
 	/**< Vector mode */
 	unsigned max_nb_queue_pairs;
 	/**< Max number of queue pairs supported by device */
+	MB_MGR *mb_mgr;
+	/**< Multi-buffer instance */
+	struct aesni_gcm_ops ops[GCM_KEY_NUM];
+	/**< Function pointer table of the gcm APIs */
 };
 
 struct aesni_gcm_qp {
-- 
2.24.1
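
As context for review: with v0.53 the GCM entry points are no longer the
per-arch aes_gcm_* symbols but function pointers read out of an MB_MGR
instance, which is what aesni_gcm_create() caches in internals->ops above.
The minimal sketch below (not part of the patch; it assumes the library's
public header intel-ipsec-mb.h and uses arbitrary all-zero test values)
shows a one-shot AES-128-GCM encryption through the same gcm128_pre and
gcm128_enc pointers the PMD stores for the GCM_KEY_128 slot:

#include <stdint.h>
#include <stdio.h>
#include <intel-ipsec-mb.h>

int main(void)
{
	struct gcm_key_data key_data;
	struct gcm_context_data ctx;
	const uint8_t key[16] = {0};	/* 16-byte key, i.e. the GCM_KEY_128 path */
	const uint8_t iv[12] = {0};	/* GCM standard 96-bit IV */
	uint8_t aad[16] = {0};
	uint8_t in[64] = {0};
	uint8_t out[64];
	uint8_t tag[16];
	MB_MGR *mgr = alloc_mb_mgr(0);

	if (mgr == NULL)
		return 1;
	/* The PMD selects the init routine from the detected vector mode;
	 * SSE is used here as the lowest common denominator. */
	init_mb_mgr_sse(mgr);

	/* Equivalent of ops[GCM_KEY_128].pre followed by ops[GCM_KEY_128].enc */
	mgr->gcm128_pre(key, &key_data);
	mgr->gcm128_enc(&key_data, &ctx, out, in, sizeof(in), iv,
			aad, sizeof(aad), tag, sizeof(tag));

	printf("tag[0] = 0x%02x\n", tag[0]);
	free_mb_mgr(mgr);
	return 0;
}

The segmented-buffer path in process_gcm_crypto_op() drives the same table
through the init, update_enc/update_dec and the new finalize_enc/finalize_dec
pointers instead of the one-shot enc/dec calls.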