From: Fan Zhang
To: dev@dpdk.org
Cc: konstantin.ananyev@intel.com, declan.doherty@intel.com, akhil.goyal@nxp.com, Fan Zhang
Date: Mon, 7 Oct 2019 17:28:45 +0100
Message-Id: <20191007162850.60552-6-roy.fan.zhang@intel.com>
X-Mailer: git-send-email 2.14.5
In-Reply-To: <20191007162850.60552-1-roy.fan.zhang@intel.com>
References: <20190906131330.40185-1-roy.fan.zhang@intel.com> <20191007162850.60552-1-roy.fan.zhang@intel.com>
Subject: [dpdk-dev] [PATCH v2 05/10] crypto/aesni_mb: add rte_security handler

This patch adds rte_security support to the AESNI-MB PMD. The PMD now
initializes a security context instance, creates/deletes PMD-specific
security sessions, and processes crypto workloads in synchronous mode.
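For reference, a minimal sketch of how an application could drive this
synchronous path (not part of the patch): it assumes the
rte_security_process_cpu_crypto_bulk() wrapper and struct rte_security_vec
introduced earlier in this series; the helper name, the exact wrapper
signature, and the .num field layout are illustrative assumptions only.

#include <sys/uio.h>
#include <rte_security.h>

/* Hypothetical helper: run one cipher/auth operation on the calling
 * lcore through the security context of an AESNI-MB cryptodev. */
static int
sync_crypto_one(struct rte_security_ctx *ctx,
		struct rte_security_session *sess,
		void *data, uint32_t len, void *iv, void *digest)
{
	struct iovec iov = { .iov_base = data, .iov_len = len };
	/* .vec/.num layout assumed from this series' rte_security_vec */
	struct rte_security_vec buf = { .vec = &iov, .num = 1 };
	void *aad = NULL;	/* no AAD for plain cipher/auth chains */
	int status = 0;

	/* A negative return is the count of failed elements; the
	 * per-element codes are left in status[]. */
	if (rte_security_process_cpu_crypto_bulk(ctx, sess, &buf, &iv,
			&aad, &digest, &status, 1) < 0)
		return status;

	return 0;
}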
Signed-off-by: Fan Zhang
---
 drivers/crypto/aesni_mb/meson.build                |   2 +-
 drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c         | 368 +++++++++++++++++++--
 drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c     |  92 +++++-
 drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h |  21 +-
 4 files changed, 453 insertions(+), 30 deletions(-)

diff --git a/drivers/crypto/aesni_mb/meson.build b/drivers/crypto/aesni_mb/meson.build
index 3e1687416..e7b585168 100644
--- a/drivers/crypto/aesni_mb/meson.build
+++ b/drivers/crypto/aesni_mb/meson.build
@@ -23,4 +23,4 @@ endif
 
 sources = files('rte_aesni_mb_pmd.c', 'rte_aesni_mb_pmd_ops.c')
 allow_experimental_apis = true
-deps += ['bus_vdev']
+deps += ['bus_vdev', 'security']
diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
index ce1144b95..a4cd518b7 100644
--- a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
+++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
@@ -8,6 +8,8 @@
 #include
 #include
 #include
+#include <rte_security.h>
+#include <rte_security_driver.h>
 #include
 #include
 #include
@@ -19,6 +21,9 @@
 #define HMAC_MAX_BLOCK_SIZE 128
 
 static uint8_t cryptodev_driver_id;
+static enum aesni_mb_vector_mode vector_mode;
+/**< CPU vector instruction set mode */
+
 typedef void (*hash_one_block_t)(const void *data, void *digest);
 typedef void (*aes_keyexp_t)(const void *key, void *enc_exp_keys,
 		void *dec_exp_keys);
@@ -808,6 +813,164 @@ auth_start_offset(struct rte_crypto_op *op, struct aesni_mb_session *session,
 			(UINT64_MAX - u_src + u_dst + 1);
 }
 
+union sec_userdata_field {
+	int status;
+	struct {
+		uint16_t is_gen_digest;
+		uint16_t digest_len;
+	};
+};
+
+struct sec_udata_digest_field {
+	uint32_t is_digest_gen;
+	uint32_t digest_len;
+};
+
+static inline int
+set_mb_job_params_sec(JOB_AES_HMAC *job, struct aesni_mb_sec_session *sec_sess,
+		void *buf, uint32_t buf_len, void *iv, void *aad, void *digest,
+		int *status, uint8_t *digest_idx)
+{
+	struct aesni_mb_session *session = &sec_sess->sess;
+	uint32_t cipher_offset = sec_sess->cipher_offset;
+	union sec_userdata_field udata;
+
+	if (unlikely(cipher_offset > buf_len))
+		return -EINVAL;
+
+	/* Set crypto operation */
+	job->chain_order = session->chain_order;
+
+	/* Set cipher parameters */
+	job->cipher_direction = session->cipher.direction;
+	job->cipher_mode = session->cipher.mode;
+
+	job->aes_key_len_in_bytes = session->cipher.key_length_in_bytes;
+
+	/* Set authentication parameters */
+	job->hash_alg = session->auth.algo;
+	job->iv = iv;
+
+	switch (job->hash_alg) {
+	case AES_XCBC:
+		job->u.XCBC._k1_expanded = session->auth.xcbc.k1_expanded;
+		job->u.XCBC._k2 = session->auth.xcbc.k2;
+		job->u.XCBC._k3 = session->auth.xcbc.k3;
+
+		job->aes_enc_key_expanded =
+				session->cipher.expanded_aes_keys.encode;
+		job->aes_dec_key_expanded =
+				session->cipher.expanded_aes_keys.decode;
+		break;
+
+	case AES_CCM:
+		job->u.CCM.aad = (uint8_t *)aad + 18;
+		job->u.CCM.aad_len_in_bytes = session->aead.aad_len;
+		job->aes_enc_key_expanded =
+				session->cipher.expanded_aes_keys.encode;
+		job->aes_dec_key_expanded =
+				session->cipher.expanded_aes_keys.decode;
+		job->iv++;
+		break;
+
+	case AES_CMAC:
+		job->u.CMAC._key_expanded = session->auth.cmac.expkey;
+		job->u.CMAC._skey1 = session->auth.cmac.skey1;
+		job->u.CMAC._skey2 = session->auth.cmac.skey2;
+		job->aes_enc_key_expanded =
+				session->cipher.expanded_aes_keys.encode;
+		job->aes_dec_key_expanded =
+				session->cipher.expanded_aes_keys.decode;
+		break;
+
+	case AES_GMAC:
+		if (session->cipher.mode == GCM) {
+			job->u.GCM.aad = aad;
+			job->u.GCM.aad_len_in_bytes = session->aead.aad_len;
+		} else {
+			/* For GMAC */
+			job->u.GCM.aad = aad;
+			job->u.GCM.aad_len_in_bytes = buf_len;
+			job->cipher_mode = GCM;
+		}
+		job->aes_enc_key_expanded = &session->cipher.gcm_key;
+		job->aes_dec_key_expanded = &session->cipher.gcm_key;
+		break;
+
+	default:
+		job->u.HMAC._hashed_auth_key_xor_ipad =
+				session->auth.pads.inner;
+		job->u.HMAC._hashed_auth_key_xor_opad =
+				session->auth.pads.outer;
+
+		if (job->cipher_mode == DES3) {
+			job->aes_enc_key_expanded =
+					session->cipher.exp_3des_keys.ks_ptr;
+			job->aes_dec_key_expanded =
+					session->cipher.exp_3des_keys.ks_ptr;
+		} else {
+			job->aes_enc_key_expanded =
+					session->cipher.expanded_aes_keys.encode;
+			job->aes_dec_key_expanded =
+					session->cipher.expanded_aes_keys.decode;
+		}
+	}
+
+	/* Set digest output location */
+	if (job->hash_alg != NULL_HASH &&
+			session->auth.operation == RTE_CRYPTO_AUTH_OP_VERIFY) {
+		job->auth_tag_output = sec_sess->temp_digests[*digest_idx];
+		*digest_idx = (*digest_idx + 1) % MAX_JOBS;
+
+		udata.is_gen_digest = 0;
+		udata.digest_len = session->auth.req_digest_len;
+	} else {
+		udata.is_gen_digest = 1;
+		udata.digest_len = session->auth.req_digest_len;
+
+		if (session->auth.req_digest_len !=
+				session->auth.gen_digest_len) {
+			job->auth_tag_output =
+					sec_sess->temp_digests[*digest_idx];
+			*digest_idx = (*digest_idx + 1) % MAX_JOBS;
+		} else
+			job->auth_tag_output = digest;
+	}
+
+	/* A bit of a hack: the job structure only supports two user data
+	 * fields, but we need four parameters passed (status, direction,
+	 * digest for verify, and digest length), so we temporarily pack
+	 * the digest length and direction into the status value instead
+	 * of allocating a larger buffer for all four.
+	 */
+	*status = udata.status;
+
+	/*
+	 * The multi-buffer library currently only supports returning a
+	 * truncated digest length as specified in the relevant IPsec RFCs.
+	 */
+
+	/* Set digest length */
+	job->auth_tag_output_len_in_bytes = session->auth.gen_digest_len;
+
+	/* Set IV parameters */
+	job->iv_len_in_bytes = session->iv.length;
+
+	/* Data parameters */
+	job->src = buf;
+	job->dst = (uint8_t *)buf + cipher_offset;
+	job->cipher_start_src_offset_in_bytes = cipher_offset;
+	job->msg_len_to_cipher_in_bytes = buf_len - cipher_offset;
+	job->hash_start_src_offset_in_bytes = 0;
+	job->msg_len_to_hash_in_bytes = buf_len;
+
+	job->user_data = (void *)status;
+	job->user_data2 = digest;
+
+	return 0;
+}
+
 /**
  * Process a crypto operation and complete a JOB_AES_HMAC job structure for
  * submission to the multi buffer library for processing.
@@ -1100,6 +1263,35 @@ post_process_mb_job(struct aesni_mb_qp *qp, JOB_AES_HMAC *job)
 	return op;
 }
 
+static inline void
+post_process_mb_sec_job(JOB_AES_HMAC *job)
+{
+	void *user_digest = job->user_data2;
+	int *status = job->user_data;
+
+	switch (job->status) {
+	case STS_COMPLETED:
+		if (user_digest) {
+			union sec_userdata_field udata;
+
+			udata.status = *status;
+			if (udata.is_gen_digest) {
+				*status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+				memcpy(user_digest, job->auth_tag_output,
+						udata.digest_len);
+			} else {
+				*status = (memcmp(job->auth_tag_output,
+					user_digest, udata.digest_len) != 0) ?
+						-1 : 0;
+			}
+		} else
+			*status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+		break;
+	default:
+		*status = RTE_CRYPTO_OP_STATUS_ERROR;
+	}
+}
+
 /**
  * Process a completed JOB_AES_HMAC job and keep processing jobs until
  * get_completed_job return NULL
@@ -1136,6 +1328,32 @@ handle_completed_jobs(struct aesni_mb_qp *qp, JOB_AES_HMAC *job,
 	return processed_jobs;
 }
 
+static inline uint32_t
+handle_completed_sec_jobs(JOB_AES_HMAC *job, MB_MGR *mb_mgr)
+{
+	uint32_t processed = 0;
+
+	while (job != NULL) {
+		post_process_mb_sec_job(job);
+		job = IMB_GET_COMPLETED_JOB(mb_mgr);
+		processed++;
+	}
+
+	return processed;
+}
+
+static inline uint32_t
+flush_mb_sec_mgr(MB_MGR *mb_mgr)
+{
+	JOB_AES_HMAC *job = IMB_FLUSH_JOB(mb_mgr);
+	uint32_t processed = 0;
+
+	if (job)
+		processed = handle_completed_sec_jobs(job, mb_mgr);
+
+	return processed;
+}
+
 static inline uint16_t
 flush_mb_mgr(struct aesni_mb_qp *qp, struct rte_crypto_op **ops,
 		uint16_t nb_ops)
@@ -1239,6 +1457,105 @@ aesni_mb_pmd_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
 	return processed_jobs;
 }
 
+static MB_MGR *
+alloc_init_mb_mgr(void)
+{
+	MB_MGR *mb_mgr = alloc_mb_mgr(0);
+
+	if (mb_mgr == NULL)
+		return NULL;
+
+	switch (vector_mode) {
+	case RTE_AESNI_MB_SSE:
+		init_mb_mgr_sse(mb_mgr);
+		break;
+	case RTE_AESNI_MB_AVX:
+		init_mb_mgr_avx(mb_mgr);
+		break;
+	case RTE_AESNI_MB_AVX2:
+		init_mb_mgr_avx2(mb_mgr);
+		break;
+	case RTE_AESNI_MB_AVX512:
+		init_mb_mgr_avx512(mb_mgr);
+		break;
+	default:
+		AESNI_MB_LOG(ERR, "Unsupported vector mode %u\n", vector_mode);
+		free_mb_mgr(mb_mgr);
+		return NULL;
+	}
+
+	return mb_mgr;
+}
+
+static MB_MGR *sec_mb_mgrs[RTE_MAX_LCORE];
+
+int
+aesni_mb_sec_crypto_process_bulk(struct rte_security_session *sess,
+		struct rte_security_vec buf[], void *iv[], void *aad[],
+		void *digest[], int status[], uint32_t num)
+{
+	struct aesni_mb_sec_session *sec_sess = sess->sess_private_data;
+	JOB_AES_HMAC *job;
+	MB_MGR *mb_mgr;
+	uint32_t lcore_id = rte_lcore_id();
+	uint8_t digest_idx = sec_sess->digest_idx;
+	uint32_t i, processed = 0;
+	int ret = 0, errcnt = 0;
+
+	if (unlikely(sec_mb_mgrs[lcore_id] == NULL)) {
+		sec_mb_mgrs[lcore_id] = alloc_init_mb_mgr();
+
+		if (sec_mb_mgrs[lcore_id] == NULL) {
+			for (i = 0; i < num; i++)
+				status[i] = -ENOMEM;
+
+			return -num;
+		}
+	}
+
+	mb_mgr = sec_mb_mgrs[lcore_id];
+
+	for (i = 0; i < num; i++) {
+		void *seg_buf = buf[i].vec[0].iov_base;
+		uint32_t buf_len = buf[i].vec[0].iov_len;
+
+		job = IMB_GET_NEXT_JOB(mb_mgr);
+		if (unlikely(job == NULL)) {
+			processed += flush_mb_sec_mgr(mb_mgr);
+
+			job = IMB_GET_NEXT_JOB(mb_mgr);
+			if (!job) {
+				errcnt -= 1;
+				status[i] = -ENOMEM;
+				processed++;
+				continue;
+			}
+		}
+
+		ret = set_mb_job_params_sec(job, sec_sess, seg_buf, buf_len,
+				iv[i], aad[i], digest[i], &status[i],
+				&digest_idx);
+		if (ret) {
+			processed++;
+			status[i] = ret;
+			errcnt -= 1;
+			continue;
+		}
+
+		/* Submit job to multi-buffer for processing */
+#ifdef RTE_LIBRTE_PMD_AESNI_MB_DEBUG
+		job = IMB_SUBMIT_JOB(mb_mgr);
+#else
+		job = IMB_SUBMIT_JOB_NOCHECK(mb_mgr);
+#endif
+
+		if (job)
+			processed += handle_completed_sec_jobs(job, mb_mgr);
+	}
+
+	while (processed < num)
+		processed += flush_mb_sec_mgr(mb_mgr);
+
+	return errcnt;
+}
+
 static int cryptodev_aesni_mb_remove(struct rte_vdev_device *vdev);
 
 static int
@@ -1248,8 +1565,9 @@ cryptodev_aesni_mb_create(const char *name,
 {
 	struct rte_cryptodev *dev;
 	struct aesni_mb_private *internals;
-	enum aesni_mb_vector_mode vector_mode;
+	struct rte_security_ctx *sec_ctx = NULL;
 	MB_MGR *mb_mgr;
+	char sec_name[RTE_DEV_NAME_MAX_LEN];
 
 	/* Check CPU for support for AES instruction set */
 	if (!rte_cpu_get_flag_enabled(RTE_CPUFLAG_AES)) {
@@ -1283,35 +1601,14 @@ cryptodev_aesni_mb_create(const char *name,
 	dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
 			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
 			RTE_CRYPTODEV_FF_CPU_AESNI |
-			RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;
+			RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT |
+			RTE_CRYPTODEV_FF_SECURITY;
 
-	mb_mgr = alloc_mb_mgr(0);
+	mb_mgr = alloc_init_mb_mgr();
 	if (mb_mgr == NULL)
 		return -ENOMEM;
 
-	switch (vector_mode) {
-	case RTE_AESNI_MB_SSE:
-		dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_SSE;
-		init_mb_mgr_sse(mb_mgr);
-		break;
-	case RTE_AESNI_MB_AVX:
-		dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX;
-		init_mb_mgr_avx(mb_mgr);
-		break;
-	case RTE_AESNI_MB_AVX2:
-		dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX2;
-		init_mb_mgr_avx2(mb_mgr);
-		break;
-	case RTE_AESNI_MB_AVX512:
-		dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX512;
-		init_mb_mgr_avx512(mb_mgr);
-		break;
-	default:
-		AESNI_MB_LOG(ERR, "Unsupported vector mode %u\n", vector_mode);
-		goto error_exit;
-	}
-
 	/* Set vector instructions mode supported */
 	internals = dev->data->dev_private;
@@ -1322,11 +1619,28 @@ cryptodev_aesni_mb_create(const char *name,
 	AESNI_MB_LOG(INFO, "IPSec Multi-buffer library version used: %s\n",
 			imb_get_version_str());
 
+	/* setup security operations */
+	snprintf(sec_name, sizeof(sec_name) - 1, "aes_mb_sec_%u",
+			dev->driver_id);
+	sec_ctx = rte_zmalloc_socket(sec_name,
+			sizeof(struct rte_security_ctx),
+			RTE_CACHE_LINE_SIZE, init_params->socket_id);
+	if (sec_ctx == NULL) {
+		AESNI_MB_LOG(ERR, "memory allocation failed\n");
+		goto error_exit;
+	}
+
+	sec_ctx->device = (void *)dev;
+	sec_ctx->ops = rte_aesni_mb_pmd_security_ops;
+	dev->security_ctx = sec_ctx;
+
 	return 0;
 
 error_exit:
 	if (mb_mgr)
 		free_mb_mgr(mb_mgr);
+	if (sec_ctx)
+		rte_free(sec_ctx);
 
 	rte_cryptodev_pmd_destroy(dev);
@@ -1367,6 +1681,7 @@ cryptodev_aesni_mb_remove(struct rte_vdev_device *vdev)
 	struct rte_cryptodev *cryptodev;
 	struct aesni_mb_private *internals;
 	const char *name;
+	uint32_t i;
 
 	name = rte_vdev_device_name(vdev);
 	if (name == NULL)
@@ -1379,6 +1694,9 @@ cryptodev_aesni_mb_remove(struct rte_vdev_device *vdev)
 	internals = cryptodev->data->dev_private;
 
 	free_mb_mgr(internals->mb_mgr);
+	for (i = 0; i < RTE_MAX_LCORE; i++)
+		if (sec_mb_mgrs[i])
+			free_mb_mgr(sec_mb_mgrs[i]);
 
 	return rte_cryptodev_pmd_destroy(cryptodev);
 }
diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c
index 8d15b99d4..f47df2d57 100644
--- a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c
+++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c
@@ -8,6 +8,7 @@
 #include
 #include
 #include
+#include <rte_security_driver.h>
 
 #include "rte_aesni_mb_pmd_private.h"
 
@@ -732,7 +733,8 @@ aesni_mb_pmd_qp_count(struct rte_cryptodev *dev)
 static unsigned
 aesni_mb_pmd_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
 {
-	return sizeof(struct aesni_mb_session);
+	return RTE_ALIGN_CEIL(sizeof(struct aesni_mb_session),
+			RTE_CACHE_LINE_SIZE);
 }
 
 /** Configure a aesni multi-buffer session from a crypto xform chain */
@@ -810,4 +812,92 @@ struct rte_cryptodev_ops aesni_mb_pmd_ops = {
 		.sym_session_clear = aesni_mb_pmd_sym_session_clear
 };
 
+/** Create a security session for the synchronous crypto path */
+static int
+aesni_mb_security_session_create(void *dev,
+		struct rte_security_session_conf *conf,
+		struct rte_security_session *sess,
+		struct rte_mempool *mempool)
+{
+	struct rte_cryptodev *cdev = dev;
+	struct aesni_mb_private *internals = cdev->data->dev_private;
+	struct aesni_mb_sec_session *sess_priv;
+	int ret;
+
+	if (!conf->crypto_xform) {
+		AESNI_MB_LOG(ERR, "Invalid security session conf");
+		return -EINVAL;
+	}
+
+	if (conf->cpucrypto.cipher_offset < 0) {
+		AESNI_MB_LOG(ERR, "Invalid security session conf");
+		return -EINVAL;
+	}
+
+	if (rte_mempool_get(mempool, (void **)(&sess_priv))) {
+		AESNI_MB_LOG(ERR,
+				"Couldn't get object from session mempool");
+		return -ENOMEM;
+	}
+
+	sess_priv->cipher_offset = conf->cpucrypto.cipher_offset;
+
+	ret = aesni_mb_set_session_parameters(internals->mb_mgr,
+			&sess_priv->sess, conf->crypto_xform);
+	if (ret != 0) {
+		AESNI_MB_LOG(ERR, "failed to configure session parameters");
+
+		rte_mempool_put(mempool, sess_priv);
+		return ret;
+	}
+
+	sess->sess_private_data = (void *)sess_priv;
+
+	return 0;
+}
+
+static int
+aesni_mb_security_session_destroy(void *dev __rte_unused,
+		struct rte_security_session *sess)
+{
+	struct aesni_mb_sec_session *sess_priv =
+			get_sec_session_private_data(sess);
+
+	if (sess_priv) {
+		struct rte_mempool *sess_mp = rte_mempool_from_obj(
+				(void *)sess_priv);
+
+		memset(sess_priv, 0, sizeof(struct aesni_mb_sec_session));
+		set_sec_session_private_data(sess, NULL);
+
+		if (sess_mp == NULL) {
+			AESNI_MB_LOG(ERR, "failed to fetch session mempool");
+			return -EINVAL;
+		}
+
+		rte_mempool_put(sess_mp, sess_priv);
+	}
+
+	return 0;
+}
+
+static unsigned int
+aesni_mb_sec_session_get_size(__rte_unused void *device)
+{
+	return RTE_ALIGN_CEIL(sizeof(struct aesni_mb_sec_session),
+			RTE_CACHE_LINE_SIZE);
+}
+
+static struct rte_security_ops aesni_mb_security_ops = {
+		.session_create = aesni_mb_security_session_create,
+		.session_get_size = aesni_mb_sec_session_get_size,
+		.session_update = NULL,
+		.session_stats_get = NULL,
+		.session_destroy = aesni_mb_security_session_destroy,
+		.set_pkt_metadata = NULL,
+		.capabilities_get = NULL,
+		.process_cpu_crypto_bulk = aesni_mb_sec_crypto_process_bulk,
+};
+
 struct rte_cryptodev_ops *rte_aesni_mb_pmd_ops = &aesni_mb_pmd_ops;
+struct rte_security_ops *rte_aesni_mb_pmd_security_ops = &aesni_mb_security_ops;
diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h
index b794d4bc1..64b58ca8e 100644
--- a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h
+++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h
@@ -176,7 +176,6 @@ struct aesni_mb_qp {
 	 */
 } __rte_cache_aligned;
 
-/** AES-NI multi-buffer private session structure */
 struct aesni_mb_session {
 	JOB_CHAIN_ORDER chain_order;
 	struct {
@@ -265,16 +264,32 @@ struct aesni_mb_session {
 		/** AAD data length */
 		uint16_t aad_len;
 	} aead;
-} __rte_cache_aligned;
+};
+
+/** AES-NI multi-buffer private security session structure */
+struct aesni_mb_sec_session {
+	/** Inner crypto session data */
+	struct aesni_mb_session sess;
+	uint8_t temp_digests[MAX_JOBS][DIGEST_LENGTH_MAX];
+	uint16_t digest_idx;
+	uint32_t cipher_offset;
+	MB_MGR *mb_mgr;
+};
 
 extern int
 aesni_mb_set_session_parameters(const MB_MGR *mb_mgr,
 		struct aesni_mb_session *sess,
 		const struct rte_crypto_sym_xform *xform);
 
+extern int
+aesni_mb_sec_crypto_process_bulk(struct rte_security_session *sess,
+		struct rte_security_vec buf[], void *iv[], void *aad[],
+		void *digest[], int status[], uint32_t num);
+
 /** device specific operations function pointer structure */
 extern struct rte_cryptodev_ops *rte_aesni_mb_pmd_ops;
-
+/** device specific operations function pointer structure for rte_security */
+extern struct rte_security_ops *rte_aesni_mb_pmd_security_ops;
 #endif /* _RTE_AESNI_MB_PMD_PRIVATE_H_ */
-- 
2.14.5
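
P.S. A standalone illustration of the user-data packing trick used by
set_mb_job_params_sec()/post_process_mb_sec_job() above: the digest
length and the generate/verify flag travel through the same int that
later carries the completion status, so the two JOB_AES_HMAC user-data
pointers suffice. Demo harness only, not part of the patch; the union
mirrors the one in the diff.

#include <stdint.h>
#include <stdio.h>

union sec_userdata_field {
	int status;
	struct {
		uint16_t is_gen_digest;
		uint16_t digest_len;
	};
};

int main(void)
{
	int status;			/* slot later reused for the op status */
	union sec_userdata_field u;

	/* Enqueue side: pack the two digest parameters into status. */
	u.is_gen_digest = 1;
	u.digest_len = 16;
	status = u.status;

	/* Dequeue side: recover them before the slot is overwritten
	 * with the real completion code. */
	u.status = status;
	printf("gen=%u len=%u\n", u.is_gen_digest, u.digest_len);
	return 0;
}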