From mboxrd@z Thu Jan 1 00:00:00 1970
From: Fan Zhang
To: dev@dpdk.org
Cc: maxime.coquelin@redhat.com, jianjay.zhou@huawei.com, jianfeng.tan@intel.com
Date: Thu, 29 Mar 2018 07:52:50 -0500
Message-Id: <1522327975-28769-4-git-send-email-roy.fan.zhang@intel.com>
X-Mailer: git-send-email 2.7.4
In-Reply-To: <1522327975-28769-1-git-send-email-roy.fan.zhang@intel.com>
References: <20180326095114.11605-1-roy.fan.zhang@intel.com>
 <1522327975-28769-1-git-send-email-roy.fan.zhang@intel.com>
Subject: [dpdk-dev] [PATCH v4 3/8] lib/librte_vhost: add session message handler

This patch adds a session message handler to vhost crypto.

Signed-off-by: Fan Zhang
---
 lib/librte_vhost/vhost_crypto.c | 428 ++++++++++++++++++++++++++++++++++++++++
 1 file changed, 428 insertions(+)
 create mode 100644 lib/librte_vhost/vhost_crypto.c

diff --git a/lib/librte_vhost/vhost_crypto.c b/lib/librte_vhost/vhost_crypto.c
new file mode 100644
index 0000000..c639b20
--- /dev/null
+++ b/lib/librte_vhost/vhost_crypto.c
@@ -0,0 +1,428 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017-2018 Intel Corporation
+ */
+
+#include
+
+#include
+#include
+#include
+#ifdef RTE_LIBRTE_VHOST_DEBUG
+#include
+#endif
+#include "vhost.h"
+#include "vhost_user.h"
+#include "rte_vhost_crypto.h"
+
+#define NB_MEMPOOL_OBJS		(1024)
+#define NB_CRYPTO_DESCRIPTORS	(1024)
+#define NB_CACHE_OBJS		(128)
+
+#define SESSION_MAP_ENTRIES	(1024) /**< Max nb sessions per vdev */
+#define MAX_KEY_SIZE		(32)
+#define VHOST_CRYPTO_MAX_IV_LEN	(16)
+#define MAX_COUNT_DOWN_TIMES	(100)
+
+#define INHDR_LEN		(sizeof(struct virtio_crypto_inhdr))
+#define IV_OFFSET		(sizeof(struct rte_crypto_op) + \
+				sizeof(struct rte_crypto_sym_op))
+
+#ifdef RTE_LIBRTE_VHOST_DEBUG
+#define VC_LOG_ERR(fmt, args...) \
+	RTE_LOG(ERR, USER1, "[%s] %s() line %u: " fmt "\n", \
+		"Vhost-Crypto", __func__, __LINE__, ## args)
+#define VC_LOG_INFO(fmt, args...) \
+	RTE_LOG(INFO, USER1, "[%s] %s() line %u: " fmt "\n", \
+		"Vhost-Crypto", __func__, __LINE__, ## args)
+
+#define VC_LOG_DBG(fmt, args...) \
+	RTE_LOG(DEBUG, USER1, "[%s] %s() line %u: " fmt "\n", \
+		"Vhost-Crypto", __func__, __LINE__, ## args)
+#else
+#define VC_LOG_ERR(fmt, args...) \
+	RTE_LOG(ERR, USER1, "[VHOST-Crypto]: " fmt "\n", ## args)
+#define VC_LOG_INFO(fmt, args...) \
+	RTE_LOG(INFO, USER1, "[VHOST-Crypto]: " fmt "\n", ## args)
+#define VC_LOG_DBG(fmt, args...)
+#endif
+
+#define VIRTIO_CRYPTO_FEATURES ((1 << VIRTIO_F_NOTIFY_ON_EMPTY) | \
+		(1 << VIRTIO_RING_F_INDIRECT_DESC) | \
+		(1 << VIRTIO_RING_F_EVENT_IDX) | \
+		(1 << VIRTIO_CRYPTO_SERVICE_CIPHER) | \
+		(1 << VIRTIO_CRYPTO_SERVICE_HASH) | \
+		(1 << VIRTIO_CRYPTO_SERVICE_MAC) | \
+		(1 << VIRTIO_CRYPTO_SERVICE_AEAD) | \
+		(1 << VIRTIO_NET_F_CTRL_VQ))
+
+
+#define GPA_TO_VVA(t, m, a)	((t)(uintptr_t)rte_vhost_gpa_to_vva(m, a))
+
+/* Macro to get the buffer at the end of rte_crypto_op */
+#define REQ_OP_OFFSET		(IV_OFFSET + VHOST_CRYPTO_MAX_IV_LEN)
+
+/**
+ * 1-to-1 mapping between RTE_CRYPTO_*ALGO* and VIRTIO_CRYPTO_*ALGO*; for
+ * algorithms not supported by RTE_CRYPTODEV, -VIRTIO_CRYPTO_NOTSUPP is
+ * returned.
+ */
+static int cipher_algo_transform[] = {
+	RTE_CRYPTO_CIPHER_NULL,
+	RTE_CRYPTO_CIPHER_ARC4,
+	RTE_CRYPTO_CIPHER_AES_ECB,
+	RTE_CRYPTO_CIPHER_AES_CBC,
+	RTE_CRYPTO_CIPHER_AES_CTR,
+	-VIRTIO_CRYPTO_NOTSUPP, /* VIRTIO_CRYPTO_CIPHER_DES_ECB */
+	RTE_CRYPTO_CIPHER_DES_CBC,
+	RTE_CRYPTO_CIPHER_3DES_ECB,
+	RTE_CRYPTO_CIPHER_3DES_CBC,
+	RTE_CRYPTO_CIPHER_3DES_CTR,
+	RTE_CRYPTO_CIPHER_KASUMI_F8,
+	RTE_CRYPTO_CIPHER_SNOW3G_UEA2,
+	RTE_CRYPTO_CIPHER_AES_F8,
+	RTE_CRYPTO_CIPHER_AES_XTS,
+	RTE_CRYPTO_CIPHER_ZUC_EEA3
+};
+
+/**
+ * VIRTIO_CRYPTO_AUTH_* indexes are not sequential; the gaps are filled with
+ * -VIRTIO_CRYPTO_BADMSG errors.
+ */
+static int auth_algo_transform[] = {
+	RTE_CRYPTO_AUTH_NULL,
+	RTE_CRYPTO_AUTH_MD5_HMAC,
+	RTE_CRYPTO_AUTH_SHA1_HMAC,
+	RTE_CRYPTO_AUTH_SHA224_HMAC,
+	RTE_CRYPTO_AUTH_SHA256_HMAC,
+	RTE_CRYPTO_AUTH_SHA384_HMAC,
+	RTE_CRYPTO_AUTH_SHA512_HMAC,
+	-VIRTIO_CRYPTO_BADMSG,
+	-VIRTIO_CRYPTO_BADMSG,
+	-VIRTIO_CRYPTO_BADMSG,
+	-VIRTIO_CRYPTO_BADMSG,
+	-VIRTIO_CRYPTO_BADMSG,
+	-VIRTIO_CRYPTO_BADMSG,
+	-VIRTIO_CRYPTO_BADMSG,
+	-VIRTIO_CRYPTO_BADMSG,
+	-VIRTIO_CRYPTO_BADMSG,
+	-VIRTIO_CRYPTO_BADMSG,
+	-VIRTIO_CRYPTO_BADMSG,
+	-VIRTIO_CRYPTO_BADMSG,
+	-VIRTIO_CRYPTO_BADMSG,
+	-VIRTIO_CRYPTO_BADMSG,
+	-VIRTIO_CRYPTO_BADMSG,
+	-VIRTIO_CRYPTO_BADMSG,
+	-VIRTIO_CRYPTO_BADMSG,
+	-VIRTIO_CRYPTO_BADMSG,
+	-VIRTIO_CRYPTO_NOTSUPP, /* VIRTIO_CRYPTO_MAC_CMAC_3DES */
+	RTE_CRYPTO_AUTH_AES_CMAC,
+	RTE_CRYPTO_AUTH_KASUMI_F9,
+	RTE_CRYPTO_AUTH_SNOW3G_UIA2,
+	-VIRTIO_CRYPTO_BADMSG,
+	-VIRTIO_CRYPTO_BADMSG,
+	-VIRTIO_CRYPTO_BADMSG,
+	-VIRTIO_CRYPTO_BADMSG,
+	-VIRTIO_CRYPTO_BADMSG,
+	-VIRTIO_CRYPTO_BADMSG,
+	-VIRTIO_CRYPTO_BADMSG,
+	-VIRTIO_CRYPTO_BADMSG,
+	-VIRTIO_CRYPTO_BADMSG,
+	-VIRTIO_CRYPTO_BADMSG,
+	-VIRTIO_CRYPTO_BADMSG,
+	-VIRTIO_CRYPTO_BADMSG,
+	RTE_CRYPTO_AUTH_AES_GMAC,
+	-VIRTIO_CRYPTO_NOTSUPP, /* VIRTIO_CRYPTO_MAC_GMAC_TWOFISH */
+	RTE_CRYPTO_AUTH_AES_CBC_MAC,
+	-VIRTIO_CRYPTO_NOTSUPP, /* VIRTIO_CRYPTO_MAC_CBCMAC_KASUMI_F9 */
+	RTE_CRYPTO_AUTH_AES_XCBC_MAC
+};
+
+static int cipher_op_transform[] = {
+	-VIRTIO_CRYPTO_BADMSG, /* meaningless */
+	RTE_CRYPTO_CIPHER_OP_ENCRYPT,
+	RTE_CRYPTO_CIPHER_OP_DECRYPT
+};
+
+static int iv_lens[] = {
+	-1, /* Invalid input */
+	0,  /* RTE_CRYPTO_CIPHER_NULL */
+	8,  /* RTE_CRYPTO_CIPHER_3DES_CBC */
+	8,  /* RTE_CRYPTO_CIPHER_3DES_CTR */
+	8,  /* RTE_CRYPTO_CIPHER_3DES_ECB */
+	16, /* RTE_CRYPTO_CIPHER_AES_CBC */
+	/* TODO: add common algos */
+};
+
+/**
+ * vhost_crypto struct is used to maintain a number of virtio_cryptos and
+ * one DPDK crypto device that deals with all crypto workloads. It is declared
+ * here and defined in vhost_crypto.c
+ */
+struct vhost_crypto {
+	/** Used to lookup DPDK Cryptodev Session based on VIRTIO crypto
+	 * session ID.
+	 */
+	struct rte_hash *session_map;
+	struct rte_mempool *mbuf_pool;
+	struct rte_mempool *sess_pool;
+
+	/** DPDK cryptodev ID */
+	uint8_t cid;
+	uint16_t nb_qps;
+
+	uint64_t last_session_id;
+
+	uint64_t cache_session_id;
+	struct rte_cryptodev_sym_session *cache_session;
+	/** socket id for the device */
+	int socket_id;
+
+	struct virtio_net *dev;
+
+	uint8_t option;
+} __rte_cache_aligned;
+
+struct vhost_crypto_data_req {
+	struct vring_desc *head;
+	struct rte_vhost_memory *mem;
+	struct virtio_crypto_inhdr *inhdr;
+
+	uint16_t desc_idx;
+	uint32_t len;
+	struct vhost_virtqueue *vq;
+
+	uint8_t zero_copy;
+
+	int vid;
+
+	struct vring_desc *wb_desc;
+	uint16_t wb_len;
+};
+
+static int
+transform_cipher_param(struct rte_crypto_sym_xform *xform,
+		VhostUserCryptoSessionParam *param)
+{
+	int ret;
+
+	ret = cipher_algo_transform[param->cipher_algo];
+	if (unlikely(ret < 0))
+		return ret;
+
+	xform->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
+	xform->cipher.algo = ret;
+	xform->cipher.key.length = param->cipher_key_len;
+	if (xform->cipher.key.length > 0)
+		xform->cipher.key.data = param->cipher_key_buf;
+	ret = cipher_op_transform[param->dir];
+	if (unlikely(ret < 0)) {
+		VC_LOG_DBG("Bad operation type");
+		return ret;
+	}
+	xform->cipher.op = ret;
+	ret = iv_lens[xform->cipher.algo];
+	if (unlikely(ret < 0))
+		return ret;
+	xform->cipher.iv.length = ret;
+	xform->cipher.iv.offset = IV_OFFSET;
+	return 0;
+}
+
+static int
+transform_chain_param(struct rte_crypto_sym_xform *xforms,
+		VhostUserCryptoSessionParam *param)
+{
+	struct rte_crypto_sym_xform *xform_cipher, *xform_auth;
+	int ret;
+
+	switch (param->chaining_dir) {
+	case VIRTIO_CRYPTO_SYM_ALG_CHAIN_ORDER_HASH_THEN_CIPHER:
+		xform_auth = xforms;
+		xform_cipher = xforms->next;
+		xform_cipher->cipher.op = RTE_CRYPTO_CIPHER_OP_DECRYPT;
+		xform_auth->auth.op = RTE_CRYPTO_AUTH_OP_VERIFY;
+		break;
+	case VIRTIO_CRYPTO_SYM_ALG_CHAIN_ORDER_CIPHER_THEN_HASH:
+		xform_cipher = xforms;
+		xform_auth = xforms->next;
+		xform_cipher->cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
+		xform_auth->auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
+		break;
+	default:
+		return -VIRTIO_CRYPTO_BADMSG;
+	}
+
+	/* cipher */
+	ret = cipher_algo_transform[param->cipher_algo];
+	if (unlikely(ret < 0))
+		return ret;
+	xform_cipher->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
+	xform_cipher->cipher.algo = ret;
+	xform_cipher->cipher.key.length = param->cipher_key_len;
+	xform_cipher->cipher.key.data = param->cipher_key_buf;
+	ret = iv_lens[xform_cipher->cipher.algo];
+	if (unlikely(ret < 0))
+		return ret;
+	xform_cipher->cipher.iv.length = ret;
+	xform_cipher->cipher.iv.offset = IV_OFFSET;
+
+	/* auth */
+	xform_auth->type = RTE_CRYPTO_SYM_XFORM_AUTH;
+	ret = auth_algo_transform[param->hash_algo];
+	if (unlikely(ret < 0))
+		return ret;
+	xform_auth->auth.algo = ret;
+	xform_auth->auth.digest_length = param->digest_len;
+	xform_auth->auth.key.length = param->auth_key_len;
+	xform_auth->auth.key.data = param->auth_key_buf;
+
+	return 0;
+}
+
+static void
+vhost_crypto_create_sess(struct vhost_crypto *vcrypto,
+		VhostUserCryptoSessionParam *sess_param)
+{
+	struct rte_crypto_sym_xform xform1 = {0}, xform2 = {0};
+	struct rte_cryptodev_sym_session *session;
+	int ret;
+
+	switch (sess_param->op_type) {
+	case VIRTIO_CRYPTO_SYM_OP_NONE:
+	case VIRTIO_CRYPTO_SYM_OP_CIPHER:
+		ret = transform_cipher_param(&xform1, sess_param);
+		if (unlikely(ret)) {
+			VC_LOG_ERR("Error transform session msg (%i)", ret);
+			sess_param->session_id = ret;
+			return;
+		}
+		break;
+	case VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING:
+		if (unlikely(sess_param->hash_mode !=
+				VIRTIO_CRYPTO_SYM_HASH_MODE_AUTH)) {
+			sess_param->session_id = -VIRTIO_CRYPTO_NOTSUPP;
+			VC_LOG_ERR("Error transform session message (%i)",
+					-VIRTIO_CRYPTO_NOTSUPP);
+			return;
+		}
+
+		xform1.next = &xform2;
+
+		ret = transform_chain_param(&xform1, sess_param);
+		if (unlikely(ret)) {
+			VC_LOG_ERR("Error transform session message (%i)", ret);
+			sess_param->session_id = ret;
+			return;
+		}
+
+		break;
+	default:
+		VC_LOG_ERR("Algorithm not yet supported");
+		sess_param->session_id = -VIRTIO_CRYPTO_NOTSUPP;
+		return;
+	}
+
+	session = rte_cryptodev_sym_session_create(vcrypto->sess_pool);
+	if (!session) {
+		VC_LOG_ERR("Failed to create session");
+		sess_param->session_id = -VIRTIO_CRYPTO_ERR;
+		return;
+	}
+
+	if (rte_cryptodev_sym_session_init(vcrypto->cid, session, &xform1,
+			vcrypto->sess_pool) < 0) {
+		VC_LOG_ERR("Failed to initialize session");
+		sess_param->session_id = -VIRTIO_CRYPTO_ERR;
+		return;
+	}
+
+	/* insert hash to map */
+	if (rte_hash_add_key_data(vcrypto->session_map,
+			&vcrypto->last_session_id, session) < 0) {
+		VC_LOG_ERR("Failed to insert session to hash table");
+
+		if (rte_cryptodev_sym_session_clear(vcrypto->cid, session) < 0)
+			VC_LOG_ERR("Failed to clear session");
+		else {
+			if (rte_cryptodev_sym_session_free(session) < 0)
+				VC_LOG_ERR("Failed to free session");
+		}
+		sess_param->session_id = -VIRTIO_CRYPTO_ERR;
+		return;
+	}
+
+	VC_LOG_DBG("Session (key %lu, session %p) created.",
+			vcrypto->last_session_id, session);
+
+	sess_param->session_id = vcrypto->last_session_id;
+	vcrypto->last_session_id++;
+}
+
+static int
+vhost_crypto_close_sess(struct vhost_crypto *vcrypto, uint64_t session_id)
+{
+	struct rte_cryptodev_sym_session *session;
+	uint64_t sess_id = session_id;
+	int ret;
+
+	ret = rte_hash_lookup_data(vcrypto->session_map, &sess_id,
+			(void **)&session);
+
+	if (unlikely(ret < 0)) {
+		VC_LOG_ERR("Failed to delete session (key %lu).", session_id);
+		return -VIRTIO_CRYPTO_INVSESS;
+	}
+
+	if (rte_cryptodev_sym_session_clear(vcrypto->cid, session) < 0) {
+		VC_LOG_DBG("Failed to delete session");
+		return -VIRTIO_CRYPTO_ERR;
+	}
+
+	if (rte_cryptodev_sym_session_free(session) < 0) {
+		VC_LOG_DBG("Failed to delete session");
+		return -VIRTIO_CRYPTO_ERR;
+	}
+
+	if (rte_hash_del_key(vcrypto->session_map, &sess_id) < 0) {
+		VC_LOG_DBG("Failed to delete session from hash table.");
+		return -VIRTIO_CRYPTO_ERR;
+	}
+
+	VC_LOG_DBG("Session (key %lu, session %p) deleted.", sess_id,
+			session);
+
+	return 0;
+}
+
+static int
+vhost_crypto_msg_post_handler(int vid, void *msg, uint32_t *require_reply)
+{
+	struct virtio_net *dev = get_device(vid);
+	struct vhost_crypto *vcrypto;
+	VhostUserMsg *vmsg = msg;
+	int ret = 0;
+
+	if (dev == NULL || require_reply == NULL) {
+		VC_LOG_ERR("Invalid vid %i", vid);
+		return -EINVAL;
+	}
+
+	vcrypto = dev->extern_data;
+	if (vcrypto == NULL) {
+		VC_LOG_ERR("Cannot find required data, is it initialized?");
+		return -ENOENT;
+	}
+
+	*require_reply = 0;
+
+	if (vmsg->request.master == VHOST_USER_CRYPTO_CREATE_SESS) {
+		vhost_crypto_create_sess(vcrypto,
+				&vmsg->payload.crypto_session);
+		*require_reply = 1;
+	} else if (vmsg->request.master == VHOST_USER_CRYPTO_CLOSE_SESS)
+		ret = vhost_crypto_close_sess(vcrypto, vmsg->payload.u64);
+	else
+		ret = -EINVAL;
+
+	return ret;
+}
-- 
2.7.4
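
For reference, a rough illustration of what the session-create path in this
patch produces (a sketch only, not part of the patch): for a guest request
whose cipher_algo selects AES-CBC (VIRTIO_CRYPTO_CIPHER_AES_CBC in the
virtio-crypto spec), with a 16-byte key and an encrypt direction,
transform_cipher_param() above is expected to fill in an xform roughly
equivalent to the snippet below. The concrete values are hypothetical;
the field names come from rte_cryptodev and from the tables defined in
this patch, and key_buf stands in for the guest-supplied cipher_key_buf.

	/* Illustrative only: mirrors the lookups done by
	 * transform_cipher_param() for an AES-CBC encrypt session.
	 */
	struct rte_crypto_sym_xform xform = {
		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
		.cipher = {
			.algo = RTE_CRYPTO_CIPHER_AES_CBC,  /* cipher_algo_transform[] */
			.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT, /* cipher_op_transform[] */
			.key = {
				.data = key_buf, /* sess_param->cipher_key_buf */
				.length = 16,
			},
			.iv = {
				.offset = IV_OFFSET, /* IV placed right after the crypto op */
				.length = 16,        /* iv_lens[RTE_CRYPTO_CIPHER_AES_CBC] */
			},
		},
	};

vhost_crypto_create_sess() then passes this xform to
rte_cryptodev_sym_session_init(), stores the new session in session_map
keyed by last_session_id, and returns that key to the guest through
sess_param->session_id; vhost_crypto_close_sess() reverses the lookup
when a VHOST_USER_CRYPTO_CLOSE_SESS message arrives.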