From mboxrd@z Thu Jan 1 00:00:00 1970
From: David Coyle
To: dev@dpdk.org
Cc: declan.doherty@intel.com, fiona.trahe@intel.com, pablo.de.lara.guarch@intel.com,
 brendan.ryan@intel.com, shreyansh.jain@nxp.com, hemant.agrawal@nxp.com,
 David Coyle, Mairtin o Loingsigh
Date: Fri, 3 Apr 2020 17:36:56 +0100
Message-Id: <20200403163656.60545-5-david.coyle@intel.com>
X-Mailer: git-send-email 2.17.1
In-Reply-To: <20200403163656.60545-1-david.coyle@intel.com>
References: <20200403163656.60545-1-david.coyle@intel.com>
Subject: [dpdk-dev] [PATCH v2 4/4] app/crypto-perf: add support for multi-function processing
List-Id: DPDK patches and discussions

Support for multi-function operations, via a raw device, has been added
to the test-crypto-perf app.

A new optype has been added: multi-fn

A new parameter has been added for multi-fn mode: --multi-fn-params.
This parameter specifies what type of multi-function processing is
required and the options associated with it; an illustrative invocation
is shown below.
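As an example, a throughput test of the docsis-cipher-crc operation could be
launched roughly as shown below. This is a hypothetical invocation: the binary
path, the EAL core list and the --vdev name for the AESNI-MB rawdev PMD (added
earlier in this patch series) are assumptions, and the cipher offset (12) and
CRC offset (0) are simply the minimum spacing the option checks allow.

    ./dpdk-test-crypto-perf -l 0,1 --vdev rawdev_aesni_mb -- \
        --ptest throughput \
        --devtype rawdev_aesni_mb \
        --optype multi-fn \
        --multi-fn-params docsis-cipher-crc,12,0 \
        --cipher-algo aes-docsisbpi --cipher-op encrypt \
        --cipher-key-sz 16 --cipher-iv-sz 16 \
        --buffer-sz 1024 --burst-sz 32 --total-ops 1000000

For the pon-cipher-crc-bip type, --multi-fn-params instead takes the operation
name followed by a single buffer-padding value, e.g.
--multi-fn-params pon-cipher-crc-bip,0.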
Currently the following are supported: docsis-cipher-crc,, pon-cipher-crc-bip, Signed-off-by: David Coyle Signed-off-by: Mairtin o Loingsigh --- app/test-crypto-perf/Makefile | 5 + app/test-crypto-perf/cperf_ops.c | 265 ++++++++++++ app/test-crypto-perf/cperf_options.h | 37 +- app/test-crypto-perf/cperf_options_parsing.c | 396 ++++++++++++++++-- app/test-crypto-perf/cperf_test_common.c | 88 +++- app/test-crypto-perf/cperf_test_latency.c | 176 ++++++-- .../cperf_test_pmd_cyclecount.c | 96 ++++- app/test-crypto-perf/cperf_test_throughput.c | 164 ++++++-- .../cperf_test_vector_parsing.c | 35 +- app/test-crypto-perf/cperf_test_vectors.c | 53 +++ app/test-crypto-perf/cperf_test_vectors.h | 9 + app/test-crypto-perf/cperf_test_verify.c | 205 +++++++-- app/test-crypto-perf/main.c | 255 +++++++++-- app/test-crypto-perf/meson.build | 6 + 14 files changed, 1584 insertions(+), 206 deletions(-) diff --git a/app/test-crypto-perf/Makefile b/app/test-crypto-perf/Makefile index 78135f38c..baf706e4a 100644 --- a/app/test-crypto-perf/Makefile +++ b/app/test-crypto-perf/Makefile @@ -26,4 +26,9 @@ ifeq ($(CONFIG_RTE_LIBRTE_PMD_CRYPTO_SCHEDULER),y) LDLIBS += -lrte_pmd_crypto_scheduler endif +ifeq ($(CONFIG_RTE_LIBRTE_MULTI_FN_COMMON)$(CONFIG_RTE_LIBRTE_RAWDEV),yy) +CFLAGS += -DMULTI_FN_SUPPORTED +LDLIBS += -lrte_multi_fn +endif + include $(RTE_SDK)/mk/rte.app.mk diff --git a/app/test-crypto-perf/cperf_ops.c b/app/test-crypto-perf/cperf_ops.c index 97584ceed..29f294ac5 100644 --- a/app/test-crypto-perf/cperf_ops.c +++ b/app/test-crypto-perf/cperf_ops.c @@ -3,6 +3,10 @@ */ #include +#include +#ifdef MULTI_FN_SUPPORTED +#include +#endif /* MULTI_FN_SUPPORTED */ #include "cperf_ops.h" #include "cperf_test_vectors.h" @@ -505,6 +509,168 @@ cperf_set_ops_aead(struct rte_crypto_op **ops, return 0; } +#ifdef MULTI_FN_SUPPORTED +static int +cperf_set_ops_multi_fn_cipher_crc(struct rte_crypto_op **ops, + uint32_t src_buf_offset, uint32_t dst_buf_offset __rte_unused, + uint16_t nb_ops, struct rte_cryptodev_sym_session *sess, + const struct cperf_options *options, + const struct cperf_test_vector *test_vector, + uint16_t iv_offset, uint32_t *imix_idx) +{ + uint32_t buffer_sz, offset; + uint16_t i; + + for (i = 0; i < nb_ops; i++) { + struct rte_multi_fn_op *mf_op = + (struct rte_multi_fn_op *)ops[i]; + struct rte_crypto_sym_op *cipher_op; + struct rte_multi_fn_err_detect_op *crc_op; + struct rte_multi_fn_op *mf_cipher_op; + + mf_op->sess = (struct rte_multi_fn_session *)sess; + mf_op->m_src = (struct rte_mbuf *)((uint8_t *)ops[i] + + src_buf_offset); + mf_op->m_dst = NULL; + + if (options->imix_distribution_count) { + buffer_sz = + options->imix_buffer_sizes[*imix_idx]; + *imix_idx = (*imix_idx + 1) % options->pool_sz; + } else + buffer_sz = options->test_buffer_size; + + if (options->cipher_op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) { + /* CRC -> Cipher */ + crc_op = &mf_op->err_detect; + cipher_op = &mf_op->next->crypto_sym; + mf_cipher_op = mf_op->next; + } else { + /* Cipher -> CRC */ + cipher_op = &mf_op->crypto_sym; + crc_op = &mf_op->next->err_detect; + mf_cipher_op = mf_op; + } + + crc_op->data.offset = test_vector->multi_fn_data.crc_offset; + crc_op->data.length = buffer_sz - crc_op->data.offset - + RTE_ETHER_CRC_LEN; + offset = crc_op->data.offset + crc_op->data.length; + crc_op->output.data = rte_pktmbuf_mtod_offset(mf_op->m_src, + uint8_t *, + offset); + crc_op->output.phys_addr = rte_pktmbuf_iova_offset(mf_op->m_src, + offset); + + cipher_op->cipher.data.offset = test_vector->data.cipher_offset; + 
cipher_op->cipher.data.length = buffer_sz - + cipher_op->cipher.data.offset; + + if (options->test == CPERF_TEST_TYPE_VERIFY) { + uint8_t *iv_ptr = (uint8_t *)mf_cipher_op + iv_offset; + memcpy(iv_ptr, test_vector->cipher_iv.data, + test_vector->cipher_iv.length); + } + } + + return 0; +} + +#define PLI_SHIFT_BITS 2 + +static int +cperf_set_ops_multi_fn_cipher_crc_bip(struct rte_crypto_op **ops, + uint32_t src_buf_offset, uint32_t dst_buf_offset __rte_unused, + uint16_t nb_ops, struct rte_cryptodev_sym_session *sess, + const struct cperf_options *options, + const struct cperf_test_vector *test_vector, uint16_t iv_offset, + uint32_t *imix_idx) +{ + uint32_t buffer_sz, offset; + uint16_t i; + int crc_len; + + for (i = 0; i < nb_ops; i++) { + struct rte_multi_fn_op *mf_op = + (struct rte_multi_fn_op *)ops[i]; + struct rte_crypto_sym_op *cipher_op; + struct rte_multi_fn_err_detect_op *crc_op; + struct rte_multi_fn_err_detect_op *bip_op; + struct rte_multi_fn_op *mf_cipher_op; + + mf_op->sess = (struct rte_multi_fn_session *)sess; + mf_op->m_src = (struct rte_mbuf *)((uint8_t *)ops[i] + + src_buf_offset); + mf_op->m_dst = NULL; + + if (options->imix_distribution_count) { + buffer_sz = + options->imix_buffer_sizes[*imix_idx]; + *imix_idx = (*imix_idx + 1) % options->pool_sz; + } else + buffer_sz = options->test_buffer_size; + + if (options->cipher_op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) { + /* CRC -> Cipher -> BIP */ + crc_op = &mf_op->err_detect; + cipher_op = &mf_op->next->crypto_sym; + bip_op = &mf_op->next->next->err_detect; + } else { + /* BIP-> Cipher -> CRC */ + bip_op = &mf_op->err_detect; + cipher_op = &mf_op->next->crypto_sym; + crc_op = &mf_op->next->next->err_detect; + } + mf_cipher_op = mf_op->next; + + crc_op->data.offset = test_vector->multi_fn_data.crc_offset; + crc_len = buffer_sz - crc_op->data.offset - + options->multi_fn_opts.buffer_padding - + RTE_ETHER_CRC_LEN; + crc_len = crc_len > 0 ? crc_len : 0; + crc_op->data.length = crc_len; + offset = crc_op->data.offset + crc_op->data.length; + crc_op->output.data = rte_pktmbuf_mtod_offset(mf_op->m_src, + uint8_t *, + offset); + crc_op->output.phys_addr = rte_pktmbuf_iova_offset(mf_op->m_src, + offset); + + cipher_op->cipher.data.offset = test_vector->data.cipher_offset; + cipher_op->cipher.data.length = buffer_sz - + cipher_op->cipher.data.offset; + + bip_op->data.offset = test_vector->multi_fn_data.bip_offset; + bip_op->data.length = buffer_sz - bip_op->data.offset; + offset = options->test_buffer_size; + bip_op->output.data = rte_pktmbuf_mtod_offset(mf_op->m_src, + uint8_t *, + offset); + bip_op->output.phys_addr = rte_pktmbuf_iova_offset(mf_op->m_src, + offset); + + if (options->test == CPERF_TEST_TYPE_VERIFY) { + uint8_t *iv_ptr = (uint8_t *)mf_cipher_op + iv_offset; + memcpy(iv_ptr, test_vector->cipher_iv.data, + test_vector->cipher_iv.length); + } + + /* + * This is very protocol specific but IPSec MB uses the PLI + * (Payload Length Indication) field of the PON frame header + * to get the CRC length. 
So set the PLI here now + */ + uint16_t *pli_key_idx = rte_pktmbuf_mtod(mf_op->m_src, + uint16_t *); + uint16_t pli = cipher_op->cipher.data.length - + options->multi_fn_opts.buffer_padding; + *pli_key_idx = rte_bswap16(pli) << PLI_SHIFT_BITS; + } + + return 0; +} +#endif /* MULTI_FN_SUPPORTED */ + static struct rte_cryptodev_sym_session * cperf_create_session(struct rte_mempool *sess_mp, struct rte_mempool *priv_mp, @@ -590,6 +756,90 @@ cperf_create_session(struct rte_mempool *sess_mp, &sess_conf, sess_mp); } #endif + +#ifdef MULTI_FN_SUPPORTED + /* + * multi function + */ + if (options->op_type == CPERF_MULTI_FN) { + struct rte_multi_fn_xform mf_cipher_xform; + struct rte_multi_fn_xform mf_crc_xform; + struct rte_multi_fn_xform mf_bip_xform; + struct rte_multi_fn_xform *first_xform = NULL; + struct rte_crypto_cipher_xform *cipher_xform; + + if (options->multi_fn_opts.ops == + CPERF_MULTI_FN_OPS_DOCSIS_CIPHER_CRC || + options->multi_fn_opts.ops == + CPERF_MULTI_FN_OPS_PON_CIPHER_CRC_BIP) { + + mf_cipher_xform.type = + RTE_MULTI_FN_XFORM_TYPE_CRYPTO_SYM; + mf_cipher_xform.crypto_sym.type = + RTE_CRYPTO_SYM_XFORM_CIPHER; + cipher_xform = &mf_cipher_xform.crypto_sym.cipher; + cipher_xform->algo = options->cipher_algo; + cipher_xform->op = options->cipher_op; + cipher_xform->iv.offset = iv_offset; + cipher_xform->key.data = test_vector->cipher_key.data; + cipher_xform->key.length = + test_vector->cipher_key.length; + cipher_xform->iv.length = test_vector->cipher_iv.length; + + mf_crc_xform.type = + RTE_MULTI_FN_XFORM_TYPE_ERR_DETECT; + mf_crc_xform.err_detect.algo = + RTE_MULTI_FN_ERR_DETECT_CRC32_ETH; + + if (cipher_xform->op == + RTE_CRYPTO_CIPHER_OP_ENCRYPT) { + mf_crc_xform.err_detect.op = + RTE_MULTI_FN_ERR_DETECT_OP_GENERATE; + } else { + mf_crc_xform.err_detect.op = + RTE_MULTI_FN_ERR_DETECT_OP_VERIFY; + } + + mf_bip_xform.type = + RTE_MULTI_FN_XFORM_TYPE_ERR_DETECT; + mf_bip_xform.err_detect.algo = + RTE_MULTI_FN_ERR_DETECT_BIP32; + mf_bip_xform.err_detect.op = + RTE_MULTI_FN_ERR_DETECT_OP_GENERATE; + + if (options->multi_fn_opts.ops == + CPERF_MULTI_FN_OPS_DOCSIS_CIPHER_CRC) { + if (cipher_xform->op == + RTE_CRYPTO_CIPHER_OP_ENCRYPT) { + first_xform = &mf_crc_xform; + mf_crc_xform.next = &mf_cipher_xform; + mf_cipher_xform.next = NULL; + } else { + first_xform = &mf_cipher_xform; + mf_cipher_xform.next = &mf_crc_xform; + mf_crc_xform.next = NULL; + } + } else { + if (cipher_xform->op == + RTE_CRYPTO_CIPHER_OP_ENCRYPT) { + first_xform = &mf_crc_xform; + mf_crc_xform.next = &mf_cipher_xform; + mf_cipher_xform.next = &mf_bip_xform; + mf_bip_xform.next = NULL; + } else { + first_xform = &mf_bip_xform; + mf_bip_xform.next = &mf_cipher_xform; + mf_cipher_xform.next = &mf_crc_xform; + mf_crc_xform.next = NULL; + } + } + } + + return (void *)rte_multi_fn_session_create(dev_id, + first_xform, rte_socket_id()); + } +#endif /* MULTI_FN_SUPPORTED */ + sess = rte_cryptodev_sym_session_create(sess_mp); /* * cipher only @@ -773,5 +1023,20 @@ cperf_get_op_functions(const struct cperf_options *options, return 0; } #endif +#ifdef MULTI_FN_SUPPORTED + if (options->op_type == CPERF_MULTI_FN) { + if (options->multi_fn_opts.ops == + CPERF_MULTI_FN_OPS_DOCSIS_CIPHER_CRC) + op_fns->populate_ops = + cperf_set_ops_multi_fn_cipher_crc; + else if (options->multi_fn_opts.ops == + CPERF_MULTI_FN_OPS_PON_CIPHER_CRC_BIP) + op_fns->populate_ops = + cperf_set_ops_multi_fn_cipher_crc_bip; + else + return -1; + return 0; + } +#endif /* MULTI_FN_SUPPORTED */ return -1; } diff --git a/app/test-crypto-perf/cperf_options.h 
b/app/test-crypto-perf/cperf_options.h index 1ed0a77e5..0ca542224 100644 --- a/app/test-crypto-perf/cperf_options.h +++ b/app/test-crypto-perf/cperf_options.h @@ -10,6 +10,9 @@ #ifdef RTE_LIBRTE_SECURITY #include #endif +#ifdef MULTI_FN_SUPPORTED +#include +#endif /* MULTI_FN_SUPPORTED */ #define CPERF_PTEST_TYPE ("ptest") #define CPERF_SILENT ("silent") @@ -52,6 +55,10 @@ #define CPERF_PDCP_DOMAIN ("pdcp-domain") #endif +#ifdef MULTI_FN_SUPPORTED +#define CPERF_MULTI_FN_PARAMS ("multi-fn-params") +#endif /* MULTI_FN_SUPPORTED */ + #define CPERF_CSV ("csv-friendly") /* benchmark-specific options */ @@ -75,11 +82,34 @@ enum cperf_op_type { CPERF_CIPHER_THEN_AUTH, CPERF_AUTH_THEN_CIPHER, CPERF_AEAD, - CPERF_PDCP + CPERF_PDCP, +#ifdef MULTI_FN_SUPPORTED + CPERF_MULTI_FN +#endif /* MULTI_FN_SUPPORTED */ }; extern const char *cperf_op_type_strs[]; +#ifdef MULTI_FN_SUPPORTED +enum cperf_multi_fn_ops { + CPERF_MULTI_FN_OPS_DOCSIS_CIPHER_CRC, + CPERF_MULTI_FN_OPS_PON_CIPHER_CRC_BIP +}; + +extern const char *cperf_multi_fn_ops_strs[]; + +struct cperf_multi_fn_options { + enum cperf_multi_fn_ops ops; + + /* DOCSIS_CIPHER_CRC */ + uint32_t cipher_offset; + uint32_t crc_offset; + + /* PON_CIPHER_CRC_BIP */ + uint32_t buffer_padding; +}; +#endif /* MULTI_FN_SUPPORTED */ + struct cperf_options { enum cperf_perf_test_type test; @@ -123,6 +153,11 @@ struct cperf_options { uint16_t pdcp_sn_sz; enum rte_security_pdcp_domain pdcp_domain; #endif + +#ifdef MULTI_FN_SUPPORTED + struct cperf_multi_fn_options multi_fn_opts; +#endif /* MULTI_FN_SUPPORTED */ + char device_type[RTE_CRYPTODEV_NAME_MAX_LEN]; enum cperf_op_type op_type; diff --git a/app/test-crypto-perf/cperf_options_parsing.c b/app/test-crypto-perf/cperf_options_parsing.c index f43c5bede..25da68105 100644 --- a/app/test-crypto-perf/cperf_options_parsing.c +++ b/app/test-crypto-perf/cperf_options_parsing.c @@ -7,6 +7,7 @@ #include #include +#include #include "cperf_options.h" @@ -34,7 +35,7 @@ usage(char *progname) " --desc-nb N: set number of descriptors for each crypto device\n" " --devtype TYPE: set crypto device type to use\n" " --optype cipher-only / auth-only / cipher-then-auth /\n" - " auth-then-cipher / aead : set operation type\n" + " auth-then-cipher / aead%s : set operation type\n" " --sessionless: enable session-less crypto operations\n" " --out-of-place: enable out-of-place crypto operations\n" " --test-file NAME: set the test vector file path\n" @@ -53,11 +54,20 @@ usage(char *progname) " --aead-iv-sz N: set the AEAD IV size\n" " --aead-aad-sz N: set the AEAD AAD size\n" " --digest-sz N: set the digest size\n" +#ifdef MULTI_FN_SUPPORTED + " --multi-fn-params PARAMS: set multi function parameters\n" +#endif /* MULTI_FN_SUPPORTED */ " --pmd-cyclecount-delay-ms N: set delay between enqueue\n" " and dequeue in pmd-cyclecount benchmarking mode\n" " --csv-friendly: enable test result output CSV friendly\n" " -h: prints this help\n", - progname); + progname, +#ifdef MULTI_FN_SUPPORTED + " / multi-fn" +#else + "" +#endif /* MULTI_FN_SUPPORTED */ + ); } static int @@ -446,7 +456,13 @@ parse_op_type(struct cperf_options *opts, const char *arg) { cperf_op_type_strs[CPERF_PDCP], CPERF_PDCP + }, +#ifdef MULTI_FN_SUPPORTED + { + cperf_op_type_strs[CPERF_MULTI_FN], + CPERF_MULTI_FN } +#endif /* MULTI_FN_SUPPORTED */ }; int id = get_str_key_id_mapping(optype_namemap, @@ -744,6 +760,112 @@ parse_aead_aad_sz(struct cperf_options *opts, const char *arg) return parse_uint16_t(&opts->aead_aad_sz, arg); } +#ifdef MULTI_FN_SUPPORTED +static int 
+parse_multi_fn_ops(struct cperf_options *opts, const char *arg) +{ + struct name_id_map multi_fn_ops_namemap[] = { + { + cperf_multi_fn_ops_strs + [CPERF_MULTI_FN_OPS_DOCSIS_CIPHER_CRC], + CPERF_MULTI_FN_OPS_DOCSIS_CIPHER_CRC + }, + { + cperf_multi_fn_ops_strs + [CPERF_MULTI_FN_OPS_PON_CIPHER_CRC_BIP], + CPERF_MULTI_FN_OPS_PON_CIPHER_CRC_BIP + } + }; + + int id = get_str_key_id_mapping(multi_fn_ops_namemap, + RTE_DIM(multi_fn_ops_namemap), arg); + if (id < 0) { + RTE_LOG(ERR, USER1, "invalid multi function operation specified\n"); + return -1; + } + + opts->multi_fn_opts.ops = (enum cperf_multi_fn_ops)id; + + return 0; +} + +static int +parse_multi_fn_params(struct cperf_options *opts, const char *arg) +{ + char *token; + char *copy_arg = strdup(arg); + + if (copy_arg == NULL) + return -1; + + errno = 0; + token = strtok(copy_arg, ","); + + /* Parse first value */ + if (token == NULL || parse_multi_fn_ops(opts, token) < 0) + goto err_multi_fn_opts; + + if (opts->multi_fn_opts.ops == + CPERF_MULTI_FN_OPS_DOCSIS_CIPHER_CRC) { + /* Next params is cipher_offset */ + token = strtok(NULL, ","); + + if (token == NULL || + parse_uint32_t(&opts->multi_fn_opts.cipher_offset, + token) < 0) { + RTE_LOG(ERR, USER1, "invalid %s multi function cipher " + "offset specified\n", + cperf_multi_fn_ops_strs[ + opts->multi_fn_opts.ops]); + goto err_multi_fn_opts; + } + + /* Next params is crc_offset */ + token = strtok(NULL, ","); + + if (token == NULL || + parse_uint32_t(&opts->multi_fn_opts.crc_offset, + token) < 0) { + RTE_LOG(ERR, USER1, "invalid %s multi function crc " + "offset specified\n", + cperf_multi_fn_ops_strs[ + opts->multi_fn_opts.ops]); + goto err_multi_fn_opts; + } + + } else if (opts->multi_fn_opts.ops == + CPERF_MULTI_FN_OPS_PON_CIPHER_CRC_BIP) { + /* Next param is buffer_padding */ + token = strtok(NULL, ","); + + if (token == NULL || + parse_uint32_t(&opts->multi_fn_opts.buffer_padding, + token) < 0) { + RTE_LOG(ERR, USER1, "invalid %s multi function buffer " + "padding specified\n", + cperf_multi_fn_ops_strs[ + opts->multi_fn_opts.ops]); + goto err_multi_fn_opts; + } + } + + token = strtok(NULL, ","); + + if (token != NULL) { + RTE_LOG(ERR, USER1, "unknown %s multi function parameter\n", + cperf_multi_fn_ops_strs[opts->multi_fn_opts.ops]); + goto err_multi_fn_opts; + } + + free(copy_arg); + return 0; + +err_multi_fn_opts: + free(copy_arg); + return -1; +} +#endif /* MULTI_FN_SUPPORTED */ + static int parse_csv_friendly(struct cperf_options *opts, const char *arg __rte_unused) { @@ -821,6 +943,11 @@ static struct option lgopts[] = { { CPERF_PDCP_SN_SZ, required_argument, 0, 0 }, { CPERF_PDCP_DOMAIN, required_argument, 0, 0 }, #endif + +#ifdef MULTI_FN_SUPPORTED + { CPERF_MULTI_FN_PARAMS, required_argument, 0, 0 }, +#endif /* MULTI_FN_SUPPORTED */ + { CPERF_CSV, no_argument, 0, 0}, { CPERF_PMDCC_DELAY_MS, required_argument, 0, 0 }, @@ -891,47 +1018,57 @@ cperf_options_default(struct cperf_options *opts) opts->pdcp_sn_sz = 12; opts->pdcp_domain = RTE_SECURITY_PDCP_MODE_CONTROL; #endif + +#ifdef MULTI_FN_SUPPORTED + opts->multi_fn_opts.ops = CPERF_MULTI_FN_OPS_DOCSIS_CIPHER_CRC; + opts->multi_fn_opts.cipher_offset = 0; + opts->multi_fn_opts.crc_offset = 0; + opts->multi_fn_opts.buffer_padding = 0; +#endif /* MULTI_FN_SUPPORTED */ } static int cperf_opts_parse_long(int opt_idx, struct cperf_options *opts) { struct long_opt_parser parsermap[] = { - { CPERF_PTEST_TYPE, parse_cperf_test_type }, - { CPERF_SILENT, parse_silent }, - { CPERF_POOL_SIZE, parse_pool_sz }, - { CPERF_TOTAL_OPS, 
parse_total_ops }, - { CPERF_BURST_SIZE, parse_burst_sz }, - { CPERF_BUFFER_SIZE, parse_buffer_sz }, - { CPERF_SEGMENT_SIZE, parse_segment_sz }, - { CPERF_DESC_NB, parse_desc_nb }, - { CPERF_DEVTYPE, parse_device_type }, - { CPERF_OPTYPE, parse_op_type }, - { CPERF_SESSIONLESS, parse_sessionless }, - { CPERF_OUT_OF_PLACE, parse_out_of_place }, - { CPERF_IMIX, parse_imix }, - { CPERF_TEST_FILE, parse_test_file }, - { CPERF_TEST_NAME, parse_test_name }, - { CPERF_CIPHER_ALGO, parse_cipher_algo }, - { CPERF_CIPHER_OP, parse_cipher_op }, - { CPERF_CIPHER_KEY_SZ, parse_cipher_key_sz }, - { CPERF_CIPHER_IV_SZ, parse_cipher_iv_sz }, - { CPERF_AUTH_ALGO, parse_auth_algo }, - { CPERF_AUTH_OP, parse_auth_op }, - { CPERF_AUTH_KEY_SZ, parse_auth_key_sz }, - { CPERF_AUTH_IV_SZ, parse_auth_iv_sz }, - { CPERF_AEAD_ALGO, parse_aead_algo }, - { CPERF_AEAD_OP, parse_aead_op }, - { CPERF_AEAD_KEY_SZ, parse_aead_key_sz }, - { CPERF_AEAD_IV_SZ, parse_aead_iv_sz }, - { CPERF_AEAD_AAD_SZ, parse_aead_aad_sz }, - { CPERF_DIGEST_SZ, parse_digest_sz }, + { CPERF_PTEST_TYPE, parse_cperf_test_type }, + { CPERF_SILENT, parse_silent }, + { CPERF_POOL_SIZE, parse_pool_sz }, + { CPERF_TOTAL_OPS, parse_total_ops }, + { CPERF_BURST_SIZE, parse_burst_sz }, + { CPERF_BUFFER_SIZE, parse_buffer_sz }, + { CPERF_SEGMENT_SIZE, parse_segment_sz }, + { CPERF_DESC_NB, parse_desc_nb }, + { CPERF_DEVTYPE, parse_device_type }, + { CPERF_OPTYPE, parse_op_type }, + { CPERF_SESSIONLESS, parse_sessionless }, + { CPERF_OUT_OF_PLACE, parse_out_of_place }, + { CPERF_IMIX, parse_imix }, + { CPERF_TEST_FILE, parse_test_file }, + { CPERF_TEST_NAME, parse_test_name }, + { CPERF_CIPHER_ALGO, parse_cipher_algo }, + { CPERF_CIPHER_OP, parse_cipher_op }, + { CPERF_CIPHER_KEY_SZ, parse_cipher_key_sz }, + { CPERF_CIPHER_IV_SZ, parse_cipher_iv_sz }, + { CPERF_AUTH_ALGO, parse_auth_algo }, + { CPERF_AUTH_OP, parse_auth_op }, + { CPERF_AUTH_KEY_SZ, parse_auth_key_sz }, + { CPERF_AUTH_IV_SZ, parse_auth_iv_sz }, + { CPERF_AEAD_ALGO, parse_aead_algo }, + { CPERF_AEAD_OP, parse_aead_op }, + { CPERF_AEAD_KEY_SZ, parse_aead_key_sz }, + { CPERF_AEAD_IV_SZ, parse_aead_iv_sz }, + { CPERF_AEAD_AAD_SZ, parse_aead_aad_sz }, + { CPERF_DIGEST_SZ, parse_digest_sz }, #ifdef RTE_LIBRTE_SECURITY - { CPERF_PDCP_SN_SZ, parse_pdcp_sn_sz }, - { CPERF_PDCP_DOMAIN, parse_pdcp_domain }, + { CPERF_PDCP_SN_SZ, parse_pdcp_sn_sz }, + { CPERF_PDCP_DOMAIN, parse_pdcp_domain }, #endif - { CPERF_CSV, parse_csv_friendly}, - { CPERF_PMDCC_DELAY_MS, parse_pmd_cyclecount_delay_ms}, +#ifdef MULTI_FN_SUPPORTED + { CPERF_MULTI_FN_PARAMS, parse_multi_fn_params }, +#endif /* MULTI_FN_SUPPORTED */ + { CPERF_CSV, parse_csv_friendly }, + { CPERF_PMDCC_DELAY_MS, parse_pmd_cyclecount_delay_ms }, }; unsigned int i; @@ -1031,6 +1168,155 @@ check_cipher_buffer_length(struct cperf_options *options) return 0; } +#ifdef MULTI_FN_SUPPORTED +#define DOCSIS_CIPHER_CRC_OFFSET_DIFF (RTE_ETHER_HDR_LEN - RTE_ETHER_TYPE_LEN) +#define DOCSIS_MIN_CIPHER_SIZE (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN) + +#define PON_FRAME_HDR_SIZE (8U) +#define PON_FRAME_MULTIPLE_SIZE (4) +#define PON_FRAME_INVALID_SIZE (12) + +static int +check_multi_fn_options(struct cperf_options *options) +{ + uint32_t buffer_size, buffer_size_idx = 0; + + options->digest_sz = 0; + + if (options->multi_fn_opts.ops == + CPERF_MULTI_FN_OPS_DOCSIS_CIPHER_CRC || + options->multi_fn_opts.ops == + CPERF_MULTI_FN_OPS_PON_CIPHER_CRC_BIP) { + + /* Check the device type is a rawdev */ + if (strcmp(options->device_type, "rawdev_aesni_mb") != 0) { + RTE_LOG(ERR, 
USER1, "Invalid device type %s for " + "multi-function\n", + options->device_type); + return -EINVAL; + } + + /* Only single segment supported */ + if (options->segment_sz < options->max_buffer_size) { + RTE_LOG(ERR, USER1, "Segmented buffers not supported " + "for multi-function\n"); + return -EINVAL; + } + + /* Out-of-place not supported */ + if (options->out_of_place) { + RTE_LOG(ERR, USER1, "Out-of-place not supported for " + "multi-function\n"); + return -EINVAL; + } + } + + if (options->multi_fn_opts.ops == + CPERF_MULTI_FN_OPS_DOCSIS_CIPHER_CRC) { + /* + * Cipher offset must be at least 12 bytes (Ethernet SRC and + * DEST MACs) greater than CRC offset + */ + if (options->multi_fn_opts.cipher_offset < + options->multi_fn_opts.crc_offset + + DOCSIS_CIPHER_CRC_OFFSET_DIFF) { + RTE_LOG(ERR, USER1, "Cipher and CRC offsets not valid " + "for %s multi-function operation - " + "cipher_offset must greater than or equal to " + "crc_offset + %u\n", + cperf_multi_fn_ops_strs[ + options->multi_fn_opts.ops], + DOCSIS_CIPHER_CRC_OFFSET_DIFF); + return -EINVAL; + } + } + + if (options->multi_fn_opts.ops == + CPERF_MULTI_FN_OPS_PON_CIPHER_CRC_BIP) { + options->multi_fn_opts.cipher_offset = PON_FRAME_HDR_SIZE; + options->multi_fn_opts.crc_offset = PON_FRAME_HDR_SIZE; + } + + if (options->inc_buffer_size != 0) + buffer_size = options->min_buffer_size; + else + buffer_size = options->buffer_size_list[0]; + + while (buffer_size <= options->max_buffer_size) { + if (options->multi_fn_opts.ops == + CPERF_MULTI_FN_OPS_DOCSIS_CIPHER_CRC) { + /* Buffer must be large enough to accommodate offsets */ + if (buffer_size < + (options->multi_fn_opts.cipher_offset + + DOCSIS_MIN_CIPHER_SIZE) || + buffer_size < + (options->multi_fn_opts.crc_offset + + RTE_ETHER_CRC_LEN)) { + RTE_LOG(ERR, USER1, "Some of the buffer sizes " + "are not valid for %s multi-function " + "operation\n", + cperf_multi_fn_ops_strs[ + options->multi_fn_opts.ops]); + return -EINVAL; + } + } else if (options->multi_fn_opts.ops == + CPERF_MULTI_FN_OPS_PON_CIPHER_CRC_BIP) { + /* + * Buffer length must be: + * - a multiple of 4 + * - large enough to accommodate PON frame header and + * any padding + * - not 12 + */ + if (((buffer_size % PON_FRAME_MULTIPLE_SIZE) != 0) || + (buffer_size < (PON_FRAME_HDR_SIZE + + options->multi_fn_opts.buffer_padding)) || + (buffer_size == PON_FRAME_INVALID_SIZE)) { + + RTE_LOG(ERR, USER1, "Some of the buffer sizes " + "are not suitable for %s " + "multi-function operation\n", + cperf_multi_fn_ops_strs[ + options->multi_fn_opts.ops]); + return -EINVAL; + } + + /* + * Padding length must be valid: + * - 0, if buffer length == 8 + * - less than 8, if buffer length >= 16 + * - less than 4, if buffer length >= 20 + */ + if ((buffer_size == 8 && + options->multi_fn_opts.buffer_padding != 0) || + (buffer_size >= 16 && + options->multi_fn_opts.buffer_padding >= 8) || + (buffer_size >= 20 && + options->multi_fn_opts.buffer_padding >= 4)) { + + RTE_LOG(ERR, USER1, "Padding length not valid " + "for some of the buffer sizes for %s " + "multi-function operation\n", + cperf_multi_fn_ops_strs[ + options->multi_fn_opts.ops]); + return -EINVAL; + } + } + + if (options->inc_buffer_size != 0) + buffer_size += options->inc_buffer_size; + else { + if (++buffer_size_idx == options->buffer_size_count) + break; + buffer_size = + options->buffer_size_list[buffer_size_idx]; + } + } + + return 0; +} +#endif /* MULTI_FN_SUPPORTED */ + int cperf_options_check(struct cperf_options *options) { @@ -1151,6 +1437,13 @@ cperf_options_check(struct 
cperf_options *options) return -EINVAL; } +#ifdef MULTI_FN_SUPPORTED + if (options->op_type == CPERF_MULTI_FN) { + if (check_multi_fn_options(options) < 0) + return -EINVAL; + } +#endif /* MULTI_FN_SUPPORTED */ + return 0; } @@ -1236,4 +1529,37 @@ cperf_options_dump(struct cperf_options *opts) printf("# aead aad size: %u\n", opts->aead_aad_sz); printf("#\n"); } + +#ifdef MULTI_FN_SUPPORTED + if (opts->op_type == CPERF_MULTI_FN) { + if (opts->multi_fn_opts.ops == + CPERF_MULTI_FN_OPS_DOCSIS_CIPHER_CRC || + opts->multi_fn_opts.ops == + CPERF_MULTI_FN_OPS_PON_CIPHER_CRC_BIP) { + printf("# cipher algorithm: %s\n", + rte_crypto_cipher_algorithm_strings[ + opts->cipher_algo]); + printf("# cipher operation: %s\n", + rte_crypto_cipher_operation_strings[ + opts->cipher_op]); + printf("# cipher key size: %u\n", opts->cipher_key_sz); + printf("# cipher iv size: %u\n", opts->cipher_iv_sz); + printf("# multi fn operations: %s\n", + cperf_multi_fn_ops_strs[ + opts->multi_fn_opts.ops]); + } + if (opts->multi_fn_opts.ops == + CPERF_MULTI_FN_OPS_DOCSIS_CIPHER_CRC) { + printf("# multi fn cipher offset: %u\n", + opts->multi_fn_opts.cipher_offset); + printf("# multi fn crc offset: %u\n", + opts->multi_fn_opts.crc_offset); + } + if (opts->multi_fn_opts.ops == + CPERF_MULTI_FN_OPS_PON_CIPHER_CRC_BIP) + printf("# multi fn buffer padding: %u\n", + opts->multi_fn_opts.buffer_padding); + printf("#\n"); + } +#endif /* MULTI_FN_SUPPORTED */ } diff --git a/app/test-crypto-perf/cperf_test_common.c b/app/test-crypto-perf/cperf_test_common.c index 85603eed5..6a92c1ae6 100644 --- a/app/test-crypto-perf/cperf_test_common.c +++ b/app/test-crypto-perf/cperf_test_common.c @@ -14,6 +14,8 @@ struct obj_params { uint16_t headroom_sz; uint16_t data_len; uint16_t segments_nb; + uint16_t ops_per_obj_nb; + uint8_t multi_fn; }; static void @@ -92,15 +94,39 @@ mempool_obj_init(struct rte_mempool *mp, struct rte_crypto_op *op = obj; struct rte_mbuf *m = (struct rte_mbuf *) ((uint8_t *) obj + params->src_buf_offset); - /* Set crypto operation */ - op->type = RTE_CRYPTO_OP_TYPE_SYMMETRIC; - op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED; - op->sess_type = RTE_CRYPTO_OP_WITH_SESSION; - op->phys_addr = rte_mem_virt2iova(obj); - op->mempool = mp; + + if (!params->multi_fn) { + /* Set crypto operation */ + op->type = RTE_CRYPTO_OP_TYPE_SYMMETRIC; + op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED; + op->sess_type = RTE_CRYPTO_OP_WITH_SESSION; + op->phys_addr = rte_mem_virt2iova(obj); + op->mempool = mp; + op->sym->m_src = m; + op->sym->m_dst = NULL; + } else { +#ifdef MULTI_FN_SUPPORTED + /* Set multi-function operation(s) */ + struct rte_multi_fn_op *mf_op, *next_mf_op; + uint16_t remaining_ops, op_sz; + remaining_ops = params->ops_per_obj_nb; + mf_op = obj; + op_sz = params->src_buf_offset / params->ops_per_obj_nb; + do { + mf_op->mempool = mp; + mf_op->m_src = m; + mf_op->m_dst = NULL; + next_mf_op = (struct rte_multi_fn_op *) + ((uint8_t *) mf_op + op_sz); + mf_op->next = next_mf_op; + mf_op = next_mf_op; + + remaining_ops--; + } while (remaining_ops > 0); +#endif /* MULTI_FN_SUPPORTED */ + } /* Set source buffer */ - op->sym->m_src = m; if (params->segments_nb == 1) fill_single_seg_mbuf(m, mp, obj, params->src_buf_offset, params->segment_sz, params->headroom_sz, @@ -118,9 +144,15 @@ mempool_obj_init(struct rte_mempool *mp, fill_single_seg_mbuf(m, mp, obj, params->dst_buf_offset, params->segment_sz, params->headroom_sz, params->data_len); - op->sym->m_dst = m; - } else - op->sym->m_dst = NULL; + if (!params->multi_fn) { + op->sym->m_dst 
= m; + } else { +#ifdef MULTI_FN_SUPPORTED + struct rte_multi_fn_op *mf_op = obj; + mf_op->m_dst = m; +#endif /* MULTI_FN_SUPPORTED */ + } + } } int @@ -134,12 +166,38 @@ cperf_alloc_common_memory(const struct cperf_options *options, { const char *mp_ops_name; char pool_name[32] = ""; + uint8_t multi_fn = 0; int ret; /* Calculate the object size */ - uint16_t crypto_op_size = sizeof(struct rte_crypto_op) + - sizeof(struct rte_crypto_sym_op); + uint16_t crypto_op_size; uint16_t crypto_op_private_size; + uint16_t ops_per_obj_nb; + +#ifdef MULTI_FN_SUPPORTED + if (options->op_type == CPERF_MULTI_FN) { + crypto_op_size = sizeof(struct rte_multi_fn_op); + if (options->multi_fn_opts.ops == + CPERF_MULTI_FN_OPS_DOCSIS_CIPHER_CRC) { + ops_per_obj_nb = 2; + } else if (options->multi_fn_opts.ops == + CPERF_MULTI_FN_OPS_PON_CIPHER_CRC_BIP) { + ops_per_obj_nb = 3; + } else { + RTE_LOG(ERR, USER1, + "Invalid multi-function operations for pool " + "creation\n"); + return -1; + } + multi_fn = 1; + } else +#endif /* MULTI_FN_SUPPORTED */ + { + crypto_op_size = sizeof(struct rte_crypto_op) + + sizeof(struct rte_crypto_sym_op); + ops_per_obj_nb = 1; + } + /* * If doing AES-CCM, IV field needs to be 16 bytes long, * and AAD field needs to be long enough to have 18 bytes, @@ -162,7 +220,7 @@ cperf_alloc_common_memory(const struct cperf_options *options, uint16_t crypto_op_total_size = crypto_op_size + crypto_op_private_size; - uint16_t crypto_op_total_size_padded = + uint16_t crypto_op_total_size_padded = ops_per_obj_nb * RTE_CACHE_LINE_ROUNDUP(crypto_op_total_size); uint32_t mbuf_size = sizeof(struct rte_mbuf) + options->segment_sz; uint32_t max_size = options->max_buffer_size + options->digest_sz; @@ -186,7 +244,9 @@ cperf_alloc_common_memory(const struct cperf_options *options, options->tailroom_sz, .segments_nb = segments_nb, .src_buf_offset = crypto_op_total_size_padded, - .dst_buf_offset = 0 + .dst_buf_offset = 0, + .ops_per_obj_nb = ops_per_obj_nb, + .multi_fn = multi_fn, }; if (options->out_of_place) { diff --git a/app/test-crypto-perf/cperf_test_latency.c b/app/test-crypto-perf/cperf_test_latency.c index 0e4d0e153..92df4fb74 100644 --- a/app/test-crypto-perf/cperf_test_latency.c +++ b/app/test-crypto-perf/cperf_test_latency.c @@ -6,6 +6,10 @@ #include #include #include +#ifdef MULTI_FN_SUPPORTED +#include +#include +#endif /* MULTI_FN_SUPPORTED */ #include "cperf_test_latency.h" #include "cperf_ops.h" @@ -43,18 +47,27 @@ struct priv_op_data { static void cperf_latency_test_free(struct cperf_latency_ctx *ctx) { - if (ctx) { - if (ctx->sess) { + if (!ctx) + return; + + if (ctx->sess) { +#ifdef MULTI_FN_SUPPORTED + if (ctx->options->op_type == CPERF_MULTI_FN) { + rte_multi_fn_session_destroy(ctx->dev_id, + (struct rte_multi_fn_session *)ctx->sess); + } else +#endif /* MULTI_FN_SUPPORTED */ + { rte_cryptodev_sym_session_clear(ctx->dev_id, ctx->sess); rte_cryptodev_sym_session_free(ctx->sess); } + } - if (ctx->pool) - rte_mempool_free(ctx->pool); + if (ctx->pool) + rte_mempool_free(ctx->pool); - rte_free(ctx->res); - rte_free(ctx); - } + rte_free(ctx->res); + rte_free(ctx); } void * @@ -67,6 +80,7 @@ cperf_latency_test_constructor(struct rte_mempool *sess_mp, { struct cperf_latency_ctx *ctx = NULL; size_t extra_op_priv_size = sizeof(struct priv_op_data); + uint16_t iv_offset; ctx = rte_malloc(NULL, sizeof(struct cperf_latency_ctx), 0); if (ctx == NULL) @@ -79,10 +93,19 @@ cperf_latency_test_constructor(struct rte_mempool *sess_mp, ctx->options = options; ctx->test_vector = test_vector; - /* IV goes at the 
end of the crypto operation */ - uint16_t iv_offset = sizeof(struct rte_crypto_op) + - sizeof(struct rte_crypto_sym_op) + - sizeof(struct cperf_op_result *); +#ifdef MULTI_FN_SUPPORTED + if (options->op_type == CPERF_MULTI_FN) { + /* IV goes at the end of the multi-function operation */ + iv_offset = sizeof(struct rte_multi_fn_op) + + sizeof(struct cperf_op_result *); + } else +#endif /* MULTI_FN_SUPPORTED */ + { + /* IV goes at the end of the crypto operation */ + iv_offset = sizeof(struct rte_crypto_op) + + sizeof(struct rte_crypto_sym_op) + + sizeof(struct cperf_op_result *); + } ctx->sess = op_fns->sess_create(sess_mp, sess_priv_mp, dev_id, options, test_vector, iv_offset); @@ -138,34 +161,65 @@ cperf_latency_test_runner(void *arg) uint32_t lcore = rte_lcore_id(); + uint16_t iv_offset; + + int multi_fn = 0; + +#ifdef MULTI_FN_SUPPORTED + if (ctx->options->op_type == CPERF_MULTI_FN) + multi_fn = 1; +#else + RTE_SET_USED(multi_fn); +#endif /* MULTI_FN_SUPPORTED */ + #ifdef CPERF_LINEARIZATION_ENABLE - struct rte_cryptodev_info dev_info; int linearize = 0; /* Check if source mbufs require coalescing */ if (ctx->options->segment_sz < ctx->options->max_buffer_size) { - rte_cryptodev_info_get(ctx->dev_id, &dev_info); - if ((dev_info.feature_flags & - RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER) == 0) + if (!multi_fn) { + struct rte_cryptodev_info dev_info; + rte_cryptodev_info_get(ctx->dev_id, &dev_info); + if ((dev_info.feature_flags & + RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER) == + 0) + linearize = 1; + } else linearize = 1; } #endif /* CPERF_LINEARIZATION_ENABLE */ ctx->lcore_id = lcore; - /* Warm up the host CPU before starting the test */ - for (i = 0; i < ctx->options->total_ops; i++) - rte_cryptodev_enqueue_burst(ctx->dev_id, ctx->qp_id, NULL, 0); - /* Get first size from range or list */ if (ctx->options->inc_burst_size != 0) test_burst_size = ctx->options->min_burst_size; else test_burst_size = ctx->options->burst_size_list[0]; - uint16_t iv_offset = sizeof(struct rte_crypto_op) + - sizeof(struct rte_crypto_sym_op) + - sizeof(struct cperf_op_result *); +#ifdef MULTI_FN_SUPPORTED + if (multi_fn) { + /* Warm up the host CPU before starting the test */ + for (i = 0; i < ctx->options->total_ops; i++) + rte_rawdev_enqueue_buffers(ctx->dev_id, NULL, 0, + (rte_rawdev_obj_t)&ctx->qp_id); + + iv_offset = sizeof(struct rte_multi_fn_op) + + sizeof(struct cperf_op_result *); + + multi_fn = 1; + } else +#endif /* MULTI_FN_SUPPORTED */ + { + /* Warm up the host CPU before starting the test */ + for (i = 0; i < ctx->options->total_ops; i++) + rte_cryptodev_enqueue_burst(ctx->dev_id, ctx->qp_id, + NULL, 0); + + iv_offset = sizeof(struct rte_crypto_op) + + sizeof(struct rte_crypto_sym_op) + + sizeof(struct cperf_op_result *); + } while (test_burst_size <= ctx->options->max_burst_size) { uint64_t ops_enqd = 0, ops_deqd = 0; @@ -215,13 +269,40 @@ cperf_latency_test_runner(void *arg) } #endif /* CPERF_LINEARIZATION_ENABLE */ - /* Enqueue burst of ops on crypto device */ - ops_enqd = rte_cryptodev_enqueue_burst(ctx->dev_id, ctx->qp_id, - ops, burst_size); +#ifdef MULTI_FN_SUPPORTED + if (multi_fn) { + /* Enqueue burst of op on raw device */ + ops_enqd = rte_rawdev_enqueue_buffers( + ctx->dev_id, + (struct rte_rawdev_buf **)ops, + burst_size, + (rte_rawdev_obj_t)&ctx->qp_id); + + /* + * Dequeue processed burst of ops from raw + * device + */ + ops_deqd = rte_rawdev_dequeue_buffers( + ctx->dev_id, + (struct rte_rawdev_buf **)ops_processed, + test_burst_size, + (rte_rawdev_obj_t)&ctx->qp_id); + } else +#endif /* 
MULTI_FN_SUPPORTED */ + { + /* Enqueue burst of ops on crypto device */ + ops_enqd = rte_cryptodev_enqueue_burst( + ctx->dev_id, ctx->qp_id, ops, + burst_size); - /* Dequeue processed burst of ops from crypto device */ - ops_deqd = rte_cryptodev_dequeue_burst(ctx->dev_id, ctx->qp_id, - ops_processed, test_burst_size); + /* + * Dequeue processed burst of ops from crypto + * device + */ + ops_deqd = rte_cryptodev_dequeue_burst( + ctx->dev_id, ctx->qp_id, ops_processed, + test_burst_size); + } tsc_end = rte_rdtsc_precise(); @@ -262,14 +343,41 @@ cperf_latency_test_runner(void *arg) b_idx++; } - /* Dequeue any operations still in the crypto device */ + /* Dequeue any operations still in the device */ while (deqd_tot < ctx->options->total_ops) { - /* Sending 0 length burst to flush sw crypto device */ - rte_cryptodev_enqueue_burst(ctx->dev_id, ctx->qp_id, NULL, 0); +#ifdef MULTI_FN_SUPPORTED + if (multi_fn) { + /* + * Sending 0 length burst to flush sw raw + * device + */ + rte_rawdev_enqueue_buffers(ctx->dev_id, NULL, 0, + (rte_rawdev_obj_t)&ctx->qp_id); - /* dequeue burst */ - ops_deqd = rte_cryptodev_dequeue_burst(ctx->dev_id, ctx->qp_id, - ops_processed, test_burst_size); + /* + * Dequeue processed burst of ops from raw + * device + */ + ops_deqd = rte_rawdev_dequeue_buffers( + ctx->dev_id, + (struct rte_rawdev_buf **)ops_processed, + test_burst_size, + (rte_rawdev_obj_t)&ctx->qp_id); + } else +#endif /* MULTI_FN_SUPPORTED */ + { + /* + * Sending 0 length burst to flush sw crypto + * device + */ + rte_cryptodev_enqueue_burst(ctx->dev_id, + ctx->qp_id, NULL, 0); + + /* dequeue burst */ + ops_deqd = rte_cryptodev_dequeue_burst( + ctx->dev_id, ctx->qp_id, ops_processed, + test_burst_size); + } tsc_end = rte_rdtsc_precise(); diff --git a/app/test-crypto-perf/cperf_test_pmd_cyclecount.c b/app/test-crypto-perf/cperf_test_pmd_cyclecount.c index 74371faa8..f0a7dbf7c 100644 --- a/app/test-crypto-perf/cperf_test_pmd_cyclecount.c +++ b/app/test-crypto-perf/cperf_test_pmd_cyclecount.c @@ -6,6 +6,10 @@ #include #include +#ifdef MULTI_FN_SUPPORTED +#include +#include +#endif /* MULTI_FN_SUPPORTED */ #include #include @@ -44,6 +48,7 @@ struct pmd_cyclecount_state { uint32_t lcore; uint64_t delay; int linearize; + int multi_fn; uint32_t ops_enqd; uint32_t ops_deqd; uint32_t ops_enq_retries; @@ -53,29 +58,37 @@ struct pmd_cyclecount_state { double cycles_per_deq; }; -static const uint16_t iv_offset = - sizeof(struct rte_crypto_op) + sizeof(struct rte_crypto_sym_op); +static uint16_t iv_offset; static void cperf_pmd_cyclecount_test_free(struct cperf_pmd_cyclecount_ctx *ctx) { - if (ctx) { - if (ctx->sess) { + if (!ctx) + return; + + if (ctx->sess) { +#ifdef MULTI_FN_SUPPORTED + if (ctx->options->op_type == CPERF_MULTI_FN) { + rte_multi_fn_session_destroy(ctx->dev_id, + (struct rte_multi_fn_session *)ctx->sess); + } else +#endif /* MULTI_FN_SUPPORTED */ + { rte_cryptodev_sym_session_clear(ctx->dev_id, ctx->sess); rte_cryptodev_sym_session_free(ctx->sess); } + } - if (ctx->pool) - rte_mempool_free(ctx->pool); + if (ctx->pool) + rte_mempool_free(ctx->pool); - if (ctx->ops) - rte_free(ctx->ops); + if (ctx->ops) + rte_free(ctx->ops); - if (ctx->ops_processed) - rte_free(ctx->ops_processed); + if (ctx->ops_processed) + rte_free(ctx->ops_processed); - rte_free(ctx); - } + rte_free(ctx); } void * @@ -103,9 +116,17 @@ cperf_pmd_cyclecount_test_constructor(struct rte_mempool *sess_mp, ctx->options = options; ctx->test_vector = test_vector; - /* IV goes at the end of the crypto operation */ - uint16_t iv_offset = 
sizeof(struct rte_crypto_op) + - sizeof(struct rte_crypto_sym_op); +#ifdef MULTI_FN_SUPPORTED + if (options->op_type == CPERF_MULTI_FN) { + /* IV goes at the end of the multi-function operation */ + iv_offset = sizeof(struct rte_multi_fn_op); + } else +#endif /* MULTI_FN_SUPPORTED */ + { + /* IV goes at the end of the crypto operation */ + iv_offset = sizeof(struct rte_crypto_op) + + sizeof(struct rte_crypto_sym_op); + } ctx->sess = op_fns->sess_create(sess_mp, sess_priv_mp, dev_id, options, test_vector, iv_offset); @@ -237,8 +258,18 @@ pmd_cyclecount_bench_enq(struct pmd_cyclecount_state *state, struct rte_crypto_op **ops = &state->ctx->ops[cur_iter_op]; uint32_t burst_enqd; - burst_enqd = rte_cryptodev_enqueue_burst(state->ctx->dev_id, - state->ctx->qp_id, ops, burst_size); +#ifdef MULTI_FN_SUPPORTED + if (state->multi_fn) + burst_enqd = rte_rawdev_enqueue_buffers( + state->ctx->dev_id, + (struct rte_rawdev_buf **)ops, + burst_size, + (rte_rawdev_obj_t)&state->ctx->qp_id); + else +#endif /* MULTI_FN_SUPPORTED */ + burst_enqd = rte_cryptodev_enqueue_burst( + state->ctx->dev_id, state->ctx->qp_id, + ops, burst_size); /* if we couldn't enqueue anything, the queue is full */ if (!burst_enqd) { @@ -268,8 +299,18 @@ pmd_cyclecount_bench_deq(struct pmd_cyclecount_state *state, &state->ctx->ops[cur_iter_op]; uint32_t burst_deqd; - burst_deqd = rte_cryptodev_dequeue_burst(state->ctx->dev_id, - state->ctx->qp_id, ops_processed, burst_size); +#ifdef MULTI_FN_SUPPORTED + if (state->multi_fn) + burst_deqd = rte_rawdev_dequeue_buffers( + state->ctx->dev_id, + (struct rte_rawdev_buf **)ops_processed, + burst_size, + (rte_rawdev_obj_t)&state->ctx->qp_id); + else +#endif /* MULTI_FN_SUPPORTED */ + burst_deqd = rte_cryptodev_dequeue_burst( + state->ctx->dev_id, state->ctx->qp_id, + ops_processed, burst_size); if (burst_deqd < burst_size) state->ops_deq_retries++; @@ -390,6 +431,12 @@ cperf_pmd_cyclecount_test_runner(void *test_ctx) state.opts = opts; state.lcore = rte_lcore_id(); state.linearize = 0; + state.multi_fn = 0; + +#ifdef MULTI_FN_SUPPORTED + if (opts->op_type == CPERF_MULTI_FN) + state.multi_fn = 1; +#endif /* MULTI_FN_SUPPORTED */ static rte_atomic16_t display_once = RTE_ATOMIC16_INIT(0); static bool warmup = true; @@ -406,12 +453,15 @@ cperf_pmd_cyclecount_test_runner(void *test_ctx) /* Check if source mbufs require coalescing */ if (opts->segments_sz < ctx->options->max_buffer_size) { - rte_cryptodev_info_get(state.ctx->dev_id, &dev_info); - if ((dev_info.feature_flags & + if (!state.multi_fn) { + rte_cryptodev_info_get(state.ctx->dev_id, &dev_info); + if ((dev_info.feature_flags & RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER) == - 0) { + 0) { + state.linearize = 1; + } + } else state.linearize = 1; - } } #endif /* CPERF_LINEARIZATION_ENABLE */ diff --git a/app/test-crypto-perf/cperf_test_throughput.c b/app/test-crypto-perf/cperf_test_throughput.c index 35c51026f..e569d820d 100644 --- a/app/test-crypto-perf/cperf_test_throughput.c +++ b/app/test-crypto-perf/cperf_test_throughput.c @@ -6,6 +6,10 @@ #include #include #include +#ifdef MULTI_FN_SUPPORTED +#include +#include +#endif /* MULTI_FN_SUPPORTED */ #include "cperf_test_throughput.h" #include "cperf_ops.h" @@ -44,11 +48,20 @@ cperf_throughput_test_free(struct cperf_throughput_ctx *ctx) (struct rte_security_session *)ctx->sess); } else #endif - { - rte_cryptodev_sym_session_clear(ctx->dev_id, ctx->sess); - rte_cryptodev_sym_session_free(ctx->sess); +#ifdef MULTI_FN_SUPPORTED + if (ctx->options->op_type == CPERF_MULTI_FN) { + 
rte_multi_fn_session_destroy(ctx->dev_id, + (struct rte_multi_fn_session *) + ctx->sess); + } else +#endif /* MULTI_FN_SUPPORTED */ + { + rte_cryptodev_sym_session_clear(ctx->dev_id, + ctx->sess); + rte_cryptodev_sym_session_free(ctx->sess); + } } - } + if (ctx->pool) rte_mempool_free(ctx->pool); @@ -64,6 +77,7 @@ cperf_throughput_test_constructor(struct rte_mempool *sess_mp, const struct cperf_op_fns *op_fns) { struct cperf_throughput_ctx *ctx = NULL; + uint16_t iv_offset; ctx = rte_malloc(NULL, sizeof(struct cperf_throughput_ctx), 0); if (ctx == NULL) @@ -76,9 +90,17 @@ cperf_throughput_test_constructor(struct rte_mempool *sess_mp, ctx->options = options; ctx->test_vector = test_vector; - /* IV goes at the end of the crypto operation */ - uint16_t iv_offset = sizeof(struct rte_crypto_op) + - sizeof(struct rte_crypto_sym_op); +#ifdef MULTI_FN_SUPPORTED + if (options->op_type == CPERF_MULTI_FN) { + /* IV goes at the end of the multi-function operation */ + iv_offset = sizeof(struct rte_multi_fn_op); + } else +#endif /* MULTI_FN_SUPPORTED */ + { + /* IV goes at the end of the crypto operation */ + iv_offset = sizeof(struct rte_crypto_op) + + sizeof(struct rte_crypto_sym_op); + } ctx->sess = op_fns->sess_create(sess_mp, sess_priv_mp, dev_id, options, test_vector, iv_offset); @@ -113,24 +135,55 @@ cperf_throughput_test_runner(void *test_ctx) uint32_t lcore = rte_lcore_id(); + uint16_t iv_offset; + + int multi_fn = 0; + +#ifdef MULTI_FN_SUPPORTED + if (ctx->options->op_type == CPERF_MULTI_FN) + multi_fn = 1; +#else + RTE_SET_USED(multi_fn); +#endif /* MULTI_FN_SUPPORTED */ + #ifdef CPERF_LINEARIZATION_ENABLE - struct rte_cryptodev_info dev_info; int linearize = 0; /* Check if source mbufs require coalescing */ if (ctx->options->segment_sz < ctx->options->max_buffer_size) { - rte_cryptodev_info_get(ctx->dev_id, &dev_info); - if ((dev_info.feature_flags & - RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER) == 0) + if (!multi_fn) { + struct rte_cryptodev_info dev_info; + rte_cryptodev_info_get(ctx->dev_id, &dev_info); + if ((dev_info.feature_flags & + RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER) == + 0) + linearize = 1; + } else linearize = 1; } #endif /* CPERF_LINEARIZATION_ENABLE */ ctx->lcore_id = lcore; - /* Warm up the host CPU before starting the test */ - for (i = 0; i < ctx->options->total_ops; i++) - rte_cryptodev_enqueue_burst(ctx->dev_id, ctx->qp_id, NULL, 0); +#ifdef MULTI_FN_SUPPORTED + if (multi_fn) { + iv_offset = sizeof(struct rte_multi_fn_op); + + /* Warm up the host CPU before starting the test */ + for (i = 0; i < ctx->options->total_ops; i++) + rte_rawdev_enqueue_buffers(ctx->dev_id, NULL, 0, + (rte_rawdev_obj_t)&ctx->qp_id); + } else +#endif /* MULTI_FN_SUPPORTED */ + { + iv_offset = sizeof(struct rte_crypto_op) + + sizeof(struct rte_crypto_sym_op); + + /* Warm up the host CPU before starting the test */ + for (i = 0; i < ctx->options->total_ops; i++) + rte_cryptodev_enqueue_burst(ctx->dev_id, ctx->qp_id, + NULL, 0); + } /* Get first size from range or list */ if (ctx->options->inc_burst_size != 0) @@ -138,9 +191,6 @@ cperf_throughput_test_runner(void *test_ctx) else test_burst_size = ctx->options->burst_size_list[0]; - uint16_t iv_offset = sizeof(struct rte_crypto_op) + - sizeof(struct rte_crypto_sym_op); - while (test_burst_size <= ctx->options->max_burst_size) { uint64_t ops_enqd = 0, ops_enqd_total = 0, ops_enqd_failed = 0; uint64_t ops_deqd = 0, ops_deqd_total = 0, ops_deqd_failed = 0; @@ -203,9 +253,41 @@ cperf_throughput_test_runner(void *test_ctx) } #endif /* CPERF_LINEARIZATION_ENABLE 
*/ - /* Enqueue burst of ops on crypto device */ - ops_enqd = rte_cryptodev_enqueue_burst(ctx->dev_id, ctx->qp_id, - ops, burst_size); +#ifdef MULTI_FN_SUPPORTED + if (multi_fn) { + /* Enqueue burst of op on raw device */ + ops_enqd = rte_rawdev_enqueue_buffers( + ctx->dev_id, + (struct rte_rawdev_buf **)ops, + burst_size, + (rte_rawdev_obj_t)&ctx->qp_id); + + /* + * Dequeue processed burst of ops from raw + * device + */ + ops_deqd = rte_rawdev_dequeue_buffers( + ctx->dev_id, + (struct rte_rawdev_buf **)ops_processed, + test_burst_size, + (rte_rawdev_obj_t)&ctx->qp_id); + } else +#endif /* MULTI_FN_SUPPORTED */ + { + /* Enqueue burst of ops on crypto device */ + ops_enqd = rte_cryptodev_enqueue_burst( + ctx->dev_id, ctx->qp_id, ops, + burst_size); + + /* + * Dequeue processed burst of ops from crypto + * device + */ + ops_deqd = rte_cryptodev_dequeue_burst( + ctx->dev_id, ctx->qp_id, + ops_processed, test_burst_size); + } + if (ops_enqd < burst_size) ops_enqd_failed++; @@ -216,11 +298,6 @@ cperf_throughput_test_runner(void *test_ctx) ops_unused = burst_size - ops_enqd; ops_enqd_total += ops_enqd; - - /* Dequeue processed burst of ops from crypto device */ - ops_deqd = rte_cryptodev_dequeue_burst(ctx->dev_id, ctx->qp_id, - ops_processed, test_burst_size); - if (likely(ops_deqd)) { /* Free crypto ops so they can be reused. */ rte_mempool_put_bulk(ctx->pool, @@ -238,15 +315,38 @@ cperf_throughput_test_runner(void *test_ctx) } - /* Dequeue any operations still in the crypto device */ - + /* Dequeue any operations still in the device */ while (ops_deqd_total < ctx->options->total_ops) { - /* Sending 0 length burst to flush sw crypto device */ - rte_cryptodev_enqueue_burst(ctx->dev_id, ctx->qp_id, NULL, 0); +#ifdef MULTI_FN_SUPPORTED + if (multi_fn) { + /* + * Sending 0 length burst to flush sw raw + * device + */ + rte_rawdev_enqueue_buffers(ctx->dev_id, NULL, 0, + (rte_rawdev_obj_t)&ctx->qp_id); - /* dequeue burst */ - ops_deqd = rte_cryptodev_dequeue_burst(ctx->dev_id, ctx->qp_id, - ops_processed, test_burst_size); + /* dequeue burst */ + ops_deqd = rte_rawdev_dequeue_buffers( + ctx->dev_id, + (struct rte_rawdev_buf **)ops_processed, + test_burst_size, + (rte_rawdev_obj_t)&ctx->qp_id); + } else +#endif /* MULTI_FN_SUPPORTED */ + { + /* + * Sending 0 length burst to flush sw crypto + * device + */ + rte_cryptodev_enqueue_burst( + ctx->dev_id, ctx->qp_id, NULL, 0); + + /* dequeue burst */ + ops_deqd = rte_cryptodev_dequeue_burst( + ctx->dev_id, ctx->qp_id, ops_processed, + test_burst_size); + } if (ops_deqd == 0) ops_deqd_failed++; else { diff --git a/app/test-crypto-perf/cperf_test_vector_parsing.c b/app/test-crypto-perf/cperf_test_vector_parsing.c index 1e9dfcfff..1395d86f2 100644 --- a/app/test-crypto-perf/cperf_test_vector_parsing.c +++ b/app/test-crypto-perf/cperf_test_vector_parsing.c @@ -581,11 +581,38 @@ cperf_test_vector_get_from_file(struct cperf_options *opts) } /* other values not included in the file */ - test_vector->data.cipher_offset = 0; - test_vector->data.cipher_length = opts->max_buffer_size; +#ifdef MULTI_FN_SUPPORTED + if (opts->op_type == CPERF_MULTI_FN) { + if (opts->multi_fn_opts.ops == + CPERF_MULTI_FN_OPS_DOCSIS_CIPHER_CRC || + opts->multi_fn_opts.ops == + CPERF_MULTI_FN_OPS_PON_CIPHER_CRC_BIP) { + test_vector->data.cipher_offset = + opts->multi_fn_opts.cipher_offset; + test_vector->data.cipher_length = + opts->max_buffer_size; + + test_vector->multi_fn_data.crc_offset = + opts->multi_fn_opts.crc_offset; + test_vector->multi_fn_data.crc_length = + 
opts->max_buffer_size; + } - test_vector->data.auth_offset = 0; - test_vector->data.auth_length = opts->max_buffer_size; + if (opts->multi_fn_opts.ops == + CPERF_MULTI_FN_OPS_PON_CIPHER_CRC_BIP) { + test_vector->multi_fn_data.bip_offset = 0; + test_vector->multi_fn_data.bip_length = + opts->max_buffer_size; + } + } else +#endif /* MULTI_FN_SUPPORTED */ + { + test_vector->data.cipher_offset = 0; + test_vector->data.cipher_length = opts->max_buffer_size; + + test_vector->data.auth_offset = 0; + test_vector->data.auth_length = opts->max_buffer_size; + } return test_vector; } diff --git a/app/test-crypto-perf/cperf_test_vectors.c b/app/test-crypto-perf/cperf_test_vectors.c index 41641650c..5b6a048b1 100644 --- a/app/test-crypto-perf/cperf_test_vectors.c +++ b/app/test-crypto-perf/cperf_test_vectors.c @@ -587,5 +587,58 @@ cperf_test_vector_get_dummy(struct cperf_options *options) memcpy(t_vec->aead_iv.data, iv, options->aead_iv_sz); t_vec->aead_iv.length = options->aead_iv_sz; } + +#ifdef MULTI_FN_SUPPORTED + if (options->op_type == CPERF_MULTI_FN) { + if (options->multi_fn_opts.ops == + CPERF_MULTI_FN_OPS_DOCSIS_CIPHER_CRC || + options->multi_fn_opts.ops == + CPERF_MULTI_FN_OPS_PON_CIPHER_CRC_BIP) { + if (options->cipher_algo == RTE_CRYPTO_CIPHER_NULL) { + t_vec->cipher_key.length = 0; + t_vec->ciphertext.data = plaintext; + t_vec->cipher_key.data = NULL; + } else { + t_vec->cipher_key.length = + options->cipher_key_sz; + t_vec->ciphertext.data = ciphertext; + t_vec->cipher_key.data = cipher_key; + } + + /* Init IV data ptr */ + t_vec->cipher_iv.data = NULL; + + if (options->cipher_iv_sz != 0) { + /* Set IV parameters */ + t_vec->cipher_iv.data = rte_malloc(NULL, + options->cipher_iv_sz, 16); + if (t_vec->cipher_iv.data == NULL) { + rte_free(t_vec); + return NULL; + } + memcpy(t_vec->cipher_iv.data, iv, + options->cipher_iv_sz); + } + t_vec->ciphertext.length = options->max_buffer_size; + t_vec->cipher_iv.length = options->cipher_iv_sz; + t_vec->data.cipher_offset = + options->multi_fn_opts.cipher_offset; + t_vec->data.cipher_length = options->max_buffer_size; + + t_vec->multi_fn_data.crc_offset = + options->multi_fn_opts.crc_offset; + t_vec->multi_fn_data.crc_length = + options->max_buffer_size; + } + + if (options->multi_fn_opts.ops == + CPERF_MULTI_FN_OPS_PON_CIPHER_CRC_BIP) { + t_vec->multi_fn_data.bip_offset = 0; + t_vec->multi_fn_data.bip_length = + options->max_buffer_size; + } + } +#endif /* MULTI_FN_SUPPORTED */ + return t_vec; } diff --git a/app/test-crypto-perf/cperf_test_vectors.h b/app/test-crypto-perf/cperf_test_vectors.h index 6f10823ef..ad9ea89e6 100644 --- a/app/test-crypto-perf/cperf_test_vectors.h +++ b/app/test-crypto-perf/cperf_test_vectors.h @@ -68,6 +68,15 @@ struct cperf_test_vector { uint32_t aead_offset; uint32_t aead_length; } data; + +#ifdef MULTI_FN_SUPPORTED + struct { + uint32_t crc_offset; + uint32_t crc_length; + uint32_t bip_offset; + uint32_t bip_length; + } multi_fn_data; +#endif /* MULTI_FN_SUPPORTED */ }; struct cperf_test_vector* diff --git a/app/test-crypto-perf/cperf_test_verify.c b/app/test-crypto-perf/cperf_test_verify.c index 833bc9a55..30ef2f568 100644 --- a/app/test-crypto-perf/cperf_test_verify.c +++ b/app/test-crypto-perf/cperf_test_verify.c @@ -6,6 +6,10 @@ #include #include #include +#ifdef MULTI_FN_SUPPORTED +#include +#include +#endif /* MULTI_FN_SUPPORTED */ #include "cperf_test_verify.h" #include "cperf_ops.h" @@ -36,17 +40,26 @@ struct cperf_op_result { static void cperf_verify_test_free(struct cperf_verify_ctx *ctx) { - if (ctx) { - if 
(ctx->sess) { + if (ctx) + return; + + if (ctx->sess) { +#ifdef MULTI_FN_SUPPORTED + if (ctx->options->op_type == CPERF_MULTI_FN) { + rte_multi_fn_session_destroy(ctx->dev_id, + (struct rte_multi_fn_session *)ctx->sess); + } else +#endif /* MULTI_FN_SUPPORTED */ + { rte_cryptodev_sym_session_clear(ctx->dev_id, ctx->sess); rte_cryptodev_sym_session_free(ctx->sess); } + } - if (ctx->pool) - rte_mempool_free(ctx->pool); + if (ctx->pool) + rte_mempool_free(ctx->pool); - rte_free(ctx); - } + rte_free(ctx); } void * @@ -58,6 +71,7 @@ cperf_verify_test_constructor(struct rte_mempool *sess_mp, const struct cperf_op_fns *op_fns) { struct cperf_verify_ctx *ctx = NULL; + uint16_t iv_offset; ctx = rte_malloc(NULL, sizeof(struct cperf_verify_ctx), 0); if (ctx == NULL) @@ -70,9 +84,17 @@ cperf_verify_test_constructor(struct rte_mempool *sess_mp, ctx->options = options; ctx->test_vector = test_vector; - /* IV goes at the end of the crypto operation */ - uint16_t iv_offset = sizeof(struct rte_crypto_op) + - sizeof(struct rte_crypto_sym_op); +#ifdef MULTI_FN_SUPPORTED + if (options->op_type == CPERF_MULTI_FN) { + /* IV goes at the end of the multi-function operation */ + iv_offset = sizeof(struct rte_multi_fn_op); + } else +#endif /* MULTI_FN_SUPPORTED */ + { + /* IV goes at the end of the crypto operation */ + iv_offset = sizeof(struct rte_crypto_op) + + sizeof(struct rte_crypto_sym_op); + } ctx->sess = op_fns->sess_create(sess_mp, sess_priv_mp, dev_id, options, test_vector, iv_offset); @@ -91,6 +113,65 @@ cperf_verify_test_constructor(struct rte_mempool *sess_mp, return NULL; } +#ifdef MULTI_FN_SUPPORTED +static int +cperf_verify_mf_op(struct rte_multi_fn_op *op, + const struct cperf_options *options, + const struct cperf_test_vector *vector) +{ + const struct rte_mbuf *m; + uint32_t len; + uint16_t nb_segs; + uint8_t *data; + int res = 0; + + if (op->overall_status != RTE_MULTI_FN_OP_STATUS_SUCCESS) + return 1; + + if (op->m_dst) + m = op->m_dst; + else + m = op->m_src; + nb_segs = m->nb_segs; + len = 0; + while (m && nb_segs != 0) { + len += m->data_len; + m = m->next; + nb_segs--; + } + + data = rte_malloc(NULL, len, 0); + if (data == NULL) + return 1; + + if (op->m_dst) + m = op->m_dst; + else + m = op->m_src; + nb_segs = m->nb_segs; + len = 0; + while (m && nb_segs != 0) { + memcpy(data + len, rte_pktmbuf_mtod(m, uint8_t *), + m->data_len); + len += m->data_len; + m = m->next; + nb_segs--; + } + + if (options->cipher_op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) + res += memcmp(data, + vector->ciphertext.data, + options->test_buffer_size); + else + res += memcmp(data, + vector->plaintext.data, + options->test_buffer_size); + + rte_free(data); + return !!res; +} +#endif /* MULTI_FN_SUPPORTED */ + static int cperf_verify_op(struct rte_crypto_op *op, const struct cperf_options *options, @@ -104,6 +185,12 @@ cperf_verify_op(struct rte_crypto_op *op, uint8_t cipher, auth; int res = 0; +#ifdef MULTI_FN_SUPPORTED + if (options->op_type == CPERF_MULTI_FN) + return cperf_verify_mf_op((struct rte_multi_fn_op *)op, options, + vector); +#endif /* MULTI_FN_SUPPORTED */ + if (op->status != RTE_CRYPTO_OP_STATUS_SUCCESS) return 1; @@ -252,15 +339,30 @@ cperf_verify_test_runner(void *test_ctx) uint32_t lcore = rte_lcore_id(); + uint16_t iv_offset; + + int multi_fn = 0; + +#ifdef MULTI_FN_SUPPORTED + if (ctx->options->op_type == CPERF_MULTI_FN) + multi_fn = 1; +#else + RTE_SET_USED(multi_fn); +#endif /* MULTI_FN_SUPPORTED */ + #ifdef CPERF_LINEARIZATION_ENABLE - struct rte_cryptodev_info dev_info; int linearize = 0; /* Check 
 	/* Check if source mbufs require coalescing */
 	if (ctx->options->segment_sz < ctx->options->max_buffer_size) {
-		rte_cryptodev_info_get(ctx->dev_id, &dev_info);
-		if ((dev_info.feature_flags &
-				RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER) == 0)
+		if (!multi_fn) {
+			struct rte_cryptodev_info dev_info;
+			rte_cryptodev_info_get(ctx->dev_id, &dev_info);
+			if ((dev_info.feature_flags &
+				RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER) ==
+					0)
+				linearize = 1;
+		} else
 			linearize = 1;
 	}
 #endif /* CPERF_LINEARIZATION_ENABLE */
@@ -271,12 +373,18 @@ cperf_verify_test_runner(void *test_ctx)
 	printf("\n# Running verify test on device: %u, lcore: %u\n",
 			ctx->dev_id, lcore);
 
-	uint16_t iv_offset = sizeof(struct rte_crypto_op) +
-		sizeof(struct rte_crypto_sym_op);
+#ifdef MULTI_FN_SUPPORTED
+	if (multi_fn)
+		iv_offset = sizeof(struct rte_multi_fn_op);
+	else
+#endif /* MULTI_FN_SUPPORTED */
+		iv_offset = sizeof(struct rte_crypto_op) +
+				sizeof(struct rte_crypto_sym_op);
 
 	while (ops_enqd_total < ctx->options->total_ops) {
 
-		uint16_t burst_size = ((ops_enqd_total + ctx->options->max_burst_size)
+		uint16_t burst_size = ((ops_enqd_total +
+					ctx->options->max_burst_size)
 				<= ctx->options->total_ops) ?
 						ctx->options->max_burst_size :
 						ctx->options->total_ops -
@@ -319,9 +427,32 @@ cperf_verify_test_runner(void *test_ctx)
 		}
 #endif /* CPERF_LINEARIZATION_ENABLE */
 
-		/* Enqueue burst of ops on crypto device */
-		ops_enqd = rte_cryptodev_enqueue_burst(ctx->dev_id, ctx->qp_id,
-				ops, burst_size);
+#ifdef MULTI_FN_SUPPORTED
+		if (multi_fn) {
+			/* Enqueue burst of ops on raw device */
+			ops_enqd = rte_rawdev_enqueue_buffers(ctx->dev_id,
+					(struct rte_rawdev_buf **)ops,
+					burst_size,
+					(rte_rawdev_obj_t)&ctx->qp_id);
+
+			/* Dequeue processed burst of ops from raw device */
+			ops_deqd = rte_rawdev_dequeue_buffers(ctx->dev_id,
+					(struct rte_rawdev_buf **)ops_processed,
+					ctx->options->max_burst_size,
+					(rte_rawdev_obj_t)&ctx->qp_id);
+		} else
+#endif /* MULTI_FN_SUPPORTED */
+		{
+			/* Enqueue burst of ops on crypto device */
+			ops_enqd = rte_cryptodev_enqueue_burst(ctx->dev_id,
+					ctx->qp_id, ops, burst_size);
+
+			/* Dequeue processed burst of ops from crypto device */
+			ops_deqd = rte_cryptodev_dequeue_burst(ctx->dev_id,
+					ctx->qp_id, ops_processed,
+					ctx->options->max_burst_size);
+		}
+
 		if (ops_enqd < burst_size)
 			ops_enqd_failed++;
 
@@ -332,11 +463,6 @@ cperf_verify_test_runner(void *test_ctx)
 		ops_unused = burst_size - ops_enqd;
 		ops_enqd_total += ops_enqd;
-
-		/* Dequeue processed burst of ops from crypto device */
-		ops_deqd = rte_cryptodev_dequeue_burst(ctx->dev_id, ctx->qp_id,
-				ops_processed, ctx->options->max_burst_size);
-
 		if (ops_deqd == 0) {
 			/**
 			 * Count dequeue polls which didn't return any
@@ -358,15 +484,32 @@ cperf_verify_test_runner(void *test_ctx)
 		ops_deqd_total += ops_deqd;
 	}
 
-	/* Dequeue any operations still in the crypto device */
-
+	/* Dequeue any operations still in the device */
 	while (ops_deqd_total < ctx->options->total_ops) {
-		/* Sending 0 length burst to flush sw crypto device */
-		rte_cryptodev_enqueue_burst(ctx->dev_id, ctx->qp_id, NULL, 0);
+#ifdef MULTI_FN_SUPPORTED
+		if (multi_fn) {
+			/* Sending 0 length burst to flush sw raw device */
+			rte_rawdev_enqueue_buffers(ctx->dev_id, NULL, 0,
+					(rte_rawdev_obj_t)&ctx->qp_id);
+
+			/* dequeue burst */
+			ops_deqd = rte_rawdev_dequeue_buffers(ctx->dev_id,
+					(struct rte_rawdev_buf **)ops_processed,
+					ctx->options->max_burst_size,
+					(rte_rawdev_obj_t)&ctx->qp_id);
+		} else
+#endif /* MULTI_FN_SUPPORTED */
+		{
+			/* Sending 0 length burst to flush sw crypto device */
+			rte_cryptodev_enqueue_burst(ctx->dev_id, ctx->qp_id,
+					NULL, 0);
+
+			/* dequeue burst */
+			ops_deqd = rte_cryptodev_dequeue_burst(ctx->dev_id,
+					ctx->qp_id, ops_processed,
+					ctx->options->max_burst_size);
+		}
 
-		/* dequeue burst */
-		ops_deqd = rte_cryptodev_dequeue_burst(ctx->dev_id, ctx->qp_id,
-				ops_processed, ctx->options->max_burst_size);
 		if (ops_deqd == 0) {
 			ops_deqd_failed++;
 			continue;
diff --git a/app/test-crypto-perf/main.c b/app/test-crypto-perf/main.c
index 52a1860fb..c8334160e 100644
--- a/app/test-crypto-perf/main.c
+++ b/app/test-crypto-perf/main.c
@@ -12,6 +12,10 @@
 #ifdef RTE_LIBRTE_PMD_CRYPTO_SCHEDULER
 #include
 #endif
+#ifdef MULTI_FN_SUPPORTED
+#include
+#include
+#endif /* MULTI_FN_SUPPORTED */
 
 #include "cperf.h"
 #include "cperf_options.h"
@@ -39,9 +43,19 @@ const char *cperf_op_type_strs[] = {
 	[CPERF_CIPHER_THEN_AUTH] = "cipher-then-auth",
 	[CPERF_AUTH_THEN_CIPHER] = "auth-then-cipher",
 	[CPERF_AEAD] = "aead",
-	[CPERF_PDCP] = "pdcp"
+	[CPERF_PDCP] = "pdcp",
+#ifdef MULTI_FN_SUPPORTED
+	[CPERF_MULTI_FN] = "multi-fn"
+#endif /* MULTI_FN_SUPPORTED */
 };
 
+#ifdef MULTI_FN_SUPPORTED
+const char *cperf_multi_fn_ops_strs[] = {
+	[CPERF_MULTI_FN_OPS_DOCSIS_CIPHER_CRC] = "docsis-cipher-crc",
+	[CPERF_MULTI_FN_OPS_PON_CIPHER_CRC_BIP] = "pon-cipher-crc-bip"
+};
+#endif /* MULTI_FN_SUPPORTED */
+
 const struct cperf_test cperf_testmap[] = {
 	[CPERF_TEST_TYPE_THROUGHPUT] = {
 		cperf_throughput_test_constructor,
@@ -294,7 +308,7 @@ cperf_initialize_cryptodev(struct cperf_options *opts, uint8_t *enabled_cdevs)
 }
 
 static int
-cperf_verify_devices_capabilities(struct cperf_options *opts,
+cperf_verify_crypto_devices_capabilities(struct cperf_options *opts,
 		uint8_t *enabled_cdevs, uint8_t nb_cryptodevs)
 {
 	struct rte_cryptodev_sym_capability_idx cap_idx;
@@ -369,8 +383,136 @@ cperf_verify_devices_capabilities(struct cperf_options *opts,
 		}
 	}
 
+#ifdef MULTI_FN_SUPPORTED
+	if (opts->op_type == CPERF_MULTI_FN)
+		return -1;
+#endif /* MULTI_FN_SUPPORTED */
+
+	return 0;
+}
+
+#ifdef MULTI_FN_SUPPORTED
+static uint8_t
+cperf_get_rawdevs(const char *driver_name, uint8_t *devices,
+		uint8_t nb_devices)
+{
+	struct rte_rawdev_info rdev_info;
+	uint8_t i, count = 0;
+
+	for (i = 0; i < RTE_RAWDEV_MAX_DEVS && count < nb_devices; i++) {
+		memset(&rdev_info, 0, sizeof(struct rte_rawdev_info));
+		if (!rte_rawdev_info_get(i, &rdev_info) &&
+				!strncmp(rdev_info.driver_name,
+					driver_name,
+					strlen(driver_name) + 1))
+			devices[count++] = i;
+	}
+
+	return count;
+}
+
+static int
+cperf_initialize_rawdev(struct cperf_options *opts, uint8_t *enabled_rdevs)
+{
+	uint8_t enabled_rdev_count = 0, nb_lcores, rdev_id;
+	unsigned int i, j;
+	int ret;
+
+	enabled_rdev_count = cperf_get_rawdevs(opts->device_type,
+			enabled_rdevs, RTE_RAWDEV_MAX_DEVS);
+	if (enabled_rdev_count == 0) {
+		printf("No raw devices type %s available\n",
+				opts->device_type);
+		return -EINVAL;
+	}
+
+	nb_lcores = rte_lcore_count() - 1;
+
+	if (nb_lcores < 1) {
+		RTE_LOG(ERR, USER1,
+			"Number of enabled cores need to be higher than 1\n");
+		return -EINVAL;
+	}
+
+	/*
+	 * Calculate number of needed queue pairs, based on the number
+	 * of available logical cores and raw devices. For instance,
+	 * if there are 4 cores and 2 raw devices, 2 queue pairs will
+	 * be set up per device.
+	 */
+	opts->nb_qps = (nb_lcores % enabled_rdev_count) ?
+			(nb_lcores / enabled_rdev_count) + 1 :
+			nb_lcores / enabled_rdev_count;
+
+	for (i = 0; i < enabled_rdev_count &&
+			i < RTE_RAWDEV_MAX_DEVS; i++) {
+		rdev_id = enabled_rdevs[i];
+
+		struct rte_rawdev_info rdev_info = {0};
+		struct rte_multi_fn_dev_info mf_info = {0};
+		struct rte_multi_fn_dev_config mf_dev_conf = {0};
+		struct rte_multi_fn_qp_config qp_conf = {0};
+		uint8_t socket_id = rte_rawdev_socket_id(rdev_id);
+
+		/*
+		 * Range check the socket_id - negative values become big
+		 * positive ones due to use of unsigned value
+		 */
+		if (socket_id >= RTE_MAX_NUMA_NODES)
+			socket_id = 0;
+
+		rdev_info.dev_private = &mf_info;
+		rte_rawdev_info_get(rdev_id, &rdev_info);
+		if (opts->nb_qps > mf_info.max_nb_queues) {
+			printf("Number of needed queue pairs is higher "
+					"than the maximum number of queue pairs "
+					"per device.\n");
+			printf("Lower the number of cores or increase "
+					"the number of raw devices\n");
+			return -EINVAL;
+		}
+
+		mf_dev_conf.nb_queues = opts->nb_qps;
+		rdev_info.dev_private = &mf_dev_conf;
+		qp_conf.nb_descriptors = opts->nb_descriptors;
+
+		ret = rte_rawdev_configure(rdev_id, &rdev_info);
+		if (ret < 0) {
+			printf("Failed to configure rawdev %u", rdev_id);
+			return -EINVAL;
+		}
+
+		for (j = 0; j < opts->nb_qps; j++) {
+			ret = rte_rawdev_queue_setup(rdev_id, j, &qp_conf);
+			if (ret < 0) {
+				printf("Failed to setup queue pair %u on "
+						"rawdev %u", j, rdev_id);
+				return -EINVAL;
+			}
+		}
+
+		ret = rte_rawdev_start(rdev_id);
+		if (ret < 0) {
+			printf("Failed to start raw device %u: error %d\n",
+					rdev_id, ret);
+			return -EPERM;
+		}
+	}
+
+	return enabled_rdev_count;
+}
+
+static int
+cperf_verify_raw_devices_capabilities(struct cperf_options *opts,
+		__rte_unused uint8_t *enabled_rdevs,
+		__rte_unused uint8_t nb_rawdevs)
+{
+	if (opts->op_type != CPERF_MULTI_FN)
+		return -1;
+
 	return 0;
 }
+#endif /* MULTI_FN_SUPPORTED */
 
 static int
 cperf_check_test_vector(struct cperf_options *opts,
@@ -499,10 +641,16 @@ main(int argc, char **argv)
 	struct cperf_test_vector *t_vec = NULL;
 	struct cperf_op_fns op_fns;
 	void *ctx[RTE_MAX_LCORE] = { };
-	int nb_cryptodevs = 0;
+	int nb_devs = 0;
 	uint16_t total_nb_qps = 0;
-	uint8_t cdev_id, i;
-	uint8_t enabled_cdevs[RTE_CRYPTO_MAX_DEVS] = { 0 };
+	uint8_t dev_id, i;
+#ifndef MULTI_FN_SUPPORTED
+	uint8_t enabled_devs[RTE_CRYPTO_MAX_DEVS] = { 0 };
+#else
+	uint8_t max_devs = RTE_MAX(RTE_CRYPTO_MAX_DEVS, RTE_RAWDEV_MAX_DEVS);
+	uint8_t enabled_devs[max_devs];
+	memset(enabled_devs, 0x0, max_devs);
+#endif /* MULTI_FN_SUPPORTED */
 
 	uint8_t buffer_size_idx = 0;
 
@@ -531,24 +679,49 @@ main(int argc, char **argv)
 		goto err;
 	}
 
-	nb_cryptodevs = cperf_initialize_cryptodev(&opts, enabled_cdevs);
+#ifdef MULTI_FN_SUPPORTED
+	if (opts.op_type == CPERF_MULTI_FN) {
+		nb_devs = cperf_initialize_rawdev(&opts, enabled_devs);
 
-	if (!opts.silent)
-		cperf_options_dump(&opts);
+		if (!opts.silent)
+			cperf_options_dump(&opts);
 
-	if (nb_cryptodevs < 1) {
-		RTE_LOG(ERR, USER1, "Failed to initialise requested crypto "
-				"device type\n");
-		nb_cryptodevs = 0;
-		goto err;
-	}
+		if (nb_devs < 1) {
+			RTE_LOG(ERR, USER1, "Failed to initialise requested "
+					"raw device type\n");
+			nb_devs = 0;
+			goto err;
+		}
 
-	ret = cperf_verify_devices_capabilities(&opts, enabled_cdevs,
-			nb_cryptodevs);
-	if (ret) {
-		RTE_LOG(ERR, USER1, "Crypto device type does not support "
-				"capabilities requested\n");
-		goto err;
+		ret = cperf_verify_raw_devices_capabilities(&opts,
+				enabled_devs, nb_devs);
+		if (ret) {
+			RTE_LOG(ERR, USER1, "Raw device type does not "
+					"support capabilities requested\n");
+			goto err;
+		}
+	} else
+#endif /* MULTI_FN_SUPPORTED */
+	{
+		nb_devs = cperf_initialize_cryptodev(&opts, enabled_devs);
+
+		if (!opts.silent)
+			cperf_options_dump(&opts);
+
+		if (nb_devs < 1) {
+			RTE_LOG(ERR, USER1, "Failed to initialise requested "
+					"crypto device type\n");
+			nb_devs = 0;
+			goto err;
+		}
+
+		ret = cperf_verify_crypto_devices_capabilities(&opts,
+				enabled_devs, nb_devs);
+		if (ret) {
+			RTE_LOG(ERR, USER1, "Crypto device type does not "
+					"support capabilities requested\n");
+			goto err;
+		}
 	}
 
 	if (opts.test_file != NULL) {
@@ -585,23 +758,29 @@ main(int argc, char **argv)
 	if (!opts.silent)
 		show_test_vector(t_vec);
 
-	total_nb_qps = nb_cryptodevs * opts.nb_qps;
+	total_nb_qps = nb_devs * opts.nb_qps;
 
 	i = 0;
-	uint8_t qp_id = 0, cdev_index = 0;
+	uint8_t qp_id = 0, dev_index = 0;
+
 	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
 
 		if (i == total_nb_qps)
 			break;
 
-		cdev_id = enabled_cdevs[cdev_index];
+		dev_id = enabled_devs[dev_index];
 
-		uint8_t socket_id = rte_cryptodev_socket_id(cdev_id);
+		uint8_t socket_id;
+#ifdef MULTI_FN_SUPPORTED
+		if (opts.op_type == CPERF_MULTI_FN)
+			socket_id = rte_rawdev_socket_id(dev_id);
+		else
+#endif /* MULTI_FN_SUPPORTED */
+			socket_id = rte_cryptodev_socket_id(dev_id);
 
 		ctx[i] = cperf_testmap[opts.test].constructor(
 				session_pool_socket[socket_id].sess_mp,
 				session_pool_socket[socket_id].priv_mp,
-				cdev_id, qp_id,
+				dev_id, qp_id,
 				&opts, t_vec, &op_fns);
 		if (ctx[i] == NULL) {
 			RTE_LOG(ERR, USER1, "Test run constructor failed\n");
@@ -609,7 +788,7 @@ main(int argc, char **argv)
 		}
 		qp_id = (qp_id + 1) % opts.nb_qps;
 		if (qp_id == 0)
-			cdev_index++;
+			dev_index++;
 		i++;
 	}
 
@@ -726,9 +905,15 @@ main(int argc, char **argv)
 		i++;
 	}
 
-	for (i = 0; i < nb_cryptodevs &&
-			i < RTE_CRYPTO_MAX_DEVS; i++)
-		rte_cryptodev_stop(enabled_cdevs[i]);
+	for (i = 0; i < nb_devs &&
+			i < RTE_DIM(enabled_devs); i++) {
+#ifdef MULTI_FN_SUPPORTED
+		if (opts.op_type == CPERF_MULTI_FN)
+			rte_rawdev_stop(enabled_devs[i]);
+		else
+#endif /* MULTI_FN_SUPPORTED */
+			rte_cryptodev_stop(enabled_devs[i]);
+	}
 
 	free_test_vector(t_vec, &opts);
 
@@ -746,9 +931,15 @@ main(int argc, char **argv)
 		i++;
 	}
 
-	for (i = 0; i < nb_cryptodevs &&
-			i < RTE_CRYPTO_MAX_DEVS; i++)
-		rte_cryptodev_stop(enabled_cdevs[i]);
+	for (i = 0; i < nb_devs &&
+			i < RTE_DIM(enabled_devs); i++) {
+#ifdef MULTI_FN_SUPPORTED
+		if (opts.op_type == CPERF_MULTI_FN)
+			rte_rawdev_stop(enabled_devs[i]);
+		else
+#endif /* MULTI_FN_SUPPORTED */
+			rte_cryptodev_stop(enabled_devs[i]);
+	}
 
 	rte_free(opts.imix_buffer_sizes);
 	free_test_vector(t_vec, &opts);
diff --git a/app/test-crypto-perf/meson.build b/app/test-crypto-perf/meson.build
index 0674396da..28b54611f 100644
--- a/app/test-crypto-perf/meson.build
+++ b/app/test-crypto-perf/meson.build
@@ -13,3 +13,9 @@ sources = files('cperf_ops.c',
 		'cperf_test_verify.c',
 		'main.c')
 deps += ['cryptodev', 'security']
+#deps += ['cryptodev', 'security', 'rawdev', 'common_multi_fn']
+
+if dpdk_conf.has('RTE_LIBRTE_MULTI_FN_COMMON') and dpdk_conf.has('RTE_LIBRTE_PMD_AESNI_MB_RAWDEV')
+	deps += ['rawdev', 'common_multi_fn']
+	cflags += ['-DMULTI_FN_SUPPORTED']
+endif
-- 
2.17.1