From mboxrd@z Thu Jan 1 00:00:00 1970 Return-Path: Received: from dpdk.org (dpdk.org [92.243.14.124]) by inbox.dpdk.org (Postfix) with ESMTP id 392E2A057B; Wed, 18 Mar 2020 17:44:15 +0100 (CET) Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id 6B50F1BF7E; Wed, 18 Mar 2020 17:44:03 +0100 (CET) Received: from mga04.intel.com (mga04.intel.com [192.55.52.120]) by dpdk.org (Postfix) with ESMTP id E5D92FEB for ; Wed, 18 Mar 2020 17:43:58 +0100 (CET) IronPort-SDR: nugJKwOaVCatz8HxScnrbbFvdYzc6KMUk3V6R59gmM0k5Plv9jqzKQBomfmzFlyDDqL3ZIWNSY zbrE3dMKQx/Q== X-Amp-Result: SKIPPED(no attachment in message) X-Amp-File-Uploaded: False Received: from orsmga002.jf.intel.com ([10.7.209.21]) by fmsmga104.fm.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 18 Mar 2020 09:43:57 -0700 IronPort-SDR: eFSmd0+QNzDnuCwyMcnTjPDDTB2woHwu127pUndlJYp0i+J1Dr5SpSP9mpkBII/GK9Vvfpjp8i Ikx7Zzkfj30Q== X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="5.70,568,1574150400"; d="scan'208";a="263446069" Received: from adamdybx-mobl.ger.corp.intel.com (HELO addy-VirtualBox.ger.corp.intel.com) ([10.104.125.86]) by orsmga002.jf.intel.com with ESMTP; 18 Mar 2020 09:43:55 -0700 From: Adam Dybkowski To: dev@dpdk.org, fiona.trahe@intel.com, akhil.goyal@nxp.com Cc: Adam Dybkowski Date: Wed, 18 Mar 2020 17:33:49 +0100 Message-Id: <20200318163349.1789-2-adamx.dybkowski@intel.com> X-Mailer: git-send-email 2.17.1 In-Reply-To: <20200318163349.1789-1-adamx.dybkowski@intel.com> References: <20200318163349.1789-1-adamx.dybkowski@intel.com> Subject: [dpdk-dev] [PATCH 2/2] test/compress: im buffer too small - add unit tests X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" This patch adds new test suites for verification of the "internal QAT IM buffer too small" case handling. 
These unit tests are specific to the QAT PMD only - that's why they are contained in new test suites (separate for positive and negative tests for better maintainability). Signed-off-by: Adam Dybkowski --- app/test/test_compressdev.c | 1234 +++++++++++++++++++++++++++++++++-- 1 file changed, 1166 insertions(+), 68 deletions(-) diff --git a/app/test/test_compressdev.c b/app/test/test_compressdev.c index 7549135c2..15dea5e08 100644 --- a/app/test/test_compressdev.c +++ b/app/test/test_compressdev.c @@ -1,10 +1,11 @@ /* SPDX-License-Identifier: BSD-3-Clause * Copyright(c) 2018 - 2019 Intel Corporation */ -#include -#include #include +#include #include +#include +#include #include #include @@ -30,6 +31,7 @@ * due to the compress block headers */ #define COMPRESS_BUF_SIZE_RATIO 1.3 +#define COMPRESS_BUF_SIZE_RATIO_DISABLED 1.0 #define COMPRESS_BUF_SIZE_RATIO_OVERFLOW 0.2 #define NUM_LARGE_MBUFS 16 #define SMALL_SEG_SIZE 256 @@ -52,6 +54,25 @@ #define NUM_BIG_MBUFS 4 #define BIG_DATA_TEST_SIZE (MAX_DATA_MBUF_SIZE * NUM_BIG_MBUFS / 2) +/* constants for "im buffer" tests start here */ +#define IM_NUM_BIG_MBUFS (512 + 1) +#define IM_BIG_DATA_TEST_SIZE (MAX_DATA_MBUF_SIZE * 2) +/* number of mbufs lower than number of inflight ops */ +#define IM_BUF_NUM_MBUFS 3 +/* slightly above THD (59578) and below the mbuf size */ +#define IM_BUF_DATA_TEST_SIZE_LB 59600 +/* data size smaller than the queue capacity */ +#define IM_BUF_DATA_TEST_SIZE_SGL (MAX_DATA_MBUF_SIZE * IM_BUF_NUM_MBUFS) +/* number of mbufs bigger than number of inflight ops */ +#define IM_BUF_NUM_MBUFS_OVER (NUM_MAX_INFLIGHT_OPS + 1) +/* data size bigger than the queue capacity */ +#define IM_BUF_DATA_TEST_SIZE_OVER (MAX_DATA_MBUF_SIZE * IM_BUF_NUM_MBUFS_OVER) +/* number of mid-size mbufs */ +#define IM_BUF_NUM_MBUFS_MID ((NUM_MAX_INFLIGHT_OPS / 3) + 1) +/* capacity of mid-size mbufs */ +#define IM_BUF_DATA_TEST_SIZE_MID (MAX_DATA_MBUF_SIZE * IM_BUF_NUM_MBUFS_MID) + + const char * huffman_type_strings[] = { 
[RTE_COMP_HUFFMAN_DEFAULT] = "PMD default", @@ -78,6 +99,11 @@ enum overflow_test { OVERFLOW_ENABLED }; +enum ratio_switch { + RATIO_DISABLED, + RATIO_ENABLED +}; + enum operation_type { OPERATION_COMPRESSION, OPERATION_DECOMPRESSION @@ -88,6 +114,7 @@ struct priv_op_data { }; struct comp_testsuite_params { + const char *suite_name; struct rte_mempool *large_mbuf_pool; struct rte_mempool *small_mbuf_pool; struct rte_mempool *big_mbuf_pool; @@ -123,6 +150,7 @@ struct test_data_params { const struct rte_memzone *uncompbuf_memzone; /* overflow test activation */ enum overflow_test overflow; + enum ratio_switch ratio; }; struct test_private_arrays { @@ -141,6 +169,26 @@ struct test_private_arrays { static struct comp_testsuite_params testsuite_params = { 0 }; + +static uint8_t +is_im_buffer_testsuite(void) +{ + return strstr(testsuite_params.suite_name, "'im buffer'") != NULL; +} + +static unsigned int +get_num_big_mbufs(void) +{ + return is_im_buffer_testsuite() ? IM_NUM_BIG_MBUFS : NUM_BIG_MBUFS; +} + +static unsigned int +get_big_data_test_size(void) +{ + return is_im_buffer_testsuite() ? 
+ IM_BIG_DATA_TEST_SIZE : BIG_DATA_TEST_SIZE; +} + static void testsuite_teardown(void) { @@ -212,7 +260,7 @@ testsuite_setup(void) /* Create mempool with big buffers for SGL testing */ ts_params->big_mbuf_pool = rte_pktmbuf_pool_create("big_mbuf_pool", - NUM_BIG_MBUFS + 1, + get_num_big_mbufs() + 1, CACHE_SIZE, 0, MAX_MBUF_SEGMENT_SIZE, rte_socket_id()); @@ -691,7 +739,7 @@ prepare_sgl_bufs(const char *test_buf, struct rte_mbuf *head_buf, if (data_ptr != NULL) { /* Copy characters without NULL terminator */ - strncpy(buf_ptr, data_ptr, data_size); + memcpy(buf_ptr, data_ptr, data_size); data_ptr += data_size; } remaining_data -= data_size; @@ -731,7 +779,7 @@ prepare_sgl_bufs(const char *test_buf, struct rte_mbuf *head_buf, } if (data_ptr != NULL) { /* Copy characters without NULL terminator */ - strncpy(buf_ptr, data_ptr, data_size); + memcpy(buf_ptr, data_ptr, data_size); data_ptr += data_size; } remaining_data -= data_size; @@ -956,7 +1004,9 @@ test_mbufs_calculate_data_size( /* local variables: */ uint32_t data_size; struct priv_op_data *priv_data; - float ratio; + float ratio_val; + enum ratio_switch ratio = test_data->ratio; + uint8_t not_zlib_compr; /* true if zlib isn't current compression dev */ enum overflow_test overflow = test_data->overflow; @@ -973,13 +1023,16 @@ test_mbufs_calculate_data_size( not_zlib_compr = (test_data->zlib_dir == ZLIB_DECOMPRESS || test_data->zlib_dir == ZLIB_NONE); - ratio = (not_zlib_compr && + ratio_val = (ratio == RATIO_ENABLED) ? + COMPRESS_BUF_SIZE_RATIO : + COMPRESS_BUF_SIZE_RATIO_DISABLED; + + ratio_val = (not_zlib_compr && (overflow == OVERFLOW_ENABLED)) ? 
COMPRESS_BUF_SIZE_RATIO_OVERFLOW : - COMPRESS_BUF_SIZE_RATIO; - - data_size = strlen(test_bufs[i]) * ratio; + ratio_val; + data_size = strlen(test_bufs[i]) * ratio_val; } else { priv_data = (struct priv_op_data *) (ops_processed[i] + 1); @@ -1085,6 +1138,9 @@ test_setup_output_bufs( } else { for (i = 0; i < num_bufs; i++) { + enum rte_comp_huffman comp_huffman = + ts_params->def_comp_xform->compress.deflate.huffman; + /* data size calculation */ data_size = test_mbufs_calculate_data_size( op_type, @@ -1094,6 +1150,11 @@ test_setup_output_bufs( test_data, i); + if (comp_huffman != RTE_COMP_HUFFMAN_DYNAMIC) { + if (op_type == OPERATION_DECOMPRESSION) + data_size *= COMPRESS_BUF_SIZE_RATIO; + } + /* data allocation */ if (buff_type == SGL_BOTH || buff_type == LB_TO_SGL) { ret = prepare_sgl_bufs(NULL, current_bufs[i], @@ -1192,6 +1253,11 @@ test_deflate_comp_run(const struct interim_data_params *int_data, ops[i]->src.length = rte_pktmbuf_pkt_len(uncomp_bufs[i]); ops[i]->dst.offset = 0; + RTE_LOG(DEBUG, USER1, + "Uncompressed buffer length = %u compressed buffer length = %u", + rte_pktmbuf_pkt_len(uncomp_bufs[i]), + rte_pktmbuf_pkt_len(comp_bufs[i])); + if (operation_type == RTE_COMP_OP_STATELESS) { ops[i]->flush_flag = RTE_COMP_FLUSH_FINAL; } else { @@ -1431,7 +1497,7 @@ test_deflate_comp_finalize(const struct interim_data_params *int_data, } RTE_LOG(ERR, USER1, - "Some operations were not successful\n"); + "Comp: Some operations were not successful\n"); return -1; } priv_data = (struct priv_op_data *)(ops_processed[i] + 1); @@ -1490,6 +1556,7 @@ test_deflate_decomp_run(const struct interim_data_params *int_data, /* from test_priv_data: */ struct rte_mbuf **uncomp_bufs = test_priv_data->uncomp_bufs; + struct rte_mbuf **comp_bufs = test_priv_data->comp_bufs; struct rte_comp_op **ops = test_priv_data->ops; struct rte_comp_op **ops_processed = test_priv_data->ops_processed; void **priv_xforms = test_priv_data->priv_xforms; @@ -1510,7 +1577,7 @@ 
test_deflate_decomp_run(const struct interim_data_params *int_data, /* Source buffer is the compressed data from the previous operations */ for (i = 0; i < num_bufs; i++) { - ops[i]->m_src = ops_processed[i]->m_dst; + ops[i]->m_src = comp_bufs[i]; ops[i]->m_dst = uncomp_bufs[i]; ops[i]->src.offset = 0; /* @@ -1740,6 +1807,10 @@ test_deflate_decomp_finalize(const struct interim_data_params *int_data, RTE_COMP_OP_STATUS_OUT_OF_SPACE_RECOVERABLE || ops_processed[i]->status == RTE_COMP_OP_STATUS_SUCCESS)) { + + RTE_LOG(DEBUG, USER1, + ".............RECOVERABLE\n"); + /* collect the output into all_decomp_data */ const void *ptr = rte_pktmbuf_read( ops_processed[i]->m_dst, @@ -1777,7 +1848,6 @@ test_deflate_decomp_finalize(const struct interim_data_params *int_data, ops[i]->src.length -= ops_processed[i]->consumed; /* repeat the operation */ - //goto next_step; return 2; } else { /* Compare the original stream with the */ @@ -1808,7 +1878,8 @@ test_deflate_decomp_finalize(const struct interim_data_params *int_data, } else if (ops_processed[i]->status != RTE_COMP_OP_STATUS_SUCCESS) { RTE_LOG(ERR, USER1, - "Some operations were not successful\n"); + "Decomp: Some operations were not successful, status = %u\n", + ops_processed[i]->status); return -1; } priv_data = (struct priv_op_data *)(ops_processed[i] + 1); @@ -1986,7 +2057,6 @@ test_deflate_comp_decomp(const struct interim_data_params *int_data, "Compress device does not support DEFLATE\n"); return -1; } - //test_objects_init(&test_priv_data, num_bufs); /* Prepare the source mbufs with the data */ ret = test_setup_com_bufs(int_data, test_data, &test_priv_data); @@ -1995,6 +2065,8 @@ test_deflate_comp_decomp(const struct interim_data_params *int_data, goto exit; } + RTE_LOG(DEBUG, USER1, "<<< COMPRESSION >>>\n"); + /* COMPRESSION */ /* Prepare output (destination) mbufs for compressed data */ @@ -2031,6 +2103,8 @@ test_deflate_comp_decomp(const struct interim_data_params *int_data, /* DECOMPRESSION */ + RTE_LOG(DEBUG, 
USER1, "<<< DECOMPRESSION >>>\n"); + /* Prepare output (destination) mbufs for decompressed data */ ret = test_setup_output_bufs( OPERATION_DECOMPRESSION, @@ -2152,7 +2226,8 @@ test_compressdev_deflate_stateless_fixed(void) .zlib_dir = ZLIB_DECOMPRESS, .out_of_space = 0, .big_data = 0, - .overflow = OVERFLOW_DISABLED + .overflow = OVERFLOW_DISABLED, + .ratio = RATIO_ENABLED }; for (i = 0; i < RTE_DIM(compress_test_bufs); i++) { @@ -2223,7 +2298,8 @@ test_compressdev_deflate_stateless_dynamic(void) .zlib_dir = ZLIB_DECOMPRESS, .out_of_space = 0, .big_data = 0, - .overflow = OVERFLOW_DISABLED + .overflow = OVERFLOW_DISABLED, + .ratio = RATIO_ENABLED }; for (i = 0; i < RTE_DIM(compress_test_bufs); i++) { @@ -2278,7 +2354,8 @@ test_compressdev_deflate_stateless_multi_op(void) .zlib_dir = ZLIB_DECOMPRESS, .out_of_space = 0, .big_data = 0, - .overflow = OVERFLOW_DISABLED + .overflow = OVERFLOW_DISABLED, + .ratio = RATIO_ENABLED }; /* Compress with compressdev, decompress with Zlib */ @@ -2332,7 +2409,8 @@ test_compressdev_deflate_stateless_multi_level(void) .zlib_dir = ZLIB_DECOMPRESS, .out_of_space = 0, .big_data = 0, - .overflow = OVERFLOW_DISABLED + .overflow = OVERFLOW_DISABLED, + .ratio = RATIO_ENABLED }; for (i = 0; i < RTE_DIM(compress_test_bufs); i++) { @@ -2422,7 +2500,8 @@ test_compressdev_deflate_stateless_multi_xform(void) .zlib_dir = ZLIB_DECOMPRESS, .out_of_space = 0, .big_data = 0, - .overflow = OVERFLOW_DISABLED + .overflow = OVERFLOW_DISABLED, + .ratio = RATIO_ENABLED }; /* Compress with compressdev, decompress with Zlib */ @@ -2471,7 +2550,8 @@ test_compressdev_deflate_stateless_sgl(void) .zlib_dir = ZLIB_DECOMPRESS, .out_of_space = 0, .big_data = 0, - .overflow = OVERFLOW_DISABLED + .overflow = OVERFLOW_DISABLED, + .ratio = RATIO_ENABLED }; for (i = 0; i < RTE_DIM(compress_test_bufs); i++) { @@ -2582,7 +2662,8 @@ test_compressdev_deflate_stateless_checksum(void) .zlib_dir = ZLIB_DECOMPRESS, .out_of_space = 0, .big_data = 0, - .overflow = 
OVERFLOW_DISABLED + .overflow = OVERFLOW_DISABLED, + .ratio = RATIO_ENABLED }; /* Check if driver supports crc32 checksum and test */ @@ -2700,7 +2781,8 @@ test_compressdev_out_of_space_buffer(void) .zlib_dir = ZLIB_DECOMPRESS, .out_of_space = 1, /* run out-of-space test */ .big_data = 0, - .overflow = OVERFLOW_DISABLED + .overflow = OVERFLOW_DISABLED, + .ratio = RATIO_ENABLED }; /* Compress with compressdev, decompress with Zlib */ test_data.zlib_dir = ZLIB_DECOMPRESS; @@ -2742,7 +2824,7 @@ test_compressdev_deflate_stateless_dynamic_big(void) struct comp_testsuite_params *ts_params = &testsuite_params; uint16_t i = 0; int ret; - int j; + unsigned int j; const struct rte_compressdev_capabilities *capab; char *test_buffer = NULL; @@ -2755,7 +2837,7 @@ test_compressdev_deflate_stateless_dynamic_big(void) if ((capab->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) == 0) return -ENOTSUP; - test_buffer = rte_malloc(NULL, BIG_DATA_TEST_SIZE, 0); + test_buffer = rte_malloc(NULL, get_big_data_test_size(), 0); if (test_buffer == NULL) { RTE_LOG(ERR, USER1, "Can't allocate buffer for big-data\n"); @@ -2778,7 +2860,8 @@ test_compressdev_deflate_stateless_dynamic_big(void) .zlib_dir = ZLIB_DECOMPRESS, .out_of_space = 0, .big_data = 1, - .overflow = OVERFLOW_DISABLED + .overflow = OVERFLOW_DISABLED, + .ratio = RATIO_DISABLED }; ts_params->def_comp_xform->compress.deflate.huffman = @@ -2786,9 +2869,9 @@ test_compressdev_deflate_stateless_dynamic_big(void) /* fill the buffer with data based on rand. 
data */ srand(BIG_DATA_TEST_SIZE); - for (j = 0; j < BIG_DATA_TEST_SIZE - 1; ++j) + for (j = 0; j < get_big_data_test_size() - 1; ++j) test_buffer[j] = (uint8_t)(rand() % ((uint8_t)-1)) | 1; - test_buffer[BIG_DATA_TEST_SIZE-1] = 0; + test_buffer[get_big_data_test_size() - 1] = 0; /* Compress with compressdev, decompress with Zlib */ test_data.zlib_dir = ZLIB_DECOMPRESS; @@ -2843,7 +2926,8 @@ test_compressdev_deflate_stateful_decomp(void) .big_data = 0, .decompress_output_block_size = 2000, .decompress_steps_max = 4, - .overflow = OVERFLOW_DISABLED + .overflow = OVERFLOW_DISABLED, + .ratio = RATIO_ENABLED }; /* Compress with Zlib, decompress with compressdev */ @@ -2926,7 +3010,8 @@ test_compressdev_deflate_stateful_decomp_checksum(void) .big_data = 0, .decompress_output_block_size = 2000, .decompress_steps_max = 4, - .overflow = OVERFLOW_DISABLED + .overflow = OVERFLOW_DISABLED, + .ratio = RATIO_ENABLED }; /* Check if driver supports crc32 checksum and test */ @@ -3139,7 +3224,8 @@ test_compressdev_deflate_stateless_fixed_oos_recoverable(void) .zlib_dir = ZLIB_DECOMPRESS, .out_of_space = 0, .big_data = 0, - .overflow = OVERFLOW_ENABLED + .overflow = OVERFLOW_ENABLED, + .ratio = RATIO_ENABLED }; for (i = 0; i < RTE_DIM(compress_test_bufs); i++) { @@ -3176,47 +3262,1059 @@ test_compressdev_deflate_stateless_fixed_oos_recoverable(void) return ret; } -static struct unit_test_suite compressdev_testsuite = { - .suite_name = "compressdev unit test suite", - .setup = testsuite_setup, - .teardown = testsuite_teardown, - .unit_test_cases = { - TEST_CASE_ST(NULL, NULL, - test_compressdev_invalid_configuration), - TEST_CASE_ST(generic_ut_setup, generic_ut_teardown, - test_compressdev_deflate_stateless_fixed), - TEST_CASE_ST(generic_ut_setup, generic_ut_teardown, - test_compressdev_deflate_stateless_dynamic), - TEST_CASE_ST(generic_ut_setup, generic_ut_teardown, - test_compressdev_deflate_stateless_dynamic_big), - TEST_CASE_ST(generic_ut_setup, generic_ut_teardown, - 
test_compressdev_deflate_stateless_multi_op), - TEST_CASE_ST(generic_ut_setup, generic_ut_teardown, - test_compressdev_deflate_stateless_multi_level), - TEST_CASE_ST(generic_ut_setup, generic_ut_teardown, - test_compressdev_deflate_stateless_multi_xform), - TEST_CASE_ST(generic_ut_setup, generic_ut_teardown, - test_compressdev_deflate_stateless_sgl), - TEST_CASE_ST(generic_ut_setup, generic_ut_teardown, - test_compressdev_deflate_stateless_checksum), - TEST_CASE_ST(generic_ut_setup, generic_ut_teardown, - test_compressdev_out_of_space_buffer), - TEST_CASE_ST(generic_ut_setup, generic_ut_teardown, - test_compressdev_deflate_stateful_decomp), - TEST_CASE_ST(generic_ut_setup, generic_ut_teardown, - test_compressdev_deflate_stateful_decomp_checksum), - TEST_CASE_ST(generic_ut_setup, generic_ut_teardown, - test_compressdev_external_mbufs), - TEST_CASE_ST(generic_ut_setup, generic_ut_teardown, - test_compressdev_deflate_stateless_fixed_oos_recoverable), - TEST_CASES_END() /**< NULL terminate unit test array */ +static int +test_compressdev_deflate_im_buffers_LB_1op(void) +{ + struct comp_testsuite_params *ts_params = &testsuite_params; + uint16_t i = 0; + int ret = TEST_SUCCESS; + int j; + const struct rte_compressdev_capabilities *capab; + char *test_buffer = NULL; + + capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE); + TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities"); + + if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_DYNAMIC) == 0) + return -ENOTSUP; + + if ((capab->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) == 0) + return -ENOTSUP; + + test_buffer = rte_malloc(NULL, IM_BUF_DATA_TEST_SIZE_LB, 0); + if (test_buffer == NULL) { + RTE_LOG(ERR, USER1, + "Can't allocate buffer for 'im buffer' test\n"); + return TEST_FAILED; } -}; + + struct interim_data_params int_data = { + (const char * const *)&test_buffer, + 1, + &i, + &ts_params->def_comp_xform, + &ts_params->def_decomp_xform, + 1 + }; + + struct test_data_params 
test_data = { + .compress_state = RTE_COMP_OP_STATELESS, + .decompress_state = RTE_COMP_OP_STATELESS, + /* must be LB to SGL, + * input LB buffer reaches its maximum, + * if ratio 1.3 than another mbuf must be + * created and attached + */ + .buff_type = LB_BOTH, + .zlib_dir = ZLIB_NONE, + .out_of_space = 0, + .big_data = 1, + .overflow = OVERFLOW_DISABLED, + .ratio = RATIO_DISABLED + }; + + ts_params->def_comp_xform->compress.deflate.huffman = + RTE_COMP_HUFFMAN_DYNAMIC; + + /* fill the buffer with data based on rand. data */ + srand(IM_BUF_DATA_TEST_SIZE_LB); + for (j = 0; j < IM_BUF_DATA_TEST_SIZE_LB - 1; ++j) + test_buffer[j] = (uint8_t)(rand() % ((uint8_t)-1)) | 1; + + /* Compress with compressdev, decompress with compressdev */ + if (test_deflate_comp_decomp(&int_data, &test_data) < 0) { + ret = TEST_FAILED; + goto end; + } + +end: + ts_params->def_comp_xform->compress.deflate.huffman = + RTE_COMP_HUFFMAN_DEFAULT; + rte_free(test_buffer); + return ret; +} static int -test_compressdev(void) +test_compressdev_deflate_im_buffers_LB_2ops_first(void) { - return unit_test_suite_runner(&compressdev_testsuite); + struct comp_testsuite_params *ts_params = &testsuite_params; + uint16_t i = 0; + int ret = TEST_SUCCESS; + int j; + const struct rte_compressdev_capabilities *capab; + char *test_buffer = NULL; + const char *test_buffers[2]; + + capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE); + TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities"); + + if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_DYNAMIC) == 0) + return -ENOTSUP; + + if ((capab->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) == 0) + return -ENOTSUP; + + test_buffer = rte_malloc(NULL, IM_BUF_DATA_TEST_SIZE_LB, 0); + if (test_buffer == NULL) { + RTE_LOG(ERR, USER1, + "Can't allocate buffer for 'im buffer' test\n"); + return TEST_FAILED; + } + + test_buffers[0] = test_buffer; + test_buffers[1] = compress_test_bufs[0]; + + struct interim_data_params int_data = { + 
(const char * const *)test_buffers, + 2, + &i, + &ts_params->def_comp_xform, + &ts_params->def_decomp_xform, + 1 + }; + + struct test_data_params test_data = { + .compress_state = RTE_COMP_OP_STATELESS, + .decompress_state = RTE_COMP_OP_STATELESS, + .buff_type = LB_BOTH, + .zlib_dir = ZLIB_NONE, + .out_of_space = 0, + .big_data = 1, + .overflow = OVERFLOW_DISABLED, + .ratio = RATIO_DISABLED + }; + + ts_params->def_comp_xform->compress.deflate.huffman = + RTE_COMP_HUFFMAN_DYNAMIC; + + /* fill the buffer with data based on rand. data */ + srand(IM_BUF_DATA_TEST_SIZE_LB); + for (j = 0; j < IM_BUF_DATA_TEST_SIZE_LB - 1; ++j) + test_buffer[j] = (uint8_t)(rand() % ((uint8_t)-1)) | 1; + + /* Compress with compressdev, decompress with compressdev */ + if (test_deflate_comp_decomp(&int_data, &test_data) < 0) { + ret = TEST_FAILED; + goto end; + } + +end: + ts_params->def_comp_xform->compress.deflate.huffman = + RTE_COMP_HUFFMAN_DEFAULT; + rte_free(test_buffer); + return ret; +} + +static int +test_compressdev_deflate_im_buffers_LB_2ops_second(void) +{ + struct comp_testsuite_params *ts_params = &testsuite_params; + uint16_t i = 0; + int ret = TEST_SUCCESS; + int j; + const struct rte_compressdev_capabilities *capab; + char *test_buffer = NULL; + const char *test_buffers[2]; + + capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE); + TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities"); + + if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_DYNAMIC) == 0) + return -ENOTSUP; + + if ((capab->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) == 0) + return -ENOTSUP; + + test_buffer = rte_malloc(NULL, IM_BUF_DATA_TEST_SIZE_LB, 0); + if (test_buffer == NULL) { + RTE_LOG(ERR, USER1, + "Can't allocate buffer for 'im buffer' test\n"); + return TEST_FAILED; + } + + test_buffers[0] = compress_test_bufs[0]; + test_buffers[1] = test_buffer; + + struct interim_data_params int_data = { + (const char * const *)test_buffers, + 2, + &i, + 
&ts_params->def_comp_xform, + &ts_params->def_decomp_xform, + 1 + }; + + struct test_data_params test_data = { + .compress_state = RTE_COMP_OP_STATELESS, + .decompress_state = RTE_COMP_OP_STATELESS, + .buff_type = LB_BOTH, + .zlib_dir = ZLIB_NONE, + .out_of_space = 0, + .big_data = 1, + .overflow = OVERFLOW_DISABLED, + .ratio = RATIO_DISABLED + }; + + ts_params->def_comp_xform->compress.deflate.huffman = + RTE_COMP_HUFFMAN_DYNAMIC; + + /* fill the buffer with data based on rand. data */ + srand(IM_BUF_DATA_TEST_SIZE_LB); + for (j = 0; j < IM_BUF_DATA_TEST_SIZE_LB - 1; ++j) + test_buffer[j] = (uint8_t)(rand() % ((uint8_t)-1)) | 1; + + /* Compress with compressdev, decompress with compressdev */ + if (test_deflate_comp_decomp(&int_data, &test_data) < 0) { + ret = TEST_FAILED; + goto end; + } + +end: + ts_params->def_comp_xform->compress.deflate.huffman = + RTE_COMP_HUFFMAN_DEFAULT; + rte_free(test_buffer); + return ret; +} + +static int +test_compressdev_deflate_im_buffers_LB_3ops(void) +{ + struct comp_testsuite_params *ts_params = &testsuite_params; + uint16_t i = 0; + int ret = TEST_SUCCESS; + int j; + const struct rte_compressdev_capabilities *capab; + char *test_buffer = NULL; + const char *test_buffers[3]; + + capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE); + TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities"); + + if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_DYNAMIC) == 0) + return -ENOTSUP; + + if ((capab->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) == 0) + return -ENOTSUP; + + test_buffer = rte_malloc(NULL, IM_BUF_DATA_TEST_SIZE_LB, 0); + if (test_buffer == NULL) { + RTE_LOG(ERR, USER1, + "Can't allocate buffer for 'im buffer' test\n"); + return TEST_FAILED; + } + + test_buffers[0] = compress_test_bufs[0]; + test_buffers[1] = test_buffer; + test_buffers[2] = compress_test_bufs[1]; + + struct interim_data_params int_data = { + (const char * const *)test_buffers, + 3, + &i, + &ts_params->def_comp_xform, + 
&ts_params->def_decomp_xform, + 1 + }; + + struct test_data_params test_data = { + .compress_state = RTE_COMP_OP_STATELESS, + .decompress_state = RTE_COMP_OP_STATELESS, + .buff_type = LB_BOTH, + .zlib_dir = ZLIB_NONE, + .out_of_space = 0, + .big_data = 1, + .overflow = OVERFLOW_DISABLED, + .ratio = RATIO_DISABLED + }; + + ts_params->def_comp_xform->compress.deflate.huffman = + RTE_COMP_HUFFMAN_DYNAMIC; + + /* fill the buffer with data based on rand. data */ + srand(IM_BUF_DATA_TEST_SIZE_LB); + for (j = 0; j < IM_BUF_DATA_TEST_SIZE_LB - 1; ++j) + test_buffer[j] = (uint8_t)(rand() % ((uint8_t)-1)) | 1; + + /* Compress with compressdev, decompress with compressdev */ + if (test_deflate_comp_decomp(&int_data, &test_data) < 0) { + ret = TEST_FAILED; + goto end; + } + +end: + ts_params->def_comp_xform->compress.deflate.huffman = + RTE_COMP_HUFFMAN_DEFAULT; + rte_free(test_buffer); + return ret; +} + +static int +test_compressdev_deflate_im_buffers_LB_4ops(void) +{ + struct comp_testsuite_params *ts_params = &testsuite_params; + uint16_t i = 0; + int ret = TEST_SUCCESS; + int j; + const struct rte_compressdev_capabilities *capab; + char *test_buffer = NULL; + const char *test_buffers[4]; + + capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE); + TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities"); + + if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_DYNAMIC) == 0) + return -ENOTSUP; + + if ((capab->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) == 0) + return -ENOTSUP; + + test_buffer = rte_malloc(NULL, IM_BUF_DATA_TEST_SIZE_LB, 0); + if (test_buffer == NULL) { + RTE_LOG(ERR, USER1, + "Can't allocate buffer for 'im buffer' test\n"); + return TEST_FAILED; + } + + test_buffers[0] = compress_test_bufs[0]; + test_buffers[1] = test_buffer; + test_buffers[2] = compress_test_bufs[1]; + test_buffers[3] = test_buffer; + + struct interim_data_params int_data = { + (const char * const *)test_buffers, + 4, + &i, + &ts_params->def_comp_xform, + 
&ts_params->def_decomp_xform, + 1 + }; + + struct test_data_params test_data = { + .compress_state = RTE_COMP_OP_STATELESS, + .decompress_state = RTE_COMP_OP_STATELESS, + .buff_type = LB_BOTH, + .zlib_dir = ZLIB_NONE, + .out_of_space = 0, + .big_data = 1, + .overflow = OVERFLOW_DISABLED, + .ratio = RATIO_DISABLED + }; + + ts_params->def_comp_xform->compress.deflate.huffman = + RTE_COMP_HUFFMAN_DYNAMIC; + + /* fill the buffer with data based on rand. data */ + srand(IM_BUF_DATA_TEST_SIZE_LB); + for (j = 0; j < IM_BUF_DATA_TEST_SIZE_LB - 1; ++j) + test_buffer[j] = (uint8_t)(rand() % ((uint8_t)-1)) | 1; + + /* Compress with compressdev, decompress with compressdev */ + if (test_deflate_comp_decomp(&int_data, &test_data) < 0) { + ret = TEST_FAILED; + goto end; + } + +end: + ts_params->def_comp_xform->compress.deflate.huffman = + RTE_COMP_HUFFMAN_DEFAULT; + rte_free(test_buffer); + return ret; +} + + +static int +test_compressdev_deflate_im_buffers_SGL_1op(void) +{ + struct comp_testsuite_params *ts_params = &testsuite_params; + uint16_t i = 0; + int ret = TEST_SUCCESS; + int j; + const struct rte_compressdev_capabilities *capab; + char *test_buffer = NULL; + + capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE); + TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities"); + + if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_DYNAMIC) == 0) + return -ENOTSUP; + + if ((capab->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) == 0) + return -ENOTSUP; + + test_buffer = rte_malloc(NULL, IM_BUF_DATA_TEST_SIZE_SGL, 0); + if (test_buffer == NULL) { + RTE_LOG(ERR, USER1, + "Can't allocate buffer for big-data\n"); + return TEST_FAILED; + } + + struct interim_data_params int_data = { + (const char * const *)&test_buffer, + 1, + &i, + &ts_params->def_comp_xform, + &ts_params->def_decomp_xform, + 1 + }; + + struct test_data_params test_data = { + .compress_state = RTE_COMP_OP_STATELESS, + .decompress_state = RTE_COMP_OP_STATELESS, + .buff_type = 
SGL_BOTH, + .zlib_dir = ZLIB_NONE, + .out_of_space = 0, + .big_data = 1, + .overflow = OVERFLOW_DISABLED, + .ratio = RATIO_DISABLED + }; + + ts_params->def_comp_xform->compress.deflate.huffman = + RTE_COMP_HUFFMAN_DYNAMIC; + + /* fill the buffer with data based on rand. data */ + srand(IM_BUF_DATA_TEST_SIZE_SGL); + for (j = 0; j < IM_BUF_DATA_TEST_SIZE_SGL - 1; ++j) + test_buffer[j] = (uint8_t)(rand() % ((uint8_t)-1)) | 1; + + /* Compress with compressdev, decompress with compressdev */ + if (test_deflate_comp_decomp(&int_data, &test_data) < 0) { + ret = TEST_FAILED; + goto end; + } + +end: + ts_params->def_comp_xform->compress.deflate.huffman = + RTE_COMP_HUFFMAN_DEFAULT; + rte_free(test_buffer); + return ret; +} + +static int +test_compressdev_deflate_im_buffers_SGL_2ops_first(void) +{ + struct comp_testsuite_params *ts_params = &testsuite_params; + uint16_t i = 0; + int ret = TEST_SUCCESS; + int j; + const struct rte_compressdev_capabilities *capab; + char *test_buffer = NULL; + const char *test_buffers[2]; + + capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE); + TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities"); + + if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_DYNAMIC) == 0) + return -ENOTSUP; + + if ((capab->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) == 0) + return -ENOTSUP; + + test_buffer = rte_malloc(NULL, IM_BUF_DATA_TEST_SIZE_SGL, 0); + if (test_buffer == NULL) { + RTE_LOG(ERR, USER1, + "Can't allocate buffer for big-data\n"); + return TEST_FAILED; + } + + test_buffers[0] = test_buffer; + test_buffers[1] = compress_test_bufs[0]; + + struct interim_data_params int_data = { + (const char * const *)test_buffers, + 2, + &i, + &ts_params->def_comp_xform, + &ts_params->def_decomp_xform, + 1 + }; + + struct test_data_params test_data = { + .compress_state = RTE_COMP_OP_STATELESS, + .decompress_state = RTE_COMP_OP_STATELESS, + .buff_type = SGL_BOTH, + .zlib_dir = ZLIB_NONE, + .out_of_space = 0, + .big_data = 1, 
+ .overflow = OVERFLOW_DISABLED, + .ratio = RATIO_DISABLED + }; + + ts_params->def_comp_xform->compress.deflate.huffman = + RTE_COMP_HUFFMAN_DYNAMIC; + + /* fill the buffer with data based on rand. data */ + srand(IM_BUF_DATA_TEST_SIZE_SGL); + for (j = 0; j < IM_BUF_DATA_TEST_SIZE_SGL - 1; ++j) + test_buffer[j] = (uint8_t)(rand() % ((uint8_t)-1)) | 1; + + /* Compress with compressdev, decompress with compressdev */ + if (test_deflate_comp_decomp(&int_data, &test_data) < 0) { + ret = TEST_FAILED; + goto end; + } + +end: + ts_params->def_comp_xform->compress.deflate.huffman = + RTE_COMP_HUFFMAN_DEFAULT; + rte_free(test_buffer); + return ret; +} + +static int +test_compressdev_deflate_im_buffers_SGL_2ops_second(void) +{ + struct comp_testsuite_params *ts_params = &testsuite_params; + uint16_t i = 0; + int ret = TEST_SUCCESS; + int j; + const struct rte_compressdev_capabilities *capab; + char *test_buffer = NULL; + const char *test_buffers[2]; + + capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE); + TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities"); + + if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_DYNAMIC) == 0) + return -ENOTSUP; + + if ((capab->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) == 0) + return -ENOTSUP; + + test_buffer = rte_malloc(NULL, IM_BUF_DATA_TEST_SIZE_SGL, 0); + if (test_buffer == NULL) { + RTE_LOG(ERR, USER1, + "Can't allocate buffer for big-data\n"); + return TEST_FAILED; + } + + test_buffers[0] = compress_test_bufs[0]; + test_buffers[1] = test_buffer; + + struct interim_data_params int_data = { + (const char * const *)test_buffers, + 2, + &i, + &ts_params->def_comp_xform, + &ts_params->def_decomp_xform, + 1 + }; + + struct test_data_params test_data = { + .compress_state = RTE_COMP_OP_STATELESS, + .decompress_state = RTE_COMP_OP_STATELESS, + .buff_type = SGL_BOTH, + .zlib_dir = ZLIB_NONE, + .out_of_space = 0, + .big_data = 1, + .overflow = OVERFLOW_DISABLED, + .ratio = RATIO_DISABLED + }; + + 
ts_params->def_comp_xform->compress.deflate.huffman = + RTE_COMP_HUFFMAN_DYNAMIC; + + /* fill the buffer with data based on rand. data */ + srand(IM_BUF_DATA_TEST_SIZE_SGL); + for (j = 0; j < IM_BUF_DATA_TEST_SIZE_SGL - 1; ++j) + test_buffer[j] = (uint8_t)(rand() % ((uint8_t)-1)) | 1; + + /* Compress with compressdev, decompress with compressdev */ + if (test_deflate_comp_decomp(&int_data, &test_data) < 0) { + ret = TEST_FAILED; + goto end; + } + +end: + ts_params->def_comp_xform->compress.deflate.huffman = + RTE_COMP_HUFFMAN_DEFAULT; + rte_free(test_buffer); + return ret; +} + +static int +test_compressdev_deflate_im_buffers_SGL_3ops(void) +{ + struct comp_testsuite_params *ts_params = &testsuite_params; + uint16_t i = 0; + int ret = TEST_SUCCESS; + int j; + const struct rte_compressdev_capabilities *capab; + char *test_buffer = NULL; + const char *test_buffers[3]; + + capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE); + TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities"); + + if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_DYNAMIC) == 0) + return -ENOTSUP; + + if ((capab->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) == 0) + return -ENOTSUP; + + test_buffer = rte_malloc(NULL, IM_BUF_DATA_TEST_SIZE_SGL, 0); + if (test_buffer == NULL) { + RTE_LOG(ERR, USER1, + "Can't allocate buffer for big-data\n"); + return TEST_FAILED; + } + + test_buffers[0] = compress_test_bufs[0]; + test_buffers[1] = test_buffer; + test_buffers[2] = compress_test_bufs[1]; + + struct interim_data_params int_data = { + (const char * const *)test_buffers, + 3, + &i, + &ts_params->def_comp_xform, + &ts_params->def_decomp_xform, + 1 + }; + + struct test_data_params test_data = { + .compress_state = RTE_COMP_OP_STATELESS, + .decompress_state = RTE_COMP_OP_STATELESS, + .buff_type = SGL_BOTH, + .zlib_dir = ZLIB_NONE, + .out_of_space = 0, + .big_data = 1, + .overflow = OVERFLOW_DISABLED, + .ratio = RATIO_DISABLED + }; + + 
ts_params->def_comp_xform->compress.deflate.huffman = + RTE_COMP_HUFFMAN_DYNAMIC; + + /* fill the buffer with data based on rand. data */ + srand(IM_BUF_DATA_TEST_SIZE_SGL); + for (j = 0; j < IM_BUF_DATA_TEST_SIZE_SGL - 1; ++j) + test_buffer[j] = (uint8_t)(rand() % ((uint8_t)-1)) | 1; + + /* Compress with compressdev, decompress with compressdev */ + if (test_deflate_comp_decomp(&int_data, &test_data) < 0) { + ret = TEST_FAILED; + goto end; + } + +end: + ts_params->def_comp_xform->compress.deflate.huffman = + RTE_COMP_HUFFMAN_DEFAULT; + rte_free(test_buffer); + return ret; +} + + +static int +test_compressdev_deflate_im_buffers_SGL_4ops(void) +{ + struct comp_testsuite_params *ts_params = &testsuite_params; + uint16_t i = 0; + int ret = TEST_SUCCESS; + int j; + const struct rte_compressdev_capabilities *capab; + char *test_buffer = NULL; + const char *test_buffers[4]; + + capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE); + TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities"); + + if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_DYNAMIC) == 0) + return -ENOTSUP; + + if ((capab->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) == 0) + return -ENOTSUP; + + test_buffer = rte_malloc(NULL, IM_BUF_DATA_TEST_SIZE_SGL, 0); + if (test_buffer == NULL) { + RTE_LOG(ERR, USER1, + "Can't allocate buffer for big-data\n"); + return TEST_FAILED; + } + + test_buffers[0] = compress_test_bufs[0]; + test_buffers[1] = test_buffer; + test_buffers[2] = compress_test_bufs[1]; + test_buffers[3] = test_buffer; + + struct interim_data_params int_data = { + (const char * const *)test_buffers, + 4, + &i, + &ts_params->def_comp_xform, + &ts_params->def_decomp_xform, + 1 + }; + + struct test_data_params test_data = { + .compress_state = RTE_COMP_OP_STATELESS, + .decompress_state = RTE_COMP_OP_STATELESS, + .buff_type = SGL_BOTH, + .zlib_dir = ZLIB_NONE, + .out_of_space = 0, + .big_data = 1, + .overflow = OVERFLOW_DISABLED, + .ratio = RATIO_DISABLED + }; + + 
ts_params->def_comp_xform->compress.deflate.huffman = + RTE_COMP_HUFFMAN_DYNAMIC; + + /* fill the buffer with data based on rand. data */ + srand(IM_BUF_DATA_TEST_SIZE_SGL); + for (j = 0; j < IM_BUF_DATA_TEST_SIZE_SGL - 1; ++j) + test_buffer[j] = (uint8_t)(rand() % ((uint8_t)-1)) | 1; + + /* Compress with compressdev, decompress with compressdev */ + if (test_deflate_comp_decomp(&int_data, &test_data) < 0) { + ret = TEST_FAILED; + goto end; + } + +end: + ts_params->def_comp_xform->compress.deflate.huffman = + RTE_COMP_HUFFMAN_DEFAULT; + rte_free(test_buffer); + return ret; +} + +static int +test_compressdev_deflate_im_buffers_SGL_over_1op(void) +{ + struct comp_testsuite_params *ts_params = &testsuite_params; + uint16_t i = 0; + int ret = TEST_SUCCESS; + int j; + const struct rte_compressdev_capabilities *capab; + char *test_buffer = NULL; + + capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE); + TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities"); + + if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_DYNAMIC) == 0) + return -ENOTSUP; + + if ((capab->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) == 0) + return -ENOTSUP; + + test_buffer = rte_malloc(NULL, IM_BUF_DATA_TEST_SIZE_OVER, 0); + if (test_buffer == NULL) { + RTE_LOG(ERR, USER1, + "Can't allocate buffer for big-data\n"); + return TEST_FAILED; + } + + struct interim_data_params int_data = { + (const char * const *)&test_buffer, + 1, + &i, + &ts_params->def_comp_xform, + &ts_params->def_decomp_xform, + 1 + }; + + struct test_data_params test_data = { + .compress_state = RTE_COMP_OP_STATELESS, + .decompress_state = RTE_COMP_OP_STATELESS, + .buff_type = SGL_BOTH, + .zlib_dir = ZLIB_NONE, + .out_of_space = 0, + .big_data = 1, + .overflow = OVERFLOW_DISABLED, + .ratio = RATIO_DISABLED + }; + + ts_params->def_comp_xform->compress.deflate.huffman = + RTE_COMP_HUFFMAN_DYNAMIC; + + /* fill the buffer with data based on rand. 
data */ + srand(IM_BUF_DATA_TEST_SIZE_OVER); + for (j = 0; j < IM_BUF_DATA_TEST_SIZE_OVER - 1; ++j) + test_buffer[j] = (uint8_t)(rand() % ((uint8_t)-1)) | 1; + + /* Compress with compressdev, decompress with compressdev */ + if (test_deflate_comp_decomp(&int_data, &test_data) < 0) { + ret = TEST_SUCCESS; + goto end; + } + +end: + ts_params->def_comp_xform->compress.deflate.huffman = + RTE_COMP_HUFFMAN_DEFAULT; + rte_free(test_buffer); + return ret; +} + + +static int +test_compressdev_deflate_im_buffers_SGL_over_2ops_first(void) +{ + struct comp_testsuite_params *ts_params = &testsuite_params; + uint16_t i = 0; + int ret = TEST_SUCCESS; + int j; + const struct rte_compressdev_capabilities *capab; + char *test_buffer = NULL; + const char *test_buffers[2]; + + capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE); + TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities"); + + if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_DYNAMIC) == 0) + return -ENOTSUP; + + if ((capab->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) == 0) + return -ENOTSUP; + + test_buffer = rte_malloc(NULL, IM_BUF_DATA_TEST_SIZE_OVER, 0); + if (test_buffer == NULL) { + RTE_LOG(ERR, USER1, + "Can't allocate buffer for big-data\n"); + return TEST_FAILED; + } + + test_buffers[0] = test_buffer; + test_buffers[1] = compress_test_bufs[0]; + + struct interim_data_params int_data = { + (const char * const *)test_buffers, + 2, + &i, + &ts_params->def_comp_xform, + &ts_params->def_decomp_xform, + 1 + }; + + struct test_data_params test_data = { + .compress_state = RTE_COMP_OP_STATELESS, + .decompress_state = RTE_COMP_OP_STATELESS, + .buff_type = SGL_BOTH, + .zlib_dir = ZLIB_NONE, + .out_of_space = 0, + .big_data = 1, + .overflow = OVERFLOW_DISABLED, + .ratio = RATIO_DISABLED + }; + + ts_params->def_comp_xform->compress.deflate.huffman = + RTE_COMP_HUFFMAN_DYNAMIC; + + /* fill the buffer with data based on rand. 
data */ + srand(IM_BUF_DATA_TEST_SIZE_OVER); + for (j = 0; j < IM_BUF_DATA_TEST_SIZE_OVER - 1; ++j) + test_buffer[j] = (uint8_t)(rand() % ((uint8_t)-1)) | 1; + + /* Compress with compressdev, decompress with compressdev */ + if (test_deflate_comp_decomp(&int_data, &test_data) < 0) { + ret = TEST_SUCCESS; + goto end; + } + +end: + ts_params->def_comp_xform->compress.deflate.huffman = + RTE_COMP_HUFFMAN_DEFAULT; + rte_free(test_buffer); + return ret; +} + +static int +test_compressdev_deflate_im_buffers_SGL_over_2ops_second(void) +{ + struct comp_testsuite_params *ts_params = &testsuite_params; + uint16_t i = 0; + int ret = TEST_SUCCESS; + int j; + const struct rte_compressdev_capabilities *capab; + char *test_buffer = NULL; + const char *test_buffers[2]; + + capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE); + TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities"); + + if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_DYNAMIC) == 0) + return -ENOTSUP; + + if ((capab->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) == 0) + return -ENOTSUP; + + test_buffer = rte_malloc(NULL, IM_BUF_DATA_TEST_SIZE_OVER, 0); + if (test_buffer == NULL) { + RTE_LOG(ERR, USER1, + "Can't allocate buffer for big-data\n"); + return TEST_FAILED; + } + + test_buffers[0] = compress_test_bufs[0]; + test_buffers[1] = test_buffer; + + struct interim_data_params int_data = { + (const char * const *)test_buffers, + 2, + &i, + &ts_params->def_comp_xform, + &ts_params->def_decomp_xform, + 1 + }; + + struct test_data_params test_data = { + .compress_state = RTE_COMP_OP_STATELESS, + .decompress_state = RTE_COMP_OP_STATELESS, + .buff_type = SGL_BOTH, + .zlib_dir = ZLIB_NONE, + .out_of_space = 0, + .big_data = 1, + .overflow = OVERFLOW_DISABLED, + .ratio = RATIO_DISABLED + }; + + ts_params->def_comp_xform->compress.deflate.huffman = + RTE_COMP_HUFFMAN_DYNAMIC; + + /* fill the buffer with data based on rand. 
data */ + srand(IM_BUF_DATA_TEST_SIZE_OVER); + for (j = 0; j < IM_BUF_DATA_TEST_SIZE_OVER - 1; ++j) + test_buffer[j] = (uint8_t)(rand() % ((uint8_t)-1)) | 1; + + /* Compress with compressdev, decompress with compressdev */ + if (test_deflate_comp_decomp(&int_data, &test_data) < 0) { + ret = TEST_SUCCESS; + goto end; + } + +end: + ts_params->def_comp_xform->compress.deflate.huffman = + RTE_COMP_HUFFMAN_DEFAULT; + rte_free(test_buffer); + return ret; +} + + +static struct unit_test_suite compressdev_testsuite = { + .suite_name = "compressdev unit test suite", + .setup = testsuite_setup, + .teardown = testsuite_teardown, + .unit_test_cases = { + TEST_CASE_ST(NULL, NULL, + test_compressdev_invalid_configuration), + TEST_CASE_ST(generic_ut_setup, generic_ut_teardown, + test_compressdev_deflate_stateless_fixed), + TEST_CASE_ST(generic_ut_setup, generic_ut_teardown, + test_compressdev_deflate_stateless_dynamic), + TEST_CASE_ST(generic_ut_setup, generic_ut_teardown, + test_compressdev_deflate_stateless_dynamic_big), + TEST_CASE_ST(generic_ut_setup, generic_ut_teardown, + test_compressdev_deflate_stateless_multi_op), + TEST_CASE_ST(generic_ut_setup, generic_ut_teardown, + test_compressdev_deflate_stateless_multi_level), + TEST_CASE_ST(generic_ut_setup, generic_ut_teardown, + test_compressdev_deflate_stateless_multi_xform), + TEST_CASE_ST(generic_ut_setup, generic_ut_teardown, + test_compressdev_deflate_stateless_sgl), + TEST_CASE_ST(generic_ut_setup, generic_ut_teardown, + test_compressdev_deflate_stateless_checksum), + TEST_CASE_ST(generic_ut_setup, generic_ut_teardown, + test_compressdev_out_of_space_buffer), + TEST_CASE_ST(generic_ut_setup, generic_ut_teardown, + test_compressdev_deflate_stateful_decomp), + TEST_CASE_ST(generic_ut_setup, generic_ut_teardown, + test_compressdev_deflate_stateful_decomp_checksum), + TEST_CASE_ST(generic_ut_setup, generic_ut_teardown, + test_compressdev_external_mbufs), + TEST_CASE_ST(generic_ut_setup, generic_ut_teardown, + 
test_compressdev_deflate_stateless_fixed_oos_recoverable), + TEST_CASES_END() /**< NULL terminate unit test array */ + } +}; + +/**********************************/ + +static struct unit_test_suite compressdev_testsuite_im_buf_positive = { + .suite_name = "compressdev unit test suite for 'im buffer'", + .setup = testsuite_setup, + .teardown = testsuite_teardown, + .unit_test_cases = { + TEST_CASE_ST(generic_ut_setup, generic_ut_teardown, + test_compressdev_deflate_im_buffers_LB_1op), + TEST_CASE_ST(generic_ut_setup, generic_ut_teardown, + test_compressdev_deflate_im_buffers_LB_2ops_first), + TEST_CASE_ST(generic_ut_setup, generic_ut_teardown, + test_compressdev_deflate_im_buffers_LB_2ops_second), + TEST_CASE_ST(generic_ut_setup, generic_ut_teardown, + test_compressdev_deflate_im_buffers_LB_3ops), + + TEST_CASE_ST(generic_ut_setup, generic_ut_teardown, + test_compressdev_deflate_im_buffers_LB_4ops), + TEST_CASE_ST(generic_ut_setup, generic_ut_teardown, + test_compressdev_deflate_im_buffers_SGL_1op), + + TEST_CASE_ST(generic_ut_setup, generic_ut_teardown, + test_compressdev_deflate_im_buffers_SGL_2ops_first), + TEST_CASE_ST(generic_ut_setup, generic_ut_teardown, + test_compressdev_deflate_im_buffers_SGL_2ops_second), + TEST_CASE_ST(generic_ut_setup, generic_ut_teardown, + test_compressdev_deflate_im_buffers_SGL_3ops), + TEST_CASE_ST(generic_ut_setup, generic_ut_teardown, + test_compressdev_deflate_im_buffers_SGL_4ops), + + TEST_CASES_END() /**< NULL terminate unit test array */ + } +}; + +static struct unit_test_suite compressdev_testsuite_im_buf_negative = { + .suite_name = "compressdev unit test suite for 'im buffer'", + .setup = testsuite_setup, + .teardown = testsuite_teardown, + .unit_test_cases = { + + /* For this test huge mempool is necessary. 
+ * It tests one case: + * only one op containing big amount of data, so that + * number of requested descriptors higher than number + * of available descriptors (128) + */ + TEST_CASE_ST(generic_ut_setup, generic_ut_teardown, + test_compressdev_deflate_im_buffers_SGL_over_1op), + + /* For this test huge mempool is necessary. + * 2 ops. First op contains big amount of data: + * number of requested descriptors higher than number + * of available descriptors (128), the second op is + * relatively small. In this case both ops are rejected + */ + TEST_CASE_ST(generic_ut_setup, generic_ut_teardown, + test_compressdev_deflate_im_buffers_SGL_over_2ops_first), + + TEST_CASE_ST(generic_ut_setup, generic_ut_teardown, + test_compressdev_deflate_im_buffers_SGL_over_2ops_second), + + TEST_CASES_END() /**< NULL terminate unit test array */ + } +}; + +static int +test_compressdev(void) +{ + testsuite_params.suite_name = compressdev_testsuite.suite_name; + return unit_test_suite_runner(&compressdev_testsuite); +} + +static int +test_compressdev_im_buf_positive(void) +{ + testsuite_params.suite_name = + compressdev_testsuite_im_buf_positive.suite_name; + return unit_test_suite_runner(&compressdev_testsuite_im_buf_positive); +} + +static int +test_compressdev_im_buf_negative(void) +{ + testsuite_params.suite_name = + compressdev_testsuite_im_buf_negative.suite_name; + return unit_test_suite_runner(&compressdev_testsuite_im_buf_negative); } REGISTER_TEST_COMMAND(compressdev_autotest, test_compressdev); +REGISTER_TEST_COMMAND(compressdev_autotest_im_pos, + test_compressdev_im_buf_positive); +REGISTER_TEST_COMMAND(compressdev_autotest_im_neg, + test_compressdev_im_buf_negative); -- 2.17.1