From mboxrd@z Thu Jan 1 00:00:00 1970
From: Adam Dybkowski
To: dev@dpdk.org, fiona.trahe@intel.com, akhil.goyal@nxp.com
Cc: Adam Dybkowski
Date: Wed, 8 Apr 2020 14:51:01 +0200
Message-Id: <20200408125101.25764-3-adamx.dybkowski@intel.com>
X-Mailer: git-send-email 2.17.1
In-Reply-To: <20200408125101.25764-1-adamx.dybkowski@intel.com>
References: <20200408125101.25764-1-adamx.dybkowski@intel.com>
Subject: [dpdk-dev] [PATCH v2 2/2] test/compress: im buffer too small - add unit tests
List-Id: DPDK patches and discussions

This patch adds a new test suite that verifies the handling of the
"internal QAT IM buffer too small" case. These unit tests are specific
to the QAT PMD, which is why they are kept in a separate test suite.
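
The suite is registered under its own test command (see the
REGISTER_TEST_COMMAND() call at the end of the patch), so it can be run
independently of the generic compressdev_autotest. As a usage sketch,
assuming the standard dpdk-test application and a QAT compression PMD
probed as device 0 (its name must end with "qat_comp" or the suite
reports an error):

    RTE>> compressdev_qat_specific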
Signed-off-by: Adam Dybkowski --- app/test/test_compressdev.c | 1251 ++++++++++++++++++++++++++++++++--- 1 file changed, 1177 insertions(+), 74 deletions(-) diff --git a/app/test/test_compressdev.c b/app/test/test_compressdev.c index 7549135c2..40eb74dd3 100644 --- a/app/test/test_compressdev.c +++ b/app/test/test_compressdev.c @@ -1,10 +1,11 @@ /* SPDX-License-Identifier: BSD-3-Clause * Copyright(c) 2018 - 2019 Intel Corporation */ -#include -#include #include +#include #include +#include +#include #include #include @@ -18,6 +19,8 @@ #include "test.h" #define DIV_CEIL(a, b) ((a) / (b) + ((a) % (b) != 0)) +#define ENDS_WITH(s, suffix) (strlen(s) >= strlen(suffix) && \ + !strcmp(suffix, s + strlen(s) - strlen(suffix))) #define DEFAULT_WINDOW_SIZE 15 #define DEFAULT_MEM_LEVEL 8 @@ -30,6 +33,7 @@ * due to the compress block headers */ #define COMPRESS_BUF_SIZE_RATIO 1.3 +#define COMPRESS_BUF_SIZE_RATIO_DISABLED 1.0 #define COMPRESS_BUF_SIZE_RATIO_OVERFLOW 0.2 #define NUM_LARGE_MBUFS 16 #define SMALL_SEG_SIZE 256 @@ -52,6 +56,25 @@ #define NUM_BIG_MBUFS 4 #define BIG_DATA_TEST_SIZE (MAX_DATA_MBUF_SIZE * NUM_BIG_MBUFS / 2) +/* constants for "im buffer" tests start here */ +#define IM_NUM_BIG_MBUFS (512 + 1) +#define IM_BIG_DATA_TEST_SIZE (MAX_DATA_MBUF_SIZE * 2) +/* number of mbufs lower than number of inflight ops */ +#define IM_BUF_NUM_MBUFS 3 +/* above threshold (QAT_FALLBACK_THLD) and below max mbuf size */ +#define IM_BUF_DATA_TEST_SIZE_LB 59600 +/* data size smaller than the queue capacity */ +#define IM_BUF_DATA_TEST_SIZE_SGL (MAX_DATA_MBUF_SIZE * IM_BUF_NUM_MBUFS) +/* number of mbufs bigger than number of inflight ops */ +#define IM_BUF_NUM_MBUFS_OVER (NUM_MAX_INFLIGHT_OPS + 1) +/* data size bigger than the queue capacity */ +#define IM_BUF_DATA_TEST_SIZE_OVER (MAX_DATA_MBUF_SIZE * IM_BUF_NUM_MBUFS_OVER) +/* number of mid-size mbufs */ +#define IM_BUF_NUM_MBUFS_MID ((NUM_MAX_INFLIGHT_OPS / 3) + 1) +/* capacity of mid-size mbufs */ +#define IM_BUF_DATA_TEST_SIZE_MID (MAX_DATA_MBUF_SIZE * IM_BUF_NUM_MBUFS_MID) + + const char * huffman_type_strings[] = { [RTE_COMP_HUFFMAN_DEFAULT] = "PMD default", @@ -78,6 +101,11 @@ enum overflow_test { OVERFLOW_ENABLED }; +enum ratio_switch { + RATIO_DISABLED, + RATIO_ENABLED +}; + enum operation_type { OPERATION_COMPRESSION, OPERATION_DECOMPRESSION @@ -88,6 +116,7 @@ struct priv_op_data { }; struct comp_testsuite_params { + const char *suite_name; struct rte_mempool *large_mbuf_pool; struct rte_mempool *small_mbuf_pool; struct rte_mempool *big_mbuf_pool; @@ -123,6 +152,7 @@ struct test_data_params { const struct rte_memzone *uncompbuf_memzone; /* overflow test activation */ enum overflow_test overflow; + enum ratio_switch ratio; }; struct test_private_arrays { @@ -141,6 +171,26 @@ struct test_private_arrays { static struct comp_testsuite_params testsuite_params = { 0 }; + +static uint8_t +is_qat_specific_testsuite(void) +{ + return strstr(testsuite_params.suite_name, "QAT") != NULL; +} + +static unsigned int +get_num_big_mbufs(void) +{ + return is_qat_specific_testsuite() ? IM_NUM_BIG_MBUFS : NUM_BIG_MBUFS; +} + +static unsigned int +get_big_data_test_size(void) +{ + return is_qat_specific_testsuite() ? 
+ IM_BIG_DATA_TEST_SIZE : BIG_DATA_TEST_SIZE; +} + static void testsuite_teardown(void) { @@ -212,7 +262,7 @@ testsuite_setup(void) /* Create mempool with big buffers for SGL testing */ ts_params->big_mbuf_pool = rte_pktmbuf_pool_create("big_mbuf_pool", - NUM_BIG_MBUFS + 1, + get_num_big_mbufs() + 1, CACHE_SIZE, 0, MAX_MBUF_SEGMENT_SIZE, rte_socket_id()); @@ -316,6 +366,8 @@ test_compressdev_invalid_configuration(void) }; struct rte_compressdev_info dev_info; + RTE_LOG(INFO, USER1, "This is a negative test, errors are expected\n"); + /* Invalid configuration with 0 queue pairs */ memcpy(&invalid_config, &valid_config, sizeof(struct rte_compressdev_config)); @@ -691,7 +743,7 @@ prepare_sgl_bufs(const char *test_buf, struct rte_mbuf *head_buf, if (data_ptr != NULL) { /* Copy characters without NULL terminator */ - strncpy(buf_ptr, data_ptr, data_size); + memcpy(buf_ptr, data_ptr, data_size); data_ptr += data_size; } remaining_data -= data_size; @@ -731,7 +783,7 @@ prepare_sgl_bufs(const char *test_buf, struct rte_mbuf *head_buf, } if (data_ptr != NULL) { /* Copy characters without NULL terminator */ - strncpy(buf_ptr, data_ptr, data_size); + memcpy(buf_ptr, data_ptr, data_size); data_ptr += data_size; } remaining_data -= data_size; @@ -760,17 +812,20 @@ test_run_enqueue_dequeue(struct rte_comp_op **ops, { uint16_t num_enqd, num_deqd, num_total_deqd; unsigned int deqd_retries = 0; + int res = 0; /* Enqueue and dequeue all operations */ num_enqd = rte_compressdev_enqueue_burst(0, 0, ops, num_bufs); if (num_enqd < num_bufs) { RTE_LOG(ERR, USER1, "Some operations could not be enqueued\n"); - return -1; + res = -1; } + /* dequeue ops even on error (same number of ops as was enqueued) */ + num_total_deqd = 0; - do { + while (num_total_deqd < num_enqd) { /* * If retrying a dequeue call, wait for 10 ms to allow * enough time to the driver to process the operations @@ -783,7 +838,8 @@ test_run_enqueue_dequeue(struct rte_comp_op **ops, if (deqd_retries == MAX_DEQD_RETRIES) { RTE_LOG(ERR, USER1, "Not all operations could be dequeued\n"); - return -1; + res = -1; + break; } usleep(DEQUEUE_WAIT_TIME); } @@ -792,9 +848,9 @@ test_run_enqueue_dequeue(struct rte_comp_op **ops, num_total_deqd += num_deqd; deqd_retries++; - } while (num_total_deqd < num_enqd); + } - return 0; + return res; } /** @@ -956,7 +1012,9 @@ test_mbufs_calculate_data_size( /* local variables: */ uint32_t data_size; struct priv_op_data *priv_data; - float ratio; + float ratio_val; + enum ratio_switch ratio = test_data->ratio; + uint8_t not_zlib_compr; /* true if zlib isn't current compression dev */ enum overflow_test overflow = test_data->overflow; @@ -973,13 +1031,16 @@ test_mbufs_calculate_data_size( not_zlib_compr = (test_data->zlib_dir == ZLIB_DECOMPRESS || test_data->zlib_dir == ZLIB_NONE); - ratio = (not_zlib_compr && + ratio_val = (ratio == RATIO_ENABLED) ? + COMPRESS_BUF_SIZE_RATIO : + COMPRESS_BUF_SIZE_RATIO_DISABLED; + + ratio_val = (not_zlib_compr && (overflow == OVERFLOW_ENABLED)) ? 
COMPRESS_BUF_SIZE_RATIO_OVERFLOW : - COMPRESS_BUF_SIZE_RATIO; - - data_size = strlen(test_bufs[i]) * ratio; + ratio_val; + data_size = strlen(test_bufs[i]) * ratio_val; } else { priv_data = (struct priv_op_data *) (ops_processed[i] + 1); @@ -1085,6 +1146,9 @@ test_setup_output_bufs( } else { for (i = 0; i < num_bufs; i++) { + enum rte_comp_huffman comp_huffman = + ts_params->def_comp_xform->compress.deflate.huffman; + /* data size calculation */ data_size = test_mbufs_calculate_data_size( op_type, @@ -1094,6 +1158,11 @@ test_setup_output_bufs( test_data, i); + if (comp_huffman != RTE_COMP_HUFFMAN_DYNAMIC) { + if (op_type == OPERATION_DECOMPRESSION) + data_size *= COMPRESS_BUF_SIZE_RATIO; + } + /* data allocation */ if (buff_type == SGL_BOTH || buff_type == LB_TO_SGL) { ret = prepare_sgl_bufs(NULL, current_bufs[i], @@ -1192,6 +1261,11 @@ test_deflate_comp_run(const struct interim_data_params *int_data, ops[i]->src.length = rte_pktmbuf_pkt_len(uncomp_bufs[i]); ops[i]->dst.offset = 0; + RTE_LOG(DEBUG, USER1, + "Uncompressed buffer length = %u compressed buffer length = %u", + rte_pktmbuf_pkt_len(uncomp_bufs[i]), + rte_pktmbuf_pkt_len(comp_bufs[i])); + if (operation_type == RTE_COMP_OP_STATELESS) { ops[i]->flush_flag = RTE_COMP_FLUSH_FINAL; } else { @@ -1313,6 +1387,7 @@ test_deflate_comp_run(const struct interim_data_params *int_data, if (ret_status < 0) for (i = 0; i < num_bufs; i++) { rte_comp_op_free(ops[i]); + ops[i] = NULL; ops_processed[i] = NULL; } @@ -1431,7 +1506,7 @@ test_deflate_comp_finalize(const struct interim_data_params *int_data, } RTE_LOG(ERR, USER1, - "Some operations were not successful\n"); + "Comp: Some operations were not successful\n"); return -1; } priv_data = (struct priv_op_data *)(ops_processed[i] + 1); @@ -1490,6 +1565,7 @@ test_deflate_decomp_run(const struct interim_data_params *int_data, /* from test_priv_data: */ struct rte_mbuf **uncomp_bufs = test_priv_data->uncomp_bufs; + struct rte_mbuf **comp_bufs = test_priv_data->comp_bufs; struct rte_comp_op **ops = test_priv_data->ops; struct rte_comp_op **ops_processed = test_priv_data->ops_processed; void **priv_xforms = test_priv_data->priv_xforms; @@ -1510,7 +1586,7 @@ test_deflate_decomp_run(const struct interim_data_params *int_data, /* Source buffer is the compressed data from the previous operations */ for (i = 0; i < num_bufs; i++) { - ops[i]->m_src = ops_processed[i]->m_dst; + ops[i]->m_src = comp_bufs[i]; ops[i]->m_dst = uncomp_bufs[i]; ops[i]->src.offset = 0; /* @@ -1740,6 +1816,10 @@ test_deflate_decomp_finalize(const struct interim_data_params *int_data, RTE_COMP_OP_STATUS_OUT_OF_SPACE_RECOVERABLE || ops_processed[i]->status == RTE_COMP_OP_STATUS_SUCCESS)) { + + RTE_LOG(DEBUG, USER1, + ".............RECOVERABLE\n"); + /* collect the output into all_decomp_data */ const void *ptr = rte_pktmbuf_read( ops_processed[i]->m_dst, @@ -1777,7 +1857,6 @@ test_deflate_decomp_finalize(const struct interim_data_params *int_data, ops[i]->src.length -= ops_processed[i]->consumed; /* repeat the operation */ - //goto next_step; return 2; } else { /* Compare the original stream with the */ @@ -1808,7 +1887,8 @@ test_deflate_decomp_finalize(const struct interim_data_params *int_data, } else if (ops_processed[i]->status != RTE_COMP_OP_STATUS_SUCCESS) { RTE_LOG(ERR, USER1, - "Some operations were not successful\n"); + "Decomp: Some operations were not successful, status = %u\n", + ops_processed[i]->status); return -1; } priv_data = (struct priv_op_data *)(ops_processed[i] + 1); @@ -1986,7 +2066,6 @@ 
test_deflate_comp_decomp(const struct interim_data_params *int_data, "Compress device does not support DEFLATE\n"); return -1; } - //test_objects_init(&test_priv_data, num_bufs); /* Prepare the source mbufs with the data */ ret = test_setup_com_bufs(int_data, test_data, &test_priv_data); @@ -1995,6 +2074,8 @@ test_deflate_comp_decomp(const struct interim_data_params *int_data, goto exit; } + RTE_LOG(DEBUG, USER1, "<<< COMPRESSION >>>\n"); + /* COMPRESSION */ /* Prepare output (destination) mbufs for compressed data */ @@ -2031,6 +2112,8 @@ test_deflate_comp_decomp(const struct interim_data_params *int_data, /* DECOMPRESSION */ + RTE_LOG(DEBUG, USER1, "<<< DECOMPRESSION >>>\n"); + /* Prepare output (destination) mbufs for decompressed data */ ret = test_setup_output_bufs( OPERATION_DECOMPRESSION, @@ -2096,7 +2179,6 @@ test_deflate_comp_decomp(const struct interim_data_params *int_data, priv_xforms[i] = NULL; } } - for (i = 0; i < num_bufs; i++) { rte_pktmbuf_free(uncomp_bufs[i]); rte_pktmbuf_free(comp_bufs[i]); @@ -2152,7 +2234,8 @@ test_compressdev_deflate_stateless_fixed(void) .zlib_dir = ZLIB_DECOMPRESS, .out_of_space = 0, .big_data = 0, - .overflow = OVERFLOW_DISABLED + .overflow = OVERFLOW_DISABLED, + .ratio = RATIO_ENABLED }; for (i = 0; i < RTE_DIM(compress_test_bufs); i++) { @@ -2223,7 +2306,8 @@ test_compressdev_deflate_stateless_dynamic(void) .zlib_dir = ZLIB_DECOMPRESS, .out_of_space = 0, .big_data = 0, - .overflow = OVERFLOW_DISABLED + .overflow = OVERFLOW_DISABLED, + .ratio = RATIO_ENABLED }; for (i = 0; i < RTE_DIM(compress_test_bufs); i++) { @@ -2278,7 +2362,8 @@ test_compressdev_deflate_stateless_multi_op(void) .zlib_dir = ZLIB_DECOMPRESS, .out_of_space = 0, .big_data = 0, - .overflow = OVERFLOW_DISABLED + .overflow = OVERFLOW_DISABLED, + .ratio = RATIO_ENABLED }; /* Compress with compressdev, decompress with Zlib */ @@ -2332,7 +2417,8 @@ test_compressdev_deflate_stateless_multi_level(void) .zlib_dir = ZLIB_DECOMPRESS, .out_of_space = 0, .big_data = 0, - .overflow = OVERFLOW_DISABLED + .overflow = OVERFLOW_DISABLED, + .ratio = RATIO_ENABLED }; for (i = 0; i < RTE_DIM(compress_test_bufs); i++) { @@ -2422,7 +2508,8 @@ test_compressdev_deflate_stateless_multi_xform(void) .zlib_dir = ZLIB_DECOMPRESS, .out_of_space = 0, .big_data = 0, - .overflow = OVERFLOW_DISABLED + .overflow = OVERFLOW_DISABLED, + .ratio = RATIO_ENABLED }; /* Compress with compressdev, decompress with Zlib */ @@ -2471,7 +2558,8 @@ test_compressdev_deflate_stateless_sgl(void) .zlib_dir = ZLIB_DECOMPRESS, .out_of_space = 0, .big_data = 0, - .overflow = OVERFLOW_DISABLED + .overflow = OVERFLOW_DISABLED, + .ratio = RATIO_ENABLED }; for (i = 0; i < RTE_DIM(compress_test_bufs); i++) { @@ -2582,7 +2670,8 @@ test_compressdev_deflate_stateless_checksum(void) .zlib_dir = ZLIB_DECOMPRESS, .out_of_space = 0, .big_data = 0, - .overflow = OVERFLOW_DISABLED + .overflow = OVERFLOW_DISABLED, + .ratio = RATIO_ENABLED }; /* Check if driver supports crc32 checksum and test */ @@ -2700,7 +2789,8 @@ test_compressdev_out_of_space_buffer(void) .zlib_dir = ZLIB_DECOMPRESS, .out_of_space = 1, /* run out-of-space test */ .big_data = 0, - .overflow = OVERFLOW_DISABLED + .overflow = OVERFLOW_DISABLED, + .ratio = RATIO_ENABLED }; /* Compress with compressdev, decompress with Zlib */ test_data.zlib_dir = ZLIB_DECOMPRESS; @@ -2742,7 +2832,7 @@ test_compressdev_deflate_stateless_dynamic_big(void) struct comp_testsuite_params *ts_params = &testsuite_params; uint16_t i = 0; int ret; - int j; + unsigned int j; const struct 
rte_compressdev_capabilities *capab; char *test_buffer = NULL; @@ -2755,7 +2845,7 @@ test_compressdev_deflate_stateless_dynamic_big(void) if ((capab->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) == 0) return -ENOTSUP; - test_buffer = rte_malloc(NULL, BIG_DATA_TEST_SIZE, 0); + test_buffer = rte_malloc(NULL, get_big_data_test_size(), 0); if (test_buffer == NULL) { RTE_LOG(ERR, USER1, "Can't allocate buffer for big-data\n"); @@ -2778,7 +2868,8 @@ test_compressdev_deflate_stateless_dynamic_big(void) .zlib_dir = ZLIB_DECOMPRESS, .out_of_space = 0, .big_data = 1, - .overflow = OVERFLOW_DISABLED + .overflow = OVERFLOW_DISABLED, + .ratio = RATIO_DISABLED }; ts_params->def_comp_xform->compress.deflate.huffman = @@ -2786,9 +2877,9 @@ test_compressdev_deflate_stateless_dynamic_big(void) /* fill the buffer with data based on rand. data */ srand(BIG_DATA_TEST_SIZE); - for (j = 0; j < BIG_DATA_TEST_SIZE - 1; ++j) + for (j = 0; j < get_big_data_test_size() - 1; ++j) test_buffer[j] = (uint8_t)(rand() % ((uint8_t)-1)) | 1; - test_buffer[BIG_DATA_TEST_SIZE-1] = 0; + test_buffer[get_big_data_test_size() - 1] = 0; /* Compress with compressdev, decompress with Zlib */ test_data.zlib_dir = ZLIB_DECOMPRESS; @@ -2843,7 +2934,8 @@ test_compressdev_deflate_stateful_decomp(void) .big_data = 0, .decompress_output_block_size = 2000, .decompress_steps_max = 4, - .overflow = OVERFLOW_DISABLED + .overflow = OVERFLOW_DISABLED, + .ratio = RATIO_ENABLED }; /* Compress with Zlib, decompress with compressdev */ @@ -2926,7 +3018,8 @@ test_compressdev_deflate_stateful_decomp_checksum(void) .big_data = 0, .decompress_output_block_size = 2000, .decompress_steps_max = 4, - .overflow = OVERFLOW_DISABLED + .overflow = OVERFLOW_DISABLED, + .ratio = RATIO_ENABLED }; /* Check if driver supports crc32 checksum and test */ @@ -3139,7 +3232,8 @@ test_compressdev_deflate_stateless_fixed_oos_recoverable(void) .zlib_dir = ZLIB_DECOMPRESS, .out_of_space = 0, .big_data = 0, - .overflow = OVERFLOW_ENABLED + .overflow = OVERFLOW_ENABLED, + .ratio = RATIO_ENABLED }; for (i = 0; i < RTE_DIM(compress_test_bufs); i++) { @@ -3176,47 +3270,1056 @@ test_compressdev_deflate_stateless_fixed_oos_recoverable(void) return ret; } -static struct unit_test_suite compressdev_testsuite = { - .suite_name = "compressdev unit test suite", - .setup = testsuite_setup, - .teardown = testsuite_teardown, - .unit_test_cases = { - TEST_CASE_ST(NULL, NULL, - test_compressdev_invalid_configuration), - TEST_CASE_ST(generic_ut_setup, generic_ut_teardown, - test_compressdev_deflate_stateless_fixed), - TEST_CASE_ST(generic_ut_setup, generic_ut_teardown, - test_compressdev_deflate_stateless_dynamic), - TEST_CASE_ST(generic_ut_setup, generic_ut_teardown, - test_compressdev_deflate_stateless_dynamic_big), - TEST_CASE_ST(generic_ut_setup, generic_ut_teardown, - test_compressdev_deflate_stateless_multi_op), - TEST_CASE_ST(generic_ut_setup, generic_ut_teardown, - test_compressdev_deflate_stateless_multi_level), - TEST_CASE_ST(generic_ut_setup, generic_ut_teardown, - test_compressdev_deflate_stateless_multi_xform), - TEST_CASE_ST(generic_ut_setup, generic_ut_teardown, - test_compressdev_deflate_stateless_sgl), - TEST_CASE_ST(generic_ut_setup, generic_ut_teardown, - test_compressdev_deflate_stateless_checksum), - TEST_CASE_ST(generic_ut_setup, generic_ut_teardown, - test_compressdev_out_of_space_buffer), - TEST_CASE_ST(generic_ut_setup, generic_ut_teardown, - test_compressdev_deflate_stateful_decomp), - TEST_CASE_ST(generic_ut_setup, generic_ut_teardown, - 
test_compressdev_deflate_stateful_decomp_checksum), - TEST_CASE_ST(generic_ut_setup, generic_ut_teardown, - test_compressdev_external_mbufs), - TEST_CASE_ST(generic_ut_setup, generic_ut_teardown, - test_compressdev_deflate_stateless_fixed_oos_recoverable), - TEST_CASES_END() /**< NULL terminate unit test array */ +static int +test_compressdev_deflate_im_buffers_LB_1op(void) +{ + struct comp_testsuite_params *ts_params = &testsuite_params; + uint16_t i = 0; + int ret = TEST_SUCCESS; + int j; + const struct rte_compressdev_capabilities *capab; + char *test_buffer = NULL; + + capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE); + TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities"); + + if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_DYNAMIC) == 0) + return -ENOTSUP; + + if ((capab->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) == 0) + return -ENOTSUP; + + test_buffer = rte_malloc(NULL, IM_BUF_DATA_TEST_SIZE_LB, 0); + if (test_buffer == NULL) { + RTE_LOG(ERR, USER1, + "Can't allocate buffer for 'im buffer' test\n"); + return TEST_FAILED; } -}; + + struct interim_data_params int_data = { + (const char * const *)&test_buffer, + 1, + &i, + &ts_params->def_comp_xform, + &ts_params->def_decomp_xform, + 1 + }; + + struct test_data_params test_data = { + .compress_state = RTE_COMP_OP_STATELESS, + .decompress_state = RTE_COMP_OP_STATELESS, + /* must be LB to SGL, + * input LB buffer reaches its maximum, + * if ratio 1.3 than another mbuf must be + * created and attached + */ + .buff_type = LB_BOTH, + .zlib_dir = ZLIB_NONE, + .out_of_space = 0, + .big_data = 1, + .overflow = OVERFLOW_DISABLED, + .ratio = RATIO_DISABLED + }; + + ts_params->def_comp_xform->compress.deflate.huffman = + RTE_COMP_HUFFMAN_DYNAMIC; + + /* fill the buffer with data based on rand. 
data */ + srand(IM_BUF_DATA_TEST_SIZE_LB); + for (j = 0; j < IM_BUF_DATA_TEST_SIZE_LB - 1; ++j) + test_buffer[j] = (uint8_t)(rand() % ((uint8_t)-1)) | 1; + + /* Compress with compressdev, decompress with compressdev */ + if (test_deflate_comp_decomp(&int_data, &test_data) < 0) { + ret = TEST_FAILED; + goto end; + } + +end: + ts_params->def_comp_xform->compress.deflate.huffman = + RTE_COMP_HUFFMAN_DEFAULT; + rte_free(test_buffer); + return ret; +} static int -test_compressdev(void) +test_compressdev_deflate_im_buffers_LB_2ops_first(void) { - return unit_test_suite_runner(&compressdev_testsuite); + struct comp_testsuite_params *ts_params = &testsuite_params; + uint16_t i = 0; + int ret = TEST_SUCCESS; + int j; + const struct rte_compressdev_capabilities *capab; + char *test_buffer = NULL; + const char *test_buffers[2]; + + capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE); + TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities"); + + if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_DYNAMIC) == 0) + return -ENOTSUP; + + if ((capab->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) == 0) + return -ENOTSUP; + + test_buffer = rte_malloc(NULL, IM_BUF_DATA_TEST_SIZE_LB, 0); + if (test_buffer == NULL) { + RTE_LOG(ERR, USER1, + "Can't allocate buffer for 'im buffer' test\n"); + return TEST_FAILED; + } + + test_buffers[0] = test_buffer; + test_buffers[1] = compress_test_bufs[0]; + + struct interim_data_params int_data = { + (const char * const *)test_buffers, + 2, + &i, + &ts_params->def_comp_xform, + &ts_params->def_decomp_xform, + 1 + }; + + struct test_data_params test_data = { + .compress_state = RTE_COMP_OP_STATELESS, + .decompress_state = RTE_COMP_OP_STATELESS, + .buff_type = LB_BOTH, + .zlib_dir = ZLIB_NONE, + .out_of_space = 0, + .big_data = 1, + .overflow = OVERFLOW_DISABLED, + .ratio = RATIO_DISABLED + }; + + ts_params->def_comp_xform->compress.deflate.huffman = + RTE_COMP_HUFFMAN_DYNAMIC; + + /* fill the buffer with data based on rand. 
data */ + srand(IM_BUF_DATA_TEST_SIZE_LB); + for (j = 0; j < IM_BUF_DATA_TEST_SIZE_LB - 1; ++j) + test_buffer[j] = (uint8_t)(rand() % ((uint8_t)-1)) | 1; + + /* Compress with compressdev, decompress with compressdev */ + if (test_deflate_comp_decomp(&int_data, &test_data) < 0) { + ret = TEST_FAILED; + goto end; + } + +end: + ts_params->def_comp_xform->compress.deflate.huffman = + RTE_COMP_HUFFMAN_DEFAULT; + rte_free(test_buffer); + return ret; +} + +static int +test_compressdev_deflate_im_buffers_LB_2ops_second(void) +{ + struct comp_testsuite_params *ts_params = &testsuite_params; + uint16_t i = 0; + int ret = TEST_SUCCESS; + int j; + const struct rte_compressdev_capabilities *capab; + char *test_buffer = NULL; + const char *test_buffers[2]; + + capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE); + TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities"); + + if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_DYNAMIC) == 0) + return -ENOTSUP; + + if ((capab->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) == 0) + return -ENOTSUP; + + test_buffer = rte_malloc(NULL, IM_BUF_DATA_TEST_SIZE_LB, 0); + if (test_buffer == NULL) { + RTE_LOG(ERR, USER1, + "Can't allocate buffer for 'im buffer' test\n"); + return TEST_FAILED; + } + + test_buffers[0] = compress_test_bufs[0]; + test_buffers[1] = test_buffer; + + struct interim_data_params int_data = { + (const char * const *)test_buffers, + 2, + &i, + &ts_params->def_comp_xform, + &ts_params->def_decomp_xform, + 1 + }; + + struct test_data_params test_data = { + .compress_state = RTE_COMP_OP_STATELESS, + .decompress_state = RTE_COMP_OP_STATELESS, + .buff_type = LB_BOTH, + .zlib_dir = ZLIB_NONE, + .out_of_space = 0, + .big_data = 1, + .overflow = OVERFLOW_DISABLED, + .ratio = RATIO_DISABLED + }; + + ts_params->def_comp_xform->compress.deflate.huffman = + RTE_COMP_HUFFMAN_DYNAMIC; + + /* fill the buffer with data based on rand. 
data */ + srand(IM_BUF_DATA_TEST_SIZE_LB); + for (j = 0; j < IM_BUF_DATA_TEST_SIZE_LB - 1; ++j) + test_buffer[j] = (uint8_t)(rand() % ((uint8_t)-1)) | 1; + + /* Compress with compressdev, decompress with compressdev */ + if (test_deflate_comp_decomp(&int_data, &test_data) < 0) { + ret = TEST_FAILED; + goto end; + } + +end: + ts_params->def_comp_xform->compress.deflate.huffman = + RTE_COMP_HUFFMAN_DEFAULT; + rte_free(test_buffer); + return ret; +} + +static int +test_compressdev_deflate_im_buffers_LB_3ops(void) +{ + struct comp_testsuite_params *ts_params = &testsuite_params; + uint16_t i = 0; + int ret = TEST_SUCCESS; + int j; + const struct rte_compressdev_capabilities *capab; + char *test_buffer = NULL; + const char *test_buffers[3]; + + capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE); + TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities"); + + if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_DYNAMIC) == 0) + return -ENOTSUP; + + if ((capab->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) == 0) + return -ENOTSUP; + + test_buffer = rte_malloc(NULL, IM_BUF_DATA_TEST_SIZE_LB, 0); + if (test_buffer == NULL) { + RTE_LOG(ERR, USER1, + "Can't allocate buffer for 'im buffer' test\n"); + return TEST_FAILED; + } + + test_buffers[0] = compress_test_bufs[0]; + test_buffers[1] = test_buffer; + test_buffers[2] = compress_test_bufs[1]; + + struct interim_data_params int_data = { + (const char * const *)test_buffers, + 3, + &i, + &ts_params->def_comp_xform, + &ts_params->def_decomp_xform, + 1 + }; + + struct test_data_params test_data = { + .compress_state = RTE_COMP_OP_STATELESS, + .decompress_state = RTE_COMP_OP_STATELESS, + .buff_type = LB_BOTH, + .zlib_dir = ZLIB_NONE, + .out_of_space = 0, + .big_data = 1, + .overflow = OVERFLOW_DISABLED, + .ratio = RATIO_DISABLED + }; + + ts_params->def_comp_xform->compress.deflate.huffman = + RTE_COMP_HUFFMAN_DYNAMIC; + + /* fill the buffer with data based on rand. 
data */ + srand(IM_BUF_DATA_TEST_SIZE_LB); + for (j = 0; j < IM_BUF_DATA_TEST_SIZE_LB - 1; ++j) + test_buffer[j] = (uint8_t)(rand() % ((uint8_t)-1)) | 1; + + /* Compress with compressdev, decompress with compressdev */ + if (test_deflate_comp_decomp(&int_data, &test_data) < 0) { + ret = TEST_FAILED; + goto end; + } + +end: + ts_params->def_comp_xform->compress.deflate.huffman = + RTE_COMP_HUFFMAN_DEFAULT; + rte_free(test_buffer); + return ret; +} + +static int +test_compressdev_deflate_im_buffers_LB_4ops(void) +{ + struct comp_testsuite_params *ts_params = &testsuite_params; + uint16_t i = 0; + int ret = TEST_SUCCESS; + int j; + const struct rte_compressdev_capabilities *capab; + char *test_buffer = NULL; + const char *test_buffers[4]; + + capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE); + TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities"); + + if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_DYNAMIC) == 0) + return -ENOTSUP; + + if ((capab->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) == 0) + return -ENOTSUP; + + test_buffer = rte_malloc(NULL, IM_BUF_DATA_TEST_SIZE_LB, 0); + if (test_buffer == NULL) { + RTE_LOG(ERR, USER1, + "Can't allocate buffer for 'im buffer' test\n"); + return TEST_FAILED; + } + + test_buffers[0] = compress_test_bufs[0]; + test_buffers[1] = test_buffer; + test_buffers[2] = compress_test_bufs[1]; + test_buffers[3] = test_buffer; + + struct interim_data_params int_data = { + (const char * const *)test_buffers, + 4, + &i, + &ts_params->def_comp_xform, + &ts_params->def_decomp_xform, + 1 + }; + + struct test_data_params test_data = { + .compress_state = RTE_COMP_OP_STATELESS, + .decompress_state = RTE_COMP_OP_STATELESS, + .buff_type = LB_BOTH, + .zlib_dir = ZLIB_NONE, + .out_of_space = 0, + .big_data = 1, + .overflow = OVERFLOW_DISABLED, + .ratio = RATIO_DISABLED + }; + + ts_params->def_comp_xform->compress.deflate.huffman = + RTE_COMP_HUFFMAN_DYNAMIC; + + /* fill the buffer with data based on rand. 
data */ + srand(IM_BUF_DATA_TEST_SIZE_LB); + for (j = 0; j < IM_BUF_DATA_TEST_SIZE_LB - 1; ++j) + test_buffer[j] = (uint8_t)(rand() % ((uint8_t)-1)) | 1; + + /* Compress with compressdev, decompress with compressdev */ + if (test_deflate_comp_decomp(&int_data, &test_data) < 0) { + ret = TEST_FAILED; + goto end; + } + +end: + ts_params->def_comp_xform->compress.deflate.huffman = + RTE_COMP_HUFFMAN_DEFAULT; + rte_free(test_buffer); + return ret; +} + + +static int +test_compressdev_deflate_im_buffers_SGL_1op(void) +{ + struct comp_testsuite_params *ts_params = &testsuite_params; + uint16_t i = 0; + int ret = TEST_SUCCESS; + int j; + const struct rte_compressdev_capabilities *capab; + char *test_buffer = NULL; + + capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE); + TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities"); + + if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_DYNAMIC) == 0) + return -ENOTSUP; + + if ((capab->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) == 0) + return -ENOTSUP; + + test_buffer = rte_malloc(NULL, IM_BUF_DATA_TEST_SIZE_SGL, 0); + if (test_buffer == NULL) { + RTE_LOG(ERR, USER1, + "Can't allocate buffer for big-data\n"); + return TEST_FAILED; + } + + struct interim_data_params int_data = { + (const char * const *)&test_buffer, + 1, + &i, + &ts_params->def_comp_xform, + &ts_params->def_decomp_xform, + 1 + }; + + struct test_data_params test_data = { + .compress_state = RTE_COMP_OP_STATELESS, + .decompress_state = RTE_COMP_OP_STATELESS, + .buff_type = SGL_BOTH, + .zlib_dir = ZLIB_NONE, + .out_of_space = 0, + .big_data = 1, + .overflow = OVERFLOW_DISABLED, + .ratio = RATIO_DISABLED + }; + + ts_params->def_comp_xform->compress.deflate.huffman = + RTE_COMP_HUFFMAN_DYNAMIC; + + /* fill the buffer with data based on rand. 
data */ + srand(IM_BUF_DATA_TEST_SIZE_SGL); + for (j = 0; j < IM_BUF_DATA_TEST_SIZE_SGL - 1; ++j) + test_buffer[j] = (uint8_t)(rand() % ((uint8_t)-1)) | 1; + + /* Compress with compressdev, decompress with compressdev */ + if (test_deflate_comp_decomp(&int_data, &test_data) < 0) { + ret = TEST_FAILED; + goto end; + } + +end: + ts_params->def_comp_xform->compress.deflate.huffman = + RTE_COMP_HUFFMAN_DEFAULT; + rte_free(test_buffer); + return ret; +} + +static int +test_compressdev_deflate_im_buffers_SGL_2ops_first(void) +{ + struct comp_testsuite_params *ts_params = &testsuite_params; + uint16_t i = 0; + int ret = TEST_SUCCESS; + int j; + const struct rte_compressdev_capabilities *capab; + char *test_buffer = NULL; + const char *test_buffers[2]; + + capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE); + TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities"); + + if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_DYNAMIC) == 0) + return -ENOTSUP; + + if ((capab->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) == 0) + return -ENOTSUP; + + test_buffer = rte_malloc(NULL, IM_BUF_DATA_TEST_SIZE_SGL, 0); + if (test_buffer == NULL) { + RTE_LOG(ERR, USER1, + "Can't allocate buffer for big-data\n"); + return TEST_FAILED; + } + + test_buffers[0] = test_buffer; + test_buffers[1] = compress_test_bufs[0]; + + struct interim_data_params int_data = { + (const char * const *)test_buffers, + 2, + &i, + &ts_params->def_comp_xform, + &ts_params->def_decomp_xform, + 1 + }; + + struct test_data_params test_data = { + .compress_state = RTE_COMP_OP_STATELESS, + .decompress_state = RTE_COMP_OP_STATELESS, + .buff_type = SGL_BOTH, + .zlib_dir = ZLIB_NONE, + .out_of_space = 0, + .big_data = 1, + .overflow = OVERFLOW_DISABLED, + .ratio = RATIO_DISABLED + }; + + ts_params->def_comp_xform->compress.deflate.huffman = + RTE_COMP_HUFFMAN_DYNAMIC; + + /* fill the buffer with data based on rand. 
data */ + srand(IM_BUF_DATA_TEST_SIZE_SGL); + for (j = 0; j < IM_BUF_DATA_TEST_SIZE_SGL - 1; ++j) + test_buffer[j] = (uint8_t)(rand() % ((uint8_t)-1)) | 1; + + /* Compress with compressdev, decompress with compressdev */ + if (test_deflate_comp_decomp(&int_data, &test_data) < 0) { + ret = TEST_FAILED; + goto end; + } + +end: + ts_params->def_comp_xform->compress.deflate.huffman = + RTE_COMP_HUFFMAN_DEFAULT; + rte_free(test_buffer); + return ret; +} + +static int +test_compressdev_deflate_im_buffers_SGL_2ops_second(void) +{ + struct comp_testsuite_params *ts_params = &testsuite_params; + uint16_t i = 0; + int ret = TEST_SUCCESS; + int j; + const struct rte_compressdev_capabilities *capab; + char *test_buffer = NULL; + const char *test_buffers[2]; + + capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE); + TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities"); + + if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_DYNAMIC) == 0) + return -ENOTSUP; + + if ((capab->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) == 0) + return -ENOTSUP; + + test_buffer = rte_malloc(NULL, IM_BUF_DATA_TEST_SIZE_SGL, 0); + if (test_buffer == NULL) { + RTE_LOG(ERR, USER1, + "Can't allocate buffer for big-data\n"); + return TEST_FAILED; + } + + test_buffers[0] = compress_test_bufs[0]; + test_buffers[1] = test_buffer; + + struct interim_data_params int_data = { + (const char * const *)test_buffers, + 2, + &i, + &ts_params->def_comp_xform, + &ts_params->def_decomp_xform, + 1 + }; + + struct test_data_params test_data = { + .compress_state = RTE_COMP_OP_STATELESS, + .decompress_state = RTE_COMP_OP_STATELESS, + .buff_type = SGL_BOTH, + .zlib_dir = ZLIB_NONE, + .out_of_space = 0, + .big_data = 1, + .overflow = OVERFLOW_DISABLED, + .ratio = RATIO_DISABLED + }; + + ts_params->def_comp_xform->compress.deflate.huffman = + RTE_COMP_HUFFMAN_DYNAMIC; + + /* fill the buffer with data based on rand. 
data */ + srand(IM_BUF_DATA_TEST_SIZE_SGL); + for (j = 0; j < IM_BUF_DATA_TEST_SIZE_SGL - 1; ++j) + test_buffer[j] = (uint8_t)(rand() % ((uint8_t)-1)) | 1; + + /* Compress with compressdev, decompress with compressdev */ + if (test_deflate_comp_decomp(&int_data, &test_data) < 0) { + ret = TEST_FAILED; + goto end; + } + +end: + ts_params->def_comp_xform->compress.deflate.huffman = + RTE_COMP_HUFFMAN_DEFAULT; + rte_free(test_buffer); + return ret; +} + +static int +test_compressdev_deflate_im_buffers_SGL_3ops(void) +{ + struct comp_testsuite_params *ts_params = &testsuite_params; + uint16_t i = 0; + int ret = TEST_SUCCESS; + int j; + const struct rte_compressdev_capabilities *capab; + char *test_buffer = NULL; + const char *test_buffers[3]; + + capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE); + TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities"); + + if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_DYNAMIC) == 0) + return -ENOTSUP; + + if ((capab->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) == 0) + return -ENOTSUP; + + test_buffer = rte_malloc(NULL, IM_BUF_DATA_TEST_SIZE_SGL, 0); + if (test_buffer == NULL) { + RTE_LOG(ERR, USER1, + "Can't allocate buffer for big-data\n"); + return TEST_FAILED; + } + + test_buffers[0] = compress_test_bufs[0]; + test_buffers[1] = test_buffer; + test_buffers[2] = compress_test_bufs[1]; + + struct interim_data_params int_data = { + (const char * const *)test_buffers, + 3, + &i, + &ts_params->def_comp_xform, + &ts_params->def_decomp_xform, + 1 + }; + + struct test_data_params test_data = { + .compress_state = RTE_COMP_OP_STATELESS, + .decompress_state = RTE_COMP_OP_STATELESS, + .buff_type = SGL_BOTH, + .zlib_dir = ZLIB_NONE, + .out_of_space = 0, + .big_data = 1, + .overflow = OVERFLOW_DISABLED, + .ratio = RATIO_DISABLED + }; + + ts_params->def_comp_xform->compress.deflate.huffman = + RTE_COMP_HUFFMAN_DYNAMIC; + + /* fill the buffer with data based on rand. 
data */ + srand(IM_BUF_DATA_TEST_SIZE_SGL); + for (j = 0; j < IM_BUF_DATA_TEST_SIZE_SGL - 1; ++j) + test_buffer[j] = (uint8_t)(rand() % ((uint8_t)-1)) | 1; + + /* Compress with compressdev, decompress with compressdev */ + if (test_deflate_comp_decomp(&int_data, &test_data) < 0) { + ret = TEST_FAILED; + goto end; + } + +end: + ts_params->def_comp_xform->compress.deflate.huffman = + RTE_COMP_HUFFMAN_DEFAULT; + rte_free(test_buffer); + return ret; +} + + +static int +test_compressdev_deflate_im_buffers_SGL_4ops(void) +{ + struct comp_testsuite_params *ts_params = &testsuite_params; + uint16_t i = 0; + int ret = TEST_SUCCESS; + int j; + const struct rte_compressdev_capabilities *capab; + char *test_buffer = NULL; + const char *test_buffers[4]; + + capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE); + TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities"); + + if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_DYNAMIC) == 0) + return -ENOTSUP; + + if ((capab->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) == 0) + return -ENOTSUP; + + test_buffer = rte_malloc(NULL, IM_BUF_DATA_TEST_SIZE_SGL, 0); + if (test_buffer == NULL) { + RTE_LOG(ERR, USER1, + "Can't allocate buffer for big-data\n"); + return TEST_FAILED; + } + + test_buffers[0] = compress_test_bufs[0]; + test_buffers[1] = test_buffer; + test_buffers[2] = compress_test_bufs[1]; + test_buffers[3] = test_buffer; + + struct interim_data_params int_data = { + (const char * const *)test_buffers, + 4, + &i, + &ts_params->def_comp_xform, + &ts_params->def_decomp_xform, + 1 + }; + + struct test_data_params test_data = { + .compress_state = RTE_COMP_OP_STATELESS, + .decompress_state = RTE_COMP_OP_STATELESS, + .buff_type = SGL_BOTH, + .zlib_dir = ZLIB_NONE, + .out_of_space = 0, + .big_data = 1, + .overflow = OVERFLOW_DISABLED, + .ratio = RATIO_DISABLED + }; + + ts_params->def_comp_xform->compress.deflate.huffman = + RTE_COMP_HUFFMAN_DYNAMIC; + + /* fill the buffer with data based on rand. 
data */ + srand(IM_BUF_DATA_TEST_SIZE_SGL); + for (j = 0; j < IM_BUF_DATA_TEST_SIZE_SGL - 1; ++j) + test_buffer[j] = (uint8_t)(rand() % ((uint8_t)-1)) | 1; + + /* Compress with compressdev, decompress with compressdev */ + if (test_deflate_comp_decomp(&int_data, &test_data) < 0) { + ret = TEST_FAILED; + goto end; + } + +end: + ts_params->def_comp_xform->compress.deflate.huffman = + RTE_COMP_HUFFMAN_DEFAULT; + rte_free(test_buffer); + return ret; +} + +static int +test_compressdev_deflate_im_buffers_SGL_over_1op(void) +{ + struct comp_testsuite_params *ts_params = &testsuite_params; + uint16_t i = 0; + int ret = TEST_SUCCESS; + int j; + const struct rte_compressdev_capabilities *capab; + char *test_buffer = NULL; + + RTE_LOG(INFO, USER1, "This is a negative test, errors are expected\n"); + + capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE); + TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities"); + + if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_DYNAMIC) == 0) + return -ENOTSUP; + + if ((capab->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) == 0) + return -ENOTSUP; + + test_buffer = rte_malloc(NULL, IM_BUF_DATA_TEST_SIZE_OVER, 0); + if (test_buffer == NULL) { + RTE_LOG(ERR, USER1, + "Can't allocate buffer for big-data\n"); + return TEST_FAILED; + } + + struct interim_data_params int_data = { + (const char * const *)&test_buffer, + 1, + &i, + &ts_params->def_comp_xform, + &ts_params->def_decomp_xform, + 1 + }; + + struct test_data_params test_data = { + .compress_state = RTE_COMP_OP_STATELESS, + .decompress_state = RTE_COMP_OP_STATELESS, + .buff_type = SGL_BOTH, + .zlib_dir = ZLIB_NONE, + .out_of_space = 0, + .big_data = 1, + .overflow = OVERFLOW_DISABLED, + .ratio = RATIO_DISABLED + }; + + ts_params->def_comp_xform->compress.deflate.huffman = + RTE_COMP_HUFFMAN_DYNAMIC; + + /* fill the buffer with data based on rand. 
data */ + srand(IM_BUF_DATA_TEST_SIZE_OVER); + for (j = 0; j < IM_BUF_DATA_TEST_SIZE_OVER - 1; ++j) + test_buffer[j] = (uint8_t)(rand() % ((uint8_t)-1)) | 1; + + /* Compress with compressdev, decompress with compressdev */ + if (test_deflate_comp_decomp(&int_data, &test_data) < 0) { + ret = TEST_SUCCESS; + goto end; + } + +end: + ts_params->def_comp_xform->compress.deflate.huffman = + RTE_COMP_HUFFMAN_DEFAULT; + rte_free(test_buffer); + + return ret; +} + + +static int +test_compressdev_deflate_im_buffers_SGL_over_2ops_first(void) +{ + struct comp_testsuite_params *ts_params = &testsuite_params; + uint16_t i = 0; + int ret = TEST_SUCCESS; + int j; + const struct rte_compressdev_capabilities *capab; + char *test_buffer = NULL; + const char *test_buffers[2]; + + RTE_LOG(INFO, USER1, "This is a negative test, errors are expected\n"); + + capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE); + TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities"); + + if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_DYNAMIC) == 0) + return -ENOTSUP; + + if ((capab->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) == 0) + return -ENOTSUP; + + test_buffer = rte_malloc(NULL, IM_BUF_DATA_TEST_SIZE_OVER, 0); + if (test_buffer == NULL) { + RTE_LOG(ERR, USER1, + "Can't allocate buffer for big-data\n"); + return TEST_FAILED; + } + + test_buffers[0] = test_buffer; + test_buffers[1] = compress_test_bufs[0]; + + struct interim_data_params int_data = { + (const char * const *)test_buffers, + 2, + &i, + &ts_params->def_comp_xform, + &ts_params->def_decomp_xform, + 1 + }; + + struct test_data_params test_data = { + .compress_state = RTE_COMP_OP_STATELESS, + .decompress_state = RTE_COMP_OP_STATELESS, + .buff_type = SGL_BOTH, + .zlib_dir = ZLIB_NONE, + .out_of_space = 0, + .big_data = 1, + .overflow = OVERFLOW_DISABLED, + .ratio = RATIO_DISABLED + }; + + ts_params->def_comp_xform->compress.deflate.huffman = + RTE_COMP_HUFFMAN_DYNAMIC; + + /* fill the buffer with data based on rand. 
data */ + srand(IM_BUF_DATA_TEST_SIZE_OVER); + for (j = 0; j < IM_BUF_DATA_TEST_SIZE_OVER - 1; ++j) + test_buffer[j] = (uint8_t)(rand() % ((uint8_t)-1)) | 1; + + /* Compress with compressdev, decompress with compressdev */ + if (test_deflate_comp_decomp(&int_data, &test_data) < 0) { + ret = TEST_SUCCESS; + goto end; + } + +end: + ts_params->def_comp_xform->compress.deflate.huffman = + RTE_COMP_HUFFMAN_DEFAULT; + rte_free(test_buffer); + return ret; +} + +static int +test_compressdev_deflate_im_buffers_SGL_over_2ops_second(void) +{ + struct comp_testsuite_params *ts_params = &testsuite_params; + uint16_t i = 0; + int ret = TEST_SUCCESS; + int j; + const struct rte_compressdev_capabilities *capab; + char *test_buffer = NULL; + const char *test_buffers[2]; + + RTE_LOG(INFO, USER1, "This is a negative test, errors are expected\n"); + + capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE); + TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities"); + + if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_DYNAMIC) == 0) + return -ENOTSUP; + + if ((capab->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) == 0) + return -ENOTSUP; + + test_buffer = rte_malloc(NULL, IM_BUF_DATA_TEST_SIZE_OVER, 0); + if (test_buffer == NULL) { + RTE_LOG(ERR, USER1, + "Can't allocate buffer for big-data\n"); + return TEST_FAILED; + } + + test_buffers[0] = compress_test_bufs[0]; + test_buffers[1] = test_buffer; + + struct interim_data_params int_data = { + (const char * const *)test_buffers, + 2, + &i, + &ts_params->def_comp_xform, + &ts_params->def_decomp_xform, + 1 + }; + + struct test_data_params test_data = { + .compress_state = RTE_COMP_OP_STATELESS, + .decompress_state = RTE_COMP_OP_STATELESS, + .buff_type = SGL_BOTH, + .zlib_dir = ZLIB_NONE, + .out_of_space = 0, + .big_data = 1, + .overflow = OVERFLOW_DISABLED, + .ratio = RATIO_DISABLED + }; + + ts_params->def_comp_xform->compress.deflate.huffman = + RTE_COMP_HUFFMAN_DYNAMIC; + + /* fill the buffer with data based on rand. 
data */ + srand(IM_BUF_DATA_TEST_SIZE_OVER); + for (j = 0; j < IM_BUF_DATA_TEST_SIZE_OVER - 1; ++j) + test_buffer[j] = (uint8_t)(rand() % ((uint8_t)-1)) | 1; + + /* Compress with compressdev, decompress with compressdev */ + if (test_deflate_comp_decomp(&int_data, &test_data) < 0) { + ret = TEST_SUCCESS; + goto end; + } + +end: + ts_params->def_comp_xform->compress.deflate.huffman = + RTE_COMP_HUFFMAN_DEFAULT; + rte_free(test_buffer); + return ret; +} + +static struct unit_test_suite compressdev_testsuite = { + .suite_name = "compressdev unit test suite", + .setup = testsuite_setup, + .teardown = testsuite_teardown, + .unit_test_cases = { + TEST_CASE_ST(NULL, NULL, + test_compressdev_invalid_configuration), + TEST_CASE_ST(generic_ut_setup, generic_ut_teardown, + test_compressdev_deflate_stateless_fixed), + TEST_CASE_ST(generic_ut_setup, generic_ut_teardown, + test_compressdev_deflate_stateless_dynamic), + TEST_CASE_ST(generic_ut_setup, generic_ut_teardown, + test_compressdev_deflate_stateless_dynamic_big), + TEST_CASE_ST(generic_ut_setup, generic_ut_teardown, + test_compressdev_deflate_stateless_multi_op), + TEST_CASE_ST(generic_ut_setup, generic_ut_teardown, + test_compressdev_deflate_stateless_multi_level), + TEST_CASE_ST(generic_ut_setup, generic_ut_teardown, + test_compressdev_deflate_stateless_multi_xform), + TEST_CASE_ST(generic_ut_setup, generic_ut_teardown, + test_compressdev_deflate_stateless_sgl), + TEST_CASE_ST(generic_ut_setup, generic_ut_teardown, + test_compressdev_deflate_stateless_checksum), + TEST_CASE_ST(generic_ut_setup, generic_ut_teardown, + test_compressdev_out_of_space_buffer), + TEST_CASE_ST(generic_ut_setup, generic_ut_teardown, + test_compressdev_deflate_stateful_decomp), + TEST_CASE_ST(generic_ut_setup, generic_ut_teardown, + test_compressdev_deflate_stateful_decomp_checksum), + TEST_CASE_ST(generic_ut_setup, generic_ut_teardown, + test_compressdev_external_mbufs), + TEST_CASE_ST(generic_ut_setup, generic_ut_teardown, + test_compressdev_deflate_stateless_fixed_oos_recoverable), + TEST_CASES_END() /**< NULL terminate unit test array */ + } +}; + +/**********************************/ + +static struct unit_test_suite compressdev_testsuite_qat = { + .suite_name = "compressdev unit test suite for QAT PMD", + .setup = testsuite_setup, + .teardown = testsuite_teardown, + .unit_test_cases = { + /* Positive test cases for IM buffer handling verification */ + + TEST_CASE_ST(generic_ut_setup, generic_ut_teardown, + test_compressdev_deflate_im_buffers_LB_1op), + TEST_CASE_ST(generic_ut_setup, generic_ut_teardown, + test_compressdev_deflate_im_buffers_LB_2ops_first), + TEST_CASE_ST(generic_ut_setup, generic_ut_teardown, + test_compressdev_deflate_im_buffers_LB_2ops_second), + TEST_CASE_ST(generic_ut_setup, generic_ut_teardown, + test_compressdev_deflate_im_buffers_LB_3ops), + + TEST_CASE_ST(generic_ut_setup, generic_ut_teardown, + test_compressdev_deflate_im_buffers_LB_4ops), + TEST_CASE_ST(generic_ut_setup, generic_ut_teardown, + test_compressdev_deflate_im_buffers_SGL_1op), + + TEST_CASE_ST(generic_ut_setup, generic_ut_teardown, + test_compressdev_deflate_im_buffers_SGL_2ops_first), + TEST_CASE_ST(generic_ut_setup, generic_ut_teardown, + test_compressdev_deflate_im_buffers_SGL_2ops_second), + TEST_CASE_ST(generic_ut_setup, generic_ut_teardown, + test_compressdev_deflate_im_buffers_SGL_3ops), + TEST_CASE_ST(generic_ut_setup, generic_ut_teardown, + test_compressdev_deflate_im_buffers_SGL_4ops), + + /* Negative test cases for IM buffer handling verification */ + + /* For this 
test huge mempool is necessary. + * It tests one case: + * only one op containing big amount of data, so that + * number of requested descriptors higher than number + * of available descriptors (128) + */ + TEST_CASE_ST(generic_ut_setup, generic_ut_teardown, + test_compressdev_deflate_im_buffers_SGL_over_1op), + + /* For this test huge mempool is necessary. + * 2 ops. First op contains big amount of data: + * number of requested descriptors higher than number + * of available descriptors (128), the second op is + * relatively small. In this case both ops are rejected + */ + TEST_CASE_ST(generic_ut_setup, generic_ut_teardown, + test_compressdev_deflate_im_buffers_SGL_over_2ops_first), + + TEST_CASE_ST(generic_ut_setup, generic_ut_teardown, + test_compressdev_deflate_im_buffers_SGL_over_2ops_second), + + TEST_CASES_END() /**< NULL terminate unit test array */ + } +}; + +static int +test_compressdev(void) +{ + testsuite_params.suite_name = compressdev_testsuite.suite_name; + return unit_test_suite_runner(&compressdev_testsuite); +} + +static int +test_compressdev_qat_specific(void) +{ + /* All tests are run on device 0. Check if it's QAT PMD. */ + if (rte_compressdev_count() == 0 || + !ENDS_WITH(rte_compressdev_name_get(0), "qat_comp")) { + RTE_LOG(ERR, USER1, "QAT PMD must be loaded. Check if " + "CONFIG_RTE_LIBRTE_PMD_QAT is enabled " + "in config file to run this testsuite.\n"); + return TEST_FAILED; + } + + testsuite_params.suite_name = compressdev_testsuite_qat.suite_name; + return unit_test_suite_runner(&compressdev_testsuite_qat); } REGISTER_TEST_COMMAND(compressdev_autotest, test_compressdev); +REGISTER_TEST_COMMAND(compressdev_qat_specific, test_compressdev_qat_specific); -- 2.17.1