From: Nicolas Chautru <nicolas.chautru@intel.com>
To: akhil.goyal@nxp.com, dev@dpdk.org
Cc: thomas@monjalon.net, ferruh.yigit@intel.com,
amr.mokhtar@intel.com,
Nicolas Chautru <nicolas.chautru@intel.com>
Subject: [dpdk-dev] [PATCH v3 08/10] test-bbdev: update of bbdev test-app for 5GNR
Date: Fri, 21 Jun 2019 09:59:10 -0700 [thread overview]
Message-ID: <1561136352-32198-9-git-send-email-nicolas.chautru@intel.com> (raw)
In-Reply-To: <1561136352-32198-1-git-send-email-nicolas.chautru@intel.com>
Extend the bbdev test framework with support for 5GNR FEC (LDPC
encode and decode) operations, including HARQ input/output buffers,
for unit test verification and performance profiling. A new
-i/--init-device option allows configuring the FPGA LTE FEC PF with
default values before running the tests.
Signed-off-by: Nicolas Chautru <nicolas.chautru@intel.com>
---
app/test-bbdev/main.c | 48 +-
app/test-bbdev/main.h | 6 +-
app/test-bbdev/meson.build | 3 +
app/test-bbdev/test-bbdev.py | 7 +
app/test-bbdev/test_bbdev.c | 12 +-
app/test-bbdev/test_bbdev_perf.c | 1233 ++++++++++++++++++++++++++++++---
app/test-bbdev/test_bbdev_vector.c | 517 +++++++++++++-
app/test-bbdev/test_bbdev_vector.h | 14 +-
app/test-bbdev/turbo_enc_default.data | 2 +-
9 files changed, 1715 insertions(+), 127 deletions(-)
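
Note for reviewers (not part of the patch): the new ldpc_input_llr_scaling()
helper in test_bbdev_perf.c rescales the reference LLR vectors to the
fixed-point format a device advertises (llr_size bits, llr_decimals
fractional bits) and saturates them to the representable range. Below is a
minimal standalone sketch of that per-byte arithmetic, using hypothetical
capability values llr_size = 6 and llr_decimals = 2 and a made-up helper
name scale_llr(); it is illustrative only.

#include <stdio.h>
#include <stdint.h>

/* Illustrative only: mirrors the scaling/saturation in ldpc_input_llr_scaling() */
static int8_t
scale_llr(int8_t llr, int8_t llr_size, int8_t llr_decimals)
{
	int16_t llr_max = (1 << (llr_size - 1)) - 1; /* 31 for 6-bit LLRs */
	int16_t llr_min = -llr_max;
	int16_t tmp = llr;

	if (llr_decimals == 2)
		tmp *= 2;	/* device expects two fractional bits */
	else if (llr_decimals == 0)
		tmp /= 2;	/* device expects none */
	if (tmp > llr_max)
		tmp = llr_max;
	else if (tmp < llr_min)
		tmp = llr_min;
	return (int8_t)tmp;
}

int
main(void)
{
	/* 100 doubles to 200 and saturates to 31; -3 doubles to -6 */
	printf("%d %d\n", scale_llr(100, 6, 2), scale_llr(-3, 6, 2));
	return 0;
}
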
diff --git a/app/test-bbdev/main.c b/app/test-bbdev/main.c
index a2f8722..8a42115 100644
--- a/app/test-bbdev/main.c
+++ b/app/test-bbdev/main.c
@@ -16,11 +16,13 @@
#include "main.h"
+
/* Defines how many testcases can be specified as cmdline args */
#define MAX_CMDLINE_TESTCASES 8
static const char tc_sep = ',';
+/* Command line test parameters and options */
static struct test_params {
struct test_command *test_to_run[MAX_CMDLINE_TESTCASES];
unsigned int num_tests;
@@ -28,6 +30,7 @@
unsigned int burst_sz;
unsigned int num_lcores;
char test_vector_filename[PATH_MAX];
+ bool init_device;
} test_params;
static struct test_commands_list commands_list =
@@ -46,9 +49,8 @@
unsigned int total = 0, skipped = 0, succeeded = 0, failed = 0;
uint64_t start, end;
- printf(
- "\n + ------------------------------------------------------- +\n");
- printf(" + Starting Test Suite : %s\n", suite->suite_name);
+ printf("\n===========================================================\n");
+ printf("Starting Test Suite : %s\n", suite->suite_name);
start = rte_rdtsc_precise();
@@ -57,15 +59,13 @@
if (test_result == TEST_FAILED) {
printf(" + Test suite setup %s failed!\n",
suite->suite_name);
- printf(
- " + ------------------------------------------------------- +\n");
+ printf(" + ------------------------------------------------------- +\n");
return 1;
}
if (test_result == TEST_SKIPPED) {
printf(" + Test suite setup %s skipped!\n",
suite->suite_name);
- printf(
- " + ------------------------------------------------------- +\n");
+ printf(" + ------------------------------------------------------- +\n");
return 0;
}
}
@@ -82,15 +82,15 @@
if (test_result == TEST_SUCCESS) {
succeeded++;
- printf(" + TestCase [%2d] : %s passed\n", total,
+ printf("TestCase [%2d] : %s passed\n", total,
suite->unit_test_cases[total].name);
} else if (test_result == TEST_SKIPPED) {
skipped++;
- printf(" + TestCase [%2d] : %s skipped\n", total,
+ printf("TestCase [%2d] : %s skipped\n", total,
suite->unit_test_cases[total].name);
} else {
failed++;
- printf(" + TestCase [%2d] : %s failed\n", total,
+ printf("TestCase [%2d] : %s failed\n", total,
suite->unit_test_cases[total].name);
}
@@ -103,7 +103,7 @@
end = rte_rdtsc_precise();
- printf(" + ------------------------------------------------------- +\n");
+ printf(" + ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +\n");
printf(" + Test Suite Summary : %s\n", suite->suite_name);
printf(" + Tests Total : %2d\n", total);
printf(" + Tests Skipped : %2d\n", skipped);
@@ -111,7 +111,7 @@
printf(" + Tests Failed : %2d\n", failed);
printf(" + Tests Lasted : %lg ms\n",
((end - start) * 1000) / (double)rte_get_tsc_hz());
- printf(" + ------------------------------------------------------- +\n");
+ printf(" + ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +\n");
return (failed > 0) ? 1 : 0;
}
@@ -140,12 +140,18 @@
return test_params.num_lcores;
}
+bool
+get_init_device(void)
+{
+ return test_params.init_device;
+}
+
static void
print_usage(const char *prog_name)
{
struct test_command *t;
- printf("Usage: %s [EAL params] [-- [-n/--num-ops NUM_OPS]\n"
+ printf("***Usage: %s [EAL params] [-- [-n/--num-ops NUM_OPS]\n"
"\t[-b/--burst-size BURST_SIZE]\n"
"\t[-v/--test-vector VECTOR_FILE]\n"
"\t[-c/--test-cases TEST_CASE[,TEST_CASE,...]]]\n",
@@ -174,11 +180,12 @@
{ "test-cases", 1, 0, 'c' },
{ "test-vector", 1, 0, 'v' },
{ "lcores", 1, 0, 'l' },
+ { "init-device", 0, 0, 'i'},
{ "help", 0, 0, 'h' },
{ NULL, 0, 0, 0 }
};
- while ((opt = getopt_long(argc, argv, "hn:b:c:v:l:", lgopts,
+ while ((opt = getopt_long(argc, argv, "hin:b:c:v:l:", lgopts,
&option_index)) != EOF)
switch (opt) {
case 'n':
@@ -226,8 +233,9 @@
TEST_ASSERT(strlen(optarg) > 0,
"Config file name is null");
- strlcpy(tp->test_vector_filename, optarg,
- sizeof(tp->test_vector_filename));
+ snprintf(tp->test_vector_filename,
+ sizeof(tp->test_vector_filename),
+ "%s", optarg);
break;
case 'l':
TEST_ASSERT(strlen(optarg) > 0,
@@ -237,6 +245,10 @@
"Num of lcores mustn't be greater than %u",
RTE_MAX_LCORE);
break;
+ case 'i':
+ /* indicate fpga fec config required */
+ tp->init_device = true;
+ break;
case 'h':
print_usage(argv[0]);
return 0;
@@ -279,7 +291,7 @@
struct test_command *t;
TAILQ_FOREACH(t, &commands_list, next)
- ret |= t->callback();
+ ret |= (int) t->callback();
return ret;
}
@@ -291,7 +303,7 @@
unsigned int i;
for (i = 0; i < tp->num_tests; ++i)
- ret |= tp->test_to_run[i]->callback();
+ ret |= (int) tp->test_to_run[i]->callback();
return ret;
}
diff --git a/app/test-bbdev/main.h b/app/test-bbdev/main.h
index 2bbe1b8..23b4d58 100644
--- a/app/test-bbdev/main.h
+++ b/app/test-bbdev/main.h
@@ -20,6 +20,7 @@
#define DEFAULT_BURST 32U
#define DEFAULT_OPS 64U
+
#define TEST_ASSERT(cond, msg, ...) do { \
if (!(cond)) { \
printf("TestCase %s() line %d failed: " \
@@ -103,7 +104,8 @@ struct test_command {
.command = RTE_STR(name), \
.callback = test_func_##name, \
}; \
- RTE_INIT(test_register_##name) \
+ static void __attribute__((constructor, used)) \
+ test_register_##name(void) \
{ \
add_test_command(&test_struct_##name); \
}
@@ -116,4 +118,6 @@ struct test_command {
unsigned int get_num_lcores(void);
+bool get_init_device(void);
+
#endif
diff --git a/app/test-bbdev/meson.build b/app/test-bbdev/meson.build
index eb8cc04..d3f2b77 100644
--- a/app/test-bbdev/meson.build
+++ b/app/test-bbdev/meson.build
@@ -7,3 +7,6 @@ sources = files('main.c',
'test_bbdev_vector.c')
allow_experimental_apis = true
deps += ['bbdev', 'bus_vdev']
+if dpdk_conf.has('RTE_LIBRTE_PMD_FPGA_LTE_FEC')
+ deps += ['bbdev_fpga_lte_fec']
+endif
diff --git a/app/test-bbdev/test-bbdev.py b/app/test-bbdev/test-bbdev.py
index 25340ec..0194be0 100755
--- a/app/test-bbdev/test-bbdev.py
+++ b/app/test-bbdev/test-bbdev.py
@@ -59,6 +59,9 @@ def kill(process):
type=int,
help="Number of lcores to run.",
default=16)
+parser.add_argument("-i", "--init-device",
+ action='store_true',
+ help="Initialise PF device with default values.")
args = parser.parse_args()
@@ -82,6 +85,10 @@ def kill(process):
params.extend(["-c"])
params.extend([",".join(args.test_cases)])
+if args.init_device:
+ params.extend(["-i"])
+
exit_status = 0
for vector in args.test_vector:
for burst_size in args.burst_size:
diff --git a/app/test-bbdev/test_bbdev.c b/app/test-bbdev/test_bbdev.c
index 137c74c..ac06d73 100644
--- a/app/test-bbdev/test_bbdev.c
+++ b/app/test-bbdev/test_bbdev.c
@@ -14,8 +14,6 @@
#include <rte_bbdev.h>
#include <rte_bbdev_op.h>
#include <rte_bbdev_pmd.h>
-#include<string.h>
-#include <rte_string_fns.h>
#include "main.h"
@@ -770,7 +768,7 @@ struct bbdev_testsuite_params {
{
struct rte_bbdev *dev1, *dev2;
const char *name = "dev_name";
- char name_tmp[16];
+ char name_tmp[32];
int num_devs, num_devs_tmp;
dev1 = rte_bbdev_allocate(NULL);
@@ -790,14 +788,14 @@ struct bbdev_testsuite_params {
/* Initialize the maximum amount of devices */
do {
- snprintf(name_tmp, sizeof(name_tmp), "%s%i", "name_", num_devs);
+ sprintf(name_tmp, "%s%i", "name_", num_devs);
dev2 = rte_bbdev_allocate(name_tmp);
TEST_ASSERT(dev2 != NULL,
"Failed to initialize bbdev driver");
++num_devs;
} while (num_devs < (RTE_BBDEV_MAX_DEVS - 1));
- snprintf(name_tmp, sizeof(name_tmp), "%s%i", "name_", num_devs);
+ sprintf(name_tmp, "%s%i", "name_", num_devs);
dev2 = rte_bbdev_allocate(name_tmp);
TEST_ASSERT(dev2 == NULL, "Failed to initialize bbdev driver number %d "
"more drivers than RTE_BBDEV_MAX_DEVS: %d ", num_devs,
@@ -806,7 +804,7 @@ struct bbdev_testsuite_params {
num_devs--;
while (num_devs >= num_devs_tmp) {
- snprintf(name_tmp, sizeof(name_tmp), "%s%i", "name_", num_devs);
+ sprintf(name_tmp, "%s%i", "name_", num_devs);
dev2 = rte_bbdev_get_named_dev(name_tmp);
TEST_ASSERT_SUCCESS(rte_bbdev_release(dev2),
"Failed to uninitialize bbdev driver %s ",
@@ -827,7 +825,7 @@ struct bbdev_testsuite_params {
TEST_ASSERT_FAIL(rte_bbdev_release(NULL),
"Failed to uninitialize bbdev driver with NULL bbdev");
- strlcpy(name_tmp, "invalid_name", sizeof(name_tmp));
+ sprintf(name_tmp, "%s", "invalid_name");
dev2 = rte_bbdev_get_named_dev(name_tmp);
TEST_ASSERT_FAIL(rte_bbdev_release(dev2),
"Failed to uninitialize bbdev driver with invalid name");
diff --git a/app/test-bbdev/test_bbdev_perf.c b/app/test-bbdev/test_bbdev_perf.c
index d18ddae..19585b7 100644
--- a/app/test-bbdev/test_bbdev_perf.c
+++ b/app/test-bbdev/test_bbdev_perf.c
@@ -16,6 +16,11 @@
#include <rte_malloc.h>
#include <rte_random.h>
#include <rte_hexdump.h>
+#include <rte_interrupts.h>
+
+#ifdef RTE_LIBRTE_PMD_FPGA_LTE_FEC
+#include <fpga_lte_fec.h>
+#endif
#include "main.h"
#include "test_bbdev_vector.h"
@@ -25,6 +30,18 @@
#define MAX_QUEUES RTE_MAX_LCORE
#define TEST_REPETITIONS 1000
+#ifdef RTE_LIBRTE_PMD_FPGA_LTE_FEC
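+/*
+ * Default FPGA LTE FEC PF configuration values, applied when the
+ * -i/--init-device option is given.
+ */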
+#define FPGA_PF_DRIVER_NAME ("intel_fpga_lte_fec_pf")
+#define FPGA_VF_DRIVER_NAME ("intel_fpga_lte_fec_vf")
+#define VF_UL_QUEUE_VALUE 4
+#define VF_DL_QUEUE_VALUE 4
+#define UL_BANDWIDTH 3
+#define DL_BANDWIDTH 3
+#define UL_LOAD_BALANCE 128
+#define DL_LOAD_BALANCE 128
+#define FLR_TIMEOUT 610
+#endif
+
#define OPS_CACHE_SIZE 256U
#define OPS_POOL_SIZE_MIN 511U /* 0.5K per queue */
@@ -49,6 +66,8 @@
struct rte_mempool *in_mbuf_pool;
struct rte_mempool *hard_out_mbuf_pool;
struct rte_mempool *soft_out_mbuf_pool;
+ struct rte_mempool *harq_in_mbuf_pool;
+ struct rte_mempool *harq_out_mbuf_pool;
} active_devs[RTE_BBDEV_MAX_DEVS];
static uint8_t nb_active_devs;
@@ -58,6 +77,8 @@ struct test_buffers {
struct rte_bbdev_op_data *inputs;
struct rte_bbdev_op_data *hard_outputs;
struct rte_bbdev_op_data *soft_outputs;
+ struct rte_bbdev_op_data *harq_inputs;
+ struct rte_bbdev_op_data *harq_outputs;
};
/* Operation parameters specific for given test case */
@@ -128,6 +149,13 @@ typedef int (test_case_function)(struct active_device *ad,
} while (m != NULL);
}
+/* Read flag value 0/1 from bitmap */
+static inline bool
+check_bit(uint32_t bitmap, uint32_t bitmask)
+{
+ return bitmap & bitmask;
+}
+
static inline void
set_avail_op(struct active_device *ad, enum rte_bbdev_op_type op_type)
{
@@ -158,12 +186,15 @@ typedef int (test_case_function)(struct active_device *ad,
check_dev_cap(const struct rte_bbdev_info *dev_info)
{
unsigned int i;
- unsigned int nb_inputs, nb_soft_outputs, nb_hard_outputs;
+ unsigned int nb_inputs, nb_soft_outputs, nb_hard_outputs,
+ nb_harq_inputs, nb_harq_outputs;
const struct rte_bbdev_op_cap *op_cap = dev_info->drv.capabilities;
nb_inputs = test_vector.entries[DATA_INPUT].nb_segments;
nb_soft_outputs = test_vector.entries[DATA_SOFT_OUTPUT].nb_segments;
nb_hard_outputs = test_vector.entries[DATA_HARD_OUTPUT].nb_segments;
+ nb_harq_inputs = test_vector.entries[DATA_HARQ_INPUT].nb_segments;
+ nb_harq_outputs = test_vector.entries[DATA_HARQ_OUTPUT].nb_segments;
for (i = 0; op_cap->type != RTE_BBDEV_OP_NONE; ++i, ++op_cap) {
if (op_cap->type != test_vector.op_type)
@@ -180,7 +211,7 @@ typedef int (test_case_function)(struct active_device *ad,
!(cap->capability_flags &
RTE_BBDEV_TURBO_SOFT_OUTPUT)) {
printf(
- "WARNING: Device \"%s\" does not support soft output - soft output flags will be ignored.\n",
+ "INFO: Device \"%s\" does not support soft output - soft output flags will be ignored.\n",
dev_info->dev_name);
clear_soft_out_cap(
&test_vector.turbo_dec.op_flags);
@@ -233,7 +264,35 @@ typedef int (test_case_function)(struct active_device *ad,
if (nb_hard_outputs > cap->num_buffers_dst) {
printf(
"Too many hard outputs defined: %u, max: %u\n",
- nb_hard_outputs, cap->num_buffers_src);
+ nb_hard_outputs, cap->num_buffers_dst);
+ return TEST_FAILED;
+ }
+ if (intr_enabled && !(cap->capability_flags &
+ RTE_BBDEV_TURBO_ENC_INTERRUPTS)) {
+ printf(
+ "Dequeue interrupts are not supported!\n");
+ return TEST_FAILED;
+ }
+
+ return TEST_SUCCESS;
+ } else if (op_cap->type == RTE_BBDEV_OP_LDPC_ENC) {
+ const struct rte_bbdev_op_cap_ldpc_enc *cap =
+ &op_cap->cap.ldpc_enc;
+
+ if (!flags_match(test_vector.ldpc_enc.op_flags,
+ cap->capability_flags)) {
+ printf("Flag Mismatch\n");
+ return TEST_FAILED;
+ }
+ if (nb_inputs > cap->num_buffers_src) {
+ printf("Too many inputs defined: %u, max: %u\n",
+ nb_inputs, cap->num_buffers_src);
+ return TEST_FAILED;
+ }
+ if (nb_hard_outputs > cap->num_buffers_dst) {
+ printf(
+ "Too many hard outputs defined: %u, max: %u\n",
+ nb_hard_outputs, cap->num_buffers_dst);
return TEST_FAILED;
}
if (intr_enabled && !(cap->capability_flags &
@@ -244,6 +303,49 @@ typedef int (test_case_function)(struct active_device *ad,
}
return TEST_SUCCESS;
+ } else if (op_cap->type == RTE_BBDEV_OP_LDPC_DEC) {
+ const struct rte_bbdev_op_cap_ldpc_dec *cap =
+ &op_cap->cap.ldpc_dec;
+
+ if (!flags_match(test_vector.ldpc_dec.op_flags,
+ cap->capability_flags)) {
+ printf("Flag Mismatch\n");
+ return TEST_FAILED;
+ }
+ if (nb_inputs > cap->num_buffers_src) {
+ printf("Too many inputs defined: %u, max: %u\n",
+ nb_inputs, cap->num_buffers_src);
+ return TEST_FAILED;
+ }
+ if (nb_hard_outputs > cap->num_buffers_hard_out) {
+ printf(
+ "Too many hard outputs defined: %u, max: %u\n",
+ nb_hard_outputs,
+ cap->num_buffers_hard_out);
+ return TEST_FAILED;
+ }
+ if (nb_harq_inputs > cap->num_buffers_hard_out) {
+ printf(
+ "Too many HARQ inputs defined: %u, max: %u\n",
+ nb_harq_inputs,
+ cap->num_buffers_hard_out);
+ return TEST_FAILED;
+ }
+ if (nb_harq_outputs > cap->num_buffers_hard_out) {
+ printf(
+ "Too many HARQ outputs defined: %u, max: %u\n",
+ nb_harq_outputs,
+ cap->num_buffers_hard_out);
+ return TEST_FAILED;
+ }
+ if (intr_enabled && !(cap->capability_flags &
+ RTE_BBDEV_TURBO_DEC_INTERRUPTS)) {
+ printf(
+ "Dequeue interrupts are not supported!\n");
+ return TEST_FAILED;
+ }
+
+ return TEST_SUCCESS;
}
}
@@ -297,6 +399,10 @@ typedef int (test_case_function)(struct active_device *ad,
&test_vector.entries[DATA_HARD_OUTPUT];
struct op_data_entries *soft_out =
&test_vector.entries[DATA_SOFT_OUTPUT];
+ struct op_data_entries *harq_in =
+ &test_vector.entries[DATA_HARQ_INPUT];
+ struct op_data_entries *harq_out =
+ &test_vector.entries[DATA_HARQ_OUTPUT];
/* allocate ops mempool */
ops_pool_size = optimal_mempool_size(RTE_MAX(
@@ -350,22 +456,53 @@ typedef int (test_case_function)(struct active_device *ad,
socket_id);
ad->hard_out_mbuf_pool = mp;
- if (soft_out->nb_segments == 0)
- return TEST_SUCCESS;
/* Soft outputs */
- mbuf_pool_size = optimal_mempool_size(ops_pool_size *
- soft_out->nb_segments);
- mp = create_mbuf_pool(soft_out, ad->dev_id, socket_id, mbuf_pool_size,
- "soft_out");
- TEST_ASSERT_NOT_NULL(mp,
- "ERROR Failed to create %uB soft output pktmbuf pool for dev %u on socket %u.",
- mbuf_pool_size,
- ad->dev_id,
- socket_id);
- ad->soft_out_mbuf_pool = mp;
+ if (soft_out->nb_segments > 0) {
+ mbuf_pool_size = optimal_mempool_size(ops_pool_size *
+ soft_out->nb_segments);
+ mp = create_mbuf_pool(soft_out, ad->dev_id, socket_id,
+ mbuf_pool_size,
+ "soft_out");
+ TEST_ASSERT_NOT_NULL(mp,
+ "ERROR Failed to create %uB soft output pktmbuf pool for dev %u on socket %u.",
+ mbuf_pool_size,
+ ad->dev_id,
+ socket_id);
+ ad->soft_out_mbuf_pool = mp;
+ }
- return 0;
+ /* HARQ inputs */
+ if (harq_in->nb_segments > 0) {
+ mbuf_pool_size = optimal_mempool_size(ops_pool_size *
+ harq_in->nb_segments);
+ mp = create_mbuf_pool(harq_in, ad->dev_id, socket_id,
+ mbuf_pool_size,
+ "harq_in");
+ TEST_ASSERT_NOT_NULL(mp,
+ "ERROR Failed to create %uB harq input pktmbuf pool for dev %u on socket %u.",
+ mbuf_pool_size,
+ ad->dev_id,
+ socket_id);
+ ad->harq_in_mbuf_pool = mp;
+ }
+
+ /* HARQ outputs */
+ if (harq_out->nb_segments > 0) {
+ mbuf_pool_size = optimal_mempool_size(ops_pool_size *
+ harq_out->nb_segments);
+ mp = create_mbuf_pool(harq_out, ad->dev_id, socket_id,
+ mbuf_pool_size,
+ "harq_out");
+ TEST_ASSERT_NOT_NULL(mp,
+ "ERROR Failed to create %uB harq output pktmbuf pool for dev %u on socket %u.",
+ mbuf_pool_size,
+ ad->dev_id,
+ socket_id);
+ ad->harq_out_mbuf_pool = mp;
+ }
+
+ return TEST_SUCCESS;
}
static int
@@ -379,7 +516,58 @@ typedef int (test_case_function)(struct active_device *ad,
unsigned int nb_queues;
enum rte_bbdev_op_type op_type = vector->op_type;
+/* Configure fpga lte fec with PF & VF values
+ * if '-i' flag is set and using fpga device
+ */
+#ifndef RTE_BUILD_SHARED_LIB
+#ifdef RTE_LIBRTE_PMD_FPGA_LTE_FEC
+ if ((get_init_device() == true) &&
+ (!strcmp(info->drv.driver_name, FPGA_PF_DRIVER_NAME))) {
+ struct fpga_lte_fec_conf conf;
+ unsigned int i;
+
+ printf("Configure FPGA FEC Driver %s with default values\n",
+ info->drv.driver_name);
+
+ /* clear default configuration before initialization */
+ memset(&conf, 0, sizeof(struct fpga_lte_fec_conf));
+
+ /* Set PF mode :
+ * true if PF is used for data plane
+ * false for VFs
+ */
+ conf.pf_mode_en = true;
+
+ for (i = 0; i < FPGA_LTE_FEC_NUM_VFS; ++i) {
+ /* Number of UL queues per VF (fpga supports 8 VFs) */
+ conf.vf_ul_queues_number[i] = VF_UL_QUEUE_VALUE;
+ /* Number of DL queues per VF (fpga supports 8 VFs) */
+ conf.vf_dl_queues_number[i] = VF_DL_QUEUE_VALUE;
+ }
+
+ /* UL bandwidth. Needed for scheduling algorithm */
+ conf.ul_bandwidth = UL_BANDWIDTH;
+ /* DL bandwidth */
+ conf.dl_bandwidth = DL_BANDWIDTH;
+
+ /* UL & DL load Balance Factor to 128 */
+ conf.ul_load_balance = UL_LOAD_BALANCE;
+ conf.dl_load_balance = DL_LOAD_BALANCE;
+
+ /* FLR timeout value */
+ conf.flr_time_out = FLR_TIMEOUT;
+
+ /* setup FPGA PF with configuration information */
+ ret = fpga_lte_fec_configure(info->dev_name, &conf);
+ TEST_ASSERT_SUCCESS(ret,
+ "Failed to configure 4G FPGA PF for bbdev %s",
+ info->dev_name);
+ }
+#endif
+#endif
nb_queues = RTE_MIN(rte_lcore_count(), info->drv.max_num_queues);
+ nb_queues = RTE_MIN(nb_queues, (unsigned int) MAX_QUEUES);
+
/* setup device */
ret = rte_bbdev_setup_queues(dev_id, nb_queues, info->socket_id);
if (ret < 0) {
@@ -596,7 +784,7 @@ typedef int (test_case_function)(struct active_device *ad,
bufs[i].offset = 0;
bufs[i].length = 0;
- if (op_type == DATA_INPUT) {
+ if ((op_type == DATA_INPUT) || (op_type == DATA_HARQ_INPUT)) {
data = rte_pktmbuf_append(m_head, seg->length);
TEST_ASSERT_NOT_NULL(data,
"Couldn't append %u bytes to mbuf from %d data type mbuf pool",
@@ -635,7 +823,6 @@ typedef int (test_case_function)(struct active_device *ad,
"Couldn't chain mbufs from %d data type mbuf pool",
op_type);
}
-
} else {
/* allocate chained-mbuf for output buffer */
@@ -682,7 +869,7 @@ typedef int (test_case_function)(struct active_device *ad,
static void
limit_input_llr_val_range(struct rte_bbdev_op_data *input_ops,
- uint16_t n, int8_t max_llr_modulus)
+ const uint16_t n, const int8_t max_llr_modulus)
{
uint16_t i, byte_idx;
@@ -701,10 +888,50 @@ typedef int (test_case_function)(struct active_device *ad,
}
}
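+/*
+ * Rescale input LLRs to the fixed-point format advertised by the device
+ * (llr_size bits with llr_decimals fractional bits), saturating to the
+ * representable range.
+ */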
+static void
+ldpc_input_llr_scaling(struct rte_bbdev_op_data *input_ops,
+ const uint16_t n, const int8_t llr_size,
+ const int8_t llr_decimals)
+{
+ if (input_ops == NULL)
+ return;
+
+ uint16_t i, byte_idx;
+
+ int16_t llr_max, llr_min, llr_tmp;
+ llr_max = (1 << (llr_size - 1)) - 1;
+ llr_min = -llr_max;
+ for (i = 0; i < n; ++i) {
+ struct rte_mbuf *m = input_ops[i].data;
+ while (m != NULL) {
+ int8_t *llr = rte_pktmbuf_mtod_offset(m, int8_t *,
+ input_ops[i].offset);
+ for (byte_idx = 0; byte_idx < rte_pktmbuf_data_len(m);
+ ++byte_idx) {
+
+ llr_tmp = llr[byte_idx];
+ if (llr_decimals == 2)
+ llr_tmp *= 2;
+ else if (llr_decimals == 0)
+ llr_tmp /= 2;
+ llr_tmp = RTE_MIN(llr_max,
+ RTE_MAX(llr_min, llr_tmp));
+ llr[byte_idx] = (int8_t) llr_tmp;
+ }
+
+ m = m->next;
+ }
+ }
+}
+
static int
fill_queue_buffers(struct test_op_params *op_params,
struct rte_mempool *in_mp, struct rte_mempool *hard_out_mp,
- struct rte_mempool *soft_out_mp, uint16_t queue_id,
+ struct rte_mempool *soft_out_mp,
+ struct rte_mempool *harq_in_mp, struct rte_mempool *harq_out_mp,
+ uint16_t queue_id,
const struct rte_bbdev_op_cap *capabilities,
uint16_t min_alignment, const int socket_id)
{
@@ -716,12 +943,16 @@ typedef int (test_case_function)(struct active_device *ad,
in_mp,
soft_out_mp,
hard_out_mp,
+ harq_in_mp,
+ harq_out_mp,
};
struct rte_bbdev_op_data **queue_ops[DATA_NUM_TYPES] = {
&op_params->q_bufs[socket_id][queue_id].inputs,
&op_params->q_bufs[socket_id][queue_id].soft_outputs,
&op_params->q_bufs[socket_id][queue_id].hard_outputs,
+ &op_params->q_bufs[socket_id][queue_id].harq_inputs,
+ &op_params->q_bufs[socket_id][queue_id].harq_outputs,
};
for (type = DATA_INPUT; type < DATA_NUM_TYPES; ++type) {
@@ -746,6 +977,15 @@ typedef int (test_case_function)(struct active_device *ad,
limit_input_llr_val_range(*queue_ops[DATA_INPUT], n,
capabilities->cap.turbo_dec.max_llr_modulus);
+ if (test_vector.op_type == RTE_BBDEV_OP_LDPC_DEC) {
+ ldpc_input_llr_scaling(*queue_ops[DATA_INPUT], n,
+ capabilities->cap.ldpc_dec.llr_size,
+ capabilities->cap.ldpc_dec.llr_decimals);
+ ldpc_input_llr_scaling(*queue_ops[DATA_HARQ_INPUT], n,
+ capabilities->cap.ldpc_dec.llr_size,
+ capabilities->cap.ldpc_dec.llr_decimals);
+ }
+
return 0;
}
@@ -758,12 +998,16 @@ typedef int (test_case_function)(struct active_device *ad,
rte_mempool_free(ad->in_mbuf_pool);
rte_mempool_free(ad->hard_out_mbuf_pool);
rte_mempool_free(ad->soft_out_mbuf_pool);
+ rte_mempool_free(ad->harq_in_mbuf_pool);
+ rte_mempool_free(ad->harq_out_mbuf_pool);
for (i = 0; i < rte_lcore_count(); ++i) {
for (j = 0; j < RTE_MAX_NUMA_NODES; ++j) {
rte_free(op_params->q_bufs[j][i].inputs);
rte_free(op_params->q_bufs[j][i].hard_outputs);
rte_free(op_params->q_bufs[j][i].soft_outputs);
+ rte_free(op_params->q_bufs[j][i].harq_inputs);
+ rte_free(op_params->q_bufs[j][i].harq_outputs);
}
}
}
@@ -863,6 +1107,93 @@ typedef int (test_case_function)(struct active_device *ad,
}
}
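+/* Copy the reference LDPC decode op to every op to be enqueued and attach
+ * the per-op input/output buffers and the optional soft output and HARQ
+ * buffers.
+ */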
+static void
+copy_reference_ldpc_dec_op(struct rte_bbdev_dec_op **ops, unsigned int n,
+ unsigned int start_idx,
+ struct rte_bbdev_op_data *inputs,
+ struct rte_bbdev_op_data *hard_outputs,
+ struct rte_bbdev_op_data *soft_outputs,
+ struct rte_bbdev_op_data *harq_inputs,
+ struct rte_bbdev_op_data *harq_outputs,
+ struct rte_bbdev_dec_op *ref_op)
+{
+ unsigned int i;
+ struct rte_bbdev_op_ldpc_dec *ldpc_dec = &ref_op->ldpc_dec;
+
+ for (i = 0; i < n; ++i) {
+ if (ldpc_dec->code_block_mode == 0) {
+ ops[i]->ldpc_dec.tb_params.ea =
+ ldpc_dec->tb_params.ea;
+ ops[i]->ldpc_dec.tb_params.eb =
+ ldpc_dec->tb_params.eb;
+ ops[i]->ldpc_dec.tb_params.c =
+ ldpc_dec->tb_params.c;
+ ops[i]->ldpc_dec.tb_params.cab =
+ ldpc_dec->tb_params.cab;
+ ops[i]->ldpc_dec.tb_params.r =
+ ldpc_dec->tb_params.r;
+ } else {
+ ops[i]->ldpc_dec.cb_params.e = ldpc_dec->cb_params.e;
+ }
+
+ ops[i]->ldpc_dec.basegraph = ldpc_dec->basegraph;
+ ops[i]->ldpc_dec.z_c = ldpc_dec->z_c;
+ ops[i]->ldpc_dec.q_m = ldpc_dec->q_m;
+ ops[i]->ldpc_dec.n_filler = ldpc_dec->n_filler;
+ ops[i]->ldpc_dec.n_cb = ldpc_dec->n_cb;
+ ops[i]->ldpc_dec.iter_max = ldpc_dec->iter_max;
+ ops[i]->ldpc_dec.rv_index = ldpc_dec->rv_index;
+ ops[i]->ldpc_dec.op_flags = ldpc_dec->op_flags;
+ ops[i]->ldpc_dec.code_block_mode = ldpc_dec->code_block_mode;
+
+ ops[i]->ldpc_dec.hard_output = hard_outputs[start_idx + i];
+ ops[i]->ldpc_dec.input = inputs[start_idx + i];
+ if (soft_outputs != NULL)
+ ops[i]->ldpc_dec.soft_output =
+ soft_outputs[start_idx + i];
+ if (harq_inputs != NULL)
+ ops[i]->ldpc_dec.harq_combined_input =
+ harq_inputs[start_idx + i];
+ if (harq_outputs != NULL)
+ ops[i]->ldpc_dec.harq_combined_output =
+ harq_outputs[start_idx + i];
+ }
+}
+
+static void
+copy_reference_ldpc_enc_op(struct rte_bbdev_enc_op **ops, unsigned int n,
+ unsigned int start_idx,
+ struct rte_bbdev_op_data *inputs,
+ struct rte_bbdev_op_data *outputs,
+ struct rte_bbdev_enc_op *ref_op)
+{
+ unsigned int i;
+ struct rte_bbdev_op_ldpc_enc *ldpc_enc = &ref_op->ldpc_enc;
+ for (i = 0; i < n; ++i) {
+ if (ldpc_enc->code_block_mode == 0) {
+ ops[i]->ldpc_enc.tb_params.ea = ldpc_enc->tb_params.ea;
+ ops[i]->ldpc_enc.tb_params.eb = ldpc_enc->tb_params.eb;
+ ops[i]->ldpc_enc.tb_params.cab =
+ ldpc_enc->tb_params.cab;
+ ops[i]->ldpc_enc.tb_params.c = ldpc_enc->tb_params.c;
+ ops[i]->ldpc_enc.tb_params.r = ldpc_enc->tb_params.r;
+ } else {
+ ops[i]->ldpc_enc.cb_params.e = ldpc_enc->cb_params.e;
+ }
+ ops[i]->ldpc_enc.basegraph = ldpc_enc->basegraph;
+ ops[i]->ldpc_enc.z_c = ldpc_enc->z_c;
+ ops[i]->ldpc_enc.q_m = ldpc_enc->q_m;
+ ops[i]->ldpc_enc.n_filler = ldpc_enc->n_filler;
+ ops[i]->ldpc_enc.n_cb = ldpc_enc->n_cb;
+ ops[i]->ldpc_enc.rv_index = ldpc_enc->rv_index;
+ ops[i]->ldpc_enc.op_flags = ldpc_enc->op_flags;
+ ops[i]->ldpc_enc.code_block_mode = ldpc_enc->code_block_mode;
+ ops[i]->ldpc_enc.output = outputs[start_idx + i];
+ ops[i]->ldpc_enc.input = inputs[start_idx + i];
+ }
+}
+
static int
check_dec_status_and_ordering(struct rte_bbdev_dec_op *op,
unsigned int order_idx, const int expected_status)
@@ -975,6 +1306,64 @@ typedef int (test_case_function)(struct active_device *ad,
return TEST_SUCCESS;
}
+
+static int
+validate_ldpc_dec_op(struct rte_bbdev_dec_op **ops, const uint16_t n,
+ struct rte_bbdev_dec_op *ref_op, const int vector_mask)
+{
+ unsigned int i;
+ int ret;
+ struct op_data_entries *hard_data_orig =
+ &test_vector.entries[DATA_HARD_OUTPUT];
+ struct op_data_entries *soft_data_orig =
+ &test_vector.entries[DATA_SOFT_OUTPUT];
+ struct op_data_entries *harq_data_orig =
+ &test_vector.entries[DATA_HARQ_OUTPUT];
+ struct rte_bbdev_op_ldpc_dec *ops_td;
+ struct rte_bbdev_op_data *hard_output;
+ struct rte_bbdev_op_data *harq_output;
+ struct rte_bbdev_op_data *soft_output;
+ struct rte_bbdev_op_ldpc_dec *ref_td = &ref_op->ldpc_dec;
+
+ for (i = 0; i < n; ++i) {
+ ops_td = &ops[i]->ldpc_dec;
+ hard_output = &ops_td->hard_output;
+ harq_output = &ops_td->harq_combined_output;
+ soft_output = &ops_td->soft_output;
+
+ ret = check_dec_status_and_ordering(ops[i], i, ref_op->status);
+ TEST_ASSERT_SUCCESS(ret,
+ "Checking status and ordering for decoder failed");
+ if (vector_mask & TEST_BBDEV_VF_EXPECTED_ITER_COUNT)
+ TEST_ASSERT(ops_td->iter_count <= ref_td->iter_count,
+ "Returned iter_count (%d) > expected iter_count (%d)",
+ ops_td->iter_count, ref_td->iter_count);
+ /* We can ignore data when the decoding failed to converge */
+ if ((ops[i]->status & (1 << RTE_BBDEV_SYNDROME_ERROR)) == 0)
+ TEST_ASSERT_SUCCESS(validate_op_chain(hard_output,
+ hard_data_orig),
+ "Hard output buffers (CB=%u) are not equal",
+ i);
+
+ if (ref_op->ldpc_dec.op_flags & RTE_BBDEV_LDPC_SOFT_OUT_ENABLE)
+ TEST_ASSERT_SUCCESS(validate_op_chain(soft_output,
+ soft_data_orig),
+ "Soft output buffers (CB=%u) are not equal",
+ i);
+ if (ref_op->ldpc_dec.op_flags &
+ RTE_BBDEV_LDPC_HQ_COMBINE_OUT_ENABLE) {
+ ldpc_input_llr_scaling(harq_output, 1, 8, 0);
+ TEST_ASSERT_SUCCESS(validate_op_chain(harq_output,
+ harq_data_orig),
+ "HARQ output buffers (CB=%u) are not equal",
+ i);
+ }
+ }
+
+ return TEST_SUCCESS;
+}
+
static int
validate_enc_op(struct rte_bbdev_enc_op **ops, const uint16_t n,
struct rte_bbdev_enc_op *ref_op)
@@ -998,6 +1387,29 @@ typedef int (test_case_function)(struct active_device *ad,
return TEST_SUCCESS;
}
+static int
+validate_ldpc_enc_op(struct rte_bbdev_enc_op **ops, const uint16_t n,
+ struct rte_bbdev_enc_op *ref_op)
+{
+ unsigned int i;
+ int ret;
+ struct op_data_entries *hard_data_orig =
+ &test_vector.entries[DATA_HARD_OUTPUT];
+
+ for (i = 0; i < n; ++i) {
+ ret = check_enc_status_and_ordering(ops[i], i, ref_op->status);
+ TEST_ASSERT_SUCCESS(ret,
+ "Checking status and ordering for encoder failed");
+ TEST_ASSERT_SUCCESS(validate_op_chain(
+ &ops[i]->ldpc_enc.output,
+ hard_data_orig),
+ "Output buffers (CB=%u) are not equal",
+ i);
+ }
+
+ return TEST_SUCCESS;
+}
+
static void
create_reference_dec_op(struct rte_bbdev_dec_op *op)
{
@@ -1012,6 +1424,27 @@ typedef int (test_case_function)(struct active_device *ad,
}
static void
+create_reference_ldpc_dec_op(struct rte_bbdev_dec_op *op)
+{
+ unsigned int i;
+ struct op_data_entries *entry;
+
+ op->ldpc_dec = test_vector.ldpc_dec;
+ entry = &test_vector.entries[DATA_INPUT];
+ for (i = 0; i < entry->nb_segments; ++i)
+ op->ldpc_dec.input.length +=
+ entry->segments[i].length;
+ if (test_vector.ldpc_dec.op_flags &
+ RTE_BBDEV_LDPC_HQ_COMBINE_IN_ENABLE) {
+ entry = &test_vector.entries[DATA_HARQ_INPUT];
+ for (i = 0; i < entry->nb_segments; ++i)
+ op->ldpc_dec.harq_combined_input.length +=
+ entry->segments[i].length;
+ }
+}
+
+static void
create_reference_enc_op(struct rte_bbdev_enc_op *op)
{
unsigned int i;
@@ -1024,6 +1457,19 @@ typedef int (test_case_function)(struct active_device *ad,
entry->segments[i].length;
}
+static void
+create_reference_ldpc_enc_op(struct rte_bbdev_enc_op *op)
+{
+ unsigned int i;
+ struct op_data_entries *entry;
+
+ op->ldpc_enc = test_vector.ldpc_enc;
+ entry = &test_vector.entries[DATA_INPUT];
+ for (i = 0; i < entry->nb_segments; ++i)
+ op->ldpc_enc.input.length +=
+ entry->segments[i].length;
+}
+
static uint32_t
calc_dec_TB_size(struct rte_bbdev_dec_op *op)
{
@@ -1044,6 +1490,25 @@ typedef int (test_case_function)(struct active_device *ad,
}
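+/* Transport block size in information bits for an LDPC decode op:
+ * systematic columns (22 for BG1, 10 for BG2) * Zc, minus filler bits,
+ * accumulated over the code blocks processed.
+ */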
static uint32_t
+calc_ldpc_dec_TB_size(struct rte_bbdev_dec_op *op)
+{
+ uint8_t i;
+ uint32_t c, r, tb_size = 0;
+ uint16_t sys_cols = (op->ldpc_dec.basegraph == 1) ? 22 : 10;
+
+ if (op->ldpc_dec.code_block_mode) {
+ tb_size = sys_cols * op->ldpc_dec.z_c - op->ldpc_dec.n_filler;
+ } else {
+ c = op->ldpc_dec.tb_params.c;
+ r = op->ldpc_dec.tb_params.r;
+ for (i = 0; i < c-r; i++)
+ tb_size += sys_cols * op->ldpc_dec.z_c
+ - op->ldpc_dec.n_filler;
+ }
+ return tb_size;
+}
+
+static uint32_t
calc_enc_TB_size(struct rte_bbdev_enc_op *op)
{
uint8_t i;
@@ -1062,6 +1527,26 @@ typedef int (test_case_function)(struct active_device *ad,
return tb_size;
}
+static uint32_t
+calc_ldpc_enc_TB_size(struct rte_bbdev_enc_op *op)
+{
+ uint8_t i;
+ uint32_t c, r, tb_size = 0;
+ uint16_t sys_cols = (op->ldpc_enc.basegraph == 1) ? 22 : 10;
+
+ if (op->ldpc_enc.code_block_mode) {
+ tb_size = sys_cols * op->ldpc_enc.z_c - op->ldpc_enc.n_filler;
+ } else {
+ c = op->ldpc_enc.tb_params.c;
+ r = op->ldpc_enc.tb_params.r;
+ for (i = 0; i < c-r; i++)
+ tb_size += sys_cols * op->ldpc_enc.z_c
+ - op->ldpc_enc.n_filler;
+ }
+ return tb_size;
+}
+
static int
init_test_op_params(struct test_op_params *op_params,
enum rte_bbdev_op_type op_type, const int expected_status,
@@ -1069,7 +1554,8 @@ typedef int (test_case_function)(struct active_device *ad,
uint16_t burst_sz, uint16_t num_to_process, uint16_t num_lcores)
{
int ret = 0;
- if (op_type == RTE_BBDEV_OP_TURBO_DEC)
+ if (op_type == RTE_BBDEV_OP_TURBO_DEC ||
+ op_type == RTE_BBDEV_OP_LDPC_DEC)
ret = rte_bbdev_dec_op_alloc_bulk(ops_mp,
&op_params->ref_dec_op, 1);
else
@@ -1083,11 +1569,12 @@ typedef int (test_case_function)(struct active_device *ad,
op_params->num_to_process = num_to_process;
op_params->num_lcores = num_lcores;
op_params->vector_mask = vector_mask;
- if (op_type == RTE_BBDEV_OP_TURBO_DEC)
+ if (op_type == RTE_BBDEV_OP_TURBO_DEC ||
+ op_type == RTE_BBDEV_OP_LDPC_DEC)
op_params->ref_dec_op->status = expected_status;
- else if (op_type == RTE_BBDEV_OP_TURBO_ENC)
+ else if (op_type == RTE_BBDEV_OP_TURBO_ENC
+ || op_type == RTE_BBDEV_OP_LDPC_ENC)
op_params->ref_enc_op->status = expected_status;
-
return 0;
}
@@ -1133,27 +1620,35 @@ typedef int (test_case_function)(struct active_device *ad,
goto fail;
}
- if (test_vector.op_type == RTE_BBDEV_OP_TURBO_DEC) {
- /* Find Decoder capabilities */
- const struct rte_bbdev_op_cap *cap = info.drv.capabilities;
- while (cap->type != RTE_BBDEV_OP_NONE) {
- if (cap->type == RTE_BBDEV_OP_TURBO_DEC) {
- capabilities = cap;
- break;
- }
- }
- TEST_ASSERT_NOT_NULL(capabilities,
- "Couldn't find Decoder capabilities");
- create_reference_dec_op(op_params->ref_dec_op);
+ /* Find capabilities */
+ const struct rte_bbdev_op_cap *cap = info.drv.capabilities;
+ for (i = 0; i < RTE_BBDEV_OP_TYPE_COUNT; i++) {
+ if (cap->type == test_vector.op_type) {
+ capabilities = cap;
+ break;
+ }
+ cap++;
+ }
+ TEST_ASSERT_NOT_NULL(capabilities,
+ "Couldn't find capabilities");
+
+ if (test_vector.op_type == RTE_BBDEV_OP_TURBO_DEC) {
+ create_reference_dec_op(op_params->ref_dec_op);
} else if (test_vector.op_type == RTE_BBDEV_OP_TURBO_ENC)
create_reference_enc_op(op_params->ref_enc_op);
+ else if (test_vector.op_type == RTE_BBDEV_OP_LDPC_ENC)
+ create_reference_ldpc_enc_op(op_params->ref_enc_op);
+ else if (test_vector.op_type == RTE_BBDEV_OP_LDPC_DEC)
+ create_reference_ldpc_dec_op(op_params->ref_dec_op);
for (i = 0; i < ad->nb_queues; ++i) {
f_ret = fill_queue_buffers(op_params,
ad->in_mbuf_pool,
ad->hard_out_mbuf_pool,
ad->soft_out_mbuf_pool,
+ ad->harq_in_mbuf_pool,
+ ad->harq_out_mbuf_pool,
ad->queue_ids[i],
capabilities,
info.drv.min_alignment,
@@ -1212,9 +1707,7 @@ typedef int (test_case_function)(struct active_device *ad,
uint16_t deq, burst_sz, num_ops;
uint16_t queue_id = *(uint16_t *) ret_param;
struct rte_bbdev_info info;
-
double tb_len_bits;
-
struct thread_params *tp = cb_arg;
/* Find matching thread params using queue_id */
@@ -1238,7 +1731,8 @@ typedef int (test_case_function)(struct active_device *ad,
burst_sz = rte_atomic16_read(&tp->burst_sz);
num_ops = tp->op_params->num_to_process;
- if (test_vector.op_type == RTE_BBDEV_OP_TURBO_DEC)
+ if (test_vector.op_type == RTE_BBDEV_OP_TURBO_DEC ||
+ test_vector.op_type == RTE_BBDEV_OP_LDPC_DEC)
deq = rte_bbdev_dequeue_dec_ops(dev_id, queue_id,
&tp->dec_ops[
rte_atomic16_read(&tp->nb_dequeued)],
@@ -1282,6 +1776,15 @@ typedef int (test_case_function)(struct active_device *ad,
struct rte_bbdev_enc_op *ref_op = tp->op_params->ref_enc_op;
ret = validate_enc_op(tp->enc_ops, num_ops, ref_op);
rte_bbdev_enc_op_free_bulk(tp->enc_ops, deq);
+ } else if (test_vector.op_type == RTE_BBDEV_OP_LDPC_ENC) {
+ struct rte_bbdev_enc_op *ref_op = tp->op_params->ref_enc_op;
+ ret = validate_ldpc_enc_op(tp->enc_ops, num_ops, ref_op);
+ rte_bbdev_enc_op_free_bulk(tp->enc_ops, deq);
+ } else if (test_vector.op_type == RTE_BBDEV_OP_LDPC_DEC) {
+ struct rte_bbdev_dec_op *ref_op = tp->op_params->ref_dec_op;
+ ret = validate_ldpc_dec_op(tp->dec_ops, num_ops, ref_op,
+ tp->op_params->vector_mask);
+ rte_bbdev_dec_op_free_bulk(tp->dec_ops, deq);
}
if (ret) {
@@ -1296,6 +1799,12 @@ typedef int (test_case_function)(struct active_device *ad,
case RTE_BBDEV_OP_TURBO_ENC:
tb_len_bits = calc_enc_TB_size(tp->op_params->ref_enc_op);
break;
+ case RTE_BBDEV_OP_LDPC_DEC:
+ tb_len_bits = calc_ldpc_dec_TB_size(tp->op_params->ref_dec_op);
+ break;
+ case RTE_BBDEV_OP_LDPC_ENC:
+ tb_len_bits = calc_ldpc_enc_TB_size(tp->op_params->ref_enc_op);
+ break;
case RTE_BBDEV_OP_NONE:
tb_len_bits = 0.0;
break;
@@ -1375,8 +1884,8 @@ typedef int (test_case_function)(struct active_device *ad,
enq = 0;
do {
enq += rte_bbdev_enqueue_dec_ops(tp->dev_id,
- queue_id, &ops[enqueued],
- num_to_enq);
+ queue_id, &ops[enqueued],
+ num_to_enq);
} while (unlikely(num_to_enq != enq));
enqueued += enq;
@@ -1390,7 +1899,7 @@ typedef int (test_case_function)(struct active_device *ad,
rte_atomic16_set(&tp->burst_sz, num_to_enq);
/* Wait until processing of previous batch is
- * completed.
+ * completed
*/
while (rte_atomic16_read(&tp->nb_dequeued) !=
(int16_t) enqueued)
@@ -1479,7 +1988,7 @@ typedef int (test_case_function)(struct active_device *ad,
rte_atomic16_set(&tp->burst_sz, num_to_enq);
/* Wait until processing of previous batch is
- * completed.
+ * completed
*/
while (rte_atomic16_read(&tp->nb_dequeued) !=
(int16_t) enqueued)
@@ -1590,6 +2099,116 @@ typedef int (test_case_function)(struct active_device *ad,
}
static int
+throughput_pmd_lcore_ldpc_dec(void *arg)
+{
+ struct thread_params *tp = arg;
+ uint16_t enq, deq;
+ uint64_t total_time = 0, start_time;
+ const uint16_t queue_id = tp->queue_id;
+ const uint16_t burst_sz = tp->op_params->burst_sz;
+ const uint16_t num_ops = tp->op_params->num_to_process;
+ struct rte_bbdev_dec_op *ops_enq[num_ops];
+ struct rte_bbdev_dec_op *ops_deq[num_ops];
+ struct rte_bbdev_dec_op *ref_op = tp->op_params->ref_dec_op;
+ struct test_buffers *bufs = NULL;
+ int i, j, ret;
+ struct rte_bbdev_info info;
+ uint16_t num_to_enq;
+
+ TEST_ASSERT_SUCCESS((burst_sz > MAX_BURST),
+ "BURST_SIZE should be <= %u", MAX_BURST);
+
+ rte_bbdev_info_get(tp->dev_id, &info);
+
+ TEST_ASSERT_SUCCESS((num_ops > info.drv.queue_size_lim),
+ "NUM_OPS cannot exceed %u for this device",
+ info.drv.queue_size_lim);
+
+ bufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];
+
+ while (rte_atomic16_read(&tp->op_params->sync) == SYNC_WAIT)
+ rte_pause();
+
+ ret = rte_bbdev_dec_op_alloc_bulk(tp->op_params->mp, ops_enq, num_ops);
+ TEST_ASSERT_SUCCESS(ret, "Allocation failed for %d ops", num_ops);
+
+ /* For throughput tests we need to disable early termination */
+ if (check_bit(ref_op->ldpc_dec.op_flags,
+ RTE_BBDEV_LDPC_ITERATION_STOP_ENABLE))
+ ref_op->ldpc_dec.op_flags -=
+ RTE_BBDEV_LDPC_ITERATION_STOP_ENABLE;
+ ref_op->ldpc_dec.iter_max = 6;
+ ref_op->ldpc_dec.iter_count = ref_op->ldpc_dec.iter_max;
+
+ if (test_vector.op_type != RTE_BBDEV_OP_NONE)
+ copy_reference_ldpc_dec_op(ops_enq, num_ops, 0, bufs->inputs,
+ bufs->hard_outputs, bufs->soft_outputs,
+ bufs->harq_inputs, bufs->harq_outputs, ref_op);
+
+ /* Set counter to validate the ordering */
+ for (j = 0; j < num_ops; ++j)
+ ops_enq[j]->opaque_data = (void *)(uintptr_t)j;
+
+ for (i = 0; i < TEST_REPETITIONS; ++i) {
+ for (j = 0; j < num_ops; ++j) {
+ mbuf_reset(ops_enq[j]->ldpc_dec.hard_output.data);
+ if (check_bit(ref_op->ldpc_dec.op_flags,
+ RTE_BBDEV_LDPC_HQ_COMBINE_OUT_ENABLE))
+ mbuf_reset(
+ ops_enq[j]->ldpc_dec.harq_combined_output.data);
+ }
+
+ start_time = rte_rdtsc_precise();
+
+ for (enq = 0, deq = 0; enq < num_ops;) {
+ num_to_enq = burst_sz;
+
+ if (unlikely(num_ops - enq < num_to_enq))
+ num_to_enq = num_ops - enq;
+
+ enq += rte_bbdev_enqueue_ldpc_dec_ops(tp->dev_id,
+ queue_id, &ops_enq[enq], num_to_enq);
+
+ deq += rte_bbdev_dequeue_ldpc_dec_ops(tp->dev_id,
+ queue_id, &ops_deq[deq], enq - deq);
+ }
+
+ /* dequeue the remaining */
+ while (deq < enq) {
+ deq += rte_bbdev_dequeue_ldpc_dec_ops(tp->dev_id,
+ queue_id, &ops_deq[deq], enq - deq);
+ }
+
+ total_time += rte_rdtsc_precise() - start_time;
+ }
+
+ tp->iter_count = 0;
+ /* get the max of iter_count for all dequeued ops */
+ for (i = 0; i < num_ops; ++i) {
+ tp->iter_count = RTE_MAX(ops_enq[i]->ldpc_dec.iter_count,
+ tp->iter_count);
+ }
+
+ if (test_vector.op_type != RTE_BBDEV_OP_NONE) {
+ ret = validate_ldpc_dec_op(ops_deq, num_ops, ref_op,
+ tp->op_params->vector_mask);
+ TEST_ASSERT_SUCCESS(ret, "Validation failed!");
+ }
+
+ rte_bbdev_dec_op_free_bulk(ops_enq, num_ops);
+
+ double tb_len_bits = calc_ldpc_dec_TB_size(ref_op);
+
+ tp->ops_per_sec = ((double)num_ops * TEST_REPETITIONS) /
+ ((double)total_time / (double)rte_get_tsc_hz());
+ tp->mbps = (((double)(num_ops * TEST_REPETITIONS * tb_len_bits)) /
+ 1000000.0) / ((double)total_time /
+ (double)rte_get_tsc_hz());
+
+ return TEST_SUCCESS;
+}
+
+static int
throughput_pmd_lcore_enc(void *arg)
{
struct thread_params *tp = arg;
@@ -1667,6 +2286,8 @@ typedef int (test_case_function)(struct active_device *ad,
TEST_ASSERT_SUCCESS(ret, "Validation failed!");
}
+ rte_bbdev_enc_op_free_bulk(ops_enq, num_ops);
+
double tb_len_bits = calc_enc_TB_size(ref_op);
tp->ops_per_sec = ((double)num_ops * TEST_REPETITIONS) /
@@ -1678,6 +2299,97 @@ typedef int (test_case_function)(struct active_device *ad,
return TEST_SUCCESS;
}
+static int
+throughput_pmd_lcore_ldpc_enc(void *arg)
+{
+ struct thread_params *tp = arg;
+ uint16_t enq, deq;
+ uint64_t total_time = 0, start_time;
+ const uint16_t queue_id = tp->queue_id;
+ const uint16_t burst_sz = tp->op_params->burst_sz;
+ const uint16_t num_ops = tp->op_params->num_to_process;
+ struct rte_bbdev_enc_op *ops_enq[num_ops];
+ struct rte_bbdev_enc_op *ops_deq[num_ops];
+ struct rte_bbdev_enc_op *ref_op = tp->op_params->ref_enc_op;
+ struct test_buffers *bufs = NULL;
+ int i, j, ret;
+ struct rte_bbdev_info info;
+ uint16_t num_to_enq;
+
+ TEST_ASSERT_SUCCESS((burst_sz > MAX_BURST),
+ "BURST_SIZE should be <= %u", MAX_BURST);
+
+ rte_bbdev_info_get(tp->dev_id, &info);
+
+ TEST_ASSERT_SUCCESS((num_ops > info.drv.queue_size_lim),
+ "NUM_OPS cannot exceed %u for this device",
+ info.drv.queue_size_lim);
+
+ bufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];
+
+ while (rte_atomic16_read(&tp->op_params->sync) == SYNC_WAIT)
+ rte_pause();
+
+ ret = rte_bbdev_enc_op_alloc_bulk(tp->op_params->mp, ops_enq,
+ num_ops);
+ TEST_ASSERT_SUCCESS(ret, "Allocation failed for %d ops",
+ num_ops);
+ if (test_vector.op_type != RTE_BBDEV_OP_NONE)
+ copy_reference_ldpc_enc_op(ops_enq, num_ops, 0, bufs->inputs,
+ bufs->hard_outputs, ref_op);
+
+ /* Set counter to validate the ordering */
+ for (j = 0; j < num_ops; ++j)
+ ops_enq[j]->opaque_data = (void *)(uintptr_t)j;
+
+ for (i = 0; i < TEST_REPETITIONS; ++i) {
+
+ if (test_vector.op_type != RTE_BBDEV_OP_NONE)
+ for (j = 0; j < num_ops; ++j)
+ mbuf_reset(ops_enq[j]->ldpc_enc.output.data);
+
+ start_time = rte_rdtsc_precise();
+
+ for (enq = 0, deq = 0; enq < num_ops;) {
+ num_to_enq = burst_sz;
+
+ if (unlikely(num_ops - enq < num_to_enq))
+ num_to_enq = num_ops - enq;
+
+ enq += rte_bbdev_enqueue_ldpc_enc_ops(tp->dev_id,
+ queue_id, &ops_enq[enq], num_to_enq);
+
+ deq += rte_bbdev_dequeue_ldpc_enc_ops(tp->dev_id,
+ queue_id, &ops_deq[deq], enq - deq);
+ }
+
+ /* dequeue the remaining */
+ while (deq < enq) {
+ deq += rte_bbdev_dequeue_ldpc_enc_ops(tp->dev_id,
+ queue_id, &ops_deq[deq], enq - deq);
+ }
+
+ total_time += rte_rdtsc_precise() - start_time;
+ }
+
+ if (test_vector.op_type != RTE_BBDEV_OP_NONE) {
+ ret = validate_ldpc_enc_op(ops_deq, num_ops, ref_op);
+ TEST_ASSERT_SUCCESS(ret, "Validation failed!");
+ }
+
+ rte_bbdev_enc_op_free_bulk(ops_enq, num_ops);
+
+ double tb_len_bits = calc_ldpc_enc_TB_size(ref_op);
+
+ tp->ops_per_sec = ((double)num_ops * TEST_REPETITIONS) /
+ ((double)total_time / (double)rte_get_tsc_hz());
+ tp->mbps = (((double)(num_ops * TEST_REPETITIONS * tb_len_bits))
+ / 1000000.0) / ((double)total_time /
+ (double)rte_get_tsc_hz());
+
+ return TEST_SUCCESS;
+}
+
static void
print_enc_throughput(struct thread_params *t_params, unsigned int used_cores)
{
@@ -1740,8 +2452,8 @@ typedef int (test_case_function)(struct active_device *ad,
TEST_ASSERT_NOT_NULL(op_type_str, "Invalid op type: %u",
test_vector.op_type);
- printf(
- "Throughput test: dev: %s, nb_queues: %u, burst size: %u, num ops: %u, num_lcores: %u, op type: %s, int mode: %s, GHz: %lg\n",
+ printf("+ ------------------------------------------------------- +\n");
+ printf("== test: throughput\ndev: %s, nb_queues: %u, burst size: %u, num ops: %u, num_lcores: %u, op type: %s, itr mode: %s, GHz: %lg\n",
info.dev_name, ad->nb_queues, op_params->burst_sz,
op_params->num_to_process, op_params->num_lcores,
op_type_str,
@@ -1763,6 +2475,12 @@ typedef int (test_case_function)(struct active_device *ad,
if (intr_enabled) {
if (test_vector.op_type == RTE_BBDEV_OP_TURBO_DEC)
throughput_function = throughput_intr_lcore_dec;
+ else if (test_vector.op_type == RTE_BBDEV_OP_LDPC_DEC)
+ throughput_function = throughput_intr_lcore_dec;
+ else if (test_vector.op_type == RTE_BBDEV_OP_TURBO_ENC)
+ throughput_function = throughput_intr_lcore_enc;
+ else if (test_vector.op_type == RTE_BBDEV_OP_LDPC_ENC)
+ throughput_function = throughput_intr_lcore_enc;
else
throughput_function = throughput_intr_lcore_enc;
@@ -1777,6 +2495,12 @@ typedef int (test_case_function)(struct active_device *ad,
} else {
if (test_vector.op_type == RTE_BBDEV_OP_TURBO_DEC)
throughput_function = throughput_pmd_lcore_dec;
+ else if (test_vector.op_type == RTE_BBDEV_OP_LDPC_DEC)
+ throughput_function = throughput_pmd_lcore_ldpc_dec;
+ else if (test_vector.op_type == RTE_BBDEV_OP_TURBO_ENC)
+ throughput_function = throughput_pmd_lcore_enc;
+ else if (test_vector.op_type == RTE_BBDEV_OP_LDPC_ENC)
+ throughput_function = throughput_pmd_lcore_ldpc_enc;
else
throughput_function = throughput_pmd_lcore_enc;
}
@@ -1819,7 +2543,8 @@ typedef int (test_case_function)(struct active_device *ad,
/* Print throughput if interrupts are disabled and test passed */
if (!intr_enabled) {
- if (test_vector.op_type == RTE_BBDEV_OP_TURBO_DEC)
+ if (test_vector.op_type == RTE_BBDEV_OP_TURBO_DEC ||
+ test_vector.op_type == RTE_BBDEV_OP_LDPC_DEC)
print_dec_throughput(t_params, num_lcores);
else
print_enc_throughput(t_params, num_lcores);
@@ -1841,7 +2566,7 @@ typedef int (test_case_function)(struct active_device *ad,
tp->ops_per_sec /= TEST_REPETITIONS;
tp->mbps /= TEST_REPETITIONS;
- ret |= rte_atomic16_read(&tp->processing_status);
+ ret |= (int)rte_atomic16_read(&tp->processing_status);
/* Wait for slave lcores operations */
for (used_cores = 1; used_cores < num_lcores; used_cores++) {
@@ -1855,14 +2580,16 @@ typedef int (test_case_function)(struct active_device *ad,
tp->ops_per_sec /= TEST_REPETITIONS;
tp->mbps /= TEST_REPETITIONS;
- ret |= rte_atomic16_read(&tp->processing_status);
+ ret |= (int)rte_atomic16_read(&tp->processing_status);
}
/* Print throughput if test passed */
if (!ret) {
- if (test_vector.op_type == RTE_BBDEV_OP_TURBO_DEC)
+ if (test_vector.op_type == RTE_BBDEV_OP_TURBO_DEC ||
+ test_vector.op_type == RTE_BBDEV_OP_LDPC_DEC)
print_dec_throughput(t_params, num_lcores);
- else if (test_vector.op_type == RTE_BBDEV_OP_TURBO_ENC)
+ else if (test_vector.op_type == RTE_BBDEV_OP_TURBO_ENC ||
+ test_vector.op_type == RTE_BBDEV_OP_LDPC_ENC)
print_enc_throughput(t_params, num_lcores);
}
@@ -1940,6 +2667,77 @@ typedef int (test_case_function)(struct active_device *ad,
}
static int
+latency_test_ldpc_dec(struct rte_mempool *mempool,
+ struct test_buffers *bufs, struct rte_bbdev_dec_op *ref_op,
+ int vector_mask, uint16_t dev_id, uint16_t queue_id,
+ const uint16_t num_to_process, uint16_t burst_sz,
+ uint64_t *total_time, uint64_t *min_time, uint64_t *max_time)
+{
+ int ret = TEST_SUCCESS;
+ uint16_t i, j, dequeued;
+ struct rte_bbdev_dec_op *ops_enq[MAX_BURST], *ops_deq[MAX_BURST];
+ uint64_t start_time = 0, last_time = 0;
+
+ for (i = 0, dequeued = 0; dequeued < num_to_process; ++i) {
+ uint16_t enq = 0, deq = 0;
+ bool first_time = true;
+ last_time = 0;
+
+ if (unlikely(num_to_process - dequeued < burst_sz))
+ burst_sz = num_to_process - dequeued;
+
+ ret = rte_bbdev_dec_op_alloc_bulk(mempool, ops_enq, burst_sz);
+ TEST_ASSERT_SUCCESS(ret,
+ "rte_bbdev_dec_op_alloc_bulk() failed");
+ if (test_vector.op_type != RTE_BBDEV_OP_NONE)
+ copy_reference_ldpc_dec_op(ops_enq, burst_sz, dequeued,
+ bufs->inputs,
+ bufs->hard_outputs,
+ bufs->soft_outputs,
+ bufs->harq_inputs,
+ bufs->harq_outputs,
+ ref_op);
+
+ /* Set counter to validate the ordering */
+ for (j = 0; j < burst_sz; ++j)
+ ops_enq[j]->opaque_data = (void *)(uintptr_t)j;
+
+ start_time = rte_rdtsc_precise();
+
+ enq = rte_bbdev_enqueue_ldpc_dec_ops(dev_id, queue_id,
+ &ops_enq[enq], burst_sz);
+ TEST_ASSERT(enq == burst_sz,
+ "Error enqueueing burst, expected %u, got %u",
+ burst_sz, enq);
+
+ /* Dequeue */
+ do {
+ deq += rte_bbdev_dequeue_ldpc_dec_ops(dev_id, queue_id,
+ &ops_deq[deq], burst_sz - deq);
+ if (likely(first_time && (deq > 0))) {
+ last_time = rte_rdtsc_precise() - start_time;
+ first_time = false;
+ }
+ } while (unlikely(burst_sz != deq));
+
+ *max_time = RTE_MAX(*max_time, last_time);
+ *min_time = RTE_MIN(*min_time, last_time);
+ *total_time += last_time;
+
+ if (test_vector.op_type != RTE_BBDEV_OP_NONE) {
+ ret = validate_ldpc_dec_op(ops_deq, burst_sz, ref_op,
+ vector_mask);
+ TEST_ASSERT_SUCCESS(ret, "Validation failed!");
+ }
+
+ rte_bbdev_dec_op_free_bulk(ops_enq, deq);
+ dequeued += deq;
+ }
+
+ return i;
+}
+
+static int
latency_test_enc(struct rte_mempool *mempool,
struct test_buffers *bufs, struct rte_bbdev_enc_op *ref_op,
uint16_t dev_id, uint16_t queue_id,
@@ -2007,6 +2805,84 @@ typedef int (test_case_function)(struct active_device *ad,
}
static int
+latency_test_ldpc_enc(struct rte_mempool *mempool,
+ struct test_buffers *bufs, struct rte_bbdev_enc_op *ref_op,
+ uint16_t dev_id, uint16_t queue_id,
+ const uint16_t num_to_process, uint16_t burst_sz,
+ uint64_t *total_time, uint64_t *min_time, uint64_t *max_time)
+{
+ int ret = TEST_SUCCESS;
+ uint16_t i, j, dequeued;
+ struct rte_bbdev_enc_op *ops_enq[MAX_BURST], *ops_deq[MAX_BURST];
+ uint64_t start_time = 0, last_time = 0;
+
+ for (i = 0, dequeued = 0; dequeued < num_to_process; ++i) {
+ uint16_t enq = 0, deq = 0;
+ bool first_time = true;
+ last_time = 0;
+
+ if (unlikely(num_to_process - dequeued < burst_sz))
+ burst_sz = num_to_process - dequeued;
+
+ ret = rte_bbdev_enc_op_alloc_bulk(mempool, ops_enq, burst_sz);
+
+ TEST_ASSERT_SUCCESS(ret,
+ "rte_bbdev_enc_op_alloc_bulk() failed");
+ if (test_vector.op_type != RTE_BBDEV_OP_NONE)
+ copy_reference_ldpc_enc_op(ops_enq, burst_sz, dequeued,
+ bufs->inputs,
+ bufs->hard_outputs,
+ ref_op);
+
+ /* Set counter to validate the ordering */
+ for (j = 0; j < burst_sz; ++j)
+ ops_enq[j]->opaque_data = (void *)(uintptr_t)j;
+
+ start_time = rte_rdtsc_precise();
+
+ enq = rte_bbdev_enqueue_ldpc_enc_ops(dev_id, queue_id,
+ &ops_enq[enq], burst_sz);
+ TEST_ASSERT(enq == burst_sz,
+ "Error enqueueing burst, expected %u, got %u",
+ burst_sz, enq);
+
+ /* Dequeue */
+ do {
+ deq += rte_bbdev_dequeue_ldpc_enc_ops(dev_id, queue_id,
+ &ops_deq[deq], burst_sz - deq);
+ if (likely(first_time && (deq > 0))) {
+ last_time += rte_rdtsc_precise() - start_time;
+ first_time = false;
+ }
+ } while (unlikely(burst_sz != deq));
+
+ *max_time = RTE_MAX(*max_time, last_time);
+ *min_time = RTE_MIN(*min_time, last_time);
+ *total_time += last_time;
+
+ if (test_vector.op_type != RTE_BBDEV_OP_NONE) {
+ ret = validate_ldpc_enc_op(ops_deq, burst_sz, ref_op);
+ TEST_ASSERT_SUCCESS(ret, "Validation failed!");
+ }
+
+ rte_bbdev_enc_op_free_bulk(ops_enq, deq);
+ dequeued += deq;
+ }
+
+ return i;
+}
+
+static int
latency_test(struct active_device *ad,
struct test_op_params *op_params)
{
@@ -2032,8 +2908,8 @@ typedef int (test_case_function)(struct active_device *ad,
op_type_str = rte_bbdev_op_type_str(op_type);
TEST_ASSERT_NOT_NULL(op_type_str, "Invalid op type: %u", op_type);
- printf(
- "\nValidation/Latency test: dev: %s, burst size: %u, num ops: %u, op type: %s\n",
+ printf("+ ------------------------------------------------------- +\n");
+ printf("== test: validation/latency\ndev: %s, burst size: %u, num ops: %u, op type: %s\n",
info.dev_name, burst_sz, num_to_process, op_type_str);
if (op_type == RTE_BBDEV_OP_TURBO_DEC)
@@ -2041,19 +2917,35 @@ typedef int (test_case_function)(struct active_device *ad,
op_params->ref_dec_op, op_params->vector_mask,
ad->dev_id, queue_id, num_to_process,
burst_sz, &total_time, &min_time, &max_time);
- else
+ else if (op_type == RTE_BBDEV_OP_TURBO_ENC)
iter = latency_test_enc(op_params->mp, bufs,
op_params->ref_enc_op, ad->dev_id, queue_id,
num_to_process, burst_sz, &total_time,
&min_time, &max_time);
+ else if (op_type == RTE_BBDEV_OP_LDPC_ENC)
+ iter = latency_test_ldpc_enc(op_params->mp, bufs,
+ op_params->ref_enc_op, ad->dev_id, queue_id,
+ num_to_process, burst_sz, &total_time,
+ &min_time, &max_time);
+ else if (op_type == RTE_BBDEV_OP_LDPC_DEC)
+ iter = latency_test_ldpc_dec(op_params->mp, bufs,
+ op_params->ref_dec_op, op_params->vector_mask,
+ ad->dev_id, queue_id, num_to_process,
+ burst_sz, &total_time, &min_time, &max_time);
+ else
+ iter = latency_test_enc(op_params->mp, bufs,
+ op_params->ref_enc_op,
+ ad->dev_id, queue_id,
+ num_to_process, burst_sz, &total_time,
+ &min_time, &max_time);
if (iter <= 0)
return TEST_FAILED;
printf("Operation latency:\n"
- "\tavg latency: %lg cycles, %lg us\n"
- "\tmin latency: %lg cycles, %lg us\n"
- "\tmax latency: %lg cycles, %lg us\n",
+ "\tavg: %lg cycles, %lg us\n"
+ "\tmin: %lg cycles, %lg us\n"
+ "\tmax: %lg cycles, %lg us\n",
(double)total_time / (double)iter,
(double)(total_time * 1000000) / (double)iter /
(double)rte_get_tsc_hz(), (double)min_time,
@@ -2104,10 +2996,7 @@ typedef int (test_case_function)(struct active_device *ad,
if (unlikely(num_to_process - dequeued < burst_sz))
burst_sz = num_to_process - dequeued;
- ret = rte_bbdev_dec_op_alloc_bulk(mempool, ops_enq, burst_sz);
- TEST_ASSERT_SUCCESS(ret, "Allocation failed for %d ops",
- burst_sz);
-
+ rte_bbdev_dec_op_alloc_bulk(mempool, ops_enq, burst_sz);
if (test_vector.op_type != RTE_BBDEV_OP_NONE)
copy_reference_dec_op(ops_enq, burst_sz, dequeued,
bufs->inputs,
@@ -2141,7 +3030,7 @@ typedef int (test_case_function)(struct active_device *ad,
stats.acc_offload_cycles);
time_st->enq_acc_total_time += stats.acc_offload_cycles;
- /* ensure enqueue has been completed */
+ /* give time for device to process ops */
rte_delay_us(200);
/* Start time meas for dequeue function offload latency */
@@ -2172,6 +3061,91 @@ typedef int (test_case_function)(struct active_device *ad,
}
static int
+offload_latency_test_ldpc_dec(struct rte_mempool *mempool,
+ struct test_buffers *bufs,
+ struct rte_bbdev_dec_op *ref_op, uint16_t dev_id,
+ uint16_t queue_id, const uint16_t num_to_process,
+ uint16_t burst_sz, struct test_time_stats *time_st)
+{
+ int i, dequeued, ret;
+ struct rte_bbdev_dec_op *ops_enq[MAX_BURST], *ops_deq[MAX_BURST];
+ uint64_t enq_start_time, deq_start_time;
+ uint64_t enq_sw_last_time, deq_last_time;
+ struct rte_bbdev_stats stats;
+
+ for (i = 0, dequeued = 0; dequeued < num_to_process; ++i) {
+ uint16_t enq = 0, deq = 0;
+
+ if (unlikely(num_to_process - dequeued < burst_sz))
+ burst_sz = num_to_process - dequeued;
+
+ rte_bbdev_dec_op_alloc_bulk(mempool, ops_enq, burst_sz);
+ if (test_vector.op_type != RTE_BBDEV_OP_NONE)
+ copy_reference_ldpc_dec_op(ops_enq, burst_sz, dequeued,
+ bufs->inputs,
+ bufs->hard_outputs,
+ bufs->soft_outputs,
+ bufs->harq_inputs,
+ bufs->harq_outputs,
+ ref_op);
+
+ /* Start time meas for enqueue function offload latency */
+ enq_start_time = rte_rdtsc_precise();
+ do {
+ enq += rte_bbdev_enqueue_ldpc_dec_ops(dev_id, queue_id,
+ &ops_enq[enq], burst_sz - enq);
+ } while (unlikely(burst_sz != enq));
+
+ ret = get_bbdev_queue_stats(dev_id, queue_id, &stats);
+ TEST_ASSERT_SUCCESS(ret,
+ "Failed to get stats for queue (%u) of device (%u)",
+ queue_id, dev_id);
+
+ enq_sw_last_time = rte_rdtsc_precise() - enq_start_time -
+ stats.acc_offload_cycles;
+ time_st->enq_sw_max_time = RTE_MAX(time_st->enq_sw_max_time,
+ enq_sw_last_time);
+ time_st->enq_sw_min_time = RTE_MIN(time_st->enq_sw_min_time,
+ enq_sw_last_time);
+ time_st->enq_sw_total_time += enq_sw_last_time;
+
+ time_st->enq_acc_max_time = RTE_MAX(time_st->enq_acc_max_time,
+ stats.acc_offload_cycles);
+ time_st->enq_acc_min_time = RTE_MIN(time_st->enq_acc_min_time,
+ stats.acc_offload_cycles);
+ time_st->enq_acc_total_time += stats.acc_offload_cycles;
+
+ /* give time for device to process ops */
+ rte_delay_us(200);
+
+ /* Start time meas for dequeue function offload latency */
+ deq_start_time = rte_rdtsc_precise();
+ /* Dequeue one operation */
+ do {
+ deq += rte_bbdev_dequeue_ldpc_dec_ops(dev_id, queue_id,
+ &ops_deq[deq], 1);
+ } while (unlikely(deq != 1));
+
+ deq_last_time = rte_rdtsc_precise() - deq_start_time;
+ time_st->deq_max_time = RTE_MAX(time_st->deq_max_time,
+ deq_last_time);
+ time_st->deq_min_time = RTE_MIN(time_st->deq_min_time,
+ deq_last_time);
+ time_st->deq_total_time += deq_last_time;
+
+ /* Dequeue remaining operations if needed */
+ while (burst_sz != deq)
+ deq += rte_bbdev_dequeue_ldpc_dec_ops(dev_id, queue_id,
+ &ops_deq[deq], burst_sz - deq);
+
+ rte_bbdev_dec_op_free_bulk(ops_enq, deq);
+ dequeued += deq;
+ }
+
+ return i;
+}
+
+static int
offload_latency_test_enc(struct rte_mempool *mempool, struct test_buffers *bufs,
struct rte_bbdev_enc_op *ref_op, uint16_t dev_id,
uint16_t queue_id, const uint16_t num_to_process,
@@ -2189,10 +3163,7 @@ typedef int (test_case_function)(struct active_device *ad,
if (unlikely(num_to_process - dequeued < burst_sz))
burst_sz = num_to_process - dequeued;
- ret = rte_bbdev_enc_op_alloc_bulk(mempool, ops_enq, burst_sz);
- TEST_ASSERT_SUCCESS(ret, "Allocation failed for %d ops",
- burst_sz);
-
+ rte_bbdev_enc_op_alloc_bulk(mempool, ops_enq, burst_sz);
if (test_vector.op_type != RTE_BBDEV_OP_NONE)
copy_reference_enc_op(ops_enq, burst_sz, dequeued,
bufs->inputs,
@@ -2225,7 +3196,7 @@ typedef int (test_case_function)(struct active_device *ad,
stats.acc_offload_cycles);
time_st->enq_acc_total_time += stats.acc_offload_cycles;
- /* ensure enqueue has been completed */
+ /* give time for device to process ops */
rte_delay_us(200);
/* Start time meas for dequeue function offload latency */
@@ -2253,6 +3224,87 @@ typedef int (test_case_function)(struct active_device *ad,
return i;
}
+
+static int
+offload_latency_test_ldpc_enc(struct rte_mempool *mempool,
+ struct test_buffers *bufs,
+ struct rte_bbdev_enc_op *ref_op, uint16_t dev_id,
+ uint16_t queue_id, const uint16_t num_to_process,
+ uint16_t burst_sz, struct test_time_stats *time_st)
+{
+ int i, dequeued, ret;
+ struct rte_bbdev_enc_op *ops_enq[MAX_BURST], *ops_deq[MAX_BURST];
+ uint64_t enq_start_time, deq_start_time;
+ uint64_t enq_sw_last_time, deq_last_time;
+ struct rte_bbdev_stats stats;
+
+ for (i = 0, dequeued = 0; dequeued < num_to_process; ++i) {
+ uint16_t enq = 0, deq = 0;
+
+ if (unlikely(num_to_process - dequeued < burst_sz))
+ burst_sz = num_to_process - dequeued;
+
+ rte_bbdev_enc_op_alloc_bulk(mempool, ops_enq, burst_sz);
+ if (test_vector.op_type != RTE_BBDEV_OP_NONE)
+ copy_reference_ldpc_enc_op(ops_enq, burst_sz, dequeued,
+ bufs->inputs,
+ bufs->hard_outputs,
+ ref_op);
+
+ /* Start time meas for enqueue function offload latency */
+ enq_start_time = rte_rdtsc_precise();
+ do {
+ enq += rte_bbdev_enqueue_ldpc_enc_ops(dev_id, queue_id,
+ &ops_enq[enq], burst_sz - enq);
+ } while (unlikely(burst_sz != enq));
+
+ ret = get_bbdev_queue_stats(dev_id, queue_id, &stats);
+ TEST_ASSERT_SUCCESS(ret,
+ "Failed to get stats for queue (%u) of device (%u)",
+ queue_id, dev_id);
+
+ enq_sw_last_time = rte_rdtsc_precise() - enq_start_time -
+ stats.acc_offload_cycles;
+ time_st->enq_sw_max_time = RTE_MAX(time_st->enq_sw_max_time,
+ enq_sw_last_time);
+ time_st->enq_sw_min_time = RTE_MIN(time_st->enq_sw_min_time,
+ enq_sw_last_time);
+ time_st->enq_sw_total_time += enq_sw_last_time;
+
+ time_st->enq_acc_max_time = RTE_MAX(time_st->enq_acc_max_time,
+ stats.acc_offload_cycles);
+ time_st->enq_acc_min_time = RTE_MIN(time_st->enq_acc_min_time,
+ stats.acc_offload_cycles);
+ time_st->enq_acc_total_time += stats.acc_offload_cycles;
+
+ /* give time for device to process ops */
+ rte_delay_us(200);
+
+ /* Start time meas for dequeue function offload latency */
+ deq_start_time = rte_rdtsc_precise();
+ /* Dequeue one operation */
+ do {
+ deq += rte_bbdev_dequeue_ldpc_enc_ops(dev_id, queue_id,
+ &ops_deq[deq], 1);
+ } while (unlikely(deq != 1));
+
+ deq_last_time = rte_rdtsc_precise() - deq_start_time;
+ time_st->deq_max_time = RTE_MAX(time_st->deq_max_time,
+ deq_last_time);
+ time_st->deq_min_time = RTE_MIN(time_st->deq_min_time,
+ deq_last_time);
+ time_st->deq_total_time += deq_last_time;
+
+ while (burst_sz != deq)
+ deq += rte_bbdev_dequeue_ldpc_enc_ops(dev_id, queue_id,
+ &ops_deq[deq], burst_sz - deq);
+
+ rte_bbdev_enc_op_free_bulk(ops_enq, deq);
+ dequeued += deq;
+ }
+
+ return i;
+}
#endif
static int
@@ -2290,14 +3342,26 @@ typedef int (test_case_function)(struct active_device *ad,
op_type_str = rte_bbdev_op_type_str(op_type);
TEST_ASSERT_NOT_NULL(op_type_str, "Invalid op type: %u", op_type);
- printf(
- "\nOffload latency test: dev: %s, burst size: %u, num ops: %u, op type: %s\n",
+ printf("+ ------------------------------------------------------- +\n");
+ printf("== test: offload latency test\ndev: %s, burst size: %u, num ops: %u, op type: %s\n",
info.dev_name, burst_sz, num_to_process, op_type_str);
if (op_type == RTE_BBDEV_OP_TURBO_DEC)
iter = offload_latency_test_dec(op_params->mp, bufs,
op_params->ref_dec_op, ad->dev_id, queue_id,
num_to_process, burst_sz, &time_st);
+ else if (op_type == RTE_BBDEV_OP_TURBO_ENC)
+ iter = offload_latency_test_enc(op_params->mp, bufs,
+ op_params->ref_enc_op, ad->dev_id, queue_id,
+ num_to_process, burst_sz, &time_st);
+ else if (op_type == RTE_BBDEV_OP_LDPC_ENC)
+ iter = offload_latency_test_ldpc_enc(op_params->mp, bufs,
+ op_params->ref_enc_op, ad->dev_id, queue_id,
+ num_to_process, burst_sz, &time_st);
+ else if (op_type == RTE_BBDEV_OP_LDPC_DEC)
+ iter = offload_latency_test_ldpc_dec(op_params->mp, bufs,
+ op_params->ref_dec_op, ad->dev_id, queue_id,
+ num_to_process, burst_sz, &time_st);
else
iter = offload_latency_test_enc(op_params->mp, bufs,
op_params->ref_enc_op, ad->dev_id, queue_id,
@@ -2306,13 +3370,14 @@ typedef int (test_case_function)(struct active_device *ad,
if (iter <= 0)
return TEST_FAILED;
- printf("Enqueue offload cost latency:\n"
- "\tDriver offload avg %lg cycles, %lg us\n"
- "\tDriver offload min %lg cycles, %lg us\n"
- "\tDriver offload max %lg cycles, %lg us\n"
- "\tAccelerator offload avg %lg cycles, %lg us\n"
- "\tAccelerator offload min %lg cycles, %lg us\n"
- "\tAccelerator offload max %lg cycles, %lg us\n",
+ printf("Enqueue driver offload cost latency:\n"
+ "\tavg: %lg cycles, %lg us\n"
+ "\tmin: %lg cycles, %lg us\n"
+ "\tmax: %lg cycles, %lg us\n"
+ "Enqueue accelerator offload cost latency:\n"
+ "\tavg: %lg cycles, %lg us\n"
+ "\tmin: %lg cycles, %lg us\n"
+ "\tmax: %lg cycles, %lg us\n",
(double)time_st.enq_sw_total_time / (double)iter,
(double)(time_st.enq_sw_total_time * 1000000) /
(double)iter / (double)rte_get_tsc_hz(),
@@ -2331,9 +3396,9 @@ typedef int (test_case_function)(struct active_device *ad,
rte_get_tsc_hz());
printf("Dequeue offload cost latency - one op:\n"
- "\tavg %lg cycles, %lg us\n"
- "\tmin %lg cycles, %lg us\n"
- "\tmax %lg cycles, %lg us\n",
+ "\tavg: %lg cycles, %lg us\n"
+ "\tmin: %lg cycles, %lg us\n"
+ "\tmax: %lg cycles, %lg us\n",
(double)time_st.deq_total_time / (double)iter,
(double)(time_st.deq_total_time * 1000000) /
(double)iter / (double)rte_get_tsc_hz(),
@@ -2437,8 +3502,8 @@ typedef int (test_case_function)(struct active_device *ad,
op_type_str = rte_bbdev_op_type_str(op_type);
TEST_ASSERT_NOT_NULL(op_type_str, "Invalid op type: %u", op_type);
- printf(
- "\nOffload latency empty dequeue test: dev: %s, burst size: %u, num ops: %u, op type: %s\n",
+ printf("+ ------------------------------------------------------- +\n");
+ printf("== test: offload latency empty dequeue\ndev: %s, burst size: %u, num ops: %u, op type: %s\n",
info.dev_name, burst_sz, num_to_process, op_type_str);
if (op_type == RTE_BBDEV_OP_TURBO_DEC)
@@ -2453,10 +3518,10 @@ typedef int (test_case_function)(struct active_device *ad,
if (iter <= 0)
return TEST_FAILED;
- printf("Empty dequeue offload\n"
- "\tavg. latency: %lg cycles, %lg us\n"
- "\tmin. latency: %lg cycles, %lg us\n"
- "\tmax. latency: %lg cycles, %lg us\n",
+ printf("Empty dequeue offload:\n"
+ "\tavg: %lg cycles, %lg us\n"
+ "\tmin: %lg cycles, %lg us\n"
+ "\tmax: %lg cycles, %lg us\n",
(double)deq_total_time / (double)iter,
(double)(deq_total_time * 1000000) / (double)iter /
(double)rte_get_tsc_hz(), (double)deq_min_time,
diff --git a/app/test-bbdev/test_bbdev_vector.c b/app/test-bbdev/test_bbdev_vector.c
index e149ced..d478d76 100644
--- a/app/test-bbdev/test_bbdev_vector.c
+++ b/app/test-bbdev/test_bbdev_vector.c
@@ -2,7 +2,7 @@
* Copyright(c) 2017 Intel Corporation
*/
-#ifdef RTE_EXEC_ENV_FREEBSD
+#ifdef RTE_EXEC_ENV_BSDAPP
#define _WITH_GETLINE
#endif
#include <stdio.h>
@@ -18,6 +18,8 @@
"input",
"soft_output",
"hard_output",
+ "harq_input",
+ "harq_output",
};
/* trim leading and trailing spaces */
@@ -84,6 +86,7 @@
}
values[n_tokens] = (uint32_t) strtoul(tok, &error, 0);
+
if ((error == NULL) || (*error != '\0')) {
printf("Failed with convert '%s'\n", tok);
rte_free(values);
@@ -154,6 +157,54 @@
return 0;
}
+/* convert LDPC decoder flag from string to unsigned long int*/
+static int
+op_ldpc_decoder_flag_strtoul(char *token, uint32_t *op_flag_value)
+{
+ if (!strcmp(token, "RTE_BBDEV_LDPC_CRC_TYPE_24A_CHECK"))
+ *op_flag_value = RTE_BBDEV_LDPC_CRC_TYPE_24A_CHECK;
+ else if (!strcmp(token, "RTE_BBDEV_LDPC_CRC_TYPE_24B_CHECK"))
+ *op_flag_value = RTE_BBDEV_LDPC_CRC_TYPE_24B_CHECK;
+ else if (!strcmp(token, "RTE_BBDEV_LDPC_CRC_TYPE_24B_DROP"))
+ *op_flag_value = RTE_BBDEV_LDPC_CRC_TYPE_24B_DROP;
+ else if (!strcmp(token, "RTE_BBDEV_LDPC_DEINTERLEAVER_BYPASS"))
+ *op_flag_value = RTE_BBDEV_LDPC_DEINTERLEAVER_BYPASS;
+ else if (!strcmp(token, "RTE_BBDEV_LDPC_HQ_COMBINE_IN_ENABLE"))
+ *op_flag_value = RTE_BBDEV_LDPC_HQ_COMBINE_IN_ENABLE;
+ else if (!strcmp(token, "RTE_BBDEV_LDPC_HQ_COMBINE_OUT_ENABLE"))
+ *op_flag_value = RTE_BBDEV_LDPC_HQ_COMBINE_OUT_ENABLE;
+ else if (!strcmp(token, "RTE_BBDEV_LDPC_DECODE_BYPASS"))
+ *op_flag_value = RTE_BBDEV_LDPC_DECODE_BYPASS;
+ else if (!strcmp(token, "RTE_BBDEV_LDPC_SOFT_OUT_ENABLE"))
+ *op_flag_value = RTE_BBDEV_LDPC_SOFT_OUT_ENABLE;
+ else if (!strcmp(token, "RTE_BBDEV_LDPC_SOFT_OUT_RM_BYPASS"))
+ *op_flag_value = RTE_BBDEV_LDPC_SOFT_OUT_RM_BYPASS;
+ else if (!strcmp(token, "RTE_BBDEV_LDPC_SOFT_OUT_DEINTERLEAVER_BYPASS"))
+ *op_flag_value = RTE_BBDEV_LDPC_SOFT_OUT_DEINTERLEAVER_BYPASS;
+ else if (!strcmp(token, "RTE_BBDEV_LDPC_ITERATION_STOP_ENABLE"))
+ *op_flag_value = RTE_BBDEV_LDPC_ITERATION_STOP_ENABLE;
+ else if (!strcmp(token, "RTE_BBDEV_LDPC_DEC_INTERRUPTS"))
+ *op_flag_value = RTE_BBDEV_LDPC_DEC_INTERRUPTS;
+ else if (!strcmp(token, "RTE_BBDEV_LDPC_DEC_SCATTER_GATHER"))
+ *op_flag_value = RTE_BBDEV_LDPC_DEC_SCATTER_GATHER;
+ else if (!strcmp(token, "RTE_BBDEV_LDPC_HARQ_6BIT_COMPRESSION"))
+ *op_flag_value = RTE_BBDEV_LDPC_HARQ_6BIT_COMPRESSION;
+ else if (!strcmp(token, "RTE_BBDEV_LDPC_LLR_COMPRESSION"))
+ *op_flag_value = RTE_BBDEV_LDPC_LLR_COMPRESSION;
+ else if (!strcmp(token,
+ "RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_IN_ENABLE"))
+ *op_flag_value = RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_IN_ENABLE;
+ else if (!strcmp(token,
+ "RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_OUT_ENABLE"))
+ *op_flag_value = RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_OUT_ENABLE;
+ else {
+ printf("The given value is not a LDPC decoder flag\n");
+ return -1;
+ }
+
+ return 0;
+}
+
/* convert turbo encoder flag from string to unsigned long int*/
static int
op_encoder_flag_strtoul(char *token, uint32_t *op_flag_value)
@@ -176,6 +227,32 @@
return 0;
}
+/* convert LDPC encoder flag from string to unsigned long int*/
+static int
+op_ldpc_encoder_flag_strtoul(char *token, uint32_t *op_flag_value)
+{
+ if (!strcmp(token, "RTE_BBDEV_LDPC_INTERLEAVER_BYPASS"))
+ *op_flag_value = RTE_BBDEV_LDPC_INTERLEAVER_BYPASS;
+ else if (!strcmp(token, "RTE_BBDEV_LDPC_RATE_MATCH"))
+ *op_flag_value = RTE_BBDEV_LDPC_RATE_MATCH;
+ else if (!strcmp(token, "RTE_BBDEV_LDPC_CRC_24A_ATTACH"))
+ *op_flag_value = RTE_BBDEV_LDPC_CRC_24A_ATTACH;
+ else if (!strcmp(token, "RTE_BBDEV_LDPC_CRC_24B_ATTACH"))
+ *op_flag_value = RTE_BBDEV_LDPC_CRC_24B_ATTACH;
+ else if (!strcmp(token, "RTE_BBDEV_LDPC_CRC_16_ATTACH"))
+ *op_flag_value = RTE_BBDEV_LDPC_CRC_16_ATTACH;
+ else if (!strcmp(token, "RTE_BBDEV_LDPC_ENC_INTERRUPTS"))
+ *op_flag_value = RTE_BBDEV_LDPC_ENC_INTERRUPTS;
+ else if (!strcmp(token, "RTE_BBDEV_LDPC_ENC_SCATTER_GATHER"))
+ *op_flag_value = RTE_BBDEV_LDPC_ENC_SCATTER_GATHER;
+ else {
+ printf("The given value is not an LDPC encoder flag\n");
+ return -1;
+ }
+
+ return 0;
+}
+
/* tokenization turbo decoder/encoder flags values separated by a comma */
static int
parse_turbo_flags(char *tokens, uint32_t *op_flags,
@@ -196,6 +273,14 @@
} else if (op_type == RTE_BBDEV_OP_TURBO_ENC) {
if (op_encoder_flag_strtoul(tok, &op_flag_value) == -1)
return -1;
+ } else if (op_type == RTE_BBDEV_OP_LDPC_ENC) {
+ if (op_ldpc_encoder_flag_strtoul(tok, &op_flag_value)
+ == -1)
+ return -1;
+ } else if (op_type == RTE_BBDEV_OP_LDPC_DEC) {
+ if (op_ldpc_decoder_flag_strtoul(tok, &op_flag_value)
+ == -1)
+ return -1;
} else {
return -1;
}
@@ -219,6 +304,10 @@
*op_type = RTE_BBDEV_OP_TURBO_DEC;
else if (!strcmp(token, "RTE_BBDEV_OP_TURBO_ENC"))
*op_type = RTE_BBDEV_OP_TURBO_ENC;
+ else if (!strcmp(token, "RTE_BBDEV_OP_LDPC_ENC"))
+ *op_type = RTE_BBDEV_OP_LDPC_ENC;
+ else if (!strcmp(token, "RTE_BBDEV_OP_LDPC_DEC"))
+ *op_type = RTE_BBDEV_OP_LDPC_DEC;
else if (!strcmp(token, "RTE_BBDEV_OP_NONE"))
*op_type = RTE_BBDEV_OP_NONE;
else {
@@ -248,12 +337,18 @@
*status = *status | (1 << RTE_BBDEV_DRV_ERROR);
else if (!strcmp(tok, "FCW"))
*status = *status | (1 << RTE_BBDEV_DATA_ERROR);
+ else if (!strcmp(tok, "SYNCRC")) {
+ *status = *status | (1 << RTE_BBDEV_SYNDROME_ERROR);
+ *status = *status | (1 << RTE_BBDEV_CRC_ERROR);
+ } else if (!strcmp(tok, "SYN"))
+ *status = *status | (1 << RTE_BBDEV_SYNDROME_ERROR);
else if (!strcmp(tok, "CRC")) {
- if (op_type == RTE_BBDEV_OP_TURBO_DEC)
+ if ((op_type == RTE_BBDEV_OP_TURBO_DEC) ||
+ (op_type == RTE_BBDEV_OP_LDPC_DEC))
*status = *status | (1 << RTE_BBDEV_CRC_ERROR);
else {
printf(
- "CRC is only a valid value for turbo decoder\n");
+ "CRC is only a valid value for decoder\n");
return -1;
}
} else {
@@ -414,7 +509,7 @@
ret = ((err == NULL) || (*err != '\0')) ? -1 : 0;
} else if (!strcmp(key_token, "r")) {
vector->mask |= TEST_BBDEV_VF_R;
- turbo_dec->tb_params.r = (uint8_t) strtoul(token, &err, 0);
+ turbo_dec->tb_params.r = (uint8_t)strtoul(token, &err, 0);
ret = ((err == NULL) || (*err != '\0')) ? -1 : 0;
} else if (!strcmp(key_token, "code_block_mode")) {
vector->mask |= TEST_BBDEV_VF_CODE_BLOCK_MODE;
@@ -548,6 +643,211 @@
return 0;
}
+
+/* parses LDPC encoder parameters and assigns to global variable */
+static int
+parse_ldpc_encoder_params(const char *key_token, char *token,
+ struct test_bbdev_vector *vector)
+{
+ int ret = 0, status = 0;
+ uint32_t op_flags = 0;
+ char *err = NULL;
+
+ struct rte_bbdev_op_ldpc_enc *ldpc_enc = &vector->ldpc_enc;
+
+ if (starts_with(key_token, op_data_prefixes[DATA_INPUT]))
+ ret = parse_data_entry(key_token, token, vector,
+ DATA_INPUT,
+ op_data_prefixes[DATA_INPUT]);
+ else if (starts_with(key_token, "output"))
+ ret = parse_data_entry(key_token, token, vector,
+ DATA_HARD_OUTPUT,
+ "output");
+ else if (!strcmp(key_token, "e")) {
+ vector->mask |= TEST_BBDEV_VF_E;
+ ldpc_enc->cb_params.e = (uint32_t) strtoul(token, &err, 0);
+ ret = ((err == NULL) || (*err != '\0')) ? -1 : 0;
+ } else if (!strcmp(key_token, "ea")) {
+ vector->mask |= TEST_BBDEV_VF_EA;
+ ldpc_enc->tb_params.ea = (uint32_t) strtoul(token, &err, 0);
+ ret = ((err == NULL) || (*err != '\0')) ? -1 : 0;
+ } else if (!strcmp(key_token, "eb")) {
+ vector->mask |= TEST_BBDEV_VF_EB;
+ ldpc_enc->tb_params.eb = (uint32_t) strtoul(token, &err, 0);
+ ret = ((err == NULL) || (*err != '\0')) ? -1 : 0;
+ } else if (!strcmp(key_token, "c")) {
+ vector->mask |= TEST_BBDEV_VF_C;
+ ldpc_enc->tb_params.c = (uint8_t) strtoul(token, &err, 0);
+ ret = ((err == NULL) || (*err != '\0')) ? -1 : 0;
+ } else if (!strcmp(key_token, "cab")) {
+ vector->mask |= TEST_BBDEV_VF_CAB;
+ ldpc_enc->tb_params.cab = (uint8_t) strtoul(token, &err, 0);
+ ret = ((err == NULL) || (*err != '\0')) ? -1 : 0;
+ } else if (!strcmp(key_token, "rv_index")) {
+ vector->mask |= TEST_BBDEV_VF_RV_INDEX;
+ ldpc_enc->rv_index = (uint8_t) strtoul(token, &err, 0);
+ ret = ((err == NULL) || (*err != '\0')) ? -1 : 0;
+ } else if (!strcmp(key_token, "n_cb")) {
+ vector->mask |= TEST_BBDEV_VF_NCB;
+ ldpc_enc->n_cb = (uint16_t) strtoul(token, &err, 0);
+ ret = ((err == NULL) || (*err != '\0')) ? -1 : 0;
+ } else if (!strcmp(key_token, "r")) {
+ vector->mask |= TEST_BBDEV_VF_R;
+ ldpc_enc->tb_params.r = (uint8_t) strtoul(token, &err, 0);
+ ret = ((err == NULL) || (*err != '\0')) ? -1 : 0;
+ } else if (!strcmp(key_token, "q_m")) {
+ vector->mask |= TEST_BBDEV_VF_QM;
+ ldpc_enc->q_m = (uint8_t) strtoul(token, &err, 0);
+ ret = ((err == NULL) || (*err != '\0')) ? -1 : 0;
+ } else if (!strcmp(key_token, "basegraph")) {
+ vector->mask |= TEST_BBDEV_VF_BG;
+ ldpc_enc->basegraph = (uint8_t) strtoul(token, &err, 0);
+ ret = ((err == NULL) || (*err != '\0')) ? -1 : 0;
+ } else if (!strcmp(key_token, "z_c")) {
+ vector->mask |= TEST_BBDEV_VF_ZC;
+ ldpc_enc->z_c = (uint16_t) strtoul(token, &err, 0);
+ ret = ((err == NULL) || (*err != '\0')) ? -1 : 0;
+ } else if (!strcmp(key_token, "n_filler")) {
+ vector->mask |= TEST_BBDEV_VF_F;
+ ldpc_enc->n_filler = (uint16_t) strtoul(token, &err, 0);
+ ret = ((err == NULL) || (*err != '\0')) ? -1 : 0;
+ } else if (!strcmp(key_token, "code_block_mode")) {
+ vector->mask |= TEST_BBDEV_VF_CODE_BLOCK_MODE;
+ ldpc_enc->code_block_mode = (uint8_t) strtoul(token, &err, 0);
+ ret = ((err == NULL) || (*err != '\0')) ? -1 : 0;
+ } else if (!strcmp(key_token, "op_flags")) {
+ vector->mask |= TEST_BBDEV_VF_OP_FLAGS;
+ ret = parse_turbo_flags(token, &op_flags, vector->op_type);
+ if (!ret)
+ ldpc_enc->op_flags = op_flags;
+ } else if (!strcmp(key_token, "expected_status")) {
+ vector->mask |= TEST_BBDEV_VF_EXPECTED_STATUS;
+ ret = parse_expected_status(token, &status, vector->op_type);
+ if (!ret)
+ vector->expected_status = status;
+ } else {
+ printf("Not valid ldpc enc key: '%s'\n", key_token);
+ return -1;
+ }
+
+ if (ret != 0) {
+ printf("Failed with convert '%s\t%s'\n", key_token, token);
+ return -1;
+ }
+
+ return 0;
+}
+
+/* parses LDPC decoder parameters and assigns to global variable */
+static int
+parse_ldpc_decoder_params(const char *key_token, char *token,
+ struct test_bbdev_vector *vector)
+{
+ int ret = 0, status = 0;
+ uint32_t op_flags = 0;
+ char *err = NULL;
+
+ struct rte_bbdev_op_ldpc_dec *ldpc_dec = &vector->ldpc_dec;
+
+ if (starts_with(key_token, op_data_prefixes[DATA_INPUT]))
+ ret = parse_data_entry(key_token, token, vector,
+ DATA_INPUT,
+ op_data_prefixes[DATA_INPUT]);
+ else if (starts_with(key_token, "output"))
+ ret = parse_data_entry(key_token, token, vector,
+ DATA_HARD_OUTPUT,
+ "output");
+ else if (starts_with(key_token, op_data_prefixes[DATA_HARQ_INPUT]))
+ ret = parse_data_entry(key_token, token, vector,
+ DATA_HARQ_INPUT,
+ op_data_prefixes[DATA_HARQ_INPUT]);
+ else if (starts_with(key_token, op_data_prefixes[DATA_HARQ_OUTPUT]))
+ ret = parse_data_entry(key_token, token, vector,
+ DATA_HARQ_OUTPUT,
+ op_data_prefixes[DATA_HARQ_OUTPUT]);
+ else if (!strcmp(key_token, "e")) {
+ vector->mask |= TEST_BBDEV_VF_E;
+ ldpc_dec->cb_params.e = (uint32_t) strtoul(token, &err, 0);
+ ret = ((err == NULL) || (*err != '\0')) ? -1 : 0;
+ } else if (!strcmp(key_token, "ea")) {
+ vector->mask |= TEST_BBDEV_VF_EA;
+ ldpc_dec->tb_params.ea = (uint32_t) strtoul(token, &err, 0);
+ ret = ((err == NULL) || (*err != '\0')) ? -1 : 0;
+ } else if (!strcmp(key_token, "eb")) {
+ vector->mask |= TEST_BBDEV_VF_EB;
+ ldpc_dec->tb_params.eb = (uint32_t) strtoul(token, &err, 0);
+ ret = ((err == NULL) || (*err != '\0')) ? -1 : 0;
+ } else if (!strcmp(key_token, "c")) {
+ vector->mask |= TEST_BBDEV_VF_C;
+ ldpc_dec->tb_params.c = (uint8_t) strtoul(token, &err, 0);
+ ret = ((err == NULL) || (*err != '\0')) ? -1 : 0;
+ } else if (!strcmp(key_token, "cab")) {
+ vector->mask |= TEST_BBDEV_VF_CAB;
+ ldpc_dec->tb_params.cab = (uint8_t) strtoul(token, &err, 0);
+ ret = ((err == NULL) || (*err != '\0')) ? -1 : 0;
+ } else if (!strcmp(key_token, "rv_index")) {
+ vector->mask |= TEST_BBDEV_VF_RV_INDEX;
+ ldpc_dec->rv_index = (uint8_t) strtoul(token, &err, 0);
+ ret = ((err == NULL) || (*err != '\0')) ? -1 : 0;
+ } else if (!strcmp(key_token, "n_cb")) {
+ vector->mask |= TEST_BBDEV_VF_NCB;
+ ldpc_dec->n_cb = (uint16_t) strtoul(token, &err, 0);
+ ret = ((err == NULL) || (*err != '\0')) ? -1 : 0;
+ } else if (!strcmp(key_token, "r")) {
+ vector->mask |= TEST_BBDEV_VF_R;
+ ldpc_dec->tb_params.r = (uint8_t) strtoul(token, &err, 0);
+ ret = ((err == NULL) || (*err != '\0')) ? -1 : 0;
+ } else if (!strcmp(key_token, "q_m")) {
+ vector->mask |= TEST_BBDEV_VF_QM;
+ ldpc_dec->q_m = (uint8_t) strtoul(token, &err, 0);
+ ret = ((err == NULL) || (*err != '\0')) ? -1 : 0;
+ } else if (!strcmp(key_token, "basegraph")) {
+ vector->mask |= TEST_BBDEV_VF_BG;
+ ldpc_dec->basegraph = (uint8_t) strtoul(token, &err, 0);
+ ret = ((err == NULL) || (*err != '\0')) ? -1 : 0;
+ } else if (!strcmp(key_token, "z_c")) {
+ vector->mask |= TEST_BBDEV_VF_ZC;
+ ldpc_dec->z_c = (uint16_t) strtoul(token, &err, 0);
+ ret = ((err == NULL) || (*err != '\0')) ? -1 : 0;
+ } else if (!strcmp(key_token, "n_filler")) {
+ vector->mask |= TEST_BBDEV_VF_F;
+ ldpc_dec->n_filler = (uint16_t) strtoul(token, &err, 0);
+ ret = ((err == NULL) || (*err != '\0')) ? -1 : 0;
+ } else if (!strcmp(key_token, "expected_iter_count")) {
+ vector->mask |= TEST_BBDEV_VF_EXPECTED_ITER_COUNT;
+ ldpc_dec->iter_count = (uint8_t) strtoul(token, &err, 0);
+ ret = ((err == NULL) || (*err != '\0')) ? -1 : 0;
+ } else if (!strcmp(key_token, "iter_max")) {
+ vector->mask |= TEST_BBDEV_VF_ITER_MAX;
+ ldpc_dec->iter_max = (uint8_t) strtoul(token, &err, 0);
+ ret = ((err == NULL) || (*err != '\0')) ? -1 : 0;
+ } else if (!strcmp(key_token, "code_block_mode")) {
+ vector->mask |= TEST_BBDEV_VF_CODE_BLOCK_MODE;
+ ldpc_dec->code_block_mode = (uint8_t) strtoul(token, &err, 0);
+ ret = ((err == NULL) || (*err != '\0')) ? -1 : 0;
+ } else if (!strcmp(key_token, "op_flags")) {
+ vector->mask |= TEST_BBDEV_VF_OP_FLAGS;
+ ret = parse_turbo_flags(token, &op_flags, vector->op_type);
+ if (!ret)
+ ldpc_dec->op_flags = op_flags;
+ } else if (!strcmp(key_token, "expected_status")) {
+ vector->mask |= TEST_BBDEV_VF_EXPECTED_STATUS;
+ ret = parse_expected_status(token, &status, vector->op_type);
+ if (!ret)
+ vector->expected_status = status;
+ } else {
+ printf("Not valid ldpc dec key: '%s'\n", key_token);
+ return -1;
+ }
+
+ if (ret != 0) {
+ printf("Failed with convert '%s\t%s'\n", key_token, token);
+ return -1;
+ }
+
+ return 0;
+}
+
/* checks the type of key and assigns data */
static int
parse_entry(char *entry, struct test_bbdev_vector *vector)
@@ -593,6 +893,12 @@
} else if (vector->op_type == RTE_BBDEV_OP_TURBO_ENC) {
if (parse_encoder_params(key_token, token, vector) == -1)
return -1;
+ } else if (vector->op_type == RTE_BBDEV_OP_LDPC_ENC) {
+ if (parse_ldpc_encoder_params(key_token, token, vector) == -1)
+ return -1;
+ } else if (vector->op_type == RTE_BBDEV_OP_LDPC_DEC) {
+ if (parse_ldpc_decoder_params(key_token, token, vector) == -1)
+ return -1;
}
return 0;
@@ -632,6 +938,45 @@
}
static int
+check_ldpc_decoder_segments(struct test_bbdev_vector *vector)
+{
+ unsigned char i;
+ struct rte_bbdev_op_ldpc_dec *ldpc_dec = &vector->ldpc_dec;
+
+ if (vector->entries[DATA_INPUT].nb_segments == 0)
+ return -1;
+
+ for (i = 0; i < vector->entries[DATA_INPUT].nb_segments; i++)
+ if (vector->entries[DATA_INPUT].segments[i].addr == NULL)
+ return -1;
+
+ if (vector->entries[DATA_HARD_OUTPUT].nb_segments == 0)
+ return -1;
+
+ for (i = 0; i < vector->entries[DATA_HARD_OUTPUT].nb_segments; i++)
+ if (vector->entries[DATA_HARD_OUTPUT].segments[i].addr == NULL)
+ return -1;
+
+ if ((ldpc_dec->op_flags & RTE_BBDEV_LDPC_SOFT_OUT_ENABLE) &&
+ (vector->entries[DATA_SOFT_OUTPUT].nb_segments == 0))
+ return -1;
+
+ for (i = 0; i < vector->entries[DATA_SOFT_OUTPUT].nb_segments; i++)
+ if (vector->entries[DATA_SOFT_OUTPUT].segments[i].addr == NULL)
+ return -1;
+
+ if ((ldpc_dec->op_flags & RTE_BBDEV_LDPC_HQ_COMBINE_OUT_ENABLE) &&
+ (vector->entries[DATA_HARQ_OUTPUT].nb_segments == 0))
+ return -1;
+
+ for (i = 0; i < vector->entries[DATA_HARQ_OUTPUT].nb_segments; i++)
+ if (vector->entries[DATA_HARQ_OUTPUT].segments[i].addr == NULL)
+ return -1;
+
+ return 0;
+}
+
+static int
check_decoder_llr_spec(struct test_bbdev_vector *vector)
{
struct rte_bbdev_op_turbo_dec *turbo_dec = &vector->turbo_dec;
@@ -648,7 +993,7 @@
!(turbo_dec->op_flags &
RTE_BBDEV_TURBO_NEG_LLR_1_BIT_IN)) {
printf(
- "WARNING: input LLR sign formalism was not specified and will be set to negative LLR for '1' bit\n");
+ "INFO: input LLR sign formalism was not specified and will be set to negative LLR for '1' bit\n");
turbo_dec->op_flags |= RTE_BBDEV_TURBO_NEG_LLR_1_BIT_IN;
}
@@ -667,7 +1012,7 @@
!(turbo_dec->op_flags &
RTE_BBDEV_TURBO_NEG_LLR_1_BIT_SOFT_OUT)) {
printf(
- "WARNING: soft output LLR sign formalism was not specified and will be set to negative LLR for '1' bit\n");
+ "INFO: soft output LLR sign formalism was not specified and will be set to negative LLR for '1' bit\n");
turbo_dec->op_flags |=
RTE_BBDEV_TURBO_NEG_LLR_1_BIT_SOFT_OUT;
}
@@ -675,6 +1020,21 @@
return 0;
}
+static int
+check_decoder_op_flags(struct test_bbdev_vector *vector)
+{
+ struct rte_bbdev_op_turbo_dec *turbo_dec = &vector->turbo_dec;
+
+ if ((turbo_dec->op_flags & RTE_BBDEV_TURBO_DEC_TB_CRC_24B_KEEP) &&
+ !(turbo_dec->op_flags & RTE_BBDEV_TURBO_CRC_TYPE_24B)) {
+ printf(
+ "WARNING: RTE_BBDEV_TURBO_DEC_TB_CRC_24B_KEEP flag is missing RTE_BBDEV_TURBO_CRC_TYPE_24B\n");
+ return -1;
+ }
+
+ return 0;
+}
+
/* checks decoder parameters */
static int
check_decoder(struct test_bbdev_vector *vector)
@@ -688,6 +1048,9 @@
if (check_decoder_llr_spec(vector) < 0)
return -1;
+ if (check_decoder_op_flags(vector) < 0)
+ return -1;
+
/* Check which params were set */
if (!(mask & TEST_BBDEV_VF_CODE_BLOCK_MODE)) {
printf(
@@ -731,7 +1094,7 @@
}
if (!(mask & TEST_BBDEV_VF_RV_INDEX))
printf(
- "WARNING: rv_index was not specified in vector file and will be set to 0\n");
+ "INFO: rv_index was not specified in vector file and will be set to 0\n");
if (!(mask & TEST_BBDEV_VF_ITER_MIN))
printf(
"WARNING: iter_min was not specified in vector file and will be set to 0\n");
@@ -751,7 +1114,7 @@
} else if (!(turbo_dec->op_flags & RTE_BBDEV_TURBO_MAP_DEC) &&
mask & TEST_BBDEV_VF_NUM_MAPS) {
printf(
- "WARNING: RTE_BBDEV_TURBO_MAP_DEC was not set in vector file and num_maps will be set to 0\n");
+ "INFO: RTE_BBDEV_TURBO_MAP_DEC was not set in vector file and num_maps will be set to 0\n");
turbo_dec->num_maps = 0;
}
if (!(mask & TEST_BBDEV_VF_EXPECTED_STATUS))
@@ -760,6 +1123,72 @@
return 0;
}
+/* checks LDPC decoder parameters */
+static int
+check_ldpc_decoder(struct test_bbdev_vector *vector)
+{
+ struct rte_bbdev_op_ldpc_dec *ldpc_dec = &vector->ldpc_dec;
+ const int mask = vector->mask;
+
+ if (check_ldpc_decoder_segments(vector) < 0)
+ return -1;
+
+ /*
+ * if (check_ldpc_decoder_llr_spec(vector) < 0)
+ * return -1;
+ *
+ * if (check_ldpc_decoder_op_flags(vector) < 0)
+ * return -1;
+ */
+
+ /* Check which params were set */
+ if (!(mask & TEST_BBDEV_VF_CODE_BLOCK_MODE)) {
+ printf(
+ "WARNING: code_block_mode was not specified in vector file and will be set to 1 (0 - TB Mode, 1 - CB mode)\n");
+ ldpc_dec->code_block_mode = 1;
+ }
+ if (ldpc_dec->code_block_mode == 0) {
+ if (!(mask & TEST_BBDEV_VF_EA))
+ printf(
+ "WARNING: ea was not specified in vector file and will be set to 0\n");
+ if (!(mask & TEST_BBDEV_VF_EB))
+ printf(
+ "WARNING: eb was not specified in vector file and will be set to 0\n");
+ if (!(mask & TEST_BBDEV_VF_C)) {
+ printf(
+ "WARNING: c was not specified in vector file and will be set to 1\n");
+ ldpc_dec->tb_params.c = 1;
+ }
+ if (!(mask & TEST_BBDEV_VF_CAB))
+ printf(
+ "WARNING: cab was not specified in vector file and will be set to 0\n");
+ if (!(mask & TEST_BBDEV_VF_R))
+ printf(
+ "WARNING: r was not specified in vector file and will be set to 0\n");
+ } else {
+ if (!(mask & TEST_BBDEV_VF_E))
+ printf(
+ "WARNING: e was not specified in vector file and will be set to 0\n");
+ }
+ if (!(mask & TEST_BBDEV_VF_RV_INDEX))
+ printf(
+ "INFO: rv_index was not specified in vector file and will be set to 0\n");
+ if (!(mask & TEST_BBDEV_VF_ITER_MAX))
+ printf(
+ "WARNING: iter_max was not specified in vector file and will be set to 0\n");
+ if (!(mask & TEST_BBDEV_VF_EXPECTED_ITER_COUNT))
+ printf(
+ "WARNING: expected_iter_count was not specified in vector file and iter_count will not be validated\n");
+ if (!(mask & TEST_BBDEV_VF_OP_FLAGS)) {
+ printf(
+ "WARNING: op_flags was not specified in vector file and capabilities will not be validated\n");
+ }
+ if (!(mask & TEST_BBDEV_VF_EXPECTED_STATUS))
+ printf(
+ "WARNING: expected_status was not specified in vector file and will be set to 0\n");
+ return 0;
+}
+
/* checks encoder parameters */
static int
check_encoder(struct test_bbdev_vector *vector)
@@ -836,10 +1265,66 @@
}
if (!(mask & TEST_BBDEV_VF_RV_INDEX))
printf(
- "WARNING: rv_index was not specified in vector file and will be set to 0\n");
+ "INFO: rv_index was not specified in vector file and will be set to 0\n");
if (!(mask & TEST_BBDEV_VF_OP_FLAGS))
printf(
- "WARNING: op_flags was not specified in vector file and capabilities will not be validated\n");
+ "INFO: op_flags was not specified in vector file and capabilities will not be validated\n");
+ if (!(mask & TEST_BBDEV_VF_EXPECTED_STATUS))
+ printf(
+ "WARNING: expected_status was not specified in vector file and will be set to 0\n");
+
+ return 0;
+}
+
+
+/* checks LDPC encoder parameters */
+static int
+check_ldpc_encoder(struct test_bbdev_vector *vector)
+{
+ unsigned char i;
+ const int mask = vector->mask;
+
+ if (vector->entries[DATA_INPUT].nb_segments == 0)
+ return -1;
+
+ for (i = 0; i < vector->entries[DATA_INPUT].nb_segments; i++)
+ if (vector->entries[DATA_INPUT].segments[i].addr == NULL)
+ return -1;
+
+ if (vector->entries[DATA_HARD_OUTPUT].nb_segments == 0)
+ return -1;
+
+ for (i = 0; i < vector->entries[DATA_HARD_OUTPUT].nb_segments; i++)
+ if (vector->entries[DATA_HARD_OUTPUT].segments[i].addr == NULL)
+ return -1;
+
+ if (!(mask & TEST_BBDEV_VF_CODE_BLOCK_MODE)) {
+ printf(
+ "WARNING: code_block_mode was not specified in vector file and will be set to 1\n");
+ vector->turbo_enc.code_block_mode = 1;
+ }
+ if (vector->turbo_enc.code_block_mode == 0) {
+ } else {
+ if (!(mask & TEST_BBDEV_VF_E) && (vector->turbo_enc.op_flags &
+ RTE_BBDEV_TURBO_RATE_MATCH))
+ printf(
+ "WARNING: e was not specified in vector file and will be set to 0\n");
+ if (!(mask & TEST_BBDEV_VF_NCB))
+ printf(
+ "WARNING: ncb was not specified in vector file and will be set to 0\n");
+ }
+ if (!(mask & TEST_BBDEV_VF_BG))
+ printf(
+ "WARNING: BG was not specified in vector file and will be set to 0\n");
+ if (!(mask & TEST_BBDEV_VF_ZC))
+ printf(
+ "WARNING: Zc was not specified in vector file and will be set to 0\n");
+ if (!(mask & TEST_BBDEV_VF_RV_INDEX))
+ printf(
+ "INFO: rv_index was not specified in vector file and will be set to 0\n");
+ if (!(mask & TEST_BBDEV_VF_OP_FLAGS))
+ printf(
+ "INFO: op_flags was not specified in vector file and capabilities will not be validated\n");
if (!(mask & TEST_BBDEV_VF_EXPECTED_STATUS))
printf(
"WARNING: expected_status was not specified in vector file and will be set to 0\n");
@@ -856,6 +1341,12 @@
} else if (vector->op_type == RTE_BBDEV_OP_TURBO_ENC) {
if (check_encoder(vector) == -1)
return -1;
+ } else if (vector->op_type == RTE_BBDEV_OP_LDPC_ENC) {
+ if (check_ldpc_encoder(vector) == -1)
+ return -1;
+ } else if (vector->op_type == RTE_BBDEV_OP_LDPC_DEC) {
+ if (check_ldpc_decoder(vector) == -1)
+ return -1;
} else if (vector->op_type != RTE_BBDEV_OP_NONE) {
printf("Vector was not filled\n");
return -1;
@@ -898,7 +1389,8 @@
goto exit;
}
- strcpy(entry, line);
+ memset(entry, 0, strlen(line) + 1);
+ strncpy(entry, line, strlen(line));
/* check if entry ends with , or = */
if (entry[strlen(entry) - 1] == ','
@@ -920,8 +1412,7 @@
}
entry = entry_extended;
- /* entry has been allocated accordingly */
- strcpy(&entry[strlen(entry)], line);
+ strncat(entry, line, strlen(line));
if (entry[strlen(entry) - 1] != ',')
break;
diff --git a/app/test-bbdev/test_bbdev_vector.h b/app/test-bbdev/test_bbdev_vector.h
index c85e94d..4e5dbf5 100644
--- a/app/test-bbdev/test_bbdev_vector.h
+++ b/app/test-bbdev/test_bbdev_vector.h
@@ -28,15 +28,21 @@ enum {
TEST_BBDEV_VF_NCB_NEG = (1ULL << 16),
TEST_BBDEV_VF_NCB_POS = (1ULL << 17),
TEST_BBDEV_VF_R = (1ULL << 18),
- TEST_BBDEV_VF_CODE_BLOCK_MODE = (1ULL << 19),
- TEST_BBDEV_VF_OP_FLAGS = (1ULL << 20),
- TEST_BBDEV_VF_EXPECTED_STATUS = (1ULL << 21),
+ TEST_BBDEV_VF_BG = (1ULL << 19),
+ TEST_BBDEV_VF_ZC = (1ULL << 20),
+ TEST_BBDEV_VF_F = (1ULL << 21),
+ TEST_BBDEV_VF_QM = (1ULL << 22),
+ TEST_BBDEV_VF_CODE_BLOCK_MODE = (1ULL << 23),
+ TEST_BBDEV_VF_OP_FLAGS = (1ULL << 24),
+ TEST_BBDEV_VF_EXPECTED_STATUS = (1ULL << 25),
};
enum op_data_type {
DATA_INPUT = 0,
DATA_SOFT_OUTPUT,
DATA_HARD_OUTPUT,
+ DATA_HARQ_INPUT,
+ DATA_HARQ_OUTPUT,
DATA_NUM_TYPES,
};
@@ -57,6 +63,8 @@ struct test_bbdev_vector {
union {
struct rte_bbdev_op_turbo_dec turbo_dec;
struct rte_bbdev_op_turbo_enc turbo_enc;
+ struct rte_bbdev_op_ldpc_dec ldpc_dec;
+ struct rte_bbdev_op_ldpc_enc ldpc_enc;
};
/* Additional storage for op data entries */
struct op_data_entries entries[DATA_NUM_TYPES];
diff --git a/app/test-bbdev/turbo_enc_default.data b/app/test-bbdev/turbo_enc_default.data
index 5587f9c..a2bc833 120000
--- a/app/test-bbdev/turbo_enc_default.data
+++ b/app/test-bbdev/turbo_enc_default.data
@@ -1 +1 @@
-test_vectors/turbo_enc_c1_k6144_r0_e32256_crc24b_rm.data
\ No newline at end of file
+test_vectors/turbo_enc_c1_k6144_r0_e18444.data
\ No newline at end of file
--
1.8.3.1