From mboxrd@z Thu Jan  1 00:00:00 1970
From: Nicolas Chautru <nicolas.chautru@intel.com>
To: thomas@monjalon.net, akhil.goyal@nxp.com, dev@dpdk.org
Cc: ferruh.yigit@intel.com, Nic Chautru <nicolas.chautru@intel.com>
Date: Wed, 26 Feb 2020 20:39:02 -0800
Message-Id: <1582778348-113547-9-git-send-email-nicolas.chautru@intel.com>
X-Mailer: git-send-email 1.8.3.1
In-Reply-To: <1582778348-113547-1-git-send-email-nicolas.chautru@intel.com>
References: <1582778348-113547-1-git-send-email-nicolas.chautru@intel.com>
Subject: [dpdk-dev] [PATCH v1 08/14] test-bbdev: support for LDPC interrupt test
List-Id: DPDK patches and discussions <dev.dpdk.org>
Sender: "dev" <dev-bounces@dpdk.org>

From: Nic Chautru <nicolas.chautru@intel.com>

Add the missing implementation of the interrupt tests for the LDPC
encoder and decoder.

Signed-off-by: Nic Chautru <nicolas.chautru@intel.com>
---
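
Note: both functions rely on the dequeue-side event callback that the
throughput test registers for RTE_BBDEV_EVENT_DEQUEUE before launching
the worker lcores. The enqueue loop publishes the size of each burst
through tp->burst_sz and then spins on tp->nb_dequeued; the callback
drains the burst and advances that counter. A simplified sketch of the
callback side is shown below (illustrative only; the in-tree
dequeue_event_callback() additionally validates the dequeued operations
and records throughput statistics):

static void
dequeue_event_callback_sketch(uint16_t dev_id,
		enum rte_bbdev_event_type event, void *cb_arg,
		void *ret_param)
{
	struct thread_params *tp = cb_arg;
	struct rte_bbdev_dec_op *ops[MAX_BURST];
	uint16_t burst_sz, deq = 0;

	RTE_SET_USED(ret_param);
	if (event != RTE_BBDEV_EVENT_DEQUEUE)
		return;

	/* Drain exactly the burst the enqueue loop advertised */
	burst_sz = rte_atomic16_read(&tp->burst_sz);
	do {
		deq += rte_bbdev_dequeue_ldpc_dec_ops(dev_id, tp->queue_id,
				&ops[deq], burst_sz - deq);
	} while (deq < burst_sz);

	/* Unblocks the enqueue loop spinning on nb_dequeued */
	rte_atomic16_add(&tp->nb_dequeued, deq);
}
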
 app/test-bbdev/test_bbdev_perf.c | 209 ++++++++++++++++++++++++++++++++++++++-
 1 file changed, 207 insertions(+), 2 deletions(-)

diff --git a/app/test-bbdev/test_bbdev_perf.c b/app/test-bbdev/test_bbdev_perf.c
index 7bc824b..8fcdda0 100644
--- a/app/test-bbdev/test_bbdev_perf.c
+++ b/app/test-bbdev/test_bbdev_perf.c
@@ -745,6 +745,9 @@ typedef int (test_case_function)(struct active_device *ad,
 	/* Clear active devices structs. */
 	memset(active_devs, 0, sizeof(active_devs));
 	nb_active_devs = 0;
+
+	/* Disable interrupts */
+	intr_enabled = false;
 }
 
 static int
@@ -2457,6 +2460,113 @@ typedef int (test_case_function)(struct active_device *ad,
 }
 
 static int
+throughput_intr_lcore_ldpc_dec(void *arg)
+{
+	struct thread_params *tp = arg;
+	unsigned int enqueued;
+	const uint16_t queue_id = tp->queue_id;
+	const uint16_t burst_sz = tp->op_params->burst_sz;
+	const uint16_t num_to_process = tp->op_params->num_to_process;
+	struct rte_bbdev_dec_op *ops[num_to_process];
+	struct test_buffers *bufs = NULL;
+	struct rte_bbdev_info info;
+	int ret, i, j;
+	struct rte_bbdev_dec_op *ref_op = tp->op_params->ref_dec_op;
+	uint16_t num_to_enq, enq;
+
+	bool loopback = check_bit(ref_op->ldpc_dec.op_flags,
+			RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_LOOPBACK);
+	bool hc_out = check_bit(ref_op->ldpc_dec.op_flags,
+			RTE_BBDEV_LDPC_HQ_COMBINE_OUT_ENABLE);
+
+	TEST_ASSERT_SUCCESS((burst_sz > MAX_BURST),
+			"BURST_SIZE should be <= %u", MAX_BURST);
+
+	TEST_ASSERT_SUCCESS(rte_bbdev_queue_intr_enable(tp->dev_id, queue_id),
+			"Failed to enable interrupts for dev: %u, queue_id: %u",
+			tp->dev_id, queue_id);
+
+	rte_bbdev_info_get(tp->dev_id, &info);
+
+	TEST_ASSERT_SUCCESS((num_to_process > info.drv.queue_size_lim),
+			"NUM_OPS cannot exceed %u for this device",
+			info.drv.queue_size_lim);
+
+	bufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];
+
+	rte_atomic16_clear(&tp->processing_status);
+	rte_atomic16_clear(&tp->nb_dequeued);
+
+	while (rte_atomic16_read(&tp->op_params->sync) == SYNC_WAIT)
+		rte_pause();
+
+	ret = rte_bbdev_dec_op_alloc_bulk(tp->op_params->mp, ops,
+			num_to_process);
+	TEST_ASSERT_SUCCESS(ret, "Allocation failed for %d ops",
+			num_to_process);
+	if (test_vector.op_type != RTE_BBDEV_OP_NONE)
+		copy_reference_ldpc_dec_op(ops, num_to_process, 0, bufs->inputs,
+				bufs->hard_outputs, bufs->soft_outputs,
+				bufs->harq_inputs, bufs->harq_outputs, ref_op);
+
+	/* Set counter to validate the ordering */
+	for (j = 0; j < num_to_process; ++j)
+		ops[j]->opaque_data = (void *)(uintptr_t)j;
+
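+	/* Repeat the full set of operations TEST_REPETITIONS times;
+	 * the output mbufs are reset before each pass because every
+	 * run writes its output into the same buffers.
+	 */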
+	for (j = 0; j < TEST_REPETITIONS; ++j) {
+		for (i = 0; i < num_to_process; ++i) {
+			if (!loopback)
+				rte_pktmbuf_reset(
+					ops[i]->ldpc_dec.hard_output.data);
+			if (hc_out || loopback)
+				mbuf_reset(
+					ops[i]->ldpc_dec.harq_combined_output.data);
+		}
+
+		tp->start_time = rte_rdtsc_precise();
+		for (enqueued = 0; enqueued < num_to_process;) {
+			num_to_enq = burst_sz;
+
+			if (unlikely(num_to_process - enqueued < num_to_enq))
+				num_to_enq = num_to_process - enqueued;
+
+			enq = 0;
+			do {
+				enq += rte_bbdev_enqueue_ldpc_dec_ops(
+						tp->dev_id,
+						queue_id, &ops[enqueued],
+						num_to_enq);
+			} while (unlikely(num_to_enq != enq));
+			enqueued += enq;
+
+			/* Write to thread burst_sz current number of enqueued
+			 * descriptors. It ensures that proper number of
+			 * descriptors will be dequeued in callback
+			 * function - needed for last batch in case where
+			 * the number of operations is not a multiple of
+			 * burst size.
+			 */
+			rte_atomic16_set(&tp->burst_sz, num_to_enq);
+
+			/* Wait until processing of previous batch is
+			 * completed
+			 */
+			while (rte_atomic16_read(&tp->nb_dequeued) !=
+					(int16_t) enqueued)
+				rte_pause();
+		}
+		if (j != TEST_REPETITIONS - 1)
+			rte_atomic16_clear(&tp->nb_dequeued);
+	}
+
+	return TEST_SUCCESS;
+}
+
+static int
 throughput_intr_lcore_dec(void *arg)
 {
 	struct thread_params *tp = arg;
@@ -2635,6 +2745,101 @@ typedef int (test_case_function)(struct active_device *ad,
 	return TEST_SUCCESS;
 }
 
+
+static int
+throughput_intr_lcore_ldpc_enc(void *arg)
+{
+	struct thread_params *tp = arg;
+	unsigned int enqueued;
+	const uint16_t queue_id = tp->queue_id;
+	const uint16_t burst_sz = tp->op_params->burst_sz;
+	const uint16_t num_to_process = tp->op_params->num_to_process;
+	struct rte_bbdev_enc_op *ops[num_to_process];
+	struct test_buffers *bufs = NULL;
+	struct rte_bbdev_info info;
+	int ret, i, j;
+	uint16_t num_to_enq, enq;
+
+	TEST_ASSERT_SUCCESS((burst_sz > MAX_BURST),
+			"BURST_SIZE should be <= %u", MAX_BURST);
+
+	TEST_ASSERT_SUCCESS(rte_bbdev_queue_intr_enable(tp->dev_id, queue_id),
+			"Failed to enable interrupts for dev: %u, queue_id: %u",
+			tp->dev_id, queue_id);
+
+	rte_bbdev_info_get(tp->dev_id, &info);
+
+	TEST_ASSERT_SUCCESS((num_to_process > info.drv.queue_size_lim),
+			"NUM_OPS cannot exceed %u for this device",
+			info.drv.queue_size_lim);
+
+	bufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];
+
+	rte_atomic16_clear(&tp->processing_status);
+	rte_atomic16_clear(&tp->nb_dequeued);
+
+	while (rte_atomic16_read(&tp->op_params->sync) == SYNC_WAIT)
+		rte_pause();
+
+	ret = rte_bbdev_enc_op_alloc_bulk(tp->op_params->mp, ops,
+			num_to_process);
+	TEST_ASSERT_SUCCESS(ret, "Allocation failed for %d ops",
+			num_to_process);
+	if (test_vector.op_type != RTE_BBDEV_OP_NONE)
+		copy_reference_ldpc_enc_op(ops, num_to_process, 0,
+				bufs->inputs, bufs->hard_outputs,
+				tp->op_params->ref_enc_op);
+
+	/* Set counter to validate the ordering */
+	for (j = 0; j < num_to_process; ++j)
+		ops[j]->opaque_data = (void *)(uintptr_t)j;
+
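+	/* Reset the output mbufs before each pass; every repetition
+	 * writes the encoded output into the same buffers.
+	 */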
+	for (j = 0; j < TEST_REPETITIONS; ++j) {
+		for (i = 0; i < num_to_process; ++i)
+			rte_pktmbuf_reset(ops[i]->ldpc_enc.output.data);
+
+		tp->start_time = rte_rdtsc_precise();
+		for (enqueued = 0; enqueued < num_to_process;) {
+			num_to_enq = burst_sz;
+
+			if (unlikely(num_to_process - enqueued < num_to_enq))
+				num_to_enq = num_to_process - enqueued;
+
+			enq = 0;
+			do {
+				enq += rte_bbdev_enqueue_ldpc_enc_ops(
+						tp->dev_id,
+						queue_id, &ops[enqueued],
+						num_to_enq);
+			} while (unlikely(enq != num_to_enq));
+			enqueued += enq;
+
+			/* Write to thread burst_sz current number of enqueued
+			 * descriptors. It ensures that proper number of
+			 * descriptors will be dequeued in callback
+			 * function - needed for last batch in case where
+			 * the number of operations is not a multiple of
+			 * burst size.
+			 */
+			rte_atomic16_set(&tp->burst_sz, num_to_enq);
+
+			/* Wait until processing of previous batch is
+			 * completed
+			 */
+			while (rte_atomic16_read(&tp->nb_dequeued) !=
+					(int16_t) enqueued)
+				rte_pause();
+		}
+		if (j != TEST_REPETITIONS - 1)
+			rte_atomic16_clear(&tp->nb_dequeued);
+	}
+
+	return TEST_SUCCESS;
+}
+
 static int
 throughput_pmd_lcore_dec(void *arg)
 {
@@ -3378,11 +3583,11 @@ typedef int (test_case_function)(struct active_device *ad,
 	if (test_vector.op_type == RTE_BBDEV_OP_TURBO_DEC)
 		throughput_function = throughput_intr_lcore_dec;
 	else if (test_vector.op_type == RTE_BBDEV_OP_LDPC_DEC)
-		throughput_function = throughput_intr_lcore_dec;
+		throughput_function = throughput_intr_lcore_ldpc_dec;
 	else if (test_vector.op_type == RTE_BBDEV_OP_TURBO_ENC)
 		throughput_function = throughput_intr_lcore_enc;
 	else if (test_vector.op_type == RTE_BBDEV_OP_LDPC_ENC)
-		throughput_function = throughput_intr_lcore_enc;
+		throughput_function = throughput_intr_lcore_ldpc_enc;
 	else
 		throughput_function = throughput_intr_lcore_enc;
 
-- 
1.8.3.1