From mboxrd@z Thu Jan 1 00:00:00 1970
To: dev@dpdk.org
References: <1582778348-113547-15-git-send-email-nicolas.chautru@intel.com>
 <1585193268-74468-1-git-send-email-nicolas.chautru@intel.com>
 <1585193268-74468-9-git-send-email-nicolas.chautru@intel.com>
From: Dave Burley
Date: Fri, 27 Mar 2020 11:47:19 +0000
In-Reply-To: <1585193268-74468-9-git-send-email-nicolas.chautru@intel.com>
Subject: Re: [dpdk-dev] [PATCH v5 08/10] test-bbdev: support for LDPC interrupt test

Acked-by: Dave Burley

On 26/03/2020 03:27, Nicolas Chautru wrote:
> From: Nic Chautru
>
> Add the missing implementation of the interrupt tests
> for the LDPC encoder and decoder.
>
> Signed-off-by: Nic Chautru
> ---
>  app/test-bbdev/test_bbdev_perf.c | 202 ++++++++++++++++++++++++++++++++++++++-
>  1 file changed, 200 insertions(+), 2 deletions(-)
>
> diff --git a/app/test-bbdev/test_bbdev_perf.c b/app/test-bbdev/test_bbdev_perf.c
> index bc73a97..c45cdd2 100644
> --- a/app/test-bbdev/test_bbdev_perf.c
> +++ b/app/test-bbdev/test_bbdev_perf.c
> @@ -754,6 +754,9 @@ typedef int (test_case_function)(struct active_device *ad,
>      /* Clear active devices structs. */
>      memset(active_devs, 0, sizeof(active_devs));
>      nb_active_devs = 0;
> +
> +    /* Disable interrupts */
> +    intr_enabled = false;
>  }
>
>  static int
> @@ -2562,6 +2565,109 @@ typedef int (test_case_function)(struct active_device *ad,
>  }
>
>  static int
> +throughput_intr_lcore_ldpc_dec(void *arg)
> +{
> +    struct thread_params *tp = arg;
> +    unsigned int enqueued;
> +    const uint16_t queue_id = tp->queue_id;
> +    const uint16_t burst_sz = tp->op_params->burst_sz;
> +    const uint16_t num_to_process = tp->op_params->num_to_process;
> +    struct rte_bbdev_dec_op *ops[num_to_process];
> +    struct test_buffers *bufs = NULL;
> +    struct rte_bbdev_info info;
> +    int ret, i, j;
> +    struct rte_bbdev_dec_op *ref_op = tp->op_params->ref_dec_op;
> +    uint16_t num_to_enq, enq;
> +
> +    bool loopback = check_bit(ref_op->ldpc_dec.op_flags,
> +            RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_LOOPBACK);
> +    bool hc_out = check_bit(ref_op->ldpc_dec.op_flags,
> +            RTE_BBDEV_LDPC_HQ_COMBINE_OUT_ENABLE);
> +
> +    TEST_ASSERT_SUCCESS((burst_sz > MAX_BURST),
> +            "BURST_SIZE should be <= %u", MAX_BURST);
> +
> +    TEST_ASSERT_SUCCESS(rte_bbdev_queue_intr_enable(tp->dev_id, queue_id),
> +            "Failed to enable interrupts for dev: %u, queue_id: %u",
> +            tp->dev_id, queue_id);
> +
> +    rte_bbdev_info_get(tp->dev_id, &info);
> +
> +    TEST_ASSERT_SUCCESS((num_to_process > info.drv.queue_size_lim),
> +            "NUM_OPS cannot exceed %u for this device",
> +            info.drv.queue_size_lim);
> +
> +    bufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];
> +
> +    rte_atomic16_clear(&tp->processing_status);
> +    rte_atomic16_clear(&tp->nb_dequeued);
> +
> +    while (rte_atomic16_read(&tp->op_params->sync) == SYNC_WAIT)
> +        rte_pause();
> +
> +    ret = rte_bbdev_dec_op_alloc_bulk(tp->op_params->mp, ops,
> +            num_to_process);
> +    TEST_ASSERT_SUCCESS(ret, "Allocation failed for %d ops",
> +            num_to_process);
> +    if (test_vector.op_type != RTE_BBDEV_OP_NONE)
> +        copy_reference_ldpc_dec_op(ops, num_to_process, 0, bufs->inputs,
> +                bufs->hard_outputs, bufs->soft_outputs,
> +                bufs->harq_inputs, bufs->harq_outputs, ref_op);
> +
> +    /* Set counter to validate the ordering */
> +    for (j = 0; j < num_to_process; ++j)
> +        ops[j]->opaque_data = (void *)(uintptr_t)j;
> +
> +    for (j = 0; j < TEST_REPETITIONS; ++j) {
> +        for (i = 0; i < num_to_process; ++i) {
> +            if (!loopback)
> +                rte_pktmbuf_reset(
> +                        ops[i]->ldpc_dec.hard_output.data);
> +            if (hc_out || loopback)
> +                mbuf_reset(
> +                        ops[i]->ldpc_dec.harq_combined_output.data);
> +        }
> +
> +        tp->start_time = rte_rdtsc_precise();
> +        for (enqueued = 0; enqueued < num_to_process;) {
> +            num_to_enq = burst_sz;
> +
> +            if (unlikely(num_to_process - enqueued < num_to_enq))
> +                num_to_enq = num_to_process - enqueued;
> +
> +            enq = 0;
> +            do {
> +                enq += rte_bbdev_enqueue_ldpc_dec_ops(
> +                        tp->dev_id,
> +                        queue_id, &ops[enqueued],
> +                        num_to_enq);
> +            } while (unlikely(num_to_enq != enq));
> +            enqueued += enq;
> +
> +            /* Write to thread burst_sz current number of enqueued
> +             * descriptors. It ensures that proper number of
> +             * descriptors will be dequeued in callback
> +             * function - needed for last batch in case where
> +             * the number of operations is not a multiple of
> +             * burst size.
> +             */
> +            rte_atomic16_set(&tp->burst_sz, num_to_enq);
> +
> +            /* Wait until processing of previous batch is
> +             * completed
> +             */
> +            while (rte_atomic16_read(&tp->nb_dequeued) !=
> +                    (int16_t) enqueued)
> +                rte_pause();
> +        }
> +        if (j != TEST_REPETITIONS - 1)
> +            rte_atomic16_clear(&tp->nb_dequeued);
> +    }
> +
> +    return TEST_SUCCESS;
> +}
> +
> +static int
>  throughput_intr_lcore_dec(void *arg)
>  {
>      struct thread_params *tp = arg;
> @@ -2740,6 +2846,98 @@ typedef int (test_case_function)(struct active_device *ad,
>      return TEST_SUCCESS;
>  }
>
> +
> +static int
> +throughput_intr_lcore_ldpc_enc(void *arg)
> +{
> +    struct thread_params *tp = arg;
> +    unsigned int enqueued;
> +    const uint16_t queue_id = tp->queue_id;
> +    const uint16_t burst_sz = tp->op_params->burst_sz;
> +    const uint16_t num_to_process = tp->op_params->num_to_process;
> +    struct rte_bbdev_enc_op *ops[num_to_process];
> +    struct test_buffers *bufs = NULL;
> +    struct rte_bbdev_info info;
> +    int ret, i, j;
> +    uint16_t num_to_enq, enq;
> +
> +    TEST_ASSERT_SUCCESS((burst_sz > MAX_BURST),
> +            "BURST_SIZE should be <= %u", MAX_BURST);
> +
> +    TEST_ASSERT_SUCCESS(rte_bbdev_queue_intr_enable(tp->dev_id, queue_id),
> +            "Failed to enable interrupts for dev: %u, queue_id: %u",
> +            tp->dev_id, queue_id);
> +
> +    rte_bbdev_info_get(tp->dev_id, &info);
> +
> +    TEST_ASSERT_SUCCESS((num_to_process > info.drv.queue_size_lim),
> +            "NUM_OPS cannot exceed %u for this device",
> +            info.drv.queue_size_lim);
> +
> +    bufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];
> +
> +    rte_atomic16_clear(&tp->processing_status);
> +    rte_atomic16_clear(&tp->nb_dequeued);
> +
> +    while (rte_atomic16_read(&tp->op_params->sync) == SYNC_WAIT)
> +        rte_pause();
> +
> +    ret = rte_bbdev_enc_op_alloc_bulk(tp->op_params->mp, ops,
> +            num_to_process);
> +    TEST_ASSERT_SUCCESS(ret, "Allocation failed for %d ops",
> +            num_to_process);
> +    if (test_vector.op_type != RTE_BBDEV_OP_NONE)
> +        copy_reference_ldpc_enc_op(ops, num_to_process, 0,
> +                bufs->inputs, bufs->hard_outputs,
> +                tp->op_params->ref_enc_op);
> +
> +    /* Set counter to validate the ordering
> +     */
> +    for (j = 0; j < num_to_process; ++j)
> +        ops[j]->opaque_data = (void *)(uintptr_t)j;
> +
> +    for (j = 0; j < TEST_REPETITIONS; ++j) {
> +        for (i = 0; i < num_to_process; ++i)
> +            rte_pktmbuf_reset(ops[i]->turbo_enc.output.data);
> +
> +        tp->start_time = rte_rdtsc_precise();
> +        for (enqueued = 0; enqueued < num_to_process;) {
> +            num_to_enq = burst_sz;
> +
> +            if (unlikely(num_to_process - enqueued < num_to_enq))
> +                num_to_enq = num_to_process - enqueued;
> +
> +            enq = 0;
> +            do {
> +                enq += rte_bbdev_enqueue_ldpc_enc_ops(
> +                        tp->dev_id,
> +                        queue_id, &ops[enqueued],
> +                        num_to_enq);
> +            } while (unlikely(enq != num_to_enq));
> +            enqueued += enq;
> +
> +            /* Write to thread burst_sz current number of enqueued
> +             * descriptors. It ensures that proper number of
> +             * descriptors will be dequeued in callback
> +             * function - needed for last batch in case where
> +             * the number of operations is not a multiple of
> +             * burst size.
> +             */
> +            rte_atomic16_set(&tp->burst_sz, num_to_enq);
> +
> +            /* Wait until processing of previous batch is
> +             * completed
> +             */
> +            while (rte_atomic16_read(&tp->nb_dequeued) !=
> +                    (int16_t) enqueued)
> +                rte_pause();
> +        }
> +        if (j != TEST_REPETITIONS - 1)
> +            rte_atomic16_clear(&tp->nb_dequeued);
> +    }
> +
> +    return TEST_SUCCESS;
> +}
> +
>  static int
>  throughput_pmd_lcore_dec(void *arg)
>  {
> @@ -3483,11 +3681,11 @@ typedef int (test_case_function)(struct active_device *ad,
>      if (test_vector.op_type == RTE_BBDEV_OP_TURBO_DEC)
>          throughput_function = throughput_intr_lcore_dec;
>      else if (test_vector.op_type == RTE_BBDEV_OP_LDPC_DEC)
> -        throughput_function = throughput_intr_lcore_dec;
> +        throughput_function = throughput_intr_lcore_ldpc_dec;
>      else if (test_vector.op_type == RTE_BBDEV_OP_TURBO_ENC)
>          throughput_function = throughput_intr_lcore_enc;
>      else if (test_vector.op_type == RTE_BBDEV_OP_LDPC_ENC)
> -        throughput_function = throughput_intr_lcore_enc;
> +        throughput_function = throughput_intr_lcore_ldpc_enc;
>      else
>          throughput_function = throughput_intr_lcore_enc;
>
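
[Editor's note] Both new lcore functions follow the same enqueue/interrupt
handshake: retry the burst enqueue until the device accepts the whole batch,
publish the batch size for the dequeue callback, then wait until the callback
has dequeued everything enqueued so far. Below is a minimal standalone sketch
of that handshake, not the patch itself: enqueue_burst, burst_sz and
nb_dequeued are hypothetical stand-ins for the rte_bbdev_enqueue_ldpc_*_ops()
calls and the rte_atomic16 fields of thread_params.

#include <stdatomic.h>
#include <stdint.h>

/* Hypothetical stand-in for rte_bbdev_enqueue_ldpc_*_ops(): may accept
 * fewer ops than requested when the hardware queue is momentarily full. */
extern uint16_t enqueue_burst(uint16_t queue_id, void **ops, uint16_t n);

static atomic_int_fast16_t burst_sz;    /* mirrors tp->burst_sz */
static atomic_int_fast16_t nb_dequeued; /* advanced by the dequeue callback */

static void
enqueue_all(uint16_t queue_id, void **ops, uint16_t num_to_process,
        uint16_t max_burst)
{
    unsigned int enqueued;

    for (enqueued = 0; enqueued < num_to_process;) {
        /* Clamp the last batch when num_to_process is not a
         * multiple of the burst size. */
        uint16_t num_to_enq = max_burst;
        if (num_to_process - enqueued < num_to_enq)
            num_to_enq = num_to_process - enqueued;

        /* Retry until the whole batch has been accepted. */
        uint16_t enq = 0;
        do {
            enq += enqueue_burst(queue_id, &ops[enqueued + enq],
                    (uint16_t)(num_to_enq - enq));
        } while (enq != num_to_enq);
        enqueued += enq;

        /* Tell the dequeue callback how many descriptors belong
         * to this batch. */
        atomic_store(&burst_sz, num_to_enq);

        /* Wait for the callback to drain this batch before
         * enqueueing the next one. */
        while (atomic_load(&nb_dequeued) != (int_fast16_t)enqueued)
            ;
    }
}

In the patch itself the wait loop calls rte_pause() rather than spinning
empty, and nb_dequeued is advanced by the test's dequeue event callback when
the device raises its interrupt.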