DPDK patches and discussions
From: Maxime Coquelin <maxime.coquelin@redhat.com>
To: Hernan Vargas <hernan.vargas@intel.com>,
	dev@dpdk.org, gakhil@marvell.com, trix@redhat.com
Cc: nicolas.chautru@intel.com, qi.z.zhang@intel.com
Subject: Re: [PATCH v3 09/17] test/bbdev: add timeout for enq/deq loops
Date: Fri, 3 Mar 2023 13:44:33 +0100	[thread overview]
Message-ID: <7bdc1d66-fb5f-333c-9744-5eb97cc6eee4@redhat.com> (raw)
In-Reply-To: <20230302202211.170017-10-hernan.vargas@intel.com>



On 3/2/23 21:22, Hernan Vargas wrote:
> Added a timeout to prevent an infinite loop condition if the device
> doesn't enqueue/dequeue.
> 
> Signed-off-by: Hernan Vargas <hernan.vargas@intel.com>
> ---
>   app/test-bbdev/test_bbdev_perf.c | 133 ++++++++++++++++++++++++++-----
>   1 file changed, 113 insertions(+), 20 deletions(-)
> 
> diff --git a/app/test-bbdev/test_bbdev_perf.c b/app/test-bbdev/test_bbdev_perf.c
> index 7bfc4cd5779e..7a4841a069ee 100644
> --- a/app/test-bbdev/test_bbdev_perf.c
> +++ b/app/test-bbdev/test_bbdev_perf.c
> @@ -192,6 +192,15 @@ struct test_time_stats {
>   typedef int (test_case_function)(struct active_device *ad,
>   		struct test_op_params *op_params);
>   
> +/* Get device status before timeout exit */
> +static inline void
> +timeout_exit(uint8_t dev_id)
> +{
> +	struct rte_bbdev_info info;
> +	rte_bbdev_info_get(dev_id, &info);
> +	printf("Device Status %s\n", rte_bbdev_device_status_str(info.drv.device_status));
> +}
> +
>   static inline void
>   mbuf_reset(struct rte_mbuf *m)
>   {
> @@ -3553,7 +3562,7 @@ throughput_pmd_lcore_dec(void *arg)
>   		ops_enq[j]->opaque_data = (void *)(uintptr_t)j;
>   
>   	for (i = 0; i < TEST_REPETITIONS; ++i) {
> -
> +		uint32_t time_out = 0;
>   		for (j = 0; j < num_ops; ++j)
>   			mbuf_reset(ops_enq[j]->turbo_dec.hard_output.data);
>   		if (so_enable)
> @@ -3573,12 +3582,23 @@ throughput_pmd_lcore_dec(void *arg)
>   
>   			deq += rte_bbdev_dequeue_dec_ops(tp->dev_id,
>   					queue_id, &ops_deq[deq], enq - deq);
> +			time_out++;
> +			if (time_out >= TIME_OUT_POLL) {
> +				timeout_exit(tp->dev_id);
> +				TEST_ASSERT_SUCCESS(TEST_FAILED, "Enqueue timeout!");
> +			}
>   		}
>   
>   		/* dequeue the remaining */
> +		time_out = 0;
>   		while (deq < enq) {
>   			deq += rte_bbdev_dequeue_dec_ops(tp->dev_id,
>   					queue_id, &ops_deq[deq], enq - deq);
> +			time_out++;
> +			if (time_out >= TIME_OUT_POLL) {
> +				timeout_exit(tp->dev_id);
> +				TEST_ASSERT_SUCCESS(TEST_FAILED, "Dequeue timeout!");
> +			}
>   		}
>   
>   		total_time += rte_rdtsc_precise() - start_time;
> @@ -3669,6 +3689,7 @@ bler_pmd_lcore_ldpc_dec(void *arg)
>   		ops_enq[j]->opaque_data = (void *)(uintptr_t)j;
>   
>   	for (i = 0; i < 1; ++i) { /* Could add more iterations */
> +		uint32_t time_out = 0;
>   		for (j = 0; j < num_ops; ++j) {
>   			if (!loopback)
>   				mbuf_reset(
> @@ -3692,12 +3713,23 @@ bler_pmd_lcore_ldpc_dec(void *arg)
>   
>   			deq += rte_bbdev_dequeue_ldpc_dec_ops(tp->dev_id,
>   					queue_id, &ops_deq[deq], enq - deq);
> +			time_out++;
> +			if (time_out >= TIME_OUT_POLL) {
> +				timeout_exit(tp->dev_id);
> +				TEST_ASSERT_SUCCESS(TEST_FAILED, "Enqueue timeout!");
> +			}
>   		}
>   
>   		/* dequeue the remaining */
> +		time_out = 0;
>   		while (deq < enq) {
>   			deq += rte_bbdev_dequeue_ldpc_dec_ops(tp->dev_id,
>   					queue_id, &ops_deq[deq], enq - deq);
> +			time_out++;
> +			if (time_out >= TIME_OUT_POLL) {
> +				timeout_exit(tp->dev_id);
> +				TEST_ASSERT_SUCCESS(TEST_FAILED, "Dequeue timeout!");
> +			}
>   		}
>   
>   		total_time += rte_rdtsc_precise() - start_time;
> @@ -3796,6 +3828,7 @@ throughput_pmd_lcore_ldpc_dec(void *arg)
>   		ops_enq[j]->opaque_data = (void *)(uintptr_t)j;
>   
>   	for (i = 0; i < TEST_REPETITIONS; ++i) {
> +		uint32_t time_out = 0;
>   		for (j = 0; j < num_ops; ++j) {
>   			if (!loopback)
>   				mbuf_reset(
> @@ -3820,12 +3853,23 @@ throughput_pmd_lcore_ldpc_dec(void *arg)
>   
>   			deq += rte_bbdev_dequeue_ldpc_dec_ops(tp->dev_id,
>   					queue_id, &ops_deq[deq], enq - deq);
> +			time_out++;
> +			if (time_out >= TIME_OUT_POLL) {
> +				timeout_exit(tp->dev_id);
> +				TEST_ASSERT_SUCCESS(TEST_FAILED, "Enqueue timeout!");
> +			}
>   		}
>   
>   		/* dequeue the remaining */
> +		time_out = 0;
>   		while (deq < enq) {
>   			deq += rte_bbdev_dequeue_ldpc_dec_ops(tp->dev_id,
>   					queue_id, &ops_deq[deq], enq - deq);
> +			time_out++;
> +			if (time_out >= TIME_OUT_POLL) {
> +				timeout_exit(tp->dev_id);
> +				TEST_ASSERT_SUCCESS(TEST_FAILED, "Dequeue timeout!");
> +			}
>   		}
>   
>   		total_time += rte_rdtsc_precise() - start_time;
> @@ -3907,7 +3951,7 @@ throughput_pmd_lcore_enc(void *arg)
>   		ops_enq[j]->opaque_data = (void *)(uintptr_t)j;
>   
>   	for (i = 0; i < TEST_REPETITIONS; ++i) {
> -
> +		uint32_t time_out = 0;
>   		if (test_vector.op_type != RTE_BBDEV_OP_NONE)
>   			for (j = 0; j < num_ops; ++j)
>   				mbuf_reset(ops_enq[j]->turbo_enc.output.data);
> @@ -3925,12 +3969,23 @@ throughput_pmd_lcore_enc(void *arg)
>   
>   			deq += rte_bbdev_dequeue_enc_ops(tp->dev_id,
>   					queue_id, &ops_deq[deq], enq - deq);
> +			time_out++;
> +			if (time_out >= TIME_OUT_POLL) {
> +				timeout_exit(tp->dev_id);
> +				TEST_ASSERT_SUCCESS(TEST_FAILED, "Enqueue timeout!");
> +			}
>   		}
>   
>   		/* dequeue the remaining */
> +		time_out = 0;
>   		while (deq < enq) {
>   			deq += rte_bbdev_dequeue_enc_ops(tp->dev_id,
>   					queue_id, &ops_deq[deq], enq - deq);
> +			time_out++;
> +			if (time_out >= TIME_OUT_POLL) {
> +				timeout_exit(tp->dev_id);
> +				TEST_ASSERT_SUCCESS(TEST_FAILED, "Dequeue timeout!");
> +			}
>   		}
>   
>   		total_time += rte_rdtsc_precise() - start_time;
> @@ -3997,7 +4052,7 @@ throughput_pmd_lcore_ldpc_enc(void *arg)
>   		ops_enq[j]->opaque_data = (void *)(uintptr_t)j;
>   
>   	for (i = 0; i < TEST_REPETITIONS; ++i) {
> -
> +		uint32_t time_out = 0;
>   		if (test_vector.op_type != RTE_BBDEV_OP_NONE)
>   			for (j = 0; j < num_ops; ++j)
>   				mbuf_reset(ops_enq[j]->turbo_enc.output.data);
> @@ -4015,12 +4070,23 @@ throughput_pmd_lcore_ldpc_enc(void *arg)
>   
>   			deq += rte_bbdev_dequeue_ldpc_enc_ops(tp->dev_id,
>   					queue_id, &ops_deq[deq], enq - deq);
> +			time_out++;
> +			if (time_out >= TIME_OUT_POLL) {
> +				timeout_exit(tp->dev_id);
> +				TEST_ASSERT_SUCCESS(TEST_FAILED, "Enqueue timeout!");
> +			}
>   		}
>   
>   		/* dequeue the remaining */
> +		time_out = 0;
>   		while (deq < enq) {
>   			deq += rte_bbdev_dequeue_ldpc_enc_ops(tp->dev_id,
>   					queue_id, &ops_deq[deq], enq - deq);
> +			time_out++;
> +			if (time_out >= TIME_OUT_POLL) {
> +				timeout_exit(tp->dev_id);
> +				TEST_ASSERT_SUCCESS(TEST_FAILED, "Dequeue timeout!");
> +			}
>   		}
>   
>   		total_time += rte_rdtsc_precise() - start_time;
> @@ -4086,7 +4152,7 @@ throughput_pmd_lcore_fft(void *arg)
>   		ops_enq[j]->opaque_data = (void *)(uintptr_t)j;
>   
>   	for (i = 0; i < TEST_REPETITIONS; ++i) {
> -
> +		uint32_t time_out = 0;
>   		for (j = 0; j < num_ops; ++j)
>   			mbuf_reset(ops_enq[j]->fft.base_output.data);
>   
> @@ -4103,12 +4169,23 @@ throughput_pmd_lcore_fft(void *arg)
>   
>   			deq += rte_bbdev_dequeue_fft_ops(tp->dev_id,
>   					queue_id, &ops_deq[deq], enq - deq);
> +			time_out++;
> +			if (time_out >= TIME_OUT_POLL) {
> +				timeout_exit(tp->dev_id);
> +				TEST_ASSERT_SUCCESS(TEST_FAILED, "Enqueue timeout!");
> +			}
>   		}
>   
>   		/* dequeue the remaining */
> +		time_out = 0;
>   		while (deq < enq) {
>   			deq += rte_bbdev_dequeue_fft_ops(tp->dev_id,
>   					queue_id, &ops_deq[deq], enq - deq);
> +			time_out++;
> +			if (time_out >= TIME_OUT_POLL) {
> +				timeout_exit(tp->dev_id);
> +				TEST_ASSERT_SUCCESS(TEST_FAILED, "Dequeue timeout!");
> +			}
>   		}
>   
>   		total_time += rte_rdtsc_precise() - start_time;
> @@ -4481,6 +4558,7 @@ latency_test_dec(struct rte_mempool *mempool,
>   
>   	for (i = 0, dequeued = 0; dequeued < num_to_process; ++i) {
>   		uint16_t enq = 0, deq = 0;
> +		uint32_t time_out = 0;
>   		bool first_time = true;
>   		last_time = 0;
>   
> @@ -4523,6 +4601,11 @@ latency_test_dec(struct rte_mempool *mempool,
>   				last_time = rte_rdtsc_precise() - start_time;
>   				first_time = false;
>   			}
> +			time_out++;
> +			if (time_out >= TIME_OUT_POLL) {
> +				timeout_exit(dev_id);
> +				TEST_ASSERT_SUCCESS(TEST_FAILED, "Dequeue timeout!");
> +			}
>   		} while (unlikely(burst_sz != deq));
>   
>   		*max_time = RTE_MAX(*max_time, last_time);
> @@ -4615,7 +4698,11 @@ latency_test_ldpc_dec(struct rte_mempool *mempool,
>   				first_time = false;
>   			}
>   			time_out++;
> -		} while ((burst_sz != deq) && (time_out < TIME_OUT_POLL));
> +			if (time_out >= TIME_OUT_POLL) {
> +				timeout_exit(dev_id);
> +				TEST_ASSERT_SUCCESS(TEST_FAILED, "Dequeue timeout!");
> +			}
> +		} while (unlikely(burst_sz != deq));
>   
>   		*max_time = RTE_MAX(*max_time, last_time);
>   		*min_time = RTE_MIN(*min_time, last_time);
> @@ -4624,14 +4711,8 @@ latency_test_ldpc_dec(struct rte_mempool *mempool,
>   		if (extDdr)
>   			retrieve_harq_ddr(dev_id, queue_id, ops_enq, burst_sz);
>   
> -		if (burst_sz != deq) {
> -			struct rte_bbdev_info info;
> -			ret = TEST_FAILED;
> -			rte_bbdev_info_get(dev_id, &info);
> -			TEST_ASSERT_SUCCESS(ret, "Dequeue timeout!");
> -		} else if (test_vector.op_type != RTE_BBDEV_OP_NONE) {
> -			ret = validate_ldpc_dec_op(ops_deq, burst_sz, ref_op,
> -					vector_mask);
> +		if (test_vector.op_type != RTE_BBDEV_OP_NONE) {
> +			ret = validate_ldpc_dec_op(ops_deq, burst_sz, ref_op, vector_mask);
>   			TEST_ASSERT_SUCCESS(ret, "Validation failed!");
>   		}
>   
> @@ -4692,17 +4773,17 @@ latency_test_enc(struct rte_mempool *mempool,
>   				first_time = false;
>   			}
>   			time_out++;
> -		} while ((burst_sz != deq) && (time_out < TIME_OUT_POLL));
> +			if (time_out >= TIME_OUT_POLL) {
> +				timeout_exit(dev_id);
> +				TEST_ASSERT_SUCCESS(TEST_FAILED, "Dequeue timeout!");
> +			}
> +		} while (unlikely(burst_sz != deq));
>   
>   		*max_time = RTE_MAX(*max_time, last_time);
>   		*min_time = RTE_MIN(*min_time, last_time);
>   		*total_time += last_time;
> -		if (burst_sz != deq) {
> -			struct rte_bbdev_info info;
> -			ret = TEST_FAILED;
> -			rte_bbdev_info_get(dev_id, &info);
> -			TEST_ASSERT_SUCCESS(ret, "Dequeue timeout!");
> -		} else if (test_vector.op_type != RTE_BBDEV_OP_NONE) {
> +
> +		if (test_vector.op_type != RTE_BBDEV_OP_NONE) {
>   			ret = validate_enc_op(ops_deq, burst_sz, ref_op);
>   			TEST_ASSERT_SUCCESS(ret, "Validation failed!");
>   		}
> @@ -4728,6 +4809,7 @@ latency_test_ldpc_enc(struct rte_mempool *mempool,
>   
>   	for (i = 0, dequeued = 0; dequeued < num_to_process; ++i) {
>   		uint16_t enq = 0, deq = 0;
> +		uint32_t time_out = 0;
>   		bool first_time = true;
>   		last_time = 0;
>   
> @@ -4763,6 +4845,11 @@ latency_test_ldpc_enc(struct rte_mempool *mempool,
>   				last_time += rte_rdtsc_precise() - start_time;
>   				first_time = false;
>   			}
> +			time_out++;
> +			if (time_out >= TIME_OUT_POLL) {
> +				timeout_exit(dev_id);
> +				TEST_ASSERT_SUCCESS(TEST_FAILED, "Dequeue timeout!");
> +			}
>   		} while (unlikely(burst_sz != deq));
>   
>   		*max_time = RTE_MAX(*max_time, last_time);
> @@ -4796,6 +4883,7 @@ latency_test_fft(struct rte_mempool *mempool,
>   
>   	for (i = 0, dequeued = 0; dequeued < num_to_process; ++i) {
>   		uint16_t enq = 0, deq = 0;
> +		uint32_t time_out = 0;
>   		bool first_time = true;
>   		last_time = 0;
>   
> @@ -4831,6 +4919,11 @@ latency_test_fft(struct rte_mempool *mempool,
>   				last_time += rte_rdtsc_precise() - start_time;
>   				first_time = false;
>   			}
> +			time_out++;
> +			if (time_out >= TIME_OUT_POLL) {
> +				timeout_exit(dev_id);
> +				TEST_ASSERT_SUCCESS(TEST_FAILED, "Dequeue timeout!");
> +			}
>   		} while (unlikely(burst_sz != deq));
>   
>   		*max_time = RTE_MAX(*max_time, last_time);

Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>

Thanks,
Maxime
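
The pattern every hunk above repeats is a bounded poll: count dequeue attempts and, once TIME_OUT_POLL polls have gone by without draining everything, dump the device status and fail the test instead of spinning forever. The following is a minimal, self-contained sketch of that pattern only; dequeue_burst(), report_timeout(), drain_with_timeout() and the constant's value are placeholders standing in for the rte_bbdev dequeue calls, timeout_exit() and the TEST_ASSERT macros used in the patch.

#include <stdint.h>
#include <stdio.h>

#define TIME_OUT_POLL 1000000 /* same name as in the patch; value here is illustrative */

/* Stand-in for rte_bbdev_dequeue_*_ops(): returns how many ops were drained. */
static uint16_t
dequeue_burst(uint16_t pending)
{
	return pending > 4 ? 4 : pending; /* pretend the device drains up to 4 ops per poll */
}

/* Stand-in for timeout_exit(): the patch prints the bbdev device status here. */
static void
report_timeout(void)
{
	printf("Device stopped responding, dumping device status\n");
}

/* Drain 'enq' enqueued ops, giving up after TIME_OUT_POLL polls. */
static int
drain_with_timeout(uint16_t enq)
{
	uint16_t deq = 0;
	uint32_t time_out = 0;

	while (deq < enq) {
		deq += dequeue_burst(enq - deq);
		if (++time_out >= TIME_OUT_POLL) {
			report_timeout();
			return -1; /* the patch uses TEST_ASSERT_SUCCESS(TEST_FAILED, ...) */
		}
	}
	return 0;
}

int
main(void)
{
	return drain_with_timeout(128) == 0 ? 0 : 1;
}

Counting polls rather than wall-clock time keeps the check cheap inside the hot loop; the trade-off is that the effective timeout scales with how fast the CPU can poll.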



Thread overview: 23+ messages
2023-03-02 20:21 [PATCH v3 00/17] test/bbdev: changes for 23.03 Hernan Vargas
2023-03-02 20:21 ` [PATCH v3 01/17] test/bbdev: fix seg fault for non supported HARQ len Hernan Vargas
2023-03-06 13:17   ` Maxime Coquelin
2023-03-02 20:21 ` [PATCH v3 02/17] test/bbdev: extend HARQ tolerance Hernan Vargas
2023-03-02 20:21 ` [PATCH v3 03/17] test/bbdev: remove check for invalid opaque data Hernan Vargas
2023-03-02 20:21 ` [PATCH v3 04/17] test/bbdev: refactor TB throughput report Hernan Vargas
2023-03-02 20:21 ` [PATCH v3 05/17] test/bbdev: add timeout for latency tests Hernan Vargas
2023-03-02 20:22 ` [PATCH v3 06/17] test/bbdev: enable early termination for validation Hernan Vargas
2023-03-02 20:22 ` [PATCH v3 07/17] test/bbdev: report device status in test-bbdev Hernan Vargas
2023-03-02 20:22 ` [PATCH v3 08/17] test/bbdev: test start/stop bbdev API Hernan Vargas
2023-03-03 12:31   ` Maxime Coquelin
2023-03-02 20:22 ` [PATCH v3 09/17] test/bbdev: add timeout for enq/deq loops Hernan Vargas
2023-03-03 12:44   ` Maxime Coquelin [this message]
2023-03-02 20:22 ` [PATCH v3 10/17] test/bbdev: add support for BLER for 4G Hernan Vargas
2023-03-03 16:41   ` Maxime Coquelin
2023-03-02 20:22 ` [PATCH v3 11/17] test/bbdev: extend support for large TB Hernan Vargas
2023-03-02 20:22 ` [PATCH v3 12/17] test/bbdev: adjustment for soft output Hernan Vargas
2023-03-02 20:22 ` [PATCH v3 13/17] test/bbdev: expose warning counters Hernan Vargas
2023-03-02 20:22 ` [PATCH v3 14/17] test/bbdev: remove iteration count check Hernan Vargas
2023-03-02 20:22 ` [PATCH v3 15/17] test/bbdev: use mbuf reset function Hernan Vargas
2023-03-02 20:22 ` [PATCH v3 16/17] test/bbdev: remove max iteration from vectors Hernan Vargas
2023-03-02 20:22 ` [PATCH v3 17/17] test/bbdev: remove iter count from bler test Hernan Vargas
2023-03-06 13:20 ` [PATCH v3 00/17] test/bbdev: changes for 23.03 Maxime Coquelin
