From: Hernan Vargas <hernan.vargas@intel.com>
To: dev@dpdk.org, gakhil@marvell.com, trix@redhat.com,
	maxime.coquelin@redhat.com
Cc: nicolas.chautru@intel.com, qi.z.zhang@intel.com,
	Hernan Vargas <hernan.vargas@intel.com>,
	stable@dpdk.org
Subject: [PATCH v2 3/9] test/bbdev: fix interrupt tests
Date: Mon, 24 Jun 2024 08:02:31 -0700	[thread overview]
Message-ID: <20240624150237.47169-4-hernan.vargas@intel.com> (raw)
In-Reply-To: <20240624150237.47169-1-hernan.vargas@intel.com>

Fix a possible race in the interrupt tests when setting the burst size
from the enqueue thread: store burst_sz to the thread structure before
enqueueing the operations, otherwise the dequeue callback may read a
stale value.

Fixes: b2e2aec3239e ("app/bbdev: enhance interrupt test")
Cc: stable@dpdk.org

Signed-off-by: Hernan Vargas <hernan.vargas@intel.com>
Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
---
 app/test-bbdev/test_bbdev_perf.c | 98 ++++++++++++++++----------------
 1 file changed, 49 insertions(+), 49 deletions(-)
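
For reference, a minimal standalone sketch of the ordering this patch
enforces, using plain C11 atomics in place of the rte_atomic_* wrappers.
The thread_params layout and the enqueue stub below are simplified
placeholders, not the real test code:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for the bbdev test thread parameters. */
struct thread_params {
	_Atomic uint16_t burst_sz;	/* consumed by the dequeue callback */
};

/* Hypothetical enqueue stub; the real test calls rte_bbdev_enqueue_*_ops(). */
static uint16_t
enqueue_stub(uint16_t num_to_enq)
{
	return num_to_enq;	/* pretend the device accepted everything */
}

static void
enqueue_batch(struct thread_params *tp, uint16_t num_to_enq)
{
	uint16_t enq = 0;

	/*
	 * Publish the burst size BEFORE enqueueing: the device interrupt can
	 * fire, and the dequeue callback read burst_sz, as soon as the first
	 * descriptors have been enqueued.
	 */
	atomic_store_explicit(&tp->burst_sz, num_to_enq, memory_order_relaxed);

	/* Enqueue until the whole burst has been accepted. */
	do {
		enq += enqueue_stub(num_to_enq - enq);
	} while (enq != num_to_enq);
}

int
main(void)
{
	struct thread_params tp = { .burst_sz = 0 };

	enqueue_batch(&tp, 32);
	printf("burst_sz published: %u\n",
	       (unsigned int)atomic_load_explicit(&tp.burst_sz,
					memory_order_relaxed));
	return 0;
}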

diff --git a/app/test-bbdev/test_bbdev_perf.c b/app/test-bbdev/test_bbdev_perf.c
index 9841464922ac..20cd8df19be7 100644
--- a/app/test-bbdev/test_bbdev_perf.c
+++ b/app/test-bbdev/test_bbdev_perf.c
@@ -3419,15 +3419,6 @@ throughput_intr_lcore_ldpc_dec(void *arg)
 			if (unlikely(num_to_process - enqueued < num_to_enq))
 				num_to_enq = num_to_process - enqueued;
 
-			enq = 0;
-			do {
-				enq += rte_bbdev_enqueue_ldpc_dec_ops(
-						tp->dev_id,
-						queue_id, &ops[enqueued],
-						num_to_enq);
-			} while (unlikely(num_to_enq != enq));
-			enqueued += enq;
-
 			/* Write to thread burst_sz current number of enqueued
 			 * descriptors. It ensures that proper number of
 			 * descriptors will be dequeued in callback
@@ -3438,6 +3429,15 @@ throughput_intr_lcore_ldpc_dec(void *arg)
 			rte_atomic_store_explicit(&tp->burst_sz, num_to_enq,
 					rte_memory_order_relaxed);
 
+			enq = 0;
+			do {
+				enq += rte_bbdev_enqueue_ldpc_dec_ops(
+						tp->dev_id,
+						queue_id, &ops[enqueued],
+						num_to_enq);
+			} while (unlikely(num_to_enq != enq));
+			enqueued += enq;
+
 			/* Wait until processing of previous batch is
 			 * completed
 			 */
@@ -3514,14 +3514,6 @@ throughput_intr_lcore_dec(void *arg)
 			if (unlikely(num_to_process - enqueued < num_to_enq))
 				num_to_enq = num_to_process - enqueued;
 
-			enq = 0;
-			do {
-				enq += rte_bbdev_enqueue_dec_ops(tp->dev_id,
-						queue_id, &ops[enqueued],
-						num_to_enq);
-			} while (unlikely(num_to_enq != enq));
-			enqueued += enq;
-
 			/* Write to thread burst_sz current number of enqueued
 			 * descriptors. It ensures that proper number of
 			 * descriptors will be dequeued in callback
@@ -3532,6 +3524,14 @@ throughput_intr_lcore_dec(void *arg)
 			rte_atomic_store_explicit(&tp->burst_sz, num_to_enq,
 					rte_memory_order_relaxed);
 
+			enq = 0;
+			do {
+				enq += rte_bbdev_enqueue_dec_ops(tp->dev_id,
+						queue_id, &ops[enqueued],
+						num_to_enq);
+			} while (unlikely(num_to_enq != enq));
+			enqueued += enq;
+
 			/* Wait until processing of previous batch is
 			 * completed
 			 */
@@ -3603,14 +3603,6 @@ throughput_intr_lcore_enc(void *arg)
 			if (unlikely(num_to_process - enqueued < num_to_enq))
 				num_to_enq = num_to_process - enqueued;
 
-			enq = 0;
-			do {
-				enq += rte_bbdev_enqueue_enc_ops(tp->dev_id,
-						queue_id, &ops[enqueued],
-						num_to_enq);
-			} while (unlikely(enq != num_to_enq));
-			enqueued += enq;
-
 			/* Write to thread burst_sz current number of enqueued
 			 * descriptors. It ensures that proper number of
 			 * descriptors will be dequeued in callback
@@ -3621,6 +3613,14 @@ throughput_intr_lcore_enc(void *arg)
 			rte_atomic_store_explicit(&tp->burst_sz, num_to_enq,
 					rte_memory_order_relaxed);
 
+			enq = 0;
+			do {
+				enq += rte_bbdev_enqueue_enc_ops(tp->dev_id,
+						queue_id, &ops[enqueued],
+						num_to_enq);
+			} while (unlikely(enq != num_to_enq));
+			enqueued += enq;
+
 			/* Wait until processing of previous batch is
 			 * completed
 			 */
@@ -3694,15 +3694,6 @@ throughput_intr_lcore_ldpc_enc(void *arg)
 			if (unlikely(num_to_process - enqueued < num_to_enq))
 				num_to_enq = num_to_process - enqueued;
 
-			enq = 0;
-			do {
-				enq += rte_bbdev_enqueue_ldpc_enc_ops(
-						tp->dev_id,
-						queue_id, &ops[enqueued],
-						num_to_enq);
-			} while (unlikely(enq != num_to_enq));
-			enqueued += enq;
-
 			/* Write to thread burst_sz current number of enqueued
 			 * descriptors. It ensures that proper number of
 			 * descriptors will be dequeued in callback
@@ -3713,6 +3704,15 @@ throughput_intr_lcore_ldpc_enc(void *arg)
 			rte_atomic_store_explicit(&tp->burst_sz, num_to_enq,
 					rte_memory_order_relaxed);
 
+			enq = 0;
+			do {
+				enq += rte_bbdev_enqueue_ldpc_enc_ops(
+						tp->dev_id,
+						queue_id, &ops[enqueued],
+						num_to_enq);
+			} while (unlikely(enq != num_to_enq));
+			enqueued += enq;
+
 			/* Wait until processing of previous batch is
 			 * completed
 			 */
@@ -3786,14 +3786,6 @@ throughput_intr_lcore_fft(void *arg)
 			if (unlikely(num_to_process - enqueued < num_to_enq))
 				num_to_enq = num_to_process - enqueued;
 
-			enq = 0;
-			do {
-				enq += rte_bbdev_enqueue_fft_ops(tp->dev_id,
-						queue_id, &ops[enqueued],
-						num_to_enq);
-			} while (unlikely(enq != num_to_enq));
-			enqueued += enq;
-
 			/* Write to thread burst_sz current number of enqueued
 			 * descriptors. It ensures that proper number of
 			 * descriptors will be dequeued in callback
@@ -3804,6 +3796,14 @@ throughput_intr_lcore_fft(void *arg)
 			rte_atomic_store_explicit(&tp->burst_sz, num_to_enq,
 					rte_memory_order_relaxed);
 
+			enq = 0;
+			do {
+				enq += rte_bbdev_enqueue_fft_ops(tp->dev_id,
+						queue_id, &ops[enqueued],
+						num_to_enq);
+			} while (unlikely(enq != num_to_enq));
+			enqueued += enq;
+
 			/* Wait until processing of previous batch is
 			 * completed
 			 */
@@ -3872,13 +3872,6 @@ throughput_intr_lcore_mldts(void *arg)
 			if (unlikely(num_to_process - enqueued < num_to_enq))
 				num_to_enq = num_to_process - enqueued;
 
-			enq = 0;
-			do {
-				enq += rte_bbdev_enqueue_mldts_ops(tp->dev_id,
-						queue_id, &ops[enqueued], num_to_enq);
-			} while (unlikely(enq != num_to_enq));
-			enqueued += enq;
-
 			/* Write to thread burst_sz current number of enqueued
 			 * descriptors. It ensures that proper number of
 			 * descriptors will be dequeued in callback
@@ -3889,6 +3882,13 @@ throughput_intr_lcore_mldts(void *arg)
 			rte_atomic_store_explicit(&tp->burst_sz, num_to_enq,
 					rte_memory_order_relaxed);
 
+			enq = 0;
+			do {
+				enq += rte_bbdev_enqueue_mldts_ops(tp->dev_id,
+						queue_id, &ops[enqueued], num_to_enq);
+			} while (unlikely(enq != num_to_enq));
+			enqueued += enq;
+
 			/* Wait until processing of previous batch is
 			 * completed
 			 */
-- 
2.37.1

