* [PATCH] test/dma: add SG copy tests
@ 2023-08-10 12:01 Gowrishankar Muthukrishnan
0 siblings, 0 replies; only message in thread
From: Gowrishankar Muthukrishnan @ 2023-08-10 12:01 UTC (permalink / raw)
To: dev
Cc: anoobj, Chengwen Feng, Vamsi Attunuru, Vidya Sagar Velumuri,
Gowrishankar Muthukrishnan
Add scatter-gather copy tests.
Signed-off-by: Vidya Sagar Velumuri <vvelumuri@marvell.com>
Signed-off-by: Gowrishankar Muthukrishnan <gmuthukrishn@marvell.com>
---
app/test/test_dmadev.c | 124 +++++++++++++++++++++++++++-
app/test/test_dmadev_api.c | 163 ++++++++++++++++++++++++++++++++++---
app/test/test_dmadev_api.h | 2 +
3 files changed, 274 insertions(+), 15 deletions(-)
diff --git a/app/test/test_dmadev.c b/app/test/test_dmadev.c
index 0736ff2a18..abe970baaf 100644
--- a/app/test/test_dmadev.c
+++ b/app/test/test_dmadev.c
@@ -18,7 +18,7 @@
#define ERR_RETURN(...) do { print_err(__func__, __LINE__, __VA_ARGS__); return -1; } while (0)
-#define COPY_LEN 1024
+#define COPY_LEN 1032
static struct rte_mempool *pool;
static uint16_t id_count;
@@ -346,6 +346,120 @@ test_stop_start(int16_t dev_id, uint16_t vchan)
return 0;
}
+static int
+test_enqueue_sg_copies(int16_t dev_id, uint16_t vchan)
+{
+ unsigned int src_len, dst_len, n_sge, len, i, j, k;
+ char orig_src[COPY_LEN], orig_dst[COPY_LEN];
+ struct rte_dma_info info = { 0 };
+ enum rte_dma_status_code status;
+ uint16_t id, n_src, n_dst;
+
+ if (rte_dma_info_get(dev_id, &info) < 0)
+ ERR_RETURN("Failed to get dev info");
+
+ n_sge = RTE_MIN(info.max_sges, TEST_SG_MAX);
+ len = COPY_LEN;
+
+ for (n_src = 1; n_src <= n_sge; n_src++) {
+ src_len = len / n_src;
+ for (n_dst = 1; n_dst <= n_sge; n_dst++) {
+ dst_len = len / n_dst;
+
+ struct rte_dma_sge sg_src[n_sge], sg_dst[n_sge];
+ struct rte_mbuf *src[n_sge], *dst[n_sge];
+ char *src_data[n_sge], *dst_data[n_sge];
+
+ for (i = 0; i < COPY_LEN; i++)
+ orig_src[i] = rte_rand() & 0xFF;
+
+ memset(orig_dst, 0, COPY_LEN);
+
+ for (i = 0; i < n_src; i++) {
+ src[i] = rte_pktmbuf_alloc(pool);
+ RTE_ASSERT(src[i] != NULL);
+ sg_src[i].addr = rte_pktmbuf_iova(src[i]);
+ sg_src[i].length = src_len;
+ src_data[i] = rte_pktmbuf_mtod(src[i], char *);
+ }
+
+ for (k = 0; k < n_dst; k++) {
+ dst[k] = rte_pktmbuf_alloc(pool);
+ RTE_ASSERT(dst[k] != NULL);
+ sg_dst[k].addr = rte_pktmbuf_iova(dst[k]);
+ sg_dst[k].length = dst_len;
+ dst_data[k] = rte_pktmbuf_mtod(dst[k], char *);
+ }
+
+ for (i = 0; i < n_src; i++) {
+ for (j = 0; j < src_len; j++)
+ src_data[i][j] = orig_src[i * src_len + j];
+ }
+
+ for (k = 0; k < n_dst; k++)
+ memset(dst_data[k], 0, dst_len);
+
+ printf("\tsrc segs: %2d [seg len: %4u] - dst segs: %2d [seg len : %4u]\n",
+ n_src, src_len, n_dst, dst_len);
+
+ id = rte_dma_copy_sg(dev_id, vchan, sg_src, sg_dst, n_src, n_dst,
+ RTE_DMA_OP_FLAG_SUBMIT);
+
+ if (id != id_count)
+ ERR_RETURN("Error with rte_dma_copy_sg, got %u, expected %u\n",
+ id, id_count);
+
+ /* Give time for copy to finish, then check it was done */
+ await_hw(dev_id, vchan);
+
+ for (k = 0; k < n_dst; k++)
+ memcpy((&orig_dst[0] + k * dst_len), dst_data[k], dst_len);
+
+ if (memcmp(orig_src, orig_dst, COPY_LEN))
+ ERR_RETURN("Data mismatch");
+
+ /* Verify completion */
+ id = ~id;
+ if (rte_dma_completed(dev_id, vchan, 1, &id, NULL) != 1)
+ ERR_RETURN("Error with rte_dma_completed\n");
+
+ /* Verify expected index(id_count) */
+ if (id != id_count)
+ ERR_RETURN("Error:incorrect job id received, %u [expected %u]\n",
+ id, id_count);
+
+ /* Check for completed and id when no job done */
+ id = ~id;
+ if (rte_dma_completed(dev_id, vchan, 1, &id, NULL) != 0)
+ ERR_RETURN("Error with rte_dma_completed when no job done\n");
+
+ if (id != id_count)
+ ERR_RETURN("Error:incorrect job id received when no job done, %u [expected %u]\n",
+ id, id_count);
+
+ /* Check for completed_status and id when no job done */
+ id = ~id;
+ if (rte_dma_completed_status(dev_id, vchan, 1, &id, &status) != 0)
+ ERR_RETURN("Error with rte_dma_completed_status when no job done\n");
+ if (id != id_count)
+ ERR_RETURN("Error:incorrect job id received when no job done, %u [expected %u]\n",
+ id, id_count);
+
+ for (i = 0; i < n_src; i++)
+ rte_pktmbuf_free(src[i]);
+ for (i = 0; i < n_dst; i++)
+ rte_pktmbuf_free(dst[i]);
+
+ /* Verify that completion returns nothing more */
+ if (rte_dma_completed(dev_id, vchan, 1, NULL, NULL) != 0)
+ ERR_RETURN("Error with rte_dma_completed in empty check\n");
+
+ id_count++;
+ }
+ }
+ return 0;
+}
+
/* Failure handling test cases - global macros and variables for those tests*/
#define COMP_BURST_SZ 16
#define OPT_FENCE(idx) ((fence && idx == 8) ? RTE_DMA_OP_FLAG_FENCE : 0)
@@ -852,7 +966,7 @@ test_dmadev_instance(int16_t dev_id)
TEST_RINGSIZE * 2, /* n == num elements */
32, /* cache size */
0, /* priv size */
- 2048, /* data room size */
+ COPY_LEN + RTE_PKTMBUF_HEADROOM, /* data room size */
info.numa_node);
if (pool == NULL)
ERR_RETURN("Error with mempool creation\n");
@@ -865,6 +979,12 @@ test_dmadev_instance(int16_t dev_id)
if (runtest("stop-start", test_stop_start, 1, dev_id, vchan, CHECK_ERRS) < 0)
goto err;
+ /* run SG test cases */
+ if ((info.dev_capa & RTE_DMA_CAPA_OPS_COPY_SG) == 0)
+ printf("DMA Dev %u: No SG support, skipping SG copy tests\n", dev_id);
+ else if (runtest("sg_copy", test_enqueue_sg_copies, 1, dev_id, vchan, CHECK_ERRS) < 0)
+ goto err;
+
/* run some burst capacity tests */
if (rte_dma_burst_capacity(dev_id, vchan) < 64)
printf("DMA Dev %u: insufficient burst capacity (64 required), skipping tests\n",
diff --git a/app/test/test_dmadev_api.c b/app/test/test_dmadev_api.c
index 5cdd87e162..0934cc81cc 100644
--- a/app/test/test_dmadev_api.c
+++ b/app/test/test_dmadev_api.c
@@ -10,8 +10,7 @@
#include <rte_dmadev.h>
#include "test.h"
-
-extern int test_dma_api(uint16_t dev_id);
+#include "test_dmadev_api.h"
#define DMA_TEST_API_RUN(test) \
testsuite_run_test(test, #test)
@@ -24,6 +23,8 @@ static int16_t invalid_dev_id;
static char *src;
static char *dst;
+static char *src_sg[TEST_SG_MAX];
+static char *dst_sg[TEST_SG_MAX];
static int total;
static int passed;
@@ -35,17 +36,27 @@ testsuite_setup(int16_t dev_id)
{
test_dev_id = dev_id;
invalid_dev_id = -1;
+ int i, rc = 0;
+
+ for (i = 0; i < TEST_SG_MAX; i++) {
+ src_sg[i] = rte_malloc("dmadev_test_src", TEST_MEMCPY_SIZE, 0);
+ if (src_sg[i] == NULL) {
+ rc = -ENOMEM;
+ goto exit;
+ }
- src = rte_malloc("dmadev_test_src", TEST_MEMCPY_SIZE, 0);
- if (src == NULL)
- return -ENOMEM;
- dst = rte_malloc("dmadev_test_dst", TEST_MEMCPY_SIZE, 0);
- if (dst == NULL) {
- rte_free(src);
- src = NULL;
- return -ENOMEM;
+ dst_sg[i] = rte_malloc("dmadev_test_dst", TEST_MEMCPY_SIZE, 0);
+ if (dst_sg[i] == NULL) {
+ rte_free(src_sg[i]);
+ src_sg[i] = NULL;
+ rc = -ENOMEM;
+ goto exit;
+ }
}
+ src = src_sg[0];
+ dst = dst_sg[0];
+
total = 0;
passed = 0;
failed = 0;
@@ -56,15 +67,29 @@ testsuite_setup(int16_t dev_id)
*/
rte_log_set_level_pattern("lib.dmadev", RTE_LOG_CRIT);
- return 0;
+ return rc;
+exit:
+ while (--i >= 0) {
+ rte_free(src_sg[i]);
+ rte_free(dst_sg[i]);
+ }
+
+ return rc;
}
static void
testsuite_teardown(void)
{
- rte_free(src);
+ int i;
+
+ for (i = 0; i < TEST_SG_MAX; i++) {
+ rte_free(src_sg[i]);
+ src_sg[i] = NULL;
+ rte_free(dst_sg[i]);
+ dst_sg[i] = NULL;
+ }
+
src = NULL;
- rte_free(dst);
dst = NULL;
/* Ensure the dmadev is stopped. */
rte_dma_stop(test_dev_id);
@@ -484,6 +509,37 @@ verify_memory(void)
return 0;
}
+static void
+sg_memory_setup(int n)
+{
+ int i, j;
+
+ for (i = 0; i < n; i++) {
+ for (j = 0; j < TEST_MEMCPY_SIZE; j++)
+ src_sg[i][j] = (char)j;
+
+ memset(dst_sg[i], 0, TEST_MEMCPY_SIZE);
+ }
+}
+
+static int
+sg_memory_verify(int n)
+{
+ int i, j;
+
+ for (i = 0; i < n; i++) {
+ for (j = 0; j < TEST_MEMCPY_SIZE; j++) {
+ if (src_sg[i][j] == dst_sg[i][j])
+ continue;
+
+ RTE_TEST_ASSERT_EQUAL(src_sg[i][j], dst_sg[i][j], "Failed to copy memory, %d %d",
+ src_sg[i][j], dst_sg[i][j]);
+ }
+ }
+
+ return 0;
+}
+
static int
test_dma_completed(void)
{
@@ -598,6 +654,86 @@ test_dma_completed_status(void)
return TEST_SUCCESS;
}
+static int
+test_dma_sg(void)
+{
+ struct rte_dma_sge src_sge[TEST_SG_MAX], dst_sge[TEST_SG_MAX];
+ struct rte_dma_info dev_info = { 0 };
+ uint16_t last_idx = -1;
+ bool has_error = true;
+ int n_sge, i, ret;
+ uint16_t cpl_ret;
+
+ ret = rte_dma_info_get(test_dev_id, &dev_info);
+ RTE_TEST_ASSERT_SUCCESS(ret, "Failed to obtain device info, %d", ret);
+
+ if ((dev_info.dev_capa & RTE_DMA_CAPA_OPS_COPY_SG) == 0)
+ return TEST_SKIPPED;
+
+ n_sge = RTE_MIN(dev_info.max_sges, TEST_SG_MAX);
+
+ ret = setup_one_vchan();
+ RTE_TEST_ASSERT_SUCCESS(ret, "Failed to setup one vchan, %d", ret);
+
+ ret = rte_dma_start(test_dev_id);
+ RTE_TEST_ASSERT_SUCCESS(ret, "Failed to start, %d", ret);
+
+ for (i = 0; i < n_sge; i++) {
+ src_sge[i].addr = rte_malloc_virt2iova(src_sg[i]);
+ src_sge[i].length = TEST_MEMCPY_SIZE;
+ dst_sge[i].addr = rte_malloc_virt2iova(dst_sg[i]);
+ dst_sge[i].length = TEST_MEMCPY_SIZE;
+ }
+
+ sg_memory_setup(n_sge);
+
+ /* Check enqueue without submit */
+ ret = rte_dma_copy_sg(test_dev_id, 0, src_sge, dst_sge, n_sge, n_sge, 0);
+ RTE_TEST_ASSERT_EQUAL(ret, 0, "Failed to enqueue copy, %d", ret);
+
+ rte_delay_us_sleep(TEST_WAIT_US_VAL);
+
+ cpl_ret = rte_dma_completed(test_dev_id, 0, 1, &last_idx, &has_error);
+ RTE_TEST_ASSERT_EQUAL(cpl_ret, 0, "Failed to get completed");
+
+ /* Check DMA submit */
+ ret = rte_dma_submit(test_dev_id, 0);
+ RTE_TEST_ASSERT_SUCCESS(ret, "Failed to submit, %d", ret);
+
+ rte_delay_us_sleep(TEST_WAIT_US_VAL);
+
+ cpl_ret = rte_dma_completed(test_dev_id, 0, 1, &last_idx, &has_error);
+ RTE_TEST_ASSERT_EQUAL(cpl_ret, 1, "Failed to get completed");
+ RTE_TEST_ASSERT_EQUAL(last_idx, 0, "Last idx should be zero, %u", last_idx);
+ RTE_TEST_ASSERT_EQUAL(has_error, false, "Should have no error");
+
+ ret = sg_memory_verify(n_sge);
+ RTE_TEST_ASSERT_SUCCESS(ret, "Failed to verify memory");
+
+ sg_memory_setup(n_sge);
+
+ /* Check for enqueue with submit */
+ ret = rte_dma_copy_sg(test_dev_id, 0, src_sge, dst_sge, n_sge, n_sge,
+ RTE_DMA_OP_FLAG_SUBMIT);
+ RTE_TEST_ASSERT_EQUAL(ret, 1, "Failed to enqueue copy, %d", ret);
+
+ rte_delay_us_sleep(TEST_WAIT_US_VAL);
+
+ cpl_ret = rte_dma_completed(test_dev_id, 0, 1, &last_idx, &has_error);
+ RTE_TEST_ASSERT_EQUAL(cpl_ret, 1, "Failed to get completed");
+ RTE_TEST_ASSERT_EQUAL(last_idx, 1, "Last idx should be 1, %u", last_idx);
+ RTE_TEST_ASSERT_EQUAL(has_error, false, "Should have no error");
+
+ ret = sg_memory_verify(n_sge);
+ RTE_TEST_ASSERT_SUCCESS(ret, "Failed to verify memory");
+
+ /* Stop dmadev to make sure dmadev to a known state */
+ ret = rte_dma_stop(test_dev_id);
+ RTE_TEST_ASSERT_SUCCESS(ret, "Failed to stop, %d", ret);
+
+ return TEST_SUCCESS;
+}
+
int
test_dma_api(uint16_t dev_id)
{
@@ -622,6 +758,7 @@ test_dma_api(uint16_t dev_id)
DMA_TEST_API_RUN(test_dma_dump);
DMA_TEST_API_RUN(test_dma_completed);
DMA_TEST_API_RUN(test_dma_completed_status);
+ DMA_TEST_API_RUN(test_dma_sg);
testsuite_teardown();
diff --git a/app/test/test_dmadev_api.h b/app/test/test_dmadev_api.h
index 33fbc5bd41..10ab925f80 100644
--- a/app/test/test_dmadev_api.h
+++ b/app/test/test_dmadev_api.h
@@ -2,4 +2,6 @@
* Copyright(c) 2021 HiSilicon Limited
*/
+#define TEST_SG_MAX 4
+
int test_dma_api(uint16_t dev_id);
--
2.25.1
^ permalink raw reply [flat|nested] only message in thread
only message in thread, other threads:[~2023-08-10 12:01 UTC | newest]
Thread overview: (only message) (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2023-08-10 12:01 [PATCH] test/dma: add SG copy tests Gowrishankar Muthukrishnan
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).