From: Amit Prakash Shukla <amitprakashs@marvell.com>
Cc: <dev@dpdk.org>, <jerinj@marvell.com>, <fengchengwen@huawei.com>,
<kevin.laatz@intel.com>, <bruce.richardson@intel.com>,
<conor.walsh@intel.com>, <vattunuru@marvell.com>,
<g.singh@nxp.com>, <sachin.saxena@oss.nxp.com>,
<hemant.agrawal@nxp.com>, <cheng1.jiang@intel.com>,
<ndabilpuram@marvell.com>, <anoobj@marvell.com>,
<mb@smartsharesystems.com>,
Amit Prakash Shukla <amitprakashs@marvell.com>
Subject: [PATCH v2 12/12] app/test: add event DMA adapter auto-test
Date: Sat, 23 Sep 2023 01:43:37 +0530 [thread overview]
Message-ID: <20230922201337.3347666-13-amitprakashs@marvell.com> (raw)
In-Reply-To: <20230922201337.3347666-1-amitprakashs@marvell.com>
Added a testsuite to test the DMA adapter functionality.
The testsuite detects the event and DMA device capabilities
and, based on them, configures the DMA adapter and tests the
supported modes.
Signed-off-by: Amit Prakash Shukla <amitprakashs@marvell.com>
---
app/test/meson.build | 1 +
app/test/test_event_dma_adapter.c | 808 ++++++++++++++++++++++++++++++
2 files changed, 809 insertions(+)
create mode 100644 app/test/test_event_dma_adapter.c
diff --git a/app/test/meson.build b/app/test/meson.build
index 05bae9216d..eccd3b72d8 100644
--- a/app/test/meson.build
+++ b/app/test/meson.build
@@ -66,6 +66,7 @@ source_file_deps = {
'test_errno.c': [],
'test_ethdev_link.c': ['ethdev'],
'test_event_crypto_adapter.c': ['cryptodev', 'eventdev', 'bus_vdev'],
+ 'test_event_dma_adapter.c': ['dmadev', 'eventdev'],
'test_event_eth_rx_adapter.c': ['ethdev', 'eventdev', 'bus_vdev'],
'test_event_eth_tx_adapter.c': ['bus_vdev', 'ethdev', 'net_ring', 'eventdev'],
'test_event_ring.c': ['eventdev'],
diff --git a/app/test/test_event_dma_adapter.c b/app/test/test_event_dma_adapter.c
new file mode 100644
index 0000000000..801294c118
--- /dev/null
+++ b/app/test/test_event_dma_adapter.c
@@ -0,0 +1,808 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2023 Marvell.
+ */
+
+#include "test.h"
+#include <string.h>
+#include <rte_common.h>
+#include <rte_malloc.h>
+#include <rte_mempool.h>
+#include <rte_mbuf.h>
+#include <rte_random.h>
+
+#ifdef RTE_EXEC_ENV_WINDOWS
+/* Windows stub: the event DMA adapter test is not supported there, so
+ * report the whole suite as skipped.
+ */
+static int
+test_event_dma_adapter(void)
+{
+	printf("event_dma_adapter not supported on Windows, skipping test\n");
+	return TEST_SKIPPED;
+}
+
+#else
+
+#include <rte_bus_vdev.h>
+#include <rte_dmadev.h>
+#include <rte_eventdev.h>
+#include <rte_event_dma_adapter.h>
+#include <rte_service.h>
+
+#define NUM_MBUFS (8191)
+#define MBUF_CACHE_SIZE (256)
+#define TEST_APP_PORT_ID 0
+#define TEST_APP_EV_QUEUE_ID 0
+#define TEST_APP_EV_PRIORITY 0
+#define TEST_APP_EV_FLOWID 0xAABB
+#define TEST_DMA_EV_QUEUE_ID 1
+#define TEST_ADAPTER_ID 0
+#define TEST_DMA_DEV_ID 0
+#define TEST_DMA_VCHAN_ID 0
+#define PACKET_LENGTH 1024
+#define NB_TEST_PORTS 1
+#define NB_TEST_QUEUES 2
+#define NUM_CORES 2
+#define DMA_OP_POOL_SIZE 128
+#define TEST_MAX_OP 128
+#define TEST_RINGSIZE 512
+
+#define MBUF_SIZE (RTE_PKTMBUF_HEADROOM + PACKET_LENGTH)
+
+/* Handle log statements in same manner as test macros */
+#define LOG_DBG(...) RTE_LOG(DEBUG, EAL, __VA_ARGS__)
+
+/* Resources shared by all test cases in this suite. */
+struct event_dma_adapter_test_params {
+	struct rte_mempool *src_mbuf_pool;	/* copy-source mbufs */
+	struct rte_mempool *dst_mbuf_pool;	/* copy-destination mbufs */
+	struct rte_mempool *op_mpool;		/* adapter op + trailing response event */
+	uint8_t dma_event_port_id;		/* adapter event port (SW service mode) */
+	uint8_t internal_port_op_fwd;		/* set when device has OP_FWD internal port */
+};
+
+/* Event attributes expected back from the adapter on DMA completion. */
+struct rte_event dma_response_info = {
+	.queue_id = TEST_APP_EV_QUEUE_ID,
+	.sched_type = RTE_SCHED_TYPE_ATOMIC,
+	.flow_id = TEST_APP_EV_FLOWID,
+	.priority = TEST_APP_EV_PRIORITY
+};
+
+static struct event_dma_adapter_test_params params;
+static uint8_t dma_adapter_setup_done;	/* one-time adapter/port-link latch */
+static uint32_t slcore_id;		/* service lcore for SW adapters */
+static int evdev;			/* event device under test */
+
+/* Enqueue TEST_MAX_OP events carrying DMA ops to the adapter and collect
+ * all completions, verifying each destination matches its source.  Uses
+ * the adapter enqueue API when the device has an internal port, otherwise
+ * the regular event enqueue path.
+ * NOTE(review): the dequeue loop busy-waits until all TEST_MAX_OP
+ * completions arrive — a stalled adapter would hang the test here.
+ */
+static int
+send_recv_ev(struct rte_event *ev)
+{
+	struct rte_event recv_ev[TEST_MAX_OP] = {0};
+	struct rte_event_dma_adapter_op *op;
+	uint16_t nb_enqueued = 0;
+	int ret, i = 0;
+
+	if (params.internal_port_op_fwd) {
+		nb_enqueued = rte_event_dma_adapter_enqueue(evdev, TEST_APP_PORT_ID, ev,
+							    TEST_MAX_OP);
+	} else {
+		while (nb_enqueued < TEST_MAX_OP) {
+			nb_enqueued += rte_event_enqueue_burst(evdev, TEST_APP_PORT_ID,
+							       &ev[nb_enqueued], TEST_MAX_OP -
+							       nb_enqueued);
+		}
+	}
+
+	TEST_ASSERT_EQUAL(nb_enqueued, TEST_MAX_OP, "Failed to send event to dma adapter\n");
+
+	while (i < TEST_MAX_OP) {
+		if (rte_event_dequeue_burst(evdev, TEST_APP_PORT_ID, &recv_ev[i], 1, 0) != 1)
+			continue;
+
+		op = recv_ev[i].event_ptr;
+		/* seg->addr holds the mbuf IOVA but is dereferenced as a VA
+		 * here — assumes IOVA-as-VA mode; TODO confirm.
+		 */
+		ret = memcmp((uint8_t *)op->src_seg->addr, (uint8_t *)op->dst_seg->addr,
+			     op->src_seg->length);
+		TEST_ASSERT_EQUAL(ret, 0, "Data mismatch for dma adapter\n");
+		i++;
+	}
+
+	return TEST_SUCCESS;
+}
+
+/* Dump the adapter counters for inspection and reset them so the next
+ * test case starts from zero.
+ */
+static int
+test_dma_adapter_stats(void)
+{
+	struct rte_event_dma_adapter_stats stats;
+	int ret;
+
+	/* Check the return value: on failure 'stats' would otherwise be
+	 * printed uninitialized.
+	 */
+	ret = rte_event_dma_adapter_stats_get(TEST_ADAPTER_ID, &stats);
+	TEST_ASSERT_SUCCESS(ret, "Failed to get adapter stats\n");
+
+	printf(" +------------------------------------------------------+\n");
+	printf(" + DMA adapter stats for instance %u:\n", TEST_ADAPTER_ID);
+	printf(" + Event port poll count         0x%" PRIx64 "\n",
+	       stats.event_poll_count);
+	printf(" + Event dequeue count           0x%" PRIx64 "\n",
+	       stats.event_deq_count);
+	printf(" + DMA dev enqueue count         0x%" PRIx64 "\n",
+	       stats.dma_enq_count);
+	printf(" + DMA dev enqueue failed count  0x%" PRIx64 "\n",
+	       stats.dma_enq_fail_count);
+	printf(" + DMA dev dequeue count         0x%" PRIx64 "\n",
+	       stats.dma_deq_count);
+	printf(" + Event enqueue count           0x%" PRIx64 "\n",
+	       stats.event_enq_count);
+	printf(" + Event enqueue retry count     0x%" PRIx64 "\n",
+	       stats.event_enq_retry_count);
+	printf(" + Event enqueue fail count      0x%" PRIx64 "\n",
+	       stats.event_enq_fail_count);
+	printf(" +------------------------------------------------------+\n");
+
+	ret = rte_event_dma_adapter_stats_reset(TEST_ADAPTER_ID);
+	TEST_ASSERT_SUCCESS(ret, "Failed to reset adapter stats\n");
+
+	return TEST_SUCCESS;
+}
+
+/* Exercise the adapter runtime-parameter get/set API over a range of
+ * max_nb values.  A vchan is bound for the duration of the test and
+ * removed again before returning.
+ */
+static int
+test_dma_adapter_params(void)
+{
+	struct rte_event_dma_adapter_runtime_params in_params;
+	struct rte_event_dma_adapter_runtime_params out_params;
+	struct rte_event event;
+	uint32_t cap;
+	int err, rc;
+
+	err = rte_event_dma_adapter_caps_get(evdev, TEST_DMA_DEV_ID, &cap);
+	TEST_ASSERT_SUCCESS(err, "Failed to get adapter capabilities\n");
+
+	/* NOTE(review): 'event' is passed uninitialized on the EV_BIND
+	 * path — confirm drivers only read documented fields.
+	 */
+	if (cap & RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_VCHAN_EV_BIND) {
+		err = rte_event_dma_adapter_vchan_add(TEST_ADAPTER_ID, TEST_DMA_DEV_ID,
+						      TEST_DMA_VCHAN_ID, &event);
+	} else
+		err = rte_event_dma_adapter_vchan_add(TEST_ADAPTER_ID, TEST_DMA_DEV_ID,
+						      TEST_DMA_VCHAN_ID, NULL);
+
+	TEST_ASSERT_SUCCESS(err, "Failed to add vchan\n");
+
+	err = rte_event_dma_adapter_runtime_params_init(&in_params);
+	TEST_ASSERT(err == 0, "Expected 0 got %d", err);
+	err = rte_event_dma_adapter_runtime_params_init(&out_params);
+	TEST_ASSERT(err == 0, "Expected 0 got %d", err);
+
+	/* Case 1: Get the default value of mbufs processed by adapter */
+	err = rte_event_dma_adapter_runtime_params_get(TEST_ADAPTER_ID, &out_params);
+	if (err == -ENOTSUP) {
+		/* Adapter does not support runtime params; skip, not fail. */
+		rc = TEST_SKIPPED;
+		goto vchan_del;
+	}
+	TEST_ASSERT(err == 0, "Expected 0 got %d", err);
+
+	/* Case 2: Set max_nb = 32 (=BATCH_SIZE) */
+	in_params.max_nb = 32;
+
+	err = rte_event_dma_adapter_runtime_params_set(TEST_ADAPTER_ID, &in_params);
+	TEST_ASSERT(err == 0, "Expected 0 got %d", err);
+
+	err = rte_event_dma_adapter_runtime_params_get(TEST_ADAPTER_ID, &out_params);
+	TEST_ASSERT(err == 0, "Expected 0 got %d", err);
+	TEST_ASSERT(in_params.max_nb == out_params.max_nb, "Expected %u got %u",
+		    in_params.max_nb, out_params.max_nb);
+
+	/* Case 3: Set max_nb = 192 */
+	in_params.max_nb = 192;
+
+	err = rte_event_dma_adapter_runtime_params_set(TEST_ADAPTER_ID, &in_params);
+	TEST_ASSERT(err == 0, "Expected 0 got %d", err);
+
+	err = rte_event_dma_adapter_runtime_params_get(TEST_ADAPTER_ID, &out_params);
+	TEST_ASSERT(err == 0, "Expected 0 got %d", err);
+	TEST_ASSERT(in_params.max_nb == out_params.max_nb, "Expected %u got %u",
+		    in_params.max_nb, out_params.max_nb);
+
+	/* Case 4: Set max_nb = 256 */
+	in_params.max_nb = 256;
+
+	err = rte_event_dma_adapter_runtime_params_set(TEST_ADAPTER_ID, &in_params);
+	TEST_ASSERT(err == 0, "Expected 0 got %d", err);
+
+	err = rte_event_dma_adapter_runtime_params_get(TEST_ADAPTER_ID, &out_params);
+	TEST_ASSERT(err == 0, "Expected 0 got %d", err);
+	TEST_ASSERT(in_params.max_nb == out_params.max_nb, "Expected %u got %u",
+		    in_params.max_nb, out_params.max_nb);
+
+	/* Case 5: Set max_nb = 30(<BATCH_SIZE) */
+	in_params.max_nb = 30;
+
+	err = rte_event_dma_adapter_runtime_params_set(TEST_ADAPTER_ID, &in_params);
+	TEST_ASSERT(err == 0, "Expected 0 got %d", err);
+
+	err = rte_event_dma_adapter_runtime_params_get(TEST_ADAPTER_ID, &out_params);
+	TEST_ASSERT(err == 0, "Expected 0 got %d", err);
+	TEST_ASSERT(in_params.max_nb == out_params.max_nb, "Expected %u got %u",
+		    in_params.max_nb, out_params.max_nb);
+
+	/* Case 6: Set max_nb = 512 */
+	in_params.max_nb = 512;
+
+	err = rte_event_dma_adapter_runtime_params_set(TEST_ADAPTER_ID, &in_params);
+	TEST_ASSERT(err == 0, "Expected 0 got %d", err);
+
+	err = rte_event_dma_adapter_runtime_params_get(TEST_ADAPTER_ID, &out_params);
+	TEST_ASSERT(err == 0, "Expected 0 got %d", err);
+	TEST_ASSERT(in_params.max_nb == out_params.max_nb, "Expected %u got %u",
+		    in_params.max_nb, out_params.max_nb);
+
+	rc = TEST_SUCCESS;
+vchan_del:
+	err = rte_event_dma_adapter_vchan_del(TEST_ADAPTER_ID, TEST_DMA_DEV_ID,
+					      TEST_DMA_VCHAN_ID);
+	TEST_ASSERT_SUCCESS(err, "Failed to delete vchan\n");
+
+	return rc;
+}
+
+/* FORWARD-mode data path: build TEST_MAX_OP copy operations, send them
+ * through the adapter, then verify every destination buffer matches its
+ * source before releasing all resources.
+ */
+static int
+test_op_forward_mode(void)
+{
+	struct rte_mbuf *src_mbuf[TEST_MAX_OP];
+	struct rte_mbuf *dst_mbuf[TEST_MAX_OP];
+	struct rte_event ev[TEST_MAX_OP] = {0};
+	struct rte_event response_info;
+	struct rte_event_dma_adapter_op *op;
+	int ret, i;
+
+	ret = rte_pktmbuf_alloc_bulk(params.src_mbuf_pool, src_mbuf, TEST_MAX_OP);
+	TEST_ASSERT_SUCCESS(ret, "alloc src mbufs failed.\n");
+
+	ret = rte_pktmbuf_alloc_bulk(params.dst_mbuf_pool, dst_mbuf, TEST_MAX_OP);
+	TEST_ASSERT_SUCCESS(ret, "alloc dst mbufs failed.\n");
+
+	/* Fill the sources with a random byte, zero the destinations. */
+	for (i = 0; i < TEST_MAX_OP; i++) {
+		memset(rte_pktmbuf_mtod(src_mbuf[i], void *), rte_rand(), PACKET_LENGTH);
+		memset(rte_pktmbuf_mtod(dst_mbuf[i], void *), 0, PACKET_LENGTH);
+	}
+
+	for (i = 0; i < TEST_MAX_OP; i++) {
+		/* Check the mempool return code: on failure 'op' is left
+		 * untouched (uninitialized), so a NULL check alone would not
+		 * catch an exhausted pool.
+		 */
+		ret = rte_mempool_get(params.op_mpool, (void **)&op);
+		TEST_ASSERT_SUCCESS(ret, "Failed to allocate dma operation struct\n");
+
+		op->src_seg = rte_malloc(NULL, sizeof(struct rte_dma_sge), 0);
+		op->dst_seg = rte_malloc(NULL, sizeof(struct rte_dma_sge), 0);
+		TEST_ASSERT_NOT_NULL(op->src_seg, "Failed to allocate src sge\n");
+		TEST_ASSERT_NOT_NULL(op->dst_seg, "Failed to allocate dst sge\n");
+
+		/* Update Op */
+		op->src_seg->addr = rte_pktmbuf_iova(src_mbuf[i]);
+		op->dst_seg->addr = rte_pktmbuf_iova(dst_mbuf[i]);
+		op->src_seg->length = PACKET_LENGTH;
+		op->dst_seg->length = PACKET_LENGTH;
+		op->nb_src = 1;
+		op->nb_dst = 1;
+		op->flags = RTE_DMA_OP_FLAG_SUBMIT;
+		op->op_mp = params.op_mpool;
+		op->dma_dev_id = TEST_DMA_DEV_ID;
+		op->vchan = TEST_DMA_VCHAN_ID;
+
+		/* The response event lives immediately after the op struct;
+		 * the op mempool element was sized for both.
+		 */
+		response_info.event = dma_response_info.event;
+		rte_memcpy((uint8_t *)op + sizeof(struct rte_event_dma_adapter_op),
+			   &response_info, sizeof(struct rte_event));
+
+		/* Fill in event info and update event_ptr with rte_event_dma_adapter_op */
+		memset(&ev[i], 0, sizeof(struct rte_event));
+		ev[i].event_type = RTE_EVENT_TYPE_DMADEV;
+		ev[i].queue_id = TEST_DMA_EV_QUEUE_ID;
+		ev[i].sched_type = RTE_SCHED_TYPE_ATOMIC;
+		ev[i].flow_id = TEST_APP_EV_FLOWID;
+		ev[i].event_ptr = op;
+	}
+
+	ret = send_recv_ev(ev);
+	TEST_ASSERT_SUCCESS(ret, "Failed to send/receive event to dma adapter\n");
+
+	test_dma_adapter_stats();
+
+	for (i = 0; i < TEST_MAX_OP; i++) {
+		op = ev[i].event_ptr;
+		ret = memcmp((uint8_t *)op->src_seg->addr, (uint8_t *)op->dst_seg->addr,
+			     op->src_seg->length);
+		TEST_ASSERT_EQUAL(ret, 0, "Data mismatch for dma adapter\n");
+
+		rte_free(op->src_seg);
+		rte_free(op->dst_seg);
+		rte_mempool_put(op->op_mp, op);
+	}
+
+	rte_pktmbuf_free_bulk(src_mbuf, TEST_MAX_OP);
+	rte_pktmbuf_free_bulk(dst_mbuf, TEST_MAX_OP);
+
+	return TEST_SUCCESS;
+}
+
+/* If the adapter is service-backed, make sure slcore_id is registered and
+ * started as a service lcore and map the adapter's service to it.  No-op
+ * for adapters with an internal port (service_id_get fails).
+ */
+static int
+map_adapter_service_core(void)
+{
+	uint32_t adapter_service_id;
+	int ret;
+
+	if (rte_event_dma_adapter_service_id_get(TEST_ADAPTER_ID, &adapter_service_id) == 0) {
+		uint32_t core_list[NUM_CORES];
+
+		ret = rte_service_lcore_list(core_list, NUM_CORES);
+		TEST_ASSERT(ret >= 0, "Failed to get service core list!");
+
+		/* Register/start the lcore unless it is already first in
+		 * the service core list.
+		 */
+		if (core_list[0] != slcore_id) {
+			TEST_ASSERT_SUCCESS(rte_service_lcore_add(slcore_id),
+					    "Failed to add service core");
+			TEST_ASSERT_SUCCESS(rte_service_lcore_start(slcore_id),
+					    "Failed to start service core");
+		}
+
+		TEST_ASSERT_SUCCESS(rte_service_map_lcore_set(
+					adapter_service_id, slcore_id, 1),
+				    "Failed to map adapter service");
+	}
+
+	return TEST_SUCCESS;
+}
+
+/* Run the FORWARD-mode test.  SW adapters (no internal-port caps at all)
+ * need a service core mapped first; HW adapters lacking OP_FWD support
+ * skip the test.
+ */
+static int
+test_with_op_forward_mode(void)
+{
+	uint32_t cap;
+	int ret;
+
+	ret = rte_event_dma_adapter_caps_get(evdev, TEST_DMA_DEV_ID, &cap);
+	TEST_ASSERT_SUCCESS(ret, "Failed to get adapter capabilities\n");
+
+	if (!(cap & RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_FWD) &&
+	    !(cap & RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_NEW))
+		map_adapter_service_core();
+	else {
+		if (!(cap & RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_FWD))
+			return TEST_SKIPPED;
+	}
+
+	TEST_ASSERT_SUCCESS(rte_event_dma_adapter_start(TEST_ADAPTER_ID),
+			    "Failed to start event dma adapter");
+
+	ret = test_op_forward_mode();
+	TEST_ASSERT_SUCCESS(ret, "DMA - FORWARD mode test failed\n");
+	return TEST_SUCCESS;
+}
+
+/* Set up DMA device 0 with one MEM_TO_MEM vchan and create the mbuf and
+ * op mempools used by the data-path tests.
+ */
+static int
+configure_dmadev(void)
+{
+	const struct rte_dma_conf conf = { .nb_vchans = 1};
+	const struct rte_dma_vchan_conf qconf = {
+		.direction = RTE_DMA_DIR_MEM_TO_MEM,
+		.nb_desc = TEST_RINGSIZE,
+	};
+	struct rte_dma_info info;
+	unsigned int elt_size;
+	int ret;
+
+	ret = rte_dma_count_avail();
+	RTE_TEST_ASSERT_FAIL(ret, "No dma devices found!\n");
+
+	ret = rte_dma_info_get(TEST_DMA_DEV_ID, &info);
+	TEST_ASSERT_SUCCESS(ret, "Error with rte_dma_info_get()\n");
+
+	/* Fail the setup instead of only logging: every later test case
+	 * depends on a correctly configured vchan.
+	 */
+	TEST_ASSERT(info.max_vchans >= 1, "Error, no channels available on device id %u\n",
+		    TEST_DMA_DEV_ID);
+
+	TEST_ASSERT_SUCCESS(rte_dma_configure(TEST_DMA_DEV_ID, &conf),
+			    "Error with rte_dma_configure()\n");
+
+	TEST_ASSERT(rte_dma_vchan_setup(TEST_DMA_DEV_ID, TEST_DMA_VCHAN_ID, &qconf) >= 0,
+		    "Error with vchan configuration\n");
+
+	ret = rte_dma_info_get(TEST_DMA_DEV_ID, &info);
+	TEST_ASSERT(ret == 0 && info.nb_vchans == 1,
+		    "Error, no configured vchan reported on device id %u\n",
+		    TEST_DMA_DEV_ID);
+
+	params.src_mbuf_pool = rte_pktmbuf_pool_create("DMA_ADAPTER_SRC_MBUFPOOL", NUM_MBUFS,
+						       MBUF_CACHE_SIZE, 0, MBUF_SIZE,
+						       rte_socket_id());
+	RTE_TEST_ASSERT_NOT_NULL(params.src_mbuf_pool, "Can't create DMA_SRC_MBUFPOOL\n");
+
+	params.dst_mbuf_pool = rte_pktmbuf_pool_create("DMA_ADAPTER_DST_MBUFPOOL", NUM_MBUFS,
+						       MBUF_CACHE_SIZE, 0, MBUF_SIZE,
+						       rte_socket_id());
+	RTE_TEST_ASSERT_NOT_NULL(params.dst_mbuf_pool, "Can't create DMA_DST_MBUFPOOL\n");
+
+	/* Each element holds an adapter op followed by the response event. */
+	elt_size = sizeof(struct rte_event_dma_adapter_op) + sizeof(struct rte_event);
+	params.op_mpool = rte_mempool_create("EVENT_DMA_OP_POOL", DMA_OP_POOL_SIZE, elt_size, 0,
+					     0, NULL, NULL, NULL, NULL, rte_socket_id(), 0);
+	RTE_TEST_ASSERT_NOT_NULL(params.op_mpool, "Can't create DMA_OP_POOL\n");
+
+	return TEST_SUCCESS;
+}
+
+/* Derive an eventdev config from device limits: one port, two queues and
+ * the device maxima for flows, queue depths and inflight events.
+ */
+static inline void
+evdev_set_conf_values(struct rte_event_dev_config *dev_conf, struct rte_event_dev_info *info)
+{
+	memset(dev_conf, 0, sizeof(struct rte_event_dev_config));
+	dev_conf->dequeue_timeout_ns = info->min_dequeue_timeout_ns;
+	dev_conf->nb_event_ports = NB_TEST_PORTS;
+	dev_conf->nb_event_queues = NB_TEST_QUEUES;
+	dev_conf->nb_event_queue_flows = info->max_event_queue_flows;
+	dev_conf->nb_event_port_dequeue_depth = info->max_event_port_dequeue_depth;
+	dev_conf->nb_event_port_enqueue_depth = info->max_event_port_enqueue_depth;
+	dev_conf->nb_events_limit = info->max_num_events;
+}
+
+/* Bring up an event device with two queues (app + dma) and one app port.
+ * If no eventdev is present the event_sw vdev is created; otherwise
+ * device 0 is used ('evdev' keeps its initial value of 0).
+ */
+static int
+configure_eventdev(void)
+{
+	struct rte_event_queue_conf queue_conf;
+	struct rte_event_dev_config devconf;
+	struct rte_event_dev_info info;
+	uint32_t queue_count;
+	uint32_t port_count;
+	uint8_t qid;
+	int ret;
+
+	if (!rte_event_dev_count()) {
+		/* If there is no hardware eventdev, or no software vdev was
+		 * specified on the command line, create an instance of
+		 * event_sw.
+		 */
+		LOG_DBG("Failed to find a valid event device... "
+			"testing with event_sw device\n");
+		TEST_ASSERT_SUCCESS(rte_vdev_init("event_sw0", NULL),
+				    "Error creating eventdev");
+		evdev = rte_event_dev_get_dev_id("event_sw0");
+	}
+
+	ret = rte_event_dev_info_get(evdev, &info);
+	TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info\n");
+
+	evdev_set_conf_values(&devconf, &info);
+
+	ret = rte_event_dev_configure(evdev, &devconf);
+	TEST_ASSERT_SUCCESS(ret, "Failed to configure eventdev\n");
+
+	/* Set up event queue */
+	ret = rte_event_dev_attr_get(evdev, RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count);
+	TEST_ASSERT_SUCCESS(ret, "Queue count get failed\n");
+	TEST_ASSERT_EQUAL(queue_count, 2, "Unexpected queue count\n");
+
+	/* App queue: default configuration. */
+	qid = TEST_APP_EV_QUEUE_ID;
+	ret = rte_event_queue_setup(evdev, qid, NULL);
+	TEST_ASSERT_SUCCESS(ret, "Failed to setup queue=%d\n", qid);
+
+	/* DMA queue: single-link, atomic, highest priority. */
+	queue_conf.nb_atomic_flows = info.max_event_queue_flows;
+	queue_conf.nb_atomic_order_sequences = 32;
+	queue_conf.schedule_type = RTE_SCHED_TYPE_ATOMIC;
+	queue_conf.priority = RTE_EVENT_DEV_PRIORITY_HIGHEST;
+	queue_conf.event_queue_cfg = RTE_EVENT_QUEUE_CFG_SINGLE_LINK;
+
+	qid = TEST_DMA_EV_QUEUE_ID;
+	ret = rte_event_queue_setup(evdev, qid, &queue_conf);
+	TEST_ASSERT_SUCCESS(ret, "Failed to setup queue=%u\n", qid);
+
+	/* Set up event port */
+	ret = rte_event_dev_attr_get(evdev, RTE_EVENT_DEV_ATTR_PORT_COUNT,
+				     &port_count);
+	TEST_ASSERT_SUCCESS(ret, "Port count get failed\n");
+	TEST_ASSERT_EQUAL(port_count, 1, "Unexpected port count\n");
+
+	ret = rte_event_port_setup(evdev, TEST_APP_PORT_ID, NULL);
+	TEST_ASSERT_SUCCESS(ret, "Failed to setup port=%d\n",
+			    TEST_APP_PORT_ID);
+
+	/* Link the app port to the app queue only; the dma queue gets
+	 * linked to the adapter's event port later (SW mode).
+	 */
+	qid = TEST_APP_EV_QUEUE_ID;
+	ret = rte_event_port_link(evdev, TEST_APP_PORT_ID, &qid, NULL, 1);
+	TEST_ASSERT(ret >= 0, "Failed to link queue port=%d\n",
+		    TEST_APP_PORT_ID);
+
+	return TEST_SUCCESS;
+}
+
+/* TEST_CASE_ST teardown counterpart of test_dma_adapter_create(). */
+static void
+test_dma_adapter_free(void)
+{
+	rte_event_dma_adapter_free(TEST_ADAPTER_ID);
+}
+
+/* Create the DMA adapter with an event port config derived from the
+ * event device maxima.
+ */
+static int
+test_dma_adapter_create(void)
+{
+	struct rte_event_dev_info evdev_info = {0};
+	struct rte_event_port_conf conf = {0};
+	int ret;
+
+	ret = rte_event_dev_info_get(evdev, &evdev_info);
+	TEST_ASSERT_SUCCESS(ret, "Failed to get event device info\n");
+
+	conf.new_event_threshold = evdev_info.max_num_events;
+	conf.dequeue_depth = evdev_info.max_event_port_dequeue_depth;
+	conf.enqueue_depth = evdev_info.max_event_port_enqueue_depth;
+
+	/* Create adapter with default port creation callback */
+	ret = rte_event_dma_adapter_create(TEST_ADAPTER_ID, evdev, &conf, 0);
+	TEST_ASSERT_SUCCESS(ret, "Failed to create event dma adapter\n");
+
+	return TEST_SUCCESS;
+}
+
+/* Verify that a vchan can be bound to and unbound from the adapter. */
+static int
+test_dma_adapter_qp_add_del(void)
+{
+	struct rte_event event = {0};
+	uint32_t cap;
+	int ret;
+
+	ret = rte_event_dma_adapter_caps_get(evdev, TEST_DMA_DEV_ID, &cap);
+	TEST_ASSERT_SUCCESS(ret, "Failed to get adapter capabilities\n");
+
+	/* Devices with the EV_BIND capability take the response event at
+	 * bind time; all others ignore it.
+	 */
+	if (cap & RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_VCHAN_EV_BIND) {
+		ret = rte_event_dma_adapter_vchan_add(TEST_ADAPTER_ID, TEST_DMA_DEV_ID,
+						      TEST_DMA_VCHAN_ID, &event);
+	} else {
+		ret = rte_event_dma_adapter_vchan_add(TEST_ADAPTER_ID, TEST_DMA_DEV_ID,
+						      TEST_DMA_VCHAN_ID, NULL);
+	}
+
+	TEST_ASSERT_SUCCESS(ret, "Failed to add vchan\n");
+
+	ret = rte_event_dma_adapter_vchan_del(TEST_ADAPTER_ID, TEST_DMA_DEV_ID,
+					      TEST_DMA_VCHAN_ID);
+	TEST_ASSERT_SUCCESS(ret, "Failed to delete vchan\n");
+
+	return TEST_SUCCESS;
+}
+
+/* Configure the adapter for the given mode: check capability/mode
+ * compatibility, create the adapter, bind the test vchan and, for SW
+ * adapters, fetch the adapter's event port.
+ */
+static int
+configure_event_dma_adapter(enum rte_event_dma_adapter_mode mode)
+{
+	struct rte_event_dev_info evdev_info = {0};
+	struct rte_event_port_conf conf = {0};
+	struct rte_event event = {0};
+	uint32_t cap;
+	int ret;
+
+	ret = rte_event_dma_adapter_caps_get(evdev, TEST_DMA_DEV_ID, &cap);
+	TEST_ASSERT_SUCCESS(ret, "Failed to get adapter capabilities\n");
+
+	/* Skip mode and capability mismatch check for SW eventdev */
+	if (!(cap & RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_NEW) &&
+	    !(cap & RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_FWD) &&
+	    !(cap & RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_VCHAN_EV_BIND))
+		goto adapter_create;
+
+	if (mode == RTE_EVENT_DMA_ADAPTER_OP_FORWARD) {
+		if (cap & RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_FWD)
+			params.internal_port_op_fwd = 1;
+		else
+			return -ENOTSUP;
+	}
+
+adapter_create:
+	ret = rte_event_dev_info_get(evdev, &evdev_info);
+	TEST_ASSERT_SUCCESS(ret, "Failed to get event device info\n");
+
+	conf.new_event_threshold = evdev_info.max_num_events;
+	conf.dequeue_depth = evdev_info.max_event_port_dequeue_depth;
+	conf.enqueue_depth = evdev_info.max_event_port_enqueue_depth;
+
+	/* Create adapter with default port creation callback */
+	ret = rte_event_dma_adapter_create(TEST_ADAPTER_ID, evdev, &conf, mode);
+	TEST_ASSERT_SUCCESS(ret, "Failed to create event dma adapter\n");
+
+	if (cap & RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_VCHAN_EV_BIND) {
+		ret = rte_event_dma_adapter_vchan_add(TEST_ADAPTER_ID, TEST_DMA_DEV_ID,
+						      TEST_DMA_VCHAN_ID, &event);
+	} else {
+		ret = rte_event_dma_adapter_vchan_add(TEST_ADAPTER_ID, TEST_DMA_DEV_ID,
+						      TEST_DMA_VCHAN_ID, NULL);
+	}
+
+	TEST_ASSERT_SUCCESS(ret, "Failed to add vchan\n");
+
+	/* SW adapters expose an event port which the app must link. */
+	if (!params.internal_port_op_fwd) {
+		ret = rte_event_dma_adapter_event_port_get(TEST_ADAPTER_ID,
+							   &params.dma_event_port_id);
+		TEST_ASSERT_SUCCESS(ret, "Failed to get event port\n");
+	}
+
+	return TEST_SUCCESS;
+}
+
+/* Stop the adapter and eventdev services, if any, then stop the DMA and
+ * event devices.
+ */
+static void
+test_dma_adapter_stop(void)
+{
+	uint32_t svc_id;
+
+	/* Service-backed adapter: park its service and lcore, then stop it. */
+	if (rte_event_dma_adapter_service_id_get(TEST_ADAPTER_ID, &svc_id) == 0) {
+		rte_service_runstate_set(svc_id, 0);
+		rte_service_lcore_stop(slcore_id);
+		rte_service_lcore_del(slcore_id);
+		rte_event_dma_adapter_stop(TEST_ADAPTER_ID);
+	}
+
+	/* Service-backed eventdev: same treatment for its service. */
+	if (rte_event_dev_service_id_get(evdev, &svc_id) == 0) {
+		rte_service_runstate_set(svc_id, 0);
+		rte_service_lcore_stop(slcore_id);
+		rte_service_lcore_del(slcore_id);
+	}
+
+	rte_dma_stop(TEST_DMA_DEV_ID);
+	rte_event_dev_stop(evdev);
+}
+
+/* Lazily configure the adapter (once per run, guarded by
+ * dma_adapter_setup_done), link the adapter's event port in SW mode,
+ * start any eventdev service, then start the event and DMA devices.
+ */
+static int
+test_dma_adapter_conf(enum rte_event_dma_adapter_mode mode)
+{
+	uint32_t evdev_service_id;
+	uint8_t qid;
+	int ret;
+
+	if (!dma_adapter_setup_done) {
+		ret = configure_event_dma_adapter(mode);
+		if (ret)
+			return ret;
+		if (!params.internal_port_op_fwd) {
+			/* SW mode: route the dma queue to the adapter port. */
+			qid = TEST_DMA_EV_QUEUE_ID;
+			ret = rte_event_port_link(evdev,
+					params.dma_event_port_id, &qid, NULL, 1);
+			TEST_ASSERT(ret >= 0, "Failed to link queue %d "
+				    "port=%u\n", qid,
+				    params.dma_event_port_id);
+		}
+		dma_adapter_setup_done = 1;
+	}
+
+	/* retrieve service ids */
+	if (rte_event_dev_service_id_get(evdev, &evdev_service_id) == 0) {
+		/* add a service core and start it */
+		TEST_ASSERT_SUCCESS(rte_service_lcore_add(slcore_id),
+				    "Failed to add service core");
+		TEST_ASSERT_SUCCESS(rte_service_lcore_start(slcore_id),
+				    "Failed to start service core");
+
+		/* map services to it */
+		TEST_ASSERT_SUCCESS(rte_service_map_lcore_set(evdev_service_id,
+				slcore_id, 1), "Failed to map evdev service");
+
+		/* set services to running */
+		TEST_ASSERT_SUCCESS(rte_service_runstate_set(evdev_service_id,
+				1), "Failed to start evdev service");
+	}
+
+	/* start the eventdev */
+	TEST_ASSERT_SUCCESS(rte_event_dev_start(evdev),
+			    "Failed to start event device");
+
+	/* start the dma dev */
+	TEST_ASSERT_SUCCESS(rte_dma_start(TEST_DMA_DEV_ID),
+			    "Failed to start dma device");
+
+	return TEST_SUCCESS;
+}
+
+/* TEST_CASE_ST setup: configure the adapter for OP_FORWARD mode. */
+static int
+test_dma_adapter_conf_op_forward_mode(void)
+{
+	return test_dma_adapter_conf(RTE_EVENT_DMA_ADAPTER_OP_FORWARD);
+}
+
+/* Suite setup: reserve a service lcore and bring up the event and DMA
+ * devices.  Requires at least two lcores (main + service).
+ */
+static int
+testsuite_setup(void)
+{
+	int ret;
+
+	slcore_id = rte_get_next_lcore(-1, 1, 0);
+	TEST_ASSERT_NOT_EQUAL(slcore_id, RTE_MAX_LCORE, "At least 2 lcores "
+			      "are required to run this autotest\n");
+
+	/* Setup and start event device. */
+	ret = configure_eventdev();
+	TEST_ASSERT_SUCCESS(ret, "Failed to setup eventdev\n");
+
+	/* Setup and start dma device. */
+	ret = configure_dmadev();
+	TEST_ASSERT_SUCCESS(ret, "dmadev initialization failed\n");
+
+	return TEST_SUCCESS;
+}
+
+/* Stop the adapter, unbind its vchan, free it and reset the one-time
+ * setup latch.  Failures are only logged: teardown is best effort.
+ */
+static void
+dma_adapter_teardown(void)
+{
+	int ret;
+
+	ret = rte_event_dma_adapter_stop(TEST_ADAPTER_ID);
+	if (ret < 0)
+		RTE_LOG(ERR, USER1, "Failed to stop adapter!");
+
+	ret = rte_event_dma_adapter_vchan_del(TEST_ADAPTER_ID, TEST_DMA_DEV_ID,
+					      TEST_DMA_VCHAN_ID);
+	if (ret < 0)
+		RTE_LOG(ERR, USER1, "Failed to delete vchan!");
+
+	ret = rte_event_dma_adapter_free(TEST_ADAPTER_ID);
+	if (ret < 0)
+		RTE_LOG(ERR, USER1, "Failed to free adapter!");
+
+	dma_adapter_setup_done = 0;
+}
+
+/* Release every mempool created by configure_dmadev(); the avail counts
+ * are logged so leaked objects are visible in debug output.
+ */
+static void
+dma_teardown(void)
+{
+	/* Free mbuf mempool */
+	if (params.src_mbuf_pool != NULL) {
+		RTE_LOG(DEBUG, USER1, "DMA_ADAPTER_SRC_MBUFPOOL count %u\n",
+			rte_mempool_avail_count(params.src_mbuf_pool));
+		rte_mempool_free(params.src_mbuf_pool);
+		params.src_mbuf_pool = NULL;
+	}
+
+	if (params.dst_mbuf_pool != NULL) {
+		RTE_LOG(DEBUG, USER1, "DMA_ADAPTER_DST_MBUFPOOL count %u\n",
+			rte_mempool_avail_count(params.dst_mbuf_pool));
+		rte_mempool_free(params.dst_mbuf_pool);
+		params.dst_mbuf_pool = NULL;
+	}
+
+	/* Free ops mempool */
+	if (params.op_mpool != NULL) {
+		RTE_LOG(DEBUG, USER1, "EVENT_DMA_OP_POOL count %u\n",
+			rte_mempool_avail_count(params.op_mpool));
+		rte_mempool_free(params.op_mpool);
+		params.op_mpool = NULL;
+	}
+}
+
+/* Stop the event device; the event_sw0 vdev, if one was created, is
+ * intentionally left in place.
+ */
+static void
+eventdev_teardown(void)
+{
+	rte_event_dev_stop(evdev);
+}
+
+/* Suite teardown: adapter first, then DMA resources, then the eventdev. */
+static void
+testsuite_teardown(void)
+{
+	dma_adapter_teardown();
+	dma_teardown();
+	eventdev_teardown();
+}
+
+/* Test case list: create/free round trip, vchan add/del, stats dump,
+ * runtime params and the OP_FORWARD data path.
+ */
+static struct unit_test_suite functional_testsuite = {
+	.suite_name = "Event dma adapter test suite",
+	.setup = testsuite_setup,
+	.teardown = testsuite_teardown,
+	.unit_test_cases = {
+
+		TEST_CASE_ST(NULL, test_dma_adapter_free, test_dma_adapter_create),
+
+		TEST_CASE_ST(test_dma_adapter_create, test_dma_adapter_free,
+			     test_dma_adapter_qp_add_del),
+
+		TEST_CASE_ST(test_dma_adapter_create, test_dma_adapter_free,
+			     test_dma_adapter_stats),
+
+		TEST_CASE_ST(test_dma_adapter_create, test_dma_adapter_free,
+			     test_dma_adapter_params),
+
+		TEST_CASE_ST(test_dma_adapter_conf_op_forward_mode, test_dma_adapter_stop,
+			     test_with_op_forward_mode),
+
+		TEST_CASES_END() /**< NULL terminate unit test array */
+	}
+};
+
+/* Autotest entry point: run the functional test suite. */
+static int
+test_event_dma_adapter(void)
+{
+	return unit_test_suite_runner(&functional_testsuite);
+}
+
+#endif /* !RTE_EXEC_ENV_WINDOWS */
+
+REGISTER_TEST_COMMAND(event_dma_adapter_autotest, test_event_dma_adapter);
--
2.25.1
next prev parent reply other threads:[~2023-09-22 20:15 UTC|newest]
Thread overview: 110+ messages / expand[flat|nested] mbox.gz Atom feed top
2023-09-19 13:42 [PATCH v1 1/7] eventdev: introduce DMA event adapter library Amit Prakash Shukla
2023-09-19 13:42 ` [PATCH v1 2/7] eventdev: api to get DMA capabilities Amit Prakash Shukla
2023-09-19 13:42 ` [PATCH v1 3/7] eventdev: add DMA adapter implementation Amit Prakash Shukla
2023-09-19 13:42 ` [PATCH v1 4/7] app/test: add event DMA adapter auto-test Amit Prakash Shukla
2023-09-19 13:42 ` [PATCH v1 5/7] common/cnxk: dma result to an offset of the event Amit Prakash Shukla
2023-09-19 13:42 ` [PATCH v1 6/7] dma/cnxk: support for DMA event enqueue dequeue Amit Prakash Shukla
2023-09-19 13:42 ` [PATCH v1 7/7] event/cnxk: support DMA event functions Amit Prakash Shukla
2023-09-21 2:41 ` [PATCH v1 1/7] eventdev: introduce DMA event adapter library Jerin Jacob
2023-09-21 6:42 ` [EXT] " Amit Prakash Shukla
2023-09-22 20:13 ` [PATCH v2 00/12] event DMA adapter library support Amit Prakash Shukla
2023-09-22 20:13 ` [PATCH v2 01/12] eventdev: introduce event DMA adapter library Amit Prakash Shukla
2023-09-22 20:13 ` [PATCH v2 02/12] eventdev: api to get DMA adapter capabilities Amit Prakash Shukla
2023-09-22 20:13 ` [PATCH v2 03/12] eventdev: add DMA adapter API to create and free Amit Prakash Shukla
2023-09-22 20:13 ` [PATCH v2 04/12] eventdev: api support for vchan add and delete Amit Prakash Shukla
2023-09-22 20:13 ` [PATCH v2 05/12] eventdev: add support for service function Amit Prakash Shukla
2023-09-22 20:13 ` [PATCH v2 06/12] eventdev: api support for DMA adapter start stop Amit Prakash Shukla
2023-09-22 20:13 ` [PATCH v2 07/12] eventdev: api support to get DMA adapter service ID Amit Prakash Shukla
2023-09-22 20:13 ` [PATCH v2 08/12] eventdev: add DMA adapter support for runtime params Amit Prakash Shukla
2023-09-22 20:13 ` [PATCH v2 09/12] eventdev: add support for DMA adapter stats Amit Prakash Shukla
2023-09-22 20:13 ` [PATCH v2 10/12] eventdev: add support for DMA adapter enqueue Amit Prakash Shukla
2023-09-22 20:13 ` [PATCH v2 11/12] eventdev: add DMA adapter port get Amit Prakash Shukla
2023-09-22 20:13 ` Amit Prakash Shukla [this message]
2023-09-23 13:34 ` [PATCH v3 00/12] event DMA adapter library support Amit Prakash Shukla
2023-09-23 13:34 ` [PATCH v3 01/12] eventdev: introduce event DMA adapter library Amit Prakash Shukla
2023-09-23 13:34 ` [PATCH v3 02/12] eventdev: api to get DMA adapter capabilities Amit Prakash Shukla
2023-09-23 13:34 ` [PATCH v3 03/12] eventdev: create and free API for DMA adapter Amit Prakash Shukla
2023-09-23 13:34 ` [PATCH v3 04/12] eventdev: add API support for vchan add and delete Amit Prakash Shukla
2023-09-23 13:34 ` [PATCH v3 05/12] eventdev: add support for DMA adapter service function Amit Prakash Shukla
2023-09-23 13:34 ` [PATCH v3 06/12] eventdev: add support for DMA adapter start and stop Amit Prakash Shukla
2023-09-23 13:34 ` [PATCH v3 07/12] eventdev: add support for DMA adapter service ID get Amit Prakash Shukla
2023-09-23 13:34 ` [PATCH v3 08/12] eventdev: add DMA adapter support for runtime params Amit Prakash Shukla
2023-09-23 13:34 ` [PATCH v3 09/12] eventdev: add support for DMA adapter stats Amit Prakash Shukla
2023-09-23 13:34 ` [PATCH v3 10/12] eventdev: add support for DMA adapter enqueue Amit Prakash Shukla
2023-09-23 13:34 ` [PATCH v3 11/12] eventdev: add DMA adapter event port get Amit Prakash Shukla
2023-09-23 13:34 ` [PATCH v3 12/12] app/test: add event DMA adapter auto-test Amit Prakash Shukla
2023-09-26 10:32 ` [PATCH v4 00/12] event DMA adapter library support Amit Prakash Shukla
2023-09-26 10:32 ` [PATCH v4 01/12] eventdev: introduce event DMA adapter library Amit Prakash Shukla
2023-09-27 18:12 ` Jerin Jacob
2023-09-27 20:45 ` Thomas Monjalon
2023-09-28 4:04 ` Jerin Jacob
2023-09-26 10:32 ` [PATCH v4 02/12] eventdev: api to get DMA adapter capabilities Amit Prakash Shukla
2023-09-27 18:20 ` Jerin Jacob
2023-09-26 10:32 ` [PATCH v4 03/12] eventdev: create and free API for DMA adapter Amit Prakash Shukla
2023-09-27 18:23 ` Jerin Jacob
2023-09-26 10:32 ` [PATCH v4 04/12] eventdev: add API support for vchan add and delete Amit Prakash Shukla
2023-09-26 10:32 ` [PATCH v4 05/12] eventdev: add support for DMA adapter service function Amit Prakash Shukla
2023-09-26 10:32 ` [PATCH v4 06/12] eventdev: add support for DMA adapter start and stop Amit Prakash Shukla
2023-09-26 10:32 ` [PATCH v4 07/12] eventdev: add support for DMA adapter service ID get Amit Prakash Shukla
2023-09-26 10:32 ` [PATCH v4 08/12] eventdev: add DMA adapter support for runtime params Amit Prakash Shukla
2023-09-26 10:32 ` [PATCH v4 09/12] eventdev: add support for DMA adapter stats Amit Prakash Shukla
2023-09-26 10:32 ` [PATCH v4 10/12] eventdev: add support for DMA adapter enqueue Amit Prakash Shukla
2023-09-26 10:32 ` [PATCH v4 11/12] eventdev: add DMA adapter event port get Amit Prakash Shukla
2023-09-26 10:32 ` [PATCH v4 12/12] app/test: add event DMA adapter auto-test Amit Prakash Shukla
2023-09-27 18:27 ` Jerin Jacob
2023-09-28 10:36 ` [PATCH v5 00/12] event DMA adapter library support Amit Prakash Shukla
2023-09-28 10:36 ` [PATCH v5 01/12] eventdev/dma: introduce DMA adapter Amit Prakash Shukla
2023-09-28 15:33 ` Jerin Jacob
2023-09-28 10:36 ` [PATCH v5 02/12] eventdev/dma: support adapter capabilities get Amit Prakash Shukla
2023-09-28 10:36 ` [PATCH v5 03/12] eventdev/dma: support adapter create and free Amit Prakash Shukla
2023-09-28 10:36 ` [PATCH v5 04/12] eventdev/dma: support for vchan add and delete Amit Prakash Shukla
2023-09-28 10:36 ` [PATCH v5 05/12] eventdev/dma: support for adapter service function Amit Prakash Shukla
2023-09-28 10:36 ` [PATCH v5 06/12] eventdev/dma: support for adapter start and stop Amit Prakash Shukla
2023-09-28 10:36 ` [PATCH v5 07/12] eventdev/dma: support for adapter service ID get Amit Prakash Shukla
2023-09-28 10:36 ` [PATCH v5 08/12] eventdev/dma: support adapter runtime params Amit Prakash Shukla
2023-09-28 10:36 ` [PATCH v5 09/12] eventdev/dma: support for adapter stats Amit Prakash Shukla
2023-09-28 10:36 ` [PATCH v5 10/12] eventdev/dma: support for adapter enqueue Amit Prakash Shukla
2023-09-28 10:36 ` [PATCH v5 11/12] eventdev/dma: support for adapter event port get Amit Prakash Shukla
2023-09-28 10:36 ` [PATCH v5 12/12] app/test: add event DMA adapter auto-test Amit Prakash Shukla
2023-09-29 7:20 ` Jerin Jacob
2023-09-28 16:49 ` [PATCH v6 00/12] event DMA adapter library support Amit Prakash Shukla
2023-09-28 16:49 ` [PATCH v6 01/12] eventdev/dma: introduce DMA adapter Amit Prakash Shukla
2023-09-28 16:49 ` [PATCH v6 02/12] eventdev/dma: support adapter capabilities get Amit Prakash Shukla
2023-09-28 16:49 ` [PATCH v6 03/12] eventdev/dma: support adapter create and free Amit Prakash Shukla
2023-09-28 16:49 ` [PATCH v6 04/12] eventdev/dma: support vchan add and delete Amit Prakash Shukla
2023-09-28 16:49 ` [PATCH v6 05/12] eventdev/dma: support adapter service function Amit Prakash Shukla
2023-09-28 16:49 ` [PATCH v6 06/12] eventdev/dma: support adapter start and stop Amit Prakash Shukla
2023-09-28 16:49 ` [PATCH v6 07/12] eventdev/dma: support adapter service ID get Amit Prakash Shukla
2023-09-28 16:49 ` [PATCH v6 08/12] eventdev/dma: support adapter runtime params Amit Prakash Shukla
2023-09-28 16:49 ` [PATCH v6 09/12] eventdev/dma: support adapter stats Amit Prakash Shukla
2023-09-28 16:49 ` [PATCH v6 10/12] eventdev/dma: support adapter enqueue Amit Prakash Shukla
2023-09-28 16:49 ` [PATCH v6 11/12] eventdev/dma: support adapter event port get Amit Prakash Shukla
2023-09-28 16:49 ` [PATCH v6 12/12] app/test: add event DMA adapter auto-test Amit Prakash Shukla
2023-09-29 8:12 ` [PATCH v7 00/12] event DMA adapter library support Amit Prakash Shukla
2023-09-29 8:12 ` [PATCH v7 01/12] eventdev/dma: introduce DMA adapter Amit Prakash Shukla
2023-09-29 8:12 ` [PATCH v7 02/12] eventdev/dma: support adapter capabilities get Amit Prakash Shukla
2023-09-29 8:13 ` [PATCH v7 03/12] eventdev/dma: support adapter create and free Amit Prakash Shukla
2023-09-29 8:13 ` [PATCH v7 04/12] eventdev/dma: support vchan add and delete Amit Prakash Shukla
2023-09-29 8:13 ` [PATCH v7 05/12] eventdev/dma: support adapter service function Amit Prakash Shukla
2023-09-29 8:13 ` [PATCH v7 06/12] eventdev/dma: support adapter start and stop Amit Prakash Shukla
2023-09-29 8:13 ` [PATCH v7 07/12] eventdev/dma: support adapter service ID get Amit Prakash Shukla
2023-09-29 8:13 ` [PATCH v7 08/12] eventdev/dma: support adapter runtime params Amit Prakash Shukla
2023-09-29 8:13 ` [PATCH v7 09/12] eventdev/dma: support adapter stats Amit Prakash Shukla
2023-09-29 8:13 ` [PATCH v7 10/12] eventdev/dma: support adapter enqueue Amit Prakash Shukla
2023-09-29 8:13 ` [PATCH v7 11/12] eventdev/dma: support adapter event port get Amit Prakash Shukla
2023-09-29 8:13 ` [PATCH v7 12/12] app/test: add event DMA adapter auto-test Amit Prakash Shukla
2023-09-29 11:50 ` [PATCH v8 00/12] event DMA adapter library support Amit Prakash Shukla
2023-09-29 11:50 ` [PATCH v8 01/12] eventdev/dma: introduce DMA adapter Amit Prakash Shukla
2023-09-29 11:50 ` [PATCH v8 02/12] eventdev/dma: support adapter capabilities get Amit Prakash Shukla
2023-09-29 11:50 ` [PATCH v8 03/12] eventdev/dma: support adapter create and free Amit Prakash Shukla
2023-09-29 11:50 ` [PATCH v8 04/12] eventdev/dma: support vchan add and delete Amit Prakash Shukla
2023-09-29 11:50 ` [PATCH v8 05/12] eventdev/dma: support adapter service function Amit Prakash Shukla
2023-09-29 11:50 ` [PATCH v8 06/12] eventdev/dma: support adapter start and stop Amit Prakash Shukla
2023-09-29 11:50 ` [PATCH v8 07/12] eventdev/dma: support adapter service ID get Amit Prakash Shukla
2023-09-29 11:50 ` [PATCH v8 08/12] eventdev/dma: support adapter runtime params Amit Prakash Shukla
2023-09-29 11:50 ` [PATCH v8 09/12] eventdev/dma: support adapter stats Amit Prakash Shukla
2023-09-29 11:50 ` [PATCH v8 10/12] eventdev/dma: support adapter enqueue Amit Prakash Shukla
2023-09-29 11:50 ` [PATCH v8 11/12] eventdev/dma: support adapter event port get Amit Prakash Shukla
2023-09-29 11:50 ` [PATCH v8 12/12] app/test: add event DMA adapter auto-test Amit Prakash Shukla
2023-10-03 5:13 ` [PATCH v8 00/12] event DMA adapter library support Jerin Jacob
2023-09-26 5:06 ` [PATCH v2 " Jerin Jacob
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20230922201337.3347666-13-amitprakashs@marvell.com \
--to=amitprakashs@marvell.com \
--cc=anoobj@marvell.com \
--cc=bruce.richardson@intel.com \
--cc=cheng1.jiang@intel.com \
--cc=conor.walsh@intel.com \
--cc=dev@dpdk.org \
--cc=fengchengwen@huawei.com \
--cc=g.singh@nxp.com \
--cc=hemant.agrawal@nxp.com \
--cc=jerinj@marvell.com \
--cc=kevin.laatz@intel.com \
--cc=mb@smartsharesystems.com \
--cc=ndabilpuram@marvell.com \
--cc=sachin.saxena@oss.nxp.com \
--cc=vattunuru@marvell.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox; see the mirroring instructions
for how to clone and mirror all data and code used for this inbox,
as well as URLs for its NNTP newsgroup(s).