From mboxrd@z Thu Jan 1 00:00:00 1970 Return-Path: Received: from mga07.intel.com (mga07.intel.com [134.134.136.100]) by dpdk.org (Postfix) with ESMTP id 2E2FB4D27 for ; Fri, 17 Aug 2018 06:22:32 +0200 (CEST) X-Amp-Result: SKIPPED(no attachment in message) X-Amp-File-Uploaded: False Received: from fmsmga004.fm.intel.com ([10.253.24.48]) by orsmga105.jf.intel.com with ESMTP/TLS/DHE-RSA-AES256-GCM-SHA384; 16 Aug 2018 21:22:31 -0700 X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="5.53,249,1531810800"; d="scan'208";a="80999903" Received: from unknown (HELO localhost.localdomain.localdomain) ([10.224.122.193]) by fmsmga004.fm.intel.com with ESMTP; 16 Aug 2018 21:22:28 -0700 From: Nikhil Rao To: jerin.jacob@caviumnetworks.com, olivier.matz@6wind.com Cc: dev@dpdk.org, Nikhil Rao Date: Fri, 17 Aug 2018 09:50:52 +0530 Message-Id: <1534479652-80182-4-git-send-email-nikhil.rao@intel.com> X-Mailer: git-send-email 1.8.3.1 In-Reply-To: <1534479652-80182-1-git-send-email-nikhil.rao@intel.com> References: <1534479652-80182-1-git-send-email-nikhil.rao@intel.com> Subject: [dpdk-dev] [PATCH v2 4/4] eventdev: add auto test for eth Tx adapter X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , X-List-Received-Date: Fri, 17 Aug 2018 04:22:33 -0000 This patch adds tests for the eth Tx adapter APIs. It also tests the data path for the rte_service function based implementation of the APIs. Signed-off-by: Nikhil Rao --- test/test/test_event_eth_tx_adapter.c | 676 ++++++++++++++++++++++++++++++++++ MAINTAINERS | 1 + test/test/Makefile | 1 + test/test/meson.build | 2 + 4 files changed, 680 insertions(+) create mode 100644 test/test/test_event_eth_tx_adapter.c diff --git a/test/test/test_event_eth_tx_adapter.c b/test/test/test_event_eth_tx_adapter.c new file mode 100644 index 0000000..2dc487b --- /dev/null +++ b/test/test/test_event_eth_tx_adapter.c @@ -0,0 +1,676 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Intel Corporation + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "test.h" + +#define MAX_NUM_QUEUE RTE_PMD_RING_MAX_RX_RINGS +#define TEST_INST_ID 0 +#define TEST_DEV_ID 0 +#define SOCKET0 0 +#define RING_SIZE 256 +#define ETH_NAME_LEN 32 +#define NUM_ETH_PAIR 1 +#define NUM_ETH_DEV (2 * NUM_ETH_PAIR) +#define NB_MBUF 512 +#define PAIR_PORT_INDEX(p) ((p) + NUM_ETH_PAIR) +#define PORT(p) default_params.port[(p)] +#define TEST_ETHDEV_ID PORT(0) +#define TEST_ETHDEV_PAIR_ID PORT(PAIR_PORT_INDEX(0)) + +#define EDEV_RETRY 0xffff + +struct event_eth_tx_adapter_test_params { + struct rte_mempool *mp; + uint16_t rx_rings, tx_rings; + uint32_t caps; + struct rte_ring *r[NUM_ETH_DEV][MAX_NUM_QUEUE]; + int port[NUM_ETH_DEV]; +}; + +static int event_dev_delete; +static struct event_eth_tx_adapter_test_params default_params; + +static inline int +port_init_common(uint8_t port, const struct rte_eth_conf *port_conf, + struct rte_mempool *mp) +{ + const uint16_t rx_ring_size = RING_SIZE, tx_ring_size = RING_SIZE; + int retval; + uint16_t q; + + if (!rte_eth_dev_is_valid_port(port)) + return -1; + + default_params.rx_rings = MAX_NUM_QUEUE; + default_params.tx_rings = MAX_NUM_QUEUE; + + /* Configure the Ethernet device. 
*/ + retval = rte_eth_dev_configure(port, default_params.rx_rings, + default_params.tx_rings, port_conf); + if (retval != 0) + return retval; + + for (q = 0; q < default_params.rx_rings; q++) { + retval = rte_eth_rx_queue_setup(port, q, rx_ring_size, + rte_eth_dev_socket_id(port), NULL, mp); + if (retval < 0) + return retval; + } + + for (q = 0; q < default_params.tx_rings; q++) { + retval = rte_eth_tx_queue_setup(port, q, tx_ring_size, + rte_eth_dev_socket_id(port), NULL); + if (retval < 0) + return retval; + } + + /* Start the Ethernet port. */ + retval = rte_eth_dev_start(port); + if (retval < 0) + return retval; + + /* Display the port MAC address. */ + struct ether_addr addr; + rte_eth_macaddr_get(port, &addr); + printf("Port %u MAC: %02" PRIx8 " %02" PRIx8 " %02" PRIx8 + " %02" PRIx8 " %02" PRIx8 " %02" PRIx8 "\n", + (unsigned int)port, + addr.addr_bytes[0], addr.addr_bytes[1], + addr.addr_bytes[2], addr.addr_bytes[3], + addr.addr_bytes[4], addr.addr_bytes[5]); + + /* Enable RX in promiscuous mode for the Ethernet device. */ + rte_eth_promiscuous_enable(port); + + return 0; +} + +static inline int +port_init(uint8_t port, struct rte_mempool *mp) +{ + struct rte_eth_conf conf = { 0 }; + return port_init_common(port, &conf, mp); +} + +#define RING_NAME_LEN 20 +#define DEV_NAME_LEN 20 + +static int +init_ports(void) +{ + char ring_name[ETH_NAME_LEN]; + unsigned int i, j; + struct rte_ring * const *c1; + struct rte_ring * const *c2; + int err; + + if (!default_params.mp) + default_params.mp = rte_pktmbuf_pool_create("mbuf_pool", + NB_MBUF, 32, + 0, RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id()); + + if (!default_params.mp) + return -ENOMEM; + + for (i = 0; i < NUM_ETH_DEV; i++) { + for (j = 0; j < MAX_NUM_QUEUE; j++) { + snprintf(ring_name, sizeof(ring_name), "R%u%u", i, j); + default_params.r[i][j] = rte_ring_create(ring_name, + RING_SIZE, + SOCKET0, + RING_F_SP_ENQ | RING_F_SC_DEQ); + TEST_ASSERT((default_params.r[i][j] != NULL), + "Failed to allocate ring"); + } + } + + /* + * To create two pseudo-Ethernet ports where the traffic is + * switched between them, that is, traffic sent to port 1 is + * read back from port 2 and vice-versa + */ + for (i = 0; i < NUM_ETH_PAIR; i++) { + char dev_name[DEV_NAME_LEN]; + int p; + + c1 = default_params.r[i]; + c2 = default_params.r[PAIR_PORT_INDEX(i)]; + + snprintf(dev_name, DEV_NAME_LEN, "%u-%u", i, i + NUM_ETH_PAIR); + p = rte_eth_from_rings(dev_name, c1, MAX_NUM_QUEUE, + c2, MAX_NUM_QUEUE, SOCKET0); + TEST_ASSERT(p >= 0, "Port creation failed %s", dev_name); + err = port_init(p, default_params.mp); + TEST_ASSERT(err == 0, "Port init failed %s", dev_name); + default_params.port[i] = p; + + snprintf(dev_name, DEV_NAME_LEN, "%u-%u", i + NUM_ETH_PAIR, i); + p = rte_eth_from_rings(dev_name, c2, MAX_NUM_QUEUE, + c1, MAX_NUM_QUEUE, SOCKET0); + TEST_ASSERT(p > 0, "Port creation failed %s", dev_name); + err = port_init(p, default_params.mp); + TEST_ASSERT(err == 0, "Port init failed %s", dev_name); + default_params.port[PAIR_PORT_INDEX(i)] = p; + } + + return 0; +} + +static void +deinit_ports(void) +{ + uint16_t i, j; + char name[ETH_NAME_LEN]; + + for (i = 0; i < RTE_DIM(default_params.port); i++) { + rte_eth_dev_stop(default_params.port[i]); + rte_eth_dev_get_name_by_port(default_params.port[i], name); + rte_vdev_uninit(name); + for (j = 0; j < RTE_DIM(default_params.r[i]); j++) + rte_ring_free(default_params.r[i][j]); + } +} + +static int +testsuite_setup(void) +{ + int err; + uint8_t count; + struct rte_event_dev_info dev_info; + uint8_t priority; + 
uint8_t queue_id; + + count = rte_event_dev_count(); + if (!count) { + printf("Failed to find a valid event device," + " testing with event_sw0 device\n"); + rte_vdev_init("event_sw0", NULL); + event_dev_delete = 1; + } + + struct rte_event_dev_config config = { + .nb_event_queues = 1, + .nb_event_ports = 1, + }; + + struct rte_event_queue_conf wkr_q_conf = { + .schedule_type = RTE_SCHED_TYPE_ORDERED, + .priority = RTE_EVENT_DEV_PRIORITY_NORMAL, + .nb_atomic_flows = 1024, + .nb_atomic_order_sequences = 1024, + }; + + err = rte_event_dev_info_get(TEST_DEV_ID, &dev_info); + config.nb_event_queue_flows = dev_info.max_event_queue_flows; + config.nb_event_port_dequeue_depth = + dev_info.max_event_port_dequeue_depth; + config.nb_event_port_enqueue_depth = + dev_info.max_event_port_enqueue_depth; + config.nb_events_limit = + dev_info.max_num_events; + + rte_log_set_level(0, RTE_LOG_DEBUG); + err = rte_event_dev_configure(TEST_DEV_ID, &config); + TEST_ASSERT(err == 0, "Event device initialization failed err %d\n", + err); + if (rte_event_queue_setup(TEST_DEV_ID, 0, &wkr_q_conf) < 0) { + printf("%d: error creating qid %d\n", __LINE__, 0); + return -1; + } + if (rte_event_port_setup(TEST_DEV_ID, 0, NULL) < 0) { + printf("Error setting up port %d\n", 0); + return -1; + } + + priority = RTE_EVENT_DEV_PRIORITY_LOWEST; + if (rte_event_port_link(TEST_DEV_ID, 0, &queue_id, &priority, 1) != 1) { + printf("Error linking port\n"); + return -1; + } + + err = init_ports(); + TEST_ASSERT(err == 0, "Port initialization failed err %d\n", err); + + err = rte_event_eth_tx_adapter_caps_get(TEST_DEV_ID, TEST_ETHDEV_ID, + &default_params.caps); + TEST_ASSERT(err == 0, "Failed to get adapter cap err %d\n", + err); + + return err; +} + +#define DEVICE_ID_SIZE 64 + +static void +testsuite_teardown(void) +{ + deinit_ports(); + rte_mempool_free(default_params.mp); + default_params.mp = NULL; + if (event_dev_delete) + rte_vdev_uninit("event_sw0"); +} + +static int +tx_adapter_create(void) +{ + int err; + struct rte_event_dev_info dev_info; + struct rte_event_port_conf tx_p_conf = {0}; + + err = rte_event_dev_info_get(TEST_DEV_ID, &dev_info); + TEST_ASSERT(err == 0, "Expected 0 got %d", err); + + tx_p_conf.new_event_threshold = dev_info.max_num_events; + tx_p_conf.dequeue_depth = dev_info.max_event_port_dequeue_depth; + tx_p_conf.enqueue_depth = dev_info.max_event_port_enqueue_depth; + err = rte_event_eth_tx_adapter_create(TEST_INST_ID, TEST_DEV_ID, + &tx_p_conf); + TEST_ASSERT(err == 0, "Expected 0 got %d", err); + + return err; +} + +static void +tx_adapter_free(void) +{ + rte_event_eth_tx_adapter_free(TEST_INST_ID); +} + +static int +tx_adapter_create_free(void) +{ + int err; + struct rte_event_dev_info dev_info; + struct rte_event_port_conf tx_p_conf; + + err = rte_event_dev_info_get(TEST_DEV_ID, &dev_info); + TEST_ASSERT(err == 0, "Expected 0 got %d", err); + + tx_p_conf.new_event_threshold = dev_info.max_num_events; + tx_p_conf.dequeue_depth = dev_info.max_event_port_dequeue_depth; + tx_p_conf.enqueue_depth = dev_info.max_event_port_enqueue_depth; + + err = rte_event_eth_tx_adapter_create(TEST_INST_ID, TEST_DEV_ID, + NULL); + TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err); + + err = rte_event_eth_tx_adapter_create(TEST_INST_ID, TEST_DEV_ID, + &tx_p_conf); + TEST_ASSERT(err == 0, "Expected 0 got %d", err); + + err = rte_event_eth_tx_adapter_create(TEST_INST_ID, + TEST_DEV_ID, &tx_p_conf); + TEST_ASSERT(err == -EEXIST, "Expected -EEXIST %d got %d", -EEXIST, err); + + err = 
rte_event_eth_tx_adapter_free(TEST_INST_ID); + TEST_ASSERT(err == 0, "Expected 0 got %d", err); + + err = rte_event_eth_tx_adapter_free(TEST_INST_ID); + TEST_ASSERT(err == -EINVAL, "Expected -EINVAL %d got %d", -EINVAL, err); + + err = rte_event_eth_tx_adapter_free(1); + TEST_ASSERT(err == -EINVAL, "Expected -EINVAL %d got %d", -EINVAL, err); + + return TEST_SUCCESS; +} + +static int +tx_adapter_queue_add_del(void) +{ + int err; + uint32_t cap; + + err = rte_event_eth_tx_adapter_caps_get(TEST_DEV_ID, TEST_ETHDEV_ID, + &cap); + TEST_ASSERT(err == 0, "Expected 0 got %d", err); + + + err = rte_event_eth_tx_adapter_queue_add(TEST_INST_ID, + rte_eth_dev_count_total(), + -1); + TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err); + + err = rte_event_eth_tx_adapter_queue_add(TEST_INST_ID, + TEST_ETHDEV_ID, + 0); + TEST_ASSERT(err == 0, "Expected 0 got %d", err); + + err = rte_event_eth_tx_adapter_queue_add(TEST_INST_ID, + TEST_ETHDEV_ID, + -1); + TEST_ASSERT(err == 0, "Expected 0 got %d", err); + + err = rte_event_eth_tx_adapter_queue_del(TEST_INST_ID, + TEST_ETHDEV_ID, + 0); + TEST_ASSERT(err == 0, "Expected 0 got %d", err); + + err = rte_event_eth_tx_adapter_queue_del(TEST_INST_ID, + TEST_ETHDEV_ID, + -1); + TEST_ASSERT(err == 0, "Expected 0 got %d", err); + + err = rte_event_eth_tx_adapter_queue_del(TEST_INST_ID, + TEST_ETHDEV_ID, + -1); + TEST_ASSERT(err == 0, "Expected 0 got %d", err); + + err = rte_event_eth_tx_adapter_queue_add(1, TEST_ETHDEV_ID, -1); + TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err); + + err = rte_event_eth_tx_adapter_queue_del(1, TEST_ETHDEV_ID, -1); + TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err); + + return TEST_SUCCESS; +} + +static int +tx_adapter_start_stop(void) +{ + int err; + + err = rte_event_eth_tx_adapter_queue_add(TEST_INST_ID, TEST_ETHDEV_ID, + -1); + TEST_ASSERT(err == 0, "Expected 0 got %d", err); + + err = rte_event_eth_tx_adapter_start(TEST_INST_ID); + TEST_ASSERT(err == 0, "Expected 0 got %d", err); + + err = rte_event_eth_tx_adapter_stop(TEST_INST_ID); + TEST_ASSERT(err == 0, "Expected 0 got %d", err); + + err = rte_event_eth_tx_adapter_queue_del(TEST_INST_ID, TEST_ETHDEV_ID, + -1); + TEST_ASSERT(err == 0, "Expected 0 got %d", err); + + err = rte_event_eth_tx_adapter_start(TEST_INST_ID); + TEST_ASSERT(err == 0, "Expected 0 got %d", err); + + err = rte_event_eth_tx_adapter_stop(TEST_INST_ID); + TEST_ASSERT(err == 0, "Expected 0 got %d", err); + + err = rte_event_eth_tx_adapter_start(1); + + err = rte_event_eth_tx_adapter_stop(1); + TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err); + + return TEST_SUCCESS; +} + +static uint32_t eid, tid; + +static int +tx_adapter_single(uint16_t port, uint16_t tx_queue_id, + struct rte_mbuf *m, uint8_t qid, + uint8_t sched_type) +{ + struct rte_event event; + struct rte_mbuf *r; + int ret; + unsigned int l; + + event.queue_id = qid; + event.op = RTE_EVENT_OP_NEW; + event.sched_type = sched_type; + event.mbuf = m; + + m->port = port; + rte_event_eth_tx_adapter_txq_set(m, tx_queue_id); + + l = 0; + while (rte_event_enqueue_burst(TEST_DEV_ID, 0, &event, 1) != 1) { + l++; + if (l > EDEV_RETRY) + break; + } + + TEST_ASSERT(l < EDEV_RETRY, "Unable to enqueue to eventdev"); + l = 0; + while (l++ < EDEV_RETRY) { + + ret = rte_service_run_iter_on_app_lcore(eid, 0); + TEST_ASSERT(ret == 0, "failed to run service %d", ret); + + ret = rte_service_run_iter_on_app_lcore(tid, 0); + TEST_ASSERT(ret == 0, "failed to run service %d", ret); + + if (rte_eth_rx_burst(TEST_ETHDEV_PAIR_ID, 
tx_queue_id, &r, 1)) { + TEST_ASSERT_EQUAL(r, m, "mbuf comparison failed" + " expected %p received %p", m, r); + return 0; + } + } + + TEST_ASSERT(0, "Failed to receive packet"); + return -1; +} + +static int +tx_adapter_service(void) +{ + struct rte_event_eth_tx_adapter_stats stats; + uint32_t i; + int err; + uint8_t ev_port, ev_qid; + struct rte_mbuf bufs[RING_SIZE]; + struct rte_mbuf *pbufs[RING_SIZE]; + struct rte_event_dev_info dev_info; + struct rte_event_dev_config dev_conf; + struct rte_event_queue_conf qconf; + uint32_t qcnt, pcnt; + uint16_t q; + int internal_port; + + err = rte_event_eth_tx_adapter_caps_get(TEST_DEV_ID, TEST_ETHDEV_ID, + &default_params.caps); + TEST_ASSERT(err == 0, "Failed to get adapter cap err %d\n", + err); + + internal_port = !!(default_params.caps & + RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT); + + if (internal_port) + return TEST_SUCCESS; + + err = rte_event_eth_tx_adapter_queue_add(TEST_INST_ID, TEST_ETHDEV_ID, + -1); + TEST_ASSERT(err == 0, "Expected 0 got %d", err); + + err = rte_event_eth_tx_adapter_event_port_get(TEST_INST_ID, + &ev_port); + TEST_ASSERT_SUCCESS(err, "Failed to get event port %d", err); + + err = rte_event_dev_attr_get(TEST_DEV_ID, RTE_EVENT_DEV_ATTR_PORT_COUNT, + &pcnt); + TEST_ASSERT_SUCCESS(err, "Port count get failed"); + + err = rte_event_dev_attr_get(TEST_DEV_ID, + RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &qcnt); + TEST_ASSERT_SUCCESS(err, "Queue count get failed"); + + err = rte_event_dev_info_get(TEST_DEV_ID, &dev_info); + TEST_ASSERT_SUCCESS(err, "Dev info failed"); + + dev_conf.nb_event_queue_flows = dev_info.max_event_queue_flows; + dev_conf.nb_event_port_dequeue_depth = + dev_info.max_event_port_dequeue_depth; + dev_conf.nb_event_port_enqueue_depth = + dev_info.max_event_port_enqueue_depth; + dev_conf.nb_events_limit = + dev_info.max_num_events; + dev_conf.nb_event_queues = qcnt + 1; + dev_conf.nb_event_ports = pcnt; + err = rte_event_dev_configure(TEST_DEV_ID, &dev_conf); + TEST_ASSERT(err == 0, "Event device initialization failed err %d\n", + err); + + ev_qid = qcnt; + qconf.nb_atomic_flows = dev_info.max_event_queue_flows; + qconf.nb_atomic_order_sequences = 32; + qconf.schedule_type = RTE_SCHED_TYPE_ATOMIC; + qconf.priority = RTE_EVENT_DEV_PRIORITY_HIGHEST; + qconf.event_queue_cfg = RTE_EVENT_QUEUE_CFG_SINGLE_LINK; + err = rte_event_queue_setup(TEST_DEV_ID, ev_qid, &qconf); + TEST_ASSERT_SUCCESS(err, "Failed to setup queue %u", ev_qid); + + err = rte_event_port_link(TEST_DEV_ID, ev_port, &ev_qid, NULL, 1); + TEST_ASSERT(err == 1, "Failed to link queue port %u", + ev_port); + + err = rte_event_eth_tx_adapter_start(TEST_INST_ID); + TEST_ASSERT(err == 0, "Expected 0 got %d", err); + + err = rte_event_dev_service_id_get(0, &eid); + TEST_ASSERT(err == 0, "Expected 0 got %d", err); + + err = rte_event_eth_tx_adapter_service_id_get(TEST_INST_ID, &tid); + TEST_ASSERT(err == 0, "Expected 0 got %d", err); + + err = rte_service_runstate_set(tid, 1); + TEST_ASSERT(err == 0, "Expected 0 got %d", err); + + err = rte_service_set_runstate_mapped_check(tid, 0); + TEST_ASSERT(err == 0, "Expected 0 got %d", err); + + err = rte_service_runstate_set(eid, 1); + TEST_ASSERT(err == 0, "Expected 0 got %d", err); + + err = rte_service_set_runstate_mapped_check(eid, 0); + TEST_ASSERT(err == 0, "Expected 0 got %d", err); + + err = rte_event_dev_start(TEST_DEV_ID); + TEST_ASSERT(err == 0, "Expected 0 got %d", err); + + for (q = 0; q < MAX_NUM_QUEUE; q++) { + for (i = 0; i < RING_SIZE; i++) + pbufs[i] = &bufs[i]; + for (i = 0; i < RING_SIZE; i++) { + 
pbufs[i] = &bufs[i]; + err = tx_adapter_single(TEST_ETHDEV_ID, q, pbufs[i], + ev_qid, + RTE_SCHED_TYPE_ORDERED); + TEST_ASSERT(err == 0, "Expected 0 got %d", err); + } + for (i = 0; i < RING_SIZE; i++) { + TEST_ASSERT_EQUAL(pbufs[i], &bufs[i], + "Error: received data does not match" + " that transmitted"); + } + } + + err = rte_event_eth_tx_adapter_stats_get(TEST_INST_ID, NULL); + TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err); + + err = rte_event_eth_tx_adapter_stats_get(TEST_INST_ID, &stats); + TEST_ASSERT(err == 0, "Expected 0 got %d", err); + TEST_ASSERT_EQUAL(stats.tx_packets, MAX_NUM_QUEUE * RING_SIZE, + "stats.tx_packets expected %u got %lu", + MAX_NUM_QUEUE * RING_SIZE, + stats.tx_packets); + + err = rte_event_eth_tx_adapter_stats_reset(TEST_INST_ID); + TEST_ASSERT(err == 0, "Expected 0 got %d", err); + + err = rte_event_eth_tx_adapter_stats_get(TEST_INST_ID, &stats); + TEST_ASSERT(err == 0, "Expected 0 got %d", err); + TEST_ASSERT_EQUAL(stats.tx_packets, 0, + "stats.tx_packets expected %u got %lu", + 0, + stats.tx_packets); + + err = rte_event_eth_tx_adapter_stats_get(1, &stats); + TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err); + + err = rte_event_eth_tx_adapter_queue_del(TEST_INST_ID, TEST_ETHDEV_ID, + -1); + TEST_ASSERT(err == 0, "Expected 0 got %d", err); + + err = rte_event_eth_tx_adapter_free(TEST_INST_ID); + TEST_ASSERT(err == 0, "Expected 0 got %d", err); + + rte_event_dev_stop(TEST_DEV_ID); + + return TEST_SUCCESS; +} + +static int +tx_adapter_dynamic_device(void) +{ + uint16_t port_id = rte_eth_dev_count_avail(); + const char *null_dev[2] = { "eth_null0", "eth_null1" }; + struct rte_eth_conf dev_conf = {0}; + int ret; + size_t i; + + for (i = 0; i < RTE_DIM(null_dev); i++) { + ret = rte_vdev_init(null_dev[i], NULL); + TEST_ASSERT_SUCCESS(ret, "%s Port creation failed %d", + null_dev[i], ret); + + if (i == 0) { + ret = tx_adapter_create(); + TEST_ASSERT_SUCCESS(ret, "Adapter create failed %d", + ret); + } + + ret = rte_eth_dev_configure(port_id + i, MAX_NUM_QUEUE, + MAX_NUM_QUEUE, &dev_conf); + TEST_ASSERT_SUCCESS(ret, "Failed to configure device %d", ret); + + ret = rte_event_eth_tx_adapter_queue_add(TEST_INST_ID, + port_id + i, 0); + TEST_ASSERT_SUCCESS(ret, "Failed to add queues %d", ret); + + } + + for (i = 0; i < RTE_DIM(null_dev); i++) { + ret = rte_event_eth_tx_adapter_queue_del(TEST_INST_ID, + port_id + i, -1); + TEST_ASSERT_SUCCESS(ret, "Failed to delete queues %d", ret); + } + + tx_adapter_free(); + + for (i = 0; i < RTE_DIM(null_dev); i++) + rte_vdev_uninit(null_dev[i]); + + return TEST_SUCCESS; +} + +static struct unit_test_suite event_eth_tx_tests = { + .setup = testsuite_setup, + .teardown = testsuite_teardown, + .suite_name = "tx event eth adapter test suite", + .unit_test_cases = { + TEST_CASE_ST(NULL, NULL, tx_adapter_create_free), + TEST_CASE_ST(tx_adapter_create, tx_adapter_free, + tx_adapter_queue_add_del), + TEST_CASE_ST(tx_adapter_create, tx_adapter_free, + tx_adapter_start_stop), + TEST_CASE_ST(tx_adapter_create, tx_adapter_free, + tx_adapter_service), + TEST_CASE_ST(NULL, NULL, tx_adapter_dynamic_device), + TEST_CASES_END() /**< NULL terminate unit test array */ + } +}; + +static int +test_event_eth_tx_adapter_common(void) +{ + return unit_test_suite_runner(&event_eth_tx_tests); +} + +REGISTER_TEST_COMMAND(event_eth_tx_adapter_autotest, + test_event_eth_tx_adapter_common); diff --git a/MAINTAINERS b/MAINTAINERS index 13f378a..2930f6c 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -395,6 +395,7 @@ Eventdev Ethdev Tx Adapter 
API - EXPERIMENTAL M: Nikhil Rao T: git://dpdk.org/next/dpdk-next-eventdev F: lib/librte_eventdev/*eth_tx_adapter* +F: test/test/test_event_eth_tx_adapter.c Raw device API - EXPERIMENTAL M: Shreyansh Jain diff --git a/test/test/Makefile b/test/test/Makefile index e6967ba..dcea441 100644 --- a/test/test/Makefile +++ b/test/test/Makefile @@ -191,6 +191,7 @@ ifeq ($(CONFIG_RTE_LIBRTE_EVENTDEV),y) SRCS-y += test_eventdev.c SRCS-y += test_event_ring.c SRCS-y += test_event_eth_rx_adapter.c +SRCS-y += test_event_eth_tx_adapter.c SRCS-y += test_event_timer_adapter.c SRCS-y += test_event_crypto_adapter.c endif diff --git a/test/test/meson.build b/test/test/meson.build index b1dd6ec..3d2887b 100644 --- a/test/test/meson.build +++ b/test/test/meson.build @@ -34,6 +34,7 @@ test_sources = files('commands.c', 'test_efd_perf.c', 'test_errno.c', 'test_event_ring.c', + 'test_event_eth_tx_adapter.c', 'test_eventdev.c', 'test_func_reentrancy.c', 'test_flow_classify.c', @@ -152,6 +153,7 @@ test_names = [ 'efd_perf_autotest', 'errno_autotest', 'event_ring_autotest', + 'event_eth_tx_adapter_autotest', 'eventdev_common_autotest', 'eventdev_octeontx_autotest', 'eventdev_sw_autotest', -- 1.8.3.1
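
For readers unfamiliar with the API under test, the sketch below shows the application-side setup that tx_adapter_service() drives by hand with rte_service_run_iter_on_app_lcore(): create the adapter, bind a port's Tx queues to it, map its service to a service lcore when the PMD does not report the INTERNAL_PORT capability, then set the Tx queue in each mbuf before enqueuing the event. This is a minimal illustration only, not part of the patch; the helper names setup_tx_adapter() and tx_one() and the service_lcore parameter are placeholders.

/*
 * Minimal sketch (not part of the patch): service-based Tx adapter setup
 * as exercised by tx_adapter_service(). Helper names and the
 * service_lcore parameter are illustrative only.
 */
static int
setup_tx_adapter(uint8_t adapter_id, uint8_t evdev_id, uint16_t eth_port,
		uint32_t service_lcore,
		struct rte_event_port_conf *port_conf)
{
	uint32_t service_id;
	int ret;

	ret = rte_event_eth_tx_adapter_create(adapter_id, evdev_id, port_conf);
	if (ret)
		return ret;

	/* -1 binds all Tx queues of the port to the adapter */
	ret = rte_event_eth_tx_adapter_queue_add(adapter_id, eth_port, -1);
	if (ret)
		return ret;

	/*
	 * A service id is only reported when the PMD lacks the
	 * RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT capability; in that
	 * case the service is mapped to a service lcore here, whereas the
	 * test runs it directly with rte_service_run_iter_on_app_lcore().
	 */
	ret = rte_event_eth_tx_adapter_service_id_get(adapter_id, &service_id);
	if (ret == 0) {
		rte_service_map_lcore_set(service_id, service_lcore, 1);
		rte_service_runstate_set(service_id, 1);
	}

	return rte_event_eth_tx_adapter_start(adapter_id);
}

/*
 * Per-packet transmit path: record the destination port and Tx queue in
 * the mbuf, then enqueue the event to the queue linked to the adapter's
 * event port (mirrors tx_adapter_single() in the test).
 */
static inline int
tx_one(uint8_t evdev_id, uint8_t ev_port, uint8_t tx_ev_qid,
	struct rte_mbuf *m, uint16_t eth_port, uint16_t txq)
{
	struct rte_event ev = {
		.queue_id = tx_ev_qid,
		.op = RTE_EVENT_OP_NEW,
		.sched_type = RTE_SCHED_TYPE_ATOMIC,
		.mbuf = m,
	};

	m->port = eth_port;
	rte_event_eth_tx_adapter_txq_set(m, txq);
	return rte_event_enqueue_burst(evdev_id, ev_port, &ev, 1) == 1 ? 0 : -1;
}

Once the patch is applied, the new case can be run from the DPDK test application by entering event_eth_tx_adapter_autotest at the RTE>> prompt, matching the command registered by REGISTER_TEST_COMMAND() above.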