DPDK patches and discussions
From: Nikhil Rao <nikhil.rao@intel.com>
To: jerin.jacob@caviumnetworks.com, olivier.matz@6wind.com,
	marko.kovacevic@intel.com, john.mcnamara@intel.com
Cc: dev@dpdk.org, Nikhil Rao <nikhil.rao@intel.com>
Subject: [dpdk-dev] [PATCH v4 4/5] eventdev: add auto test for eth Tx adapter
Date: Thu, 20 Sep 2018 23:11:15 +0530
Message-ID: <1537465276-77264-4-git-send-email-nikhil.rao@intel.com>
In-Reply-To: <1537465276-77264-1-git-send-email-nikhil.rao@intel.com>

This patch adds tests for the eth Tx adapter APIs. It also
tests the data path of the rte_service function based
implementation of these APIs.

Signed-off-by: Nikhil Rao <nikhil.rao@intel.com>
Acked-by: Jerin Jacob <jerin.jacob@caviumnetworks.com>
---
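For reference, the sketch below condenses the data path that
tx_adapter_single() in this test exercises: record the destination Tx
queue in the mbuf, wrap the mbuf in a NEW event and enqueue it to the
event device; the adapter then dequeues the event and transmits the
mbuf. The function and parameter names (send_one, dev_id, ev_port,
ev_qid, eth_port, txq) are illustrative placeholders rather than part
of the patch; the test derives the corresponding values at runtime.

/*
 * Illustrative sketch (not part of the patch): enqueue one mbuf via
 * the Tx adapter. The adapter's service function (or the device's
 * internal port, where supported) later dequeues the event and
 * transmits the mbuf on the ethdev queue recorded in it.
 */
#include <rte_eventdev.h>
#include <rte_event_eth_tx_adapter.h>
#include <rte_mbuf.h>

static int
send_one(uint8_t dev_id, uint8_t ev_port, uint8_t ev_qid,
	 uint16_t eth_port, uint16_t txq, struct rte_mbuf *m)
{
	struct rte_event ev = { 0 };

	ev.queue_id = ev_qid;	/* event queue serviced by the adapter */
	ev.op = RTE_EVENT_OP_NEW;
	ev.event_type = RTE_EVENT_TYPE_CPU;
	ev.sched_type = RTE_SCHED_TYPE_ORDERED;
	ev.mbuf = m;

	/* Record the destination ethdev port and Tx queue in the mbuf. */
	m->port = eth_port;
	rte_event_eth_tx_adapter_txq_set(m, txq);

	/* Hand the event to the event device; 0 on success, -1 otherwise. */
	return rte_event_enqueue_burst(dev_id, ev_port, &ev, 1) == 1 ? 0 : -1;
}

For the rte_service based implementation the test then drives the
eventdev scheduler and the adapter service with
rte_service_run_iter_on_app_lcore() and reads the packet back on the
paired ring port. The autotest is registered as
event_eth_tx_adapter_autotest and is run from the test application's
command prompt.
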
 test/test/test_event_eth_tx_adapter.c | 699 ++++++++++++++++++++++++++++++++++
 MAINTAINERS                           |   1 +
 test/test/Makefile                    |   1 +
 test/test/meson.build                 |   2 +
 4 files changed, 703 insertions(+)
 create mode 100644 test/test/test_event_eth_tx_adapter.c

diff --git a/test/test/test_event_eth_tx_adapter.c b/test/test/test_event_eth_tx_adapter.c
new file mode 100644
index 0000000..c26c515
--- /dev/null
+++ b/test/test/test_event_eth_tx_adapter.c
@@ -0,0 +1,699 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ */
+
+#include <string.h>
+
+#include <rte_bus_vdev.h>
+#include <rte_common.h>
+#include <rte_ethdev.h>
+#include <rte_eth_ring.h>
+#include <rte_eventdev.h>
+#include <rte_event_eth_tx_adapter.h>
+#include <rte_mbuf.h>
+#include <rte_mempool.h>
+#include <rte_service.h>
+
+#include "test.h"
+
+#define MAX_NUM_QUEUE		RTE_PMD_RING_MAX_RX_RINGS
+#define TEST_INST_ID		0
+#define TEST_DEV_ID		0
+#define SOCKET0			0
+#define RING_SIZE		256
+#define ETH_NAME_LEN		32
+#define NUM_ETH_PAIR		1
+#define NUM_ETH_DEV		(2 * NUM_ETH_PAIR)
+#define NB_MBUF			512
+#define PAIR_PORT_INDEX(p)	((p) + NUM_ETH_PAIR)
+#define PORT(p)			default_params.port[(p)]
+#define TEST_ETHDEV_ID		PORT(0)
+#define TEST_ETHDEV_PAIR_ID	PORT(PAIR_PORT_INDEX(0))
+
+#define EDEV_RETRY		0xffff
+
+struct event_eth_tx_adapter_test_params {
+	struct rte_mempool *mp;
+	uint16_t rx_rings, tx_rings;
+	struct rte_ring *r[NUM_ETH_DEV][MAX_NUM_QUEUE];
+	int port[NUM_ETH_DEV];
+};
+
+static int event_dev_delete;
+static struct event_eth_tx_adapter_test_params default_params;
+static uint64_t eid = ~0ULL;
+static uint32_t tid;
+
+static inline int
+port_init_common(uint8_t port, const struct rte_eth_conf *port_conf,
+		struct rte_mempool *mp)
+{
+	const uint16_t rx_ring_size = RING_SIZE, tx_ring_size = RING_SIZE;
+	int retval;
+	uint16_t q;
+
+	if (!rte_eth_dev_is_valid_port(port))
+		return -1;
+
+	default_params.rx_rings = MAX_NUM_QUEUE;
+	default_params.tx_rings = MAX_NUM_QUEUE;
+
+	/* Configure the Ethernet device. */
+	retval = rte_eth_dev_configure(port, default_params.rx_rings,
+				default_params.tx_rings, port_conf);
+	if (retval != 0)
+		return retval;
+
+	for (q = 0; q < default_params.rx_rings; q++) {
+		retval = rte_eth_rx_queue_setup(port, q, rx_ring_size,
+				rte_eth_dev_socket_id(port), NULL, mp);
+		if (retval < 0)
+			return retval;
+	}
+
+	for (q = 0; q < default_params.tx_rings; q++) {
+		retval = rte_eth_tx_queue_setup(port, q, tx_ring_size,
+				rte_eth_dev_socket_id(port), NULL);
+		if (retval < 0)
+			return retval;
+	}
+
+	/* Start the Ethernet port. */
+	retval = rte_eth_dev_start(port);
+	if (retval < 0)
+		return retval;
+
+	/* Display the port MAC address. */
+	struct ether_addr addr;
+	rte_eth_macaddr_get(port, &addr);
+	printf("Port %u MAC: %02" PRIx8 " %02" PRIx8 " %02" PRIx8
+			   " %02" PRIx8 " %02" PRIx8 " %02" PRIx8 "\n",
+			(unsigned int)port,
+			addr.addr_bytes[0], addr.addr_bytes[1],
+			addr.addr_bytes[2], addr.addr_bytes[3],
+			addr.addr_bytes[4], addr.addr_bytes[5]);
+
+	/* Enable RX in promiscuous mode for the Ethernet device. */
+	rte_eth_promiscuous_enable(port);
+
+	return 0;
+}
+
+static inline int
+port_init(uint8_t port, struct rte_mempool *mp)
+{
+	struct rte_eth_conf conf = { 0 };
+	return port_init_common(port, &conf, mp);
+}
+
+#define RING_NAME_LEN	20
+#define DEV_NAME_LEN	20
+
+static int
+init_ports(void)
+{
+	char ring_name[ETH_NAME_LEN];
+	unsigned int i, j;
+	struct rte_ring * const *c1;
+	struct rte_ring * const *c2;
+	int err;
+
+	if (!default_params.mp)
+		default_params.mp = rte_pktmbuf_pool_create("mbuf_pool",
+			NB_MBUF, 32,
+			0, RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
+
+	if (!default_params.mp)
+		return -ENOMEM;
+
+	for (i = 0; i < NUM_ETH_DEV; i++) {
+		for (j = 0; j < MAX_NUM_QUEUE; j++) {
+			snprintf(ring_name, sizeof(ring_name), "R%u%u", i, j);
+			default_params.r[i][j] = rte_ring_create(ring_name,
+						RING_SIZE,
+						SOCKET0,
+						RING_F_SP_ENQ | RING_F_SC_DEQ);
+			TEST_ASSERT((default_params.r[i][j] != NULL),
+				"Failed to allocate ring");
+		}
+	}
+
+	/*
+	 * Create two pseudo-Ethernet ports with the traffic switched
+	 * between them, i.e. traffic sent to port 1 is read back from
+	 * port 2 and vice-versa.
+	 */
+	for (i = 0; i < NUM_ETH_PAIR; i++) {
+		char dev_name[DEV_NAME_LEN];
+		int p;
+
+		c1 = default_params.r[i];
+		c2 = default_params.r[PAIR_PORT_INDEX(i)];
+
+		snprintf(dev_name, DEV_NAME_LEN, "%u-%u", i, i + NUM_ETH_PAIR);
+		p = rte_eth_from_rings(dev_name, c1, MAX_NUM_QUEUE,
+				 c2, MAX_NUM_QUEUE, SOCKET0);
+		TEST_ASSERT(p >= 0, "Port creation failed %s", dev_name);
+		err = port_init(p, default_params.mp);
+		TEST_ASSERT(err == 0, "Port init failed %s", dev_name);
+		default_params.port[i] = p;
+
+		snprintf(dev_name, DEV_NAME_LEN, "%u-%u",  i + NUM_ETH_PAIR, i);
+		p = rte_eth_from_rings(dev_name, c2, MAX_NUM_QUEUE,
+				c1, MAX_NUM_QUEUE, SOCKET0);
+		TEST_ASSERT(p >= 0, "Port creation failed %s", dev_name);
+		err = port_init(p, default_params.mp);
+		TEST_ASSERT(err == 0, "Port init failed %s", dev_name);
+		default_params.port[PAIR_PORT_INDEX(i)] = p;
+	}
+
+	return 0;
+}
+
+static void
+deinit_ports(void)
+{
+	uint16_t i, j;
+	char name[ETH_NAME_LEN];
+
+	for (i = 0; i < RTE_DIM(default_params.port); i++) {
+		rte_eth_dev_stop(default_params.port[i]);
+		rte_eth_dev_get_name_by_port(default_params.port[i], name);
+		rte_vdev_uninit(name);
+		for (j = 0; j < RTE_DIM(default_params.r[i]); j++)
+			rte_ring_free(default_params.r[i][j]);
+	}
+}
+
+static int
+testsuite_setup(void)
+{
+	const char *vdev_name = "event_sw0";
+
+	int err = init_ports();
+	TEST_ASSERT(err == 0, "Port initialization failed err %d\n", err);
+
+	if (rte_event_dev_count() == 0) {
+		printf("Failed to find a valid event device,"
+			" testing with event_sw0 device\n");
+		err = rte_vdev_init(vdev_name, NULL);
+		TEST_ASSERT(err == 0, "vdev %s creation failed  %d\n",
+			vdev_name, err);
+		event_dev_delete = 1;
+	}
+	return err;
+}
+
+#define DEVICE_ID_SIZE 64
+
+static void
+testsuite_teardown(void)
+{
+	deinit_ports();
+	rte_mempool_free(default_params.mp);
+	default_params.mp = NULL;
+	if (event_dev_delete)
+		rte_vdev_uninit("event_sw0");
+}
+
+static int
+tx_adapter_create(void)
+{
+	int err;
+	struct rte_event_dev_info dev_info;
+	struct rte_event_port_conf tx_p_conf;
+	uint8_t priority;
+	uint8_t queue_id;
+
+	struct rte_event_dev_config config = {
+			.nb_event_queues = 1,
+			.nb_event_ports = 1,
+	};
+
+	struct rte_event_queue_conf wkr_q_conf = {
+			.schedule_type = RTE_SCHED_TYPE_ORDERED,
+			.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
+			.nb_atomic_flows = 1024,
+			.nb_atomic_order_sequences = 1024,
+	};
+
+	memset(&tx_p_conf, 0, sizeof(tx_p_conf));
+	err = rte_event_dev_info_get(TEST_DEV_ID, &dev_info);
+	config.nb_event_queue_flows = dev_info.max_event_queue_flows;
+	config.nb_event_port_dequeue_depth =
+			dev_info.max_event_port_dequeue_depth;
+	config.nb_event_port_enqueue_depth =
+			dev_info.max_event_port_enqueue_depth;
+	config.nb_events_limit =
+			dev_info.max_num_events;
+
+	err = rte_event_dev_configure(TEST_DEV_ID, &config);
+	TEST_ASSERT(err == 0, "Event device initialization failed err %d\n",
+			err);
+
+	queue_id = 0;
+	err = rte_event_queue_setup(TEST_DEV_ID, 0, &wkr_q_conf);
+	TEST_ASSERT(err == 0, "Event queue setup failed %d\n", err);
+
+	err = rte_event_port_setup(TEST_DEV_ID, 0, NULL);
+	TEST_ASSERT(err == 0, "Event port setup failed %d\n", err);
+
+	priority = RTE_EVENT_DEV_PRIORITY_LOWEST;
+	err = rte_event_port_link(TEST_DEV_ID, 0, &queue_id, &priority, 1);
+	TEST_ASSERT(err == 1, "Error linking port %s\n",
+		rte_strerror(rte_errno));
+	err = rte_event_dev_info_get(TEST_DEV_ID, &dev_info);
+	TEST_ASSERT(err == 0, "Expected 0 got %d", err);
+
+	tx_p_conf.new_event_threshold = dev_info.max_num_events;
+	tx_p_conf.dequeue_depth = dev_info.max_event_port_dequeue_depth;
+	tx_p_conf.enqueue_depth = dev_info.max_event_port_enqueue_depth;
+	err = rte_event_eth_tx_adapter_create(TEST_INST_ID, TEST_DEV_ID,
+					&tx_p_conf);
+	TEST_ASSERT(err == 0, "Expected 0 got %d", err);
+
+	return err;
+}
+
+static void
+tx_adapter_free(void)
+{
+	rte_event_eth_tx_adapter_free(TEST_INST_ID);
+}
+
+static int
+tx_adapter_create_free(void)
+{
+	int err;
+	struct rte_event_dev_info dev_info;
+	struct rte_event_port_conf tx_p_conf;
+
+	err = rte_event_dev_info_get(TEST_DEV_ID, &dev_info);
+	TEST_ASSERT(err == 0, "Expected 0 got %d", err);
+
+	tx_p_conf.new_event_threshold = dev_info.max_num_events;
+	tx_p_conf.dequeue_depth = dev_info.max_event_port_dequeue_depth;
+	tx_p_conf.enqueue_depth = dev_info.max_event_port_enqueue_depth;
+
+	err = rte_event_eth_tx_adapter_create(TEST_INST_ID, TEST_DEV_ID,
+					NULL);
+	TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err);
+
+	err = rte_event_eth_tx_adapter_create(TEST_INST_ID, TEST_DEV_ID,
+					&tx_p_conf);
+	TEST_ASSERT(err == 0, "Expected 0 got %d", err);
+
+	err = rte_event_eth_tx_adapter_create(TEST_INST_ID,
+					TEST_DEV_ID, &tx_p_conf);
+	TEST_ASSERT(err == -EEXIST, "Expected -EEXIST %d got %d", -EEXIST, err);
+
+	err = rte_event_eth_tx_adapter_free(TEST_INST_ID);
+	TEST_ASSERT(err == 0, "Expected 0 got %d", err);
+
+	err = rte_event_eth_tx_adapter_free(TEST_INST_ID);
+	TEST_ASSERT(err == -EINVAL, "Expected -EINVAL %d got %d", -EINVAL, err);
+
+	err = rte_event_eth_tx_adapter_free(1);
+	TEST_ASSERT(err == -EINVAL, "Expected -EINVAL %d got %d", -EINVAL, err);
+
+	return TEST_SUCCESS;
+}
+
+static int
+tx_adapter_queue_add_del(void)
+{
+	int err;
+	uint32_t cap;
+
+	err = rte_event_eth_tx_adapter_caps_get(TEST_DEV_ID, TEST_ETHDEV_ID,
+					 &cap);
+	TEST_ASSERT(err == 0, "Expected 0 got %d", err);
+
+
+	err = rte_event_eth_tx_adapter_queue_add(TEST_INST_ID,
+						rte_eth_dev_count_total(),
+						-1);
+	TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err);
+
+	err = rte_event_eth_tx_adapter_queue_add(TEST_INST_ID,
+						TEST_ETHDEV_ID,
+						0);
+	TEST_ASSERT(err == 0, "Expected 0 got %d", err);
+
+	err = rte_event_eth_tx_adapter_queue_add(TEST_INST_ID,
+						TEST_ETHDEV_ID,
+						-1);
+	TEST_ASSERT(err == 0, "Expected 0 got %d", err);
+
+	err = rte_event_eth_tx_adapter_queue_del(TEST_INST_ID,
+						TEST_ETHDEV_ID,
+						0);
+	TEST_ASSERT(err == 0, "Expected 0 got %d", err);
+
+	err = rte_event_eth_tx_adapter_queue_del(TEST_INST_ID,
+						TEST_ETHDEV_ID,
+						-1);
+	TEST_ASSERT(err == 0, "Expected 0 got %d", err);
+
+	err = rte_event_eth_tx_adapter_queue_del(TEST_INST_ID,
+						TEST_ETHDEV_ID,
+						-1);
+	TEST_ASSERT(err == 0, "Expected 0 got %d", err);
+
+	err = rte_event_eth_tx_adapter_queue_add(1, TEST_ETHDEV_ID, -1);
+	TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err);
+
+	err = rte_event_eth_tx_adapter_queue_del(1, TEST_ETHDEV_ID, -1);
+	TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err);
+
+	return TEST_SUCCESS;
+}
+
+static int
+tx_adapter_start_stop(void)
+{
+	int err;
+
+	err = rte_event_eth_tx_adapter_queue_add(TEST_INST_ID, TEST_ETHDEV_ID,
+						-1);
+	TEST_ASSERT(err == 0, "Expected 0 got %d", err);
+
+	err = rte_event_eth_tx_adapter_start(TEST_INST_ID);
+	TEST_ASSERT(err == 0, "Expected 0 got %d", err);
+
+	err = rte_event_eth_tx_adapter_stop(TEST_INST_ID);
+	TEST_ASSERT(err == 0, "Expected 0 got %d", err);
+
+	err = rte_event_eth_tx_adapter_queue_del(TEST_INST_ID, TEST_ETHDEV_ID,
+						-1);
+	TEST_ASSERT(err == 0, "Expected 0 got %d", err);
+
+	err = rte_event_eth_tx_adapter_start(TEST_INST_ID);
+	TEST_ASSERT(err == 0, "Expected 0 got %d", err);
+
+	err = rte_event_eth_tx_adapter_stop(TEST_INST_ID);
+	TEST_ASSERT(err == 0, "Expected 0 got %d", err);
+
+	err = rte_event_eth_tx_adapter_start(1);
+	TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err);
+	err = rte_event_eth_tx_adapter_stop(1);
+	TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err);
+
+	return TEST_SUCCESS;
+}
+
+
+static int
+tx_adapter_single(uint16_t port, uint16_t tx_queue_id,
+		struct rte_mbuf *m, uint8_t qid,
+		uint8_t sched_type)
+{
+	struct rte_event event;
+	struct rte_mbuf *r;
+	int ret;
+	unsigned int l;
+
+	event.queue_id = qid;
+	event.op = RTE_EVENT_OP_NEW;
+	event.event_type = RTE_EVENT_TYPE_CPU;
+	event.sched_type = sched_type;
+	event.mbuf = m;
+
+	m->port = port;
+	rte_event_eth_tx_adapter_txq_set(m, tx_queue_id);
+
+	l = 0;
+	while (rte_event_enqueue_burst(TEST_DEV_ID, 0, &event, 1) != 1) {
+		l++;
+		if (l > EDEV_RETRY)
+			break;
+	}
+
+	TEST_ASSERT(l < EDEV_RETRY, "Unable to enqueue to eventdev");
+	l = 0;
+	while (l++ < EDEV_RETRY) {
+
+		if (eid != ~0ULL) {
+			ret = rte_service_run_iter_on_app_lcore(eid, 0);
+			TEST_ASSERT(ret == 0, "failed to run service %d", ret);
+		}
+
+		ret = rte_service_run_iter_on_app_lcore(tid, 0);
+		TEST_ASSERT(ret == 0, "failed to run service %d", ret);
+
+		if (rte_eth_rx_burst(TEST_ETHDEV_PAIR_ID, tx_queue_id,
+				&r, 1)) {
+			TEST_ASSERT_EQUAL(r, m, "mbuf comparison failed"
+					" expected %p received %p", m, r);
+			return 0;
+		}
+	}
+
+	TEST_ASSERT(0, "Failed to receive packet");
+	return -1;
+}
+
+static int
+tx_adapter_service(void)
+{
+	struct rte_event_eth_tx_adapter_stats stats;
+	uint32_t i;
+	int err;
+	uint8_t ev_port, ev_qid;
+	struct rte_mbuf bufs[RING_SIZE];
+	struct rte_mbuf *pbufs[RING_SIZE];
+	struct rte_event_dev_info dev_info;
+	struct rte_event_dev_config dev_conf;
+	struct rte_event_queue_conf qconf;
+	uint32_t qcnt, pcnt;
+	uint16_t q;
+	int internal_port;
+	uint32_t cap;
+
+	memset(&dev_conf, 0, sizeof(dev_conf));
+	err = rte_event_eth_tx_adapter_caps_get(TEST_DEV_ID, TEST_ETHDEV_ID,
+						&cap);
+	TEST_ASSERT(err == 0, "Failed to get adapter cap err %d\n", err);
+
+	internal_port = !!(cap & RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT);
+	if (internal_port)
+		return TEST_SUCCESS;
+
+	err = rte_event_eth_tx_adapter_queue_add(TEST_INST_ID, TEST_ETHDEV_ID,
+						-1);
+	TEST_ASSERT(err == 0, "Expected 0 got %d", err);
+
+	err = rte_event_eth_tx_adapter_event_port_get(TEST_INST_ID,
+						&ev_port);
+	TEST_ASSERT_SUCCESS(err, "Failed to get event port %d", err);
+
+	err = rte_event_dev_attr_get(TEST_DEV_ID, RTE_EVENT_DEV_ATTR_PORT_COUNT,
+					&pcnt);
+	TEST_ASSERT_SUCCESS(err, "Port count get failed");
+
+	err = rte_event_dev_attr_get(TEST_DEV_ID,
+				RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &qcnt);
+	TEST_ASSERT_SUCCESS(err, "Queue count get failed");
+
+	err = rte_event_dev_info_get(TEST_DEV_ID, &dev_info);
+	TEST_ASSERT_SUCCESS(err, "Dev info failed");
+
+	dev_conf.nb_event_queue_flows = dev_info.max_event_queue_flows;
+	dev_conf.nb_event_port_dequeue_depth =
+			dev_info.max_event_port_dequeue_depth;
+	dev_conf.nb_event_port_enqueue_depth =
+			dev_info.max_event_port_enqueue_depth;
+	dev_conf.nb_events_limit =
+			dev_info.max_num_events;
+	dev_conf.nb_event_queues = qcnt + 1;
+	dev_conf.nb_event_ports = pcnt;
+	err = rte_event_dev_configure(TEST_DEV_ID, &dev_conf);
+	TEST_ASSERT(err == 0, "Event device initialization failed err %d\n",
+			err);
+
+	ev_qid = qcnt;
+	qconf.nb_atomic_flows = dev_info.max_event_queue_flows;
+	qconf.nb_atomic_order_sequences = 32;
+	qconf.schedule_type = RTE_SCHED_TYPE_ATOMIC;
+	qconf.priority = RTE_EVENT_DEV_PRIORITY_HIGHEST;
+	qconf.event_queue_cfg = RTE_EVENT_QUEUE_CFG_SINGLE_LINK;
+	err = rte_event_queue_setup(TEST_DEV_ID, ev_qid, &qconf);
+	TEST_ASSERT_SUCCESS(err, "Failed to setup queue %u", ev_qid);
+
+	/*
+	 * Setup ports again so that the newly added queue is visible
+	 * to them
+	 */
+	for (i = 0; i < pcnt; i++) {
+
+		int n_links;
+		uint8_t queues[RTE_EVENT_MAX_QUEUES_PER_DEV];
+		uint8_t priorities[RTE_EVENT_MAX_QUEUES_PER_DEV];
+
+		if (i == ev_port)
+			continue;
+
+		n_links = rte_event_port_links_get(TEST_DEV_ID, i, queues,
+						priorities);
+		TEST_ASSERT(n_links > 0, "Failed to get port links %d\n",
+			n_links);
+		err = rte_event_port_setup(TEST_DEV_ID, i, NULL);
+		TEST_ASSERT(err == 0, "Failed to setup port err %d\n", err);
+		err = rte_event_port_link(TEST_DEV_ID, i, queues, priorities,
+					n_links);
+		TEST_ASSERT(n_links == err, "Failed to link all queues"
+			" err %s\n", rte_strerror(rte_errno));
+	}
+
+	err = rte_event_port_link(TEST_DEV_ID, ev_port, &ev_qid, NULL, 1);
+	TEST_ASSERT(err == 1, "Failed to link queue port %u",
+		    ev_port);
+
+	err = rte_event_eth_tx_adapter_start(TEST_INST_ID);
+	TEST_ASSERT(err == 0, "Expected 0 got %d", err);
+
+	if (!(dev_info.event_dev_cap & RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED)) {
+		err = rte_event_dev_service_id_get(0, (uint32_t *)&eid);
+		TEST_ASSERT(err == 0, "Expected 0 got %d", err);
+
+		err = rte_service_runstate_set(eid, 1);
+		TEST_ASSERT(err == 0, "Expected 0 got %d", err);
+
+		err = rte_service_set_runstate_mapped_check(eid, 0);
+		TEST_ASSERT(err == 0, "Expected 0 got %d", err);
+	}
+
+	err = rte_event_eth_tx_adapter_service_id_get(TEST_INST_ID, &tid);
+	TEST_ASSERT(err == 0, "Expected 0 got %d", err);
+
+	err = rte_service_runstate_set(tid, 1);
+	TEST_ASSERT(err == 0, "Expected 0 got %d", err);
+
+	err = rte_service_set_runstate_mapped_check(tid, 0);
+	TEST_ASSERT(err == 0, "Expected 0 got %d", err);
+
+	err = rte_event_dev_start(TEST_DEV_ID);
+	TEST_ASSERT(err == 0, "Expected 0 got %d", err);
+
+	for (q = 0; q < MAX_NUM_QUEUE; q++) {
+		for (i = 0; i < RING_SIZE; i++)
+			pbufs[i] = &bufs[i];
+		for (i = 0; i < RING_SIZE; i++) {
+			pbufs[i] = &bufs[i];
+			err = tx_adapter_single(TEST_ETHDEV_ID, q, pbufs[i],
+						ev_qid,
+						RTE_SCHED_TYPE_ORDERED);
+			TEST_ASSERT(err == 0, "Expected 0 got %d", err);
+		}
+		for (i = 0; i < RING_SIZE; i++) {
+			TEST_ASSERT_EQUAL(pbufs[i], &bufs[i],
+				"Error: received data does not match"
+				" that transmitted");
+		}
+	}
+
+	err = rte_event_eth_tx_adapter_stats_get(TEST_INST_ID, NULL);
+	TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err);
+
+	err = rte_event_eth_tx_adapter_stats_get(TEST_INST_ID, &stats);
+	TEST_ASSERT(err == 0, "Expected 0 got %d", err);
+	TEST_ASSERT_EQUAL(stats.tx_packets, MAX_NUM_QUEUE * RING_SIZE,
+			"stats.tx_packets expected %u got %"PRIu64,
+			MAX_NUM_QUEUE * RING_SIZE,
+			stats.tx_packets);
+
+	err = rte_event_eth_tx_adapter_stats_reset(TEST_INST_ID);
+	TEST_ASSERT(err == 0, "Expected 0 got %d", err);
+
+	err = rte_event_eth_tx_adapter_stats_get(TEST_INST_ID, &stats);
+	TEST_ASSERT(err == 0, "Expected 0 got %d", err);
+	TEST_ASSERT_EQUAL(stats.tx_packets, 0,
+			"stats.tx_packets expected %u got %"PRIu64,
+			0,
+			stats.tx_packets);
+
+	err = rte_event_eth_tx_adapter_stats_get(1, &stats);
+	TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err);
+
+	err = rte_event_eth_tx_adapter_queue_del(TEST_INST_ID, TEST_ETHDEV_ID,
+						-1);
+	TEST_ASSERT(err == 0, "Expected 0 got %d", err);
+
+	err = rte_event_eth_tx_adapter_free(TEST_INST_ID);
+	TEST_ASSERT(err == 0, "Expected 0 got %d", err);
+
+	rte_event_dev_stop(TEST_DEV_ID);
+
+	return TEST_SUCCESS;
+}
+
+static int
+tx_adapter_dynamic_device(void)
+{
+	uint16_t port_id = rte_eth_dev_count_avail();
+	const char *null_dev[2] = { "eth_null0", "eth_null1" };
+	struct rte_eth_conf dev_conf;
+	int ret;
+	size_t i;
+
+	memset(&dev_conf, 0, sizeof(dev_conf));
+	for (i = 0; i < RTE_DIM(null_dev); i++) {
+		ret = rte_vdev_init(null_dev[i], NULL);
+		TEST_ASSERT_SUCCESS(ret, "%s Port creation failed %d",
+				null_dev[i], ret);
+
+		if (i == 0) {
+			ret = tx_adapter_create();
+			TEST_ASSERT_SUCCESS(ret, "Adapter create failed %d",
+					ret);
+		}
+
+		ret = rte_eth_dev_configure(port_id + i, MAX_NUM_QUEUE,
+					MAX_NUM_QUEUE, &dev_conf);
+		TEST_ASSERT_SUCCESS(ret, "Failed to configure device %d", ret);
+
+		ret = rte_event_eth_tx_adapter_queue_add(TEST_INST_ID,
+							port_id + i, 0);
+		TEST_ASSERT_SUCCESS(ret, "Failed to add queues %d", ret);
+
+	}
+
+	for (i = 0; i < RTE_DIM(null_dev); i++) {
+		ret = rte_event_eth_tx_adapter_queue_del(TEST_INST_ID,
+							port_id + i, -1);
+		TEST_ASSERT_SUCCESS(ret, "Failed to delete queues %d", ret);
+	}
+
+	tx_adapter_free();
+
+	for (i = 0; i < RTE_DIM(null_dev); i++)
+		rte_vdev_uninit(null_dev[i]);
+
+	return TEST_SUCCESS;
+}
+
+static struct unit_test_suite event_eth_tx_tests = {
+	.setup = testsuite_setup,
+	.teardown = testsuite_teardown,
+	.suite_name = "tx event eth adapter test suite",
+	.unit_test_cases = {
+		TEST_CASE_ST(NULL, NULL, tx_adapter_create_free),
+		TEST_CASE_ST(tx_adapter_create, tx_adapter_free,
+					tx_adapter_queue_add_del),
+		TEST_CASE_ST(tx_adapter_create, tx_adapter_free,
+					tx_adapter_start_stop),
+		TEST_CASE_ST(tx_adapter_create, tx_adapter_free,
+					tx_adapter_service),
+		TEST_CASE_ST(NULL, NULL, tx_adapter_dynamic_device),
+		TEST_CASES_END() /**< NULL terminate unit test array */
+	}
+};
+
+static int
+test_event_eth_tx_adapter_common(void)
+{
+	return unit_test_suite_runner(&event_eth_tx_tests);
+}
+
+REGISTER_TEST_COMMAND(event_eth_tx_adapter_autotest,
+		test_event_eth_tx_adapter_common);
diff --git a/MAINTAINERS b/MAINTAINERS
index 3f06b56..93699ba 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -395,6 +395,7 @@ Eventdev Ethdev Tx Adapter API - EXPERIMENTAL
 M: Nikhil Rao <nikhil.rao@intel.com>
 T: git://dpdk.org/next/dpdk-next-eventdev
 F: lib/librte_eventdev/*eth_tx_adapter*
+F: test/test/test_event_eth_tx_adapter.c
 
 Raw device API - EXPERIMENTAL
 M: Shreyansh Jain <shreyansh.jain@nxp.com>
diff --git a/test/test/Makefile b/test/test/Makefile
index e6967ba..dcea441 100644
--- a/test/test/Makefile
+++ b/test/test/Makefile
@@ -191,6 +191,7 @@ ifeq ($(CONFIG_RTE_LIBRTE_EVENTDEV),y)
 SRCS-y += test_eventdev.c
 SRCS-y += test_event_ring.c
 SRCS-y += test_event_eth_rx_adapter.c
+SRCS-y += test_event_eth_tx_adapter.c
 SRCS-y += test_event_timer_adapter.c
 SRCS-y += test_event_crypto_adapter.c
 endif
diff --git a/test/test/meson.build b/test/test/meson.build
index b1dd6ec..3d2887b 100644
--- a/test/test/meson.build
+++ b/test/test/meson.build
@@ -34,6 +34,7 @@ test_sources = files('commands.c',
 	'test_efd_perf.c',
 	'test_errno.c',
 	'test_event_ring.c',
+	'test_event_eth_tx_adapter.c',
 	'test_eventdev.c',
 	'test_func_reentrancy.c',
 	'test_flow_classify.c',
@@ -152,6 +153,7 @@ test_names = [
 	'efd_perf_autotest',
 	'errno_autotest',
 	'event_ring_autotest',
+	'event_eth_tx_adapter_autotest',
 	'eventdev_common_autotest',
 	'eventdev_octeontx_autotest',
 	'eventdev_sw_autotest',
-- 
1.8.3.1


Thread overview: 27+ messages
2018-08-17  4:20 [dpdk-dev] [PATCH v2 1/4] eventdev: add eth Tx adapter APIs Nikhil Rao
2018-08-17  4:20 ` [dpdk-dev] [PATCH v2 2/4] eventdev: add caps API and PMD callbacks for eth Tx adapter Nikhil Rao
2018-08-19 10:45   ` Jerin Jacob
2018-08-21  8:52     ` Rao, Nikhil
2018-08-21  9:11       ` Jerin Jacob
2018-08-22 13:34         ` Rao, Nikhil
2018-08-17  4:20 ` [dpdk-dev] [PATCH v2 3/4] eventdev: add eth Tx adapter implementation Nikhil Rao
2018-08-17  4:20 ` [dpdk-dev] [PATCH v2 4/4] eventdev: add auto test for eth Tx adapter Nikhil Rao
2018-08-17 11:55   ` Pavan Nikhilesh
2018-08-22 16:13     ` Rao, Nikhil
2018-08-22 16:23       ` Pavan Nikhilesh
2018-08-23  1:48         ` Rao, Nikhil
2018-08-19 10:19 ` [dpdk-dev] [PATCH v2 1/4] eventdev: add eth Tx adapter APIs Jerin Jacob
2018-08-31  5:41 ` [dpdk-dev] [PATCH v3 1/5] " Nikhil Rao
2018-08-31  5:41   ` [dpdk-dev] [PATCH v3 2/5] eventdev: add caps API and PMD callbacks for eth Tx adapter Nikhil Rao
2018-08-31  5:41   ` [dpdk-dev] [PATCH v3 3/5] eventdev: add eth Tx adapter implementation Nikhil Rao
2018-08-31  5:41   ` [dpdk-dev] [PATCH v3 4/5] eventdev: add auto test for eth Tx adapter Nikhil Rao
2018-09-17 14:00     ` Jerin Jacob
2018-08-31  5:41   ` [dpdk-dev] [PATCH v3 5/5] doc: add event eth Tx adapter guide Nikhil Rao
2018-09-17 13:56     ` Jerin Jacob
2018-09-20 17:41   ` [dpdk-dev] [PATCH v4 1/5] eventdev: add eth Tx adapter APIs Nikhil Rao
2018-09-20 17:41     ` [dpdk-dev] [PATCH v4 2/5] eventdev: add caps API and PMD callbacks for eth Tx adapter Nikhil Rao
2018-09-20 17:41     ` [dpdk-dev] [PATCH v4 3/5] eventdev: add eth Tx adapter implementation Nikhil Rao
2018-09-20 17:41     ` Nikhil Rao [this message]
2018-09-20 17:41     ` [dpdk-dev] [PATCH v4 5/5] doc: add event eth Tx adapter guide Nikhil Rao
2018-09-21  5:04     ` [dpdk-dev] [PATCH v4 1/5] eventdev: add eth Tx adapter APIs Jerin Jacob
2018-09-28 10:05     ` Jerin Jacob
