* [dpdk-dev] [PATCH v8 01/10] examples/l2fwd-event: add default poll mode routines
2019-10-30 16:26 [dpdk-dev] [PATCH v8 00/10] example/l2fwd-event: introduce l2fwd-event example pbhagavatula
@ 2019-10-30 16:26 ` pbhagavatula
2019-10-30 16:26 ` [dpdk-dev] [PATCH v8 02/10] examples/l2fwd-event: add infra for eventdev pbhagavatula
` (9 subsequent siblings)
10 siblings, 0 replies; 13+ messages in thread
From: pbhagavatula @ 2019-10-30 16:26 UTC (permalink / raw)
To: jerinj, Thomas Monjalon, Marko Kovacevic, Ori Kam,
Bruce Richardson, Radu Nicolau, Akhil Goyal, Tomasz Kantecki,
Sunil Kumar Kori, Pavan Nikhilesh
Cc: dev
From: Pavan Nikhilesh <pbhagavatula@marvell.com>
Add the default l2fwd poll mode routines similar to examples/l2fwd.
Signed-off-by: Sunil Kumar Kori <skori@marvell.com>
Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
---
MAINTAINERS | 5 +
examples/Makefile | 1 +
examples/l2fwd-event/Makefile | 59 ++++
examples/l2fwd-event/l2fwd_common.c | 91 ++++++
examples/l2fwd-event/l2fwd_common.h | 127 ++++++++
examples/l2fwd-event/l2fwd_poll.c | 177 +++++++++++
examples/l2fwd-event/l2fwd_poll.h | 25 ++
examples/l2fwd-event/main.c | 446 ++++++++++++++++++++++++++++
examples/l2fwd-event/meson.build | 15 +
examples/meson.build | 2 +-
10 files changed, 947 insertions(+), 1 deletion(-)
create mode 100644 examples/l2fwd-event/Makefile
create mode 100644 examples/l2fwd-event/l2fwd_common.c
create mode 100644 examples/l2fwd-event/l2fwd_common.h
create mode 100644 examples/l2fwd-event/l2fwd_poll.c
create mode 100644 examples/l2fwd-event/l2fwd_poll.h
create mode 100644 examples/l2fwd-event/main.c
create mode 100644 examples/l2fwd-event/meson.build
diff --git a/MAINTAINERS b/MAINTAINERS
index 717c31801..3df03f9fe 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1496,6 +1496,11 @@ M: Tomasz Kantecki <tomasz.kantecki@intel.com>
F: doc/guides/sample_app_ug/l2_forward_cat.rst
F: examples/l2fwd-cat/
+M: Sunil Kumar Kori <skori@marvell.com>
+M: Pavan Nikhilesh <pbhagavatula@marvell.com>
+F: examples/l2fwd-event/
+T: git://dpdk.org/next/dpdk-next-eventdev
+
F: examples/l3fwd/
F: doc/guides/sample_app_ug/l3_forward.rst
diff --git a/examples/Makefile b/examples/Makefile
index 75c56710f..c4d1343de 100644
--- a/examples/Makefile
+++ b/examples/Makefile
@@ -36,6 +36,7 @@ endif
DIRS-$(CONFIG_RTE_LIBRTE_HASH) += ipv4_multicast
DIRS-$(CONFIG_RTE_LIBRTE_KNI) += kni
DIRS-y += l2fwd
+DIRS-y += l2fwd-event
ifneq ($(PQOS_INSTALL_PATH),)
DIRS-y += l2fwd-cat
endif
diff --git a/examples/l2fwd-event/Makefile b/examples/l2fwd-event/Makefile
new file mode 100644
index 000000000..73f02dd3b
--- /dev/null
+++ b/examples/l2fwd-event/Makefile
@@ -0,0 +1,59 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(C) 2019 Marvell International Ltd.
+#
+
+# binary name
+APP = l2fwd-event
+
+# all source are stored in SRCS-y
+SRCS-y := main.c
+SRCS-y += l2fwd_poll.c
+SRCS-y += l2fwd_common.c
+
+# Build using pkg-config variables if possible
+ifeq ($(shell pkg-config --exists libdpdk && echo 0),0)
+
+all: shared
+.PHONY: shared static
+shared: build/$(APP)-shared
+ ln -sf $(APP)-shared build/$(APP)
+static: build/$(APP)-static
+ ln -sf $(APP)-static build/$(APP)
+
+PKGCONF=pkg-config --define-prefix
+
+PC_FILE := $(shell $(PKGCONF) --path libdpdk)
+CFLAGS += -O3 $(shell $(PKGCONF) --cflags libdpdk)
+LDFLAGS_SHARED = $(shell $(PKGCONF) --libs libdpdk)
+LDFLAGS_STATIC = -Wl,-Bstatic $(shell $(PKGCONF) --static --libs libdpdk)
+
+build/$(APP)-shared: $(SRCS-y) Makefile $(PC_FILE) | build
+ $(CC) $(CFLAGS) $(SRCS-y) -o $@ $(LDFLAGS) $(LDFLAGS_SHARED)
+
+build/$(APP)-static: $(SRCS-y) Makefile $(PC_FILE) | build
+ $(CC) $(CFLAGS) $(SRCS-y) -o $@ $(LDFLAGS) $(LDFLAGS_STATIC)
+
+build:
+ @mkdir -p $@
+
+.PHONY: clean
+clean:
+ rm -f build/$(APP) build/$(APP)-static build/$(APP)-shared
+ test -d build && rmdir -p build || true
+
+else # Build using legacy build system
+
+ifeq ($(RTE_SDK),)
+$(error "Please define RTE_SDK environment variable")
+endif
+
+# Default target, detect a build directory, by looking for a path with a .config
+RTE_TARGET ?= $(notdir $(abspath $(dir $(firstword $(wildcard $(RTE_SDK)/*/.config)))))
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+
+include $(RTE_SDK)/mk/rte.extapp.mk
+endif
diff --git a/examples/l2fwd-event/l2fwd_common.c b/examples/l2fwd-event/l2fwd_common.c
new file mode 100644
index 000000000..c206415d0
--- /dev/null
+++ b/examples/l2fwd-event/l2fwd_common.c
@@ -0,0 +1,91 @@
+#include "l2fwd_common.h"
+
+int
+l2fwd_event_init_ports(struct l2fwd_resources *rsrc)
+{
+ uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT;
+ uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT;
+ struct rte_eth_conf port_conf = {
+ .rxmode = {
+ .max_rx_pkt_len = RTE_ETHER_MAX_LEN,
+ .split_hdr_size = 0,
+ },
+ .txmode = {
+ .mq_mode = ETH_MQ_TX_NONE,
+ },
+ };
+ uint16_t nb_ports_available = 0;
+ uint16_t port_id;
+ int ret;
+
+ /* Initialise each port */
+ RTE_ETH_FOREACH_DEV(port_id) {
+ struct rte_eth_conf local_port_conf = port_conf;
+ struct rte_eth_dev_info dev_info;
+ struct rte_eth_rxconf rxq_conf;
+ struct rte_eth_txconf txq_conf;
+
+ /* skip ports that are not enabled */
+ if ((rsrc->enabled_port_mask & (1 << port_id)) == 0) {
+ printf("Skipping disabled port %u\n", port_id);
+ continue;
+ }
+ nb_ports_available++;
+
+ /* init port */
+ printf("Initializing port %u... ", port_id);
+ fflush(stdout);
+ rte_eth_dev_info_get(port_id, &dev_info);
+ if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+ local_port_conf.txmode.offloads |=
+ DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+ ret = rte_eth_dev_configure(port_id, 1, 1, &local_port_conf);
+ if (ret < 0)
+ rte_panic("Cannot configure device: err=%d, port=%u\n",
+ ret, port_id);
+
+ ret = rte_eth_dev_adjust_nb_rx_tx_desc(port_id, &nb_rxd,
+ &nb_txd);
+ if (ret < 0)
+ rte_panic("Cannot adjust number of descriptors: err=%d, port=%u\n",
+ ret, port_id);
+
+ rte_eth_macaddr_get(port_id, &rsrc->eth_addr[port_id]);
+
+ /* init one RX queue */
+ fflush(stdout);
+ rxq_conf = dev_info.default_rxconf;
+ rxq_conf.offloads = local_port_conf.rxmode.offloads;
+ ret = rte_eth_rx_queue_setup(port_id, 0, nb_rxd,
+ rte_eth_dev_socket_id(port_id),
+ &rxq_conf,
+ rsrc->pktmbuf_pool);
+ if (ret < 0)
+ rte_panic("rte_eth_rx_queue_setup:err=%d, port=%u\n",
+ ret, port_id);
+
+ /* init one TX queue on each port */
+ fflush(stdout);
+ txq_conf = dev_info.default_txconf;
+ txq_conf.offloads = local_port_conf.txmode.offloads;
+ ret = rte_eth_tx_queue_setup(port_id, 0, nb_txd,
+ rte_eth_dev_socket_id(port_id),
+ &txq_conf);
+ if (ret < 0)
+ rte_panic("rte_eth_tx_queue_setup:err=%d, port=%u\n",
+ ret, port_id);
+
+ rte_eth_promiscuous_enable(port_id);
+
+ printf("Port %u,MAC address: %02X:%02X:%02X:%02X:%02X:%02X\n\n",
+ port_id,
+ rsrc->eth_addr[port_id].addr_bytes[0],
+ rsrc->eth_addr[port_id].addr_bytes[1],
+ rsrc->eth_addr[port_id].addr_bytes[2],
+ rsrc->eth_addr[port_id].addr_bytes[3],
+ rsrc->eth_addr[port_id].addr_bytes[4],
+ rsrc->eth_addr[port_id].addr_bytes[5]);
+ }
+
+ return nb_ports_available;
+}
diff --git a/examples/l2fwd-event/l2fwd_common.h b/examples/l2fwd-event/l2fwd_common.h
new file mode 100644
index 000000000..7b74f92b3
--- /dev/null
+++ b/examples/l2fwd-event/l2fwd_common.h
@@ -0,0 +1,127 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2019 Marvell International Ltd.
+ */
+
+#ifndef __L2FWD_COMMON_H__
+#define __L2FWD_COMMON_H__
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <stdint.h>
+#include <inttypes.h>
+#include <sys/types.h>
+#include <sys/queue.h>
+#include <netinet/in.h>
+#include <setjmp.h>
+#include <stdarg.h>
+#include <ctype.h>
+#include <errno.h>
+#include <getopt.h>
+#include <signal.h>
+#include <stdbool.h>
+
+#include <rte_common.h>
+#include <rte_malloc.h>
+#include <rte_memory.h>
+#include <rte_memcpy.h>
+#include <rte_eal.h>
+#include <rte_launch.h>
+#include <rte_atomic.h>
+#include <rte_cycles.h>
+#include <rte_prefetch.h>
+#include <rte_lcore.h>
+#include <rte_per_lcore.h>
+#include <rte_branch_prediction.h>
+#include <rte_interrupts.h>
+#include <rte_random.h>
+#include <rte_debug.h>
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+#include <rte_eventdev.h>
+#include <rte_mempool.h>
+#include <rte_mbuf.h>
+#include <rte_spinlock.h>
+
+#define MAX_PKT_BURST 32
+#define MAX_RX_QUEUE_PER_LCORE 16
+#define MAX_TX_QUEUE_PER_PORT 16
+
+#define RTE_TEST_RX_DESC_DEFAULT 1024
+#define RTE_TEST_TX_DESC_DEFAULT 1024
+
+#define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */
+#define MEMPOOL_CACHE_SIZE 256
+
+#define DEFAULT_TIMER_PERIOD 10 /* default period is 10 seconds */
+#define MAX_TIMER_PERIOD 86400 /* 1 day max */
+
+/* Per-port statistics struct */
+struct l2fwd_port_statistics {
+ uint64_t dropped;
+ uint64_t tx;
+ uint64_t rx;
+} __rte_cache_aligned;
+
+struct l2fwd_resources {
+ volatile uint8_t force_quit;
+ uint8_t mac_updating;
+ uint8_t rx_queue_per_lcore;
+ uint16_t nb_rxd;
+ uint16_t nb_txd;
+ uint32_t enabled_port_mask;
+ uint64_t timer_period;
+ struct rte_mempool *pktmbuf_pool;
+ uint32_t dst_ports[RTE_MAX_ETHPORTS];
+ struct rte_ether_addr eth_addr[RTE_MAX_ETHPORTS];
+ struct l2fwd_port_statistics port_stats[RTE_MAX_ETHPORTS];
+ void *poll_rsrc;
+} __rte_cache_aligned;
+
+static __rte_always_inline void
+l2fwd_mac_updating(struct rte_mbuf *m, uint32_t dest_port_id,
+ struct rte_ether_addr *addr)
+{
+ struct rte_ether_hdr *eth;
+ void *tmp;
+
+ eth = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
+
+ /* 02:00:00:00:00:xx */
+ tmp = ð->d_addr.addr_bytes[0];
+ *((uint64_t *)tmp) = 0x000000000002 + ((uint64_t)dest_port_id << 40);
+
+ /* src addr */
+ rte_ether_addr_copy(addr, &eth->s_addr);
+}
+
+static __rte_always_inline struct l2fwd_resources *
+l2fwd_get_rsrc(void)
+{
+ static const char name[RTE_MEMZONE_NAMESIZE] = "rsrc";
+ const struct rte_memzone *mz;
+
+ mz = rte_memzone_lookup(name);
+ if (mz != NULL)
+ return mz->addr;
+
+ mz = rte_memzone_reserve(name, sizeof(struct l2fwd_resources), 0, 0);
+ if (mz != NULL) {
+ struct l2fwd_resources *rsrc = mz->addr;
+
+ memset(rsrc, 0, sizeof(struct l2fwd_resources));
+ rsrc->mac_updating = true;
+ rsrc->rx_queue_per_lcore = 1;
+ rsrc->timer_period = 10 * rte_get_timer_hz();
+
+ return mz->addr;
+ }
+
+ rte_panic("Unable to allocate memory for l2fwd resources\n");
+
+ return NULL;
+}
+
+int l2fwd_event_init_ports(struct l2fwd_resources *rsrc);
+
+#endif /* __L2FWD_COMMON_H__ */
diff --git a/examples/l2fwd-event/l2fwd_poll.c b/examples/l2fwd-event/l2fwd_poll.c
new file mode 100644
index 000000000..cc96b14cb
--- /dev/null
+++ b/examples/l2fwd-event/l2fwd_poll.c
@@ -0,0 +1,177 @@
+#include "l2fwd_poll.h"
+
+static inline void
+l2fwd_poll_simple_forward(struct l2fwd_resources *rsrc, struct rte_mbuf *m,
+ uint32_t portid)
+{
+ struct rte_eth_dev_tx_buffer *buffer;
+ uint32_t dst_port;
+ int sent;
+
+ dst_port = rsrc->dst_ports[portid];
+
+ if (rsrc->mac_updating)
+ l2fwd_mac_updating(m, dst_port, &rsrc->eth_addr[dst_port]);
+
+ buffer = ((struct l2fwd_poll_resources *)rsrc->poll_rsrc)->tx_buffer[
+ dst_port];
+ sent = rte_eth_tx_buffer(dst_port, 0, buffer, m);
+ if (sent)
+ rsrc->port_stats[dst_port].tx += sent;
+}
+
+/* main poll mode processing loop */
+static void
+l2fwd_poll_main_loop(struct l2fwd_resources *rsrc)
+{
+ uint64_t prev_tsc, diff_tsc, cur_tsc, drain_tsc;
+ struct l2fwd_poll_resources *poll_rsrc = rsrc->poll_rsrc;
+ struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
+ struct rte_eth_dev_tx_buffer *buf;
+ struct lcore_queue_conf *qconf;
+ uint32_t i, j, port_id, nb_rx;
+ struct rte_mbuf *m;
+ uint32_t lcore_id;
+ int32_t sent;
+
+ drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) / US_PER_S *
+ BURST_TX_DRAIN_US;
+ prev_tsc = 0;
+
+ lcore_id = rte_lcore_id();
+ qconf = &poll_rsrc->lcore_queue_conf[lcore_id];
+
+ if (qconf->n_rx_port == 0) {
+ printf("lcore %u has nothing to do\n", lcore_id);
+ return;
+ }
+
+ printf("entering main loop on lcore %u\n", lcore_id);
+
+ for (i = 0; i < qconf->n_rx_port; i++) {
+
+ port_id = qconf->rx_port_list[i];
+ printf(" -- lcoreid=%u port_id=%u\n", lcore_id, port_id);
+
+ }
+
+ while (!rsrc->force_quit) {
+
+ cur_tsc = rte_rdtsc();
+
+ /*
+ * TX burst queue drain
+ */
+ diff_tsc = cur_tsc - prev_tsc;
+ if (unlikely(diff_tsc > drain_tsc)) {
+ for (i = 0; i < qconf->n_rx_port; i++) {
+ port_id =
+ rsrc->dst_ports[qconf->rx_port_list[i]];
+ buf = poll_rsrc->tx_buffer[port_id];
+ sent = rte_eth_tx_buffer_flush(port_id, 0, buf);
+ if (sent)
+ rsrc->port_stats[port_id].tx += sent;
+ }
+
+ prev_tsc = cur_tsc;
+ }
+
+ /*
+ * Read packet from RX queues
+ */
+ for (i = 0; i < qconf->n_rx_port; i++) {
+
+ port_id = qconf->rx_port_list[i];
+ nb_rx = rte_eth_rx_burst(port_id, 0, pkts_burst,
+ MAX_PKT_BURST);
+
+ rsrc->port_stats[port_id].rx += nb_rx;
+
+ for (j = 0; j < nb_rx; j++) {
+ m = pkts_burst[j];
+ rte_prefetch0(rte_pktmbuf_mtod(m, void *));
+ l2fwd_poll_simple_forward(rsrc, m, port_id);
+ }
+ }
+ }
+}
+
+static void
+l2fwd_poll_lcore_config(struct l2fwd_resources *rsrc)
+{
+ struct l2fwd_poll_resources *poll_rsrc = rsrc->poll_rsrc;
+ struct lcore_queue_conf *qconf = NULL;
+ uint32_t rx_lcore_id = 0;
+ uint16_t port_id;
+
+ /* Initialize the port/queue configuration of each logical core */
+ RTE_ETH_FOREACH_DEV(port_id) {
+ /* skip ports that are not enabled */
+ if ((rsrc->enabled_port_mask & (1 << port_id)) == 0)
+ continue;
+
+ /* get the lcore_id for this port */
+ while (rte_lcore_is_enabled(rx_lcore_id) == 0 ||
+ poll_rsrc->lcore_queue_conf[rx_lcore_id].n_rx_port ==
+ rsrc->rx_queue_per_lcore) {
+ rx_lcore_id++;
+ if (rx_lcore_id >= RTE_MAX_LCORE)
+ rte_panic("Not enough cores\n");
+ }
+
+ if (qconf != &poll_rsrc->lcore_queue_conf[rx_lcore_id]) {
+ /* Assigned a new logical core in the loop above. */
+ qconf = &poll_rsrc->lcore_queue_conf[rx_lcore_id];
+ }
+
+ qconf->rx_port_list[qconf->n_rx_port] = port_id;
+ qconf->n_rx_port++;
+ printf("Lcore %u: RX port %u\n", rx_lcore_id, port_id);
+ }
+}
+
+static void
+l2fwd_poll_init_tx_buffers(struct l2fwd_resources *rsrc)
+{
+ struct l2fwd_poll_resources *poll_rsrc = rsrc->poll_rsrc;
+ uint16_t port_id;
+ int ret;
+
+ RTE_ETH_FOREACH_DEV(port_id) {
+ /* Initialize TX buffers */
+ poll_rsrc->tx_buffer[port_id] = rte_zmalloc_socket("tx_buffer",
+ RTE_ETH_TX_BUFFER_SIZE(MAX_PKT_BURST), 0,
+ rte_eth_dev_socket_id(port_id));
+ if (poll_rsrc->tx_buffer[port_id] == NULL)
+ rte_panic("Cannot allocate buffer for tx on port %u\n",
+ port_id);
+
+ rte_eth_tx_buffer_init(poll_rsrc->tx_buffer[port_id],
+ MAX_PKT_BURST);
+
+ ret = rte_eth_tx_buffer_set_err_callback(
+ poll_rsrc->tx_buffer[port_id],
+ rte_eth_tx_buffer_count_callback,
+ &rsrc->port_stats[port_id].dropped);
+ if (ret < 0)
+ rte_panic("Cannot set error callback for tx buffer on port %u\n",
+ port_id);
+ }
+}
+
+void
+l2fwd_poll_resource_setup(struct l2fwd_resources *rsrc)
+{
+ struct l2fwd_poll_resources *poll_rsrc;
+
+ poll_rsrc = rte_zmalloc("l2fwd_poll_rsrc",
+ sizeof(struct l2fwd_poll_resources), 0);
+ if (poll_rsrc == NULL)
+ rte_panic("Failed to allocate resources for l2fwd poll mode\n");
+
+ rsrc->poll_rsrc = poll_rsrc;
+ l2fwd_poll_lcore_config(rsrc);
+ l2fwd_poll_init_tx_buffers(rsrc);
+
+ poll_rsrc->poll_main_loop = l2fwd_poll_main_loop;
+}
diff --git a/examples/l2fwd-event/l2fwd_poll.h b/examples/l2fwd-event/l2fwd_poll.h
new file mode 100644
index 000000000..d59b0c844
--- /dev/null
+++ b/examples/l2fwd-event/l2fwd_poll.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2019 Marvell International Ltd.
+ */
+
+#ifndef __L2FWD_POLL_H__
+#define __L2FWD_POLL_H__
+
+#include "l2fwd_common.h"
+
+typedef void (*poll_main_loop_cb)(struct l2fwd_resources *rsrc);
+
+struct lcore_queue_conf {
+ uint32_t rx_port_list[MAX_RX_QUEUE_PER_LCORE];
+ uint32_t n_rx_port;
+} __rte_cache_aligned;
+
+struct l2fwd_poll_resources {
+ poll_main_loop_cb poll_main_loop;
+ struct rte_eth_dev_tx_buffer *tx_buffer[RTE_MAX_ETHPORTS];
+ struct lcore_queue_conf lcore_queue_conf[RTE_MAX_LCORE];
+};
+
+void l2fwd_poll_resource_setup(struct l2fwd_resources *rsrc);
+
+#endif
diff --git a/examples/l2fwd-event/main.c b/examples/l2fwd-event/main.c
new file mode 100644
index 000000000..a4e41ddb4
--- /dev/null
+++ b/examples/l2fwd-event/main.c
@@ -0,0 +1,446 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2019 Marvell International Ltd.
+ */
+
+#include "l2fwd_poll.h"
+
+/* display usage */
+static void
+l2fwd_event_usage(const char *prgname)
+{
+ printf("%s [EAL options] -- -p PORTMASK [-q NQ]\n"
+ " -p PORTMASK: hexadecimal bitmask of ports to configure\n"
+ " -q NQ: number of queue (=ports) per lcore (default is 1)\n"
+ " -T PERIOD: statistics will be refreshed each PERIOD seconds "
+ " (0 to disable, 10 default, 86400 maximum)\n"
+ " --[no-]mac-updating: Enable or disable MAC addresses updating (enabled by default)\n"
+ " When enabled:\n"
+ " - The source MAC address is replaced by the TX port MAC address\n"
+ " - The destination MAC address is replaced by 02:00:00:00:00:TX_PORT_ID\n",
+ prgname);
+}
+
+static int
+l2fwd_event_parse_portmask(const char *portmask)
+{
+ char *end = NULL;
+ unsigned long pm;
+
+ /* parse hexadecimal string */
+ pm = strtoul(portmask, &end, 16);
+ if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0'))
+ return -1;
+
+ if (pm == 0)
+ return -1;
+
+ return pm;
+}
+
+static unsigned int
+l2fwd_event_parse_nqueue(const char *q_arg)
+{
+ char *end = NULL;
+ unsigned long n;
+
+ /* parse decimal string */
+ n = strtoul(q_arg, &end, 10);
+ if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0'))
+ return 0;
+ if (n == 0)
+ return 0;
+ if (n >= MAX_RX_QUEUE_PER_LCORE)
+ return 0;
+
+ return n;
+}
+
+static int
+l2fwd_event_parse_timer_period(const char *q_arg)
+{
+ char *end = NULL;
+ int n;
+
+ /* parse number string */
+ n = strtol(q_arg, &end, 10);
+ if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0'))
+ return -1;
+ if (n >= MAX_TIMER_PERIOD)
+ return -1;
+
+ return n;
+}
+
+static const char short_options[] =
+ "p:" /* portmask */
+ "q:" /* number of queues */
+ "T:" /* timer period */
+ ;
+
+#define CMD_LINE_OPT_MAC_UPDATING "mac-updating"
+#define CMD_LINE_OPT_NO_MAC_UPDATING "no-mac-updating"
+
+enum {
+ /* long options mapped to a short option */
+
+ /* first long only option value must be >= 256, so that we won't
+ * conflict with short options
+ */
+ CMD_LINE_OPT_MIN_NUM = 256,
+};
+
+/* Parse the argument given in the command line of the application */
+static int
+l2fwd_event_parse_args(int argc, char **argv,
+ struct l2fwd_resources *rsrc)
+{
+ int mac_updating = 1;
+ struct option lgopts[] = {
+ { CMD_LINE_OPT_MAC_UPDATING, no_argument, &mac_updating, 1},
+ { CMD_LINE_OPT_NO_MAC_UPDATING, no_argument, &mac_updating, 0},
+ {NULL, 0, 0, 0}
+ };
+ int opt, ret, timer_secs;
+ char *prgname = argv[0];
+ char **argvopt;
+ int option_index;
+
+ argvopt = argv;
+ while ((opt = getopt_long(argc, argvopt, short_options,
+ lgopts, &option_index)) != EOF) {
+
+ switch (opt) {
+ /* portmask */
+ case 'p':
+ rsrc->enabled_port_mask =
+ l2fwd_event_parse_portmask(optarg);
+ if (rsrc->enabled_port_mask == 0) {
+ printf("invalid portmask\n");
+ l2fwd_event_usage(prgname);
+ return -1;
+ }
+ break;
+
+ /* nqueue */
+ case 'q':
+ rsrc->rx_queue_per_lcore =
+ l2fwd_event_parse_nqueue(optarg);
+ if (rsrc->rx_queue_per_lcore == 0) {
+ printf("invalid queue number\n");
+ l2fwd_event_usage(prgname);
+ return -1;
+ }
+ break;
+
+ /* timer period */
+ case 'T':
+ timer_secs = l2fwd_event_parse_timer_period(optarg);
+ if (timer_secs < 0) {
+ printf("invalid timer period\n");
+ l2fwd_event_usage(prgname);
+ return -1;
+ }
+ rsrc->timer_period = timer_secs;
+ /* convert to number of cycles */
+ rsrc->timer_period *= rte_get_timer_hz();
+ break;
+
+ /* long options */
+ case 0:
+ break;
+
+ default:
+ l2fwd_event_usage(prgname);
+ return -1;
+ }
+ }
+
+ rsrc->mac_updating = mac_updating;
+
+ if (optind >= 0)
+ argv[optind-1] = prgname;
+
+ ret = optind-1;
+ optind = 1; /* reset getopt lib */
+ return ret;
+}
+
+static int
+l2fwd_launch_one_lcore(void *args)
+{
+ struct l2fwd_resources *rsrc = args;
+ struct l2fwd_poll_resources *poll_rsrc = rsrc->poll_rsrc;
+
+ poll_rsrc->poll_main_loop(rsrc);
+
+ return 0;
+}
+
+/* Check the link status of all ports in up to 9s, and print them finally */
+static void
+check_all_ports_link_status(struct l2fwd_resources *rsrc,
+ uint32_t port_mask)
+{
+#define CHECK_INTERVAL 100 /* 100ms */
+#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
+ uint16_t port_id;
+ uint8_t count, all_ports_up, print_flag = 0;
+ struct rte_eth_link link;
+
+ printf("\nChecking link status...");
+ fflush(stdout);
+ for (count = 0; count <= MAX_CHECK_TIME; count++) {
+ if (rsrc->force_quit)
+ return;
+ all_ports_up = 1;
+ RTE_ETH_FOREACH_DEV(port_id) {
+ if (rsrc->force_quit)
+ return;
+ if ((port_mask & (1 << port_id)) == 0)
+ continue;
+ memset(&link, 0, sizeof(link));
+ rte_eth_link_get_nowait(port_id, &link);
+ /* print link status if flag set */
+ if (print_flag == 1) {
+ if (link.link_status)
+ printf(
+ "Port%d Link Up. Speed %u Mbps - %s\n",
+ port_id, link.link_speed,
+ (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
+ ("full-duplex") : ("half-duplex\n"));
+ else
+ printf("Port %d Link Down\n", port_id);
+ continue;
+ }
+ /* clear all_ports_up flag if any link down */
+ if (link.link_status == ETH_LINK_DOWN) {
+ all_ports_up = 0;
+ break;
+ }
+ }
+ /* after finally printing all link status, get out */
+ if (print_flag == 1)
+ break;
+
+ if (all_ports_up == 0) {
+ printf(".");
+ fflush(stdout);
+ rte_delay_ms(CHECK_INTERVAL);
+ }
+
+ /* set the print_flag if all ports up or timeout */
+ if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
+ print_flag = 1;
+ printf("done\n");
+ }
+ }
+}
+
+/* Print out statistics on packets dropped */
+static void
+print_stats(struct l2fwd_resources *rsrc)
+{
+ uint64_t total_packets_dropped, total_packets_tx, total_packets_rx;
+ uint32_t port_id;
+
+ total_packets_dropped = 0;
+ total_packets_tx = 0;
+ total_packets_rx = 0;
+
+ const char clr[] = {27, '[', '2', 'J', '\0' };
+ const char topLeft[] = {27, '[', '1', ';', '1', 'H', '\0' };
+
+ /* Clear screen and move to top left */
+ printf("%s%s", clr, topLeft);
+
+ printf("\nPort statistics ====================================");
+
+ for (port_id = 0; port_id < RTE_MAX_ETHPORTS; port_id++) {
+ /* skip disabled ports */
+ if ((rsrc->enabled_port_mask & (1 << port_id)) == 0)
+ continue;
+ printf("\nStatistics for port %u ------------------------------"
+ "\nPackets sent: %24"PRIu64
+ "\nPackets received: %20"PRIu64
+ "\nPackets dropped: %21"PRIu64,
+ port_id,
+ rsrc->port_stats[port_id].tx,
+ rsrc->port_stats[port_id].rx,
+ rsrc->port_stats[port_id].dropped);
+
+ total_packets_dropped +=
+ rsrc->port_stats[port_id].dropped;
+ total_packets_tx += rsrc->port_stats[port_id].tx;
+ total_packets_rx += rsrc->port_stats[port_id].rx;
+ }
+ printf("\nAggregate statistics ==============================="
+ "\nTotal packets sent: %18"PRIu64
+ "\nTotal packets received: %14"PRIu64
+ "\nTotal packets dropped: %15"PRIu64,
+ total_packets_tx,
+ total_packets_rx,
+ total_packets_dropped);
+ printf("\n====================================================\n");
+}
+
+static void
+l2fwd_event_print_stats(struct l2fwd_resources *rsrc)
+{
+ uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
+ const uint64_t timer_period = rsrc->timer_period;
+
+ while (!rsrc->force_quit) {
+ /* if timer is enabled */
+ if (timer_period > 0) {
+ cur_tsc = rte_rdtsc();
+ diff_tsc = cur_tsc - prev_tsc;
+
+ /* advance the timer */
+ timer_tsc += diff_tsc;
+
+ /* if timer has reached its timeout */
+ if (unlikely(timer_tsc >= timer_period)) {
+ print_stats(rsrc);
+ /* reset the timer */
+ timer_tsc = 0;
+ }
+ prev_tsc = cur_tsc;
+ }
+ }
+}
+
+
+static void
+signal_handler(int signum)
+{
+ struct l2fwd_resources *rsrc = l2fwd_get_rsrc();
+ if (signum == SIGINT || signum == SIGTERM) {
+ printf("\n\nSignal %d received, preparing to exit...\n",
+ signum);
+ rsrc->force_quit = true;
+ }
+}
+
+int
+main(int argc, char **argv)
+{
+ struct l2fwd_resources *rsrc;
+ uint16_t nb_ports_available = 0;
+ uint32_t nb_ports_in_mask = 0;
+ uint16_t port_id, last_port;
+ uint32_t nb_mbufs;
+ uint16_t nb_ports;
+ int ret;
+
+ /* init EAL */
+ ret = rte_eal_init(argc, argv);
+ if (ret < 0)
+ rte_panic("Invalid EAL arguments\n");
+ argc -= ret;
+ argv += ret;
+
+ rsrc = l2fwd_get_rsrc();
+
+ signal(SIGINT, signal_handler);
+ signal(SIGTERM, signal_handler);
+
+ /* parse application arguments (after the EAL ones) */
+ ret = l2fwd_event_parse_args(argc, argv, rsrc);
+ if (ret < 0)
+ rte_panic("Invalid L2FWD arguments\n");
+
+ printf("MAC updating %s\n", rsrc->mac_updating ? "enabled" :
+ "disabled");
+
+ nb_ports = rte_eth_dev_count_avail();
+ if (nb_ports == 0)
+ rte_panic("No Ethernet ports - bye\n");
+
+ /* check that the port mask covers only available ports */
+ if (rsrc->enabled_port_mask & ~((1 << nb_ports) - 1))
+ rte_panic("Invalid portmask; possible (0x%x)\n",
+ (1 << nb_ports) - 1);
+
+ /* reset l2fwd_dst_ports */
+ for (port_id = 0; port_id < RTE_MAX_ETHPORTS; port_id++)
+ rsrc->dst_ports[port_id] = 0;
+ last_port = 0;
+
+ /*
+ * Each logical core is assigned a dedicated TX queue on each port.
+ */
+ RTE_ETH_FOREACH_DEV(port_id) {
+ /* skip ports that are not enabled */
+ if ((rsrc->enabled_port_mask & (1 << port_id)) == 0)
+ continue;
+
+ if (nb_ports_in_mask % 2) {
+ rsrc->dst_ports[port_id] = last_port;
+ rsrc->dst_ports[last_port] = port_id;
+ } else {
+ last_port = port_id;
+ }
+
+ nb_ports_in_mask++;
+ }
+ if (nb_ports_in_mask % 2) {
+ printf("Notice: odd number of ports in portmask.\n");
+ rsrc->dst_ports[last_port] = last_port;
+ }
+
+ nb_mbufs = RTE_MAX(nb_ports * (RTE_TEST_RX_DESC_DEFAULT +
+ RTE_TEST_TX_DESC_DEFAULT +
+ MAX_PKT_BURST + rte_lcore_count() *
+ MEMPOOL_CACHE_SIZE), 8192U);
+
+ /* create the mbuf pool */
+ rsrc->pktmbuf_pool = rte_pktmbuf_pool_create("mbuf_pool",
+ nb_mbufs, MEMPOOL_CACHE_SIZE, 0,
+ RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
+ if (rsrc->pktmbuf_pool == NULL)
+ rte_panic("Cannot init mbuf pool\n");
+
+ nb_ports_available = l2fwd_event_init_ports(rsrc);
+ if (!nb_ports_available)
+ rte_panic("All available ports are disabled. Please set portmask.\n");
+
+ l2fwd_poll_resource_setup(rsrc);
+
+ /* initialize port stats */
+ memset(&rsrc->port_stats, 0,
+ sizeof(rsrc->port_stats));
+
+ /* All settings are done. Now enable eth devices */
+ RTE_ETH_FOREACH_DEV(port_id) {
+ /* skip ports that are not enabled */
+ if ((rsrc->enabled_port_mask &
+ (1 << port_id)) == 0)
+ continue;
+
+ ret = rte_eth_dev_start(port_id);
+ if (ret < 0)
+ rte_panic("rte_eth_dev_start:err=%d, port=%u\n", ret,
+ port_id);
+ }
+
+ check_all_ports_link_status(rsrc, rsrc->enabled_port_mask);
+
+ /* launch per-lcore init on every lcore */
+ rte_eal_mp_remote_launch(l2fwd_launch_one_lcore, rsrc,
+ SKIP_MASTER);
+ l2fwd_event_print_stats(rsrc);
+ rte_eal_mp_wait_lcore();
+
+ RTE_ETH_FOREACH_DEV(port_id) {
+ if ((rsrc->enabled_port_mask &
+ (1 << port_id)) == 0)
+ continue;
+ printf("Closing port %d...", port_id);
+ rte_eth_dev_stop(port_id);
+ rte_eth_dev_close(port_id);
+ printf(" Done\n");
+ }
+ printf("Bye...\n");
+
+ return 0;
+}
diff --git a/examples/l2fwd-event/meson.build b/examples/l2fwd-event/meson.build
new file mode 100644
index 000000000..c936aa72e
--- /dev/null
+++ b/examples/l2fwd-event/meson.build
@@ -0,0 +1,15 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(C) 2019 Marvell International Ltd.
+#
+
+# meson file, for building this example as part of a main DPDK build.
+#
+# To build this example as a standalone application with an already-installed
+# DPDK instance, use 'make'
+
+deps += 'eventdev'
+sources = files(
+ 'main.c',
+ 'l2fwd_poll.c',
+ 'l2fwd_common.c',
+)
diff --git a/examples/meson.build b/examples/meson.build
index 98ae50a49..9d468706d 100644
--- a/examples/meson.build
+++ b/examples/meson.build
@@ -20,7 +20,7 @@ all_examples = [
'ip_fragmentation', 'ip_pipeline',
'ip_reassembly', 'ipsec-secgw',
'ipv4_multicast', 'kni',
- 'l2fwd', 'l2fwd-cat',
+ 'l2fwd', 'l2fwd-cat', 'l2fwd-event',
'l2fwd-crypto', 'l2fwd-jobstats',
'l2fwd-keepalive', 'l3fwd',
'l3fwd-acl', 'l3fwd-power',
--
2.17.1
* [dpdk-dev] [PATCH v8 02/10] examples/l2fwd-event: add infra for eventdev
2019-10-30 16:26 [dpdk-dev] [PATCH v8 00/10] example/l2fwd-event: introduce l2fwd-event example pbhagavatula
2019-10-30 16:26 ` [dpdk-dev] [PATCH v8 01/10] examples/l2fwd-event: add default poll mode routines pbhagavatula
@ 2019-10-30 16:26 ` pbhagavatula
2019-10-30 16:26 ` [dpdk-dev] [PATCH v8 03/10] examples/l2fwd-event: add infra to split eventdev framework pbhagavatula
` (8 subsequent siblings)
10 siblings, 0 replies; 13+ messages in thread
From: pbhagavatula @ 2019-10-30 16:26 UTC (permalink / raw)
To: jerinj, Marko Kovacevic, Ori Kam, Bruce Richardson, Radu Nicolau,
Akhil Goyal, Tomasz Kantecki, Sunil Kumar Kori, Pavan Nikhilesh
Cc: dev
From: Pavan Nikhilesh <pbhagavatula@marvell.com>
Add infra to select the event device as the packet I/O mode through
command line arguments. Also, allow the user to select the schedule type
from RTE_SCHED_TYPE_ORDERED, RTE_SCHED_TYPE_ATOMIC or
RTE_SCHED_TYPE_PARALLEL.
Usage:
`--mode="eventdev"` or `--mode="poll"`
`--eventq-sched="ordered"`, `--eventq-sched="atomic"` or
`--eventq-sched="parallel"`
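For illustration, a hypothetical invocation combining both options (the EAL
core list and port mask below are placeholders, not recommended values):
`./build/l2fwd-event -l 0-3 -- -p 0x3 -q 1 --mode=eventdev --eventq-sched=ordered`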
Signed-off-by: Sunil Kumar Kori <skori@marvell.com>
Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
---
examples/l2fwd-event/Makefile | 1 +
examples/l2fwd-event/l2fwd_common.h | 4 +++
examples/l2fwd-event/l2fwd_event.c | 34 +++++++++++++++++++
examples/l2fwd-event/l2fwd_event.h | 21 ++++++++++++
examples/l2fwd-event/main.c | 52 +++++++++++++++++++++++++++--
examples/l2fwd-event/meson.build | 1 +
6 files changed, 111 insertions(+), 2 deletions(-)
create mode 100644 examples/l2fwd-event/l2fwd_event.c
create mode 100644 examples/l2fwd-event/l2fwd_event.h
diff --git a/examples/l2fwd-event/Makefile b/examples/l2fwd-event/Makefile
index 73f02dd3b..08ba1835d 100644
--- a/examples/l2fwd-event/Makefile
+++ b/examples/l2fwd-event/Makefile
@@ -8,6 +8,7 @@ APP = l2fwd-event
# all source are stored in SRCS-y
SRCS-y := main.c
SRCS-y += l2fwd_poll.c
+SRCS-y += l2fwd_event.c
SRCS-y += l2fwd_common.c
# Build using pkg-config variables if possible
diff --git a/examples/l2fwd-event/l2fwd_common.h b/examples/l2fwd-event/l2fwd_common.h
index 7b74f92b3..a4e17ab97 100644
--- a/examples/l2fwd-event/l2fwd_common.h
+++ b/examples/l2fwd-event/l2fwd_common.h
@@ -65,6 +65,8 @@ struct l2fwd_port_statistics {
struct l2fwd_resources {
volatile uint8_t force_quit;
+ uint8_t event_mode;
+ uint8_t sched_type;
uint8_t mac_updating;
uint8_t rx_queue_per_lcore;
uint16_t nb_rxd;
@@ -75,6 +77,7 @@ struct l2fwd_resources {
uint32_t dst_ports[RTE_MAX_ETHPORTS];
struct rte_ether_addr eth_addr[RTE_MAX_ETHPORTS];
struct l2fwd_port_statistics port_stats[RTE_MAX_ETHPORTS];
+ void *evt_rsrc;
void *poll_rsrc;
} __rte_cache_aligned;
@@ -112,6 +115,7 @@ l2fwd_get_rsrc(void)
memset(rsrc, 0, sizeof(struct l2fwd_resources));
rsrc->mac_updating = true;
rsrc->rx_queue_per_lcore = 1;
+ rsrc->sched_type = RTE_SCHED_TYPE_ATOMIC;
rsrc->timer_period = 10 * rte_get_timer_hz();
return mz->addr;
diff --git a/examples/l2fwd-event/l2fwd_event.c b/examples/l2fwd-event/l2fwd_event.c
new file mode 100644
index 000000000..48d32d718
--- /dev/null
+++ b/examples/l2fwd-event/l2fwd_event.c
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2019 Marvell International Ltd.
+ */
+
+#include <stdbool.h>
+#include <getopt.h>
+
+#include <rte_atomic.h>
+#include <rte_cycles.h>
+#include <rte_ethdev.h>
+#include <rte_eventdev.h>
+#include <rte_event_eth_rx_adapter.h>
+#include <rte_event_eth_tx_adapter.h>
+#include <rte_lcore.h>
+#include <rte_malloc.h>
+#include <rte_spinlock.h>
+
+#include "l2fwd_event.h"
+
+void
+l2fwd_event_resource_setup(struct l2fwd_resources *rsrc)
+{
+ struct l2fwd_event_resources *evt_rsrc;
+
+ if (!rte_event_dev_count())
+ rte_panic("No Eventdev found\n");
+
+ evt_rsrc = rte_zmalloc("l2fwd_event",
+ sizeof(struct l2fwd_event_resources), 0);
+ if (evt_rsrc == NULL)
+ rte_panic("Failed to allocate memory\n");
+
+ rsrc->evt_rsrc = evt_rsrc;
+}
diff --git a/examples/l2fwd-event/l2fwd_event.h b/examples/l2fwd-event/l2fwd_event.h
new file mode 100644
index 000000000..9a1bb1612
--- /dev/null
+++ b/examples/l2fwd-event/l2fwd_event.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2019 Marvell International Ltd.
+ */
+
+#ifndef __L2FWD_EVENT_H__
+#define __L2FWD_EVENT_H__
+
+#include <rte_common.h>
+#include <rte_event_eth_rx_adapter.h>
+#include <rte_event_eth_tx_adapter.h>
+#include <rte_mbuf.h>
+#include <rte_spinlock.h>
+
+#include "l2fwd_common.h"
+
+struct l2fwd_event_resources {
+};
+
+void l2fwd_event_resource_setup(struct l2fwd_resources *rsrc);
+
+#endif /* __L2FWD_EVENT_H__ */
diff --git a/examples/l2fwd-event/main.c b/examples/l2fwd-event/main.c
index a4e41ddb4..2a1fe4e11 100644
--- a/examples/l2fwd-event/main.c
+++ b/examples/l2fwd-event/main.c
@@ -2,6 +2,7 @@
* Copyright(C) 2019 Marvell International Ltd.
*/
+#include "l2fwd_event.h"
#include "l2fwd_poll.h"
/* display usage */
@@ -16,7 +17,12 @@ l2fwd_event_usage(const char *prgname)
" --[no-]mac-updating: Enable or disable MAC addresses updating (enabled by default)\n"
" When enabled:\n"
" - The source MAC address is replaced by the TX port MAC address\n"
- " - The destination MAC address is replaced by 02:00:00:00:00:TX_PORT_ID\n",
+ " - The destination MAC address is replaced by 02:00:00:00:00:TX_PORT_ID\n"
+ " --mode: Packet transfer mode for I/O, poll or eventdev\n"
+ " Default mode = eventdev\n"
+ " --eventq-sched: Event queue schedule type, ordered, atomic or parallel.\n"
+ " Default: atomic\n"
+ " Valid only if --mode=eventdev\n\n",
prgname);
}
@@ -71,6 +77,28 @@ l2fwd_event_parse_timer_period(const char *q_arg)
return n;
}
+static void
+l2fwd_event_parse_mode(const char *optarg,
+ struct l2fwd_resources *rsrc)
+{
+ if (!strncmp(optarg, "poll", 4))
+ rsrc->event_mode = false;
+ else if (!strncmp(optarg, "eventdev", 8))
+ rsrc->event_mode = true;
+}
+
+static void
+l2fwd_event_parse_eventq_sched(const char *optarg,
+ struct l2fwd_resources *rsrc)
+{
+ if (!strncmp(optarg, "ordered", 7))
+ rsrc->sched_type = RTE_SCHED_TYPE_ORDERED;
+ else if (!strncmp(optarg, "atomic", 6))
+ rsrc->sched_type = RTE_SCHED_TYPE_ATOMIC;
+ else if (!strncmp(optarg, "parallel", 8))
+ rsrc->sched_type = RTE_SCHED_TYPE_PARALLEL;
+}
+
static const char short_options[] =
"p:" /* portmask */
"q:" /* number of queues */
@@ -79,6 +107,8 @@ static const char short_options[] =
#define CMD_LINE_OPT_MAC_UPDATING "mac-updating"
#define CMD_LINE_OPT_NO_MAC_UPDATING "no-mac-updating"
+#define CMD_LINE_OPT_MODE "mode"
+#define CMD_LINE_OPT_EVENTQ_SCHED "eventq-sched"
enum {
/* long options mapped to a short option */
@@ -87,6 +117,8 @@ enum {
* conflict with short options
*/
CMD_LINE_OPT_MIN_NUM = 256,
+ CMD_LINE_OPT_MODE_NUM,
+ CMD_LINE_OPT_EVENTQ_SCHED_NUM,
};
/* Parse the argument given in the command line of the application */
@@ -98,6 +130,10 @@ l2fwd_event_parse_args(int argc, char **argv,
struct option lgopts[] = {
{ CMD_LINE_OPT_MAC_UPDATING, no_argument, &mac_updating, 1},
{ CMD_LINE_OPT_NO_MAC_UPDATING, no_argument, &mac_updating, 0},
+ { CMD_LINE_OPT_MODE, required_argument, NULL,
+ CMD_LINE_OPT_MODE_NUM},
+ { CMD_LINE_OPT_EVENTQ_SCHED, required_argument, NULL,
+ CMD_LINE_OPT_EVENTQ_SCHED_NUM},
{NULL, 0, 0, 0}
};
int opt, ret, timer_secs;
@@ -145,6 +181,14 @@ l2fwd_event_parse_args(int argc, char **argv,
rsrc->timer_period *= rte_get_timer_hz();
break;
+ case CMD_LINE_OPT_MODE_NUM:
+ l2fwd_event_parse_mode(optarg, rsrc);
+ break;
+
+ case CMD_LINE_OPT_EVENTQ_SCHED_NUM:
+ l2fwd_event_parse_eventq_sched(optarg, rsrc);
+ break;
+
/* long options */
case 0:
break;
@@ -404,7 +448,11 @@ main(int argc, char **argv)
if (!nb_ports_available)
rte_panic("All available ports are disabled. Please set portmask.\n");
- l2fwd_poll_resource_setup(rsrc);
+ /* Configure eventdev parameters if required */
+ if (rsrc->event_mode)
+ l2fwd_event_resource_setup(rsrc);
+ else
+ l2fwd_poll_resource_setup(rsrc);
/* initialize port stats */
memset(&rsrc->port_stats, 0,
diff --git a/examples/l2fwd-event/meson.build b/examples/l2fwd-event/meson.build
index c936aa72e..c1ae2037c 100644
--- a/examples/l2fwd-event/meson.build
+++ b/examples/l2fwd-event/meson.build
@@ -12,4 +12,5 @@ sources = files(
'main.c',
'l2fwd_poll.c',
'l2fwd_common.c',
+ 'l2fwd_event.c',
)
--
2.17.1
* [dpdk-dev] [PATCH v8 03/10] examples/l2fwd-event: add infra to split eventdev framework
2019-10-30 16:26 [dpdk-dev] [PATCH v8 00/10] example/l2fwd-event: introduce l2fwd-event example pbhagavatula
2019-10-30 16:26 ` [dpdk-dev] [PATCH v8 01/10] examples/l2fwd-event: add default poll mode routines pbhagavatula
2019-10-30 16:26 ` [dpdk-dev] [PATCH v8 02/10] examples/l2fwd-event: add infra for eventdev pbhagavatula
@ 2019-10-30 16:26 ` pbhagavatula
2019-10-30 16:26 ` [dpdk-dev] [PATCH v8 04/10] examples/l2fwd-event: add event device setup pbhagavatula
` (7 subsequent siblings)
10 siblings, 0 replies; 13+ messages in thread
From: pbhagavatula @ 2019-10-30 16:26 UTC (permalink / raw)
To: jerinj, Marko Kovacevic, Ori Kam, Bruce Richardson, Radu Nicolau,
Akhil Goyal, Tomasz Kantecki, Sunil Kumar Kori, Pavan Nikhilesh
Cc: dev
From: Sunil Kumar Kori <skori@marvell.com>
Add infra to split the eventdev framework based on the event Tx adapter
capability.
If the event Tx adapter has the internal port capability, we use
`rte_event_eth_tx_adapter_enqueue` to transmit packets; otherwise
we use a SINGLE_LINK event queue to enqueue packets to a service
core, which is responsible for transmitting them.
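As a rough, self-contained sketch of the decision described above (it mirrors
the l2fwd_event_capability_setup() helper added in this patch; the helper name
tx_needs_generic_path() and the hard-coded event device id are only for
illustration):

#include <stdbool.h>
#include <stdint.h>

#include <rte_ethdev.h>
#include <rte_event_eth_tx_adapter.h>

/* Return true when at least one ethdev port lacks the internal port
 * capability, i.e. the generic Tx path (SINGLE_LINK queue + service core)
 * must be used; return false when every port can transmit through
 * rte_event_eth_tx_adapter_enqueue().
 */
static bool
tx_needs_generic_path(uint8_t event_d_id)
{
	bool generic = false;
	uint32_t caps;
	uint16_t port_id;

	RTE_ETH_FOREACH_DEV(port_id) {
		caps = 0;
		if (rte_event_eth_tx_adapter_caps_get(event_d_id, port_id,
						      &caps) != 0)
			return true; /* be conservative on query failure */
		if (!(caps & RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT))
			generic = true;
	}

	return generic;
}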
Signed-off-by: Sunil Kumar Kori <skori@marvell.com>
Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
---
examples/l2fwd-event/Makefile | 2 ++
examples/l2fwd-event/l2fwd_event.c | 26 +++++++++++++++++++
examples/l2fwd-event/l2fwd_event.h | 7 +++++
examples/l2fwd-event/l2fwd_event_generic.c | 23 ++++++++++++++++
.../l2fwd-event/l2fwd_event_internal_port.c | 23 ++++++++++++++++
examples/l2fwd-event/meson.build | 2 ++
6 files changed, 83 insertions(+)
create mode 100644 examples/l2fwd-event/l2fwd_event_generic.c
create mode 100644 examples/l2fwd-event/l2fwd_event_internal_port.c
diff --git a/examples/l2fwd-event/Makefile b/examples/l2fwd-event/Makefile
index 08ba1835d..6f4176882 100644
--- a/examples/l2fwd-event/Makefile
+++ b/examples/l2fwd-event/Makefile
@@ -10,6 +10,8 @@ SRCS-y := main.c
SRCS-y += l2fwd_poll.c
SRCS-y += l2fwd_event.c
SRCS-y += l2fwd_common.c
+SRCS-y += l2fwd_event_generic.c
+SRCS-y += l2fwd_event_internal_port.c
# Build using pkg-config variables if possible
ifeq ($(shell pkg-config --exists libdpdk && echo 0),0)
diff --git a/examples/l2fwd-event/l2fwd_event.c b/examples/l2fwd-event/l2fwd_event.c
index 48d32d718..7f90e6311 100644
--- a/examples/l2fwd-event/l2fwd_event.c
+++ b/examples/l2fwd-event/l2fwd_event.c
@@ -17,6 +17,29 @@
#include "l2fwd_event.h"
+static void
+l2fwd_event_capability_setup(struct l2fwd_event_resources *evt_rsrc)
+{
+ uint32_t caps = 0;
+ uint16_t i;
+ int ret;
+
+ RTE_ETH_FOREACH_DEV(i) {
+ ret = rte_event_eth_tx_adapter_caps_get(0, i, &caps);
+ if (ret)
+ rte_panic("Invalid capability for Tx adptr port %d\n",
+ i);
+
+ evt_rsrc->tx_mode_q |= !(caps &
+ RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT);
+ }
+
+ if (evt_rsrc->tx_mode_q)
+ l2fwd_event_set_generic_ops(&evt_rsrc->ops);
+ else
+ l2fwd_event_set_internal_port_ops(&evt_rsrc->ops);
+}
+
void
l2fwd_event_resource_setup(struct l2fwd_resources *rsrc)
{
@@ -31,4 +54,7 @@ l2fwd_event_resource_setup(struct l2fwd_resources *rsrc)
rte_panic("Failed to allocate memory\n");
rsrc->evt_rsrc = evt_rsrc;
+
+ /* Setup eventdev capability callbacks */
+ l2fwd_event_capability_setup(evt_rsrc);
}
diff --git a/examples/l2fwd-event/l2fwd_event.h b/examples/l2fwd-event/l2fwd_event.h
index 9a1bb1612..b7aaa39f9 100644
--- a/examples/l2fwd-event/l2fwd_event.h
+++ b/examples/l2fwd-event/l2fwd_event.h
@@ -13,9 +13,16 @@
#include "l2fwd_common.h"
+struct event_setup_ops {
+};
+
struct l2fwd_event_resources {
+ uint8_t tx_mode_q;
+ struct event_setup_ops ops;
};
void l2fwd_event_resource_setup(struct l2fwd_resources *rsrc);
+void l2fwd_event_set_generic_ops(struct event_setup_ops *ops);
+void l2fwd_event_set_internal_port_ops(struct event_setup_ops *ops);
#endif /* __L2FWD_EVENT_H__ */
diff --git a/examples/l2fwd-event/l2fwd_event_generic.c b/examples/l2fwd-event/l2fwd_event_generic.c
new file mode 100644
index 000000000..9afade7d2
--- /dev/null
+++ b/examples/l2fwd-event/l2fwd_event_generic.c
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2019 Marvell International Ltd.
+ */
+
+#include <stdbool.h>
+#include <getopt.h>
+
+#include <rte_cycles.h>
+#include <rte_ethdev.h>
+#include <rte_eventdev.h>
+#include <rte_event_eth_rx_adapter.h>
+#include <rte_event_eth_tx_adapter.h>
+#include <rte_lcore.h>
+#include <rte_spinlock.h>
+
+#include "l2fwd_common.h"
+#include "l2fwd_event.h"
+
+void
+l2fwd_event_set_generic_ops(struct event_setup_ops *ops)
+{
+ RTE_SET_USED(ops);
+}
diff --git a/examples/l2fwd-event/l2fwd_event_internal_port.c b/examples/l2fwd-event/l2fwd_event_internal_port.c
new file mode 100644
index 000000000..ce95b8e6d
--- /dev/null
+++ b/examples/l2fwd-event/l2fwd_event_internal_port.c
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2019 Marvell International Ltd.
+ */
+
+#include <stdbool.h>
+#include <getopt.h>
+
+#include <rte_cycles.h>
+#include <rte_ethdev.h>
+#include <rte_eventdev.h>
+#include <rte_event_eth_rx_adapter.h>
+#include <rte_event_eth_tx_adapter.h>
+#include <rte_lcore.h>
+#include <rte_spinlock.h>
+
+#include "l2fwd_common.h"
+#include "l2fwd_event.h"
+
+void
+l2fwd_event_set_internal_port_ops(struct event_setup_ops *ops)
+{
+ RTE_SET_USED(ops);
+}
diff --git a/examples/l2fwd-event/meson.build b/examples/l2fwd-event/meson.build
index c1ae2037c..4e9a069d6 100644
--- a/examples/l2fwd-event/meson.build
+++ b/examples/l2fwd-event/meson.build
@@ -13,4 +13,6 @@ sources = files(
'l2fwd_poll.c',
'l2fwd_common.c',
'l2fwd_event.c',
+ 'l2fwd_event_internal_port.c',
+ 'l2fwd_event_generic.c'
)
--
2.17.1
* [dpdk-dev] [PATCH v8 04/10] examples/l2fwd-event: add event device setup
2019-10-30 16:26 [dpdk-dev] [PATCH v8 00/10] example/l2fwd-event: introduce l2fwd-event example pbhagavatula
` (2 preceding siblings ...)
2019-10-30 16:26 ` [dpdk-dev] [PATCH v8 03/10] examples/l2fwd-event: add infra to split eventdev framework pbhagavatula
@ 2019-10-30 16:26 ` pbhagavatula
2019-10-30 16:26 ` [dpdk-dev] [PATCH v8 05/10] examples/l2fwd-event: add eventdev queue and port setup pbhagavatula
` (6 subsequent siblings)
10 siblings, 0 replies; 13+ messages in thread
From: pbhagavatula @ 2019-10-30 16:26 UTC (permalink / raw)
To: jerinj, Marko Kovacevic, Ori Kam, Bruce Richardson, Radu Nicolau,
Akhil Goyal, Tomasz Kantecki, Sunil Kumar Kori, Pavan Nikhilesh
Cc: dev
From: Sunil Kumar Kori <skori@marvell.com>
Add event device setup based on event eth Tx adapter
capabilities.
Signed-off-by: Sunil Kumar Kori <skori@marvell.com>
Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
---
examples/l2fwd-event/l2fwd_event.c | 3 +
examples/l2fwd-event/l2fwd_event.h | 16 ++++
examples/l2fwd-event/l2fwd_event_generic.c | 75 +++++++++++++++++-
.../l2fwd-event/l2fwd_event_internal_port.c | 77 ++++++++++++++++++-
4 files changed, 169 insertions(+), 2 deletions(-)
diff --git a/examples/l2fwd-event/l2fwd_event.c b/examples/l2fwd-event/l2fwd_event.c
index 7f90e6311..a5c1c2c40 100644
--- a/examples/l2fwd-event/l2fwd_event.c
+++ b/examples/l2fwd-event/l2fwd_event.c
@@ -57,4 +57,7 @@ l2fwd_event_resource_setup(struct l2fwd_resources *rsrc)
/* Setup eventdev capability callbacks */
l2fwd_event_capability_setup(evt_rsrc);
+
+ /* Event device configuration */
+ evt_rsrc->ops.event_device_setup(rsrc);
}
diff --git a/examples/l2fwd-event/l2fwd_event.h b/examples/l2fwd-event/l2fwd_event.h
index b7aaa39f9..6b5beb041 100644
--- a/examples/l2fwd-event/l2fwd_event.h
+++ b/examples/l2fwd-event/l2fwd_event.h
@@ -13,11 +13,27 @@
#include "l2fwd_common.h"
+typedef uint32_t (*event_device_setup_cb)(struct l2fwd_resources *rsrc);
+
+struct event_queues {
+ uint8_t nb_queues;
+};
+
+struct event_ports {
+ uint8_t nb_ports;
+};
+
struct event_setup_ops {
+ event_device_setup_cb event_device_setup;
};
struct l2fwd_event_resources {
uint8_t tx_mode_q;
+ uint8_t has_burst;
+ uint8_t event_d_id;
+ uint8_t disable_implicit_release;
+ struct event_ports evp;
+ struct event_queues evq;
struct event_setup_ops ops;
};
diff --git a/examples/l2fwd-event/l2fwd_event_generic.c b/examples/l2fwd-event/l2fwd_event_generic.c
index 9afade7d2..33e570585 100644
--- a/examples/l2fwd-event/l2fwd_event_generic.c
+++ b/examples/l2fwd-event/l2fwd_event_generic.c
@@ -16,8 +16,81 @@
#include "l2fwd_common.h"
#include "l2fwd_event.h"
+static uint32_t
+l2fwd_event_device_setup_generic(struct l2fwd_resources *rsrc)
+{
+ struct l2fwd_event_resources *evt_rsrc = rsrc->evt_rsrc;
+ struct rte_event_dev_config event_d_conf = {
+ .nb_events_limit = 4096,
+ .nb_event_queue_flows = 1024,
+ .nb_event_port_dequeue_depth = 128,
+ .nb_event_port_enqueue_depth = 128
+ };
+ struct rte_event_dev_info dev_info;
+ const uint8_t event_d_id = 0; /* Always use first event device only */
+ uint32_t event_queue_cfg = 0;
+ uint16_t ethdev_count = 0;
+ uint16_t num_workers = 0;
+ uint16_t port_id;
+ int ret;
+
+ RTE_ETH_FOREACH_DEV(port_id) {
+ if ((rsrc->enabled_port_mask & (1 << port_id)) == 0)
+ continue;
+ ethdev_count++;
+ }
+
+ /* Event device configuration */
+ rte_event_dev_info_get(event_d_id, &dev_info);
+ evt_rsrc->disable_implicit_release = !!(dev_info.event_dev_cap &
+ RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE);
+
+ if (dev_info.event_dev_cap & RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES)
+ event_queue_cfg |= RTE_EVENT_QUEUE_CFG_ALL_TYPES;
+
+ /* One queue for each ethdev port + one Tx adapter Single link queue. */
+ event_d_conf.nb_event_queues = ethdev_count + 1;
+ if (dev_info.max_event_queues < event_d_conf.nb_event_queues)
+ event_d_conf.nb_event_queues = dev_info.max_event_queues;
+
+ if (dev_info.max_num_events < event_d_conf.nb_events_limit)
+ event_d_conf.nb_events_limit = dev_info.max_num_events;
+
+ if (dev_info.max_event_queue_flows < event_d_conf.nb_event_queue_flows)
+ event_d_conf.nb_event_queue_flows =
+ dev_info.max_event_queue_flows;
+
+ if (dev_info.max_event_port_dequeue_depth <
+ event_d_conf.nb_event_port_dequeue_depth)
+ event_d_conf.nb_event_port_dequeue_depth =
+ dev_info.max_event_port_dequeue_depth;
+
+ if (dev_info.max_event_port_enqueue_depth <
+ event_d_conf.nb_event_port_enqueue_depth)
+ event_d_conf.nb_event_port_enqueue_depth =
+ dev_info.max_event_port_enqueue_depth;
+
+ num_workers = rte_lcore_count() - rte_service_lcore_count();
+ if (dev_info.max_event_ports < num_workers)
+ num_workers = dev_info.max_event_ports;
+
+ event_d_conf.nb_event_ports = num_workers;
+ evt_rsrc->evp.nb_ports = num_workers;
+ evt_rsrc->evq.nb_queues = event_d_conf.nb_event_queues;
+
+ evt_rsrc->has_burst = !!(dev_info.event_dev_cap &
+ RTE_EVENT_DEV_CAP_BURST_MODE);
+
+ ret = rte_event_dev_configure(event_d_id, &event_d_conf);
+ if (ret < 0)
+ rte_panic("Error in configuring event device\n");
+
+ evt_rsrc->event_d_id = event_d_id;
+ return event_queue_cfg;
+}
+
void
l2fwd_event_set_generic_ops(struct event_setup_ops *ops)
{
- RTE_SET_USED(ops);
+ ops->event_device_setup = l2fwd_event_device_setup_generic;
}
diff --git a/examples/l2fwd-event/l2fwd_event_internal_port.c b/examples/l2fwd-event/l2fwd_event_internal_port.c
index ce95b8e6d..acd98798e 100644
--- a/examples/l2fwd-event/l2fwd_event_internal_port.c
+++ b/examples/l2fwd-event/l2fwd_event_internal_port.c
@@ -16,8 +16,83 @@
#include "l2fwd_common.h"
#include "l2fwd_event.h"
+static uint32_t
+l2fwd_event_device_setup_internal_port(struct l2fwd_resources *rsrc)
+{
+ struct l2fwd_event_resources *evt_rsrc = rsrc->evt_rsrc;
+ struct rte_event_dev_config event_d_conf = {
+ .nb_events_limit = 4096,
+ .nb_event_queue_flows = 1024,
+ .nb_event_port_dequeue_depth = 128,
+ .nb_event_port_enqueue_depth = 128
+ };
+ struct rte_event_dev_info dev_info;
+ uint8_t disable_implicit_release;
+ const uint8_t event_d_id = 0; /* Always use first event device only */
+ uint32_t event_queue_cfg = 0;
+ uint16_t ethdev_count = 0;
+ uint16_t num_workers = 0;
+ uint16_t port_id;
+ int ret;
+
+ RTE_ETH_FOREACH_DEV(port_id) {
+ if ((rsrc->enabled_port_mask & (1 << port_id)) == 0)
+ continue;
+ ethdev_count++;
+ }
+
+ /* Event device configuration */
+ rte_event_dev_info_get(event_d_id, &dev_info);
+
+ disable_implicit_release = !!(dev_info.event_dev_cap &
+ RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE);
+ evt_rsrc->disable_implicit_release =
+ disable_implicit_release;
+
+ if (dev_info.event_dev_cap & RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES)
+ event_queue_cfg |= RTE_EVENT_QUEUE_CFG_ALL_TYPES;
+
+ event_d_conf.nb_event_queues = ethdev_count;
+ if (dev_info.max_event_queues < event_d_conf.nb_event_queues)
+ event_d_conf.nb_event_queues = dev_info.max_event_queues;
+
+ if (dev_info.max_num_events < event_d_conf.nb_events_limit)
+ event_d_conf.nb_events_limit = dev_info.max_num_events;
+
+ if (dev_info.max_event_queue_flows < event_d_conf.nb_event_queue_flows)
+ event_d_conf.nb_event_queue_flows =
+ dev_info.max_event_queue_flows;
+
+ if (dev_info.max_event_port_dequeue_depth <
+ event_d_conf.nb_event_port_dequeue_depth)
+ event_d_conf.nb_event_port_dequeue_depth =
+ dev_info.max_event_port_dequeue_depth;
+
+ if (dev_info.max_event_port_enqueue_depth <
+ event_d_conf.nb_event_port_enqueue_depth)
+ event_d_conf.nb_event_port_enqueue_depth =
+ dev_info.max_event_port_enqueue_depth;
+
+ num_workers = rte_lcore_count();
+ if (dev_info.max_event_ports < num_workers)
+ num_workers = dev_info.max_event_ports;
+
+ event_d_conf.nb_event_ports = num_workers;
+ evt_rsrc->evp.nb_ports = num_workers;
+ evt_rsrc->evq.nb_queues = event_d_conf.nb_event_queues;
+ evt_rsrc->has_burst = !!(dev_info.event_dev_cap &
+ RTE_EVENT_DEV_CAP_BURST_MODE);
+
+ ret = rte_event_dev_configure(event_d_id, &event_d_conf);
+ if (ret < 0)
+ rte_panic("Error in configuring event device\n");
+
+ evt_rsrc->event_d_id = event_d_id;
+ return event_queue_cfg;
+}
+
void
l2fwd_event_set_internal_port_ops(struct event_setup_ops *ops)
{
- RTE_SET_USED(ops);
+ ops->event_device_setup = l2fwd_event_device_setup_internal_port;
}
--
2.17.1
* [dpdk-dev] [PATCH v8 05/10] examples/l2fwd-event: add eventdev queue and port setup
2019-10-30 16:26 [dpdk-dev] [PATCH v8 00/10] example/l2fwd-event: introduce l2fwd-event example pbhagavatula
` (3 preceding siblings ...)
2019-10-30 16:26 ` [dpdk-dev] [PATCH v8 04/10] examples/l2fwd-event: add event device setup pbhagavatula
@ 2019-10-30 16:26 ` pbhagavatula
2019-10-30 16:26 ` [dpdk-dev] [PATCH v8 06/10] examples/l2fwd-event: add event Rx/Tx adapter setup pbhagavatula
` (5 subsequent siblings)
10 siblings, 0 replies; 13+ messages in thread
From: pbhagavatula @ 2019-10-30 16:26 UTC (permalink / raw)
To: jerinj, Marko Kovacevic, Ori Kam, Bruce Richardson, Radu Nicolau,
Akhil Goyal, Tomasz Kantecki, Sunil Kumar Kori, Pavan Nikhilesh
Cc: dev
From: Sunil Kumar Kori <skori@marvell.com>
Add event device queue and port setup based on event eth Tx adapter
capabilities.
Signed-off-by: Sunil Kumar Kori <skori@marvell.com>
Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
---
examples/l2fwd-event/l2fwd_event.c | 9 +-
examples/l2fwd-event/l2fwd_event.h | 10 ++
examples/l2fwd-event/l2fwd_event_generic.c | 104 ++++++++++++++++++
.../l2fwd-event/l2fwd_event_internal_port.c | 100 +++++++++++++++++
4 files changed, 222 insertions(+), 1 deletion(-)
diff --git a/examples/l2fwd-event/l2fwd_event.c b/examples/l2fwd-event/l2fwd_event.c
index a5c1c2c40..8dd00a6d3 100644
--- a/examples/l2fwd-event/l2fwd_event.c
+++ b/examples/l2fwd-event/l2fwd_event.c
@@ -44,6 +44,7 @@ void
l2fwd_event_resource_setup(struct l2fwd_resources *rsrc)
{
struct l2fwd_event_resources *evt_rsrc;
+ uint32_t event_queue_cfg;
if (!rte_event_dev_count())
rte_panic("No Eventdev found\n");
@@ -59,5 +60,11 @@ l2fwd_event_resource_setup(struct l2fwd_resources *rsrc)
l2fwd_event_capability_setup(evt_rsrc);
/* Event device configuration */
- evt_rsrc->ops.event_device_setup(rsrc);
+ event_queue_cfg = evt_rsrc->ops.event_device_setup(rsrc);
+
+ /* Event queue configuration */
+ evt_rsrc->ops.event_queue_setup(rsrc, event_queue_cfg);
+
+ /* Event port configuration */
+ evt_rsrc->ops.event_port_setup(rsrc);
}
diff --git a/examples/l2fwd-event/l2fwd_event.h b/examples/l2fwd-event/l2fwd_event.h
index 6b5beb041..fe7857cdf 100644
--- a/examples/l2fwd-event/l2fwd_event.h
+++ b/examples/l2fwd-event/l2fwd_event.h
@@ -14,27 +14,37 @@
#include "l2fwd_common.h"
typedef uint32_t (*event_device_setup_cb)(struct l2fwd_resources *rsrc);
+typedef void (*event_port_setup_cb)(struct l2fwd_resources *rsrc);
+typedef void (*event_queue_setup_cb)(struct l2fwd_resources *rsrc,
+ uint32_t event_queue_cfg);
struct event_queues {
+ uint8_t *event_q_id;
uint8_t nb_queues;
};
struct event_ports {
+ uint8_t *event_p_id;
uint8_t nb_ports;
+ rte_spinlock_t lock;
};
struct event_setup_ops {
event_device_setup_cb event_device_setup;
+ event_queue_setup_cb event_queue_setup;
+ event_port_setup_cb event_port_setup;
};
struct l2fwd_event_resources {
uint8_t tx_mode_q;
+ uint8_t deq_depth;
uint8_t has_burst;
uint8_t event_d_id;
uint8_t disable_implicit_release;
struct event_ports evp;
struct event_queues evq;
struct event_setup_ops ops;
+ struct rte_event_port_conf def_p_conf;
};
void l2fwd_event_resource_setup(struct l2fwd_resources *rsrc);
diff --git a/examples/l2fwd-event/l2fwd_event_generic.c b/examples/l2fwd-event/l2fwd_event_generic.c
index 33e570585..f72d21c0b 100644
--- a/examples/l2fwd-event/l2fwd_event_generic.c
+++ b/examples/l2fwd-event/l2fwd_event_generic.c
@@ -89,8 +89,112 @@ l2fwd_event_device_setup_generic(struct l2fwd_resources *rsrc)
return event_queue_cfg;
}
+static void
+l2fwd_event_port_setup_generic(struct l2fwd_resources *rsrc)
+{
+ struct l2fwd_event_resources *evt_rsrc = rsrc->evt_rsrc;
+ uint8_t event_d_id = evt_rsrc->event_d_id;
+ struct rte_event_port_conf event_p_conf = {
+ .dequeue_depth = 32,
+ .enqueue_depth = 32,
+ .new_event_threshold = 4096
+ };
+ struct rte_event_port_conf def_p_conf;
+ uint8_t event_p_id;
+ int32_t ret;
+
+ evt_rsrc->evp.event_p_id = (uint8_t *)malloc(sizeof(uint8_t) *
+ evt_rsrc->evp.nb_ports);
+ if (!evt_rsrc->evp.event_p_id)
+ rte_panic("No space is available\n");
+
+ memset(&def_p_conf, 0, sizeof(struct rte_event_port_conf));
+ rte_event_port_default_conf_get(event_d_id, 0, &def_p_conf);
+
+ if (def_p_conf.new_event_threshold < event_p_conf.new_event_threshold)
+ event_p_conf.new_event_threshold =
+ def_p_conf.new_event_threshold;
+
+ if (def_p_conf.dequeue_depth < event_p_conf.dequeue_depth)
+ event_p_conf.dequeue_depth = def_p_conf.dequeue_depth;
+
+ if (def_p_conf.enqueue_depth < event_p_conf.enqueue_depth)
+ event_p_conf.enqueue_depth = def_p_conf.enqueue_depth;
+
+ event_p_conf.disable_implicit_release =
+ evt_rsrc->disable_implicit_release;
+ evt_rsrc->deq_depth = def_p_conf.dequeue_depth;
+
+ for (event_p_id = 0; event_p_id < evt_rsrc->evp.nb_ports;
+ event_p_id++) {
+ ret = rte_event_port_setup(event_d_id, event_p_id,
+ &event_p_conf);
+ if (ret < 0)
+ rte_panic("Error in configuring event port %d\n",
+ event_p_id);
+
+ ret = rte_event_port_link(event_d_id, event_p_id,
+ evt_rsrc->evq.event_q_id,
+ NULL,
+ evt_rsrc->evq.nb_queues - 1);
+ if (ret != (evt_rsrc->evq.nb_queues - 1))
+ rte_panic("Error in linking event port %d to queues\n",
+ event_p_id);
+ evt_rsrc->evp.event_p_id[event_p_id] = event_p_id;
+ }
+ /* init spinlock */
+ rte_spinlock_init(&evt_rsrc->evp.lock);
+
+ evt_rsrc->def_p_conf = event_p_conf;
+}
+
+static void
+l2fwd_event_queue_setup_generic(struct l2fwd_resources *rsrc,
+ uint32_t event_queue_cfg)
+{
+ struct l2fwd_event_resources *evt_rsrc = rsrc->evt_rsrc;
+ uint8_t event_d_id = evt_rsrc->event_d_id;
+ struct rte_event_queue_conf event_q_conf = {
+ .nb_atomic_flows = 1024,
+ .nb_atomic_order_sequences = 1024,
+ .event_queue_cfg = event_queue_cfg,
+ .priority = RTE_EVENT_DEV_PRIORITY_NORMAL
+ };
+ struct rte_event_queue_conf def_q_conf;
+ uint8_t event_q_id;
+ int32_t ret;
+
+ event_q_conf.schedule_type = rsrc->sched_type;
+ evt_rsrc->evq.event_q_id = (uint8_t *)malloc(sizeof(uint8_t) *
+ evt_rsrc->evq.nb_queues);
+ if (!evt_rsrc->evq.event_q_id)
+ rte_panic("Memory allocation failure\n");
+
+ rte_event_queue_default_conf_get(event_d_id, 0, &def_q_conf);
+ if (def_q_conf.nb_atomic_flows < event_q_conf.nb_atomic_flows)
+ event_q_conf.nb_atomic_flows = def_q_conf.nb_atomic_flows;
+
+ for (event_q_id = 0; event_q_id < (evt_rsrc->evq.nb_queues - 1);
+ event_q_id++) {
+ ret = rte_event_queue_setup(event_d_id, event_q_id,
+ &event_q_conf);
+ if (ret < 0)
+ rte_panic("Error in configuring event queue\n");
+ evt_rsrc->evq.event_q_id[event_q_id] = event_q_id;
+ }
+
+ event_q_conf.event_queue_cfg |= RTE_EVENT_QUEUE_CFG_SINGLE_LINK;
+ event_q_conf.priority = RTE_EVENT_DEV_PRIORITY_HIGHEST;
+ ret = rte_event_queue_setup(event_d_id, event_q_id, &event_q_conf);
+ if (ret < 0)
+ rte_panic("Error in configuring event queue for Tx adapter\n");
+ evt_rsrc->evq.event_q_id[event_q_id] = event_q_id;
+}
+
void
l2fwd_event_set_generic_ops(struct event_setup_ops *ops)
{
ops->event_device_setup = l2fwd_event_device_setup_generic;
+ ops->event_queue_setup = l2fwd_event_queue_setup_generic;
+ ops->event_port_setup = l2fwd_event_port_setup_generic;
}
diff --git a/examples/l2fwd-event/l2fwd_event_internal_port.c b/examples/l2fwd-event/l2fwd_event_internal_port.c
index acd98798e..dab3f24ee 100644
--- a/examples/l2fwd-event/l2fwd_event_internal_port.c
+++ b/examples/l2fwd-event/l2fwd_event_internal_port.c
@@ -91,8 +91,108 @@ l2fwd_event_device_setup_internal_port(struct l2fwd_resources *rsrc)
return event_queue_cfg;
}
+static void
+l2fwd_event_port_setup_internal_port(struct l2fwd_resources *rsrc)
+{
+ struct l2fwd_event_resources *evt_rsrc = rsrc->evt_rsrc;
+ uint8_t event_d_id = evt_rsrc->event_d_id;
+ struct rte_event_port_conf event_p_conf = {
+ .dequeue_depth = 32,
+ .enqueue_depth = 32,
+ .new_event_threshold = 4096
+ };
+ struct rte_event_port_conf def_p_conf;
+ uint8_t event_p_id;
+ int32_t ret;
+
+ evt_rsrc->evp.event_p_id = (uint8_t *)malloc(sizeof(uint8_t) *
+ evt_rsrc->evp.nb_ports);
+ if (!evt_rsrc->evp.event_p_id)
+ rte_panic("Failed to allocate memory for Event Ports\n");
+
+ rte_event_port_default_conf_get(event_d_id, 0, &def_p_conf);
+ if (def_p_conf.new_event_threshold < event_p_conf.new_event_threshold)
+ event_p_conf.new_event_threshold =
+ def_p_conf.new_event_threshold;
+
+ if (def_p_conf.dequeue_depth < event_p_conf.dequeue_depth)
+ event_p_conf.dequeue_depth = def_p_conf.dequeue_depth;
+
+ if (def_p_conf.enqueue_depth < event_p_conf.enqueue_depth)
+ event_p_conf.enqueue_depth = def_p_conf.enqueue_depth;
+
+ event_p_conf.disable_implicit_release =
+ evt_rsrc->disable_implicit_release;
+
+ for (event_p_id = 0; event_p_id < evt_rsrc->evp.nb_ports;
+ event_p_id++) {
+ ret = rte_event_port_setup(event_d_id, event_p_id,
+ &event_p_conf);
+ if (ret < 0)
+ rte_panic("Error in configuring event port %d\n",
+ event_p_id);
+
+ ret = rte_event_port_link(event_d_id, event_p_id, NULL,
+ NULL, 0);
+ if (ret < 0)
+ rte_panic("Error in linking event port %d to queue\n",
+ event_p_id);
+ evt_rsrc->evp.event_p_id[event_p_id] = event_p_id;
+ }
+
+ /* init spinlock */
+ rte_spinlock_init(&evt_rsrc->evp.lock);
+
+ evt_rsrc->def_p_conf = event_p_conf;
+}
+
+static void
+l2fwd_event_queue_setup_internal_port(struct l2fwd_resources *rsrc,
+ uint32_t event_queue_cfg)
+{
+ struct l2fwd_event_resources *evt_rsrc = rsrc->evt_rsrc;
+ uint8_t event_d_id = evt_rsrc->event_d_id;
+ struct rte_event_queue_conf event_q_conf = {
+ .nb_atomic_flows = 1024,
+ .nb_atomic_order_sequences = 1024,
+ .event_queue_cfg = event_queue_cfg,
+ .priority = RTE_EVENT_DEV_PRIORITY_NORMAL
+ };
+ struct rte_event_queue_conf def_q_conf;
+ uint8_t event_q_id = 0;
+ int32_t ret;
+
+ rte_event_queue_default_conf_get(event_d_id, event_q_id, &def_q_conf);
+
+ if (def_q_conf.nb_atomic_flows < event_q_conf.nb_atomic_flows)
+ event_q_conf.nb_atomic_flows = def_q_conf.nb_atomic_flows;
+
+ if (def_q_conf.nb_atomic_order_sequences <
+ event_q_conf.nb_atomic_order_sequences)
+ event_q_conf.nb_atomic_order_sequences =
+ def_q_conf.nb_atomic_order_sequences;
+
+ event_q_conf.event_queue_cfg = event_queue_cfg;
+ event_q_conf.schedule_type = rsrc->sched_type;
+ evt_rsrc->evq.event_q_id = (uint8_t *)malloc(sizeof(uint8_t) *
+ evt_rsrc->evq.nb_queues);
+ if (!evt_rsrc->evq.event_q_id)
+ rte_panic("Memory allocation failure\n");
+
+ for (event_q_id = 0; event_q_id < evt_rsrc->evq.nb_queues;
+ event_q_id++) {
+ ret = rte_event_queue_setup(event_d_id, event_q_id,
+ &event_q_conf);
+ if (ret < 0)
+ rte_panic("Error in configuring event queue\n");
+ evt_rsrc->evq.event_q_id[event_q_id] = event_q_id;
+ }
+}
+
void
l2fwd_event_set_internal_port_ops(struct event_setup_ops *ops)
{
ops->event_device_setup = l2fwd_event_device_setup_internal_port;
+ ops->event_queue_setup = l2fwd_event_queue_setup_internal_port;
+ ops->event_port_setup = l2fwd_event_port_setup_internal_port;
}
--
2.17.1
^ permalink raw reply [flat|nested] 13+ messages in thread
* [dpdk-dev] [PATCH v8 06/10] examples/l2fwd-event: add event Rx/Tx adapter setup
2019-10-30 16:26 [dpdk-dev] [PATCH v8 00/10] example/l2fwd-event: introduce l2fwd-event example pbhagavatula
` (4 preceding siblings ...)
2019-10-30 16:26 ` [dpdk-dev] [PATCH v8 05/10] examples/l2fwd-event: add eventdev queue and port setup pbhagavatula
@ 2019-10-30 16:26 ` pbhagavatula
2019-10-30 16:26 ` [dpdk-dev] [PATCH v8 07/10] examples/l2fwd-event: add service core setup pbhagavatula
` (4 subsequent siblings)
10 siblings, 0 replies; 13+ messages in thread
From: pbhagavatula @ 2019-10-30 16:26 UTC (permalink / raw)
To: jerinj, Marko Kovacevic, Ori Kam, Bruce Richardson, Radu Nicolau,
Akhil Goyal, Tomasz Kantecki, Sunil Kumar Kori, Pavan Nikhilesh
Cc: dev
From: Sunil Kumar Kori <skori@marvell.com>
Add event eth Rx/Tx adapter setup for both generic and internal port
event device pipelines.
Signed-off-by: Sunil Kumar Kori <skori@marvell.com>
Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
---
examples/l2fwd-event/l2fwd_event.c | 3 +
examples/l2fwd-event/l2fwd_event.h | 16 +++
examples/l2fwd-event/l2fwd_event_generic.c | 115 ++++++++++++++++++
.../l2fwd-event/l2fwd_event_internal_port.c | 96 +++++++++++++++
4 files changed, 230 insertions(+)
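For reference, the choice between the two pipelines boils down to the Rx adapter
capability flag; a minimal sketch of such a check (the helper name below is
illustrative and not part of this patch):

#include <rte_ethdev.h>
#include <rte_eventdev.h>
#include <rte_event_eth_rx_adapter.h>

/* Sketch: return 1 when every enabled port can inject packets into the
 * event device without a service core, i.e. the internal port pipeline
 * can be used instead of the generic one. */
static int
l2fwd_all_ports_have_rx_internal_port(uint8_t event_d_id, uint32_t port_mask)
{
	uint32_t caps;
	uint16_t port_id;

	RTE_ETH_FOREACH_DEV(port_id) {
		if ((port_mask & (1 << port_id)) == 0)
			continue;
		if (rte_event_eth_rx_adapter_caps_get(event_d_id, port_id,
						      &caps))
			return 0;
		if (!(caps & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT))
			return 0;
	}
	return 1;
}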
diff --git a/examples/l2fwd-event/l2fwd_event.c b/examples/l2fwd-event/l2fwd_event.c
index 8dd00a6d3..33c702739 100644
--- a/examples/l2fwd-event/l2fwd_event.c
+++ b/examples/l2fwd-event/l2fwd_event.c
@@ -67,4 +67,7 @@ l2fwd_event_resource_setup(struct l2fwd_resources *rsrc)
/* Event port configuration */
evt_rsrc->ops.event_port_setup(rsrc);
+
+ /* Rx/Tx adapters configuration */
+ evt_rsrc->ops.adapter_setup(rsrc);
}
diff --git a/examples/l2fwd-event/l2fwd_event.h b/examples/l2fwd-event/l2fwd_event.h
index fe7857cdf..1d7090ddf 100644
--- a/examples/l2fwd-event/l2fwd_event.h
+++ b/examples/l2fwd-event/l2fwd_event.h
@@ -17,6 +17,7 @@ typedef uint32_t (*event_device_setup_cb)(struct l2fwd_resources *rsrc);
typedef void (*event_port_setup_cb)(struct l2fwd_resources *rsrc);
typedef void (*event_queue_setup_cb)(struct l2fwd_resources *rsrc,
uint32_t event_queue_cfg);
+typedef void (*adapter_setup_cb)(struct l2fwd_resources *rsrc);
struct event_queues {
uint8_t *event_q_id;
@@ -29,10 +30,23 @@ struct event_ports {
rte_spinlock_t lock;
};
+struct event_rx_adptr {
+ uint32_t service_id;
+ uint8_t nb_rx_adptr;
+ uint8_t *rx_adptr;
+};
+
+struct event_tx_adptr {
+ uint32_t service_id;
+ uint8_t nb_tx_adptr;
+ uint8_t *tx_adptr;
+};
+
struct event_setup_ops {
event_device_setup_cb event_device_setup;
event_queue_setup_cb event_queue_setup;
event_port_setup_cb event_port_setup;
+ adapter_setup_cb adapter_setup;
};
struct l2fwd_event_resources {
@@ -44,6 +58,8 @@ struct l2fwd_event_resources {
struct event_ports evp;
struct event_queues evq;
struct event_setup_ops ops;
+ struct event_rx_adptr rx_adptr;
+ struct event_tx_adptr tx_adptr;
struct rte_event_port_conf def_p_conf;
};
diff --git a/examples/l2fwd-event/l2fwd_event_generic.c b/examples/l2fwd-event/l2fwd_event_generic.c
index f72d21c0b..f99608173 100644
--- a/examples/l2fwd-event/l2fwd_event_generic.c
+++ b/examples/l2fwd-event/l2fwd_event_generic.c
@@ -191,10 +191,125 @@ l2fwd_event_queue_setup_generic(struct l2fwd_resources *rsrc,
evt_rsrc->evq.event_q_id[event_q_id] = event_q_id;
}
+static void
+l2fwd_rx_tx_adapter_setup_generic(struct l2fwd_resources *rsrc)
+{
+ struct l2fwd_event_resources *evt_rsrc = rsrc->evt_rsrc;
+ struct rte_event_eth_rx_adapter_queue_conf eth_q_conf = {
+ .rx_queue_flags = 0,
+ .ev = {
+ .queue_id = 0,
+ .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
+ }
+ };
+ uint8_t event_d_id = evt_rsrc->event_d_id;
+ uint8_t rx_adptr_id = 0;
+ uint8_t tx_adptr_id = 0;
+ uint8_t tx_port_id = 0;
+ uint16_t port_id;
+ uint32_t service_id;
+ int32_t ret, i = 0;
+
+ /* Rx adapter setup */
+ evt_rsrc->rx_adptr.nb_rx_adptr = 1;
+ evt_rsrc->rx_adptr.rx_adptr = (uint8_t *)malloc(sizeof(uint8_t) *
+ evt_rsrc->rx_adptr.nb_rx_adptr);
+ if (!evt_rsrc->rx_adptr.rx_adptr) {
+ free(evt_rsrc->evp.event_p_id);
+ free(evt_rsrc->evq.event_q_id);
+ rte_panic("Failed to allocate memery for Rx adapter\n");
+ }
+
+ ret = rte_event_eth_rx_adapter_create(rx_adptr_id, event_d_id,
+ &evt_rsrc->def_p_conf);
+ if (ret)
+ rte_panic("Failed to create rx adapter\n");
+
+ /* Configure user requested sched type */
+ eth_q_conf.ev.sched_type = rsrc->sched_type;
+ RTE_ETH_FOREACH_DEV(port_id) {
+ if ((rsrc->enabled_port_mask & (1 << port_id)) == 0)
+ continue;
+ eth_q_conf.ev.queue_id = evt_rsrc->evq.event_q_id[i];
+ ret = rte_event_eth_rx_adapter_queue_add(rx_adptr_id, port_id,
+ -1, &eth_q_conf);
+ if (ret)
+ rte_panic("Failed to add queues to Rx adapter\n");
+ if (i < evt_rsrc->evq.nb_queues)
+ i++;
+ }
+
+ ret = rte_event_eth_rx_adapter_service_id_get(rx_adptr_id, &service_id);
+ if (ret != -ESRCH && ret != 0)
+ rte_panic("Error getting the service ID for rx adptr\n");
+
+ rte_service_runstate_set(service_id, 1);
+ rte_service_set_runstate_mapped_check(service_id, 0);
+ evt_rsrc->rx_adptr.service_id = service_id;
+
+ ret = rte_event_eth_rx_adapter_start(rx_adptr_id);
+ if (ret)
+ rte_panic("Rx adapter[%d] start Failed\n", rx_adptr_id);
+
+ evt_rsrc->rx_adptr.rx_adptr[0] = rx_adptr_id;
+
+ /* Tx adapter setup */
+ evt_rsrc->tx_adptr.nb_tx_adptr = 1;
+ evt_rsrc->tx_adptr.tx_adptr = (uint8_t *)malloc(sizeof(uint8_t) *
+ evt_rsrc->tx_adptr.nb_tx_adptr);
+ if (!evt_rsrc->tx_adptr.tx_adptr) {
+ free(evt_rsrc->rx_adptr.rx_adptr);
+ free(evt_rsrc->evp.event_p_id);
+ free(evt_rsrc->evq.event_q_id);
+ rte_panic("Failed to allocate memery for Rx adapter\n");
+ }
+
+ ret = rte_event_eth_tx_adapter_create(tx_adptr_id, event_d_id,
+ &evt_rsrc->def_p_conf);
+ if (ret)
+ rte_panic("Failed to create tx adapter\n");
+
+ RTE_ETH_FOREACH_DEV(port_id) {
+ if ((rsrc->enabled_port_mask & (1 << port_id)) == 0)
+ continue;
+ ret = rte_event_eth_tx_adapter_queue_add(tx_adptr_id, port_id,
+ -1);
+ if (ret)
+ rte_panic("Failed to add queues to Tx adapter\n");
+ }
+
+ ret = rte_event_eth_tx_adapter_service_id_get(tx_adptr_id, &service_id);
+ if (ret != -ESRCH && ret != 0)
+ rte_panic("Failed to get Tx adapter service ID\n");
+
+ rte_service_runstate_set(service_id, 1);
+ rte_service_set_runstate_mapped_check(service_id, 0);
+ evt_rsrc->tx_adptr.service_id = service_id;
+
+ ret = rte_event_eth_tx_adapter_event_port_get(tx_adptr_id, &tx_port_id);
+ if (ret)
+ rte_panic("Failed to get Tx adapter port id: %d\n", ret);
+
+ ret = rte_event_port_link(event_d_id, tx_port_id,
+ &evt_rsrc->evq.event_q_id[
+ evt_rsrc->evq.nb_queues - 1],
+ NULL, 1);
+ if (ret != 1)
+ rte_panic("Unable to link Tx adapter port to Tx queue:err=%d\n",
+ ret);
+
+ ret = rte_event_eth_tx_adapter_start(tx_adptr_id);
+ if (ret)
+ rte_panic("Tx adapter[%d] start Failed\n", tx_adptr_id);
+
+ evt_rsrc->tx_adptr.tx_adptr[0] = tx_adptr_id;
+}
+
void
l2fwd_event_set_generic_ops(struct event_setup_ops *ops)
{
ops->event_device_setup = l2fwd_event_device_setup_generic;
ops->event_queue_setup = l2fwd_event_queue_setup_generic;
ops->event_port_setup = l2fwd_event_port_setup_generic;
+ ops->adapter_setup = l2fwd_rx_tx_adapter_setup_generic;
}
diff --git a/examples/l2fwd-event/l2fwd_event_internal_port.c b/examples/l2fwd-event/l2fwd_event_internal_port.c
index dab3f24ee..bed94754f 100644
--- a/examples/l2fwd-event/l2fwd_event_internal_port.c
+++ b/examples/l2fwd-event/l2fwd_event_internal_port.c
@@ -189,10 +189,106 @@ l2fwd_event_queue_setup_internal_port(struct l2fwd_resources *rsrc,
}
}
+static void
+l2fwd_rx_tx_adapter_setup_internal_port(struct l2fwd_resources *rsrc)
+{
+ struct l2fwd_event_resources *evt_rsrc = rsrc->evt_rsrc;
+ struct rte_event_eth_rx_adapter_queue_conf eth_q_conf = {
+ .rx_queue_flags = 0,
+ .ev = {
+ .queue_id = 0,
+ .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
+ }
+ };
+ uint8_t event_d_id = evt_rsrc->event_d_id;
+ uint16_t adapter_id = 0;
+ uint16_t nb_adapter = 0;
+ uint16_t port_id;
+ uint8_t q_id = 0;
+ int ret;
+
+ RTE_ETH_FOREACH_DEV(port_id) {
+ if ((rsrc->enabled_port_mask & (1 << port_id)) == 0)
+ continue;
+ nb_adapter++;
+ }
+
+ evt_rsrc->rx_adptr.nb_rx_adptr = nb_adapter;
+ evt_rsrc->rx_adptr.rx_adptr = (uint8_t *)malloc(sizeof(uint8_t) *
+ evt_rsrc->rx_adptr.nb_rx_adptr);
+ if (!evt_rsrc->rx_adptr.rx_adptr) {
+ free(evt_rsrc->evp.event_p_id);
+ free(evt_rsrc->evq.event_q_id);
+ rte_panic("Failed to allocate memery for Rx adapter\n");
+ }
+
+
+ RTE_ETH_FOREACH_DEV(port_id) {
+ if ((rsrc->enabled_port_mask & (1 << port_id)) == 0)
+ continue;
+ ret = rte_event_eth_rx_adapter_create(adapter_id, event_d_id,
+ &evt_rsrc->def_p_conf);
+ if (ret)
+ rte_panic("Failed to create rx adapter[%d]\n",
+ adapter_id);
+
+ /* Configure user requested sched type*/
+ eth_q_conf.ev.sched_type = rsrc->sched_type;
+ eth_q_conf.ev.queue_id = evt_rsrc->evq.event_q_id[q_id];
+ ret = rte_event_eth_rx_adapter_queue_add(adapter_id, port_id,
+ -1, &eth_q_conf);
+ if (ret)
+ rte_panic("Failed to add queues to Rx adapter\n");
+
+ ret = rte_event_eth_rx_adapter_start(adapter_id);
+ if (ret)
+ rte_panic("Rx adapter[%d] start Failed\n", adapter_id);
+
+ evt_rsrc->rx_adptr.rx_adptr[adapter_id] = adapter_id;
+ adapter_id++;
+ if (q_id < evt_rsrc->evq.nb_queues)
+ q_id++;
+ }
+
+ evt_rsrc->tx_adptr.nb_tx_adptr = nb_adapter;
+ evt_rsrc->tx_adptr.tx_adptr = (uint8_t *)malloc(sizeof(uint8_t) *
+ evt_rsrc->tx_adptr.nb_tx_adptr);
+ if (!evt_rsrc->tx_adptr.tx_adptr) {
+ free(evt_rsrc->rx_adptr.rx_adptr);
+ free(evt_rsrc->evp.event_p_id);
+ free(evt_rsrc->evq.event_q_id);
+ rte_panic("Failed to allocate memery for Rx adapter\n");
+ }
+
+ adapter_id = 0;
+ RTE_ETH_FOREACH_DEV(port_id) {
+ if ((rsrc->enabled_port_mask & (1 << port_id)) == 0)
+ continue;
+ ret = rte_event_eth_tx_adapter_create(adapter_id, event_d_id,
+ &evt_rsrc->def_p_conf);
+ if (ret)
+ rte_panic("Failed to create tx adapter[%d]\n",
+ adapter_id);
+
+ ret = rte_event_eth_tx_adapter_queue_add(adapter_id, port_id,
+ -1);
+ if (ret)
+ rte_panic("Failed to add queues to Tx adapter\n");
+
+ ret = rte_event_eth_tx_adapter_start(adapter_id);
+ if (ret)
+ rte_panic("Tx adapter[%d] start Failed\n", adapter_id);
+
+ evt_rsrc->tx_adptr.tx_adptr[adapter_id] = adapter_id;
+ adapter_id++;
+ }
+}
+
void
l2fwd_event_set_internal_port_ops(struct event_setup_ops *ops)
{
ops->event_device_setup = l2fwd_event_device_setup_internal_port;
ops->event_queue_setup = l2fwd_event_queue_setup_internal_port;
ops->event_port_setup = l2fwd_event_port_setup_internal_port;
+ ops->adapter_setup = l2fwd_rx_tx_adapter_setup_internal_port;
}
--
2.17.1
^ permalink raw reply [flat|nested] 13+ messages in thread
* [dpdk-dev] [PATCH v8 07/10] examples/l2fwd-event: add service core setup
2019-10-30 16:26 [dpdk-dev] [PATCH v8 00/10] example/l2fwd-event: introduce l2fwd-event example pbhagavatula
` (5 preceding siblings ...)
2019-10-30 16:26 ` [dpdk-dev] [PATCH v8 06/10] examples/l2fwd-event: add event Rx/Tx adapter setup pbhagavatula
@ 2019-10-30 16:26 ` pbhagavatula
2019-10-30 16:26 ` [dpdk-dev] [PATCH v8 08/10] examples/l2fwd-event: add eventdev main loop pbhagavatula
` (3 subsequent siblings)
10 siblings, 0 replies; 13+ messages in thread
From: pbhagavatula @ 2019-10-30 16:26 UTC (permalink / raw)
To: jerinj, Marko Kovacevic, Ori Kam, Bruce Richardson, Radu Nicolau,
Akhil Goyal, Tomasz Kantecki, Sunil Kumar Kori, Pavan Nikhilesh
Cc: dev
From: Pavan Nikhilesh <pbhagavatula@marvell.com>
Setup service cores for eventdev and Rx/Tx adapter when they don't have
internal port capability.
Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
---
examples/l2fwd-event/l2fwd_event.c | 82 ++++++++++++++++++++++++++++++
examples/l2fwd-event/l2fwd_event.h | 1 +
examples/l2fwd-event/main.c | 3 ++
3 files changed, 86 insertions(+)
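When the adapters lack internal port capability (for example with the software
eventdev), the services configured here need service lcores at runtime; a typical
invocation reserves a few cores for them (sketch, core numbers and port mask are
arbitrary):

	./build/l2fwd-event -l 0-7 -s 0xf0 -n 4 --vdev event_sw0 -- -p 0x3 --mode=eventdev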
diff --git a/examples/l2fwd-event/l2fwd_event.c b/examples/l2fwd-event/l2fwd_event.c
index 33c702739..562d61292 100644
--- a/examples/l2fwd-event/l2fwd_event.c
+++ b/examples/l2fwd-event/l2fwd_event.c
@@ -17,6 +17,88 @@
#include "l2fwd_event.h"
+static inline int
+l2fwd_event_service_enable(uint32_t service_id)
+{
+ uint8_t min_service_count = UINT8_MAX;
+ uint32_t slcore_array[RTE_MAX_LCORE];
+ unsigned int slcore = 0;
+ uint8_t service_count;
+ int32_t slcore_count;
+
+ if (!rte_service_lcore_count())
+ return -ENOENT;
+
+ slcore_count = rte_service_lcore_list(slcore_array, RTE_MAX_LCORE);
+ if (slcore_count < 0)
+ return -ENOENT;
+ /* Get the core which has least number of services running. */
+ while (slcore_count--) {
+ /* Reset default mapping */
+ rte_service_map_lcore_set(service_id,
+ slcore_array[slcore_count], 0);
+ service_count = rte_service_lcore_count_services(
+ slcore_array[slcore_count]);
+ if (service_count < min_service_count) {
+ slcore = slcore_array[slcore_count];
+ min_service_count = service_count;
+ }
+ }
+ if (rte_service_map_lcore_set(service_id, slcore, 1))
+ return -ENOENT;
+ rte_service_lcore_start(slcore);
+
+ return 0;
+}
+
+void
+l2fwd_event_service_setup(struct l2fwd_resources *rsrc)
+{
+ struct l2fwd_event_resources *evt_rsrc = rsrc->evt_rsrc;
+ struct rte_event_dev_info evdev_info;
+ uint32_t service_id, caps;
+ int ret, i;
+
+ rte_event_dev_info_get(evt_rsrc->event_d_id, &evdev_info);
+ if (evdev_info.event_dev_cap & RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED) {
+ ret = rte_event_dev_service_id_get(evt_rsrc->event_d_id,
+ &service_id);
+ if (ret != -ESRCH && ret != 0)
+ rte_panic("Error in starting eventdev service\n");
+ l2fwd_event_service_enable(service_id);
+ }
+
+ for (i = 0; i < evt_rsrc->rx_adptr.nb_rx_adptr; i++) {
+ ret = rte_event_eth_rx_adapter_caps_get(evt_rsrc->event_d_id,
+ evt_rsrc->rx_adptr.rx_adptr[i], &caps);
+ if (ret < 0)
+ rte_panic("Failed to get Rx adapter[%d] caps\n",
+ evt_rsrc->rx_adptr.rx_adptr[i]);
+ ret = rte_event_eth_rx_adapter_service_id_get(
+ evt_rsrc->event_d_id,
+ &service_id);
+ if (ret != -ESRCH && ret != 0)
+ rte_panic("Error in starting Rx adapter[%d] service\n",
+ evt_rsrc->rx_adptr.rx_adptr[i]);
+ l2fwd_event_service_enable(service_id);
+ }
+
+ for (i = 0; i < evt_rsrc->tx_adptr.nb_tx_adptr; i++) {
+ ret = rte_event_eth_tx_adapter_caps_get(evt_rsrc->event_d_id,
+ evt_rsrc->tx_adptr.tx_adptr[i], &caps);
+ if (ret < 0)
+ rte_panic("Failed to get Rx adapter[%d] caps\n",
+ evt_rsrc->tx_adptr.tx_adptr[i]);
+ ret = rte_event_eth_tx_adapter_service_id_get(
+ evt_rsrc->event_d_id,
+ &service_id);
+ if (ret != -ESRCH && ret != 0)
+ rte_panic("Error in starting Rx adapter[%d] service\n",
+ evt_rsrc->tx_adptr.tx_adptr[i]);
+ l2fwd_event_service_enable(service_id);
+ }
+}
+
static void
l2fwd_event_capability_setup(struct l2fwd_event_resources *evt_rsrc)
{
diff --git a/examples/l2fwd-event/l2fwd_event.h b/examples/l2fwd-event/l2fwd_event.h
index 1d7090ddf..ebfbfe460 100644
--- a/examples/l2fwd-event/l2fwd_event.h
+++ b/examples/l2fwd-event/l2fwd_event.h
@@ -66,5 +66,6 @@ struct l2fwd_event_resources {
void l2fwd_event_resource_setup(struct l2fwd_resources *rsrc);
void l2fwd_event_set_generic_ops(struct event_setup_ops *ops);
void l2fwd_event_set_internal_port_ops(struct event_setup_ops *ops);
+void l2fwd_event_service_setup(struct l2fwd_resources *rsrc);
#endif /* __L2FWD_EVENT_H__ */
diff --git a/examples/l2fwd-event/main.c b/examples/l2fwd-event/main.c
index 2a1fe4e11..1ae18bb51 100644
--- a/examples/l2fwd-event/main.c
+++ b/examples/l2fwd-event/main.c
@@ -471,6 +471,9 @@ main(int argc, char **argv)
port_id);
}
+ if (rsrc->event_mode)
+ l2fwd_event_service_setup(rsrc);
+
check_all_ports_link_status(rsrc, rsrc->enabled_port_mask);
/* launch per-lcore init on every lcore */
--
2.17.1
^ permalink raw reply [flat|nested] 13+ messages in thread
* [dpdk-dev] [PATCH v8 08/10] examples/l2fwd-event: add eventdev main loop
2019-10-30 16:26 [dpdk-dev] [PATCH v8 00/10] example/l2fwd-event: introduce l2fwd-event example pbhagavatula
` (6 preceding siblings ...)
2019-10-30 16:26 ` [dpdk-dev] [PATCH v8 07/10] examples/l2fwd-event: add service core setup pbhagavatula
@ 2019-10-30 16:26 ` pbhagavatula
2019-10-30 16:26 ` [dpdk-dev] [PATCH v8 09/10] examples/l2fwd-event: add graceful teardown pbhagavatula
` (2 subsequent siblings)
10 siblings, 0 replies; 13+ messages in thread
From: pbhagavatula @ 2019-10-30 16:26 UTC (permalink / raw)
To: jerinj, Marko Kovacevic, Ori Kam, Bruce Richardson, Radu Nicolau,
Akhil Goyal, Tomasz Kantecki, Sunil Kumar Kori, Pavan Nikhilesh
Cc: dev
From: Pavan Nikhilesh <pbhagavatula@marvell.com>
Add event dev main loop based on enabled l2fwd options and eventdev
capabilities.
Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
---
examples/l2fwd-event/l2fwd_common.c | 6 +
examples/l2fwd-event/l2fwd_common.h | 1 +
examples/l2fwd-event/l2fwd_event.c | 238 ++++++++++++++++++++++++++++
examples/l2fwd-event/l2fwd_event.h | 2 +
examples/l2fwd-event/main.c | 68 +++++++-
examples/l2fwd-event/meson.build | 5 +
6 files changed, 312 insertions(+), 8 deletions(-)
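To illustrate how the dispatch table added below is consumed: with MAC updating
enabled, Tx via the internal port (tx_mode_q == 0) and a burst capable event
device, the lookup resolves to event_loop[1][0][1], i.e.
l2fwd_event_main_loop_tx_d_brst_mac():

	/* [MAC_UPDT][TX_MODE][BURST] */
	evt_rsrc->ops.l2fwd_event_loop = event_loop[rsrc->mac_updating]
						   [evt_rsrc->tx_mode_q]
						   [evt_rsrc->has_burst];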
diff --git a/examples/l2fwd-event/l2fwd_common.c b/examples/l2fwd-event/l2fwd_common.c
index c206415d0..4ba788550 100644
--- a/examples/l2fwd-event/l2fwd_common.c
+++ b/examples/l2fwd-event/l2fwd_common.c
@@ -18,6 +18,12 @@ l2fwd_event_init_ports(struct l2fwd_resources *rsrc)
uint16_t port_id;
int ret;
+ if (rsrc->event_mode) {
+ port_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
+ port_conf.rx_adv_conf.rss_conf.rss_key = NULL;
+ port_conf.rx_adv_conf.rss_conf.rss_hf = ETH_RSS_IP;
+ }
+
/* Initialise each port */
RTE_ETH_FOREACH_DEV(port_id) {
struct rte_eth_conf local_port_conf = port_conf;
diff --git a/examples/l2fwd-event/l2fwd_common.h b/examples/l2fwd-event/l2fwd_common.h
index a4e17ab97..7e33ee749 100644
--- a/examples/l2fwd-event/l2fwd_common.h
+++ b/examples/l2fwd-event/l2fwd_common.h
@@ -114,6 +114,7 @@ l2fwd_get_rsrc(void)
memset(rsrc, 0, sizeof(struct l2fwd_resources));
rsrc->mac_updating = true;
+ rsrc->event_mode = true;
rsrc->rx_queue_per_lcore = 1;
rsrc->sched_type = RTE_SCHED_TYPE_ATOMIC;
rsrc->timer_period = 10 * rte_get_timer_hz();
diff --git a/examples/l2fwd-event/l2fwd_event.c b/examples/l2fwd-event/l2fwd_event.c
index 562d61292..c7782cbc5 100644
--- a/examples/l2fwd-event/l2fwd_event.c
+++ b/examples/l2fwd-event/l2fwd_event.c
@@ -17,6 +17,12 @@
#include "l2fwd_event.h"
+#define L2FWD_EVENT_SINGLE 0x1
+#define L2FWD_EVENT_BURST 0x2
+#define L2FWD_EVENT_TX_DIRECT 0x4
+#define L2FWD_EVENT_TX_ENQ 0x8
+#define L2FWD_EVENT_UPDT_MAC 0x10
+
static inline int
l2fwd_event_service_enable(uint32_t service_id)
{
@@ -122,11 +128,233 @@ l2fwd_event_capability_setup(struct l2fwd_event_resources *evt_rsrc)
l2fwd_event_set_internal_port_ops(&evt_rsrc->ops);
}
+static __rte_noinline int
+l2fwd_get_free_event_port(struct l2fwd_event_resources *evt_rsrc)
+{
+ static int index;
+ int port_id;
+
+ rte_spinlock_lock(&evt_rsrc->evp.lock);
+ if (index >= evt_rsrc->evp.nb_ports) {
+ printf("No free event port is available\n");
+ rte_spinlock_unlock(&evt_rsrc->evp.lock);
+ return -1;
+ }
+
+ port_id = evt_rsrc->evp.event_p_id[index];
+ index++;
+ rte_spinlock_unlock(&evt_rsrc->evp.lock);
+
+ return port_id;
+}
+
+static __rte_always_inline void
+l2fwd_event_fwd(struct l2fwd_resources *rsrc, struct rte_event *ev,
+ const uint8_t tx_q_id, const uint64_t timer_period,
+ const uint32_t flags)
+{
+ struct rte_mbuf *mbuf = ev->mbuf;
+ uint16_t dst_port;
+
+ rte_prefetch0(rte_pktmbuf_mtod(mbuf, void *));
+ dst_port = rsrc->dst_ports[mbuf->port];
+
+ if (timer_period > 0)
+ __atomic_fetch_add(&rsrc->port_stats[mbuf->port].rx,
+ 1, __ATOMIC_RELAXED);
+ mbuf->port = dst_port;
+
+ if (flags & L2FWD_EVENT_UPDT_MAC)
+ l2fwd_mac_updating(mbuf, dst_port, &rsrc->eth_addr[dst_port]);
+
+ if (flags & L2FWD_EVENT_TX_ENQ) {
+ ev->queue_id = tx_q_id;
+ ev->op = RTE_EVENT_OP_FORWARD;
+ }
+
+ if (flags & L2FWD_EVENT_TX_DIRECT)
+ rte_event_eth_tx_adapter_txq_set(mbuf, 0);
+
+ if (timer_period > 0)
+ __atomic_fetch_add(&rsrc->port_stats[mbuf->port].tx,
+ 1, __ATOMIC_RELAXED);
+}
+
+static __rte_always_inline void
+l2fwd_event_loop_single(struct l2fwd_resources *rsrc,
+ const uint32_t flags)
+{
+ struct l2fwd_event_resources *evt_rsrc = rsrc->evt_rsrc;
+ const int port_id = l2fwd_get_free_event_port(evt_rsrc);
+ const uint8_t tx_q_id = evt_rsrc->evq.event_q_id[
+ evt_rsrc->evq.nb_queues - 1];
+ const uint64_t timer_period = rsrc->timer_period;
+ const uint8_t event_d_id = evt_rsrc->event_d_id;
+ struct rte_event ev;
+
+ if (port_id < 0)
+ return;
+
+ printf("%s(): entering eventdev main loop on lcore %u\n", __func__,
+ rte_lcore_id());
+
+ while (!rsrc->force_quit) {
+ /* Read packet from eventdev */
+ if (!rte_event_dequeue_burst(event_d_id, port_id, &ev, 1, 0))
+ continue;
+
+ l2fwd_event_fwd(rsrc, &ev, tx_q_id, timer_period, flags);
+
+ if (flags & L2FWD_EVENT_TX_ENQ) {
+ while (rte_event_enqueue_burst(event_d_id, port_id,
+ &ev, 1) &&
+ !rsrc->force_quit)
+ ;
+ }
+
+ if (flags & L2FWD_EVENT_TX_DIRECT) {
+ while (!rte_event_eth_tx_adapter_enqueue(event_d_id,
+ port_id,
+ &ev, 1, 0) &&
+ !rsrc->force_quit)
+ ;
+ }
+ }
+}
+
+static __rte_always_inline void
+l2fwd_event_loop_burst(struct l2fwd_resources *rsrc,
+ const uint32_t flags)
+{
+ struct l2fwd_event_resources *evt_rsrc = rsrc->evt_rsrc;
+ const int port_id = l2fwd_get_free_event_port(evt_rsrc);
+ const uint8_t tx_q_id = evt_rsrc->evq.event_q_id[
+ evt_rsrc->evq.nb_queues - 1];
+ const uint64_t timer_period = rsrc->timer_period;
+ const uint8_t event_d_id = evt_rsrc->event_d_id;
+ const uint8_t deq_len = evt_rsrc->deq_depth;
+ struct rte_event ev[MAX_PKT_BURST];
+ uint16_t nb_rx, nb_tx;
+ uint8_t i;
+
+ if (port_id < 0)
+ return;
+
+ printf("%s(): entering eventdev main loop on lcore %u\n", __func__,
+ rte_lcore_id());
+
+ while (!rsrc->force_quit) {
+ /* Read packet from eventdev */
+ nb_rx = rte_event_dequeue_burst(event_d_id, port_id, ev,
+ deq_len, 0);
+ if (nb_rx == 0)
+ continue;
+
+ for (i = 0; i < nb_rx; i++) {
+ l2fwd_event_fwd(rsrc, &ev[i], tx_q_id, timer_period,
+ flags);
+ }
+
+ if (flags & L2FWD_EVENT_TX_ENQ) {
+ nb_tx = rte_event_enqueue_burst(event_d_id, port_id,
+ ev, nb_rx);
+ while (nb_tx < nb_rx && !rsrc->force_quit)
+ nb_tx += rte_event_enqueue_burst(event_d_id,
+ port_id, ev + nb_tx,
+ nb_rx - nb_tx);
+ }
+
+ if (flags & L2FWD_EVENT_TX_DIRECT) {
+ nb_tx = rte_event_eth_tx_adapter_enqueue(event_d_id,
+ port_id, ev,
+ nb_rx, 0);
+ while (nb_tx < nb_rx && !rsrc->force_quit)
+ nb_tx += rte_event_eth_tx_adapter_enqueue(
+ event_d_id, port_id,
+ ev + nb_tx, nb_rx - nb_tx, 0);
+ }
+ }
+}
+
+static __rte_always_inline void
+l2fwd_event_loop(struct l2fwd_resources *rsrc,
+ const uint32_t flags)
+{
+ if (flags & L2FWD_EVENT_SINGLE)
+ l2fwd_event_loop_single(rsrc, flags);
+ if (flags & L2FWD_EVENT_BURST)
+ l2fwd_event_loop_burst(rsrc, flags);
+}
+
+static void __rte_noinline
+l2fwd_event_main_loop_tx_d(struct l2fwd_resources *rsrc)
+{
+ l2fwd_event_loop(rsrc,
+ L2FWD_EVENT_TX_DIRECT | L2FWD_EVENT_SINGLE);
+}
+
+static void __rte_noinline
+l2fwd_event_main_loop_tx_d_brst(struct l2fwd_resources *rsrc)
+{
+ l2fwd_event_loop(rsrc, L2FWD_EVENT_TX_DIRECT | L2FWD_EVENT_BURST);
+}
+
+static void __rte_noinline
+l2fwd_event_main_loop_tx_q(struct l2fwd_resources *rsrc)
+{
+ l2fwd_event_loop(rsrc, L2FWD_EVENT_TX_ENQ | L2FWD_EVENT_SINGLE);
+}
+
+static void __rte_noinline
+l2fwd_event_main_loop_tx_q_brst(struct l2fwd_resources *rsrc)
+{
+ l2fwd_event_loop(rsrc, L2FWD_EVENT_TX_ENQ | L2FWD_EVENT_BURST);
+}
+
+static void __rte_noinline
+l2fwd_event_main_loop_tx_d_mac(struct l2fwd_resources *rsrc)
+{
+ l2fwd_event_loop(rsrc, L2FWD_EVENT_UPDT_MAC |
+ L2FWD_EVENT_TX_DIRECT | L2FWD_EVENT_SINGLE);
+}
+
+static void __rte_noinline
+l2fwd_event_main_loop_tx_d_brst_mac(struct l2fwd_resources *rsrc)
+{
+ l2fwd_event_loop(rsrc, L2FWD_EVENT_UPDT_MAC |
+ L2FWD_EVENT_TX_DIRECT | L2FWD_EVENT_BURST);
+}
+
+static void __rte_noinline
+l2fwd_event_main_loop_tx_q_mac(struct l2fwd_resources *rsrc)
+{
+ l2fwd_event_loop(rsrc, L2FWD_EVENT_UPDT_MAC |
+ L2FWD_EVENT_TX_ENQ | L2FWD_EVENT_SINGLE);
+}
+
+static void __rte_noinline
+l2fwd_event_main_loop_tx_q_brst_mac(struct l2fwd_resources *rsrc)
+{
+ l2fwd_event_loop(rsrc, L2FWD_EVENT_UPDT_MAC |
+ L2FWD_EVENT_TX_ENQ | L2FWD_EVENT_BURST);
+}
+
void
l2fwd_event_resource_setup(struct l2fwd_resources *rsrc)
{
+ /* [MAC_UPDT][TX_MODE][BURST] */
+ const event_loop_cb event_loop[2][2][2] = {
+ [0][0][0] = l2fwd_event_main_loop_tx_d,
+ [0][0][1] = l2fwd_event_main_loop_tx_d_brst,
+ [0][1][0] = l2fwd_event_main_loop_tx_q,
+ [0][1][1] = l2fwd_event_main_loop_tx_q_brst,
+ [1][0][0] = l2fwd_event_main_loop_tx_d_mac,
+ [1][0][1] = l2fwd_event_main_loop_tx_d_brst_mac,
+ [1][1][0] = l2fwd_event_main_loop_tx_q_mac,
+ [1][1][1] = l2fwd_event_main_loop_tx_q_brst_mac,
+ };
struct l2fwd_event_resources *evt_rsrc;
uint32_t event_queue_cfg;
+ int ret;
if (!rte_event_dev_count())
rte_panic("No Eventdev found\n");
@@ -152,4 +380,14 @@ l2fwd_event_resource_setup(struct l2fwd_resources *rsrc)
/* Rx/Tx adapters configuration */
evt_rsrc->ops.adapter_setup(rsrc);
+
+ /* Start event device */
+ ret = rte_event_dev_start(evt_rsrc->event_d_id);
+ if (ret < 0)
+ rte_panic("Error in starting eventdev\n");
+
+ evt_rsrc->ops.l2fwd_event_loop = event_loop
+ [rsrc->mac_updating]
+ [evt_rsrc->tx_mode_q]
+ [evt_rsrc->has_burst];
}
diff --git a/examples/l2fwd-event/l2fwd_event.h b/examples/l2fwd-event/l2fwd_event.h
index ebfbfe460..78f22e5f9 100644
--- a/examples/l2fwd-event/l2fwd_event.h
+++ b/examples/l2fwd-event/l2fwd_event.h
@@ -18,6 +18,7 @@ typedef void (*event_port_setup_cb)(struct l2fwd_resources *rsrc);
typedef void (*event_queue_setup_cb)(struct l2fwd_resources *rsrc,
uint32_t event_queue_cfg);
typedef void (*adapter_setup_cb)(struct l2fwd_resources *rsrc);
+typedef void (*event_loop_cb)(struct l2fwd_resources *rsrc);
struct event_queues {
uint8_t *event_q_id;
@@ -47,6 +48,7 @@ struct event_setup_ops {
event_queue_setup_cb event_queue_setup;
event_port_setup_cb event_port_setup;
adapter_setup_cb adapter_setup;
+ event_loop_cb l2fwd_event_loop;
};
struct l2fwd_event_resources {
diff --git a/examples/l2fwd-event/main.c b/examples/l2fwd-event/main.c
index 1ae18bb51..01eb19f3e 100644
--- a/examples/l2fwd-event/main.c
+++ b/examples/l2fwd-event/main.c
@@ -214,8 +214,12 @@ l2fwd_launch_one_lcore(void *args)
{
struct l2fwd_resources *rsrc = args;
struct l2fwd_poll_resources *poll_rsrc = rsrc->poll_rsrc;
+ struct l2fwd_event_resources *evt_rsrc = rsrc->evt_rsrc;
- poll_rsrc->poll_main_loop(rsrc);
+ if (rsrc->event_mode)
+ evt_rsrc->ops.l2fwd_event_loop(rsrc);
+ else
+ poll_rsrc->poll_main_loop(rsrc);
return 0;
}
@@ -304,9 +308,9 @@ print_stats(struct l2fwd_resources *rsrc)
if ((rsrc->enabled_port_mask & (1 << port_id)) == 0)
continue;
printf("\nStatistics for port %u ------------------------------"
- "\nPackets sent: %24"PRIu64
- "\nPackets received: %20"PRIu64
- "\nPackets dropped: %21"PRIu64,
+ "\nPackets sent: %29"PRIu64
+ "\nPackets received: %25"PRIu64
+ "\nPackets dropped: %26"PRIu64,
port_id,
rsrc->port_stats[port_id].tx,
rsrc->port_stats[port_id].rx,
@@ -317,10 +321,58 @@ print_stats(struct l2fwd_resources *rsrc)
total_packets_tx += rsrc->port_stats[port_id].tx;
total_packets_rx += rsrc->port_stats[port_id].rx;
}
- printf("\nAggregate statistics ==============================="
- "\nTotal packets sent: %18"PRIu64
- "\nTotal packets received: %14"PRIu64
- "\nTotal packets dropped: %15"PRIu64,
+
+ if (rsrc->event_mode) {
+ struct l2fwd_event_resources *evt_rsrc = rsrc->evt_rsrc;
+ struct rte_event_eth_rx_adapter_stats rx_adptr_stats;
+ struct rte_event_eth_tx_adapter_stats tx_adptr_stats;
+ int ret, i;
+
+ for (i = 0; i < evt_rsrc->rx_adptr.nb_rx_adptr; i++) {
+ ret = rte_event_eth_rx_adapter_stats_get(
+ evt_rsrc->rx_adptr.rx_adptr[i],
+ &rx_adptr_stats);
+ if (ret < 0)
+ continue;
+ printf("\nRx adapter[%d] statistics===================="
+ "\nReceive queue poll count: %17"PRIu64
+ "\nReceived packet count: %20"PRIu64
+ "\nEventdev enqueue count: %19"PRIu64
+ "\nEventdev enqueue retry count: %13"PRIu64
+ "\nReceived packet dropped count: %12"PRIu64
+ "\nRx enqueue start timestamp: %15"PRIu64
+ "\nRx enqueue block cycles: %18"PRIu64
+ "\nRx enqueue unblock timestamp: %13"PRIu64,
+ evt_rsrc->rx_adptr.rx_adptr[i],
+ rx_adptr_stats.rx_poll_count,
+ rx_adptr_stats.rx_packets,
+ rx_adptr_stats.rx_enq_count,
+ rx_adptr_stats.rx_enq_retry,
+ rx_adptr_stats.rx_dropped,
+ rx_adptr_stats.rx_enq_start_ts,
+ rx_adptr_stats.rx_enq_block_cycles,
+ rx_adptr_stats.rx_enq_end_ts);
+ }
+ for (i = 0; i < evt_rsrc->tx_adptr.nb_tx_adptr; i++) {
+ ret = rte_event_eth_tx_adapter_stats_get(
+ evt_rsrc->tx_adptr.tx_adptr[i],
+ &tx_adptr_stats);
+ if (ret < 0)
+ continue;
+ printf("\nTx adapter[%d] statistics===================="
+ "\nNumber of transmit retries: %15"PRIu64
+ "\nNumber of packets transmitted: %12"PRIu64
+ "\nNumber of packets dropped: %16"PRIu64,
+ evt_rsrc->tx_adptr.tx_adptr[i],
+ tx_adptr_stats.tx_retry,
+ tx_adptr_stats.tx_packets,
+ tx_adptr_stats.tx_dropped);
+ }
+ }
+ printf("\nAggregate lcore statistics ========================="
+ "\nTotal packets sent: %23"PRIu64
+ "\nTotal packets received: %19"PRIu64
+ "\nTotal packets dropped: %20"PRIu64,
total_packets_tx,
total_packets_rx,
total_packets_dropped);
diff --git a/examples/l2fwd-event/meson.build b/examples/l2fwd-event/meson.build
index 4e9a069d6..c4664c3a3 100644
--- a/examples/l2fwd-event/meson.build
+++ b/examples/l2fwd-event/meson.build
@@ -16,3 +16,8 @@ sources = files(
'l2fwd_event_internal_port.c',
'l2fwd_event_generic.c'
)
+
+# for clang 32-bit compiles we need libatomic for 64-bit atomic ops
+if cc.get_id() == 'clang' and dpdk_conf.get('RTE_ARCH_64') == false
+ ext_deps += cc.find_library('atomic')
+endif
--
2.17.1
^ permalink raw reply [flat|nested] 13+ messages in thread
* [dpdk-dev] [PATCH v8 09/10] examples/l2fwd-event: add graceful teardown
2019-10-30 16:26 [dpdk-dev] [PATCH v8 00/10] example/l2fwd-event: introduce l2fwd-event example pbhagavatula
` (7 preceding siblings ...)
2019-10-30 16:26 ` [dpdk-dev] [PATCH v8 08/10] examples/l2fwd-event: add eventdev main loop pbhagavatula
@ 2019-10-30 16:26 ` pbhagavatula
2019-10-30 17:03 ` [dpdk-dev] [PATCH v8 10/10] doc: add application usage guide for l2fwd-event pbhagavatula
2019-10-30 18:17 ` [dpdk-dev] [PATCH v8 00/10] example/l2fwd-event: introduce l2fwd-event example Jerin Jacob
10 siblings, 0 replies; 13+ messages in thread
From: pbhagavatula @ 2019-10-30 16:26 UTC (permalink / raw)
To: jerinj, Marko Kovacevic, Ori Kam, Bruce Richardson, Radu Nicolau,
Akhil Goyal, Tomasz Kantecki, Sunil Kumar Kori, Pavan Nikhilesh
Cc: dev
From: Pavan Nikhilesh <pbhagavatula@marvell.com>
Add graceful teardown that addresses both event mode and poll mode.
Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
---
examples/l2fwd-event/main.c | 50 +++++++++++++++++++++++++++++--------
1 file changed, 40 insertions(+), 10 deletions(-)
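The event mode ordering below can be condensed to the following sketch (error
handling and the per-port/per-adapter loops omitted):

	/* Stop injecting new events first */
	rte_event_eth_rx_adapter_stop(rx_adapter_id);
	rte_event_eth_tx_adapter_stop(tx_adapter_id);
	/* Stop the ports, then wait for the worker lcores to drain and exit */
	rte_eth_dev_stop(port_id);
	rte_eal_mp_wait_lcore();
	/* Release ethdev and eventdev resources */
	rte_eth_dev_close(port_id);
	rte_event_dev_stop(event_d_id);
	rte_event_dev_close(event_d_id);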
diff --git a/examples/l2fwd-event/main.c b/examples/l2fwd-event/main.c
index 01eb19f3e..142c00e8f 100644
--- a/examples/l2fwd-event/main.c
+++ b/examples/l2fwd-event/main.c
@@ -426,7 +426,7 @@ main(int argc, char **argv)
uint16_t port_id, last_port;
uint32_t nb_mbufs;
uint16_t nb_ports;
- int ret;
+ int i, ret;
/* init EAL */
ret = rte_eal_init(argc, argv);
@@ -532,16 +532,46 @@ main(int argc, char **argv)
rte_eal_mp_remote_launch(l2fwd_launch_one_lcore, rsrc,
SKIP_MASTER);
l2fwd_event_print_stats(rsrc);
- rte_eal_mp_wait_lcore();
+ if (rsrc->event_mode) {
+ struct l2fwd_event_resources *evt_rsrc =
+ rsrc->evt_rsrc;
+ for (i = 0; i < evt_rsrc->rx_adptr.nb_rx_adptr; i++)
+ rte_event_eth_rx_adapter_stop(
+ evt_rsrc->rx_adptr.rx_adptr[i]);
+ for (i = 0; i < evt_rsrc->tx_adptr.nb_tx_adptr; i++)
+ rte_event_eth_tx_adapter_stop(
+ evt_rsrc->tx_adptr.tx_adptr[i]);
- RTE_ETH_FOREACH_DEV(port_id) {
- if ((rsrc->enabled_port_mask &
- (1 << port_id)) == 0)
- continue;
- printf("Closing port %d...", port_id);
- rte_eth_dev_stop(port_id);
- rte_eth_dev_close(port_id);
- printf(" Done\n");
+ RTE_ETH_FOREACH_DEV(port_id) {
+ if ((rsrc->enabled_port_mask &
+ (1 << port_id)) == 0)
+ continue;
+ rte_eth_dev_stop(port_id);
+ }
+
+ rte_eal_mp_wait_lcore();
+ RTE_ETH_FOREACH_DEV(port_id) {
+ if ((rsrc->enabled_port_mask &
+ (1 << port_id)) == 0)
+ continue;
+ rte_eth_dev_close(port_id);
+ }
+
+ rte_event_dev_stop(evt_rsrc->event_d_id);
+ rte_event_dev_close(evt_rsrc->event_d_id);
+
+ } else {
+ rte_eal_mp_wait_lcore();
+
+ RTE_ETH_FOREACH_DEV(port_id) {
+ if ((rsrc->enabled_port_mask &
+ (1 << port_id)) == 0)
+ continue;
+ printf("Closing port %d...", port_id);
+ rte_eth_dev_stop(port_id);
+ rte_eth_dev_close(port_id);
+ printf(" Done\n");
+ }
}
printf("Bye...\n");
--
2.17.1
^ permalink raw reply [flat|nested] 13+ messages in thread
* [dpdk-dev] [PATCH v8 10/10] doc: add application usage guide for l2fwd-event
2019-10-30 16:26 [dpdk-dev] [PATCH v8 00/10] example/l2fwd-event: introduce l2fwd-event example pbhagavatula
` (8 preceding siblings ...)
2019-10-30 16:26 ` [dpdk-dev] [PATCH v8 09/10] examples/l2fwd-event: add graceful teardown pbhagavatula
@ 2019-10-30 17:03 ` pbhagavatula
2019-10-30 18:17 ` [dpdk-dev] [PATCH v8 00/10] example/l2fwd-event: introduce l2fwd-event example Jerin Jacob
10 siblings, 0 replies; 13+ messages in thread
From: pbhagavatula @ 2019-10-30 17:03 UTC (permalink / raw)
To: jerinj, Thomas Monjalon, John McNamara, Marko Kovacevic, Ori Kam,
Bruce Richardson, Radu Nicolau, Akhil Goyal, Tomasz Kantecki,
Sunil Kumar Kori, Pavan Nikhilesh
Cc: dev
From: Sunil Kumar Kori <skori@marvell.com>
Add documentation for l2fwd-event example.
Update release notes.
Signed-off-by: Sunil Kumar Kori <skori@marvell.com>
---
MAINTAINERS | 1 +
doc/guides/rel_notes/release_19_11.rst | 6 +
doc/guides/sample_app_ug/index.rst | 1 +
doc/guides/sample_app_ug/intro.rst | 5 +
doc/guides/sample_app_ug/l2_forward_event.rst | 698 ++++++++++++++++++
5 files changed, 711 insertions(+)
create mode 100644 doc/guides/sample_app_ug/l2_forward_event.rst
diff --git a/MAINTAINERS b/MAINTAINERS
index 3df03f9fe..10aea67a5 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1499,6 +1499,7 @@ F: examples/l2fwd-cat/
M: Sunil Kumar Kori <skori@marvell.com>
M: Pavan Nikhilesh <pbhagavatula@marvell.com>
F: examples/l2fwd-event/
+F: doc/guides/sample_app_ug/l2_forward_event.rst
T: git://dpdk.org/next/dpdk-next-eventdev
F: examples/l3fwd/
diff --git a/doc/guides/rel_notes/release_19_11.rst b/doc/guides/rel_notes/release_19_11.rst
index ae8e7b2f0..3a39f562c 100644
--- a/doc/guides/rel_notes/release_19_11.rst
+++ b/doc/guides/rel_notes/release_19_11.rst
@@ -231,6 +231,12 @@ New Features
* Added a console command to testpmd app, ``show port (port_id) ptypes`` which
gives ability to print port supported ptypes in different protocol layers.
+* **Added new example l2fwd-event application.**
+
+ Added an example application `l2fwd-event` that adds event device support to
+ the traditional l2fwd example. It demonstrates the use of poll and event mode
+ I/O mechanisms within a single application.
+
Removed Items
-------------
diff --git a/doc/guides/sample_app_ug/index.rst b/doc/guides/sample_app_ug/index.rst
index a3737c118..16ddabe69 100644
--- a/doc/guides/sample_app_ug/index.rst
+++ b/doc/guides/sample_app_ug/index.rst
@@ -26,6 +26,7 @@ Sample Applications User Guides
l2_forward_crypto
l2_forward_job_stats
l2_forward_real_virtual
+ l2_forward_event
l2_forward_cat
l3_forward
l3_forward_power_man
diff --git a/doc/guides/sample_app_ug/intro.rst b/doc/guides/sample_app_ug/intro.rst
index 6a3b6722e..66a490b8e 100644
--- a/doc/guides/sample_app_ug/intro.rst
+++ b/doc/guides/sample_app_ug/intro.rst
@@ -45,6 +45,11 @@ examples are highlighted below.
forwarding, or ``l2fwd`` application does forwarding based on Ethernet MAC
addresses like a simple switch.
+* :doc:`Network Layer 2 forwarding<l2_forward_event>`: The Network Layer 2
+ forwarding, or ``l2fwd-event`` application does forwarding based on Ethernet MAC
+ addresses like a simple switch. It demonstrates the use of poll and event mode
+ I/O mechanisms within a single application.
+
* :doc:`Network Layer 3 forwarding<l3_forward>`: The Network Layer3
forwarding, or ``l3fwd`` application does forwarding based on Internet
Protocol, IPv4 or IPv6 like a simple router.
diff --git a/doc/guides/sample_app_ug/l2_forward_event.rst b/doc/guides/sample_app_ug/l2_forward_event.rst
new file mode 100644
index 000000000..52a570b97
--- /dev/null
+++ b/doc/guides/sample_app_ug/l2_forward_event.rst
@@ -0,0 +1,698 @@
+.. SPDX-License-Identifier: BSD-3-Clause
+ Copyright(c) 2010-2014 Intel Corporation.
+
+.. _l2_fwd_event_app:
+
+L2 Forwarding Eventdev Sample Application
+=========================================
+
+The L2 Forwarding eventdev sample application is a simple example of packet
+processing using the Data Plane Development Kit (DPDK) to demonstrate usage of
+poll and event mode packet I/O mechanism.
+
+Overview
+--------
+
+The L2 Forwarding eventdev sample application performs L2 forwarding for each
+packet that is received on an RX_PORT. The destination port is the adjacent port
+from the enabled portmask, that is, if the first four ports are enabled (portmask=0x0f),
+ports 0 and 1 forward into each other, and ports 2 and 3 forward into each other.
+Also, if MAC address updating is enabled, the MAC addresses are affected as follows:
+
+* The source MAC address is replaced by the TX_PORT MAC address
+
+* The destination MAC address is replaced by 02:00:00:00:00:TX_PORT_ID
+
+The application receives packets from the RX_PORT using one of the following methods:
+
+* Poll mode
+
+* Eventdev mode (default)
+
+This application can be used to benchmark performance using a traffic-generator,
+as shown in the :numref:`figure_l2fwd_event_benchmark_setup`.
+
+.. _figure_l2fwd_event_benchmark_setup:
+
+.. figure:: img/l2_fwd_benchmark_setup.*
+
+ Performance Benchmark Setup (Basic Environment)
+
+Compiling the Application
+-------------------------
+
+To compile the sample application see :doc:`compiling`.
+
+The application is located in the ``l2fwd-event`` sub-directory.
+
+Running the Application
+-----------------------
+
+The application requires a number of command line options:
+
+.. code-block:: console
+
+ ./build/l2fwd-event [EAL options] -- -p PORTMASK [-q NQ] --[no-]mac-updating --mode=MODE --eventq-sched=SCHED_MODE
+
+where,
+
+* p PORTMASK: A hexadecimal bitmask of the ports to configure
+
+* q NQ: A number of queues (=ports) per lcore (default is 1)
+
+* --[no-]mac-updating: Enable or disable MAC address updating (enabled by default).
+
+* --mode=MODE: Packet transfer mode for I/O, poll or eventdev. Eventdev by default.
+
+* --eventq-sched=SCHED_MODE: Event queue schedule mode, Ordered, Atomic or Parallel. Atomic by default.
+
+Sample commands to run the application in its different modes are given below:
+
+To run the application in poll mode with 4 lcores, 16 ports, 8 RX queues per lcore
+and MAC address updating enabled, issue the command:
+
+.. code-block:: console
+
+ ./build/l2fwd-event -l 0-3 -n 4 -- -q 8 -p ffff --mode=poll
+
+To run the application in eventdev mode with 4 lcores, 16 ports, ordered scheduling
+and MAC address updating enabled, issue the command:
+
+.. code-block:: console
+
+ ./build/l2fwd-event -l 0-3 -n 4 -- -p ffff --eventq-sched=ordered
+
+or
+
+.. code-block:: console
+
+ ./build/l2fwd-event -l 0-3 -n 4 -- -q 8 -p ffff --mode=eventdev --eventq-sched=ordered
+
+Refer to the *DPDK Getting Started Guide* for general information on running
+applications and the Environment Abstraction Layer (EAL) options.
+
+When run with the S/W scheduler, the application uses the following DPDK services:
+
+* Software scheduler
+* Rx adapter service function
+* Tx adapter service function
+
+The application needs service cores to run the above services. Service cores
+must be provided as EAL parameters along with ``--vdev=event_sw0`` to enable the
+S/W scheduler. A sample command is:
+
+.. code-block:: console
+
+ ./build/l2fwd-event -l 0-7 -s 0-3 -n 4 --vdev event_sw0 -- -q 8 -p ffff --mode=eventdev --eventq-sched=ordered
+
+Explanation
+-----------
+
+The following sections provide some explanation of the code.
+
+.. _l2_fwd_event_app_cmd_arguments:
+
+Command Line Arguments
+~~~~~~~~~~~~~~~~~~~~~~
+
+The L2 Forwarding eventdev sample application takes specific parameters,
+in addition to Environment Abstraction Layer (EAL) arguments.
+The preferred way to parse parameters is to use the getopt() function,
+since it is part of a well-defined and portable library.
+
+The parsing of arguments is done in the **l2fwd_parse_args()** function for non
+eventdev parameters and in **parse_eventdev_args()** for eventdev parameters.
+The method of argument parsing is not described here. Refer to the
+*glibc getopt(3)* man page for details.
+
+EAL arguments are parsed first, then application-specific arguments.
+This is done at the beginning of the main() function and eventdev parameters
+are parsed in eventdev_resource_setup() function during eventdev setup:
+
+.. code-block:: c
+
+ /* init EAL */
+
+ ret = rte_eal_init(argc, argv);
+ if (ret < 0)
+ rte_panic("Invalid EAL arguments\n");
+
+ argc -= ret;
+ argv += ret;
+
+ /* parse application arguments (after the EAL ones) */
+
+ ret = l2fwd_parse_args(argc, argv);
+ if (ret < 0)
+ rte_panic("Invalid L2FWD arguments\n");
+ .
+ .
+ .
+
+ /* Parse eventdev command line options */
+ ret = parse_eventdev_args(argc, argv);
+ if (ret < 0)
+ return ret;
+
+.. _l2_fwd_event_app_mbuf_init:
+
+Mbuf Pool Initialization
+~~~~~~~~~~~~~~~~~~~~~~~~
+
+Once the arguments are parsed, the mbuf pool is created.
+The mbuf pool contains a set of mbuf objects that will be used by the driver
+and the application to store network packet data:
+
+.. code-block:: c
+
+ /* create the mbuf pool */
+
+ l2fwd_pktmbuf_pool = rte_pktmbuf_pool_create("mbuf_pool", NB_MBUF,
+ MEMPOOL_CACHE_SIZE, 0,
+ RTE_MBUF_DEFAULT_BUF_SIZE,
+ rte_socket_id());
+ if (l2fwd_pktmbuf_pool == NULL)
+ rte_panic("Cannot init mbuf pool\n");
+
+The rte_mempool is a generic structure used to handle pools of objects.
+In this case, it is necessary to create a pool that will be used by the driver.
+The number of allocated pkt mbufs is NB_MBUF, with a data room size of
+RTE_MBUF_DEFAULT_BUF_SIZE each.
+A per-lcore cache of 32 mbufs is kept.
+The memory is allocated in NUMA socket 0,
+but it is possible to extend this code to allocate one mbuf pool per socket.
+
+The rte_pktmbuf_pool_create() function uses the default mbuf pool and mbuf
+initializers, respectively rte_pktmbuf_pool_init() and rte_pktmbuf_init().
+An advanced application may want to use the mempool API to create the
+mbuf pool with more control.
+
+.. _l2_fwd_event_app_drv_init:
+
+Driver Initialization
+~~~~~~~~~~~~~~~~~~~~~
+
+The main part of the code in the main() function relates to the initialization
+of the driver. To fully understand this code, it is recommended to study the
+chapters related to the Poll Mode Driver and Event Device Driver in the
+*DPDK Programmer's Guide* and the *DPDK API Reference*.
+
+.. code-block:: c
+
+ if (rte_pci_probe() < 0)
+ rte_panic("Cannot probe PCI\n");
+
+ /* reset l2fwd_dst_ports */
+
+ for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++)
+ l2fwd_dst_ports[portid] = 0;
+
+ last_port = 0;
+
+ /*
+ * Each logical core is assigned a dedicated TX queue on each port.
+ */
+
+ RTE_ETH_FOREACH_DEV(portid) {
+ /* skip ports that are not enabled */
+
+ if ((l2fwd_enabled_port_mask & (1 << portid)) == 0)
+ continue;
+
+ if (nb_ports_in_mask % 2) {
+ l2fwd_dst_ports[portid] = last_port;
+ l2fwd_dst_ports[last_port] = portid;
+ }
+ else
+ last_port = portid;
+
+ nb_ports_in_mask++;
+
+ rte_eth_dev_info_get((uint8_t) portid, &dev_info);
+ }
+
+Observe that:
+
+* rte_pci_probe() parses the devices on the PCI bus and initializes recognized
+ devices.
+
+The next step is to configure the RX and TX queues. For each port, there is only
+one RX queue (only one lcore is able to poll a given port). The number of TX
+queues depends on the number of available lcores. The rte_eth_dev_configure()
+function is used to configure the number of queues for a port:
+
+.. code-block:: c
+
+ ret = rte_eth_dev_configure((uint8_t)portid, 1, 1, &port_conf);
+ if (ret < 0)
+ rte_panic("Cannot configure device: err=%d, port=%u\n",
+ ret, portid);
+
+.. _l2_fwd_event_app_rx_init:
+
+RX Queue Initialization
+~~~~~~~~~~~~~~~~~~~~~~~
+
+The application uses one lcore to poll one or several ports, depending on the -q
+option, which specifies the number of queues per lcore.
+
+For example, if the user specifies -q 4, the application is able to poll four
+ports with one lcore. If there are 16 ports on the target (and if the portmask
+argument is -p ffff ), the application will need four lcores to poll all the
+ports.
+
+.. code-block:: c
+
+ ret = rte_eth_rx_queue_setup((uint8_t) portid, 0, nb_rxd, SOCKET0,
+ &rx_conf, l2fwd_pktmbuf_pool);
+ if (ret < 0)
+
+ rte_panic("rte_eth_rx_queue_setup: err=%d, port=%u\n",
+ ret, portid);
+
+The list of queues that must be polled for a given lcore is stored in a private
+structure called struct lcore_queue_conf.
+
+.. code-block:: c
+
+ struct lcore_queue_conf {
+ unsigned n_rx_port;
+ unsigned rx_port_list[MAX_RX_QUEUE_PER_LCORE];
+ struct mbuf_table tx_mbufs[L2FWD_MAX_PORTS];
+ } rte_cache_aligned;
+
+ struct lcore_queue_conf lcore_queue_conf[RTE_MAX_LCORE];
+
+The values n_rx_port and rx_port_list[] are used in the main packet processing
+loop (see :ref:`l2_fwd_event_app_rx_tx_packets`).
+
+.. _l2_fwd_event_app_tx_init:
+
+TX Queue Initialization
+~~~~~~~~~~~~~~~~~~~~~~~
+
+Each lcore should be able to transmit on any port. For every port, a single TX
+queue is initialized.
+
+.. code-block:: c
+
+ /* init one TX queue on each port */
+
+ fflush(stdout);
+
+ ret = rte_eth_tx_queue_setup((uint8_t) portid, 0, nb_txd,
+ rte_eth_dev_socket_id(portid), &tx_conf);
+ if (ret < 0)
+ rte_panic("rte_eth_tx_queue_setup:err=%d, port=%u\n",
+ ret, (unsigned) portid);
+
+To configure eventdev support, the application sets up the following components:
+
+* Event dev
+* Event queue
+* Event Port
+* Rx/Tx adapters
+* Ethernet ports
+
+.. _l2_fwd_event_app_event_dev_init:
+
+Event device Initialization
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+The application can use either a H/W or S/W based event device scheduler
+implementation and supports a single event device instance. It configures the
+event device as follows:
+
+.. code-block:: c
+
+ struct rte_event_dev_config event_d_conf = {
+ .nb_event_queues = ethdev_count, /* Dedicated to each Ethernet port */
+ .nb_event_ports = num_workers, /* Dedicated to each lcore */
+ .nb_events_limit = 4096,
+ .nb_event_queue_flows = 1024,
+ .nb_event_port_dequeue_depth = 128,
+ .nb_event_port_enqueue_depth = 128
+ };
+
+ ret = rte_event_dev_configure(event_d_id, &event_d_conf);
+ if (ret < 0)
+ rte_panic("Error in configuring event device\n");
+
+In case of the S/W scheduler, the application runs the eventdev scheduler service
+on a service core. The application retrieves the service id and picks the least
+loaded service core to run the S/W scheduler.
+
+.. code-block:: c
+
+ rte_event_dev_info_get(evt_rsrc->event_d_id, &evdev_info);
+ if (evdev_info.event_dev_cap & RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED) {
+ ret = rte_event_dev_service_id_get(evt_rsrc->event_d_id,
+ &service_id);
+ if (ret != -ESRCH && ret != 0)
+ rte_panic("Error in starting eventdev service\n");
+ l2fwd_event_service_enable(service_id);
+ }
+
+.. _l2_fwd_app_event_queue_init:
+
+Event queue Initialization
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+Each Ethernet device is assigned a dedicated event queue which will be linked
+to all available event ports i.e. each lcore can dequeue packets from any of the
+Ethernet ports.
+
+.. code-block:: c
+
+ struct rte_event_queue_conf event_q_conf = {
+ .nb_atomic_flows = 1024,
+ .nb_atomic_order_sequences = 1024,
+ .event_queue_cfg = 0,
+ .schedule_type = RTE_SCHED_TYPE_ATOMIC,
+ .priority = RTE_EVENT_DEV_PRIORITY_HIGHEST
+ };
+
+ /* User requested sched mode */
+ event_q_conf.schedule_type = eventq_sched_mode;
+ for (event_q_id = 0; event_q_id < ethdev_count; event_q_id++) {
+ ret = rte_event_queue_setup(event_d_id, event_q_id,
+ &event_q_conf);
+ if (ret < 0)
+ rte_panic("Error in configuring event queue\n");
+ }
+
+In case of the S/W scheduler, an extra event queue is created which is used by the
+Tx adapter service function for the enqueue operation.
+
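+For illustration, this extra queue is the last event queue and is configured as a
+single link queue at the highest priority, so that only the Tx adapter's event
+port is linked to it:
+
+.. code-block:: c
+
+ event_q_conf.event_queue_cfg |= RTE_EVENT_QUEUE_CFG_SINGLE_LINK;
+ event_q_conf.priority = RTE_EVENT_DEV_PRIORITY_HIGHEST;
+ ret = rte_event_queue_setup(event_d_id, event_q_id, &event_q_conf);
+ if (ret < 0)
+ rte_panic("Error in configuring event queue for Tx adapter\n");
+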
+.. _l2_fwd_app_event_port_init:
+
+Event port Initialization
+~~~~~~~~~~~~~~~~~~~~~~~~~
+Each worker thread is assigned a dedicated event port for enq/deq operations
+to/from an event device. All event ports are linked with all available event
+queues.
+
+.. code-block:: c
+
+ struct rte_event_port_conf event_p_conf = {
+ .dequeue_depth = 32,
+ .enqueue_depth = 32,
+ .new_event_threshold = 4096
+ };
+
+ for (event_p_id = 0; event_p_id < num_workers; event_p_id++) {
+ ret = rte_event_port_setup(event_d_id, event_p_id,
+ &event_p_conf);
+ if (ret < 0)
+ rte_panic("Error in configuring event port %d\n", event_p_id);
+
+ ret = rte_event_port_link(event_d_id, event_p_id, NULL,
+ NULL, 0);
+ if (ret < 0)
+ rte_panic("Error in linking event port %d to queue\n",
+ event_p_id);
+ }
+
+In case of the S/W scheduler, an extra event port is created by the DPDK library;
+it is retrieved by the application and used by the Tx adapter service.
+
+.. code-block:: c
+
+ ret = rte_event_eth_tx_adapter_event_port_get(tx_adptr_id, &tx_port_id);
+ if (ret)
+ rte_panic("Failed to get Tx adapter port id: %d\n", ret);
+
+ ret = rte_event_port_link(event_d_id, tx_port_id,
+ &evt_rsrc.evq.event_q_id[
+ evt_rsrc.evq.nb_queues - 1],
+ NULL, 1);
+ if (ret != 1)
+ rte_panic("Unable to link Tx adapter port to Tx queue:err=%d\n",
+ ret);
+
+.. _l2_fwd_event_app_adapter_init:
+
+Rx/Tx adapter Initialization
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+For the H/W scheduler, each Ethernet port is assigned a dedicated Rx/Tx adapter.
+Each Ethernet port's Rx queues are connected to its respective event queue at
+priority 0 via the Rx adapter configuration, and the Ethernet port's Tx queues are
+connected via the Tx adapter.
+
+.. code-block:: c
+
+ RTE_ETH_FOREACH_DEV(port_id) {
+ if ((rsrc->enabled_port_mask & (1 << port_id)) == 0)
+ continue;
+ ret = rte_event_eth_rx_adapter_create(adapter_id, event_d_id,
+ &evt_rsrc->def_p_conf);
+ if (ret)
+ rte_panic("Failed to create rx adapter[%d]\n",
+ adapter_id);
+
+ /* Configure user requested sched type*/
+ eth_q_conf.ev.sched_type = rsrc->sched_type;
+ eth_q_conf.ev.queue_id = evt_rsrc->evq.event_q_id[q_id];
+ ret = rte_event_eth_rx_adapter_queue_add(adapter_id, port_id,
+ -1, &eth_q_conf);
+ if (ret)
+ rte_panic("Failed to add queues to Rx adapter\n");
+
+ ret = rte_event_eth_rx_adapter_start(adapter_id);
+ if (ret)
+ rte_panic("Rx adapter[%d] start Failed\n", adapter_id);
+
+ evt_rsrc->rx_adptr.rx_adptr[adapter_id] = adapter_id;
+ adapter_id++;
+ if (q_id < evt_rsrc->evq.nb_queues)
+ q_id++;
+ }
+
+ adapter_id = 0;
+ RTE_ETH_FOREACH_DEV(port_id) {
+ if ((rsrc->enabled_port_mask & (1 << port_id)) == 0)
+ continue;
+ ret = rte_event_eth_tx_adapter_create(adapter_id, event_d_id,
+ &evt_rsrc->def_p_conf);
+ if (ret)
+ rte_panic("Failed to create tx adapter[%d]\n",
+ adapter_id);
+
+ ret = rte_event_eth_tx_adapter_queue_add(adapter_id, port_id,
+ -1);
+ if (ret)
+ rte_panic("Failed to add queues to Tx adapter\n");
+
+ ret = rte_event_eth_tx_adapter_start(adapter_id);
+ if (ret)
+ rte_panic("Tx adapter[%d] start Failed\n", adapter_id);
+
+ evt_rsrc->tx_adptr.tx_adptr[adapter_id] = adapter_id;
+ adapter_id++;
+ }
+
+For the S/W scheduler, instead of dedicated adapters, common Rx/Tx adapters are
+configured and shared among all the Ethernet ports. The DPDK library also needs
+service cores to run the internal services for the Rx/Tx adapters. The application
+gets the service ids for the Rx/Tx adapters and, after successful setup, runs the
+services on dedicated service cores.
+
+.. code-block:: c
+
+ for (i = 0; i < evt_rsrc->rx_adptr.nb_rx_adptr; i++) {
+ ret = rte_event_eth_rx_adapter_caps_get(evt_rsrc->event_d_id,
+ evt_rsrc->rx_adptr.rx_adptr[i], &caps);
+ if (ret < 0)
+ rte_panic("Failed to get Rx adapter[%d] caps\n",
+ evt_rsrc->rx_adptr.rx_adptr[i]);
+ ret = rte_event_eth_rx_adapter_service_id_get(
+ evt_rsrc->event_d_id,
+ &service_id);
+ if (ret != -ESRCH && ret != 0)
+ rte_panic("Error in starting Rx adapter[%d] service\n",
+ evt_rsrc->rx_adptr.rx_adptr[i]);
+ l2fwd_event_service_enable(service_id);
+ }
+
+ for (i = 0; i < evt_rsrc->tx_adptr.nb_tx_adptr; i++) {
+ ret = rte_event_eth_tx_adapter_caps_get(evt_rsrc->event_d_id,
+ evt_rsrc->tx_adptr.tx_adptr[i], &caps);
+ if (ret < 0)
+ rte_panic("Failed to get Rx adapter[%d] caps\n",
+ evt_rsrc->tx_adptr.tx_adptr[i]);
+ ret = rte_event_eth_tx_adapter_service_id_get(
+ evt_rsrc->event_d_id,
+ &service_id);
+ if (ret != -ESRCH && ret != 0)
+ rte_panic("Error in starting Rx adapter[%d] service\n",
+ evt_rsrc->tx_adptr.tx_adptr[i]);
+ l2fwd_event_service_enable(service_id);
+ }
+
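+The service setup helper used above is part of the application, not of the
+eventdev API. A minimal sketch of how l2fwd_event_service_enable() might map
+the adapter service to a service lcore is shown below (illustrative only; the
+actual helper may balance services across the available service lcores):
+
+.. code-block:: c
+
+    static void
+    l2fwd_event_service_enable(uint32_t service_id)
+    {
+        uint32_t slcores[RTE_MAX_LCORE];
+        int32_t nb_slcores;
+
+        /* Service lcores are reserved via the EAL service-core options. */
+        nb_slcores = rte_service_lcore_list(slcores, RTE_MAX_LCORE);
+        if (nb_slcores <= 0)
+            rte_panic("No service lcores available\n");
+
+        /* Map the adapter service to a service lcore and start both. */
+        rte_service_map_lcore_set(service_id, slcores[0], 1);
+        rte_service_runstate_set(service_id, 1);
+        rte_service_lcore_start(slcores[0]);
+    }
+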
+.. _l2_fwd_event_app_rx_tx_packets:
+
+Receive, Process and Transmit Packets
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+In the **l2fwd_main_loop()** function, the main task is to read ingress packets from
+the RX queues. This is done using the following code:
+
+.. code-block:: c
+
+ /*
+ * Read packet from RX queues
+ */
+
+ for (i = 0; i < qconf->n_rx_port; i++) {
+ portid = qconf->rx_port_list[i];
+ nb_rx = rte_eth_rx_burst((uint8_t) portid, 0, pkts_burst,
+ MAX_PKT_BURST);
+
+ for (j = 0; j < nb_rx; j++) {
+ m = pkts_burst[j];
+ rte_prefetch0(rte_pktmbuf_mtod(m, void *));
+ l2fwd_simple_forward(m, portid);
+ }
+ }
+
+Packets are read in a burst of size MAX_PKT_BURST. The rte_eth_rx_burst()
+function writes the mbuf pointers in a local table and returns the number of
+available mbufs in the table.
+
+Then, each mbuf in the table is processed by the l2fwd_simple_forward()
+function. The processing is very simple: determine the TX port from the RX
+port, then replace the source and destination MAC addresses if MAC address
+updating is enabled.
+
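+The MAC update step follows the pattern of the classic l2fwd example; a minimal
+sketch is shown below, assuming a per-port l2fwd_ports_eth_addr[] table filled
+during port initialization:
+
+.. code-block:: c
+
+    static void
+    l2fwd_mac_updating(struct rte_mbuf *m, uint32_t dest_portid)
+    {
+        struct rte_ether_hdr *eth;
+        void *tmp;
+
+        eth = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
+
+        /* Destination MAC: 02:00:00:00:00:xx, where xx is the dest port id */
+        tmp = &eth->d_addr.addr_bytes[0];
+        *((uint64_t *)tmp) = 0x000000000002 + ((uint64_t)dest_portid << 40);
+
+        /* Source MAC: address of the destination port */
+        rte_ether_addr_copy(&l2fwd_ports_eth_addr[dest_portid], &eth->s_addr);
+    }
+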
+During the initialization process, a static array of destination ports
+(l2fwd_dst_ports[]) is filled such that for each source port, a destination port
+is assigned that is either the next or previous enabled port from the portmask.
+If the number of ports in the portmask is odd, then packets from the last port
+are forwarded to the first port, i.e. if portmask=0x07, forwarding takes place
+as p0--->p1, p1--->p2, p2--->p0, as sketched below.
+
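+A possible way of building this mapping over the enabled ports is shown below
+(an illustrative sketch covering only the wrap-around case described above; the
+example's actual initialization also pairs ports when their count is even):
+
+.. code-block:: c
+
+    uint32_t enabled[RTE_MAX_ETHPORTS];
+    uint32_t nb_enabled = 0, i;
+    uint16_t port_id;
+
+    /* Collect the ports present in the portmask. */
+    RTE_ETH_FOREACH_DEV(port_id) {
+        if ((rsrc->enabled_port_mask & (1 << port_id)) == 0)
+            continue;
+        enabled[nb_enabled++] = port_id;
+    }
+
+    /* Each enabled port forwards to the next one, the last wraps to the
+     * first: p0--->p1, p1--->p2, p2--->p0 for portmask=0x07. */
+    for (i = 0; i < nb_enabled; i++)
+        l2fwd_dst_ports[enabled[i]] = enabled[(i + 1) % nb_enabled];
+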
+Also, to optimize the enqueue operation, l2fwd_simple_forward() buffers
+incoming mbufs up to MAX_PKT_BURST. Once the limit is reached, all buffered
+packets are transmitted to their destination ports.
+
+.. code-block:: c
+
+ static void
+ l2fwd_simple_forward(struct rte_mbuf *m, uint32_t portid)
+ {
+ uint32_t dst_port;
+ int32_t sent;
+ struct rte_eth_dev_tx_buffer *buffer;
+
+ dst_port = l2fwd_dst_ports[portid];
+
+ if (mac_updating)
+ l2fwd_mac_updating(m, dst_port);
+
+ buffer = tx_buffer[dst_port];
+ sent = rte_eth_tx_buffer(dst_port, 0, buffer, m);
+ if (sent)
+ port_statistics[dst_port].tx += sent;
+ }
+
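+The per-port TX buffers used by rte_eth_tx_buffer() above are typically
+allocated and initialized during setup; a minimal sketch, assuming the
+tx_buffer[] array and the MAX_PKT_BURST definition used in this example:
+
+.. code-block:: c
+
+    /* Allocate and initialize the TX buffer for a given port. */
+    tx_buffer[portid] = rte_zmalloc_socket("tx_buffer",
+            RTE_ETH_TX_BUFFER_SIZE(MAX_PKT_BURST), 0,
+            rte_eth_dev_socket_id(portid));
+    if (tx_buffer[portid] == NULL)
+        rte_panic("Cannot allocate TX buffer for port %u\n", portid);
+
+    rte_eth_tx_buffer_init(tx_buffer[portid], MAX_PKT_BURST);
+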
+For this test application, the processing is exactly the same for all packets
+arriving on the same RX port. Therefore, it would have been possible to call
+the rte_eth_tx_buffer() function directly from the main loop to send all the
+received packets on the same TX port, using the burst-oriented send function,
+which is more efficient.
+
+However, in real-life applications (such as L3 routing),
+packet N is not necessarily forwarded on the same port as packet N-1.
+The application is implemented to illustrate that, so the same approach can be
+reused in a more complex application.
+
+To ensure that no packets remain in the TX buffers, each lcore drains the TX
+queue in its main loop. This technique introduces some latency when there are
+not many packets to send; however, it improves performance:
+
+.. code-block:: c
+
+ cur_tsc = rte_rdtsc();
+
+ /*
+ * TX burst queue drain
+ */
+ diff_tsc = cur_tsc - prev_tsc;
+ if (unlikely(diff_tsc > drain_tsc)) {
+ for (i = 0; i < qconf->n_rx_port; i++) {
+ portid = l2fwd_dst_ports[qconf->rx_port_list[i]];
+ buffer = tx_buffer[portid];
+ sent = rte_eth_tx_buffer_flush(portid, 0,
+ buffer);
+ if (sent)
+ port_statistics[portid].tx += sent;
+ }
+
+ /* if timer is enabled */
+ if (timer_period > 0) {
+ /* advance the timer */
+ timer_tsc += diff_tsc;
+
+ /* if timer has reached its timeout */
+ if (unlikely(timer_tsc >= timer_period)) {
+ /* do this only on master core */
+ if (lcore_id == rte_get_master_lcore()) {
+ print_stats();
+ /* reset the timer */
+ timer_tsc = 0;
+ }
+ }
+ }
+
+ prev_tsc = cur_tsc;
+ }
+
+In the **l2fwd_event_loop()** function, the main task is to read ingress
+packets from the event ports. This is done using the following code:
+
+.. code-block:: c
+
+ /* Read packet from eventdev */
+ nb_rx = rte_event_dequeue_burst(event_d_id, event_p_id,
+ events, deq_len, 0);
+ if (nb_rx == 0) {
+ rte_pause();
+ continue;
+ }
+
+ for (i = 0; i < nb_rx; i++) {
+ mbuf[i] = events[i].mbuf;
+ rte_prefetch0(rte_pktmbuf_mtod(mbuf[i], void *));
+ }
+
+
+Before reading packets, deq_len is fetched so that the dequeue burst size does
+not exceed the dequeue depth allowed by the eventdev.
+The rte_event_dequeue_burst() function writes the mbuf pointers in a local table
+and returns the number of available mbufs in the table.
+
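+One way deq_len could be obtained is by querying the port's dequeue depth
+attribute (a sketch; the application may instead cache the value configured at
+port setup time):
+
+.. code-block:: c
+
+    uint32_t deq_len;
+
+    /* Query the maximum dequeue burst size allowed on this event port. */
+    ret = rte_event_port_attr_get(event_d_id, event_p_id,
+                                  RTE_EVENT_PORT_ATTR_DEQ_DEPTH, &deq_len);
+    if (ret)
+        rte_panic("Failed to get dequeue depth of port %d\n", event_p_id);
+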
+Then, each mbuf in the table is processed by the l2fwd_eventdev_forward()
+function. The processing is very simple: determine the TX port from the RX
+port, then replace the source and destination MAC addresses if MAC address
+updating is enabled.
+
+During the initialization process, a static array of destination ports
+(l2fwd_dst_ports[]) is filled such that for each source port, a destination port
+is assigned that is either the next or previous enabled port from the portmask.
+If the number of ports in the portmask is odd, then packets from the last port
+are forwarded to the first port, i.e. if portmask=0x07, forwarding takes place
+as p0--->p1, p1--->p2, p2--->p0.
+
+l2fwd_eventdev_forward() does not buffer incoming mbufs. Packets are forwarded
+to the destination ports via the Tx adapter enqueue API or the generic eventdev
+enqueue API, depending on whether the H/W or S/W scheduler is used.
+
+.. code-block:: c
+
+ nb_tx = rte_event_eth_tx_adapter_enqueue(event_d_id, port_id, ev,
+ nb_rx);
+ while (nb_tx < nb_rx && !rsrc->force_quit)
+ nb_tx += rte_event_eth_tx_adapter_enqueue(
+ event_d_id, port_id,
+ ev + nb_tx, nb_rx - nb_tx);
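+
+When the S/W scheduler is used (no internal Tx port), the forwarding might
+instead go through the generic enqueue API towards the single-link Tx event
+queue; a sketch, assuming tx_q_id holds the id of that queue:
+
+.. code-block:: c
+
+    /* Redirect events to the Tx event queue serviced by the Tx adapter. */
+    for (i = 0; i < nb_rx; i++) {
+        events[i].queue_id = tx_q_id;
+        events[i].op = RTE_EVENT_OP_FORWARD;
+    }
+
+    nb_tx = rte_event_enqueue_burst(event_d_id, port_id, events, nb_rx);
+    while (nb_tx < nb_rx && !rsrc->force_quit)
+        nb_tx += rte_event_enqueue_burst(event_d_id, port_id,
+                                         events + nb_tx, nb_rx - nb_tx);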
--
2.20.1
* Re: [dpdk-dev] [PATCH v8 00/10] example/l2fwd-event: introduce l2fwd-event example
2019-10-30 16:26 [dpdk-dev] [PATCH v8 00/10] example/l2fwd-event: introduce l2fwd-event example pbhagavatula
` (9 preceding siblings ...)
2019-10-30 17:03 ` [dpdk-dev] [PATCH v8 10/10] doc: add application usage guide for l2fwd-event pbhagavatula
@ 2019-10-30 18:17 ` Jerin Jacob
2019-11-04 17:11 ` Jerin Jacob
10 siblings, 1 reply; 13+ messages in thread
From: Jerin Jacob @ 2019-10-30 18:17 UTC (permalink / raw)
To: Pavan Nikhilesh, Nipun Gupta, Sunil Kumar Kori, Rao, Nikhil,
Hemant Agrawal
Cc: Jerin Jacob, dpdk-dev
On Wed, Oct 30, 2019 at 9:57 PM <pbhagavatula@marvell.com> wrote:
>
> From: Pavan Nikhilesh <pbhagavatula@marvell.com>
>
> This patchset adds a new application to demonstrate the usage of event
> mode. The poll mode is also available to help with the transition.
>
> The following new command line parameters are added:
> --mode: Dictates the mode of operation either poll or event.
> --eventq_sched: Dictates event scheduling mode ordered, atomic or
> parallel.
>
> Based on event device capability the configuration is done as follows:
> - A single event device is enabled.
> - The number of event ports is equal to the number of worker
> cores enabled in the core mask. Additional event ports might
> be configured based on Rx/Tx adapter capability.
> - The number of event queues is equal to the number of ethernet
> ports. If Tx adapter doesn't have internal port capability then
> an additional single link event queue is used to enqueue events
> to Tx adapter.
> - Each event port is linked to all existing event queues.
> - Dedicated Rx/Tx adapters for each Ethernet port.
Forgot to add the Nipun Gupta <nipun.gupta@nxp.com> ACK
I will add his ACK on apply.
Series Acked-by: Jerin Jacob <jerinj@marvell.com>
* Re: [dpdk-dev] [PATCH v8 00/10] example/l2fwd-event: introduce l2fwd-event example
2019-10-30 18:17 ` [dpdk-dev] [PATCH v8 00/10] example/l2fwd-event: introduce l2fwd-event example Jerin Jacob
@ 2019-11-04 17:11 ` Jerin Jacob
0 siblings, 0 replies; 13+ messages in thread
From: Jerin Jacob @ 2019-11-04 17:11 UTC (permalink / raw)
To: Pavan Nikhilesh, Nipun Gupta, Sunil Kumar Kori, Rao, Nikhil,
Hemant Agrawal
Cc: Jerin Jacob, dpdk-dev
On Wed, Oct 30, 2019 at 11:47 PM Jerin Jacob <jerinjacobk@gmail.com> wrote:
>
> On Wed, Oct 30, 2019 at 9:57 PM <pbhagavatula@marvell.com> wrote:
> >
> > From: Pavan Nikhilesh <pbhagavatula@marvell.com>
> >
> > This patchset adds a new application to demonstrate the usage of event
> > mode. The poll mode is also available to help with the transition.
> >
> > The following new command line parameters are added:
> > --mode: Dictates the mode of operation either poll or event.
> > --eventq_sched: Dictates event scheduling mode ordered, atomic or
> > parallel.
> >
> > Based on event device capability the configuration is done as follows:
> > - A single event device is enabled.
> > - The number of event ports is equal to the number of worker
> > cores enabled in the core mask. Additional event ports might
> > be configured based on Rx/Tx adapter capability.
> > - The number of event queues is equal to the number of ethernet
> > ports. If Tx adapter doesn't have internal port capability then
> > an additional single link event queue is used to enqueue events
> > to Tx adapter.
> > - Each event port is linked to all existing event queues.
> > - Dedicated Rx/Tx adapters for each Ethernet port.
>
>
> Forgot to add the Nipun Gupta <nipun.gupta@nxp.com> ACK
> I will add his ACK on apply.
>
> Series Acked-by: Jerin Jacob <jerinj@marvell.com>
Series applied to dpdk-next-eventdev/master. Thanks.