* [dpdk-dev] [PATCH v6 01/11] examples/l3fwd: add framework for event device
2020-01-28 5:34 ` [dpdk-dev] [PATCH v6 00/11] example/l3fwd: introduce event device support pbhagavatula
@ 2020-01-28 5:34 ` pbhagavatula
2020-01-28 5:34 ` [dpdk-dev] [PATCH v6 02/11] examples/l3fwd: split pipelines based on capability pbhagavatula
` (10 subsequent siblings)
11 siblings, 0 replies; 13+ messages in thread
From: pbhagavatula @ 2020-01-28 5:34 UTC (permalink / raw)
To: jerinj, konstantin.ananyev, Marko Kovacevic, Ori Kam,
Bruce Richardson, Radu Nicolau, Akhil Goyal, Tomasz Kantecki,
Sunil Kumar Kori, Pavan Nikhilesh
Cc: dev, Nipun Gupta
From: Sunil Kumar Kori <skori@marvell.com>
Add framework to enable event device as a producer of packets.
To switch between event mode and poll mode the following options
have been added:
`--mode="eventdev"` or `--mode="poll"`
Allow the user to select the schedule type to be either
RTE_SCHED_TYPE_ORDERED, RTE_SCHED_TYPE_ATOMIC or RTE_SCHED_TYPE_PARALLEL
through:
`--eventq-sched="ordered"` or `--eventq-sched="atomic"` or
`--eventq-sched="parallel"`
Allow the user to specify the number of Rx queues to be connected to
the event queue using:
`--event-eth-rxqs`
Poll mode is still the default operation mode.
Signed-off-by: Sunil Kumar Kori <skori@marvell.com>
Signed-off-by: Nipun Gupta <nipun.gupta@nxp.com>
Series-Acked-by: Jerin Jacob <jerinj@marvell.com>
---
examples/l3fwd/Makefile | 2 +-
examples/l3fwd/l3fwd.h | 1 +
examples/l3fwd/l3fwd_event.c | 40 +++++++++++++
examples/l3fwd/l3fwd_event.h | 23 ++++++++
examples/l3fwd/main.c | 110 ++++++++++++++++++++++++++++++++++-
examples/l3fwd/meson.build | 4 +-
6 files changed, 174 insertions(+), 6 deletions(-)
create mode 100644 examples/l3fwd/l3fwd_event.c
create mode 100644 examples/l3fwd/l3fwd_event.h
diff --git a/examples/l3fwd/Makefile b/examples/l3fwd/Makefile
index b2dbf2607..c892b867b 100644
--- a/examples/l3fwd/Makefile
+++ b/examples/l3fwd/Makefile
@@ -5,7 +5,7 @@
APP = l3fwd
# all source are stored in SRCS-y
-SRCS-y := main.c l3fwd_lpm.c l3fwd_em.c
+SRCS-y := main.c l3fwd_lpm.c l3fwd_em.c l3fwd_event.c
# Build using pkg-config variables if possible
ifeq ($(shell pkg-config --exists libdpdk && echo 0),0)
diff --git a/examples/l3fwd/l3fwd.h b/examples/l3fwd/l3fwd.h
index 293fb1fa2..e2399d76b 100644
--- a/examples/l3fwd/l3fwd.h
+++ b/examples/l3fwd/l3fwd.h
@@ -5,6 +5,7 @@
#ifndef __L3_FWD_H__
#define __L3_FWD_H__
+#include <rte_ethdev.h>
#include <rte_vect.h>
#define DO_RFC_1812_CHECKS
diff --git a/examples/l3fwd/l3fwd_event.c b/examples/l3fwd/l3fwd_event.c
new file mode 100644
index 000000000..1040da4ea
--- /dev/null
+++ b/examples/l3fwd/l3fwd_event.c
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2019 Marvell International Ltd.
+ */
+
+#include <stdbool.h>
+#include <getopt.h>
+
+#include <rte_malloc.h>
+
+#include "l3fwd.h"
+#include "l3fwd_event.h"
+
+struct l3fwd_event_resources *
+l3fwd_get_eventdev_rsrc(void)
+{
+ static struct l3fwd_event_resources *rsrc;
+
+ if (rsrc != NULL)
+ return rsrc;
+
+ rsrc = rte_zmalloc("l3fwd", sizeof(struct l3fwd_event_resources), 0);
+ if (rsrc != NULL) {
+ rsrc->sched_type = RTE_SCHED_TYPE_ATOMIC;
+ rsrc->eth_rx_queues = 1;
+ return rsrc;
+ }
+
+ rte_exit(EXIT_FAILURE, "Unable to allocate memory for eventdev cfg\n");
+
+ return NULL;
+}
+
+void
+l3fwd_event_resource_setup(void)
+{
+ struct l3fwd_event_resources *evt_rsrc = l3fwd_get_eventdev_rsrc();
+
+ if (!evt_rsrc->enabled)
+ return;
+}
diff --git a/examples/l3fwd/l3fwd_event.h b/examples/l3fwd/l3fwd_event.h
new file mode 100644
index 000000000..4c23c4e1a
--- /dev/null
+++ b/examples/l3fwd/l3fwd_event.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2019 Marvell International Ltd.
+ */
+
+#ifndef __L3FWD_EVENTDEV_H__
+#define __L3FWD_EVENTDEV_H__
+
+#include <rte_common.h>
+#include <rte_eventdev.h>
+#include <rte_spinlock.h>
+
+#include "l3fwd.h"
+
+struct l3fwd_event_resources {
+ uint8_t sched_type;
+ uint8_t enabled;
+ uint8_t eth_rx_queues;
+};
+
+struct l3fwd_event_resources *l3fwd_get_eventdev_rsrc(void);
+void l3fwd_event_resource_setup(void);
+
+#endif /* __L3FWD_EVENTDEV_H__ */
diff --git a/examples/l3fwd/main.c b/examples/l3fwd/main.c
index 4dea12a65..8a5cdb52e 100644
--- a/examples/l3fwd/main.c
+++ b/examples/l3fwd/main.c
@@ -19,6 +19,7 @@
#include <rte_vect.h>
#include <rte_byteorder.h>
#include <rte_log.h>
+#include <rte_malloc.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_eal.h>
@@ -33,7 +34,6 @@
#include <rte_random.h>
#include <rte_debug.h>
#include <rte_ether.h>
-#include <rte_ethdev.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_ip.h>
@@ -46,6 +46,7 @@
#include <cmdline_parse_etheraddr.h>
#include "l3fwd.h"
+#include "l3fwd_event.h"
/*
* Configurable number of RX/TX ring descriptors
@@ -289,7 +290,9 @@ print_usage(const char *prgname)
" [--hash-entry-num]"
" [--ipv6]"
" [--parse-ptype]"
- " [--per-port-pool]\n\n"
+ " [--per-port-pool]"
+ " [--mode]"
+ " [--eventq-sched]\n\n"
" -p PORTMASK: Hexadecimal bitmask of ports to configure\n"
" -P : Enable promiscuous mode\n"
@@ -304,7 +307,16 @@ print_usage(const char *prgname)
" --hash-entry-num: Specify the hash entry number in hexadecimal to be setup\n"
" --ipv6: Set if running ipv6 packets\n"
" --parse-ptype: Set to use software to analyze packet type\n"
- " --per-port-pool: Use separate buffer pool per port\n\n",
+ " --per-port-pool: Use separate buffer pool per port\n"
+ " --mode: Packet transfer mode for I/O, poll or eventdev\n"
+ " Default mode = poll\n"
+ " --eventq-sched: Event queue synchronization method\n"
+ " ordered, atomic or parallel.\n"
+ " Default: atomic\n"
+ " Valid only if --mode=eventdev\n"
+ " --event-eth-rxqs: Number of ethernet RX queues per device.\n"
+ " Default: 1\n"
+ " Valid only if --mode=eventdev\n\n",
prgname);
}
@@ -440,6 +452,48 @@ parse_eth_dest(const char *optarg)
*(uint64_t *)(val_eth + portid) = dest_eth_addr[portid];
}
+static void
+parse_mode(const char *optarg)
+{
+ struct l3fwd_event_resources *evt_rsrc = l3fwd_get_eventdev_rsrc();
+
+ if (!strcmp(optarg, "poll"))
+ evt_rsrc->enabled = false;
+ else if (!strcmp(optarg, "eventdev"))
+ evt_rsrc->enabled = true;
+}
+
+static void
+parse_eventq_sched(const char *optarg)
+{
+ struct l3fwd_event_resources *evt_rsrc = l3fwd_get_eventdev_rsrc();
+
+ if (!strcmp(optarg, "ordered"))
+ evt_rsrc->sched_type = RTE_SCHED_TYPE_ORDERED;
+ if (!strcmp(optarg, "atomic"))
+ evt_rsrc->sched_type = RTE_SCHED_TYPE_ATOMIC;
+ if (!strcmp(optarg, "parallel"))
+ evt_rsrc->sched_type = RTE_SCHED_TYPE_PARALLEL;
+}
+
+static void
+parse_event_eth_rx_queues(const char *eth_rx_queues)
+{
+ struct l3fwd_event_resources *evt_rsrc = l3fwd_get_eventdev_rsrc();
+ char *end = NULL;
+ uint8_t num_eth_rx_queues;
+
+ /* parse decimal string */
+ num_eth_rx_queues = strtoul(eth_rx_queues, &end, 10);
+ if ((eth_rx_queues[0] == '\0') || (end == NULL) || (*end != '\0'))
+ return;
+
+ if (num_eth_rx_queues == 0)
+ return;
+
+ evt_rsrc->eth_rx_queues = num_eth_rx_queues;
+}
+
#define MAX_JUMBO_PKT_LEN 9600
#define MEMPOOL_CACHE_SIZE 256
@@ -458,6 +512,9 @@ static const char short_options[] =
#define CMD_LINE_OPT_HASH_ENTRY_NUM "hash-entry-num"
#define CMD_LINE_OPT_PARSE_PTYPE "parse-ptype"
#define CMD_LINE_OPT_PER_PORT_POOL "per-port-pool"
+#define CMD_LINE_OPT_MODE "mode"
+#define CMD_LINE_OPT_EVENTQ_SYNC "eventq-sched"
+#define CMD_LINE_OPT_EVENT_ETH_RX_QUEUES "event-eth-rxqs"
enum {
/* long options mapped to a short option */
@@ -472,6 +529,9 @@ enum {
CMD_LINE_OPT_HASH_ENTRY_NUM_NUM,
CMD_LINE_OPT_PARSE_PTYPE_NUM,
CMD_LINE_OPT_PARSE_PER_PORT_POOL,
+ CMD_LINE_OPT_MODE_NUM,
+ CMD_LINE_OPT_EVENTQ_SYNC_NUM,
+ CMD_LINE_OPT_EVENT_ETH_RX_QUEUES_NUM,
};
static const struct option lgopts[] = {
@@ -483,6 +543,10 @@ static const struct option lgopts[] = {
{CMD_LINE_OPT_HASH_ENTRY_NUM, 1, 0, CMD_LINE_OPT_HASH_ENTRY_NUM_NUM},
{CMD_LINE_OPT_PARSE_PTYPE, 0, 0, CMD_LINE_OPT_PARSE_PTYPE_NUM},
{CMD_LINE_OPT_PER_PORT_POOL, 0, 0, CMD_LINE_OPT_PARSE_PER_PORT_POOL},
+ {CMD_LINE_OPT_MODE, 1, 0, CMD_LINE_OPT_MODE_NUM},
+ {CMD_LINE_OPT_EVENTQ_SYNC, 1, 0, CMD_LINE_OPT_EVENTQ_SYNC_NUM},
+ {CMD_LINE_OPT_EVENT_ETH_RX_QUEUES, 1, 0,
+ CMD_LINE_OPT_EVENT_ETH_RX_QUEUES_NUM},
{NULL, 0, 0, 0}
};
@@ -508,6 +572,10 @@ parse_args(int argc, char **argv)
char **argvopt;
int option_index;
char *prgname = argv[0];
+ uint8_t lcore_params = 0;
+ uint8_t eventq_sched = 0;
+ uint8_t eth_rx_q = 0;
+ struct l3fwd_event_resources *evt_rsrc = l3fwd_get_eventdev_rsrc();
argvopt = argv;
@@ -546,6 +614,7 @@ parse_args(int argc, char **argv)
print_usage(prgname);
return -1;
}
+ lcore_params = 1;
break;
case CMD_LINE_OPT_ETH_DEST_NUM:
@@ -607,6 +676,20 @@ parse_args(int argc, char **argv)
per_port_pool = 1;
break;
+ case CMD_LINE_OPT_MODE_NUM:
+ parse_mode(optarg);
+ break;
+
+ case CMD_LINE_OPT_EVENTQ_SYNC_NUM:
+ parse_eventq_sched(optarg);
+ eventq_sched = 1;
+ break;
+
+ case CMD_LINE_OPT_EVENT_ETH_RX_QUEUES_NUM:
+ parse_event_eth_rx_queues(optarg);
+ eth_rx_q = 1;
+ break;
+
default:
print_usage(prgname);
return -1;
@@ -619,6 +702,21 @@ parse_args(int argc, char **argv)
return -1;
}
+ if (evt_rsrc->enabled && lcore_params) {
+ fprintf(stderr, "lcore config is not valid when event mode is selected\n");
+ return -1;
+ }
+
+ if (!evt_rsrc->enabled && eth_rx_q) {
+ fprintf(stderr, "eth_rx_queues is valid only when event mode is selected\n");
+ return -1;
+ }
+
+ if (!evt_rsrc->enabled && eventq_sched) {
+ fprintf(stderr, "eventq_sched is valid only when event mode is selected\n");
+ return -1;
+ }
+
/*
* Nothing is selected, pick longest-prefix match
* as default match.
@@ -811,6 +909,7 @@ prepare_ptype_parser(uint16_t portid, uint16_t queueid)
int
main(int argc, char **argv)
{
+ struct l3fwd_event_resources *evt_rsrc;
struct lcore_conf *qconf;
struct rte_eth_dev_info dev_info;
struct rte_eth_txconf *txconf;
@@ -839,11 +938,16 @@ main(int argc, char **argv)
*(uint64_t *)(val_eth + portid) = dest_eth_addr[portid];
}
+ evt_rsrc = l3fwd_get_eventdev_rsrc();
+ RTE_SET_USED(evt_rsrc);
/* parse application arguments (after the EAL ones) */
ret = parse_args(argc, argv);
if (ret < 0)
rte_exit(EXIT_FAILURE, "Invalid L3FWD parameters\n");
+ /* Configure eventdev parameters if user has requested */
+ l3fwd_event_resource_setup();
+
if (check_lcore_params() < 0)
rte_exit(EXIT_FAILURE, "check_lcore_params failed\n");
diff --git a/examples/l3fwd/meson.build b/examples/l3fwd/meson.build
index 6dd4b9022..864327c7b 100644
--- a/examples/l3fwd/meson.build
+++ b/examples/l3fwd/meson.build
@@ -6,7 +6,7 @@
# To build this example as a standalone application with an already-installed
# DPDK instance, use 'make'
-deps += ['hash', 'lpm']
+deps += ['hash', 'lpm', 'eventdev']
sources = files(
- 'l3fwd_em.c', 'l3fwd_lpm.c', 'main.c'
+ 'l3fwd_em.c', 'l3fwd_lpm.c', 'l3fwd_event.c', 'main.c'
)
--
2.17.1
^ permalink raw reply [flat|nested] 13+ messages in thread
* [dpdk-dev] [PATCH v6 02/11] examples/l3fwd: split pipelines based on capability
2020-01-28 5:34 ` [dpdk-dev] [PATCH v6 00/11] example/l3fwd: introduce event device support pbhagavatula
2020-01-28 5:34 ` [dpdk-dev] [PATCH v6 01/11] examples/l3fwd: add framework for event device pbhagavatula
@ 2020-01-28 5:34 ` pbhagavatula
2020-01-28 5:34 ` [dpdk-dev] [PATCH v6 03/11] examples/l3fwd: add event device configuration pbhagavatula
` (9 subsequent siblings)
11 siblings, 0 replies; 13+ messages in thread
From: pbhagavatula @ 2020-01-28 5:34 UTC (permalink / raw)
To: jerinj, konstantin.ananyev, Marko Kovacevic, Ori Kam,
Bruce Richardson, Radu Nicolau, Akhil Goyal, Tomasz Kantecki,
Sunil Kumar Kori, Pavan Nikhilesh
Cc: dev
From: Sunil Kumar Kori <skori@marvell.com>
Add infra to split eventdev framework based on event Tx adapter
capability.
If the event Tx adapter has internal port capability, then we use
`rte_event_eth_tx_adapter_enqueue` to transmit packets; otherwise,
we use a SINGLE_LINK event queue to enqueue packets to a service
core that is responsible for transmitting them.
Signed-off-by: Sunil Kumar Kori <skori@marvell.com>
Acked-by: Jerin Jacob <jerinj@marvell.com>
---
examples/l3fwd/Makefile | 1 +
examples/l3fwd/l3fwd_event.c | 31 ++++++++++++++++++++++
examples/l3fwd/l3fwd_event.h | 20 ++++++++++++++
examples/l3fwd/l3fwd_event_generic.c | 14 ++++++++++
examples/l3fwd/l3fwd_event_internal_port.c | 14 ++++++++++
examples/l3fwd/meson.build | 3 ++-
6 files changed, 82 insertions(+), 1 deletion(-)
create mode 100644 examples/l3fwd/l3fwd_event_generic.c
create mode 100644 examples/l3fwd/l3fwd_event_internal_port.c
diff --git a/examples/l3fwd/Makefile b/examples/l3fwd/Makefile
index c892b867b..59a110d12 100644
--- a/examples/l3fwd/Makefile
+++ b/examples/l3fwd/Makefile
@@ -6,6 +6,7 @@ APP = l3fwd
# all source are stored in SRCS-y
SRCS-y := main.c l3fwd_lpm.c l3fwd_em.c l3fwd_event.c
+SRCS-y += l3fwd_event_generic.c l3fwd_event_internal_port.c
# Build using pkg-config variables if possible
ifeq ($(shell pkg-config --exists libdpdk && echo 0),0)
diff --git a/examples/l3fwd/l3fwd_event.c b/examples/l3fwd/l3fwd_event.c
index 1040da4ea..62218f3ca 100644
--- a/examples/l3fwd/l3fwd_event.c
+++ b/examples/l3fwd/l3fwd_event.c
@@ -30,6 +30,31 @@ l3fwd_get_eventdev_rsrc(void)
return NULL;
}
+static void
+l3fwd_event_capability_setup(void)
+{
+ struct l3fwd_event_resources *evt_rsrc = l3fwd_get_eventdev_rsrc();
+ uint32_t caps = 0;
+ uint16_t i;
+ int ret;
+
+ RTE_ETH_FOREACH_DEV(i) {
+ ret = rte_event_eth_tx_adapter_caps_get(0, i, &caps);
+ if (ret)
+ rte_exit(EXIT_FAILURE,
+ "Invalid capability for Tx adptr port %d\n",
+ i);
+
+ evt_rsrc->tx_mode_q |= !(caps &
+ RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT);
+ }
+
+ if (evt_rsrc->tx_mode_q)
+ l3fwd_event_set_generic_ops(&evt_rsrc->ops);
+ else
+ l3fwd_event_set_internal_port_ops(&evt_rsrc->ops);
+}
+
void
l3fwd_event_resource_setup(void)
{
@@ -37,4 +62,10 @@ l3fwd_event_resource_setup(void)
if (!evt_rsrc->enabled)
return;
+
+ if (!rte_event_dev_count())
+ rte_exit(EXIT_FAILURE, "No Eventdev found");
+
+ /* Setup eventdev capability callbacks */
+ l3fwd_event_capability_setup();
}
diff --git a/examples/l3fwd/l3fwd_event.h b/examples/l3fwd/l3fwd_event.h
index 4c23c4e1a..d25c8d222 100644
--- a/examples/l3fwd/l3fwd_event.h
+++ b/examples/l3fwd/l3fwd_event.h
@@ -7,17 +7,37 @@
#include <rte_common.h>
#include <rte_eventdev.h>
+#include <rte_event_eth_tx_adapter.h>
#include <rte_spinlock.h>
#include "l3fwd.h"
+typedef uint32_t (*event_device_setup_cb)(void);
+typedef void (*event_queue_setup_cb)(uint32_t event_queue_cfg);
+typedef void (*event_port_setup_cb)(void);
+typedef void (*adapter_setup_cb)(void);
+typedef int (*event_loop_cb)(void *);
+
+struct l3fwd_event_setup_ops {
+ event_device_setup_cb event_device_setup;
+ event_queue_setup_cb event_queue_setup;
+ event_port_setup_cb event_port_setup;
+ adapter_setup_cb adapter_setup;
+ event_loop_cb lpm_event_loop;
+ event_loop_cb em_event_loop;
+};
+
struct l3fwd_event_resources {
+ struct l3fwd_event_setup_ops ops;
uint8_t sched_type;
+ uint8_t tx_mode_q;
uint8_t enabled;
uint8_t eth_rx_queues;
};
struct l3fwd_event_resources *l3fwd_get_eventdev_rsrc(void);
void l3fwd_event_resource_setup(void);
+void l3fwd_event_set_generic_ops(struct l3fwd_event_setup_ops *ops);
+void l3fwd_event_set_internal_port_ops(struct l3fwd_event_setup_ops *ops);
#endif /* __L3FWD_EVENTDEV_H__ */
diff --git a/examples/l3fwd/l3fwd_event_generic.c b/examples/l3fwd/l3fwd_event_generic.c
new file mode 100644
index 000000000..7fff850e5
--- /dev/null
+++ b/examples/l3fwd/l3fwd_event_generic.c
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2019 Marvell International Ltd.
+ */
+
+#include <stdbool.h>
+
+#include "l3fwd.h"
+#include "l3fwd_event.h"
+
+void
+l3fwd_event_set_generic_ops(struct l3fwd_event_setup_ops *ops)
+{
+ RTE_SET_USED(ops);
+}
diff --git a/examples/l3fwd/l3fwd_event_internal_port.c b/examples/l3fwd/l3fwd_event_internal_port.c
new file mode 100644
index 000000000..085e9c825
--- /dev/null
+++ b/examples/l3fwd/l3fwd_event_internal_port.c
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2019 Marvell International Ltd.
+ */
+
+#include <stdbool.h>
+
+#include "l3fwd.h"
+#include "l3fwd_event.h"
+
+void
+l3fwd_event_set_internal_port_ops(struct l3fwd_event_setup_ops *ops)
+{
+ RTE_SET_USED(ops);
+}
diff --git a/examples/l3fwd/meson.build b/examples/l3fwd/meson.build
index 864327c7b..ebed3b518 100644
--- a/examples/l3fwd/meson.build
+++ b/examples/l3fwd/meson.build
@@ -8,5 +8,6 @@
deps += ['hash', 'lpm', 'eventdev']
sources = files(
- 'l3fwd_em.c', 'l3fwd_lpm.c', 'l3fwd_event.c', 'main.c'
+ 'l3fwd_em.c', 'l3fwd_lpm.c', 'l3fwd_event.c',
+ 'l3fwd_event_internal_port.c', 'l3fwd_event_generic.c', 'main.c'
)
--
2.17.1
^ permalink raw reply [flat|nested] 13+ messages in thread
* [dpdk-dev] [PATCH v6 03/11] examples/l3fwd: add event device configuration
2020-01-28 5:34 ` [dpdk-dev] [PATCH v6 00/11] example/l3fwd: introduce event device support pbhagavatula
2020-01-28 5:34 ` [dpdk-dev] [PATCH v6 01/11] examples/l3fwd: add framework for event device pbhagavatula
2020-01-28 5:34 ` [dpdk-dev] [PATCH v6 02/11] examples/l3fwd: split pipelines based on capability pbhagavatula
@ 2020-01-28 5:34 ` pbhagavatula
2020-01-28 5:34 ` [dpdk-dev] [PATCH v6 04/11] examples/l3fwd: add ethdev setup based on eventdev pbhagavatula
` (8 subsequent siblings)
11 siblings, 0 replies; 13+ messages in thread
From: pbhagavatula @ 2020-01-28 5:34 UTC (permalink / raw)
To: jerinj, konstantin.ananyev, Marko Kovacevic, Ori Kam,
Bruce Richardson, Radu Nicolau, Akhil Goyal, Tomasz Kantecki,
Sunil Kumar Kori, Pavan Nikhilesh
Cc: dev
From: Pavan Nikhilesh <pbhagavatula@marvell.com>
Add event device configuration based on the capabilities of the
probed event device.
Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
---
examples/l3fwd/l3fwd_event.c | 3 +
examples/l3fwd/l3fwd_event.h | 36 ++++++++++
examples/l3fwd/l3fwd_event_generic.c | 76 +++++++++++++++++++++-
examples/l3fwd/l3fwd_event_internal_port.c | 76 +++++++++++++++++++++-
4 files changed, 189 insertions(+), 2 deletions(-)
diff --git a/examples/l3fwd/l3fwd_event.c b/examples/l3fwd/l3fwd_event.c
index 62218f3ca..9cb46c711 100644
--- a/examples/l3fwd/l3fwd_event.c
+++ b/examples/l3fwd/l3fwd_event.c
@@ -68,4 +68,7 @@ l3fwd_event_resource_setup(void)
/* Setup eventdev capability callbacks */
l3fwd_event_capability_setup();
+
+ /* Event device configuration */
+ evt_rsrc->ops.event_device_setup();
}
diff --git a/examples/l3fwd/l3fwd_event.h b/examples/l3fwd/l3fwd_event.h
index d25c8d222..fc3862ec1 100644
--- a/examples/l3fwd/l3fwd_event.h
+++ b/examples/l3fwd/l3fwd_event.h
@@ -8,6 +8,7 @@
#include <rte_common.h>
#include <rte_eventdev.h>
#include <rte_event_eth_tx_adapter.h>
+#include <rte_service.h>
#include <rte_spinlock.h>
#include "l3fwd.h"
@@ -18,6 +19,29 @@ typedef void (*event_port_setup_cb)(void);
typedef void (*adapter_setup_cb)(void);
typedef int (*event_loop_cb)(void *);
+struct l3fwd_event_queues {
+ uint8_t *event_q_id;
+ uint8_t nb_queues;
+};
+
+struct l3fwd_event_ports {
+ uint8_t *event_p_id;
+ uint8_t nb_ports;
+ rte_spinlock_t lock;
+};
+
+struct l3fwd_event_rx_adptr {
+ uint32_t service_id;
+ uint8_t nb_rx_adptr;
+ uint8_t *rx_adptr;
+};
+
+struct l3fwd_event_tx_adptr {
+ uint32_t service_id;
+ uint8_t nb_tx_adptr;
+ uint8_t *tx_adptr;
+};
+
struct l3fwd_event_setup_ops {
event_device_setup_cb event_device_setup;
event_queue_setup_cb event_queue_setup;
@@ -28,9 +52,21 @@ struct l3fwd_event_setup_ops {
};
struct l3fwd_event_resources {
+ struct rte_event_port_conf def_p_conf;
+ struct l3fwd_event_rx_adptr rx_adptr;
+ struct l3fwd_event_tx_adptr tx_adptr;
+ uint8_t disable_implicit_release;
struct l3fwd_event_setup_ops ops;
+ struct rte_mempool * (*pkt_pool)[NB_SOCKETS];
+ struct l3fwd_event_queues evq;
+ struct l3fwd_event_ports evp;
+ uint32_t port_mask;
+ uint8_t per_port_pool;
+ uint8_t event_d_id;
uint8_t sched_type;
uint8_t tx_mode_q;
+ uint8_t deq_depth;
+ uint8_t has_burst;
uint8_t enabled;
uint8_t eth_rx_queues;
};
diff --git a/examples/l3fwd/l3fwd_event_generic.c b/examples/l3fwd/l3fwd_event_generic.c
index 7fff850e5..ce06fa0e4 100644
--- a/examples/l3fwd/l3fwd_event_generic.c
+++ b/examples/l3fwd/l3fwd_event_generic.c
@@ -7,8 +7,82 @@
#include "l3fwd.h"
#include "l3fwd_event.h"
+static uint32_t
+l3fwd_event_device_setup_generic(void)
+{
+ struct l3fwd_event_resources *evt_rsrc = l3fwd_get_eventdev_rsrc();
+ struct rte_event_dev_config event_d_conf = {
+ .nb_events_limit = 4096,
+ .nb_event_queue_flows = 1024,
+ .nb_event_port_dequeue_depth = 128,
+ .nb_event_port_enqueue_depth = 128
+ };
+ struct rte_event_dev_info dev_info;
+ const uint8_t event_d_id = 0; /* Always use first event device only */
+ uint32_t event_queue_cfg = 0;
+ uint16_t ethdev_count = 0;
+ uint16_t num_workers = 0;
+ uint16_t port_id;
+ int ret;
+
+ RTE_ETH_FOREACH_DEV(port_id) {
+ if ((evt_rsrc->port_mask & (1 << port_id)) == 0)
+ continue;
+ ethdev_count++;
+ }
+
+ /* Event device configuration */
+ rte_event_dev_info_get(event_d_id, &dev_info);
+ /* Enable implicit release */
+ if (dev_info.event_dev_cap & RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE)
+ evt_rsrc->disable_implicit_release = 0;
+
+ if (dev_info.event_dev_cap & RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES)
+ event_queue_cfg |= RTE_EVENT_QUEUE_CFG_ALL_TYPES;
+
+ /* One queue for each ethdev port + one Tx adapter Single link queue. */
+ event_d_conf.nb_event_queues = ethdev_count + 1;
+ if (dev_info.max_event_queues < event_d_conf.nb_event_queues)
+ event_d_conf.nb_event_queues = dev_info.max_event_queues;
+
+ if (dev_info.max_num_events < event_d_conf.nb_events_limit)
+ event_d_conf.nb_events_limit = dev_info.max_num_events;
+
+ if (dev_info.max_event_queue_flows < event_d_conf.nb_event_queue_flows)
+ event_d_conf.nb_event_queue_flows =
+ dev_info.max_event_queue_flows;
+
+ if (dev_info.max_event_port_dequeue_depth <
+ event_d_conf.nb_event_port_dequeue_depth)
+ event_d_conf.nb_event_port_dequeue_depth =
+ dev_info.max_event_port_dequeue_depth;
+
+ if (dev_info.max_event_port_enqueue_depth <
+ event_d_conf.nb_event_port_enqueue_depth)
+ event_d_conf.nb_event_port_enqueue_depth =
+ dev_info.max_event_port_enqueue_depth;
+
+ num_workers = rte_lcore_count() - rte_service_lcore_count();
+ if (dev_info.max_event_ports < num_workers)
+ num_workers = dev_info.max_event_ports;
+
+ event_d_conf.nb_event_ports = num_workers;
+ evt_rsrc->evp.nb_ports = num_workers;
+ evt_rsrc->evq.nb_queues = event_d_conf.nb_event_queues;
+
+ evt_rsrc->has_burst = !!(dev_info.event_dev_cap &
+ RTE_EVENT_DEV_CAP_BURST_MODE);
+
+ ret = rte_event_dev_configure(event_d_id, &event_d_conf);
+ if (ret < 0)
+ rte_panic("Error in configuring event device\n");
+
+ evt_rsrc->event_d_id = event_d_id;
+ return event_queue_cfg;
+}
+
void
l3fwd_event_set_generic_ops(struct l3fwd_event_setup_ops *ops)
{
- RTE_SET_USED(ops);
+ ops->event_device_setup = l3fwd_event_device_setup_generic;
}
diff --git a/examples/l3fwd/l3fwd_event_internal_port.c b/examples/l3fwd/l3fwd_event_internal_port.c
index 085e9c825..242bd0f48 100644
--- a/examples/l3fwd/l3fwd_event_internal_port.c
+++ b/examples/l3fwd/l3fwd_event_internal_port.c
@@ -7,8 +7,82 @@
#include "l3fwd.h"
#include "l3fwd_event.h"
+static uint32_t
+l3fwd_event_device_setup_internal_port(void)
+{
+ struct l3fwd_event_resources *evt_rsrc = l3fwd_get_eventdev_rsrc();
+ struct rte_event_dev_config event_d_conf = {
+ .nb_events_limit = 4096,
+ .nb_event_queue_flows = 1024,
+ .nb_event_port_dequeue_depth = 128,
+ .nb_event_port_enqueue_depth = 128
+ };
+ struct rte_event_dev_info dev_info;
+ const uint8_t event_d_id = 0; /* Always use first event device only */
+ uint32_t event_queue_cfg = 0;
+ uint16_t ethdev_count = 0;
+ uint16_t num_workers = 0;
+ uint16_t port_id;
+ int ret;
+
+ RTE_ETH_FOREACH_DEV(port_id) {
+ if ((evt_rsrc->port_mask & (1 << port_id)) == 0)
+ continue;
+ ethdev_count++;
+ }
+
+ /* Event device configuration */
+ rte_event_dev_info_get(event_d_id, &dev_info);
+
+ /* Enable implicit release */
+ if (dev_info.event_dev_cap & RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE)
+ evt_rsrc->disable_implicit_release = 0;
+
+ if (dev_info.event_dev_cap & RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES)
+ event_queue_cfg |= RTE_EVENT_QUEUE_CFG_ALL_TYPES;
+
+ event_d_conf.nb_event_queues = ethdev_count;
+ if (dev_info.max_event_queues < event_d_conf.nb_event_queues)
+ event_d_conf.nb_event_queues = dev_info.max_event_queues;
+
+ if (dev_info.max_num_events < event_d_conf.nb_events_limit)
+ event_d_conf.nb_events_limit = dev_info.max_num_events;
+
+ if (dev_info.max_event_queue_flows < event_d_conf.nb_event_queue_flows)
+ event_d_conf.nb_event_queue_flows =
+ dev_info.max_event_queue_flows;
+
+ if (dev_info.max_event_port_dequeue_depth <
+ event_d_conf.nb_event_port_dequeue_depth)
+ event_d_conf.nb_event_port_dequeue_depth =
+ dev_info.max_event_port_dequeue_depth;
+
+ if (dev_info.max_event_port_enqueue_depth <
+ event_d_conf.nb_event_port_enqueue_depth)
+ event_d_conf.nb_event_port_enqueue_depth =
+ dev_info.max_event_port_enqueue_depth;
+
+ num_workers = rte_lcore_count();
+ if (dev_info.max_event_ports < num_workers)
+ num_workers = dev_info.max_event_ports;
+
+ event_d_conf.nb_event_ports = num_workers;
+ evt_rsrc->evp.nb_ports = num_workers;
+ evt_rsrc->evq.nb_queues = event_d_conf.nb_event_queues;
+ evt_rsrc->has_burst = !!(dev_info.event_dev_cap &
+ RTE_EVENT_DEV_CAP_BURST_MODE);
+
+ ret = rte_event_dev_configure(event_d_id, &event_d_conf);
+ if (ret < 0)
+ rte_panic("Error in configuring event device\n");
+
+ evt_rsrc->event_d_id = event_d_id;
+ return event_queue_cfg;
+}
+
+
void
l3fwd_event_set_internal_port_ops(struct l3fwd_event_setup_ops *ops)
{
- RTE_SET_USED(ops);
+ ops->event_device_setup = l3fwd_event_device_setup_internal_port;
}
--
2.17.1
^ permalink raw reply [flat|nested] 13+ messages in thread
* [dpdk-dev] [PATCH v6 04/11] examples/l3fwd: add ethdev setup based on eventdev
2020-01-28 5:34 ` [dpdk-dev] [PATCH v6 00/11] example/l3fwd: introduce event device support pbhagavatula
` (2 preceding siblings ...)
2020-01-28 5:34 ` [dpdk-dev] [PATCH v6 03/11] examples/l3fwd: add event device configuration pbhagavatula
@ 2020-01-28 5:34 ` pbhagavatula
2020-01-28 5:34 ` [dpdk-dev] [PATCH v6 05/11] examples/l3fwd: add event port and queue setup pbhagavatula
` (7 subsequent siblings)
11 siblings, 0 replies; 13+ messages in thread
From: pbhagavatula @ 2020-01-28 5:34 UTC (permalink / raw)
To: jerinj, konstantin.ananyev, Marko Kovacevic, Ori Kam,
Bruce Richardson, Radu Nicolau, Akhil Goyal, Tomasz Kantecki,
Sunil Kumar Kori, Pavan Nikhilesh
Cc: dev, Nipun Gupta
From: Sunil Kumar Kori <skori@marvell.com>
Add Ethernet port Rx/Tx queue setup for the event device; these queues
are later used for setting up the event eth Rx/Tx adapters.
Signed-off-by: Sunil Kumar Kori <skori@marvell.com>
Signed-off-by: Nipun Gupta <nipun.gupta@nxp.com>
---
examples/l3fwd/l3fwd.h | 10 +++
examples/l3fwd/l3fwd_event.c | 138 ++++++++++++++++++++++++++++++++++-
examples/l3fwd/l3fwd_event.h | 2 +-
examples/l3fwd/main.c | 15 ++--
4 files changed, 153 insertions(+), 12 deletions(-)
diff --git a/examples/l3fwd/l3fwd.h b/examples/l3fwd/l3fwd.h
index e2399d76b..ed5ba5f3b 100644
--- a/examples/l3fwd/l3fwd.h
+++ b/examples/l3fwd/l3fwd.h
@@ -16,9 +16,16 @@
#define NO_HASH_MULTI_LOOKUP 1
#endif
+/*
+ * Configurable number of RX/TX ring descriptors
+ */
+#define RTE_TEST_RX_DESC_DEFAULT 1024
+#define RTE_TEST_TX_DESC_DEFAULT 1024
+
#define MAX_PKT_BURST 32
#define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */
+#define MEMPOOL_CACHE_SIZE 256
#define MAX_RX_QUEUE_PER_LCORE 16
/*
@@ -170,6 +177,9 @@ is_valid_ipv4_pkt(struct rte_ipv4_hdr *pkt, uint32_t link_len)
}
#endif /* DO_RFC_1812_CHECKS */
+int
+init_mem(uint16_t portid, unsigned int nb_mbuf);
+
/* Function pointers for LPM or EM functionality. */
void
setup_lpm(const int socketid);
diff --git a/examples/l3fwd/l3fwd_event.c b/examples/l3fwd/l3fwd_event.c
index 9cb46c711..f9491ecc6 100644
--- a/examples/l3fwd/l3fwd_event.c
+++ b/examples/l3fwd/l3fwd_event.c
@@ -10,6 +10,14 @@
#include "l3fwd.h"
#include "l3fwd_event.h"
+static void
+print_ethaddr(const char *name, const struct rte_ether_addr *eth_addr)
+{
+ char buf[RTE_ETHER_ADDR_FMT_SIZE];
+ rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, eth_addr);
+ printf("%s%s", name, buf);
+}
+
struct l3fwd_event_resources *
l3fwd_get_eventdev_rsrc(void)
{
@@ -30,6 +38,131 @@ l3fwd_get_eventdev_rsrc(void)
return NULL;
}
+static void
+l3fwd_eth_dev_port_setup(struct rte_eth_conf *port_conf)
+{
+ struct l3fwd_event_resources *evt_rsrc = l3fwd_get_eventdev_rsrc();
+ uint16_t nb_ports = rte_eth_dev_count_avail();
+ uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT;
+ uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT;
+ unsigned int nb_lcores = rte_lcore_count();
+ struct rte_eth_conf local_port_conf;
+ struct rte_eth_dev_info dev_info;
+ struct rte_eth_txconf txconf;
+ struct rte_eth_rxconf rxconf;
+ unsigned int nb_mbuf;
+ uint16_t port_id;
+ uint8_t eth_qid;
+ int32_t ret;
+
+ /* initialize all ports */
+ RTE_ETH_FOREACH_DEV(port_id) {
+ local_port_conf = *port_conf;
+ /* skip ports that are not enabled */
+ if ((evt_rsrc->port_mask & (1 << port_id)) == 0) {
+ printf("\nSkipping disabled port %d\n", port_id);
+ continue;
+ }
+
+ /* init port */
+ printf("Initializing port %d ... ", port_id);
+ fflush(stdout);
+ printf("Creating queues: nb_rxq=%d nb_txq=1...\n",
+ evt_rsrc->eth_rx_queues);
+
+ rte_eth_dev_info_get(port_id, &dev_info);
+ if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+ local_port_conf.txmode.offloads |=
+ DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+
+ local_port_conf.rx_adv_conf.rss_conf.rss_hf &=
+ dev_info.flow_type_rss_offloads;
+ if (local_port_conf.rx_adv_conf.rss_conf.rss_hf !=
+ port_conf->rx_adv_conf.rss_conf.rss_hf) {
+ printf("Port %u modified RSS hash function "
+ "based on hardware support,"
+ "requested:%#"PRIx64" configured:%#"PRIx64"\n",
+ port_id,
+ port_conf->rx_adv_conf.rss_conf.rss_hf,
+ local_port_conf.rx_adv_conf.rss_conf.rss_hf);
+ }
+
+ ret = rte_eth_dev_configure(port_id, evt_rsrc->eth_rx_queues,
+ 1, &local_port_conf);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE,
+ "Cannot configure device: err=%d, port=%d\n",
+ ret, port_id);
+
+ ret = rte_eth_dev_adjust_nb_rx_tx_desc(port_id, &nb_rxd,
+ &nb_txd);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE,
+ "Cannot adjust number of descriptors: err=%d, "
+ "port=%d\n", ret, port_id);
+
+ rte_eth_macaddr_get(port_id, &ports_eth_addr[port_id]);
+ print_ethaddr(" Address:", &ports_eth_addr[port_id]);
+ printf(", ");
+ print_ethaddr("Destination:",
+ (const struct rte_ether_addr *)&dest_eth_addr[port_id]);
+ printf(", ");
+
+ /* prepare source MAC for each port. */
+ rte_ether_addr_copy(&ports_eth_addr[port_id],
+ (struct rte_ether_addr *)(val_eth + port_id) + 1);
+
+ /* init memory */
+ if (!evt_rsrc->per_port_pool) {
+ /* port_id = 0; this is *not* signifying the first port,
+ * rather, it signifies that port_id is ignored.
+ */
+ nb_mbuf = RTE_MAX(nb_ports * nb_rxd +
+ nb_ports * nb_txd +
+ nb_ports * nb_lcores *
+ MAX_PKT_BURST +
+ nb_lcores * MEMPOOL_CACHE_SIZE,
+ 8192u);
+ ret = init_mem(0, nb_mbuf);
+ } else {
+ nb_mbuf = RTE_MAX(nb_rxd + nb_rxd +
+ nb_lcores * MAX_PKT_BURST +
+ nb_lcores * MEMPOOL_CACHE_SIZE,
+ 8192u);
+ ret = init_mem(port_id, nb_mbuf);
+ }
+ /* init Rx queues per port */
+ rxconf = dev_info.default_rxconf;
+ rxconf.offloads = local_port_conf.rxmode.offloads;
+
+ for (eth_qid = 0; eth_qid < evt_rsrc->eth_rx_queues;
+ eth_qid++) {
+ if (!evt_rsrc->per_port_pool)
+ ret = rte_eth_rx_queue_setup(port_id, eth_qid,
+ nb_rxd, 0, &rxconf,
+ evt_rsrc->pkt_pool[0][0]);
+ else
+ ret = rte_eth_rx_queue_setup(port_id, eth_qid,
+ nb_rxd, 0, &rxconf,
+ evt_rsrc->pkt_pool[port_id][0]);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE,
+ "rte_eth_rx_queue_setup: err=%d, "
+ "port=%d, eth_qid: %d\n",
+ ret, port_id, eth_qid);
+ }
+
+ /* init one Tx queue per port */
+ txconf = dev_info.default_txconf;
+ txconf.offloads = local_port_conf.txmode.offloads;
+ ret = rte_eth_tx_queue_setup(port_id, 0, nb_txd, 0, &txconf);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE,
+ "rte_eth_tx_queue_setup: err=%d, "
+ "port=%d\n", ret, port_id);
+ }
+}
+
static void
l3fwd_event_capability_setup(void)
{
@@ -56,7 +189,7 @@ l3fwd_event_capability_setup(void)
}
void
-l3fwd_event_resource_setup(void)
+l3fwd_event_resource_setup(struct rte_eth_conf *port_conf)
{
struct l3fwd_event_resources *evt_rsrc = l3fwd_get_eventdev_rsrc();
@@ -69,6 +202,9 @@ l3fwd_event_resource_setup(void)
/* Setup eventdev capability callbacks */
l3fwd_event_capability_setup();
+ /* Ethernet device configuration */
+ l3fwd_eth_dev_port_setup(port_conf);
+
/* Event device configuration */
evt_rsrc->ops.event_device_setup();
}
diff --git a/examples/l3fwd/l3fwd_event.h b/examples/l3fwd/l3fwd_event.h
index fc3862ec1..53feea069 100644
--- a/examples/l3fwd/l3fwd_event.h
+++ b/examples/l3fwd/l3fwd_event.h
@@ -72,7 +72,7 @@ struct l3fwd_event_resources {
};
struct l3fwd_event_resources *l3fwd_get_eventdev_rsrc(void);
-void l3fwd_event_resource_setup(void);
+void l3fwd_event_resource_setup(struct rte_eth_conf *port_conf);
void l3fwd_event_set_generic_ops(struct l3fwd_event_setup_ops *ops);
void l3fwd_event_set_internal_port_ops(struct l3fwd_event_setup_ops *ops);
diff --git a/examples/l3fwd/main.c b/examples/l3fwd/main.c
index 8a5cdb52e..04526343f 100644
--- a/examples/l3fwd/main.c
+++ b/examples/l3fwd/main.c
@@ -48,12 +48,6 @@
#include "l3fwd.h"
#include "l3fwd_event.h"
-/*
- * Configurable number of RX/TX ring descriptors
- */
-#define RTE_TEST_RX_DESC_DEFAULT 1024
-#define RTE_TEST_TX_DESC_DEFAULT 1024
-
#define MAX_TX_QUEUE_PER_PORT RTE_MAX_ETHPORTS
#define MAX_RX_QUEUE_PER_PORT 128
@@ -495,7 +489,6 @@ parse_event_eth_rx_queues(const char *eth_rx_queues)
}
#define MAX_JUMBO_PKT_LEN 9600
-#define MEMPOOL_CACHE_SIZE 256
static const char short_options[] =
"p:" /* portmask */
@@ -752,7 +745,7 @@ print_ethaddr(const char *name, const struct rte_ether_addr *eth_addr)
printf("%s%s", name, buf);
}
-static int
+int
init_mem(uint16_t portid, unsigned int nb_mbuf)
{
struct lcore_conf *qconf;
@@ -939,14 +932,16 @@ main(int argc, char **argv)
}
evt_rsrc = l3fwd_get_eventdev_rsrc();
- RTE_SET_USED(evt_rsrc);
/* parse application arguments (after the EAL ones) */
ret = parse_args(argc, argv);
if (ret < 0)
rte_exit(EXIT_FAILURE, "Invalid L3FWD parameters\n");
+ evt_rsrc->per_port_pool = per_port_pool;
+ evt_rsrc->pkt_pool = pktmbuf_pool;
+ evt_rsrc->port_mask = enabled_port_mask;
/* Configure eventdev parameters if user has requested */
- l3fwd_event_resource_setup();
+ l3fwd_event_resource_setup(&port_conf);
if (check_lcore_params() < 0)
rte_exit(EXIT_FAILURE, "check_lcore_params failed\n");
--
2.17.1
^ permalink raw reply [flat|nested] 13+ messages in thread
* [dpdk-dev] [PATCH v6 05/11] examples/l3fwd: add event port and queue setup
2020-01-28 5:34 ` [dpdk-dev] [PATCH v6 00/11] example/l3fwd: introduce event device support pbhagavatula
` (3 preceding siblings ...)
2020-01-28 5:34 ` [dpdk-dev] [PATCH v6 04/11] examples/l3fwd: add ethdev setup based on eventdev pbhagavatula
@ 2020-01-28 5:34 ` pbhagavatula
2020-01-28 5:35 ` [dpdk-dev] [PATCH v6 06/11] examples/l3fwd: add event eth Rx/Tx adapter setup pbhagavatula
` (6 subsequent siblings)
11 siblings, 0 replies; 13+ messages in thread
From: pbhagavatula @ 2020-01-28 5:34 UTC (permalink / raw)
To: jerinj, konstantin.ananyev, Marko Kovacevic, Ori Kam,
Bruce Richardson, Radu Nicolau, Akhil Goyal, Tomasz Kantecki,
Sunil Kumar Kori, Pavan Nikhilesh
Cc: dev
From: Sunil Kumar Kori <skori@marvell.com>
Add event device queue and port setup based on event eth Tx adapter
capabilities.
Signed-off-by: Sunil Kumar Kori <skori@marvell.com>
---
examples/l3fwd/l3fwd_event.c | 28 +++++-
examples/l3fwd/l3fwd_event.h | 1 +
examples/l3fwd/l3fwd_event_generic.c | 103 +++++++++++++++++++++
examples/l3fwd/l3fwd_event_internal_port.c | 98 ++++++++++++++++++++
4 files changed, 229 insertions(+), 1 deletion(-)
diff --git a/examples/l3fwd/l3fwd_event.c b/examples/l3fwd/l3fwd_event.c
index f9491ecc6..b58f9b79a 100644
--- a/examples/l3fwd/l3fwd_event.c
+++ b/examples/l3fwd/l3fwd_event.c
@@ -188,10 +188,30 @@ l3fwd_event_capability_setup(void)
l3fwd_event_set_internal_port_ops(&evt_rsrc->ops);
}
+int
+l3fwd_get_free_event_port(struct l3fwd_event_resources *evt_rsrc)
+{
+ static int index;
+ int port_id;
+
+ rte_spinlock_lock(&evt_rsrc->evp.lock);
+ if (index >= evt_rsrc->evp.nb_ports) {
+ printf("No free event port is available\n");
+ return -1;
+ }
+
+ port_id = evt_rsrc->evp.event_p_id[index];
+ index++;
+ rte_spinlock_unlock(&evt_rsrc->evp.lock);
+
+ return port_id;
+}
+
void
l3fwd_event_resource_setup(struct rte_eth_conf *port_conf)
{
struct l3fwd_event_resources *evt_rsrc = l3fwd_get_eventdev_rsrc();
+ uint32_t event_queue_cfg;
if (!evt_rsrc->enabled)
return;
@@ -206,5 +226,11 @@ l3fwd_event_resource_setup(struct rte_eth_conf *port_conf)
l3fwd_eth_dev_port_setup(port_conf);
/* Event device configuration */
- evt_rsrc->ops.event_device_setup();
+ event_queue_cfg = evt_rsrc->ops.event_device_setup();
+
+ /* Event queue configuration */
+ evt_rsrc->ops.event_queue_setup(event_queue_cfg);
+
+ /* Event port configuration */
+ evt_rsrc->ops.event_port_setup();
}
diff --git a/examples/l3fwd/l3fwd_event.h b/examples/l3fwd/l3fwd_event.h
index 53feea069..4bceca920 100644
--- a/examples/l3fwd/l3fwd_event.h
+++ b/examples/l3fwd/l3fwd_event.h
@@ -73,6 +73,7 @@ struct l3fwd_event_resources {
struct l3fwd_event_resources *l3fwd_get_eventdev_rsrc(void);
void l3fwd_event_resource_setup(struct rte_eth_conf *port_conf);
+int l3fwd_get_free_event_port(struct l3fwd_event_resources *eventdev_rsrc);
void l3fwd_event_set_generic_ops(struct l3fwd_event_setup_ops *ops);
void l3fwd_event_set_internal_port_ops(struct l3fwd_event_setup_ops *ops);
diff --git a/examples/l3fwd/l3fwd_event_generic.c b/examples/l3fwd/l3fwd_event_generic.c
index ce06fa0e4..4d0cbec05 100644
--- a/examples/l3fwd/l3fwd_event_generic.c
+++ b/examples/l3fwd/l3fwd_event_generic.c
@@ -81,8 +81,111 @@ l3fwd_event_device_setup_generic(void)
return event_queue_cfg;
}
+static void
+l3fwd_event_port_setup_generic(void)
+{
+ struct l3fwd_event_resources *evt_rsrc = l3fwd_get_eventdev_rsrc();
+ uint8_t event_d_id = evt_rsrc->event_d_id;
+ struct rte_event_port_conf event_p_conf = {
+ .dequeue_depth = 32,
+ .enqueue_depth = 32,
+ .new_event_threshold = 4096
+ };
+ struct rte_event_port_conf def_p_conf;
+ uint8_t event_p_id;
+ int32_t ret;
+
+ evt_rsrc->evp.event_p_id = (uint8_t *)malloc(sizeof(uint8_t) *
+ evt_rsrc->evp.nb_ports);
+ if (!evt_rsrc->evp.event_p_id)
+ rte_panic("No space is available\n");
+
+ memset(&def_p_conf, 0, sizeof(struct rte_event_port_conf));
+ rte_event_port_default_conf_get(event_d_id, 0, &def_p_conf);
+
+ if (def_p_conf.new_event_threshold < event_p_conf.new_event_threshold)
+ event_p_conf.new_event_threshold =
+ def_p_conf.new_event_threshold;
+
+ if (def_p_conf.dequeue_depth < event_p_conf.dequeue_depth)
+ event_p_conf.dequeue_depth = def_p_conf.dequeue_depth;
+
+ if (def_p_conf.enqueue_depth < event_p_conf.enqueue_depth)
+ event_p_conf.enqueue_depth = def_p_conf.enqueue_depth;
+
+ event_p_conf.disable_implicit_release =
+ evt_rsrc->disable_implicit_release;
+ evt_rsrc->deq_depth = def_p_conf.dequeue_depth;
+
+ for (event_p_id = 0; event_p_id < evt_rsrc->evp.nb_ports;
+ event_p_id++) {
+ ret = rte_event_port_setup(event_d_id, event_p_id,
+ &event_p_conf);
+ if (ret < 0)
+ rte_panic("Error in configuring event port %d\n",
+ event_p_id);
+
+ ret = rte_event_port_link(event_d_id, event_p_id,
+ evt_rsrc->evq.event_q_id,
+ NULL,
+ evt_rsrc->evq.nb_queues - 1);
+ if (ret != (evt_rsrc->evq.nb_queues - 1))
+ rte_panic("Error in linking event port %d to queues\n",
+ event_p_id);
+ evt_rsrc->evp.event_p_id[event_p_id] = event_p_id;
+ }
+ /* init spinlock */
+ rte_spinlock_init(&evt_rsrc->evp.lock);
+
+ evt_rsrc->def_p_conf = event_p_conf;
+}
+
+static void
+l3fwd_event_queue_setup_generic(uint32_t event_queue_cfg)
+{
+ struct l3fwd_event_resources *evt_rsrc = l3fwd_get_eventdev_rsrc();
+ uint8_t event_d_id = evt_rsrc->event_d_id;
+ struct rte_event_queue_conf event_q_conf = {
+ .nb_atomic_flows = 1024,
+ .nb_atomic_order_sequences = 1024,
+ .event_queue_cfg = event_queue_cfg,
+ .priority = RTE_EVENT_DEV_PRIORITY_NORMAL
+ };
+ struct rte_event_queue_conf def_q_conf;
+ uint8_t event_q_id;
+ int32_t ret;
+
+ event_q_conf.schedule_type = evt_rsrc->sched_type;
+ evt_rsrc->evq.event_q_id = (uint8_t *)malloc(sizeof(uint8_t) *
+ evt_rsrc->evq.nb_queues);
+ if (!evt_rsrc->evq.event_q_id)
+ rte_panic("Memory allocation failure\n");
+
+ rte_event_queue_default_conf_get(event_d_id, 0, &def_q_conf);
+ if (def_q_conf.nb_atomic_flows < event_q_conf.nb_atomic_flows)
+ event_q_conf.nb_atomic_flows = def_q_conf.nb_atomic_flows;
+
+ for (event_q_id = 0; event_q_id < (evt_rsrc->evq.nb_queues - 1);
+ event_q_id++) {
+ ret = rte_event_queue_setup(event_d_id, event_q_id,
+ &event_q_conf);
+ if (ret < 0)
+ rte_panic("Error in configuring event queue\n");
+ evt_rsrc->evq.event_q_id[event_q_id] = event_q_id;
+ }
+
+ event_q_conf.event_queue_cfg |= RTE_EVENT_QUEUE_CFG_SINGLE_LINK;
+ event_q_conf.priority = RTE_EVENT_DEV_PRIORITY_HIGHEST,
+ ret = rte_event_queue_setup(event_d_id, event_q_id, &event_q_conf);
+ if (ret < 0)
+ rte_panic("Error in configuring event queue for Tx adapter\n");
+ evt_rsrc->evq.event_q_id[event_q_id] = event_q_id;
+}
+
void
l3fwd_event_set_generic_ops(struct l3fwd_event_setup_ops *ops)
{
ops->event_device_setup = l3fwd_event_device_setup_generic;
+ ops->event_queue_setup = l3fwd_event_queue_setup_generic;
+ ops->event_port_setup = l3fwd_event_port_setup_generic;
}
diff --git a/examples/l3fwd/l3fwd_event_internal_port.c b/examples/l3fwd/l3fwd_event_internal_port.c
index 242bd0f48..476dd8a08 100644
--- a/examples/l3fwd/l3fwd_event_internal_port.c
+++ b/examples/l3fwd/l3fwd_event_internal_port.c
@@ -80,9 +80,107 @@ l3fwd_event_device_setup_internal_port(void)
return event_queue_cfg;
}
+static void
+l3fwd_event_port_setup_internal_port(void)
+{
+ struct l3fwd_event_resources *evt_rsrc = l3fwd_get_eventdev_rsrc();
+ uint8_t event_d_id = evt_rsrc->event_d_id;
+ struct rte_event_port_conf event_p_conf = {
+ .dequeue_depth = 32,
+ .enqueue_depth = 32,
+ .new_event_threshold = 4096
+ };
+ struct rte_event_port_conf def_p_conf;
+ uint8_t event_p_id;
+ int32_t ret;
+
+ evt_rsrc->evp.event_p_id = (uint8_t *)malloc(sizeof(uint8_t) *
+ evt_rsrc->evp.nb_ports);
+ if (!evt_rsrc->evp.event_p_id)
+ rte_panic("Failed to allocate memory for Event Ports\n");
+
+ rte_event_port_default_conf_get(event_d_id, 0, &def_p_conf);
+ if (def_p_conf.new_event_threshold < event_p_conf.new_event_threshold)
+ event_p_conf.new_event_threshold =
+ def_p_conf.new_event_threshold;
+
+ if (def_p_conf.dequeue_depth < event_p_conf.dequeue_depth)
+ event_p_conf.dequeue_depth = def_p_conf.dequeue_depth;
+
+ if (def_p_conf.enqueue_depth < event_p_conf.enqueue_depth)
+ event_p_conf.enqueue_depth = def_p_conf.enqueue_depth;
+
+ event_p_conf.disable_implicit_release =
+ evt_rsrc->disable_implicit_release;
+
+ for (event_p_id = 0; event_p_id < evt_rsrc->evp.nb_ports;
+ event_p_id++) {
+ ret = rte_event_port_setup(event_d_id, event_p_id,
+ &event_p_conf);
+ if (ret < 0)
+ rte_panic("Error in configuring event port %d\n",
+ event_p_id);
+
+ ret = rte_event_port_link(event_d_id, event_p_id, NULL,
+ NULL, 0);
+ if (ret < 0)
+ rte_panic("Error in linking event port %d to queue\n",
+ event_p_id);
+ evt_rsrc->evp.event_p_id[event_p_id] = event_p_id;
+
+ /* init spinlock */
+ rte_spinlock_init(&evt_rsrc->evp.lock);
+ }
+
+ evt_rsrc->def_p_conf = event_p_conf;
+}
+
+static void
+l3fwd_event_queue_setup_internal_port(uint32_t event_queue_cfg)
+{
+ struct l3fwd_event_resources *evt_rsrc = l3fwd_get_eventdev_rsrc();
+ uint8_t event_d_id = evt_rsrc->event_d_id;
+ struct rte_event_queue_conf event_q_conf = {
+ .nb_atomic_flows = 1024,
+ .nb_atomic_order_sequences = 1024,
+ .event_queue_cfg = event_queue_cfg,
+ .priority = RTE_EVENT_DEV_PRIORITY_NORMAL
+ };
+ struct rte_event_queue_conf def_q_conf;
+ uint8_t event_q_id = 0;
+ int32_t ret;
+
+ rte_event_queue_default_conf_get(event_d_id, event_q_id, &def_q_conf);
+
+ if (def_q_conf.nb_atomic_flows < event_q_conf.nb_atomic_flows)
+ event_q_conf.nb_atomic_flows = def_q_conf.nb_atomic_flows;
+
+ if (def_q_conf.nb_atomic_order_sequences <
+ event_q_conf.nb_atomic_order_sequences)
+ event_q_conf.nb_atomic_order_sequences =
+ def_q_conf.nb_atomic_order_sequences;
+
+ event_q_conf.event_queue_cfg = event_queue_cfg;
+ event_q_conf.schedule_type = evt_rsrc->sched_type;
+ evt_rsrc->evq.event_q_id = (uint8_t *)malloc(sizeof(uint8_t) *
+ evt_rsrc->evq.nb_queues);
+ if (!evt_rsrc->evq.event_q_id)
+ rte_panic("Memory allocation failure\n");
+
+ for (event_q_id = 0; event_q_id < evt_rsrc->evq.nb_queues;
+ event_q_id++) {
+ ret = rte_event_queue_setup(event_d_id, event_q_id,
+ &event_q_conf);
+ if (ret < 0)
+ rte_panic("Error in configuring event queue\n");
+ evt_rsrc->evq.event_q_id[event_q_id] = event_q_id;
+ }
+}
void
l3fwd_event_set_internal_port_ops(struct l3fwd_event_setup_ops *ops)
{
ops->event_device_setup = l3fwd_event_device_setup_internal_port;
+ ops->event_queue_setup = l3fwd_event_queue_setup_internal_port;
+ ops->event_port_setup = l3fwd_event_port_setup_internal_port;
}
--
2.17.1
^ permalink raw reply [flat|nested] 13+ messages in thread
* [dpdk-dev] [PATCH v6 06/11] examples/l3fwd: add event eth Rx/Tx adapter setup
2020-01-28 5:34 ` [dpdk-dev] [PATCH v6 00/11] example/l3fwd: introduce event device support pbhagavatula
` (4 preceding siblings ...)
2020-01-28 5:34 ` [dpdk-dev] [PATCH v6 05/11] examples/l3fwd: add event port and queue setup pbhagavatula
@ 2020-01-28 5:35 ` pbhagavatula
2020-01-28 5:35 ` [dpdk-dev] [PATCH v6 07/11] examples/l3fwd: add service core setup based on caps pbhagavatula
` (5 subsequent siblings)
11 siblings, 0 replies; 13+ messages in thread
From: pbhagavatula @ 2020-01-28 5:35 UTC (permalink / raw)
To: jerinj, konstantin.ananyev, Marko Kovacevic, Ori Kam,
Bruce Richardson, Radu Nicolau, Akhil Goyal, Tomasz Kantecki,
Sunil Kumar Kori, Pavan Nikhilesh
Cc: dev
From: Pavan Nikhilesh <pbhagavatula@marvell.com>
Add event eth Rx/Tx adapter setup for both generic and internal port
event device pipelines.
Signed-off-by: Sunil Kumar Kori <skori@marvell.com>
Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
---
examples/l3fwd/l3fwd_event.c | 3 +
examples/l3fwd/l3fwd_event.h | 1 +
examples/l3fwd/l3fwd_event_generic.c | 112 +++++++++++++++++++++
examples/l3fwd/l3fwd_event_internal_port.c | 93 +++++++++++++++++
4 files changed, 209 insertions(+)
diff --git a/examples/l3fwd/l3fwd_event.c b/examples/l3fwd/l3fwd_event.c
index b58f9b79a..0a75e39ee 100644
--- a/examples/l3fwd/l3fwd_event.c
+++ b/examples/l3fwd/l3fwd_event.c
@@ -233,4 +233,7 @@ l3fwd_event_resource_setup(struct rte_eth_conf *port_conf)
/* Event port configuration */
evt_rsrc->ops.event_port_setup();
+
+ /* Rx/Tx adapters configuration */
+ evt_rsrc->ops.adapter_setup();
}
diff --git a/examples/l3fwd/l3fwd_event.h b/examples/l3fwd/l3fwd_event.h
index 4bceca920..b1ae970d4 100644
--- a/examples/l3fwd/l3fwd_event.h
+++ b/examples/l3fwd/l3fwd_event.h
@@ -7,6 +7,7 @@
#include <rte_common.h>
#include <rte_eventdev.h>
+#include <rte_event_eth_rx_adapter.h>
#include <rte_event_eth_tx_adapter.h>
#include <rte_service.h>
#include <rte_spinlock.h>
diff --git a/examples/l3fwd/l3fwd_event_generic.c b/examples/l3fwd/l3fwd_event_generic.c
index 4d0cbec05..c69c611dd 100644
--- a/examples/l3fwd/l3fwd_event_generic.c
+++ b/examples/l3fwd/l3fwd_event_generic.c
@@ -182,10 +182,122 @@ l3fwd_event_queue_setup_generic(uint32_t event_queue_cfg)
evt_rsrc->evq.event_q_id[event_q_id] = event_q_id;
}
+static void
+l3fwd_rx_tx_adapter_setup_generic(void)
+{
+ struct l3fwd_event_resources *evt_rsrc = l3fwd_get_eventdev_rsrc();
+ struct rte_event_eth_rx_adapter_queue_conf eth_q_conf;
+ uint8_t event_d_id = evt_rsrc->event_d_id;
+ uint8_t rx_adptr_id = 0;
+ uint8_t tx_adptr_id = 0;
+ uint8_t tx_port_id = 0;
+ uint16_t port_id;
+ uint32_t service_id;
+ int32_t ret, i = 0;
+
+ memset(ð_q_conf, 0, sizeof(eth_q_conf));
+ eth_q_conf.ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
+
+ /* Rx adapter setup */
+ evt_rsrc->rx_adptr.nb_rx_adptr = 1;
+ evt_rsrc->rx_adptr.rx_adptr = (uint8_t *)malloc(sizeof(uint8_t) *
+ evt_rsrc->rx_adptr.nb_rx_adptr);
+ if (!evt_rsrc->rx_adptr.rx_adptr) {
+ free(evt_rsrc->evp.event_p_id);
+ free(evt_rsrc->evq.event_q_id);
+ rte_panic("Failed to allocate memory for Rx adapter\n");
+ }
+
+ ret = rte_event_eth_rx_adapter_create(rx_adptr_id, event_d_id,
+ &evt_rsrc->def_p_conf);
+ if (ret)
+ rte_panic("Failed to create rx adapter\n");
+
+ /* Configure user requested sched type */
+ eth_q_conf.ev.sched_type = evt_rsrc->sched_type;
+ RTE_ETH_FOREACH_DEV(port_id) {
+ if ((evt_rsrc->port_mask & (1 << port_id)) == 0)
+ continue;
+ eth_q_conf.ev.queue_id = evt_rsrc->evq.event_q_id[i];
+ ret = rte_event_eth_rx_adapter_queue_add(rx_adptr_id, port_id,
+ -1, ð_q_conf);
+ if (ret)
+ rte_panic("Failed to add queues to Rx adapter\n");
+ if (i < evt_rsrc->evq.nb_queues)
+ i++;
+ }
+
+ ret = rte_event_eth_rx_adapter_service_id_get(rx_adptr_id, &service_id);
+ if (ret != -ESRCH && ret != 0)
+ rte_panic("Error getting the service ID for rx adptr\n");
+
+ rte_service_runstate_set(service_id, 1);
+ rte_service_set_runstate_mapped_check(service_id, 0);
+ evt_rsrc->rx_adptr.service_id = service_id;
+
+ ret = rte_event_eth_rx_adapter_start(rx_adptr_id);
+ if (ret)
+ rte_panic("Rx adapter[%d] start Failed\n", rx_adptr_id);
+
+ evt_rsrc->rx_adptr.rx_adptr[0] = rx_adptr_id;
+
+ /* Tx adapter setup */
+ evt_rsrc->tx_adptr.nb_tx_adptr = 1;
+ evt_rsrc->tx_adptr.tx_adptr = (uint8_t *)malloc(sizeof(uint8_t) *
+ evt_rsrc->tx_adptr.nb_tx_adptr);
+ if (!evt_rsrc->tx_adptr.tx_adptr) {
+ free(evt_rsrc->rx_adptr.rx_adptr);
+ free(evt_rsrc->evp.event_p_id);
+ free(evt_rsrc->evq.event_q_id);
+ rte_panic("Failed to allocate memory for Rx adapter\n");
+ }
+
+ ret = rte_event_eth_tx_adapter_create(tx_adptr_id, event_d_id,
+ &evt_rsrc->def_p_conf);
+ if (ret)
+ rte_panic("Failed to create tx adapter\n");
+
+ RTE_ETH_FOREACH_DEV(port_id) {
+ if ((evt_rsrc->port_mask & (1 << port_id)) == 0)
+ continue;
+ ret = rte_event_eth_tx_adapter_queue_add(tx_adptr_id, port_id,
+ -1);
+ if (ret)
+ rte_panic("Failed to add queues to Tx adapter\n");
+ }
+
+ ret = rte_event_eth_tx_adapter_service_id_get(tx_adptr_id, &service_id);
+ if (ret != -ESRCH && ret != 0)
+ rte_panic("Failed to get Tx adapter service ID\n");
+
+ rte_service_runstate_set(service_id, 1);
+ rte_service_set_runstate_mapped_check(service_id, 0);
+ evt_rsrc->tx_adptr.service_id = service_id;
+
+ ret = rte_event_eth_tx_adapter_event_port_get(tx_adptr_id, &tx_port_id);
+ if (ret)
+ rte_panic("Failed to get Tx adapter port id: %d\n", ret);
+
+ ret = rte_event_port_link(event_d_id, tx_port_id,
+ &evt_rsrc->evq.event_q_id[
+ evt_rsrc->evq.nb_queues - 1],
+ NULL, 1);
+ if (ret != 1)
+ rte_panic("Unable to link Tx adapter port to Tx queue:err=%d\n",
+ ret);
+
+ ret = rte_event_eth_tx_adapter_start(tx_adptr_id);
+ if (ret)
+ rte_panic("Tx adapter[%d] start Failed\n", tx_adptr_id);
+
+ evt_rsrc->tx_adptr.tx_adptr[0] = tx_adptr_id;
+}
+
void
l3fwd_event_set_generic_ops(struct l3fwd_event_setup_ops *ops)
{
ops->event_device_setup = l3fwd_event_device_setup_generic;
ops->event_queue_setup = l3fwd_event_queue_setup_generic;
ops->event_port_setup = l3fwd_event_port_setup_generic;
+ ops->adapter_setup = l3fwd_rx_tx_adapter_setup_generic;
}
diff --git a/examples/l3fwd/l3fwd_event_internal_port.c b/examples/l3fwd/l3fwd_event_internal_port.c
index 476dd8a08..993e26f13 100644
--- a/examples/l3fwd/l3fwd_event_internal_port.c
+++ b/examples/l3fwd/l3fwd_event_internal_port.c
@@ -177,10 +177,103 @@ l3fwd_event_queue_setup_internal_port(uint32_t event_queue_cfg)
}
}
+static void
+l3fwd_rx_tx_adapter_setup_internal_port(void)
+{
+ struct l3fwd_event_resources *evt_rsrc = l3fwd_get_eventdev_rsrc();
+ struct rte_event_eth_rx_adapter_queue_conf eth_q_conf;
+ uint8_t event_d_id = evt_rsrc->event_d_id;
+ uint16_t adapter_id = 0;
+ uint16_t nb_adapter = 0;
+ uint16_t port_id;
+ uint8_t q_id = 0;
+ int ret;
+
+ memset(ð_q_conf, 0, sizeof(eth_q_conf));
+ eth_q_conf.ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
+
+ RTE_ETH_FOREACH_DEV(port_id) {
+ if ((evt_rsrc->port_mask & (1 << port_id)) == 0)
+ continue;
+ nb_adapter++;
+ }
+
+ evt_rsrc->rx_adptr.nb_rx_adptr = nb_adapter;
+ evt_rsrc->rx_adptr.rx_adptr = (uint8_t *)malloc(sizeof(uint8_t) *
+ evt_rsrc->rx_adptr.nb_rx_adptr);
+ if (!evt_rsrc->rx_adptr.rx_adptr) {
+ free(evt_rsrc->evp.event_p_id);
+ free(evt_rsrc->evq.event_q_id);
+ rte_panic("Failed to allocate memory for Rx adapter\n");
+ }
+
+
+ RTE_ETH_FOREACH_DEV(port_id) {
+ if ((evt_rsrc->port_mask & (1 << port_id)) == 0)
+ continue;
+ ret = rte_event_eth_rx_adapter_create(adapter_id, event_d_id,
+ &evt_rsrc->def_p_conf);
+ if (ret)
+ rte_panic("Failed to create rx adapter[%d]\n",
+ adapter_id);
+
+ /* Configure user requested sched type*/
+ eth_q_conf.ev.sched_type = evt_rsrc->sched_type;
+ eth_q_conf.ev.queue_id = evt_rsrc->evq.event_q_id[q_id];
+ ret = rte_event_eth_rx_adapter_queue_add(adapter_id, port_id,
+ -1, ð_q_conf);
+ if (ret)
+ rte_panic("Failed to add queues to Rx adapter\n");
+
+ ret = rte_event_eth_rx_adapter_start(adapter_id);
+ if (ret)
+ rte_panic("Rx adapter[%d] start Failed\n", adapter_id);
+
+ evt_rsrc->rx_adptr.rx_adptr[adapter_id] = adapter_id;
+ adapter_id++;
+ if (q_id < evt_rsrc->evq.nb_queues)
+ q_id++;
+ }
+
+ evt_rsrc->tx_adptr.nb_tx_adptr = nb_adapter;
+ evt_rsrc->tx_adptr.tx_adptr = (uint8_t *)malloc(sizeof(uint8_t) *
+ evt_rsrc->tx_adptr.nb_tx_adptr);
+ if (!evt_rsrc->tx_adptr.tx_adptr) {
+ free(evt_rsrc->rx_adptr.rx_adptr);
+ free(evt_rsrc->evp.event_p_id);
+ free(evt_rsrc->evq.event_q_id);
+ rte_panic("Failed to allocate memory for Rx adapter\n");
+ }
+
+ adapter_id = 0;
+ RTE_ETH_FOREACH_DEV(port_id) {
+ if ((evt_rsrc->port_mask & (1 << port_id)) == 0)
+ continue;
+ ret = rte_event_eth_tx_adapter_create(adapter_id, event_d_id,
+ &evt_rsrc->def_p_conf);
+ if (ret)
+ rte_panic("Failed to create tx adapter[%d]\n",
+ adapter_id);
+
+ ret = rte_event_eth_tx_adapter_queue_add(adapter_id, port_id,
+ -1);
+ if (ret)
+ rte_panic("Failed to add queues to Tx adapter\n");
+
+ ret = rte_event_eth_tx_adapter_start(adapter_id);
+ if (ret)
+ rte_panic("Tx adapter[%d] start Failed\n", adapter_id);
+
+ evt_rsrc->tx_adptr.tx_adptr[adapter_id] = adapter_id;
+ adapter_id++;
+ }
+}
+
void
l3fwd_event_set_internal_port_ops(struct l3fwd_event_setup_ops *ops)
{
ops->event_device_setup = l3fwd_event_device_setup_internal_port;
ops->event_queue_setup = l3fwd_event_queue_setup_internal_port;
ops->event_port_setup = l3fwd_event_port_setup_internal_port;
+ ops->adapter_setup = l3fwd_rx_tx_adapter_setup_internal_port;
}
--
2.17.1
^ permalink raw reply [flat|nested] 13+ messages in thread
* [dpdk-dev] [PATCH v6 07/11] examples/l3fwd: add service core setup based on caps
2020-01-28 5:34 ` [dpdk-dev] [PATCH v6 00/11] example/l3fwd: introduce event device support pbhagavatula
` (5 preceding siblings ...)
2020-01-28 5:35 ` [dpdk-dev] [PATCH v6 06/11] examples/l3fwd: add event eth Rx/Tx adapter setup pbhagavatula
@ 2020-01-28 5:35 ` pbhagavatula
2020-01-28 5:35 ` [dpdk-dev] [PATCH v6 08/11] examples/l3fwd: add event lpm main loop pbhagavatula
` (4 subsequent siblings)
11 siblings, 0 replies; 13+ messages in thread
From: pbhagavatula @ 2020-01-28 5:35 UTC (permalink / raw)
To: jerinj, konstantin.ananyev, Marko Kovacevic, Ori Kam,
Bruce Richardson, Radu Nicolau, Akhil Goyal, Tomasz Kantecki,
Sunil Kumar Kori, Pavan Nikhilesh
Cc: dev
From: Pavan Nikhilesh <pbhagavatula@marvell.com>
Add service core setup when eventdev and Rx/Tx adapter don't have
internal port capability.
Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
---
examples/l3fwd/l3fwd_event.c | 6 ++
examples/l3fwd/main.c | 188 +++++++++++++++++++++++++++--------
2 files changed, 150 insertions(+), 44 deletions(-)
diff --git a/examples/l3fwd/l3fwd_event.c b/examples/l3fwd/l3fwd_event.c
index 0a75e39ee..1ed42c3ab 100644
--- a/examples/l3fwd/l3fwd_event.c
+++ b/examples/l3fwd/l3fwd_event.c
@@ -212,6 +212,7 @@ l3fwd_event_resource_setup(struct rte_eth_conf *port_conf)
{
struct l3fwd_event_resources *evt_rsrc = l3fwd_get_eventdev_rsrc();
uint32_t event_queue_cfg;
+ int ret;
if (!evt_rsrc->enabled)
return;
@@ -236,4 +237,9 @@ l3fwd_event_resource_setup(struct rte_eth_conf *port_conf)
/* Rx/Tx adapters configuration */
evt_rsrc->ops.adapter_setup();
+
+ /* Start event device */
+ ret = rte_event_dev_start(evt_rsrc->event_d_id);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE, "Error in starting eventdev");
}
diff --git a/examples/l3fwd/main.c b/examples/l3fwd/main.c
index 04526343f..4bb00a48b 100644
--- a/examples/l3fwd/main.c
+++ b/examples/l3fwd/main.c
@@ -899,49 +899,18 @@ prepare_ptype_parser(uint16_t portid, uint16_t queueid)
return 0;
}
-int
-main(int argc, char **argv)
+static void
+l3fwd_poll_resource_setup(void)
{
- struct l3fwd_event_resources *evt_rsrc;
- struct lcore_conf *qconf;
+ uint8_t nb_rx_queue, queue, socketid;
struct rte_eth_dev_info dev_info;
+ uint32_t n_tx_queue, nb_lcores;
struct rte_eth_txconf *txconf;
- int ret;
- unsigned nb_ports;
+ struct lcore_conf *qconf;
uint16_t queueid, portid;
- unsigned lcore_id;
- uint32_t n_tx_queue, nb_lcores;
- uint8_t nb_rx_queue, queue, socketid;
-
- /* init EAL */
- ret = rte_eal_init(argc, argv);
- if (ret < 0)
- rte_exit(EXIT_FAILURE, "Invalid EAL parameters\n");
- argc -= ret;
- argv += ret;
-
- force_quit = false;
- signal(SIGINT, signal_handler);
- signal(SIGTERM, signal_handler);
-
- /* pre-init dst MACs for all ports to 02:00:00:00:00:xx */
- for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++) {
- dest_eth_addr[portid] =
- RTE_ETHER_LOCAL_ADMIN_ADDR + ((uint64_t)portid << 40);
- *(uint64_t *)(val_eth + portid) = dest_eth_addr[portid];
- }
-
- evt_rsrc = l3fwd_get_eventdev_rsrc();
- /* parse application arguments (after the EAL ones) */
- ret = parse_args(argc, argv);
- if (ret < 0)
- rte_exit(EXIT_FAILURE, "Invalid L3FWD parameters\n");
-
- evt_rsrc->per_port_pool = per_port_pool;
- evt_rsrc->pkt_pool = pktmbuf_pool;
- evt_rsrc->port_mask = enabled_port_mask;
- /* Configure eventdev parameters if user has requested */
- l3fwd_event_resource_setup(&port_conf);
+ unsigned int nb_ports;
+ unsigned int lcore_id;
+ int ret;
if (check_lcore_params() < 0)
rte_exit(EXIT_FAILURE, "check_lcore_params failed\n");
@@ -957,9 +926,6 @@ main(int argc, char **argv)
nb_lcores = rte_lcore_count();
- /* Setup function pointers for lookup method. */
- setup_l3fwd_lookup_tables();
-
/* initialize all ports */
RTE_ETH_FOREACH_DEV(portid) {
struct rte_eth_conf local_port_conf = port_conf;
@@ -1127,7 +1093,142 @@ main(int argc, char **argv)
}
}
- printf("\n");
+
+}
+
+static inline int
+l3fwd_service_enable(uint32_t service_id)
+{
+ uint8_t min_service_count = UINT8_MAX;
+ uint32_t slcore_array[RTE_MAX_LCORE];
+ unsigned int slcore = 0;
+ uint8_t service_count;
+ int32_t slcore_count;
+
+ if (!rte_service_lcore_count())
+ return -ENOENT;
+
+ slcore_count = rte_service_lcore_list(slcore_array, RTE_MAX_LCORE);
+ if (slcore_count < 0)
+ return -ENOENT;
+ /* Get the core which has least number of services running. */
+ while (slcore_count--) {
+ /* Reset default mapping */
+ rte_service_map_lcore_set(service_id,
+ slcore_array[slcore_count], 0);
+ service_count = rte_service_lcore_count_services(
+ slcore_array[slcore_count]);
+ if (service_count < min_service_count) {
+ slcore = slcore_array[slcore_count];
+ min_service_count = service_count;
+ }
+ }
+ if (rte_service_map_lcore_set(service_id, slcore, 1))
+ return -ENOENT;
+ rte_service_lcore_start(slcore);
+
+ return 0;
+}
+
+static void
+l3fwd_event_service_setup(void)
+{
+ struct l3fwd_event_resources *evt_rsrc = l3fwd_get_eventdev_rsrc();
+ struct rte_event_dev_info evdev_info;
+ uint32_t service_id, caps;
+ int ret, i;
+
+ rte_event_dev_info_get(evt_rsrc->event_d_id, &evdev_info);
+ if (!(evdev_info.event_dev_cap & RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED)) {
+ ret = rte_event_dev_service_id_get(evt_rsrc->event_d_id,
+ &service_id);
+ if (ret != -ESRCH && ret != 0)
+ rte_exit(EXIT_FAILURE,
+ "Error in starting eventdev service\n");
+ l3fwd_service_enable(service_id);
+ }
+
+ for (i = 0; i < evt_rsrc->rx_adptr.nb_rx_adptr; i++) {
+ ret = rte_event_eth_rx_adapter_caps_get(evt_rsrc->event_d_id,
+ evt_rsrc->rx_adptr.rx_adptr[i], &caps);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE,
+ "Failed to get Rx adapter[%d] caps\n",
+ evt_rsrc->rx_adptr.rx_adptr[i]);
+ ret = rte_event_eth_rx_adapter_service_id_get(
+ evt_rsrc->event_d_id,
+ &service_id);
+ if (ret != -ESRCH && ret != 0)
+ rte_exit(EXIT_FAILURE,
+ "Error in starting Rx adapter[%d] service\n",
+ evt_rsrc->rx_adptr.rx_adptr[i]);
+ l3fwd_service_enable(service_id);
+ }
+
+ for (i = 0; i < evt_rsrc->tx_adptr.nb_tx_adptr; i++) {
+ ret = rte_event_eth_tx_adapter_caps_get(evt_rsrc->event_d_id,
+ evt_rsrc->tx_adptr.tx_adptr[i], &caps);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE,
+ "Failed to get Rx adapter[%d] caps\n",
+ evt_rsrc->tx_adptr.tx_adptr[i]);
+ ret = rte_event_eth_tx_adapter_service_id_get(
+ evt_rsrc->event_d_id,
+ &service_id);
+ if (ret != -ESRCH && ret != 0)
+ rte_exit(EXIT_FAILURE,
+ "Error in starting Rx adapter[%d] service\n",
+ evt_rsrc->tx_adptr.tx_adptr[i]);
+ l3fwd_service_enable(service_id);
+ }
+}
+
+int
+main(int argc, char **argv)
+{
+ struct l3fwd_event_resources *evt_rsrc;
+ struct lcore_conf *qconf;
+ uint16_t queueid, portid;
+ unsigned int lcore_id;
+ uint8_t queue;
+ int ret;
+
+ /* init EAL */
+ ret = rte_eal_init(argc, argv);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE, "Invalid EAL parameters\n");
+ argc -= ret;
+ argv += ret;
+
+ force_quit = false;
+ signal(SIGINT, signal_handler);
+ signal(SIGTERM, signal_handler);
+
+ /* pre-init dst MACs for all ports to 02:00:00:00:00:xx */
+ for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++) {
+ dest_eth_addr[portid] =
+ RTE_ETHER_LOCAL_ADMIN_ADDR + ((uint64_t)portid << 40);
+ *(uint64_t *)(val_eth + portid) = dest_eth_addr[portid];
+ }
+
+ evt_rsrc = l3fwd_get_eventdev_rsrc();
+ /* parse application arguments (after the EAL ones) */
+ ret = parse_args(argc, argv);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE, "Invalid L3FWD parameters\n");
+
+ /* Setup function pointers for lookup method. */
+ setup_l3fwd_lookup_tables();
+
+ evt_rsrc->per_port_pool = per_port_pool;
+ evt_rsrc->pkt_pool = pktmbuf_pool;
+ evt_rsrc->port_mask = enabled_port_mask;
+ /* Configure eventdev parameters if user has requested */
+ if (evt_rsrc->enabled) {
+ l3fwd_event_resource_setup(&port_conf);
+ l3fwd_event_service_setup();
+ } else
+ l3fwd_poll_resource_setup();
/* start ports */
RTE_ETH_FOREACH_DEV(portid) {
@@ -1170,7 +1271,6 @@ main(int argc, char **argv)
}
}
-
check_all_ports_link_status(enabled_port_mask);
ret = 0;
--
2.17.1
^ permalink raw reply [flat|nested] 13+ messages in thread
* [dpdk-dev] [PATCH v6 08/11] examples/l3fwd: add event lpm main loop
2020-01-28 5:34 ` [dpdk-dev] [PATCH v6 00/11] example/l3fwd: introduce event device support pbhagavatula
` (6 preceding siblings ...)
2020-01-28 5:35 ` [dpdk-dev] [PATCH v6 07/11] examples/l3fwd: add service core setup based on caps pbhagavatula
@ 2020-01-28 5:35 ` pbhagavatula
2020-01-28 5:35 ` [dpdk-dev] [PATCH v6 09/11] examples/l3fwd: add event em " pbhagavatula
` (3 subsequent siblings)
11 siblings, 0 replies; 13+ messages in thread
From: pbhagavatula @ 2020-01-28 5:35 UTC (permalink / raw)
To: jerinj, konstantin.ananyev, Marko Kovacevic, Ori Kam,
Bruce Richardson, Radu Nicolau, Akhil Goyal, Tomasz Kantecki,
Sunil Kumar Kori, Pavan Nikhilesh
Cc: dev
From: Pavan Nikhilesh <pbhagavatula@marvell.com>
Add lpm main loop for handling events based on capabilities of the
event device.
Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
---
examples/l3fwd/l3fwd.h | 9 ++
examples/l3fwd/l3fwd_event.c | 9 ++
examples/l3fwd/l3fwd_event.h | 5 +
examples/l3fwd/l3fwd_lpm.c | 203 +++++++++++++++++++++++++++++++++++
examples/l3fwd/main.c | 1 +
5 files changed, 227 insertions(+)
diff --git a/examples/l3fwd/l3fwd.h b/examples/l3fwd/l3fwd.h
index ed5ba5f3b..b3cdcd496 100644
--- a/examples/l3fwd/l3fwd.h
+++ b/examples/l3fwd/l3fwd.h
@@ -207,6 +207,15 @@ em_main_loop(__attribute__((unused)) void *dummy);
int
lpm_main_loop(__attribute__((unused)) void *dummy);
+int
+lpm_event_main_loop_tx_d(__attribute__((unused)) void *dummy);
+int
+lpm_event_main_loop_tx_d_burst(__attribute__((unused)) void *dummy);
+int
+lpm_event_main_loop_tx_q(__attribute__((unused)) void *dummy);
+int
+lpm_event_main_loop_tx_q_burst(__attribute__((unused)) void *dummy);
+
/* Return ipv4/ipv6 fwd lookup struct for LPM or EM. */
void *
em_get_ipv4_l3fwd_lookup_struct(const int socketid);
diff --git a/examples/l3fwd/l3fwd_event.c b/examples/l3fwd/l3fwd_event.c
index 1ed42c3ab..ebddd8f97 100644
--- a/examples/l3fwd/l3fwd_event.c
+++ b/examples/l3fwd/l3fwd_event.c
@@ -211,6 +211,12 @@ void
l3fwd_event_resource_setup(struct rte_eth_conf *port_conf)
{
struct l3fwd_event_resources *evt_rsrc = l3fwd_get_eventdev_rsrc();
+ const event_loop_cb lpm_event_loop[2][2] = {
+ [0][0] = lpm_event_main_loop_tx_d,
+ [0][1] = lpm_event_main_loop_tx_d_burst,
+ [1][0] = lpm_event_main_loop_tx_q,
+ [1][1] = lpm_event_main_loop_tx_q_burst,
+ };
uint32_t event_queue_cfg;
int ret;
@@ -242,4 +248,7 @@ l3fwd_event_resource_setup(struct rte_eth_conf *port_conf)
ret = rte_event_dev_start(evt_rsrc->event_d_id);
if (ret < 0)
rte_exit(EXIT_FAILURE, "Error in starting eventdev");
+
+ evt_rsrc->ops.lpm_event_loop = lpm_event_loop[evt_rsrc->tx_mode_q]
+ [evt_rsrc->has_burst];
}
diff --git a/examples/l3fwd/l3fwd_event.h b/examples/l3fwd/l3fwd_event.h
index b1ae970d4..0e4616417 100644
--- a/examples/l3fwd/l3fwd_event.h
+++ b/examples/l3fwd/l3fwd_event.h
@@ -14,6 +14,11 @@
#include "l3fwd.h"
+#define L3FWD_EVENT_SINGLE 0x1
+#define L3FWD_EVENT_BURST 0x2
+#define L3FWD_EVENT_TX_DIRECT 0x4
+#define L3FWD_EVENT_TX_ENQ 0x8
+
typedef uint32_t (*event_device_setup_cb)(void);
typedef void (*event_queue_setup_cb)(uint32_t event_queue_cfg);
typedef void (*event_port_setup_cb)(void);
diff --git a/examples/l3fwd/l3fwd_lpm.c b/examples/l3fwd/l3fwd_lpm.c
index 349de2703..058a60b1d 100644
--- a/examples/l3fwd/l3fwd_lpm.c
+++ b/examples/l3fwd/l3fwd_lpm.c
@@ -28,6 +28,7 @@
#include <rte_lpm6.h>
#include "l3fwd.h"
+#include "l3fwd_event.h"
struct ipv4_l3fwd_lpm_route {
uint32_t ip;
@@ -254,6 +255,208 @@ lpm_main_loop(__attribute__((unused)) void *dummy)
return 0;
}
+static __rte_always_inline uint16_t
+lpm_process_event_pkt(const struct lcore_conf *lconf, struct rte_mbuf *mbuf)
+{
+ mbuf->port = lpm_get_dst_port(lconf, mbuf, mbuf->port);
+
+#if defined RTE_ARCH_X86 || defined RTE_MACHINE_CPUFLAG_NEON \
+ || defined RTE_ARCH_PPC_64
+ process_packet(mbuf, &mbuf->port);
+#else
+
+ struct rte_ether_hdr *eth_hdr = rte_pktmbuf_mtod(mbuf,
+ struct rte_ether_hdr *);
+#ifdef DO_RFC_1812_CHECKS
+ struct rte_ipv4_hdr *ipv4_hdr;
+ if (RTE_ETH_IS_IPV4_HDR(mbuf->packet_type)) {
+ /* Handle IPv4 headers.*/
+ ipv4_hdr = rte_pktmbuf_mtod_offset(mbuf,
+ struct rte_ipv4_hdr *,
+ sizeof(struct rte_ether_hdr));
+
+ if (is_valid_ipv4_pkt(ipv4_hdr, mbuf->pkt_len)
+ < 0) {
+ mbuf->port = BAD_PORT;
+ return BAD_PORT;
+ }
+ /* Update time to live and header checksum */
+ --(ipv4_hdr->time_to_live);
+ ++(ipv4_hdr->hdr_checksum);
+ }
+#endif
+ /* dst addr */
+ *(uint64_t *)&eth_hdr->d_addr = dest_eth_addr[mbuf->port];
+
+ /* src addr */
+ rte_ether_addr_copy(&ports_eth_addr[mbuf->port],
+ &eth_hdr->s_addr);
+#endif
+ return mbuf->port;
+}
+
+static __rte_always_inline void
+lpm_event_loop_single(struct l3fwd_event_resources *evt_rsrc,
+ const uint8_t flags)
+{
+ const int event_p_id = l3fwd_get_free_event_port(evt_rsrc);
+ const uint8_t tx_q_id = evt_rsrc->evq.event_q_id[
+ evt_rsrc->evq.nb_queues - 1];
+ const uint8_t event_d_id = evt_rsrc->event_d_id;
+ struct lcore_conf *lconf;
+ unsigned int lcore_id;
+ struct rte_event ev;
+
+ if (event_p_id < 0)
+ return;
+
+ lcore_id = rte_lcore_id();
+ lconf = &lcore_conf[lcore_id];
+
+ RTE_LOG(INFO, L3FWD, "entering %s on lcore %u\n", __func__, lcore_id);
+ while (!force_quit) {
+ if (!rte_event_dequeue_burst(event_d_id, event_p_id, &ev, 1, 0))
+ continue;
+
+ if (lpm_process_event_pkt(lconf, ev.mbuf) == BAD_PORT) {
+ rte_pktmbuf_free(ev.mbuf);
+ continue;
+ }
+
+ if (flags & L3FWD_EVENT_TX_ENQ) {
+ ev.queue_id = tx_q_id;
+ ev.op = RTE_EVENT_OP_FORWARD;
+ while (rte_event_enqueue_burst(event_d_id, event_p_id,
+ &ev, 1) && !force_quit)
+ ;
+ }
+
+ if (flags & L3FWD_EVENT_TX_DIRECT) {
+ rte_event_eth_tx_adapter_txq_set(ev.mbuf, 0);
+ while (!rte_event_eth_tx_adapter_enqueue(event_d_id,
+ event_p_id, &ev, 1, 0) &&
+ !force_quit)
+ ;
+ }
+ }
+}
+
+static __rte_always_inline void
+lpm_event_loop_burst(struct l3fwd_event_resources *evt_rsrc,
+ const uint8_t flags)
+{
+ const int event_p_id = l3fwd_get_free_event_port(evt_rsrc);
+ const uint8_t tx_q_id = evt_rsrc->evq.event_q_id[
+ evt_rsrc->evq.nb_queues - 1];
+ const uint8_t event_d_id = evt_rsrc->event_d_id;
+ const uint16_t deq_len = evt_rsrc->deq_depth;
+ struct rte_event events[MAX_PKT_BURST];
+ struct lcore_conf *lconf;
+ unsigned int lcore_id;
+ int i, nb_enq, nb_deq;
+
+ if (event_p_id < 0)
+ return;
+
+ lcore_id = rte_lcore_id();
+
+ lconf = &lcore_conf[lcore_id];
+
+ RTE_LOG(INFO, L3FWD, "entering %s on lcore %u\n", __func__, lcore_id);
+
+ while (!force_quit) {
+ /* Read events from RX queues */
+ nb_deq = rte_event_dequeue_burst(event_d_id, event_p_id,
+ events, deq_len, 0);
+ if (nb_deq == 0) {
+ rte_pause();
+ continue;
+ }
+
+ for (i = 0; i < nb_deq; i++) {
+ if (flags & L3FWD_EVENT_TX_ENQ) {
+ events[i].queue_id = tx_q_id;
+ events[i].op = RTE_EVENT_OP_FORWARD;
+ }
+
+ if (flags & L3FWD_EVENT_TX_DIRECT)
+ rte_event_eth_tx_adapter_txq_set(events[i].mbuf,
+ 0);
+
+ lpm_process_event_pkt(lconf, events[i].mbuf);
+ }
+
+ if (flags & L3FWD_EVENT_TX_ENQ) {
+ nb_enq = rte_event_enqueue_burst(event_d_id, event_p_id,
+ events, nb_deq);
+ while (nb_enq < nb_deq && !force_quit)
+ nb_enq += rte_event_enqueue_burst(event_d_id,
+ event_p_id, events + nb_enq,
+ nb_deq - nb_enq);
+ }
+
+ if (flags & L3FWD_EVENT_TX_DIRECT) {
+ nb_enq = rte_event_eth_tx_adapter_enqueue(event_d_id,
+ event_p_id, events, nb_deq, 0);
+ while (nb_enq < nb_deq && !force_quit)
+ nb_enq += rte_event_eth_tx_adapter_enqueue(
+ event_d_id, event_p_id,
+ events + nb_enq,
+ nb_deq - nb_enq, 0);
+ }
+ }
+}
+
+static __rte_always_inline void
+lpm_event_loop(struct l3fwd_event_resources *evt_rsrc,
+ const uint8_t flags)
+{
+ if (flags & L3FWD_EVENT_SINGLE)
+ lpm_event_loop_single(evt_rsrc, flags);
+ if (flags & L3FWD_EVENT_BURST)
+ lpm_event_loop_burst(evt_rsrc, flags);
+}
+
+int __rte_noinline
+lpm_event_main_loop_tx_d(__attribute__((unused)) void *dummy)
+{
+ struct l3fwd_event_resources *evt_rsrc =
+ l3fwd_get_eventdev_rsrc();
+
+ lpm_event_loop(evt_rsrc, L3FWD_EVENT_TX_DIRECT | L3FWD_EVENT_SINGLE);
+ return 0;
+}
+
+int __rte_noinline
+lpm_event_main_loop_tx_d_burst(__attribute__((unused)) void *dummy)
+{
+ struct l3fwd_event_resources *evt_rsrc =
+ l3fwd_get_eventdev_rsrc();
+
+ lpm_event_loop(evt_rsrc, L3FWD_EVENT_TX_DIRECT | L3FWD_EVENT_BURST);
+ return 0;
+}
+
+int __rte_noinline
+lpm_event_main_loop_tx_q(__attribute__((unused)) void *dummy)
+{
+ struct l3fwd_event_resources *evt_rsrc =
+ l3fwd_get_eventdev_rsrc();
+
+ lpm_event_loop(evt_rsrc, L3FWD_EVENT_TX_ENQ | L3FWD_EVENT_SINGLE);
+ return 0;
+}
+
+int __rte_noinline
+lpm_event_main_loop_tx_q_burst(__attribute__((unused)) void *dummy)
+{
+ struct l3fwd_event_resources *evt_rsrc =
+ l3fwd_get_eventdev_rsrc();
+
+ lpm_event_loop(evt_rsrc, L3FWD_EVENT_TX_ENQ | L3FWD_EVENT_BURST);
+ return 0;
+}
+
void
setup_lpm(const int socketid)
{
diff --git a/examples/l3fwd/main.c b/examples/l3fwd/main.c
index 4bb00a48b..b8bd19b41 100644
--- a/examples/l3fwd/main.c
+++ b/examples/l3fwd/main.c
@@ -1226,6 +1226,7 @@ main(int argc, char **argv)
/* Configure eventdev parameters if user has requested */
if (evt_rsrc->enabled) {
l3fwd_event_resource_setup(&port_conf);
+ l3fwd_lkp.main_loop = evt_rsrc->ops.lpm_event_loop;
l3fwd_event_service_setup();
} else
l3fwd_poll_resource_setup();
--
2.17.1
^ permalink raw reply [flat|nested] 13+ messages in thread
* [dpdk-dev] [PATCH v6 09/11] examples/l3fwd: add event em main loop
2020-01-28 5:34 ` [dpdk-dev] [PATCH v6 00/11] example/l3fwd: introduce event device support pbhagavatula
` (7 preceding siblings ...)
2020-01-28 5:35 ` [dpdk-dev] [PATCH v6 08/11] examples/l3fwd: add event lpm main loop pbhagavatula
@ 2020-01-28 5:35 ` pbhagavatula
2020-01-28 5:35 ` [dpdk-dev] [PATCH v6 10/11] examples/l3fwd: add graceful teardown for eventdevice pbhagavatula
` (2 subsequent siblings)
11 siblings, 0 replies; 13+ messages in thread
From: pbhagavatula @ 2020-01-28 5:35 UTC (permalink / raw)
To: jerinj, konstantin.ananyev, Marko Kovacevic, Ori Kam,
Bruce Richardson, Radu Nicolau, Akhil Goyal, Tomasz Kantecki,
Sunil Kumar Kori, Pavan Nikhilesh
Cc: dev
From: Pavan Nikhilesh <pbhagavatula@marvell.com>
Add em main loop for handling events based on capabilities of the
event device.
Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
---
examples/l3fwd/l3fwd.h | 10 ++
examples/l3fwd/l3fwd_em.c | 177 +++++++++++++++++++++++++++
examples/l3fwd/l3fwd_em.h | 159 +++++++++++++++++-------
examples/l3fwd/l3fwd_em_hlm.h | 131 ++++++++++++++++++++
examples/l3fwd/l3fwd_em_sequential.h | 26 ++++
examples/l3fwd/l3fwd_event.c | 9 ++
examples/l3fwd/main.c | 5 +-
7 files changed, 470 insertions(+), 47 deletions(-)
diff --git a/examples/l3fwd/l3fwd.h b/examples/l3fwd/l3fwd.h
index b3cdcd496..c786f9062 100644
--- a/examples/l3fwd/l3fwd.h
+++ b/examples/l3fwd/l3fwd.h
@@ -216,6 +216,16 @@ lpm_event_main_loop_tx_q(__attribute__((unused)) void *dummy);
int
lpm_event_main_loop_tx_q_burst(__attribute__((unused)) void *dummy);
+int
+em_event_main_loop_tx_d(__attribute__((unused)) void *dummy);
+int
+em_event_main_loop_tx_d_burst(__attribute__((unused)) void *dummy);
+int
+em_event_main_loop_tx_q(__attribute__((unused)) void *dummy);
+int
+em_event_main_loop_tx_q_burst(__attribute__((unused)) void *dummy);
+
+
/* Return ipv4/ipv6 fwd lookup struct for LPM or EM. */
void *
em_get_ipv4_l3fwd_lookup_struct(const int socketid);
diff --git a/examples/l3fwd/l3fwd_em.c b/examples/l3fwd/l3fwd_em.c
index 74a7c8fa4..1a8bc9168 100644
--- a/examples/l3fwd/l3fwd_em.c
+++ b/examples/l3fwd/l3fwd_em.c
@@ -26,6 +26,7 @@
#include <rte_hash.h>
#include "l3fwd.h"
+#include "l3fwd_event.h"
#if defined(RTE_ARCH_X86) || defined(RTE_MACHINE_CPUFLAG_CRC32)
#define EM_HASH_CRC 1
@@ -699,6 +700,182 @@ em_main_loop(__attribute__((unused)) void *dummy)
return 0;
}
+static __rte_always_inline void
+em_event_loop_single(struct l3fwd_event_resources *evt_rsrc,
+ const uint8_t flags)
+{
+ const int event_p_id = l3fwd_get_free_event_port(evt_rsrc);
+ const uint8_t tx_q_id = evt_rsrc->evq.event_q_id[
+ evt_rsrc->evq.nb_queues - 1];
+ const uint8_t event_d_id = evt_rsrc->event_d_id;
+ struct lcore_conf *lconf;
+ unsigned int lcore_id;
+ struct rte_event ev;
+
+ if (event_p_id < 0)
+ return;
+
+ lcore_id = rte_lcore_id();
+ lconf = &lcore_conf[lcore_id];
+
+ RTE_LOG(INFO, L3FWD, "entering %s on lcore %u\n", __func__, lcore_id);
+ while (!force_quit) {
+ if (!rte_event_dequeue_burst(event_d_id, event_p_id, &ev, 1, 0))
+ continue;
+
+ struct rte_mbuf *mbuf = ev.mbuf;
+
+#if defined RTE_ARCH_X86 || defined RTE_MACHINE_CPUFLAG_NEON
+ mbuf->port = em_get_dst_port(lconf, mbuf, mbuf->port);
+ process_packet(mbuf, &mbuf->port);
+#else
+ l3fwd_em_simple_process(mbuf, lconf);
+#endif
+ if (mbuf->port == BAD_PORT) {
+ rte_pktmbuf_free(mbuf);
+ continue;
+ }
+
+ if (flags & L3FWD_EVENT_TX_ENQ) {
+ ev.queue_id = tx_q_id;
+ ev.op = RTE_EVENT_OP_FORWARD;
+ while (rte_event_enqueue_burst(event_d_id, event_p_id,
+ &ev, 1) && !force_quit)
+ ;
+ }
+
+ if (flags & L3FWD_EVENT_TX_DIRECT) {
+ rte_event_eth_tx_adapter_txq_set(mbuf, 0);
+ while (!rte_event_eth_tx_adapter_enqueue(event_d_id,
+ event_p_id, &ev, 1, 0) &&
+ !force_quit)
+ ;
+ }
+ }
+}
+
+static __rte_always_inline void
+em_event_loop_burst(struct l3fwd_event_resources *evt_rsrc,
+ const uint8_t flags)
+{
+ const int event_p_id = l3fwd_get_free_event_port(evt_rsrc);
+ const uint8_t tx_q_id = evt_rsrc->evq.event_q_id[
+ evt_rsrc->evq.nb_queues - 1];
+ const uint8_t event_d_id = evt_rsrc->event_d_id;
+ const uint16_t deq_len = evt_rsrc->deq_depth;
+ struct rte_event events[MAX_PKT_BURST];
+ struct lcore_conf *lconf;
+ unsigned int lcore_id;
+ int i, nb_enq, nb_deq;
+
+ if (event_p_id < 0)
+ return;
+
+ lcore_id = rte_lcore_id();
+
+ lconf = &lcore_conf[lcore_id];
+
+ RTE_LOG(INFO, L3FWD, "entering %s on lcore %u\n", __func__, lcore_id);
+
+ while (!force_quit) {
+ /* Read events from RX queues */
+ nb_deq = rte_event_dequeue_burst(event_d_id, event_p_id,
+ events, deq_len, 0);
+ if (nb_deq == 0) {
+ rte_pause();
+ continue;
+ }
+
+#if defined RTE_ARCH_X86 || defined RTE_MACHINE_CPUFLAG_NEON
+ l3fwd_em_process_events(nb_deq, (struct rte_event **)&events,
+ lconf);
+#else
+ l3fwd_em_no_opt_process_events(nb_deq,
+ (struct rte_event **)&events,
+ lconf);
+#endif
+ for (i = 0; i < nb_deq; i++) {
+ if (flags & L3FWD_EVENT_TX_ENQ) {
+ events[i].queue_id = tx_q_id;
+ events[i].op = RTE_EVENT_OP_FORWARD;
+ }
+
+ if (flags & L3FWD_EVENT_TX_DIRECT)
+ rte_event_eth_tx_adapter_txq_set(events[i].mbuf,
+ 0);
+ }
+
+ if (flags & L3FWD_EVENT_TX_ENQ) {
+ nb_enq = rte_event_enqueue_burst(event_d_id, event_p_id,
+ events, nb_deq);
+ while (nb_enq < nb_deq && !force_quit)
+ nb_enq += rte_event_enqueue_burst(event_d_id,
+ event_p_id, events + nb_enq,
+ nb_deq - nb_enq);
+ }
+
+ if (flags & L3FWD_EVENT_TX_DIRECT) {
+ nb_enq = rte_event_eth_tx_adapter_enqueue(event_d_id,
+ event_p_id, events, nb_deq, 0);
+ while (nb_enq < nb_deq && !force_quit)
+ nb_enq += rte_event_eth_tx_adapter_enqueue(
+ event_d_id, event_p_id,
+ events + nb_enq,
+ nb_deq - nb_enq, 0);
+ }
+ }
+}
+
+static __rte_always_inline void
+em_event_loop(struct l3fwd_event_resources *evt_rsrc,
+ const uint8_t flags)
+{
+ if (flags & L3FWD_EVENT_SINGLE)
+ em_event_loop_single(evt_rsrc, flags);
+ if (flags & L3FWD_EVENT_BURST)
+ em_event_loop_burst(evt_rsrc, flags);
+}
+
+int __rte_noinline
+em_event_main_loop_tx_d(__attribute__((unused)) void *dummy)
+{
+ struct l3fwd_event_resources *evt_rsrc =
+ l3fwd_get_eventdev_rsrc();
+
+ em_event_loop(evt_rsrc, L3FWD_EVENT_TX_DIRECT | L3FWD_EVENT_SINGLE);
+ return 0;
+}
+
+int __rte_noinline
+em_event_main_loop_tx_d_burst(__attribute__((unused)) void *dummy)
+{
+ struct l3fwd_event_resources *evt_rsrc =
+ l3fwd_get_eventdev_rsrc();
+
+ em_event_loop(evt_rsrc, L3FWD_EVENT_TX_DIRECT | L3FWD_EVENT_BURST);
+ return 0;
+}
+
+int __rte_noinline
+em_event_main_loop_tx_q(__attribute__((unused)) void *dummy)
+{
+ struct l3fwd_event_resources *evt_rsrc =
+ l3fwd_get_eventdev_rsrc();
+
+ em_event_loop(evt_rsrc, L3FWD_EVENT_TX_ENQ | L3FWD_EVENT_SINGLE);
+ return 0;
+}
+
+int __rte_noinline
+em_event_main_loop_tx_q_burst(__attribute__((unused)) void *dummy)
+{
+ struct l3fwd_event_resources *evt_rsrc =
+ l3fwd_get_eventdev_rsrc();
+
+ em_event_loop(evt_rsrc, L3FWD_EVENT_TX_ENQ | L3FWD_EVENT_BURST);
+ return 0;
+}
+
/*
* Initialize exact match (hash) parameters.
*/
diff --git a/examples/l3fwd/l3fwd_em.h b/examples/l3fwd/l3fwd_em.h
index 090c1b448..b992a21da 100644
--- a/examples/l3fwd/l3fwd_em.h
+++ b/examples/l3fwd/l3fwd_em.h
@@ -5,73 +5,92 @@
#ifndef __L3FWD_EM_H__
#define __L3FWD_EM_H__
-static __rte_always_inline void
-l3fwd_em_simple_forward(struct rte_mbuf *m, uint16_t portid,
- struct lcore_conf *qconf)
+static __rte_always_inline uint16_t
+l3fwd_em_handle_ipv4(struct rte_mbuf *m, uint16_t portid,
+ struct rte_ether_hdr *eth_hdr, struct lcore_conf *qconf)
{
- struct rte_ether_hdr *eth_hdr;
struct rte_ipv4_hdr *ipv4_hdr;
uint16_t dst_port;
- uint32_t tcp_or_udp;
- uint32_t l3_ptypes;
-
- eth_hdr = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
- tcp_or_udp = m->packet_type & (RTE_PTYPE_L4_TCP | RTE_PTYPE_L4_UDP);
- l3_ptypes = m->packet_type & RTE_PTYPE_L3_MASK;
- if (tcp_or_udp && (l3_ptypes == RTE_PTYPE_L3_IPV4)) {
- /* Handle IPv4 headers.*/
- ipv4_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv4_hdr *,
- sizeof(struct rte_ether_hdr));
+ /* Handle IPv4 headers.*/
+ ipv4_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv4_hdr *,
+ sizeof(struct rte_ether_hdr));
#ifdef DO_RFC_1812_CHECKS
- /* Check to make sure the packet is valid (RFC1812) */
- if (is_valid_ipv4_pkt(ipv4_hdr, m->pkt_len) < 0) {
- rte_pktmbuf_free(m);
- return;
- }
+ /* Check to make sure the packet is valid (RFC1812) */
+ if (is_valid_ipv4_pkt(ipv4_hdr, m->pkt_len) < 0) {
+ rte_pktmbuf_free(m);
+ return BAD_PORT;
+ }
#endif
- dst_port = em_get_ipv4_dst_port(ipv4_hdr, portid,
- qconf->ipv4_lookup_struct);
+ dst_port = em_get_ipv4_dst_port(ipv4_hdr, portid,
+ qconf->ipv4_lookup_struct);
- if (dst_port >= RTE_MAX_ETHPORTS ||
+ if (dst_port >= RTE_MAX_ETHPORTS ||
(enabled_port_mask & 1 << dst_port) == 0)
- dst_port = portid;
+ dst_port = portid;
#ifdef DO_RFC_1812_CHECKS
- /* Update time to live and header checksum */
- --(ipv4_hdr->time_to_live);
- ++(ipv4_hdr->hdr_checksum);
+ /* Update time to live and header checksum */
+ --(ipv4_hdr->time_to_live);
+ ++(ipv4_hdr->hdr_checksum);
#endif
- /* dst addr */
- *(uint64_t *)&eth_hdr->d_addr = dest_eth_addr[dst_port];
+ /* dst addr */
+ *(uint64_t *)&eth_hdr->d_addr = dest_eth_addr[dst_port];
- /* src addr */
- rte_ether_addr_copy(&ports_eth_addr[dst_port],
- &eth_hdr->s_addr);
+ /* src addr */
+ rte_ether_addr_copy(&ports_eth_addr[dst_port],
+ &eth_hdr->s_addr);
- send_single_packet(qconf, m, dst_port);
- } else if (tcp_or_udp && (l3_ptypes == RTE_PTYPE_L3_IPV6)) {
- /* Handle IPv6 headers.*/
- struct rte_ipv6_hdr *ipv6_hdr;
+ return dst_port;
+}
- ipv6_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv6_hdr *,
- sizeof(struct rte_ether_hdr));
+static __rte_always_inline uint16_t
+l3fwd_em_handle_ipv6(struct rte_mbuf *m, uint16_t portid,
+ struct rte_ether_hdr *eth_hdr, struct lcore_conf *qconf)
+{
+ /* Handle IPv6 headers.*/
+ struct rte_ipv6_hdr *ipv6_hdr;
+ uint16_t dst_port;
- dst_port = em_get_ipv6_dst_port(ipv6_hdr, portid,
- qconf->ipv6_lookup_struct);
+ ipv6_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv6_hdr *,
+ sizeof(struct rte_ether_hdr));
- if (dst_port >= RTE_MAX_ETHPORTS ||
+ dst_port = em_get_ipv6_dst_port(ipv6_hdr, portid,
+ qconf->ipv6_lookup_struct);
+
+ if (dst_port >= RTE_MAX_ETHPORTS ||
(enabled_port_mask & 1 << dst_port) == 0)
- dst_port = portid;
+ dst_port = portid;
+
+ /* dst addr */
+ *(uint64_t *)&eth_hdr->d_addr = dest_eth_addr[dst_port];
- /* dst addr */
- *(uint64_t *)&eth_hdr->d_addr = dest_eth_addr[dst_port];
+ /* src addr */
+ rte_ether_addr_copy(&ports_eth_addr[dst_port],
+ &eth_hdr->s_addr);
- /* src addr */
- rte_ether_addr_copy(&ports_eth_addr[dst_port],
- &eth_hdr->s_addr);
+ return dst_port;
+}
+static __rte_always_inline void
+l3fwd_em_simple_forward(struct rte_mbuf *m, uint16_t portid,
+ struct lcore_conf *qconf)
+{
+ struct rte_ether_hdr *eth_hdr;
+ uint16_t dst_port;
+ uint32_t tcp_or_udp;
+ uint32_t l3_ptypes;
+
+ eth_hdr = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
+ tcp_or_udp = m->packet_type & (RTE_PTYPE_L4_TCP | RTE_PTYPE_L4_UDP);
+ l3_ptypes = m->packet_type & RTE_PTYPE_L3_MASK;
+
+ if (tcp_or_udp && (l3_ptypes == RTE_PTYPE_L3_IPV4)) {
+ dst_port = l3fwd_em_handle_ipv4(m, portid, eth_hdr, qconf);
+ send_single_packet(qconf, m, dst_port);
+ } else if (tcp_or_udp && (l3_ptypes == RTE_PTYPE_L3_IPV6)) {
+ dst_port = l3fwd_em_handle_ipv6(m, portid, eth_hdr, qconf);
send_single_packet(qconf, m, dst_port);
} else {
/* Free the mbuf that contains non-IPV4/IPV6 packet */
@@ -79,6 +98,25 @@ l3fwd_em_simple_forward(struct rte_mbuf *m, uint16_t portid,
}
}
+static __rte_always_inline void
+l3fwd_em_simple_process(struct rte_mbuf *m, struct lcore_conf *qconf)
+{
+ struct rte_ether_hdr *eth_hdr;
+ uint32_t tcp_or_udp;
+ uint32_t l3_ptypes;
+
+ eth_hdr = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
+ tcp_or_udp = m->packet_type & (RTE_PTYPE_L4_TCP | RTE_PTYPE_L4_UDP);
+ l3_ptypes = m->packet_type & RTE_PTYPE_L3_MASK;
+
+ if (tcp_or_udp && (l3_ptypes == RTE_PTYPE_L3_IPV4))
+ m->port = l3fwd_em_handle_ipv4(m, m->port, eth_hdr, qconf);
+ else if (tcp_or_udp && (l3_ptypes == RTE_PTYPE_L3_IPV6))
+ m->port = l3fwd_em_handle_ipv6(m, m->port, eth_hdr, qconf);
+ else
+ m->port = BAD_PORT;
+}
+
/*
* Buffer non-optimized handling of packets, invoked
* from main_loop.
@@ -108,4 +146,33 @@ l3fwd_em_no_opt_send_packets(int nb_rx, struct rte_mbuf **pkts_burst,
l3fwd_em_simple_forward(pkts_burst[j], portid, qconf);
}
+/*
+ * Buffer non-optimized handling of events, invoked
+ * from main_loop.
+ */
+static inline void
+l3fwd_em_no_opt_process_events(int nb_rx, struct rte_event **events,
+ struct lcore_conf *qconf)
+{
+ int32_t j;
+
+ /* Prefetch first packets */
+ for (j = 0; j < PREFETCH_OFFSET && j < nb_rx; j++)
+ rte_prefetch0(rte_pktmbuf_mtod(events[j]->mbuf, void *));
+
+ /*
+ * Prefetch and forward already prefetched
+ * packets.
+ */
+ for (j = 0; j < (nb_rx - PREFETCH_OFFSET); j++) {
+ rte_prefetch0(rte_pktmbuf_mtod(events[
+ j + PREFETCH_OFFSET]->mbuf, void *));
+ l3fwd_em_simple_process(events[j]->mbuf, qconf);
+ }
+
+ /* Forward remaining prefetched packets */
+ for (; j < nb_rx; j++)
+ l3fwd_em_simple_process(events[j]->mbuf, qconf);
+}
+
#endif /* __L3FWD_EM_H__ */
diff --git a/examples/l3fwd/l3fwd_em_hlm.h b/examples/l3fwd/l3fwd_em_hlm.h
index ad8b9ce87..79812716c 100644
--- a/examples/l3fwd/l3fwd_em_hlm.h
+++ b/examples/l3fwd/l3fwd_em_hlm.h
@@ -75,6 +75,60 @@ em_get_dst_port_ipv6xN(struct lcore_conf *qconf, struct rte_mbuf *m[],
}
}
+static __rte_always_inline void
+em_get_dst_port_ipv4xN_events(struct lcore_conf *qconf, struct rte_mbuf *m[],
+ uint16_t dst_port[])
+{
+ int i;
+ int32_t ret[EM_HASH_LOOKUP_COUNT];
+ union ipv4_5tuple_host key[EM_HASH_LOOKUP_COUNT];
+ const void *key_array[EM_HASH_LOOKUP_COUNT];
+
+ for (i = 0; i < EM_HASH_LOOKUP_COUNT; i++) {
+ get_ipv4_5tuple(m[i], mask0.x, &key[i]);
+ key_array[i] = &key[i];
+ }
+
+ rte_hash_lookup_bulk(qconf->ipv4_lookup_struct, &key_array[0],
+ EM_HASH_LOOKUP_COUNT, ret);
+
+ for (i = 0; i < EM_HASH_LOOKUP_COUNT; i++) {
+ dst_port[i] = ((ret[i] < 0) ?
+ m[i]->port : ipv4_l3fwd_out_if[ret[i]]);
+
+ if (dst_port[i] >= RTE_MAX_ETHPORTS ||
+ (enabled_port_mask & 1 << dst_port[i]) == 0)
+ dst_port[i] = m[i]->port;
+ }
+}
+
+static __rte_always_inline void
+em_get_dst_port_ipv6xN_events(struct lcore_conf *qconf, struct rte_mbuf *m[],
+ uint16_t dst_port[])
+{
+ int i;
+ int32_t ret[EM_HASH_LOOKUP_COUNT];
+ union ipv6_5tuple_host key[EM_HASH_LOOKUP_COUNT];
+ const void *key_array[EM_HASH_LOOKUP_COUNT];
+
+ for (i = 0; i < EM_HASH_LOOKUP_COUNT; i++) {
+ get_ipv6_5tuple(m[i], mask1.x, mask2.x, &key[i]);
+ key_array[i] = &key[i];
+ }
+
+ rte_hash_lookup_bulk(qconf->ipv6_lookup_struct, &key_array[0],
+ EM_HASH_LOOKUP_COUNT, ret);
+
+ for (i = 0; i < EM_HASH_LOOKUP_COUNT; i++) {
+ dst_port[i] = ((ret[i] < 0) ?
+ m[i]->port : ipv6_l3fwd_out_if[ret[i]]);
+
+ if (dst_port[i] >= RTE_MAX_ETHPORTS ||
+ (enabled_port_mask & 1 << dst_port[i]) == 0)
+ dst_port[i] = m[i]->port;
+ }
+}
+
static __rte_always_inline uint16_t
em_get_dst_port(const struct lcore_conf *qconf, struct rte_mbuf *pkt,
uint16_t portid)
@@ -187,4 +241,81 @@ l3fwd_em_send_packets(int nb_rx, struct rte_mbuf **pkts_burst,
send_packets_multi(qconf, pkts_burst, dst_port, nb_rx);
}
+
+/*
+ * Buffer optimized handling of events, invoked
+ * from main_loop.
+ */
+static inline void
+l3fwd_em_process_events(int nb_rx, struct rte_event **ev,
+ struct lcore_conf *qconf)
+{
+ int32_t i, j, pos;
+ uint16_t dst_port[MAX_PKT_BURST];
+ struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
+
+ /*
+ * Send nb_rx - nb_rx % EM_HASH_LOOKUP_COUNT packets
+ * in groups of EM_HASH_LOOKUP_COUNT.
+ */
+ int32_t n = RTE_ALIGN_FLOOR(nb_rx, EM_HASH_LOOKUP_COUNT);
+
+ for (j = 0; j < EM_HASH_LOOKUP_COUNT && j < nb_rx; j++) {
+ pkts_burst[j] = ev[j]->mbuf;
+ rte_prefetch0(rte_pktmbuf_mtod(pkts_burst[j],
+ struct rte_ether_hdr *) + 1);
+ }
+
+ for (j = 0; j < n; j += EM_HASH_LOOKUP_COUNT) {
+
+ uint32_t pkt_type = RTE_PTYPE_L3_MASK |
+ RTE_PTYPE_L4_TCP | RTE_PTYPE_L4_UDP;
+ uint32_t l3_type, tcp_or_udp;
+
+ for (i = 0; i < EM_HASH_LOOKUP_COUNT; i++)
+ pkt_type &= pkts_burst[j + i]->packet_type;
+
+ l3_type = pkt_type & RTE_PTYPE_L3_MASK;
+ tcp_or_udp = pkt_type & (RTE_PTYPE_L4_TCP | RTE_PTYPE_L4_UDP);
+
+ for (i = 0, pos = j + EM_HASH_LOOKUP_COUNT;
+ i < EM_HASH_LOOKUP_COUNT && pos < nb_rx; i++, pos++) {
+ rte_prefetch0(rte_pktmbuf_mtod(
+ pkts_burst[pos],
+ struct rte_ether_hdr *) + 1);
+ }
+
+ if (tcp_or_udp && (l3_type == RTE_PTYPE_L3_IPV4)) {
+
+ em_get_dst_port_ipv4xN_events(qconf, &pkts_burst[j],
+ &dst_port[j]);
+
+ } else if (tcp_or_udp && (l3_type == RTE_PTYPE_L3_IPV6)) {
+
+ em_get_dst_port_ipv6xN_events(qconf, &pkts_burst[j],
+ &dst_port[j]);
+
+ } else {
+ for (i = 0; i < EM_HASH_LOOKUP_COUNT; i++) {
+ pkts_burst[j + i]->port = em_get_dst_port(qconf,
+ pkts_burst[j + i],
+ pkts_burst[j + i]->port);
+ process_packet(pkts_burst[j + i],
+ &pkts_burst[j + i]->port);
+ }
+ continue;
+ }
+ processx4_step3(&pkts_burst[j], &dst_port[j]);
+
+ for (i = 0; i < EM_HASH_LOOKUP_COUNT; i++)
+ pkts_burst[j + i]->port = dst_port[j + i];
+
+ }
+
+ for (; j < nb_rx; j++) {
+ pkts_burst[j]->port = em_get_dst_port(qconf, pkts_burst[j],
+ pkts_burst[j]->port);
+ process_packet(pkts_burst[j], &pkts_burst[j]->port);
+ }
+}
#endif /* __L3FWD_EM_HLM_H__ */
diff --git a/examples/l3fwd/l3fwd_em_sequential.h b/examples/l3fwd/l3fwd_em_sequential.h
index 23fe9dec8..b231b9994 100644
--- a/examples/l3fwd/l3fwd_em_sequential.h
+++ b/examples/l3fwd/l3fwd_em_sequential.h
@@ -95,4 +95,30 @@ l3fwd_em_send_packets(int nb_rx, struct rte_mbuf **pkts_burst,
send_packets_multi(qconf, pkts_burst, dst_port, nb_rx);
}
+
+/*
+ * Buffer optimized handling of events, invoked
+ * from main_loop.
+ */
+static inline void
+l3fwd_em_process_events(int nb_rx, struct rte_event **events,
+ struct lcore_conf *qconf)
+{
+ int32_t i, j;
+
+ rte_prefetch0(rte_pktmbuf_mtod(events[0]->mbuf,
+ struct rte_ether_hdr *) + 1);
+
+ for (i = 1, j = 0; j < nb_rx; i++, j++) {
+ struct rte_mbuf *mbuf = events[j]->mbuf;
+
+ if (i < nb_rx) {
+ rte_prefetch0(rte_pktmbuf_mtod(
+ events[i]->mbuf,
+ struct rte_ether_hdr *) + 1);
+ }
+ mbuf->port = em_get_dst_port(qconf, mbuf, mbuf->port);
+ process_packet(mbuf, &mbuf->port);
+ }
+}
#endif /* __L3FWD_EM_SEQUENTIAL_H__ */
diff --git a/examples/l3fwd/l3fwd_event.c b/examples/l3fwd/l3fwd_event.c
index ebddd8f97..43c47eade 100644
--- a/examples/l3fwd/l3fwd_event.c
+++ b/examples/l3fwd/l3fwd_event.c
@@ -217,6 +217,12 @@ l3fwd_event_resource_setup(struct rte_eth_conf *port_conf)
[1][0] = lpm_event_main_loop_tx_q,
[1][1] = lpm_event_main_loop_tx_q_burst,
};
+ const event_loop_cb em_event_loop[2][2] = {
+ [0][0] = em_event_main_loop_tx_d,
+ [0][1] = em_event_main_loop_tx_d_burst,
+ [1][0] = em_event_main_loop_tx_q,
+ [1][1] = em_event_main_loop_tx_q_burst,
+ };
uint32_t event_queue_cfg;
int ret;
@@ -251,4 +257,7 @@ l3fwd_event_resource_setup(struct rte_eth_conf *port_conf)
evt_rsrc->ops.lpm_event_loop = lpm_event_loop[evt_rsrc->tx_mode_q]
[evt_rsrc->has_burst];
+
+ evt_rsrc->ops.em_event_loop = em_event_loop[evt_rsrc->tx_mode_q]
+ [evt_rsrc->has_burst];
}
diff --git a/examples/l3fwd/main.c b/examples/l3fwd/main.c
index b8bd19b41..c95b1655e 100644
--- a/examples/l3fwd/main.c
+++ b/examples/l3fwd/main.c
@@ -1226,7 +1226,10 @@ main(int argc, char **argv)
/* Configure eventdev parameters if user has requested */
if (evt_rsrc->enabled) {
l3fwd_event_resource_setup(&port_conf);
- l3fwd_lkp.main_loop = evt_rsrc->ops.lpm_event_loop;
+ if (l3fwd_em_on)
+ l3fwd_lkp.main_loop = evt_rsrc->ops.em_event_loop;
+ else
+ l3fwd_lkp.main_loop = evt_rsrc->ops.lpm_event_loop;
l3fwd_event_service_setup();
} else
l3fwd_poll_resource_setup();
--
2.17.1
^ permalink raw reply [flat|nested] 13+ messages in thread
* [dpdk-dev] [PATCH v6 10/11] examples/l3fwd: add graceful teardown for eventdevice
2020-01-28 5:34 ` [dpdk-dev] [PATCH v6 00/11] example/l3fwd: introduce event device support pbhagavatula
` (8 preceding siblings ...)
2020-01-28 5:35 ` [dpdk-dev] [PATCH v6 09/11] examples/l3fwd: add event em " pbhagavatula
@ 2020-01-28 5:35 ` pbhagavatula
2020-01-28 5:35 ` [dpdk-dev] [PATCH v6 11/11] doc: update l3fwd user guide to support eventdev pbhagavatula
2020-01-28 9:09 ` [dpdk-dev] [PATCH v6 00/11] example/l3fwd: introduce event device support Jerin Jacob
11 siblings, 0 replies; 13+ messages in thread
From: pbhagavatula @ 2020-01-28 5:35 UTC (permalink / raw)
To: jerinj, konstantin.ananyev, Marko Kovacevic, Ori Kam,
Bruce Richardson, Radu Nicolau, Akhil Goyal, Tomasz Kantecki,
Sunil Kumar Kori, Pavan Nikhilesh
Cc: dev
From: Pavan Nikhilesh <pbhagavatula@marvell.com>
Add graceful teardown that addresses both event mode and poll mode.
Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
---
examples/l3fwd/main.c | 50 +++++++++++++++++++++++++++++--------------
1 file changed, 34 insertions(+), 16 deletions(-)
diff --git a/examples/l3fwd/main.c b/examples/l3fwd/main.c
index c95b1655e..dda430d68 100644
--- a/examples/l3fwd/main.c
+++ b/examples/l3fwd/main.c
@@ -1092,8 +1092,6 @@ l3fwd_poll_resource_setup(void)
ret, portid);
}
}
-
-
}
static inline int
@@ -1191,7 +1189,7 @@ main(int argc, char **argv)
uint16_t queueid, portid;
unsigned int lcore_id;
uint8_t queue;
- int ret;
+ int i, ret;
/* init EAL */
ret = rte_eal_init(argc, argv);
@@ -1280,21 +1278,41 @@ main(int argc, char **argv)
ret = 0;
/* launch per-lcore init on every lcore */
rte_eal_mp_remote_launch(l3fwd_lkp.main_loop, NULL, CALL_MASTER);
- RTE_LCORE_FOREACH_SLAVE(lcore_id) {
- if (rte_eal_wait_lcore(lcore_id) < 0) {
- ret = -1;
- break;
+ if (evt_rsrc->enabled) {
+ for (i = 0; i < evt_rsrc->rx_adptr.nb_rx_adptr; i++)
+ rte_event_eth_rx_adapter_stop(
+ evt_rsrc->rx_adptr.rx_adptr[i]);
+ for (i = 0; i < evt_rsrc->tx_adptr.nb_tx_adptr; i++)
+ rte_event_eth_tx_adapter_stop(
+ evt_rsrc->tx_adptr.tx_adptr[i]);
+
+ RTE_ETH_FOREACH_DEV(portid) {
+ if ((enabled_port_mask & (1 << portid)) == 0)
+ continue;
+ rte_eth_dev_stop(portid);
}
- }
- /* stop ports */
- RTE_ETH_FOREACH_DEV(portid) {
- if ((enabled_port_mask & (1 << portid)) == 0)
- continue;
- printf("Closing port %d...", portid);
- rte_eth_dev_stop(portid);
- rte_eth_dev_close(portid);
- printf(" Done\n");
+ rte_eal_mp_wait_lcore();
+ RTE_ETH_FOREACH_DEV(portid) {
+ if ((enabled_port_mask & (1 << portid)) == 0)
+ continue;
+ rte_eth_dev_close(portid);
+ }
+
+ rte_event_dev_stop(evt_rsrc->event_d_id);
+ rte_event_dev_close(evt_rsrc->event_d_id);
+
+ } else {
+ rte_eal_mp_wait_lcore();
+
+ RTE_ETH_FOREACH_DEV(portid) {
+ if ((enabled_port_mask & (1 << portid)) == 0)
+ continue;
+ printf("Closing port %d...", portid);
+ rte_eth_dev_stop(portid);
+ rte_eth_dev_close(portid);
+ printf(" Done\n");
+ }
}
printf("Bye...\n");
--
2.17.1
^ permalink raw reply [flat|nested] 13+ messages in thread
* [dpdk-dev] [PATCH v6 11/11] doc: update l3fwd user guide to support eventdev
2020-01-28 5:34 ` [dpdk-dev] [PATCH v6 00/11] example/l3fwd: introduce event device support pbhagavatula
` (9 preceding siblings ...)
2020-01-28 5:35 ` [dpdk-dev] [PATCH v6 10/11] examples/l3fwd: add graceful teardown for eventdevice pbhagavatula
@ 2020-01-28 5:35 ` pbhagavatula
2020-01-28 9:09 ` [dpdk-dev] [PATCH v6 00/11] example/l3fwd: introduce event device support Jerin Jacob
11 siblings, 0 replies; 13+ messages in thread
From: pbhagavatula @ 2020-01-28 5:35 UTC (permalink / raw)
To: jerinj, konstantin.ananyev, Thomas Monjalon, John McNamara,
Marko Kovacevic, Ori Kam, Bruce Richardson, Radu Nicolau,
Akhil Goyal, Tomasz Kantecki, Sunil Kumar Kori, Pavan Nikhilesh
Cc: dev
From: Pavan Nikhilesh <pbhagavatula@marvell.com>
Update l3fwd user guide to include event device related information.
Update release notes and MAINTAINERS file.
Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
---
MAINTAINERS | 1 +
doc/guides/rel_notes/release_20_02.rst | 5 ++
doc/guides/sample_app_ug/l3_forward.rst | 79 +++++++++++++++++++++++--
3 files changed, 79 insertions(+), 6 deletions(-)
diff --git a/MAINTAINERS b/MAINTAINERS
index 94bccae6d..8047aaf2a 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -430,6 +430,7 @@ T: git://dpdk.org/next/dpdk-next-eventdev
F: lib/librte_eventdev/
F: drivers/event/skeleton/
F: app/test/test_eventdev.c
+F: examples/l3fwd/l3fwd_event*
Eventdev Ethdev Rx Adapter API
M: Nikhil Rao <nikhil.rao@intel.com>
diff --git a/doc/guides/rel_notes/release_20_02.rst b/doc/guides/rel_notes/release_20_02.rst
index 50e2c1484..9cb69e53b 100644
--- a/doc/guides/rel_notes/release_20_02.rst
+++ b/doc/guides/rel_notes/release_20_02.rst
@@ -143,6 +143,11 @@ New Features
Added a new OCTEON TX2 rawdev PMD for End Point mode of operation.
See the :doc:`../rawdevs/octeontx2_ep` for more details on this new PMD.
+* **Add event device support for l3fwd sample application.**
+
+ Add event device support for ``l3fwd`` sample application. It demonstrates
+ usage of poll and event mode IO mechanism under a single application.
+
Removed Items
-------------
diff --git a/doc/guides/sample_app_ug/l3_forward.rst b/doc/guides/sample_app_ug/l3_forward.rst
index 4cb4b18da..153e46893 100644
--- a/doc/guides/sample_app_ug/l3_forward.rst
+++ b/doc/guides/sample_app_ug/l3_forward.rst
@@ -4,16 +4,23 @@
L3 Forwarding Sample Application
================================
-The L3 Forwarding application is a simple example of packet processing using the DPDK.
+The L3 Forwarding application is a simple example of packet processing using
+DPDK to demonstrate usage of poll and event mode packet I/O mechanism.
The application performs L3 forwarding.
Overview
--------
-The application demonstrates the use of the hash and LPM libraries in the DPDK to implement packet forwarding.
-The initialization and run-time paths are very similar to those of the :doc:`l2_forward_real_virtual`.
-The main difference from the L2 Forwarding sample application is that the forwarding decision
-is made based on information read from the input packet.
+The application demonstrates the use of the hash and LPM libraries in the DPDK
+to implement packet forwarding using poll or event mode PMDs for packet I/O.
+The initialization and run-time paths are very similar to those of the
+:doc:`l2_forward_real_virtual` and :doc:`l2_forward_event`.
+The main difference from the L2 Forwarding sample application is that optionally
+packet can be Rx/Tx from/to eventdev instead of port directly and forwarding
+decision is made based on information read from the input packet.
+
+Eventdev can optionally use S/W or H/W (if supported by platform) scheduler
+implementation for packet I/O based on run time parameters.
The lookup method is either hash-based or LPM-based and is selected at run time. When the selected lookup method is hash-based,
a hash object is used to emulate the flow classification stage.
@@ -56,6 +63,9 @@ The application has a number of command line options::
[--ipv6]
[--parse-ptype]
[--per-port-pool]
+ [--mode]
+ [--eventq-sched]
+ [--event-eth-rxqs]
Where,
@@ -86,6 +96,13 @@ Where,
* ``--per-port-pool:`` Optional, set to use independent buffer pools per port. Without this option, single buffer pool is used for all ports.
+* ``--mode:`` Optional, Packet transfer mode for I/O, poll or eventdev.
+
+* ``--eventq-sched:`` Optional, Event queue synchronization method, Ordered, Atomic or Parallel. Only valid if --mode=eventdev.
+
+* ``--event-eth-rxqs:`` Optional, Number of ethernet RX queues per device. Only valid if --mode=eventdev.
+
+
For example, consider a dual processor socket platform with 8 physical cores, where cores 0-7 and 16-23 appear on socket 0,
while cores 8-15 and 24-31 appear on socket 1.
@@ -116,6 +133,51 @@ In this command:
| | | | |
+----------+-----------+-----------+-------------------------------------+
+To use eventdev mode with sync method **ordered** on the above mentioned environment,
+the following is a sample command:
+
+.. code-block:: console
+
+ ./build/l3fwd -l 0-3 -n 4 -w <event device> -- -p 0x3 --eventq-sched=ordered
+
+or
+
+.. code-block:: console
+
+ ./build/l3fwd -l 0-3 -n 4 -w <event device> -- -p 0x03 --mode=eventdev --eventq-sched=ordered
+
+In this command:
+
+* The -w option whitelists the event device supported by the platform. The way to pass this device may vary based on the platform.
+
+* The --mode option defines PMD to be used for packet I/O.
+
+* The --eventq-sched option selects the synchronization method of the event queue so that packets will be scheduled accordingly.
+
+If application uses S/W scheduler, it uses following DPDK services:
+
+* Software scheduler
+* Rx adapter service function
+* Tx adapter service function
+
+Application needs service cores to run above mentioned services. Service cores
+must be provided as EAL parameters along with the --vdev=event_sw0 to enable S/W
+scheduler. Following is the sample command:
+
+.. code-block:: console
+
+ ./build/l3fwd -l 0-7 -s 0-3 -n 4 --vdev event_sw0 -- -p 0x3 --mode=eventdev --eventq-sched=ordered
+
+In case of eventdev mode, *--config* option is not used for ethernet port
+configuration. Instead each ethernet port will be configured with mentioned
+setup:
+
+* Single Rx/Tx queue
+
+* Each Rx queue will be connected to event queue via Rx adapter.
+
+* Each Tx queue will be connected via Tx adapter.
+
Refer to the *DPDK Getting Started Guide* for general information on running applications and
the Environment Abstraction Layer (EAL) options.
@@ -125,7 +187,7 @@ Explanation
-----------
The following sections provide some explanation of the sample application code. As mentioned in the overview section,
-the initialization and run-time paths are very similar to those of the :doc:`l2_forward_real_virtual`.
+the initialization and run-time paths are very similar to those of the :doc:`l2_forward_real_virtual` and :doc:`l2_forward_event`.
The following sections describe aspects that are specific to the L3 Forwarding sample application.
Hash Initialization
@@ -315,3 +377,8 @@ for LPM-based lookups is done by the get_ipv4_dst_port() function below:
return ((rte_lpm_lookup(ipv4_l3fwd_lookup_struct, rte_be_to_cpu_32(ipv4_hdr->dst_addr), &next_hop) == 0)? next_hop : portid);
}
+
+Eventdev Driver Initialization
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Eventdev driver initialization is the same as in the L2 forwarding eventdev application.
+Refer to :doc:`l2_forward_event` for more details.
--
2.17.1
^ permalink raw reply [flat|nested] 13+ messages in thread
* Re: [dpdk-dev] [PATCH v6 00/11] example/l3fwd: introduce event device support
2020-01-28 5:34 ` [dpdk-dev] [PATCH v6 00/11] example/l3fwd: introduce event device support pbhagavatula
` (10 preceding siblings ...)
2020-01-28 5:35 ` [dpdk-dev] [PATCH v6 11/11] doc: update l3fwd user guide to support eventdev pbhagavatula
@ 2020-01-28 9:09 ` Jerin Jacob
11 siblings, 0 replies; 13+ messages in thread
From: Jerin Jacob @ 2020-01-28 9:09 UTC (permalink / raw)
To: Pavan Nikhilesh; +Cc: Jerin Jacob, Ananyev, Konstantin, dpdk-dev
On Tue, Jan 28, 2020 at 11:05 AM <pbhagavatula@marvell.com> wrote:
>
> From: Pavan Nikhilesh <pbhagavatula@marvell.com>
>
> This patchset adds support for event mode in l3fwd.
> The poll mode is still the default mode of operation.
>
> The following new command line parameters are added:
> --mode: Dictates the mode of operation either poll or event. If unset then
> by default poll mode is used.
> --eventq-sched: Dictates event synchronization mode i.e. either ordered,
> atomic or parallel.
> --event-eth-rxqs: Number of ethernet RX queues per device.
>
> Based on event device capability the configuration is done as follows:
> - A single event device is enabled.
> - The number of event ports is equal to the number of worker
> cores enabled in the core mask. Additional event ports might
> be configured based on Rx/Tx adapter capability.
> - The number of event queues is equal to the number of ethernet
> ports. If Tx adapter doesn't have internal port capability then
> an additional single link event queue is used to enqueue events
> to Tx adapter.
> - Each event port is linked to all existing event queues.
> - Dedicated Rx/Tx adapters for each Ethernet port.
>
> v6 Changes:
> ----------
> - Fix typo in comments.
>
> v5 Changes:
> ----------
> - Update release notes and MAINTAINERS file.
> - Fix typo in doc.
>
> v4 Changes:
> ----------
> - Update documentation about --event-eth-rxqs. (Jerin)
> - Add validation for command line options that are specific to event/poll mode.
> - Fix event device service initialization.
>
> v3 Changes:
> ----------
> - Unify argument parsing.
> - Segregate poll mode and event mode initialization.
> - Simplify event resource creation and accesses.
> - Integrate http://patches.dpdk.org/project/dpdk/list/?series=8002 series.
> - Reduce code duplication in lpm.
>
> v2 Changes:
> ----------
> - Shorten the structure name `s/event_rsrc/evt_rsrc` `s/l2fwd_rsrc/rsrc`.
> - Use rte_panic instead of rte_exit.
> - Rebase on top of Tx adapter change http://patches.dpdk.org/patch/60971.
> - Add parallel mode support.
> - Fix missing eventdev args parsing.
>
>
> Pavan Nikhilesh (7):
> examples/l3fwd: add event device configuration
> examples/l3fwd: add event eth Rx/Tx adapter setup
> examples/l3fwd: add service core setup based on caps
> examples/l3fwd: add event lpm main loop
> examples/l3fwd: add event em main loop
> examples/l3fwd: add graceful teardown for eventdevice
> doc: update l3fwd user guide to support eventdev
>
> Sunil Kumar Kori (4):
> examples/l3fwd: add framework for event device
> examples/l3fwd: split pipelines based on capability
> examples/l3fwd: add ethdev setup based on eventdev
> examples/l3fwd: add event port and queue setup
Series-Acked-by: Jerin Jacob <jerinj@marvell.com>
Series applied to dpdk-next-eventdev/master. Thanks.
>
> MAINTAINERS | 1 +
> doc/guides/rel_notes/release_20_02.rst | 5 +
> doc/guides/sample_app_ug/l3_forward.rst | 79 ++++-
> examples/l3fwd/Makefile | 3 +-
> examples/l3fwd/l3fwd.h | 30 ++
> examples/l3fwd/l3fwd_em.c | 177 +++++++++++
> examples/l3fwd/l3fwd_em.h | 159 +++++++---
> examples/l3fwd/l3fwd_em_hlm.h | 131 ++++++++
> examples/l3fwd/l3fwd_em_sequential.h | 26 ++
> examples/l3fwd/l3fwd_event.c | 263 ++++++++++++++++
> examples/l3fwd/l3fwd_event.h | 86 ++++++
> examples/l3fwd/l3fwd_event_generic.c | 303 ++++++++++++++++++
> examples/l3fwd/l3fwd_event_internal_port.c | 279 +++++++++++++++++
> examples/l3fwd/l3fwd_lpm.c | 203 ++++++++++++
> examples/l3fwd/main.c | 341 +++++++++++++++++----
> examples/l3fwd/meson.build | 5 +-
> 16 files changed, 1976 insertions(+), 115 deletions(-)
> create mode 100644 examples/l3fwd/l3fwd_event.c
> create mode 100644 examples/l3fwd/l3fwd_event.h
> create mode 100644 examples/l3fwd/l3fwd_event_generic.c
> create mode 100644 examples/l3fwd/l3fwd_event_internal_port.c
>
> --
> 2.17.1
>
^ permalink raw reply [flat|nested] 13+ messages in thread