* [dpdk-dev] [PATCH 01/13] examples/eventdev: add Rx adapter support
2017-12-07 20:36 [dpdk-dev] [PATCH 00/13] examples/eventdev: add capability based pipeline support Pavan Nikhilesh
@ 2017-12-07 20:36 ` Pavan Nikhilesh
2017-12-11 16:15 ` Eads, Gage
2017-12-07 20:36 ` [dpdk-dev] [PATCH 02/13] examples/eventdev: move common data into pipeline common Pavan Nikhilesh
` (12 subsequent siblings)
13 siblings, 1 reply; 48+ messages in thread
From: Pavan Nikhilesh @ 2017-12-07 20:36 UTC (permalink / raw)
To: gage.eads, jerin.jacobkollanukkaran, harry.van.haaren,
nikhil.rao, hemant.agrawal, liang.j.ma
Cc: dev, Pavan Nikhilesh
Use the event Rx adapter for packet Rx instead of explicit producer logic.
Use the service run iter function for granular control instead of a
dedicated service lcore.
Signed-off-by: Pavan Nikhilesh <pbhagavatula@caviumnetworks.com>
---
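Note for reviewers: the patch drives the adapter's Rx service from the
application's own loop rather than from a service lcore. In sketch form
(adptr_id stands in for cdata.rx_adapter_id; the same calls appear in the
diff below):

	uint32_t svc_id;

	/* Fetch the service backing the Rx adapter, mark it runnable and
	 * tell the service core library not to require a mapped lcore.
	 */
	if (rte_event_eth_rx_adapter_service_id_get(adptr_id, &svc_id) == 0) {
		rte_service_runstate_set(svc_id, 1);
		rte_service_set_runstate_mapped_check(svc_id, 0);
	}

	/* Later, in the app's fast-path loop, run one service iteration.
	 * The second argument serializes multi-lcore access.
	 */
	rte_service_run_iter_on_app_lcore(svc_id, 1);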
examples/eventdev_pipeline_sw_pmd/main.c | 149 +++++++++++++++++--------------
1 file changed, 82 insertions(+), 67 deletions(-)
diff --git a/examples/eventdev_pipeline_sw_pmd/main.c b/examples/eventdev_pipeline_sw_pmd/main.c
index 5f431d87d..bb87c9544 100644
--- a/examples/eventdev_pipeline_sw_pmd/main.c
+++ b/examples/eventdev_pipeline_sw_pmd/main.c
@@ -46,6 +46,7 @@
#include <rte_cycles.h>
#include <rte_ethdev.h>
#include <rte_eventdev.h>
+#include <rte_event_eth_rx_adapter.h>
#include <rte_service.h>
#define MAX_NUM_STAGES 8
@@ -78,6 +79,7 @@ struct fastpath_data {
uint32_t tx_lock;
uint32_t sched_lock;
uint32_t evdev_service_id;
+ uint32_t rxadptr_service_id;
bool rx_single;
bool tx_single;
bool sched_single;
@@ -105,6 +107,7 @@ struct config_data {
unsigned int worker_cq_depth;
int16_t next_qid[MAX_NUM_STAGES+2];
int16_t qid[MAX_NUM_STAGES];
+ uint8_t rx_adapter_id;
};
static struct config_data cdata = {
@@ -193,53 +196,12 @@ consumer(void)
return 0;
}
-static int
-producer(void)
-{
- static uint8_t eth_port;
- struct rte_mbuf *mbufs[BATCH_SIZE+2];
- struct rte_event ev[BATCH_SIZE+2];
- uint32_t i, num_ports = prod_data.num_nic_ports;
- int32_t qid = prod_data.qid;
- uint8_t dev_id = prod_data.dev_id;
- uint8_t port_id = prod_data.port_id;
- uint32_t prio_idx = 0;
-
- const uint16_t nb_rx = rte_eth_rx_burst(eth_port, 0, mbufs, BATCH_SIZE);
- if (++eth_port == num_ports)
- eth_port = 0;
- if (nb_rx == 0) {
- rte_pause();
- return 0;
- }
-
- for (i = 0; i < nb_rx; i++) {
- ev[i].flow_id = mbufs[i]->hash.rss;
- ev[i].op = RTE_EVENT_OP_NEW;
- ev[i].sched_type = cdata.queue_type;
- ev[i].queue_id = qid;
- ev[i].event_type = RTE_EVENT_TYPE_ETHDEV;
- ev[i].sub_event_type = 0;
- ev[i].priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
- ev[i].mbuf = mbufs[i];
- RTE_SET_USED(prio_idx);
- }
-
- const int nb_tx = rte_event_enqueue_burst(dev_id, port_id, ev, nb_rx);
- if (nb_tx != nb_rx) {
- for (i = nb_tx; i < nb_rx; i++)
- rte_pktmbuf_free(mbufs[i]);
- }
-
- return 0;
-}
-
static inline void
schedule_devices(unsigned int lcore_id)
{
if (fdata->rx_core[lcore_id] && (fdata->rx_single ||
rte_atomic32_cmpset(&(fdata->rx_lock), 0, 1))) {
- producer();
+ rte_service_run_iter_on_app_lcore(fdata->rxadptr_service_id, 1);
rte_atomic32_clear((rte_atomic32_t *)&(fdata->rx_lock));
}
@@ -553,6 +515,79 @@ parse_app_args(int argc, char **argv)
}
}
+static inline void
+init_rx_adapter(uint16_t nb_ports)
+{
+ int i;
+ int ret;
+ uint8_t evdev_id = 0;
+ uint8_t port_needed = 0;
+ struct rte_event_dev_info dev_info;
+
+ ret = rte_event_dev_info_get(evdev_id, &dev_info);
+
+ struct rte_event_port_conf rx_p_conf = {
+ .dequeue_depth = 8,
+ .enqueue_depth = 8,
+ .new_event_threshold = 1200,
+ };
+
+ if (rx_p_conf.dequeue_depth > dev_info.max_event_port_dequeue_depth)
+ rx_p_conf.dequeue_depth = dev_info.max_event_port_dequeue_depth;
+ if (rx_p_conf.enqueue_depth > dev_info.max_event_port_enqueue_depth)
+ rx_p_conf.enqueue_depth = dev_info.max_event_port_enqueue_depth;
+
+ ret = rte_event_eth_rx_adapter_create(cdata.rx_adapter_id, evdev_id,
+ &rx_p_conf);
+ if (ret)
+ rte_exit(EXIT_FAILURE, "failed to create rx adapter[%d]",
+ cdata.rx_adapter_id);
+
+ struct rte_event_eth_rx_adapter_queue_conf queue_conf = {
+ .ev.sched_type = cdata.queue_type,
+ .ev.queue_id = cdata.qid[0],
+ };
+
+ for (i = 0; i < nb_ports; i++) {
+ uint32_t cap;
+
+ ret = rte_event_eth_rx_adapter_caps_get(evdev_id, i, &cap);
+ if (ret)
+ rte_exit(EXIT_FAILURE,
+ "failed to get event rx adapter "
+ "capabilities");
+ /* Producer needs port. */
+ port_needed |= !(cap &
+ RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT);
+
+ ret = rte_event_eth_rx_adapter_queue_add(cdata.rx_adapter_id, i,
+ -1, &queue_conf);
+ if (ret)
+ rte_exit(EXIT_FAILURE,
+ "Failed to add queues to Rx adapter");
+ }
+
+ if (port_needed)
+ prod_data.port_id = cons_data.port_id + 1;
+ prod_data.dev_id = evdev_id;
+ prod_data.qid = cdata.qid[0];
+
+ ret = rte_event_eth_rx_adapter_service_id_get(cdata.rx_adapter_id,
+ &fdata->rxadptr_service_id);
+ if (ret != -ESRCH && ret != 0) {
+ rte_exit(EXIT_FAILURE,
+ "Error getting the service ID for Rx adapter\n");
+ }
+ rte_service_runstate_set(fdata->rxadptr_service_id, 1);
+ rte_service_set_runstate_mapped_check(fdata->rxadptr_service_id, 0);
+
+ ret = rte_event_eth_rx_adapter_start(cdata.rx_adapter_id);
+ if (ret)
+ rte_exit(EXIT_FAILURE, "Rx adapter[%d] start failed",
+ cdata.rx_adapter_id);
+
+}
+
/*
* Initializes a given port using global settings and with the RX buffers
* coming from the mbuf_pool passed as a parameter.
@@ -663,15 +698,14 @@ struct port_link {
};
static int
-setup_eventdev(struct prod_data *prod_data,
- struct cons_data *cons_data,
+setup_eventdev(struct cons_data *cons_data,
struct worker_data *worker_data)
{
const uint8_t dev_id = 0;
/* +1 stages is for a SINGLE_LINK TX stage */
const uint8_t nb_queues = cdata.num_stages + 1;
- /* + 2 is one port for producer and one for consumer */
- const uint8_t nb_ports = cdata.num_workers + 2;
+ /* + 1 for consumer */
+ const uint8_t nb_ports = cdata.num_workers + 1;
struct rte_event_dev_config config = {
.nb_event_queues = nb_queues,
.nb_event_ports = nb_ports,
@@ -818,26 +852,6 @@ setup_eventdev(struct prod_data *prod_data,
__LINE__, i);
return -1;
}
- /* port for producer, no links */
- struct rte_event_port_conf rx_p_conf = {
- .dequeue_depth = 8,
- .enqueue_depth = 8,
- .new_event_threshold = 1200,
- };
-
- if (rx_p_conf.dequeue_depth > config.nb_event_port_dequeue_depth)
- rx_p_conf.dequeue_depth = config.nb_event_port_dequeue_depth;
- if (rx_p_conf.enqueue_depth > config.nb_event_port_enqueue_depth)
- rx_p_conf.enqueue_depth = config.nb_event_port_enqueue_depth;
-
- if (rte_event_port_setup(dev_id, i + 1, &rx_p_conf) < 0) {
- printf("Error setting up port %d\n", i);
- return -1;
- }
-
- *prod_data = (struct prod_data){.dev_id = dev_id,
- .port_id = i + 1,
- .qid = cdata.qid[0] };
*cons_data = (struct cons_data){.dev_id = dev_id,
.port_id = i };
@@ -940,12 +954,13 @@ main(int argc, char **argv)
if (worker_data == NULL)
rte_panic("rte_calloc failed\n");
- int dev_id = setup_eventdev(&prod_data, &cons_data, worker_data);
+ int dev_id = setup_eventdev(&cons_data, worker_data);
if (dev_id < 0)
rte_exit(EXIT_FAILURE, "Error setting up eventdev\n");
prod_data.num_nic_ports = num_ports;
init_ports(num_ports);
+ init_rx_adapter(num_ports);
int worker_idx = 0;
RTE_LCORE_FOREACH_SLAVE(lcore_id) {
--
2.14.1
* Re: [dpdk-dev] [PATCH 01/13] examples/eventdev: add Rx adapter support
2017-12-07 20:36 ` [dpdk-dev] [PATCH 01/13] examples/eventdev: add Rx adapter support Pavan Nikhilesh
@ 2017-12-11 16:15 ` Eads, Gage
2017-12-12 8:17 ` Pavan Nikhilesh Bhagavatula
0 siblings, 1 reply; 48+ messages in thread
From: Eads, Gage @ 2017-12-11 16:15 UTC (permalink / raw)
To: Pavan Nikhilesh, jerin.jacobkollanukkaran, Van Haaren, Harry,
Rao, Nikhil, hemant.agrawal, Ma, Liang J
Cc: dev
Hi Pavan,
</snip>
> static inline void
> schedule_devices(unsigned int lcore_id) {
> if (fdata->rx_core[lcore_id] && (fdata->rx_single ||
> rte_atomic32_cmpset(&(fdata->rx_lock), 0, 1))) {
> - producer();
> + rte_service_run_iter_on_app_lcore(fdata->rxadptr_service_id,
> 1);
> rte_atomic32_clear((rte_atomic32_t *)&(fdata->rx_lock));
> }
The (rx_single || cmpset(rx_lock)) check should no longer be needed -- this logic is provided in rte_service_run_iter_on_app_lcore() and service_run(). The rx_lock can be dropped in general.
</snip>
> + if (port_needed)
> + prod_data.port_id = cons_data.port_id + 1;
> + prod_data.dev_id = evdev_id;
> + prod_data.qid = cdata.qid[0];
> +
Is prod_data still needed? Looks like we're only using it in main() to print the port ID (which may not be valid, depending on if port_needed is true).
Thanks,
Gage
* Re: [dpdk-dev] [PATCH 01/13] examples/eventdev: add Rx adapter support
2017-12-11 16:15 ` Eads, Gage
@ 2017-12-12 8:17 ` Pavan Nikhilesh Bhagavatula
2017-12-12 15:59 ` Eads, Gage
0 siblings, 1 reply; 48+ messages in thread
From: Pavan Nikhilesh Bhagavatula @ 2017-12-12 8:17 UTC (permalink / raw)
To: Eads, Gage, jerin.jacobkollanukkaran, Van Haaren, Harry,
nikhil.rao, hemant.agrawal, Ma, Liang J
Cc: dev
On Mon, Dec 11, 2017 at 04:15:41PM +0000, Eads, Gage wrote:
> Hi Pavan,
>
> </snip>
>
> > static inline void
> > schedule_devices(unsigned int lcore_id) {
> > if (fdata->rx_core[lcore_id] && (fdata->rx_single ||
> > rte_atomic32_cmpset(&(fdata->rx_lock), 0, 1))) {
> > - producer();
> > + rte_service_run_iter_on_app_lcore(fdata->rxadptr_service_id,
> > 1);
> > rte_atomic32_clear((rte_atomic32_t *)&(fdata->rx_lock));
> > }
>
> The (rx_single || cmpset(rx_lock)) check should no longer be needed -- this logic is provided in rte_service_run_iter_on_app_lcore() and service_run(). The rx_lock can be dropped in general.
>
We could either remove the example-level locks, or keep the locks at the
application level and disable serialization in the service API by passing 0,
i.e. rte_service_run_iter_on_app_lcore(<id>, 0).
If we choose to remove the example-level locks, we could do something like
rte_service_run_iter_on_app_lcore(id, !rx_single)
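In sketch form, that second option would reduce the Rx branch of
schedule_devices() to roughly this (untested, assuming rx_lock and the
cmpset dance are dropped entirely):

	if (fdata->rx_core[lcore_id]) {
		/* Serialize inside the service core library only when
		 * more than one lcore can run the Rx adapter service.
		 */
		rte_service_run_iter_on_app_lcore(
				fdata->rxadptr_service_id, !fdata->rx_single);
	}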
> </snip>
>
> > + if (port_needed)
> > + prod_data.port_id = cons_data.port_id + 1;
> > + prod_data.dev_id = evdev_id;
> > + prod_data.qid = cdata.qid[0];
> > +
>
> Is prod_data still needed? Looks like we're only using it in main() to print the port ID (which may not be valid, depending on if port_needed is true).
prod_data is not needed; I left it there to be consistent with the old
example. I will clean it up in the next version.
>
> Thanks,
> Gage
Thanks,
Pavan
* Re: [dpdk-dev] [PATCH 01/13] examples/eventdev: add Rx adapter support
2017-12-12 8:17 ` Pavan Nikhilesh Bhagavatula
@ 2017-12-12 15:59 ` Eads, Gage
0 siblings, 0 replies; 48+ messages in thread
From: Eads, Gage @ 2017-12-12 15:59 UTC (permalink / raw)
To: Pavan Nikhilesh Bhagavatula, jerin.jacobkollanukkaran,
Van Haaren, Harry, Rao, Nikhil, hemant.agrawal, Ma, Liang J
Cc: dev
> -----Original Message-----
> From: Pavan Nikhilesh Bhagavatula
> [mailto:pbhagavatula@caviumnetworks.com]
> Sent: Tuesday, December 12, 2017 2:18 AM
> To: Eads, Gage <gage.eads@intel.com>;
> jerin.jacobkollanukkaran@cavium.com; Van Haaren, Harry
> <harry.van.haaren@intel.com>; Rao, Nikhil <nikhil.rao@intel.com>;
> hemant.agrawal@nxp.com; Ma, Liang J <liang.j.ma@intel.com>
> Cc: dev@dpdk.org
> Subject: Re: [PATCH 01/13] examples/eventdev: add Rx adapter support
>
> On Mon, Dec 11, 2017 at 04:15:41PM +0000, Eads, Gage wrote:
> > Hi Pavan,
> >
> > </snip>
> >
> > > static inline void
> > > schedule_devices(unsigned int lcore_id) {
> > > if (fdata->rx_core[lcore_id] && (fdata->rx_single ||
> > > rte_atomic32_cmpset(&(fdata->rx_lock), 0, 1))) {
> > > - producer();
> > > + rte_service_run_iter_on_app_lcore(fdata->rxadptr_service_id,
> > > 1);
> > > rte_atomic32_clear((rte_atomic32_t *)&(fdata->rx_lock));
> > > }
> >
> > The (rx_single || cmpset(rx_lock)) check should no longer be needed -- this
> logic is provided in rte_service_run_iter_on_app_lcore() and service_run(). The
> rx_lock can be dropped in general.
> >
>
> we could either remove the example level locks (or) keep the locks at application
> level and disable them in service api through
> rte_service_run_iter_on_app_lcore(<id>, 0).
>
> If we choose to remove example level locks we could do something like
> rte_service_run_iter_on_app_lcore(id, !rx_single)
>
That sounds good. No need to duplicate code that the EAL provides, and it simplifies the example.
Thanks,
Gage
* [dpdk-dev] [PATCH 02/13] examples/eventdev: move common data into pipeline common
2017-12-07 20:36 [dpdk-dev] [PATCH 00/13] examples/eventdev: add capability based pipeline support Pavan Nikhilesh
2017-12-07 20:36 ` [dpdk-dev] [PATCH 01/13] examples/eventdev: add Rx adapter support Pavan Nikhilesh
@ 2017-12-07 20:36 ` Pavan Nikhilesh
2017-12-11 16:15 ` Eads, Gage
2017-12-07 20:36 ` [dpdk-dev] [PATCH 03/13] examples/eventdev: add framework for caps based pipeline Pavan Nikhilesh
` (11 subsequent siblings)
13 siblings, 1 reply; 48+ messages in thread
From: Pavan Nikhilesh @ 2017-12-07 20:36 UTC (permalink / raw)
To: gage.eads, jerin.jacobkollanukkaran, harry.van.haaren,
nikhil.rao, hemant.agrawal, liang.j.ma
Cc: dev, Pavan Nikhilesh
Move common structures and functions into pipeline_common.h so that they
can be used by different kinds of pipelines.
Signed-off-by: Pavan Nikhilesh <pbhagavatula@caviumnetworks.com>
---
examples/eventdev_pipeline_sw_pmd/main.c | 77 +--------------
.../eventdev_pipeline_sw_pmd/pipeline_common.h | 109 +++++++++++++++++++++
2 files changed, 112 insertions(+), 74 deletions(-)
create mode 100644 examples/eventdev_pipeline_sw_pmd/pipeline_common.h
diff --git a/examples/eventdev_pipeline_sw_pmd/main.c b/examples/eventdev_pipeline_sw_pmd/main.c
index bb87c9544..c9702fddd 100644
--- a/examples/eventdev_pipeline_sw_pmd/main.c
+++ b/examples/eventdev_pipeline_sw_pmd/main.c
@@ -2,6 +2,7 @@
* BSD LICENSE
*
* Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
+ * Copyright 2016 Cavium, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -35,82 +36,10 @@
#include <stdio.h>
#include <signal.h>
#include <sched.h>
-#include <stdbool.h>
-
-#include <rte_eal.h>
-#include <rte_mempool.h>
-#include <rte_mbuf.h>
-#include <rte_launch.h>
-#include <rte_malloc.h>
-#include <rte_random.h>
-#include <rte_cycles.h>
-#include <rte_ethdev.h>
-#include <rte_eventdev.h>
-#include <rte_event_eth_rx_adapter.h>
-#include <rte_service.h>
-
-#define MAX_NUM_STAGES 8
-#define BATCH_SIZE 16
-#define MAX_NUM_CORE 64
-
-struct prod_data {
- uint8_t dev_id;
- uint8_t port_id;
- int32_t qid;
- unsigned int num_nic_ports;
-} __rte_cache_aligned;
-
-struct cons_data {
- uint8_t dev_id;
- uint8_t port_id;
-} __rte_cache_aligned;
-
-static struct prod_data prod_data;
-static struct cons_data cons_data;
-
-struct worker_data {
- uint8_t dev_id;
- uint8_t port_id;
-} __rte_cache_aligned;
-
-struct fastpath_data {
- volatile int done;
- uint32_t rx_lock;
- uint32_t tx_lock;
- uint32_t sched_lock;
- uint32_t evdev_service_id;
- uint32_t rxadptr_service_id;
- bool rx_single;
- bool tx_single;
- bool sched_single;
- unsigned int rx_core[MAX_NUM_CORE];
- unsigned int tx_core[MAX_NUM_CORE];
- unsigned int sched_core[MAX_NUM_CORE];
- unsigned int worker_core[MAX_NUM_CORE];
- struct rte_eth_dev_tx_buffer *tx_buf[RTE_MAX_ETHPORTS];
-};
-static struct fastpath_data *fdata;
-
-struct config_data {
- unsigned int active_cores;
- unsigned int num_workers;
- int64_t num_packets;
- unsigned int num_fids;
- int queue_type;
- int worker_cycles;
- int enable_queue_priorities;
- int quiet;
- int dump_dev;
- int dump_dev_signal;
- unsigned int num_stages;
- unsigned int worker_cq_depth;
- int16_t next_qid[MAX_NUM_STAGES+2];
- int16_t qid[MAX_NUM_STAGES];
- uint8_t rx_adapter_id;
-};
+#include "pipeline_common.h"
-static struct config_data cdata = {
+struct config_data cdata = {
.num_packets = (1L << 25), /* do ~32M packets */
.num_fids = 512,
.queue_type = RTE_SCHED_TYPE_ATOMIC,
diff --git a/examples/eventdev_pipeline_sw_pmd/pipeline_common.h b/examples/eventdev_pipeline_sw_pmd/pipeline_common.h
new file mode 100644
index 000000000..938e155d3
--- /dev/null
+++ b/examples/eventdev_pipeline_sw_pmd/pipeline_common.h
@@ -0,0 +1,109 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright 2016 Intel Corporation.
+ * Copyright 2016 Cavium, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Cavium, Inc nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdbool.h>
+
+#include <rte_eal.h>
+#include <rte_mempool.h>
+#include <rte_mbuf.h>
+#include <rte_launch.h>
+#include <rte_malloc.h>
+#include <rte_random.h>
+#include <rte_cycles.h>
+#include <rte_ethdev.h>
+#include <rte_eventdev.h>
+#include <rte_event_eth_rx_adapter.h>
+#include <rte_service.h>
+#include <rte_service_component.h>
+
+#define MAX_NUM_STAGES 8
+#define BATCH_SIZE 16
+#define MAX_NUM_CORE 64
+
+struct prod_data {
+ uint8_t dev_id;
+ uint8_t port_id;
+ int32_t qid;
+ unsigned int num_nic_ports;
+} __rte_cache_aligned;
+
+struct cons_data {
+ uint8_t dev_id;
+ uint8_t port_id;
+} __rte_cache_aligned;
+
+struct worker_data {
+ uint8_t dev_id;
+ uint8_t port_id;
+} __rte_cache_aligned;
+
+struct fastpath_data {
+ volatile int done;
+ uint32_t rx_lock;
+ uint32_t tx_lock;
+ uint32_t sched_lock;
+ uint32_t evdev_service_id;
+ uint32_t rxadptr_service_id;
+ bool rx_single;
+ bool tx_single;
+ bool sched_single;
+ unsigned int rx_core[MAX_NUM_CORE];
+ unsigned int tx_core[MAX_NUM_CORE];
+ unsigned int sched_core[MAX_NUM_CORE];
+ unsigned int worker_core[MAX_NUM_CORE];
+ struct rte_eth_dev_tx_buffer *tx_buf[RTE_MAX_ETHPORTS];
+} __rte_cache_aligned;
+
+struct config_data {
+ unsigned int active_cores;
+ unsigned int num_workers;
+ int64_t num_packets;
+ unsigned int num_fids;
+ int queue_type;
+ int worker_cycles;
+ int enable_queue_priorities;
+ int quiet;
+ int dump_dev;
+ int dump_dev_signal;
+ unsigned int num_stages;
+ unsigned int worker_cq_depth;
+ int16_t next_qid[MAX_NUM_STAGES+2];
+ int16_t qid[MAX_NUM_STAGES];
+ uint8_t rx_adapter_id;
+};
+
+struct prod_data prod_data;
+struct cons_data cons_data;
+
+struct fastpath_data *fdata;
+struct config_data cdata;
--
2.14.1
* Re: [dpdk-dev] [PATCH 02/13] examples/eventdev: move common data into pipeline common
2017-12-07 20:36 ` [dpdk-dev] [PATCH 02/13] examples/eventdev: move common data into pipeline common Pavan Nikhilesh
@ 2017-12-11 16:15 ` Eads, Gage
2017-12-12 8:19 ` Pavan Nikhilesh Bhagavatula
0 siblings, 1 reply; 48+ messages in thread
From: Eads, Gage @ 2017-12-11 16:15 UTC (permalink / raw)
To: Pavan Nikhilesh, jerin.jacobkollanukkaran, Van Haaren, Harry,
Rao, Nikhil, hemant.agrawal, Ma, Liang J
Cc: dev
Hi Pavan,
</snip>
> @@ -2,6 +2,7 @@
> * BSD LICENSE
> *
> * Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
> + * Copyright 2016 Cavium, Inc.
Should this be 2017? Same for the copyright dates in pipeline_common.h.
* Re: [dpdk-dev] [PATCH 02/13] examples/eventdev: move common data into pipeline common
2017-12-11 16:15 ` Eads, Gage
@ 2017-12-12 8:19 ` Pavan Nikhilesh Bhagavatula
0 siblings, 0 replies; 48+ messages in thread
From: Pavan Nikhilesh Bhagavatula @ 2017-12-12 8:19 UTC (permalink / raw)
To: Eads, Gage, jerin.jacobkollanukkaran, Van Haaren, Harry, Rao,
Nikhil, hemant.agrawal, Ma, Liang J
Cc: dev
On Mon, Dec 11, 2017 at 04:15:48PM +0000, Eads, Gage wrote:
> Hi Pavan,
>
> </snip>
>
> > @@ -2,6 +2,7 @@
> > * BSD LICENSE
> > *
> > * Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
> > + * Copyright 2016 Cavium, Inc.
>
> Should this be 2017? Same for the copyright dates in pipeline_common.h.
Agreed, will modify in the next version.
Thanks,
Pavan.
* [dpdk-dev] [PATCH 03/13] examples/eventdev: add framework for caps based pipeline
2017-12-07 20:36 [dpdk-dev] [PATCH 00/13] examples/eventdev: add capability based pipeline support Pavan Nikhilesh
2017-12-07 20:36 ` [dpdk-dev] [PATCH 01/13] examples/eventdev: add Rx adapter support Pavan Nikhilesh
2017-12-07 20:36 ` [dpdk-dev] [PATCH 02/13] examples/eventdev: move common data into pipeline common Pavan Nikhilesh
@ 2017-12-07 20:36 ` Pavan Nikhilesh
2017-12-07 20:36 ` [dpdk-dev] [PATCH 04/13] examples/eventdev: add generic worker pipeline Pavan Nikhilesh
` (10 subsequent siblings)
13 siblings, 0 replies; 48+ messages in thread
From: Pavan Nikhilesh @ 2017-12-07 20:36 UTC (permalink / raw)
To: gage.eads, jerin.jacobkollanukkaran, harry.van.haaren,
nikhil.rao, hemant.agrawal, liang.j.ma
Cc: dev, Pavan Nikhilesh
Add a framework to support capability-based pipelines.
Based on the capabilities of the event device and the probed ethernet
devices, the optimal pipeline configuration can be chosen.
Signed-off-by: Pavan Nikhilesh <pbhagavatula@caviumnetworks.com>
---
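The intent, once later patches in this series wire it up, is that main()
only goes through the installed callbacks; roughly (mirroring how the next
patch in the series uses fdata->cap):

	do_capability_setup(num_ports, 0);
	fdata->cap.opt_check();

	int dev_id = fdata->cap.eventdev_setup(&prod_data, &cons_data,
			worker_data);
	fdata->cap.rx_adapter_setup(num_ports);

	/* Workers are launched through the selected worker loop. */
	rte_eal_remote_launch(fdata->cap.worker_loop,
			&worker_data[worker_idx], lcore_id);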
examples/eventdev_pipeline_sw_pmd/pipeline_common.h | 11 +++++++++++
1 file changed, 11 insertions(+)
diff --git a/examples/eventdev_pipeline_sw_pmd/pipeline_common.h b/examples/eventdev_pipeline_sw_pmd/pipeline_common.h
index 938e155d3..5219a4e85 100644
--- a/examples/eventdev_pipeline_sw_pmd/pipeline_common.h
+++ b/examples/eventdev_pipeline_sw_pmd/pipeline_common.h
@@ -67,6 +67,16 @@ struct worker_data {
uint8_t port_id;
} __rte_cache_aligned;
+struct setup_data {
+ int (*worker_loop)(void *);
+ int (*consumer_loop)(void);
+ void (*schedule_loop)(unsigned int);
+ int (*eventdev_setup)(struct prod_data *, struct cons_data *,
+ struct worker_data *);
+ void (*rx_adapter_setup)(uint16_t nb_ports);
+ void (*opt_check)(void);
+};
+
struct fastpath_data {
volatile int done;
uint32_t rx_lock;
@@ -82,6 +92,7 @@ struct fastpath_data {
unsigned int sched_core[MAX_NUM_CORE];
unsigned int worker_core[MAX_NUM_CORE];
struct rte_eth_dev_tx_buffer *tx_buf[RTE_MAX_ETHPORTS];
+ struct setup_data cap;
} __rte_cache_aligned;
struct config_data {
--
2.14.1
* [dpdk-dev] [PATCH 04/13] examples/eventdev: add generic worker pipeline
2017-12-07 20:36 [dpdk-dev] [PATCH 00/13] examples/eventdev: add capability based pipeline support Pavan Nikhilesh
` (2 preceding siblings ...)
2017-12-07 20:36 ` [dpdk-dev] [PATCH 03/13] examples/eventdev: add framework for caps based pipeline Pavan Nikhilesh
@ 2017-12-07 20:36 ` Pavan Nikhilesh
2017-12-07 20:36 ` [dpdk-dev] [PATCH 05/13] examples/eventdev: add ops to check cmdline args Pavan Nikhilesh
` (9 subsequent siblings)
13 siblings, 0 replies; 48+ messages in thread
From: Pavan Nikhilesh @ 2017-12-07 20:36 UTC (permalink / raw)
To: gage.eads, jerin.jacobkollanukkaran, harry.van.haaren,
nikhil.rao, hemant.agrawal, liang.j.ma
Cc: dev, Pavan Nikhilesh
Rename the existing pipeline as the generic worker pipeline.
Signed-off-by: Pavan Nikhilesh <pbhagavatula@caviumnetworks.com>
---
examples/eventdev_pipeline_sw_pmd/Makefile | 1 +
examples/eventdev_pipeline_sw_pmd/main.c | 432 +--------------------
.../eventdev_pipeline_sw_pmd/pipeline_common.h | 56 +++
.../pipeline_worker_generic.c | 383 ++++++++++++++++++
4 files changed, 455 insertions(+), 417 deletions(-)
create mode 100644 examples/eventdev_pipeline_sw_pmd/pipeline_worker_generic.c
diff --git a/examples/eventdev_pipeline_sw_pmd/Makefile b/examples/eventdev_pipeline_sw_pmd/Makefile
index de4e22c88..5e30556fb 100644
--- a/examples/eventdev_pipeline_sw_pmd/Makefile
+++ b/examples/eventdev_pipeline_sw_pmd/Makefile
@@ -42,6 +42,7 @@ APP = eventdev_pipeline_sw_pmd
# all source are stored in SRCS-y
SRCS-y := main.c
+SRCS-y += pipeline_worker_generic.c
CFLAGS += -O3
CFLAGS += $(WERROR_FLAGS)
diff --git a/examples/eventdev_pipeline_sw_pmd/main.c b/examples/eventdev_pipeline_sw_pmd/main.c
index c9702fddd..bd53acf76 100644
--- a/examples/eventdev_pipeline_sw_pmd/main.c
+++ b/examples/eventdev_pipeline_sw_pmd/main.c
@@ -69,169 +69,6 @@ eth_tx_buffer_retry(struct rte_mbuf **pkts, uint16_t unsent,
} while (_sent != unsent);
}
-static int
-consumer(void)
-{
- const uint64_t freq_khz = rte_get_timer_hz() / 1000;
- struct rte_event packets[BATCH_SIZE];
-
- static uint64_t received;
- static uint64_t last_pkts;
- static uint64_t last_time;
- static uint64_t start_time;
- unsigned int i, j;
- uint8_t dev_id = cons_data.dev_id;
- uint8_t port_id = cons_data.port_id;
-
- uint16_t n = rte_event_dequeue_burst(dev_id, port_id,
- packets, RTE_DIM(packets), 0);
-
- if (n == 0) {
- for (j = 0; j < rte_eth_dev_count(); j++)
- rte_eth_tx_buffer_flush(j, 0, fdata->tx_buf[j]);
- return 0;
- }
- if (start_time == 0)
- last_time = start_time = rte_get_timer_cycles();
-
- received += n;
- for (i = 0; i < n; i++) {
- uint8_t outport = packets[i].mbuf->port;
- rte_eth_tx_buffer(outport, 0, fdata->tx_buf[outport],
- packets[i].mbuf);
- }
-
- /* Print out mpps every 1<<22 packets */
- if (!cdata.quiet && received >= last_pkts + (1<<22)) {
- const uint64_t now = rte_get_timer_cycles();
- const uint64_t total_ms = (now - start_time) / freq_khz;
- const uint64_t delta_ms = (now - last_time) / freq_khz;
- uint64_t delta_pkts = received - last_pkts;
-
- printf("# consumer RX=%"PRIu64", time %"PRIu64 "ms, "
- "avg %.3f mpps [current %.3f mpps]\n",
- received,
- total_ms,
- received / (total_ms * 1000.0),
- delta_pkts / (delta_ms * 1000.0));
- last_pkts = received;
- last_time = now;
- }
-
- cdata.num_packets -= n;
- if (cdata.num_packets <= 0)
- fdata->done = 1;
-
- return 0;
-}
-
-static inline void
-schedule_devices(unsigned int lcore_id)
-{
- if (fdata->rx_core[lcore_id] && (fdata->rx_single ||
- rte_atomic32_cmpset(&(fdata->rx_lock), 0, 1))) {
- rte_service_run_iter_on_app_lcore(fdata->rxadptr_service_id, 1);
- rte_atomic32_clear((rte_atomic32_t *)&(fdata->rx_lock));
- }
-
- if (fdata->sched_core[lcore_id] && (fdata->sched_single ||
- rte_atomic32_cmpset(&(fdata->sched_lock), 0, 1))) {
- rte_service_run_iter_on_app_lcore(fdata->evdev_service_id, 1);
- if (cdata.dump_dev_signal) {
- rte_event_dev_dump(0, stdout);
- cdata.dump_dev_signal = 0;
- }
- rte_atomic32_clear((rte_atomic32_t *)&(fdata->sched_lock));
- }
-
- if (fdata->tx_core[lcore_id] && (fdata->tx_single ||
- rte_atomic32_cmpset(&(fdata->tx_lock), 0, 1))) {
- consumer();
- rte_atomic32_clear((rte_atomic32_t *)&(fdata->tx_lock));
- }
-}
-
-static inline void
-work(struct rte_mbuf *m)
-{
- struct ether_hdr *eth;
- struct ether_addr addr;
-
- /* change mac addresses on packet (to use mbuf data) */
- /*
- * FIXME Swap mac address properly and also handle the
- * case for both odd and even number of stages that the
- * addresses end up the same at the end of the pipeline
- */
- eth = rte_pktmbuf_mtod(m, struct ether_hdr *);
- ether_addr_copy(&eth->d_addr, &addr);
- ether_addr_copy(&addr, &eth->d_addr);
-
- /* do a number of cycles of work per packet */
- volatile uint64_t start_tsc = rte_rdtsc();
- while (rte_rdtsc() < start_tsc + cdata.worker_cycles)
- rte_pause();
-}
-
-static int
-worker(void *arg)
-{
- struct rte_event events[BATCH_SIZE];
-
- struct worker_data *data = (struct worker_data *)arg;
- uint8_t dev_id = data->dev_id;
- uint8_t port_id = data->port_id;
- size_t sent = 0, received = 0;
- unsigned int lcore_id = rte_lcore_id();
-
- while (!fdata->done) {
- uint16_t i;
-
- schedule_devices(lcore_id);
-
- if (!fdata->worker_core[lcore_id]) {
- rte_pause();
- continue;
- }
-
- const uint16_t nb_rx = rte_event_dequeue_burst(dev_id, port_id,
- events, RTE_DIM(events), 0);
-
- if (nb_rx == 0) {
- rte_pause();
- continue;
- }
- received += nb_rx;
-
- for (i = 0; i < nb_rx; i++) {
-
- /* The first worker stage does classification */
- if (events[i].queue_id == cdata.qid[0])
- events[i].flow_id = events[i].mbuf->hash.rss
- % cdata.num_fids;
-
- events[i].queue_id = cdata.next_qid[events[i].queue_id];
- events[i].op = RTE_EVENT_OP_FORWARD;
- events[i].sched_type = cdata.queue_type;
-
- work(events[i].mbuf);
- }
- uint16_t nb_tx = rte_event_enqueue_burst(dev_id, port_id,
- events, nb_rx);
- while (nb_tx < nb_rx && !fdata->done)
- nb_tx += rte_event_enqueue_burst(dev_id, port_id,
- events + nb_tx,
- nb_rx - nb_tx);
- sent += nb_tx;
- }
-
- if (!cdata.quiet)
- printf(" worker %u thread done. RX=%zu TX=%zu\n",
- rte_lcore_id(), received, sent);
-
- return 0;
-}
-
/*
* Parse the coremask given as argument (hexadecimal string) and fill
* the global configuration (core role and core count) with the parsed
@@ -444,79 +281,6 @@ parse_app_args(int argc, char **argv)
}
}
-static inline void
-init_rx_adapter(uint16_t nb_ports)
-{
- int i;
- int ret;
- uint8_t evdev_id = 0;
- uint8_t port_needed = 0;
- struct rte_event_dev_info dev_info;
-
- ret = rte_event_dev_info_get(evdev_id, &dev_info);
-
- struct rte_event_port_conf rx_p_conf = {
- .dequeue_depth = 8,
- .enqueue_depth = 8,
- .new_event_threshold = 1200,
- };
-
- if (rx_p_conf.dequeue_depth > dev_info.max_event_port_dequeue_depth)
- rx_p_conf.dequeue_depth = dev_info.max_event_port_dequeue_depth;
- if (rx_p_conf.enqueue_depth > dev_info.max_event_port_enqueue_depth)
- rx_p_conf.enqueue_depth = dev_info.max_event_port_enqueue_depth;
-
- ret = rte_event_eth_rx_adapter_create(cdata.rx_adapter_id, evdev_id,
- &rx_p_conf);
- if (ret)
- rte_exit(EXIT_FAILURE, "failed to create rx adapter[%d]",
- cdata.rx_adapter_id);
-
- struct rte_event_eth_rx_adapter_queue_conf queue_conf = {
- .ev.sched_type = cdata.queue_type,
- .ev.queue_id = cdata.qid[0],
- };
-
- for (i = 0; i < nb_ports; i++) {
- uint32_t cap;
-
- ret = rte_event_eth_rx_adapter_caps_get(evdev_id, i, &cap);
- if (ret)
- rte_exit(EXIT_FAILURE,
- "failed to get event rx adapter "
- "capabilities");
- /* Producer needs port. */
- port_needed |= !(cap &
- RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT);
-
- ret = rte_event_eth_rx_adapter_queue_add(cdata.rx_adapter_id, i,
- -1, &queue_conf);
- if (ret)
- rte_exit(EXIT_FAILURE,
- "Failed to add queues to Rx adapter");
- }
-
- if (port_needed)
- prod_data.port_id = cons_data.port_id + 1;
- prod_data.dev_id = evdev_id;
- prod_data.qid = cdata.qid[0];
-
- ret = rte_event_eth_rx_adapter_service_id_get(cdata.rx_adapter_id,
- &fdata->rxadptr_service_id);
- if (ret != -ESRCH && ret != 0) {
- rte_exit(EXIT_FAILURE,
- "Error getting the service ID for Rx adapter\n");
- }
- rte_service_runstate_set(fdata->rxadptr_service_id, 1);
- rte_service_set_runstate_mapped_check(fdata->rxadptr_service_id, 0);
-
- ret = rte_event_eth_rx_adapter_start(cdata.rx_adapter_id);
- if (ret)
- rte_exit(EXIT_FAILURE, "Rx adapter[%d] start failed",
- cdata.rx_adapter_id);
-
-}
-
/*
* Initializes a given port using global settings and with the RX buffers
* coming from the mbuf_pool passed as a parameter.
@@ -621,183 +385,12 @@ init_ports(unsigned int num_ports)
return 0;
}
-struct port_link {
- uint8_t queue_id;
- uint8_t priority;
-};
-
-static int
-setup_eventdev(struct cons_data *cons_data,
- struct worker_data *worker_data)
+static void
+do_capability_setup(uint16_t nb_ethdev, uint8_t eventdev_id)
{
- const uint8_t dev_id = 0;
- /* +1 stages is for a SINGLE_LINK TX stage */
- const uint8_t nb_queues = cdata.num_stages + 1;
- /* + 1 for consumer */
- const uint8_t nb_ports = cdata.num_workers + 1;
- struct rte_event_dev_config config = {
- .nb_event_queues = nb_queues,
- .nb_event_ports = nb_ports,
- .nb_events_limit = 4096,
- .nb_event_queue_flows = 1024,
- .nb_event_port_dequeue_depth = 128,
- .nb_event_port_enqueue_depth = 128,
- };
- struct rte_event_port_conf wkr_p_conf = {
- .dequeue_depth = cdata.worker_cq_depth,
- .enqueue_depth = 64,
- .new_event_threshold = 4096,
- };
- struct rte_event_queue_conf wkr_q_conf = {
- .schedule_type = cdata.queue_type,
- .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
- .nb_atomic_flows = 1024,
- .nb_atomic_order_sequences = 1024,
- };
- struct rte_event_port_conf tx_p_conf = {
- .dequeue_depth = 128,
- .enqueue_depth = 128,
- .new_event_threshold = 4096,
- };
- const struct rte_event_queue_conf tx_q_conf = {
- .priority = RTE_EVENT_DEV_PRIORITY_HIGHEST,
- .event_queue_cfg = RTE_EVENT_QUEUE_CFG_SINGLE_LINK,
- };
-
- struct port_link worker_queues[MAX_NUM_STAGES];
- struct port_link tx_queue;
- unsigned int i;
-
- int ret, ndev = rte_event_dev_count();
- if (ndev < 1) {
- printf("%d: No Eventdev Devices Found\n", __LINE__);
- return -1;
- }
-
- struct rte_event_dev_info dev_info;
- ret = rte_event_dev_info_get(dev_id, &dev_info);
- printf("\tEventdev %d: %s\n", dev_id, dev_info.driver_name);
-
- if (dev_info.max_event_port_dequeue_depth <
- config.nb_event_port_dequeue_depth)
- config.nb_event_port_dequeue_depth =
- dev_info.max_event_port_dequeue_depth;
- if (dev_info.max_event_port_enqueue_depth <
- config.nb_event_port_enqueue_depth)
- config.nb_event_port_enqueue_depth =
- dev_info.max_event_port_enqueue_depth;
-
- ret = rte_event_dev_configure(dev_id, &config);
- if (ret < 0) {
- printf("%d: Error configuring device\n", __LINE__);
- return -1;
- }
-
- /* Q creation - one load balanced per pipeline stage*/
- printf(" Stages:\n");
- for (i = 0; i < cdata.num_stages; i++) {
- if (rte_event_queue_setup(dev_id, i, &wkr_q_conf) < 0) {
- printf("%d: error creating qid %d\n", __LINE__, i);
- return -1;
- }
- cdata.qid[i] = i;
- cdata.next_qid[i] = i+1;
- worker_queues[i].queue_id = i;
- if (cdata.enable_queue_priorities) {
- /* calculate priority stepping for each stage, leaving
- * headroom of 1 for the SINGLE_LINK TX below
- */
- const uint32_t prio_delta =
- (RTE_EVENT_DEV_PRIORITY_LOWEST-1) / nb_queues;
-
- /* higher priority for queues closer to tx */
- wkr_q_conf.priority =
- RTE_EVENT_DEV_PRIORITY_LOWEST - prio_delta * i;
- }
-
- const char *type_str = "Atomic";
- switch (wkr_q_conf.schedule_type) {
- case RTE_SCHED_TYPE_ORDERED:
- type_str = "Ordered";
- break;
- case RTE_SCHED_TYPE_PARALLEL:
- type_str = "Parallel";
- break;
- }
- printf("\tStage %d, Type %s\tPriority = %d\n", i, type_str,
- wkr_q_conf.priority);
- }
- printf("\n");
-
- /* final queue for sending to TX core */
- if (rte_event_queue_setup(dev_id, i, &tx_q_conf) < 0) {
- printf("%d: error creating qid %d\n", __LINE__, i);
- return -1;
- }
- tx_queue.queue_id = i;
- tx_queue.priority = RTE_EVENT_DEV_PRIORITY_HIGHEST;
-
- if (wkr_p_conf.dequeue_depth > config.nb_event_port_dequeue_depth)
- wkr_p_conf.dequeue_depth = config.nb_event_port_dequeue_depth;
- if (wkr_p_conf.enqueue_depth > config.nb_event_port_enqueue_depth)
- wkr_p_conf.enqueue_depth = config.nb_event_port_enqueue_depth;
-
- /* set up one port per worker, linking to all stage queues */
- for (i = 0; i < cdata.num_workers; i++) {
- struct worker_data *w = &worker_data[i];
- w->dev_id = dev_id;
- if (rte_event_port_setup(dev_id, i, &wkr_p_conf) < 0) {
- printf("Error setting up port %d\n", i);
- return -1;
- }
-
- uint32_t s;
- for (s = 0; s < cdata.num_stages; s++) {
- if (rte_event_port_link(dev_id, i,
- &worker_queues[s].queue_id,
- &worker_queues[s].priority,
- 1) != 1) {
- printf("%d: error creating link for port %d\n",
- __LINE__, i);
- return -1;
- }
- }
- w->port_id = i;
- }
-
- if (tx_p_conf.dequeue_depth > config.nb_event_port_dequeue_depth)
- tx_p_conf.dequeue_depth = config.nb_event_port_dequeue_depth;
- if (tx_p_conf.enqueue_depth > config.nb_event_port_enqueue_depth)
- tx_p_conf.enqueue_depth = config.nb_event_port_enqueue_depth;
-
- /* port for consumer, linked to TX queue */
- if (rte_event_port_setup(dev_id, i, &tx_p_conf) < 0) {
- printf("Error setting up port %d\n", i);
- return -1;
- }
- if (rte_event_port_link(dev_id, i, &tx_queue.queue_id,
- &tx_queue.priority, 1) != 1) {
- printf("%d: error creating link for port %d\n",
- __LINE__, i);
- return -1;
- }
- *cons_data = (struct cons_data){.dev_id = dev_id,
- .port_id = i };
-
- ret = rte_event_dev_service_id_get(dev_id,
- &fdata->evdev_service_id);
- if (ret != -ESRCH && ret != 0) {
- printf("Error getting the service ID for sw eventdev\n");
- return -1;
- }
- rte_service_runstate_set(fdata->evdev_service_id, 1);
- rte_service_set_runstate_mapped_check(fdata->evdev_service_id, 0);
- if (rte_event_dev_start(dev_id) < 0) {
- printf("Error starting eventdev\n");
- return -1;
- }
-
- return dev_id;
+ RTE_SET_USED(nb_ethdev);
+ RTE_SET_USED(eventdev_id);
+ set_worker_generic_setup_data(&fdata->cap, 1);
}
static void
@@ -878,18 +471,23 @@ main(int argc, char **argv)
if (ndevs > 1)
fprintf(stderr, "Warning: More than one eventdev, using idx 0");
+
+ do_capability_setup(num_ports, 0);
+ fdata->cap.opt_check();
+
worker_data = rte_calloc(0, cdata.num_workers,
sizeof(worker_data[0]), 0);
if (worker_data == NULL)
rte_panic("rte_calloc failed\n");
- int dev_id = setup_eventdev(&cons_data, worker_data);
+ int dev_id = fdata->cap.eventdev_setup(&prod_data, &cons_data,
+ worker_data);
if (dev_id < 0)
rte_exit(EXIT_FAILURE, "Error setting up eventdev\n");
prod_data.num_nic_ports = num_ports;
init_ports(num_ports);
- init_rx_adapter(num_ports);
+ fdata->cap.rx_adapter_setup(num_ports);
int worker_idx = 0;
RTE_LCORE_FOREACH_SLAVE(lcore_id) {
@@ -922,8 +520,8 @@ main(int argc, char **argv)
__func__, lcore_id,
worker_data[worker_idx].port_id);
- err = rte_eal_remote_launch(worker, &worker_data[worker_idx],
- lcore_id);
+ err = rte_eal_remote_launch(fdata->cap.worker_loop,
+ &worker_data[worker_idx], lcore_id);
if (err) {
rte_panic("Failed to launch worker on core %d\n",
lcore_id);
@@ -936,7 +534,7 @@ main(int argc, char **argv)
lcore_id = rte_lcore_id();
if (core_in_use(lcore_id))
- worker(&worker_data[worker_idx++]);
+ fdata->cap.worker_loop(&worker_data[worker_idx++]);
rte_eal_mp_wait_lcore();
diff --git a/examples/eventdev_pipeline_sw_pmd/pipeline_common.h b/examples/eventdev_pipeline_sw_pmd/pipeline_common.h
index 5219a4e85..0f3426a3a 100644
--- a/examples/eventdev_pipeline_sw_pmd/pipeline_common.h
+++ b/examples/eventdev_pipeline_sw_pmd/pipeline_common.h
@@ -113,8 +113,64 @@ struct config_data {
uint8_t rx_adapter_id;
};
+struct port_link {
+ uint8_t queue_id;
+ uint8_t priority;
+};
+
struct prod_data prod_data;
struct cons_data cons_data;
struct fastpath_data *fdata;
struct config_data cdata;
+
+static __rte_always_inline void
+work(struct rte_mbuf *m)
+{
+ struct ether_hdr *eth;
+ struct ether_addr addr;
+
+ /* change mac addresses on packet (to use mbuf data) */
+ /*
+ * FIXME Swap mac address properly and also handle the
+ * case for both odd and even number of stages that the
+ * addresses end up the same at the end of the pipeline
+ */
+ eth = rte_pktmbuf_mtod(m, struct ether_hdr *);
+ ether_addr_copy(&eth->d_addr, &addr);
+ ether_addr_copy(&addr, &eth->d_addr);
+
+ /* do a number of cycles of work per packet */
+ volatile uint64_t start_tsc = rte_rdtsc();
+ while (rte_rdtsc() < start_tsc + cdata.worker_cycles)
+ rte_pause();
+}
+
+
+static __rte_always_inline void
+schedule_devices(unsigned int lcore_id)
+{
+ if (fdata->rx_core[lcore_id] && (fdata->rx_single ||
+ rte_atomic32_cmpset(&(fdata->rx_lock), 0, 1))) {
+ rte_service_run_iter_on_app_lcore(fdata->rxadptr_service_id, 1);
+ rte_atomic32_clear((rte_atomic32_t *)&(fdata->rx_lock));
+ }
+
+ if (fdata->sched_core[lcore_id] && (fdata->sched_single ||
+ rte_atomic32_cmpset(&(fdata->sched_lock), 0, 1))) {
+ rte_service_run_iter_on_app_lcore(fdata->evdev_service_id, 1);
+ if (cdata.dump_dev_signal) {
+ rte_event_dev_dump(0, stdout);
+ cdata.dump_dev_signal = 0;
+ }
+ rte_atomic32_clear((rte_atomic32_t *)&(fdata->sched_lock));
+ }
+
+ if (fdata->tx_core[lcore_id] && (fdata->tx_single ||
+ rte_atomic32_cmpset(&(fdata->tx_lock), 0, 1))) {
+ fdata->cap.consumer_loop();
+ rte_atomic32_clear((rte_atomic32_t *)&(fdata->tx_lock));
+ }
+}
+
+void set_worker_generic_setup_data(struct setup_data *caps, bool burst);
diff --git a/examples/eventdev_pipeline_sw_pmd/pipeline_worker_generic.c b/examples/eventdev_pipeline_sw_pmd/pipeline_worker_generic.c
new file mode 100644
index 000000000..032a4f2d2
--- /dev/null
+++ b/examples/eventdev_pipeline_sw_pmd/pipeline_worker_generic.c
@@ -0,0 +1,383 @@
+#include "pipeline_common.h"
+
+static int
+worker_generic_burst(void *arg)
+{
+ struct rte_event events[BATCH_SIZE];
+
+ struct worker_data *data = (struct worker_data *)arg;
+ uint8_t dev_id = data->dev_id;
+ uint8_t port_id = data->port_id;
+ size_t sent = 0, received = 0;
+ unsigned int lcore_id = rte_lcore_id();
+
+ while (!fdata->done) {
+ uint16_t i;
+
+ if (fdata->cap.schedule_loop)
+ fdata->cap.schedule_loop(lcore_id);
+
+ if (!fdata->worker_core[lcore_id]) {
+ rte_pause();
+ continue;
+ }
+
+ const uint16_t nb_rx = rte_event_dequeue_burst(dev_id, port_id,
+ events, RTE_DIM(events), 0);
+
+ if (nb_rx == 0) {
+ rte_pause();
+ continue;
+ }
+ received += nb_rx;
+
+ for (i = 0; i < nb_rx; i++) {
+
+ /* The first worker stage does classification */
+ if (events[i].queue_id == cdata.qid[0])
+ events[i].flow_id = events[i].mbuf->hash.rss
+ % cdata.num_fids;
+
+ events[i].queue_id = cdata.next_qid[events[i].queue_id];
+ events[i].op = RTE_EVENT_OP_FORWARD;
+ events[i].sched_type = cdata.queue_type;
+
+ work(events[i].mbuf);
+ }
+ uint16_t nb_tx = rte_event_enqueue_burst(dev_id, port_id,
+ events, nb_rx);
+ while (nb_tx < nb_rx && !fdata->done)
+ nb_tx += rte_event_enqueue_burst(dev_id, port_id,
+ events + nb_tx,
+ nb_rx - nb_tx);
+ sent += nb_tx;
+ }
+
+ if (!cdata.quiet)
+ printf(" worker %u thread done. RX=%zu TX=%zu\n",
+ rte_lcore_id(), received, sent);
+
+ return 0;
+}
+
+static __rte_always_inline int
+consumer_burst(void)
+{
+ const uint64_t freq_khz = rte_get_timer_hz() / 1000;
+ struct rte_event packets[BATCH_SIZE];
+
+ static uint64_t received;
+ static uint64_t last_pkts;
+ static uint64_t last_time;
+ static uint64_t start_time;
+ unsigned int i, j;
+ uint8_t dev_id = cons_data.dev_id;
+ uint8_t port_id = cons_data.port_id;
+ uint16_t nb_ports = rte_eth_dev_count();
+
+ do {
+ uint16_t n = rte_event_dequeue_burst(dev_id, port_id,
+ packets, RTE_DIM(packets), 0);
+
+ if (n == 0) {
+ for (j = 0; j < nb_ports; j++)
+ rte_eth_tx_buffer_flush(j, 0, fdata->tx_buf[j]);
+ return 0;
+ }
+ if (start_time == 0)
+ last_time = start_time = rte_get_timer_cycles();
+
+ received += n;
+ for (i = 0; i < n; i++) {
+ uint8_t outport = packets[i].mbuf->port;
+ rte_eth_tx_buffer(outport, 0, fdata->tx_buf[outport],
+ packets[i].mbuf);
+ }
+
+ /* Print out mpps every 1<<22 packets */
+ if (!cdata.quiet && received >= last_pkts + (1<<22)) {
+ const uint64_t now = rte_get_timer_cycles();
+ const uint64_t total_ms = (now - start_time) / freq_khz;
+ const uint64_t delta_ms = (now - last_time) / freq_khz;
+ uint64_t delta_pkts = received - last_pkts;
+
+ printf("# consumer RX=%"PRIu64", time %"PRIu64 "ms, "
+ "avg %.3f mpps [current %.3f mpps]\n",
+ received,
+ total_ms,
+ received / (total_ms * 1000.0),
+ delta_pkts / (delta_ms * 1000.0));
+ last_pkts = received;
+ last_time = now;
+ }
+
+ cdata.num_packets -= n;
+ if (cdata.num_packets <= 0)
+ fdata->done = 1;
+ /* Be stuck in this loop if single. */
+ } while (!fdata->done && fdata->tx_single);
+
+ return 0;
+}
+
+static int
+setup_eventdev_cw(struct prod_data *prod_data,
+ struct cons_data *cons_data,
+ struct worker_data *worker_data)
+{
+ RTE_SET_USED(prod_data);
+ const uint8_t dev_id = 0;
+ /* +1 stages is for a SINGLE_LINK TX stage */
+ const uint8_t nb_queues = cdata.num_stages + 1;
+ /* + 1 is one port for consumer */
+ const uint8_t nb_ports = cdata.num_workers + 1;
+ struct rte_event_dev_config config = {
+ .nb_event_queues = nb_queues,
+ .nb_event_ports = nb_ports,
+ .nb_events_limit = 4096,
+ .nb_event_queue_flows = 1024,
+ .nb_event_port_dequeue_depth = 128,
+ .nb_event_port_enqueue_depth = 128,
+ };
+ struct rte_event_port_conf wkr_p_conf = {
+ .dequeue_depth = cdata.worker_cq_depth,
+ .enqueue_depth = 64,
+ .new_event_threshold = 4096,
+ };
+ struct rte_event_queue_conf wkr_q_conf = {
+ .schedule_type = cdata.queue_type,
+ .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
+ .nb_atomic_flows = 1024,
+ .nb_atomic_order_sequences = 1024,
+ };
+ struct rte_event_port_conf tx_p_conf = {
+ .dequeue_depth = 128,
+ .enqueue_depth = 128,
+ .new_event_threshold = 4096,
+ };
+ struct rte_event_queue_conf tx_q_conf = {
+ .priority = RTE_EVENT_DEV_PRIORITY_HIGHEST,
+ .event_queue_cfg = RTE_EVENT_QUEUE_CFG_SINGLE_LINK,
+ };
+
+ struct port_link worker_queues[MAX_NUM_STAGES];
+ struct port_link tx_queue;
+ unsigned int i;
+
+ int ret, ndev = rte_event_dev_count();
+ if (ndev < 1) {
+ printf("%d: No Eventdev Devices Found\n", __LINE__);
+ return -1;
+ }
+
+ struct rte_event_dev_info dev_info;
+ ret = rte_event_dev_info_get(dev_id, &dev_info);
+ printf("\tEventdev %d: %s\n", dev_id, dev_info.driver_name);
+
+ if (dev_info.max_event_port_dequeue_depth <
+ config.nb_event_port_dequeue_depth)
+ config.nb_event_port_dequeue_depth =
+ dev_info.max_event_port_dequeue_depth;
+ if (dev_info.max_event_port_enqueue_depth <
+ config.nb_event_port_enqueue_depth)
+ config.nb_event_port_enqueue_depth =
+ dev_info.max_event_port_enqueue_depth;
+
+ ret = rte_event_dev_configure(dev_id, &config);
+ if (ret < 0) {
+ printf("%d: Error configuring device\n", __LINE__);
+ return -1;
+ }
+
+ /* Q creation - one load balanced per pipeline stage*/
+ printf(" Stages:\n");
+ for (i = 0; i < cdata.num_stages; i++) {
+ if (rte_event_queue_setup(dev_id, i, &wkr_q_conf) < 0) {
+ printf("%d: error creating qid %d\n", __LINE__, i);
+ return -1;
+ }
+ cdata.qid[i] = i;
+ cdata.next_qid[i] = i+1;
+ worker_queues[i].queue_id = i;
+ if (cdata.enable_queue_priorities) {
+ /* calculate priority stepping for each stage, leaving
+ * headroom of 1 for the SINGLE_LINK TX below
+ */
+ const uint32_t prio_delta =
+ (RTE_EVENT_DEV_PRIORITY_LOWEST-1) / nb_queues;
+
+ /* higher priority for queues closer to tx */
+ wkr_q_conf.priority =
+ RTE_EVENT_DEV_PRIORITY_LOWEST - prio_delta * i;
+ }
+
+ const char *type_str = "Atomic";
+ switch (wkr_q_conf.schedule_type) {
+ case RTE_SCHED_TYPE_ORDERED:
+ type_str = "Ordered";
+ break;
+ case RTE_SCHED_TYPE_PARALLEL:
+ type_str = "Parallel";
+ break;
+ }
+ printf("\tStage %d, Type %s\tPriority = %d\n", i, type_str,
+ wkr_q_conf.priority);
+ }
+ printf("\n");
+
+ /* final queue for sending to TX core */
+ if (rte_event_queue_setup(dev_id, i, &tx_q_conf) < 0) {
+ printf("%d: error creating qid %d\n", __LINE__, i);
+ return -1;
+ }
+ tx_queue.queue_id = i;
+ tx_queue.priority = RTE_EVENT_DEV_PRIORITY_HIGHEST;
+
+ if (wkr_p_conf.dequeue_depth > config.nb_event_port_dequeue_depth)
+ wkr_p_conf.dequeue_depth = config.nb_event_port_dequeue_depth;
+ if (wkr_p_conf.enqueue_depth > config.nb_event_port_enqueue_depth)
+ wkr_p_conf.enqueue_depth = config.nb_event_port_enqueue_depth;
+
+ /* set up one port per worker, linking to all stage queues */
+ for (i = 0; i < cdata.num_workers; i++) {
+ struct worker_data *w = &worker_data[i];
+ w->dev_id = dev_id;
+ if (rte_event_port_setup(dev_id, i, &wkr_p_conf) < 0) {
+ printf("Error setting up port %d\n", i);
+ return -1;
+ }
+
+ uint32_t s;
+ for (s = 0; s < cdata.num_stages; s++) {
+ if (rte_event_port_link(dev_id, i,
+ &worker_queues[s].queue_id,
+ &worker_queues[s].priority,
+ 1) != 1) {
+ printf("%d: error creating link for port %d\n",
+ __LINE__, i);
+ return -1;
+ }
+ }
+ w->port_id = i;
+ }
+
+ if (tx_p_conf.dequeue_depth > config.nb_event_port_dequeue_depth)
+ tx_p_conf.dequeue_depth = config.nb_event_port_dequeue_depth;
+ if (tx_p_conf.enqueue_depth > config.nb_event_port_enqueue_depth)
+ tx_p_conf.enqueue_depth = config.nb_event_port_enqueue_depth;
+
+ /* port for consumer, linked to TX queue */
+ if (rte_event_port_setup(dev_id, i, &tx_p_conf) < 0) {
+ printf("Error setting up port %d\n", i);
+ return -1;
+ }
+ if (rte_event_port_link(dev_id, i, &tx_queue.queue_id,
+ &tx_queue.priority, 1) != 1) {
+ printf("%d: error creating link for port %d\n",
+ __LINE__, i);
+ return -1;
+ }
+ *cons_data = (struct cons_data){.dev_id = dev_id,
+ .port_id = i };
+
+ ret = rte_event_dev_service_id_get(dev_id,
+ &fdata->evdev_service_id);
+ if (ret != -ESRCH && ret != 0) {
+ printf("Error getting the service ID for sw eventdev\n");
+ return -1;
+ }
+ rte_service_runstate_set(fdata->evdev_service_id, 1);
+ rte_service_set_runstate_mapped_check(fdata->evdev_service_id, 0);
+ if (rte_event_dev_start(dev_id) < 0) {
+ printf("Error starting eventdev\n");
+ return -1;
+ }
+
+ return dev_id;
+}
+
+static void
+init_rx_adapter(uint16_t nb_ports)
+{
+ int i;
+ int ret;
+ uint8_t evdev_id = 0;
+ uint8_t rx_cb_needed = 0;
+ struct rte_event_dev_info dev_info;
+
+ ret = rte_event_dev_info_get(evdev_id, &dev_info);
+
+ struct rte_event_port_conf rx_p_conf = {
+ .dequeue_depth = 8,
+ .enqueue_depth = 8,
+ .new_event_threshold = 1200,
+ };
+
+ if (rx_p_conf.dequeue_depth > dev_info.max_event_port_dequeue_depth)
+ rx_p_conf.dequeue_depth = dev_info.max_event_port_dequeue_depth;
+ if (rx_p_conf.enqueue_depth > dev_info.max_event_port_enqueue_depth)
+ rx_p_conf.enqueue_depth = dev_info.max_event_port_enqueue_depth;
+
+ /* Create one adapter for all the ethernet ports. */
+ ret = rte_event_eth_rx_adapter_create(cdata.rx_adapter_id, evdev_id,
+ &rx_p_conf);
+ if (ret)
+ rte_exit(EXIT_FAILURE, "failed to create rx adapter[%d]",
+ cdata.rx_adapter_id);
+
+ struct rte_event_eth_rx_adapter_queue_conf queue_conf = {
+ .ev.sched_type = cdata.queue_type,
+ .ev.queue_id = cdata.qid[0],
+ };
+
+ for (i = 0; i < nb_ports; i++) {
+ uint32_t cap;
+
+ ret = rte_event_eth_rx_adapter_caps_get(evdev_id, i, &cap);
+ if (ret)
+ rte_exit(EXIT_FAILURE,
+ "failed to get event rx adapter "
+ "capabilities");
+ /* Producer needs to be scheduled. */
+ rx_cb_needed |= !(cap &
+ RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT);
+
+ ret = rte_event_eth_rx_adapter_queue_add(cdata.rx_adapter_id, i,
+ -1, &queue_conf);
+ if (ret)
+ rte_exit(EXIT_FAILURE,
+ "Failed to add queues to Rx adapter");
+ }
+
+ if (rx_cb_needed)
+ prod_data.port_id = cons_data.port_id + 1;
+
+ prod_data.dev_id = evdev_id;
+ prod_data.qid = cdata.qid[0];
+
+ ret = rte_event_eth_rx_adapter_service_id_get(cdata.rx_adapter_id,
+ &fdata->rxadptr_service_id);
+ if (ret != -ESRCH && ret != 0) {
+ rte_exit(EXIT_FAILURE,
+ "Error getting the service ID for sw eventdev\n");
+ }
+ rte_service_runstate_set(fdata->rxadptr_service_id, 1);
+ rte_service_set_runstate_mapped_check(fdata->rxadptr_service_id, 0);
+
+ ret = rte_event_eth_rx_adapter_start(cdata.rx_adapter_id);
+ if (ret)
+ rte_exit(EXIT_FAILURE, "Rx adapter[%d] start failed",
+ cdata.rx_adapter_id);
+}
+
+void
+set_worker_generic_setup_data(struct setup_data *caps, bool burst)
+{
+ RTE_SET_USED(burst);
+ caps->consumer_loop = consumer_burst;
+ caps->worker_loop = worker_generic_burst;
+
+ caps->rx_adapter_setup = init_rx_adapter;
+ caps->schedule_loop = schedule_devices;
+ caps->eventdev_setup = setup_eventdev_cw;
+}
--
2.14.1
* [dpdk-dev] [PATCH 05/13] examples/eventdev: add ops to check cmdline args
2017-12-07 20:36 [dpdk-dev] [PATCH 00/13] examples/eventdev: add capability based pipeline support Pavan Nikhilesh
` (3 preceding siblings ...)
2017-12-07 20:36 ` [dpdk-dev] [PATCH 04/13] examples/eventdev: add generic worker pipeline Pavan Nikhilesh
@ 2017-12-07 20:36 ` Pavan Nikhilesh
2017-12-19 11:23 ` Van Haaren, Harry
2017-12-07 20:36 ` [dpdk-dev] [PATCH 06/13] examples/eventdev: add non burst mode generic worker Pavan Nikhilesh
` (8 subsequent siblings)
13 siblings, 1 reply; 48+ messages in thread
From: Pavan Nikhilesh @ 2017-12-07 20:36 UTC (permalink / raw)
To: gage.eads, jerin.jacobkollanukkaran, harry.van.haaren,
nikhil.rao, hemant.agrawal, liang.j.ma
Cc: dev, Pavan Nikhilesh
Each eventdev pipeline needs to allow a different combination of cmdline
args based on the pipeline type.
Signed-off-by: Pavan Nikhilesh <pbhagavatula@caviumnetworks.com>
---
examples/eventdev_pipeline_sw_pmd/main.c | 16 +++------
.../eventdev_pipeline_sw_pmd/pipeline_common.h | 4 +++
.../pipeline_worker_generic.c | 40 ++++++++++++++++++++++
3 files changed, 49 insertions(+), 11 deletions(-)
diff --git a/examples/eventdev_pipeline_sw_pmd/main.c b/examples/eventdev_pipeline_sw_pmd/main.c
index bd53acf76..2e80841d0 100644
--- a/examples/eventdev_pipeline_sw_pmd/main.c
+++ b/examples/eventdev_pipeline_sw_pmd/main.c
@@ -254,17 +254,11 @@ parse_app_args(int argc, char **argv)
}
}
- if (worker_lcore_mask == 0 || rx_lcore_mask == 0 ||
- sched_lcore_mask == 0 || tx_lcore_mask == 0) {
- printf("Core part of pipeline was not assigned any cores. "
- "This will stall the pipeline, please check core masks "
- "(use -h for details on setting core masks):\n"
- "\trx: %"PRIu64"\n\ttx: %"PRIu64"\n\tsched: %"PRIu64
- "\n\tworkers: %"PRIu64"\n",
- rx_lcore_mask, tx_lcore_mask, sched_lcore_mask,
- worker_lcore_mask);
- rte_exit(-1, "Fix core masks\n");
- }
+ cdata.worker_lcore_mask = worker_lcore_mask;
+ cdata.sched_lcore_mask = sched_lcore_mask;
+ cdata.rx_lcore_mask = rx_lcore_mask;
+ cdata.tx_lcore_mask = tx_lcore_mask;
+
if (cdata.num_stages == 0 || cdata.num_stages > MAX_NUM_STAGES)
usage();
diff --git a/examples/eventdev_pipeline_sw_pmd/pipeline_common.h b/examples/eventdev_pipeline_sw_pmd/pipeline_common.h
index 0f3426a3a..a5837c99b 100644
--- a/examples/eventdev_pipeline_sw_pmd/pipeline_common.h
+++ b/examples/eventdev_pipeline_sw_pmd/pipeline_common.h
@@ -111,6 +111,10 @@ struct config_data {
int16_t next_qid[MAX_NUM_STAGES+2];
int16_t qid[MAX_NUM_STAGES];
uint8_t rx_adapter_id;
+ uint64_t worker_lcore_mask;
+ uint64_t rx_lcore_mask;
+ uint64_t tx_lcore_mask;
+ uint64_t sched_lcore_mask;
};
struct port_link {
diff --git a/examples/eventdev_pipeline_sw_pmd/pipeline_worker_generic.c b/examples/eventdev_pipeline_sw_pmd/pipeline_worker_generic.c
index 032a4f2d2..a72b7b2f9 100644
--- a/examples/eventdev_pipeline_sw_pmd/pipeline_worker_generic.c
+++ b/examples/eventdev_pipeline_sw_pmd/pipeline_worker_generic.c
@@ -370,6 +370,45 @@ init_rx_adapter(uint16_t nb_ports)
cdata.rx_adapter_id);
}
+static void
+generic_opt_check(void)
+{
+ int i;
+ int ret;
+ uint32_t cap = 0;
+ uint8_t rx_needed = 0;
+ struct rte_event_dev_info eventdev_info;
+
+ memset(&eventdev_info, 0, sizeof(struct rte_event_dev_info));
+ rte_event_dev_info_get(0, &eventdev_info);
+
+ for (i = 0; i < rte_eth_dev_count(); i++) {
+ ret = rte_event_eth_rx_adapter_caps_get(0, i, &cap);
+ if (ret)
+ rte_exit(EXIT_FAILURE,
+ "failed to get event rx adapter "
+ "capabilities");
+ rx_needed |=
+ !(cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT);
+ }
+
+ if (cdata.worker_lcore_mask == 0 ||
+ (rx_needed && cdata.rx_lcore_mask == 0) ||
+ cdata.tx_lcore_mask == 0 || (cdata.sched_lcore_mask == 0
+ && !(eventdev_info.event_dev_cap &
+ RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED))) {
+ printf("Core part of pipeline was not assigned any cores. "
+ "This will stall the pipeline, please check core masks "
+ "(use -h for details on setting core masks):\n"
+ "\trx: %"PRIu64"\n\ttx: %"PRIu64"\n\tsched: %"PRIu64
+ "\n\tworkers: %"PRIu64"\n",
+ cdata.rx_lcore_mask, cdata.tx_lcore_mask,
+ cdata.sched_lcore_mask,
+ cdata.worker_lcore_mask);
+ rte_exit(-1, "Fix core masks\n");
+ }
+}
+
void
set_worker_generic_setup_data(struct setup_data *caps, bool burst)
{
@@ -377,6 +416,7 @@ set_worker_generic_setup_data(struct setup_data *caps, bool burst)
caps->consumer_loop = consumer_burst;
caps->worker_loop = worker_generic_burst;
+ caps->opt_check = generic_opt_check;
caps->rx_adapter_setup = init_rx_adapter;
caps->schedule_loop = schedule_devices;
caps->eventdev_setup = setup_eventdev_cw;
--
2.14.1
* Re: [dpdk-dev] [PATCH 05/13] examples/eventdev: add ops to check cmdline args
2017-12-07 20:36 ` [dpdk-dev] [PATCH 05/13] examples/eventdev: add ops to check cmdline args Pavan Nikhilesh
@ 2017-12-19 11:23 ` Van Haaren, Harry
0 siblings, 0 replies; 48+ messages in thread
From: Van Haaren, Harry @ 2017-12-19 11:23 UTC (permalink / raw)
To: Pavan Nikhilesh, Eads, Gage, jerin.jacobkollanukkaran, Rao,
Nikhil, hemant.agrawal, Ma, Liang J
Cc: dev
> From: Pavan Nikhilesh [mailto:pbhagavatula@caviumnetworks.com]
> Sent: Thursday, December 7, 2017 8:37 PM
> To: Eads, Gage <gage.eads@intel.com>; jerin.jacobkollanukkaran@cavium.com;
> Van Haaren, Harry <harry.van.haaren@intel.com>; Rao, Nikhil
> <nikhil.rao@intel.com>; hemant.agrawal@nxp.com; Ma, Liang J
> <liang.j.ma@intel.com>
> Cc: dev@dpdk.org; Pavan Nikhilesh <pbhagavatula@caviumnetworks.com>
> Subject: [PATCH 05/13] examples/eventdev: add ops to check cmdline args
>
> Each eventdev pipeline needs to allow different cmdline args combination
> based on pipeline type.
>
> Signed-off-by: Pavan Nikhilesh <pbhagavatula@caviumnetworks.com>
<snip>
> +static void
> +generic_opt_check(void)
> +{
> + int i;
> + int ret;
> + uint32_t cap = 0;
> + uint8_t rx_needed = 0;
> + struct rte_event_dev_info eventdev_info;
> +
> + memset(&eventdev_info, 0, sizeof(struct rte_event_dev_info));
> + rte_event_dev_info_get(0, &eventdev_info);
> +
> + for (i = 0; i < rte_eth_dev_count(); i++) {
> + ret = rte_event_eth_rx_adapter_caps_get(0, i, &cap);
> + if (ret)
> + rte_exit(EXIT_FAILURE,
> + "failed to get event rx adapter "
> + "capabilities");
Nit: split string
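For context: checkpatch warns when a quoted string is split across lines;
keeping user-visible strings whole makes them greppable, even past 80 columns.
A minimal sketch of the unsplit form, reusing the call quoted above:

ret = rte_event_eth_rx_adapter_caps_get(0, i, &cap);
if (ret)
	rte_exit(EXIT_FAILURE,
		"failed to get event rx adapter capabilities");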
^ permalink raw reply [flat|nested] 48+ messages in thread
* [dpdk-dev] [PATCH 06/13] examples/eventdev: add non burst mode generic worker
2017-12-07 20:36 [dpdk-dev] [PATCH 00/13] examples/eventdev: add capability based pipeline support Pavan Nikhilesh
` (4 preceding siblings ...)
2017-12-07 20:36 ` [dpdk-dev] [PATCH 05/13] examples/eventdev: add ops to check cmdline args Pavan Nikhilesh
@ 2017-12-07 20:36 ` Pavan Nikhilesh
2017-12-19 13:26 ` Van Haaren, Harry
2017-12-07 20:36 ` [dpdk-dev] [PATCH 07/13] examples/eventdev: add thread safe Tx worker pipeline Pavan Nikhilesh
` (7 subsequent siblings)
13 siblings, 1 reply; 48+ messages in thread
From: Pavan Nikhilesh @ 2017-12-07 20:36 UTC (permalink / raw)
To: gage.eads, jerin.jacobkollanukkaran, harry.van.haaren,
nikhil.rao, hemant.agrawal, liang.j.ma
Cc: dev, Pavan Nikhilesh
Currently, the worker uses burst dequeue and burst enqueue to forward events.
Add a non burst mode based on the event dev capabilities.
Signed-off-by: Pavan Nikhilesh <pbhagavatula@caviumnetworks.com>
---
examples/eventdev_pipeline_sw_pmd/main.c | 12 +-
.../pipeline_worker_generic.c | 153 ++++++++++++++++++++-
2 files changed, 160 insertions(+), 5 deletions(-)
diff --git a/examples/eventdev_pipeline_sw_pmd/main.c b/examples/eventdev_pipeline_sw_pmd/main.c
index 2e80841d0..153467893 100644
--- a/examples/eventdev_pipeline_sw_pmd/main.c
+++ b/examples/eventdev_pipeline_sw_pmd/main.c
@@ -383,8 +383,16 @@ static void
do_capability_setup(uint16_t nb_ethdev, uint8_t eventdev_id)
{
RTE_SET_USED(nb_ethdev);
- RTE_SET_USED(eventdev_id);
- set_worker_generic_setup_data(&fdata->cap, 1);
+ uint8_t burst = 0;
+
+ struct rte_event_dev_info eventdev_info;
+ memset(&eventdev_info, 0, sizeof(struct rte_event_dev_info));
+
+ rte_event_dev_info_get(eventdev_id, &eventdev_info);
+ burst = eventdev_info.event_dev_cap & RTE_EVENT_DEV_CAP_BURST_MODE ? 1 :
+ 0;
+
+ set_worker_generic_setup_data(&fdata->cap, burst);
}
static void
diff --git a/examples/eventdev_pipeline_sw_pmd/pipeline_worker_generic.c b/examples/eventdev_pipeline_sw_pmd/pipeline_worker_generic.c
index a72b7b2f9..5998aae95 100644
--- a/examples/eventdev_pipeline_sw_pmd/pipeline_worker_generic.c
+++ b/examples/eventdev_pipeline_sw_pmd/pipeline_worker_generic.c
@@ -1,5 +1,91 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright 2016 Intel Corporation.
+ * Copyright 2016 Cavium, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Cavium, Inc nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
#include "pipeline_common.h"
+static __rte_always_inline int
+worker_generic(void *arg)
+{
+ struct rte_event ev;
+
+ struct worker_data *data = (struct worker_data *)arg;
+ uint8_t dev_id = data->dev_id;
+ uint8_t port_id = data->port_id;
+ size_t sent = 0, received = 0;
+ unsigned int lcore_id = rte_lcore_id();
+
+ while (!fdata->done) {
+
+ if (fdata->cap.schedule_loop)
+ fdata->cap.schedule_loop(lcore_id);
+
+ if (!fdata->worker_core[lcore_id]) {
+ rte_pause();
+ continue;
+ }
+
+ const uint16_t nb_rx = rte_event_dequeue_burst(dev_id, port_id,
+ &ev, 1, 0);
+
+ if (nb_rx == 0) {
+ rte_pause();
+ continue;
+ }
+ received++;
+
+ /* The first worker stage does classification */
+ if (ev.queue_id == cdata.qid[0])
+ ev.flow_id = ev.mbuf->hash.rss
+ % cdata.num_fids;
+
+ ev.queue_id = cdata.next_qid[ev.queue_id];
+ ev.op = RTE_EVENT_OP_FORWARD;
+ ev.sched_type = cdata.queue_type;
+
+ work(ev.mbuf);
+
+ while (rte_event_enqueue_burst(dev_id, port_id, &ev, 1) != 1)
+ rte_pause();
+ sent++;
+ }
+
+ if (!cdata.quiet)
+ printf(" worker %u thread done. RX=%zu TX=%zu\n",
+ rte_lcore_id(), received, sent);
+
+ return 0;
+}
+
static int
worker_generic_burst(void *arg)
{
@@ -60,6 +146,63 @@ worker_generic_burst(void *arg)
return 0;
}
+static __rte_always_inline int
+consumer(void)
+{
+ const uint64_t freq_khz = rte_get_timer_hz() / 1000;
+ struct rte_event packet;
+
+ static uint64_t received;
+ static uint64_t last_pkts;
+ static uint64_t last_time;
+ static uint64_t start_time;
+ int i;
+ uint8_t dev_id = cons_data.dev_id;
+ uint8_t port_id = cons_data.port_id;
+
+ do {
+ uint16_t n = rte_event_dequeue_burst(dev_id, port_id,
+ &packet, 1, 0);
+
+ if (n == 0) {
+ for (i = 0; i < rte_eth_dev_count(); i++)
+ rte_eth_tx_buffer_flush(i, 0, fdata->tx_buf[i]);
+ return 0;
+ }
+ if (start_time == 0)
+ last_time = start_time = rte_get_timer_cycles();
+
+ received++;
+ uint8_t outport = packet.mbuf->port;
+ rte_eth_tx_buffer(outport, 0, fdata->tx_buf[outport],
+ packet.mbuf);
+
+ /* Print out mpps every 1<<22 packets */
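+ /* Unit check: total_ms is in milliseconds, so received /
+ * (total_ms * 1000.0) is millions of packets per second, e.g.
+ * 1<<22 pkts in 419 ms -> 4194304 / 419000.0 ~= 10.0 mpps.
+ */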
+ if (!cdata.quiet && received >= last_pkts + (1<<22)) {
+ const uint64_t now = rte_get_timer_cycles();
+ const uint64_t total_ms = (now - start_time) / freq_khz;
+ const uint64_t delta_ms = (now - last_time) / freq_khz;
+ uint64_t delta_pkts = received - last_pkts;
+
+ printf("# consumer RX=%"PRIu64", time %"PRIu64 "ms, "
+ "avg %.3f mpps [current %.3f mpps]\n",
+ received,
+ total_ms,
+ received / (total_ms * 1000.0),
+ delta_pkts / (delta_ms * 1000.0));
+ last_pkts = received;
+ last_time = now;
+ }
+
+ cdata.num_packets--;
+ if (cdata.num_packets <= 0)
+ fdata->done = 1;
+ /* Stay in this loop if this is the single Tx core. */
+ } while (!fdata->done && fdata->tx_single);
+
+ return 0;
+}
+
static __rte_always_inline int
consumer_burst(void)
{
@@ -412,9 +555,13 @@ generic_opt_check(void)
void
set_worker_generic_setup_data(struct setup_data *caps, bool burst)
{
- RTE_SET_USED(burst);
- caps->consumer_loop = consumer_burst;
- caps->worker_loop = worker_generic_burst;
+ if (burst) {
+ caps->consumer_loop = consumer_burst;
+ caps->worker_loop = worker_generic_burst;
+ } else {
+ caps->consumer_loop = consumer;
+ caps->worker_loop = worker_generic;
+ }
caps->opt_check = generic_opt_check;
caps->rx_adapter_setup = init_rx_adapter;
--
2.14.1
^ permalink raw reply [flat|nested] 48+ messages in thread
* Re: [dpdk-dev] [PATCH 06/13] examples/eventdev: add non burst mode generic worker
2017-12-07 20:36 ` [dpdk-dev] [PATCH 06/13] examples/eventdev: add non burst mode generic worker Pavan Nikhilesh
@ 2017-12-19 13:26 ` Van Haaren, Harry
2017-12-19 19:01 ` Pavan Nikhilesh
0 siblings, 1 reply; 48+ messages in thread
From: Van Haaren, Harry @ 2017-12-19 13:26 UTC (permalink / raw)
To: Pavan Nikhilesh, Eads, Gage, jerin.jacobkollanukkaran, Rao,
Nikhil, hemant.agrawal, Ma, Liang J
Cc: dev
> From: Pavan Nikhilesh [mailto:pbhagavatula@caviumnetworks.com]
> Sent: Thursday, December 7, 2017 8:37 PM
> To: Eads, Gage <gage.eads@intel.com>; jerin.jacobkollanukkaran@cavium.com;
> Van Haaren, Harry <harry.van.haaren@intel.com>; Rao, Nikhil
> <nikhil.rao@intel.com>; hemant.agrawal@nxp.com; Ma, Liang J
> <liang.j.ma@intel.com>
> Cc: dev@dpdk.org; Pavan Nikhilesh <pbhagavatula@caviumnetworks.com>
> Subject: [PATCH 06/13] examples/eventdev: add non burst mode generic worker
>
> Currently, worker uses burst dequeue and burst enqueue to forward events.
> Add a non burst mode based on the event dev capabilities.
>
> Signed-off-by: Pavan Nikhilesh <pbhagavatula@caviumnetworks.com>
<snip>
> diff --git a/examples/eventdev_pipeline_sw_pmd/pipeline_worker_generic.c
> b/examples/eventdev_pipeline_sw_pmd/pipeline_worker_generic.c
> index a72b7b2f9..5998aae95 100644
> --- a/examples/eventdev_pipeline_sw_pmd/pipeline_worker_generic.c
> +++ b/examples/eventdev_pipeline_sw_pmd/pipeline_worker_generic.c
> @@ -1,5 +1,91 @@
> +/*
> + * BSD LICENSE
> + *
> + * Copyright 2016 Intel Corporation.
> + * Copyright 2016 Cavium, Inc.
> + *
> + * Redistribution and use in source and binary forms, with or without
> + * modification, are permitted provided that the following conditions
> + * are met:
> + *
> + * * Redistributions of source code must retain the above copyright
> + * notice, this list of conditions and the following disclaimer.
> + * * Redistributions in binary form must reproduce the above copyright
> + * notice, this list of conditions and the following disclaimer in
> + * the documentation and/or other materials provided with the
> + * distribution.
> + * * Neither the name of Cavium, Inc nor the names of its
> + * contributors may be used to endorse or promote products derived
> + * from this software without specific prior written permission.
> + *
> + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
> + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
> + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
> + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
> + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
> + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
> + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
> + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
> + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
> + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
> + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
> + */
> +
Adding a license to an existing file? Also can now be updated to SPDX style
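For reference, the SPDX form replaces the full license text with a one-line
tag plus the copyright notices; a minimal sketch, assuming BSD-3-Clause and
the same copyright holders as above:

/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2016 Intel Corporation.
 * Copyright 2016 Cavium, Inc.
 */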
> static __rte_always_inline int
> consumer_burst(void)
> {
> @@ -412,9 +555,13 @@ generic_opt_check(void)
> void
> set_worker_generic_setup_data(struct setup_data *caps, bool burst)
> {
> - RTE_SET_USED(burst);
> - caps->consumer_loop = consumer_burst;
> - caps->worker_loop = worker_generic_burst;
> + if (burst) {
> + caps->consumer_loop = consumer_burst;
> + caps->worker_loop = worker_generic_burst;
> + } else {
> + caps->consumer_loop = consumer;
> + caps->worker_loop = worker_generic;
> + }
This won't scale well. See notes on future patches (9/13 and 10/13).
^ permalink raw reply [flat|nested] 48+ messages in thread
* Re: [dpdk-dev] [PATCH 06/13] examples/eventdev: add non burst mode generic worker
2017-12-19 13:26 ` Van Haaren, Harry
@ 2017-12-19 19:01 ` Pavan Nikhilesh
0 siblings, 0 replies; 48+ messages in thread
From: Pavan Nikhilesh @ 2017-12-19 19:01 UTC (permalink / raw)
To: Van Haaren, Harry, Eads, Gage, jerin.jacobkollanukkaran; +Cc: dev
On Tue, Dec 19, 2017 at 01:26:30PM +0000, Van Haaren, Harry wrote:
> > From: Pavan Nikhilesh [mailto:pbhagavatula@caviumnetworks.com]
> > Sent: Thursday, December 7, 2017 8:37 PM
> > To: Eads, Gage <gage.eads@intel.com>; jerin.jacobkollanukkaran@cavium.com;
> > Van Haaren, Harry <harry.van.haaren@intel.com>; Rao, Nikhil
> > <nikhil.rao@intel.com>; hemant.agrawal@nxp.com; Ma, Liang J
> > <liang.j.ma@intel.com>
> > Cc: dev@dpdk.org; Pavan Nikhilesh <pbhagavatula@caviumnetworks.com>
> > Subject: [PATCH 06/13] examples/eventdev: add non burst mode generic worker
> >
> > Currently, worker uses burst dequeue and burst enqueue to forward events.
> > Add a non burst mode based on the event dev capabilities.
> >
> > Signed-off-by: Pavan Nikhilesh <pbhagavatula@caviumnetworks.com>
>
> <snip>
>
> > diff --git a/examples/eventdev_pipeline_sw_pmd/pipeline_worker_generic.c
> > b/examples/eventdev_pipeline_sw_pmd/pipeline_worker_generic.c
> > index a72b7b2f9..5998aae95 100644
> > --- a/examples/eventdev_pipeline_sw_pmd/pipeline_worker_generic.c
> > +++ b/examples/eventdev_pipeline_sw_pmd/pipeline_worker_generic.c
> > @@ -1,5 +1,91 @@
> > +/*
> > + * BSD LICENSE
> > + *
> > + * Copyright 2016 Intel Corporation.
> > + * Copyright 2016 Cavium, Inc.
> > + *
> > + * Redistribution and use in source and binary forms, with or without
> > + * modification, are permitted provided that the following conditions
> > + * are met:
> > + *
> > + * * Redistributions of source code must retain the above copyright
> > + * notice, this list of conditions and the following disclaimer.
> > + * * Redistributions in binary form must reproduce the above copyright
> > + * notice, this list of conditions and the following disclaimer in
> > + * the documentation and/or other materials provided with the
> > + * distribution.
> > + * * Neither the name of Cavium, Inc nor the names of its
> > + * contributors may be used to endorse or promote products derived
> > + * from this software without specific prior written permission.
> > + *
> > + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
> > + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
> > + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
> > + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
> > + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
> > + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
> > + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
> > + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
> > + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
> > + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
> > + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
> > + */
> > +
>
> Adding a license to an existing file? Also can now be updated to SPDX style
>
We still need to get approval from legal; once we are good to go we will
update it to SPDX style.
> > static __rte_always_inline int
> > consumer_burst(void)
> > {
> > @@ -412,9 +555,13 @@ generic_opt_check(void)
> > void
> > set_worker_generic_setup_data(struct setup_data *caps, bool burst)
> > {
> > - RTE_SET_USED(burst);
> > - caps->consumer_loop = consumer_burst;
> > - caps->worker_loop = worker_generic_burst;
> > + if (burst) {
> > + caps->consumer_loop = consumer_burst;
> > + caps->worker_loop = worker_generic_burst;
> > + } else {
> > + caps->consumer_loop = consumer;
> > + caps->worker_loop = worker_generic;
> > + }
>
> This won't scale well. See notes on future patch (9/13 and 10/13)
^ permalink raw reply [flat|nested] 48+ messages in thread
* [dpdk-dev] [PATCH 07/13] examples/eventdev: add thread safe Tx worker pipeline
2017-12-07 20:36 [dpdk-dev] [PATCH 00/13] examples/eventdev: add capability based pipeline support Pavan Nikhilesh
` (5 preceding siblings ...)
2017-12-07 20:36 ` [dpdk-dev] [PATCH 06/13] examples/eventdev: add non burst mode generic worker Pavan Nikhilesh
@ 2017-12-07 20:36 ` Pavan Nikhilesh
2017-12-19 12:00 ` Van Haaren, Harry
2017-12-07 20:37 ` [dpdk-dev] [PATCH 08/13] examples/eventdev: add burst for thread safe pipeline Pavan Nikhilesh
` (6 subsequent siblings)
13 siblings, 1 reply; 48+ messages in thread
From: Pavan Nikhilesh @ 2017-12-07 20:36 UTC (permalink / raw)
To: gage.eads, jerin.jacobkollanukkaran, harry.van.haaren,
nikhil.rao, hemant.agrawal, liang.j.ma
Cc: dev, Pavan Nikhilesh
Add worker pipeline when Tx is multi thread safe.
Probe Ethernet dev capabilities and select it if it is supported.
Signed-off-by: Pavan Nikhilesh <pbhagavatula@caviumnetworks.com>
---
examples/eventdev_pipeline_sw_pmd/Makefile | 1 +
examples/eventdev_pipeline_sw_pmd/main.c | 18 +-
.../eventdev_pipeline_sw_pmd/pipeline_common.h | 2 +
.../eventdev_pipeline_sw_pmd/pipeline_worker_tx.c | 433 +++++++++++++++++++++
4 files changed, 452 insertions(+), 2 deletions(-)
create mode 100644 examples/eventdev_pipeline_sw_pmd/pipeline_worker_tx.c
diff --git a/examples/eventdev_pipeline_sw_pmd/Makefile b/examples/eventdev_pipeline_sw_pmd/Makefile
index 5e30556fb..59ee9840a 100644
--- a/examples/eventdev_pipeline_sw_pmd/Makefile
+++ b/examples/eventdev_pipeline_sw_pmd/Makefile
@@ -43,6 +43,7 @@ APP = eventdev_pipeline_sw_pmd
# all source are stored in SRCS-y
SRCS-y := main.c
SRCS-y += pipeline_worker_generic.c
+SRCS-y += pipeline_worker_tx.c
CFLAGS += -O3
CFLAGS += $(WERROR_FLAGS)
diff --git a/examples/eventdev_pipeline_sw_pmd/main.c b/examples/eventdev_pipeline_sw_pmd/main.c
index 153467893..3be981c15 100644
--- a/examples/eventdev_pipeline_sw_pmd/main.c
+++ b/examples/eventdev_pipeline_sw_pmd/main.c
@@ -382,9 +382,20 @@ init_ports(unsigned int num_ports)
static void
do_capability_setup(uint16_t nb_ethdev, uint8_t eventdev_id)
{
- RTE_SET_USED(nb_ethdev);
+ int i;
+ uint8_t mt_unsafe = 0;
uint8_t burst = 0;
+ for (i = 0; i < nb_ethdev; i++) {
+ struct rte_eth_dev_info dev_info;
+ memset(&dev_info, 0, sizeof(struct rte_eth_dev_info));
+
+ rte_eth_dev_info_get(i, &dev_info);
+ /* Check if it is safe to ask the worker to Tx. */
+ mt_unsafe |= !(dev_info.tx_offload_capa &
+ DEV_TX_OFFLOAD_MT_LOCKFREE);
+ }
+
struct rte_event_dev_info eventdev_info;
memset(&eventdev_info, 0, sizeof(struct rte_event_dev_info));
@@ -392,7 +403,10 @@ do_capability_setup(uint16_t nb_ethdev, uint8_t eventdev_id)
burst = eventdev_info.event_dev_cap & RTE_EVENT_DEV_CAP_BURST_MODE ? 1 :
0;
- set_worker_generic_setup_data(&fdata->cap, burst);
+ if (mt_unsafe)
+ set_worker_generic_setup_data(&fdata->cap, burst);
+ else
+ set_worker_tx_setup_data(&fdata->cap, burst);
}
static void
diff --git a/examples/eventdev_pipeline_sw_pmd/pipeline_common.h b/examples/eventdev_pipeline_sw_pmd/pipeline_common.h
index a5837c99b..0b27d1eb0 100644
--- a/examples/eventdev_pipeline_sw_pmd/pipeline_common.h
+++ b/examples/eventdev_pipeline_sw_pmd/pipeline_common.h
@@ -108,6 +108,7 @@ struct config_data {
int dump_dev_signal;
unsigned int num_stages;
unsigned int worker_cq_depth;
+ unsigned int rx_stride;
int16_t next_qid[MAX_NUM_STAGES+2];
int16_t qid[MAX_NUM_STAGES];
uint8_t rx_adapter_id;
@@ -178,3 +179,4 @@ schedule_devices(unsigned int lcore_id)
}
void set_worker_generic_setup_data(struct setup_data *caps, bool burst);
+void set_worker_tx_setup_data(struct setup_data *caps, bool burst);
diff --git a/examples/eventdev_pipeline_sw_pmd/pipeline_worker_tx.c b/examples/eventdev_pipeline_sw_pmd/pipeline_worker_tx.c
new file mode 100644
index 000000000..31b7d8936
--- /dev/null
+++ b/examples/eventdev_pipeline_sw_pmd/pipeline_worker_tx.c
@@ -0,0 +1,433 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright 2016 Cavium, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Cavium, Inc nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "pipeline_common.h"
+
+static __rte_always_inline void
+worker_fwd_event(struct rte_event *ev, uint8_t sched)
+{
+ ev->event_type = RTE_EVENT_TYPE_CPU;
+ ev->op = RTE_EVENT_OP_FORWARD;
+ ev->sched_type = sched;
+}
+
+static __rte_always_inline void
+worker_event_enqueue(const uint8_t dev, const uint8_t port,
+ struct rte_event *ev)
+{
+ while (rte_event_enqueue_burst(dev, port, ev, 1) != 1)
+ rte_pause();
+}
+
+static __rte_always_inline void
+worker_tx_pkt(struct rte_mbuf *mbuf)
+{
+ while (rte_eth_tx_burst(mbuf->port, 0, &mbuf, 1) != 1)
+ rte_pause();
+}
+
+/* Multi stage Pipeline Workers */
+
+static int
+worker_do_tx(void *arg)
+{
+ struct rte_event ev;
+
+ struct worker_data *data = (struct worker_data *)arg;
+ const uint8_t dev = data->dev_id;
+ const uint8_t port = data->port_id;
+ const uint8_t lst_qid = cdata.num_stages - 1;
+ size_t fwd = 0, received = 0, tx = 0;
+
+
+ while (!fdata->done) {
+
+ if (!rte_event_dequeue_burst(dev, port, &ev, 1, 0)) {
+ rte_pause();
+ continue;
+ }
+
+ received++;
+ const uint8_t cq_id = ev.queue_id % cdata.num_stages;
+
+ if (cq_id >= lst_qid) {
+ if (ev.sched_type == RTE_SCHED_TYPE_ATOMIC) {
+ worker_tx_pkt(ev.mbuf);
+ tx++;
+ continue;
+ }
+
+ worker_fwd_event(&ev, RTE_SCHED_TYPE_ATOMIC);
+ ev.queue_id = (cq_id == lst_qid) ?
+ cdata.next_qid[ev.queue_id] : ev.queue_id;
+ } else {
+ ev.queue_id = cdata.next_qid[ev.queue_id];
+ worker_fwd_event(&ev, cdata.queue_type);
+ }
+ work(ev.mbuf);
+
+ worker_event_enqueue(dev, port, &ev);
+ fwd++;
+ }
+
+ if (!cdata.quiet)
+ printf(" worker %u thread done. RX=%zu FWD=%zu TX=%zu\n",
+ rte_lcore_id(), received, fwd, tx);
+
+ return 0;
+}
+
+static int
+setup_eventdev_w(struct prod_data *prod_data,
+ struct cons_data *cons_data,
+ struct worker_data *worker_data)
+{
+ RTE_SET_USED(prod_data);
+ RTE_SET_USED(cons_data);
+ uint8_t i;
+ const uint8_t dev_id = 0;
+ const uint8_t nb_ports = cdata.num_workers;
+ uint8_t nb_slots = 0;
+ uint8_t nb_queues = rte_eth_dev_count() * cdata.num_stages;
+
+ struct rte_event_dev_config config = {
+ .nb_event_queues = nb_queues,
+ .nb_event_ports = nb_ports,
+ .nb_events_limit = 4096,
+ .nb_event_queue_flows = 1024,
+ .nb_event_port_dequeue_depth = 128,
+ .nb_event_port_enqueue_depth = 128,
+ };
+ struct rte_event_port_conf wkr_p_conf = {
+ .dequeue_depth = cdata.worker_cq_depth,
+ .enqueue_depth = 64,
+ .new_event_threshold = 4096,
+ };
+ struct rte_event_queue_conf wkr_q_conf = {
+ .schedule_type = cdata.queue_type,
+ .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
+ .nb_atomic_flows = 1024,
+ .nb_atomic_order_sequences = 1024,
+ };
+
+ int ret, ndev = rte_event_dev_count();
+
+ if (ndev < 1) {
+ printf("%d: No Eventdev Devices Found\n", __LINE__);
+ return -1;
+ }
+
+
+ struct rte_event_dev_info dev_info;
+ ret = rte_event_dev_info_get(dev_id, &dev_info);
+ printf("\tEventdev %d: %s\n", dev_id, dev_info.driver_name);
+
+ if (dev_info.max_event_port_dequeue_depth <
+ config.nb_event_port_dequeue_depth)
+ config.nb_event_port_dequeue_depth =
+ dev_info.max_event_port_dequeue_depth;
+ if (dev_info.max_event_port_enqueue_depth <
+ config.nb_event_port_enqueue_depth)
+ config.nb_event_port_enqueue_depth =
+ dev_info.max_event_port_enqueue_depth;
+
+ ret = rte_event_dev_configure(dev_id, &config);
+ if (ret < 0) {
+ printf("%d: Error configuring device\n", __LINE__);
+ return -1;
+ }
+
+ printf(" Stages:\n");
+ for (i = 0; i < nb_queues; i++) {
+
+ uint8_t slot;
+
+ nb_slots = cdata.num_stages + 1;
+ slot = i % nb_slots;
+ wkr_q_conf.schedule_type = slot == cdata.num_stages ?
+ RTE_SCHED_TYPE_ATOMIC : cdata.queue_type;
+
+ if (rte_event_queue_setup(dev_id, i, &wkr_q_conf) < 0) {
+ printf("%d: error creating qid %d\n", __LINE__, i);
+ return -1;
+ }
+ cdata.qid[i] = i;
+ cdata.next_qid[i] = i+1;
+ if (cdata.enable_queue_priorities) {
+ const uint32_t prio_delta =
+ (RTE_EVENT_DEV_PRIORITY_LOWEST) /
+ nb_slots;
+
+ /* higher priority for queues closer to tx */
+ wkr_q_conf.priority =
+ RTE_EVENT_DEV_PRIORITY_LOWEST - prio_delta *
+ (i % nb_slots);
+ }
+
+ const char *type_str = "Atomic";
+ switch (wkr_q_conf.schedule_type) {
+ case RTE_SCHED_TYPE_ORDERED:
+ type_str = "Ordered";
+ break;
+ case RTE_SCHED_TYPE_PARALLEL:
+ type_str = "Parallel";
+ break;
+ }
+ printf("\tStage %d, Type %s\tPriority = %d\n", i, type_str,
+ wkr_q_conf.priority);
+ }
+
+ printf("\n");
+ if (wkr_p_conf.dequeue_depth > config.nb_event_port_dequeue_depth)
+ wkr_p_conf.dequeue_depth = config.nb_event_port_dequeue_depth;
+ if (wkr_p_conf.enqueue_depth > config.nb_event_port_enqueue_depth)
+ wkr_p_conf.enqueue_depth = config.nb_event_port_enqueue_depth;
+
+ /* set up one port per worker, linking to all stage queues */
+ for (i = 0; i < cdata.num_workers; i++) {
+ struct worker_data *w = &worker_data[i];
+ w->dev_id = dev_id;
+ if (rte_event_port_setup(dev_id, i, &wkr_p_conf) < 0) {
+ printf("Error setting up port %d\n", i);
+ return -1;
+ }
+
+ if (rte_event_port_link(dev_id, i, NULL, NULL, 0)
+ != nb_queues) {
+ printf("%d: error creating link for port %d\n",
+ __LINE__, i);
+ return -1;
+ }
+ w->port_id = i;
+ }
+
+ cdata.rx_stride = nb_slots;
+ ret = rte_event_dev_service_id_get(dev_id,
+ &fdata->evdev_service_id);
+ if (ret != -ESRCH && ret != 0) {
+ printf("Error getting the service ID for sw eventdev\n");
+ return -1;
+ }
+ rte_service_runstate_set(fdata->evdev_service_id, 1);
+ rte_service_set_runstate_mapped_check(fdata->evdev_service_id, 0);
+ if (rte_event_dev_start(dev_id) < 0) {
+ printf("Error starting eventdev\n");
+ return -1;
+ }
+
+ return dev_id;
+}
+
+
+struct rx_adptr_services {
+ uint16_t nb_rx_adptrs;
+ uint32_t *rx_adpt_arr;
+};
+
+static int32_t
+service_rx_adapter(void *arg)
+{
+ int i;
+ struct rx_adptr_services *adptr_services = arg;
+
+ for (i = 0; i < adptr_services->nb_rx_adptrs; i++)
+ rte_service_run_iter_on_app_lcore(
+ adptr_services->rx_adpt_arr[i], 1);
+ return 0;
+}
+
+static void
+init_rx_adapter(uint16_t nb_ports)
+{
+ int i;
+ int ret;
+ uint8_t evdev_id = 0;
+ struct rx_adptr_services *adptr_services = NULL;
+ struct rte_event_dev_info dev_info;
+
+ ret = rte_event_dev_info_get(evdev_id, &dev_info);
+ adptr_services = rte_zmalloc(NULL, sizeof(struct rx_adptr_services), 0);
+
+ struct rte_event_port_conf rx_p_conf = {
+ .dequeue_depth = 8,
+ .enqueue_depth = 8,
+ .new_event_threshold = 1200,
+ };
+
+ if (rx_p_conf.dequeue_depth > dev_info.max_event_port_dequeue_depth)
+ rx_p_conf.dequeue_depth = dev_info.max_event_port_dequeue_depth;
+ if (rx_p_conf.enqueue_depth > dev_info.max_event_port_enqueue_depth)
+ rx_p_conf.enqueue_depth = dev_info.max_event_port_enqueue_depth;
+
+
+ struct rte_event_eth_rx_adapter_queue_conf queue_conf = {
+ .ev.sched_type = cdata.queue_type,
+ };
+
+ for (i = 0; i < nb_ports; i++) {
+ uint32_t cap;
+ uint32_t service_id;
+
+ ret = rte_event_eth_rx_adapter_create(i, evdev_id, &rx_p_conf);
+ if (ret)
+ rte_exit(EXIT_FAILURE,
+ "failed to create rx adapter[%d]",
+ cdata.rx_adapter_id);
+
+ ret = rte_event_eth_rx_adapter_caps_get(evdev_id, i, &cap);
+ if (ret)
+ rte_exit(EXIT_FAILURE,
+ "failed to get event rx adapter "
+ "capabilities");
+
+ queue_conf.ev.queue_id = cdata.rx_stride ?
+ (i * cdata.rx_stride)
+ : (uint8_t)cdata.qid[0];
+
+ ret = rte_event_eth_rx_adapter_queue_add(i, i, -1, &queue_conf);
+ if (ret)
+ rte_exit(EXIT_FAILURE,
+ "Failed to add queues to Rx adapter");
+
+
+ /* Producer needs to be scheduled. */
+ if (!(cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT)) {
+ ret = rte_event_eth_rx_adapter_service_id_get(i,
+ &service_id);
+ if (ret != -ESRCH && ret != 0) {
+ rte_exit(EXIT_FAILURE,
+ "Error getting the service ID for rx adptr\n");
+ }
+
+ rte_service_runstate_set(service_id, 1);
+ rte_service_set_runstate_mapped_check(service_id, 0);
+
+ adptr_services->nb_rx_adptrs++;
+ adptr_services->rx_adpt_arr = rte_realloc(
+ adptr_services->rx_adpt_arr,
+ adptr_services->nb_rx_adptrs *
+ sizeof(uint32_t), 0);
+ adptr_services->rx_adpt_arr[
+ adptr_services->nb_rx_adptrs - 1] =
+ service_id;
+ }
+
+ ret = rte_event_eth_rx_adapter_start(i);
+ if (ret)
+ rte_exit(EXIT_FAILURE, "Rx adapter[%d] start failed",
+ cdata.rx_adapter_id);
+ }
+
+ prod_data.dev_id = evdev_id;
+ prod_data.qid = 0;
+
+ if (adptr_services->nb_rx_adptrs) {
+ struct rte_service_spec service;
+
+ memset(&service, 0, sizeof(struct rte_service_spec));
+ snprintf(service.name, sizeof(service.name), "rx_service");
+ service.callback = service_rx_adapter;
+ service.callback_userdata = (void *)adptr_services;
+
+ int32_t ret = rte_service_component_register(&service,
+ &fdata->rxadptr_service_id);
+ if (ret)
+ rte_exit(EXIT_FAILURE,
+ "Rx adapter[%d] service register failed",
+ cdata.rx_adapter_id);
+
+ rte_service_runstate_set(fdata->rxadptr_service_id, 1);
+ rte_service_component_runstate_set(fdata->rxadptr_service_id,
+ 1);
+ rte_service_set_runstate_mapped_check(fdata->rxadptr_service_id,
+ 0);
+ } else {
+ /* No adapter needs a service core; decide whether the
+ * scheduler loop is needed before freeing adptr_services
+ * to avoid a use-after-free.
+ */
+ if (fdata->cap.consumer_loop == NULL &&
+ (dev_info.event_dev_cap &
+ RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED))
+ fdata->cap.schedule_loop = NULL;
+
+ rte_free(adptr_services);
+ }
+}
+
+static void
+opt_check(void)
+{
+ int i;
+ int ret;
+ uint32_t cap = 0;
+ uint8_t rx_needed = 0;
+ struct rte_event_dev_info eventdev_info;
+
+ memset(&eventdev_info, 0, sizeof(struct rte_event_dev_info));
+ rte_event_dev_info_get(0, &eventdev_info);
+
+ for (i = 0; i < rte_eth_dev_count(); i++) {
+ ret = rte_event_eth_rx_adapter_caps_get(0, i, &cap);
+ if (ret)
+ rte_exit(EXIT_FAILURE,
+ "failed to get event rx adapter "
+ "capabilities");
+ rx_needed |=
+ !(cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT);
+ }
+
+ if (cdata.worker_lcore_mask == 0 ||
+ (rx_needed && cdata.rx_lcore_mask == 0) ||
+ (cdata.sched_lcore_mask == 0 &&
+ !(eventdev_info.event_dev_cap &
+ RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED))) {
+ printf("Core part of pipeline was not assigned any cores. "
+ "This will stall the pipeline, please check core masks "
+ "(use -h for details on setting core masks):\n"
+ "\trx: %"PRIu64"\n\ttx: %"PRIu64"\n\tsched: %"PRIu64
+ "\n\tworkers: %"PRIu64"\n",
+ cdata.rx_lcore_mask, cdata.tx_lcore_mask,
+ cdata.sched_lcore_mask,
+ cdata.worker_lcore_mask);
+ rte_exit(-1, "Fix core masks\n");
+ }
+}
+
+void
+set_worker_tx_setup_data(struct setup_data *caps, bool burst)
+{
+ RTE_SET_USED(burst);
+ caps->worker_loop = worker_do_tx;
+
+ caps->opt_check = opt_check;
+ caps->consumer_loop = NULL;
+ caps->schedule_loop = schedule_devices;
+ caps->eventdev_setup = setup_eventdev_w;
+ caps->rx_adapter_setup = init_rx_adapter;
+}
--
2.14.1
^ permalink raw reply [flat|nested] 48+ messages in thread
* Re: [dpdk-dev] [PATCH 07/13] examples/eventdev: add thread safe Tx worker pipeline
2017-12-07 20:36 ` [dpdk-dev] [PATCH 07/13] examples/eventdev: add thread safe Tx worker pipeline Pavan Nikhilesh
@ 2017-12-19 12:00 ` Van Haaren, Harry
2017-12-19 18:55 ` Pavan Nikhilesh
0 siblings, 1 reply; 48+ messages in thread
From: Van Haaren, Harry @ 2017-12-19 12:00 UTC (permalink / raw)
To: Pavan Nikhilesh, Eads, Gage, jerin.jacobkollanukkaran, Rao,
Nikhil, hemant.agrawal, Ma, Liang J
Cc: dev
> From: Pavan Nikhilesh [mailto:pbhagavatula@caviumnetworks.com]
> Sent: Thursday, December 7, 2017 8:37 PM
> To: Eads, Gage <gage.eads@intel.com>; jerin.jacobkollanukkaran@cavium.com;
> Van Haaren, Harry <harry.van.haaren@intel.com>; Rao, Nikhil
> <nikhil.rao@intel.com>; hemant.agrawal@nxp.com; Ma, Liang J
> <liang.j.ma@intel.com>
> Cc: dev@dpdk.org; Pavan Nikhilesh <pbhagavatula@caviumnetworks.com>
> Subject: [PATCH 07/13] examples/eventdev: add thread safe Tx worker pipeline
>
> Add worker pipeline when Tx is multi thread safe.
> Probe Ethernet dev capabilities and select it if it is supported.
>
> Signed-off-by: Pavan Nikhilesh <pbhagavatula@caviumnetworks.com>
<snip>
> static void
> diff --git a/examples/eventdev_pipeline_sw_pmd/pipeline_common.h
> b/examples/eventdev_pipeline_sw_pmd/pipeline_common.h
> index a5837c99b..0b27d1eb0 100644
> --- a/examples/eventdev_pipeline_sw_pmd/pipeline_common.h
> +++ b/examples/eventdev_pipeline_sw_pmd/pipeline_common.h
> @@ -108,6 +108,7 @@ struct config_data {
> int dump_dev_signal;
> unsigned int num_stages;
> unsigned int worker_cq_depth;
> + unsigned int rx_stride;
Perhaps comment on what "rx stride" does? The other variables are logical enough to me, but this one I don't get.
> +static int
> +setup_eventdev_w(struct prod_data *prod_data,
> + struct cons_data *cons_data,
> + struct worker_data *worker_data)
I think this function name can be improved - what does the w mean?
^ permalink raw reply [flat|nested] 48+ messages in thread
* Re: [dpdk-dev] [PATCH 07/13] examples/eventdev: add thread safe Tx worker pipeline
2017-12-19 12:00 ` Van Haaren, Harry
@ 2017-12-19 18:55 ` Pavan Nikhilesh
0 siblings, 0 replies; 48+ messages in thread
From: Pavan Nikhilesh @ 2017-12-19 18:55 UTC (permalink / raw)
To: Van Haaren, Harry, jerin.jacobkollanukkaran; +Cc: dev
Hi Harry,
Thanks for the review and comments. Feedback inline.
On Tue, Dec 19, 2017 at 12:00:02PM +0000, Van Haaren, Harry wrote:
> > From: Pavan Nikhilesh [mailto:pbhagavatula@caviumnetworks.com]
> > Sent: Thursday, December 7, 2017 8:37 PM
> > To: Eads, Gage <gage.eads@intel.com>; jerin.jacobkollanukkaran@cavium.com;
> > Van Haaren, Harry <harry.van.haaren@intel.com>; Rao, Nikhil
> > <nikhil.rao@intel.com>; hemant.agrawal@nxp.com; Ma, Liang J
> > <liang.j.ma@intel.com>
> > Cc: dev@dpdk.org; Pavan Nikhilesh <pbhagavatula@caviumnetworks.com>
> > Subject: [PATCH 07/13] examples/eventdev: add thread safe Tx worker pipeline
> >
> > Add worker pipeline when Tx is multi thread safe.
> > Probe Ethernet dev capabilities and select it if it is supported.
> >
> > Signed-off-by: Pavan Nikhilesh <pbhagavatula@caviumnetworks.com>
>
> <snip>
>
> > static void
> > diff --git a/examples/eventdev_pipeline_sw_pmd/pipeline_common.h
> > b/examples/eventdev_pipeline_sw_pmd/pipeline_common.h
> > index a5837c99b..0b27d1eb0 100644
> > --- a/examples/eventdev_pipeline_sw_pmd/pipeline_common.h
> > +++ b/examples/eventdev_pipeline_sw_pmd/pipeline_common.h
> > @@ -108,6 +108,7 @@ struct config_data {
> > int dump_dev_signal;
> > unsigned int num_stages;
> > unsigned int worker_cq_depth;
> > + unsigned int rx_stride;
>
> Perhaps comment on what "rx stride" does? The other variables are logical enough to me, but this one I don't get.
Sure, will add a comment explaining why we need the rx stride.
It basically reduces congestion in the entry queue when using multiple ports.
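To make it concrete, a sketch of the mapping done in init_rx_adapter() in
this patch, with an example (nb_slots is set in setup_eventdev_w()):

/* i = ethdev port id; rx_stride == nb_slots == num_stages + 1 */
queue_conf.ev.queue_id = cdata.rx_stride ? (i * cdata.rx_stride)
		: (uint8_t)cdata.qid[0];
/* e.g. 2 stages -> nb_slots = 3: port 0 enters at qid 0, port 1 at qid 3,
 * so each port gets its own entry queue instead of sharing qid 0.
 */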
>
> > +static int
> > +setup_eventdev_w(struct prod_data *prod_data,
> > + struct cons_data *cons_data,
> > + struct worker_data *worker_data)
>
> I think this function name can be improved - what does the w mean?
>
I will redo the function names according to the pipeline name:
`setup_eventdev_worker_generic`
`setup_eventdev_worker_tx`
Pavan
^ permalink raw reply [flat|nested] 48+ messages in thread
* [dpdk-dev] [PATCH 08/13] examples/eventdev: add burst for thread safe pipeline
2017-12-07 20:36 [dpdk-dev] [PATCH 00/13] examples/eventdev: add capability based pipeline support Pavan Nikhilesh
` (6 preceding siblings ...)
2017-12-07 20:36 ` [dpdk-dev] [PATCH 07/13] examples/eventdev: add thread safe Tx worker pipeline Pavan Nikhilesh
@ 2017-12-07 20:37 ` Pavan Nikhilesh
2017-12-07 20:37 ` [dpdk-dev] [PATCH 09/13] examples/eventdev: add all type queue option Pavan Nikhilesh
` (5 subsequent siblings)
13 siblings, 0 replies; 48+ messages in thread
From: Pavan Nikhilesh @ 2017-12-07 20:37 UTC (permalink / raw)
To: gage.eads, jerin.jacobkollanukkaran, harry.van.haaren,
nikhil.rao, hemant.agrawal, liang.j.ma
Cc: dev, Pavan Nikhilesh
Add burst mode worker pipeline when Tx is multi thread safe.
Signed-off-by: Pavan Nikhilesh <pbhagavatula@caviumnetworks.com>
---
.../eventdev_pipeline_sw_pmd/pipeline_worker_tx.c | 78 +++++++++++++++++++++-
1 file changed, 76 insertions(+), 2 deletions(-)
diff --git a/examples/eventdev_pipeline_sw_pmd/pipeline_worker_tx.c b/examples/eventdev_pipeline_sw_pmd/pipeline_worker_tx.c
index 31b7d8936..a824f1f49 100644
--- a/examples/eventdev_pipeline_sw_pmd/pipeline_worker_tx.c
+++ b/examples/eventdev_pipeline_sw_pmd/pipeline_worker_tx.c
@@ -48,6 +48,19 @@ worker_event_enqueue(const uint8_t dev, const uint8_t port,
rte_pause();
}
+static __rte_always_inline void
+worker_event_enqueue_burst(const uint8_t dev, const uint8_t port,
+ struct rte_event *ev, const uint16_t nb_rx)
+{
+ uint16_t enq;
+
+ enq = rte_event_enqueue_burst(dev, port, ev, nb_rx);
+ while (enq < nb_rx) {
+ enq += rte_event_enqueue_burst(dev, port,
+ ev + enq, nb_rx - enq);
+ }
+}
+
static __rte_always_inline void
worker_tx_pkt(struct rte_mbuf *mbuf)
{
@@ -106,6 +119,65 @@ worker_do_tx(void *arg)
return 0;
}
+static int
+worker_do_tx_burst(void *arg)
+{
+ struct rte_event ev[BATCH_SIZE];
+
+ struct worker_data *data = (struct worker_data *)arg;
+ uint8_t dev = data->dev_id;
+ uint8_t port = data->port_id;
+ uint8_t lst_qid = cdata.num_stages - 1;
+ size_t fwd = 0, received = 0, tx = 0;
+
+ while (!fdata->done) {
+ uint16_t i;
+ const uint16_t nb_rx = rte_event_dequeue_burst(dev, port,
+ ev, BATCH_SIZE, 0);
+
+ if (nb_rx == 0) {
+ rte_pause();
+ continue;
+ }
+ received += nb_rx;
+
+ for (i = 0; i < nb_rx; i++) {
+ const uint8_t cq_id = ev[i].queue_id % cdata.num_stages;
+
+ if (cq_id >= lst_qid) {
+ if (ev[i].sched_type ==
+ RTE_SCHED_TYPE_ATOMIC) {
+ worker_tx_pkt(ev[i].mbuf);
+ tx++;
+ ev[i].op = RTE_EVENT_OP_RELEASE;
+ continue;
+ }
+ ev[i].queue_id = (cq_id == lst_qid) ?
+ cdata.next_qid[ev[i].queue_id] :
+ ev[i].queue_id;
+
+ worker_fwd_event(&ev[i],
+ RTE_SCHED_TYPE_ATOMIC);
+ } else {
+ ev[i].queue_id = cdata.next_qid[
+ ev[i].queue_id];
+ worker_fwd_event(&ev[i],
+ cdata.queue_type);
+ }
+ work(ev[i].mbuf);
+ }
+ worker_event_enqueue_burst(dev, port, ev, nb_rx);
+
+ fwd += nb_rx;
+ }
+
+ if (!cdata.quiet)
+ printf(" worker %u thread done. RX=%zu FWD=%zu TX=%zu\n",
+ rte_lcore_id(), received, fwd, tx);
+
+ return 0;
+}
+
static int
setup_eventdev_w(struct prod_data *prod_data,
struct cons_data *cons_data,
@@ -422,8 +494,10 @@ opt_check(void)
void
set_worker_tx_setup_data(struct setup_data *caps, bool burst)
{
- RTE_SET_USED(burst);
- caps->worker_loop = worker_do_tx;
+ if (burst)
+ caps->worker_loop = worker_do_tx_burst;
+ if (!burst)
+ caps->worker_loop = worker_do_tx;
caps->opt_check = opt_check;
caps->consumer_loop = NULL;
--
2.14.1
^ permalink raw reply [flat|nested] 48+ messages in thread
* [dpdk-dev] [PATCH 09/13] examples/eventdev: add all type queue option
2017-12-07 20:36 [dpdk-dev] [PATCH 00/13] examples/eventdev: add capability based pipeline support Pavan Nikhilesh
` (7 preceding siblings ...)
2017-12-07 20:37 ` [dpdk-dev] [PATCH 08/13] examples/eventdev: add burst for thread safe pipeline Pavan Nikhilesh
@ 2017-12-07 20:37 ` Pavan Nikhilesh
2017-12-19 13:18 ` Van Haaren, Harry
2017-12-07 20:37 ` [dpdk-dev] [PATCH 10/13] examples/eventdev: add single stage pipeline worker Pavan Nikhilesh
` (4 subsequent siblings)
13 siblings, 1 reply; 48+ messages in thread
From: Pavan Nikhilesh @ 2017-12-07 20:37 UTC (permalink / raw)
To: gage.eads, jerin.jacobkollanukkaran, harry.van.haaren,
nikhil.rao, hemant.agrawal, liang.j.ma
Cc: dev, Pavan Nikhilesh
Add a configurable option to make the queue type all type queues, i.e.
RTE_EVENT_QUEUE_CFG_ALL_TYPES, based on the event dev capability
RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES.
This can be enabled by supplying '-a' as a cmdline argument.
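An illustrative invocation enabling it (the vdev name and core masks below
are examples, not part of this patch):

./eventdev_pipeline_sw_pmd --vdev=event_sw0 -- -r1 -t1 -e4 -w F0 -s4 -n0 -a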
Signed-off-by: Pavan Nikhilesh <pbhagavatula@caviumnetworks.com>
---
examples/eventdev_pipeline_sw_pmd/main.c | 7 +-
.../eventdev_pipeline_sw_pmd/pipeline_common.h | 1 +
.../pipeline_worker_generic.c | 5 +
.../eventdev_pipeline_sw_pmd/pipeline_worker_tx.c | 134 +++++++++++++++++++--
4 files changed, 139 insertions(+), 8 deletions(-)
diff --git a/examples/eventdev_pipeline_sw_pmd/main.c b/examples/eventdev_pipeline_sw_pmd/main.c
index 3be981c15..289f7204d 100644
--- a/examples/eventdev_pipeline_sw_pmd/main.c
+++ b/examples/eventdev_pipeline_sw_pmd/main.c
@@ -149,6 +149,7 @@ static struct option long_options[] = {
{"parallel", no_argument, 0, 'p'},
{"ordered", no_argument, 0, 'o'},
{"quiet", no_argument, 0, 'q'},
+ {"use-atq", no_argument, 0, 'a'},
{"dump", no_argument, 0, 'D'},
{0, 0, 0, 0}
};
@@ -172,6 +173,7 @@ usage(void)
" -o, --ordered Use ordered scheduling\n"
" -p, --parallel Use parallel scheduling\n"
" -q, --quiet Minimize printed output\n"
+ " -a, --use-atq Use all type queues\n"
" -D, --dump Print detailed statistics before exit"
"\n";
fprintf(stderr, "%s", usage_str);
@@ -192,7 +194,7 @@ parse_app_args(int argc, char **argv)
int i;
for (;;) {
- c = getopt_long(argc, argv, "r:t:e:c:w:n:f:s:poPqDW:",
+ c = getopt_long(argc, argv, "r:t:e:c:w:n:f:s:paoPqDW:",
long_options, &option_index);
if (c == -1)
break;
@@ -225,6 +227,9 @@ parse_app_args(int argc, char **argv)
case 'p':
cdata.queue_type = RTE_SCHED_TYPE_PARALLEL;
break;
+ case 'a':
+ cdata.all_type_queues = 1;
+ break;
case 'q':
cdata.quiet = 1;
break;
diff --git a/examples/eventdev_pipeline_sw_pmd/pipeline_common.h b/examples/eventdev_pipeline_sw_pmd/pipeline_common.h
index 0b27d1eb0..62755f6d0 100644
--- a/examples/eventdev_pipeline_sw_pmd/pipeline_common.h
+++ b/examples/eventdev_pipeline_sw_pmd/pipeline_common.h
@@ -106,6 +106,7 @@ struct config_data {
int quiet;
int dump_dev;
int dump_dev_signal;
+ int all_type_queues;
unsigned int num_stages;
unsigned int worker_cq_depth;
unsigned int rx_stride;
diff --git a/examples/eventdev_pipeline_sw_pmd/pipeline_worker_generic.c b/examples/eventdev_pipeline_sw_pmd/pipeline_worker_generic.c
index 5998aae95..908d64c87 100644
--- a/examples/eventdev_pipeline_sw_pmd/pipeline_worker_generic.c
+++ b/examples/eventdev_pipeline_sw_pmd/pipeline_worker_generic.c
@@ -525,6 +525,11 @@ generic_opt_check(void)
memset(&eventdev_info, 0, sizeof(struct rte_event_dev_info));
rte_event_dev_info_get(0, &eventdev_info);
+ if (cdata.all_type_queues && !(eventdev_info.event_dev_cap &
+ RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES))
+ rte_exit(EXIT_FAILURE,
+ "Event dev doesn't support all type queues\n");
+
for (i = 0; i < rte_eth_dev_count(); i++) {
ret = rte_event_eth_rx_adapter_caps_get(0, i, &cap);
if (ret)
diff --git a/examples/eventdev_pipeline_sw_pmd/pipeline_worker_tx.c b/examples/eventdev_pipeline_sw_pmd/pipeline_worker_tx.c
index a824f1f49..e25a06027 100644
--- a/examples/eventdev_pipeline_sw_pmd/pipeline_worker_tx.c
+++ b/examples/eventdev_pipeline_sw_pmd/pipeline_worker_tx.c
@@ -119,6 +119,51 @@ worker_do_tx(void *arg)
return 0;
}
+static int
+worker_do_tx_atq(void *arg)
+{
+ struct rte_event ev;
+
+ struct worker_data *data = (struct worker_data *)arg;
+ const uint8_t dev = data->dev_id;
+ const uint8_t port = data->port_id;
+ const uint8_t lst_qid = cdata.num_stages - 1;
+ size_t fwd = 0, received = 0, tx = 0;
+
+ while (!fdata->done) {
+
+ if (!rte_event_dequeue_burst(dev, port, &ev, 1, 0)) {
+ rte_pause();
+ continue;
+ }
+
+ received++;
+ const uint8_t cq_id = ev.queue_id % cdata.num_stages;
+
+ if (cq_id == lst_qid) {
+ if (ev.sched_type == RTE_SCHED_TYPE_ATOMIC) {
+ worker_tx_pkt(ev.mbuf);
+ tx++;
+ continue;
+ }
+ worker_fwd_event(&ev, RTE_SCHED_TYPE_ATOMIC);
+ } else {
+ ev.queue_id = cdata.next_qid[ev.queue_id];
+ worker_fwd_event(&ev, cdata.queue_type);
+ }
+ work(ev.mbuf);
+
+ worker_event_enqueue(dev, port, &ev);
+ fwd++;
+ }
+
+ if (!cdata.quiet)
+ printf(" worker %u thread done. RX=%zu FWD=%zu TX=%zu\n",
+ rte_lcore_id(), received, fwd, tx);
+
+ return 0;
+}
+
static int
worker_do_tx_burst(void *arg)
{
@@ -178,6 +223,61 @@ worker_do_tx_burst(void *arg)
return 0;
}
+static int
+worker_do_tx_burst_atq(void *arg)
+{
+ struct rte_event ev[BATCH_SIZE];
+
+ struct worker_data *data = (struct worker_data *)arg;
+ uint8_t dev = data->dev_id;
+ uint8_t port = data->port_id;
+ uint8_t lst_qid = cdata.num_stages - 1;
+ size_t fwd = 0, received = 0, tx = 0;
+
+ while (!fdata->done) {
+ uint16_t i;
+
+ const uint16_t nb_rx = rte_event_dequeue_burst(dev, port,
+ ev, BATCH_SIZE, 0);
+
+ if (nb_rx == 0) {
+ rte_pause();
+ continue;
+ }
+ received += nb_rx;
+
+ for (i = 0; i < nb_rx; i++) {
+ const uint8_t cq_id = ev[i].queue_id % cdata.num_stages;
+
+ if (cq_id == lst_qid) {
+ if (ev[i].sched_type ==
+ RTE_SCHED_TYPE_ATOMIC) {
+ worker_tx_pkt(ev[i].mbuf);
+ tx++;
+ ev[i].op = RTE_EVENT_OP_RELEASE;
+ continue;
+ }
+ worker_fwd_event(&ev[i],
+ RTE_SCHED_TYPE_ATOMIC);
+ } else {
+ ev[i].queue_id = cdata.next_qid[
+ ev[i].queue_id];
+ worker_fwd_event(&ev[i],
+ cdata.queue_type);
+ }
+ }
+
+ worker_event_enqueue_burst(dev, port, ev, nb_rx);
+ fwd += nb_rx;
+ }
+
+ if (!cdata.quiet)
+ printf(" worker %u thread done. RX=%zu FWD=%zu TX=%zu\n",
+ rte_lcore_id(), received, fwd, tx);
+
+ return 0;
+}
+
static int
setup_eventdev_w(struct prod_data *prod_data,
struct cons_data *cons_data,
@@ -186,10 +286,12 @@ setup_eventdev_w(struct prod_data *prod_data,
RTE_SET_USED(prod_data);
RTE_SET_USED(cons_data);
uint8_t i;
+ const uint8_t atq = cdata.all_type_queues ? 1 : 0;
const uint8_t dev_id = 0;
const uint8_t nb_ports = cdata.num_workers;
uint8_t nb_slots = 0;
uint8_t nb_queues = rte_eth_dev_count() * cdata.num_stages;
+ nb_queues += atq ? 0 : rte_eth_dev_count();
struct rte_event_dev_config config = {
.nb_event_queues = nb_queues,
@@ -241,12 +343,19 @@ setup_eventdev_w(struct prod_data *prod_data,
printf(" Stages:\n");
for (i = 0; i < nb_queues; i++) {
- uint8_t slot;
+ if (atq) {
+
+ nb_slots = cdata.num_stages;
+ wkr_q_conf.event_queue_cfg =
+ RTE_EVENT_QUEUE_CFG_ALL_TYPES;
+ } else {
+ uint8_t slot;
- nb_slots = cdata.num_stages + 1;
- slot = i % nb_slots;
- wkr_q_conf.schedule_type = slot == cdata.num_stages ?
- RTE_SCHED_TYPE_ATOMIC : cdata.queue_type;
+ nb_slots = cdata.num_stages + 1;
+ slot = i % nb_slots;
+ wkr_q_conf.schedule_type = slot == cdata.num_stages ?
+ RTE_SCHED_TYPE_ATOMIC : cdata.queue_type;
+ }
if (rte_event_queue_setup(dev_id, i, &wkr_q_conf) < 0) {
printf("%d: error creating qid %d\n", __LINE__, i);
@@ -464,6 +573,11 @@ opt_check(void)
memset(&eventdev_info, 0, sizeof(struct rte_event_dev_info));
rte_event_dev_info_get(0, &eventdev_info);
+ if (cdata.all_type_queues && !(eventdev_info.event_dev_cap &
+ RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES))
+ rte_exit(EXIT_FAILURE,
+ "Event dev doesn't support all type queues\n");
+
for (i = 0; i < rte_eth_dev_count(); i++) {
ret = rte_event_eth_rx_adapter_caps_get(0, i, &cap);
if (ret)
@@ -494,9 +608,15 @@ opt_check(void)
void
set_worker_tx_setup_data(struct setup_data *caps, bool burst)
{
- if (burst)
+ uint8_t atq = cdata.all_type_queues ? 1 : 0;
+
+ if (burst && atq)
+ caps->worker_loop = worker_do_tx_burst_atq;
+ if (burst && !atq)
caps->worker_loop = worker_do_tx_burst;
- if (!burst)
+ if (!burst && atq)
+ caps->worker_loop = worker_do_tx_atq;
+ if (!burst && !atq)
caps->worker_loop = worker_do_tx;
caps->opt_check = opt_check;
--
2.14.1
^ permalink raw reply [flat|nested] 48+ messages in thread
* Re: [dpdk-dev] [PATCH 09/13] examples/eventdev: add all type queue option
2017-12-07 20:37 ` [dpdk-dev] [PATCH 09/13] examples/eventdev: add all type queue option Pavan Nikhilesh
@ 2017-12-19 13:18 ` Van Haaren, Harry
2017-12-19 19:05 ` Pavan Nikhilesh
0 siblings, 1 reply; 48+ messages in thread
From: Van Haaren, Harry @ 2017-12-19 13:18 UTC (permalink / raw)
To: Pavan Nikhilesh, Eads, Gage, jerin.jacobkollanukkaran, Rao,
Nikhil, hemant.agrawal, Ma, Liang J
Cc: dev
> From: Pavan Nikhilesh [mailto:pbhagavatula@caviumnetworks.com]
> Sent: Thursday, December 7, 2017 8:37 PM
> To: Eads, Gage <gage.eads@intel.com>; jerin.jacobkollanukkaran@cavium.com;
> Van Haaren, Harry <harry.van.haaren@intel.com>; Rao, Nikhil
> <nikhil.rao@intel.com>; hemant.agrawal@nxp.com; Ma, Liang J
> <liang.j.ma@intel.com>
> Cc: dev@dpdk.org; Pavan Nikhilesh <pbhagavatula@caviumnetworks.com>
> Subject: [PATCH 09/13] examples/eventdev: add all type queue option
>
> Added configurable option to make queue type as all type queues i.e.
> RTE_EVENT_QUEUE_CFG_ALL_TYPES based on event dev capability
> RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES.
>
> This can be enabled by supplying '-a' as a cmdline argument.
>
> Signed-off-by: Pavan Nikhilesh <pbhagavatula@caviumnetworks.com>
<snip>
> @@ -494,9 +608,15 @@ opt_check(void)
> void
> set_worker_tx_setup_data(struct setup_data *caps, bool burst)
> {
> - if (burst)
> + uint8_t atq = cdata.all_type_queues ? 1 : 0;
> +
> + if (burst && atq)
> + caps->worker_loop = worker_do_tx_burst_atq;
> + if (burst && !atq)
> caps->worker_loop = worker_do_tx_burst;
> - if (!burst)
> + if (!burst && atq)
> + caps->worker_loop = worker_do_tx_atq;
> + if (!burst && !atq)
> caps->worker_loop = worker_do_tx;
This doesn't scale - we can't keep &&-ing in new options. Refactoring and calling a function per burst / non-burst suggested, perhaps something like:
if(burst)
caps->worker_loop = get_worker_loop_burst(atq);
else
caps->worker_loop = get_worker_loop_single(atq);
^ permalink raw reply [flat|nested] 48+ messages in thread
* Re: [dpdk-dev] [PATCH 09/13] examples/eventdev: add all type queue option
2017-12-19 13:18 ` Van Haaren, Harry
@ 2017-12-19 19:05 ` Pavan Nikhilesh
0 siblings, 0 replies; 48+ messages in thread
From: Pavan Nikhilesh @ 2017-12-19 19:05 UTC (permalink / raw)
To: Van Haaren, Harry, Eads, Gage, jerin.jacobkollanukkaran; +Cc: dev
On Tue, Dec 19, 2017 at 01:18:45PM +0000, Van Haaren, Harry wrote:
> > From: Pavan Nikhilesh [mailto:pbhagavatula@caviumnetworks.com]
> > Sent: Thursday, December 7, 2017 8:37 PM
> > To: Eads, Gage <gage.eads@intel.com>; jerin.jacobkollanukkaran@cavium.com;
> > Van Haaren, Harry <harry.van.haaren@intel.com>; Rao, Nikhil
> > <nikhil.rao@intel.com>; hemant.agrawal@nxp.com; Ma, Liang J
> > <liang.j.ma@intel.com>
> > Cc: dev@dpdk.org; Pavan Nikhilesh <pbhagavatula@caviumnetworks.com>
> > Subject: [PATCH 09/13] examples/eventdev: add all type queue option
> >
> > Added configurable option to make queue type as all type queues i.e.
> > RTE_EVENT_QUEUE_CFG_ALL_TYPES based on event dev capability
> > RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES.
> >
> > This can be enabled by supplying '-a' as a cmdline argument.
> >
> > Signed-off-by: Pavan Nikhilesh <pbhagavatula@caviumnetworks.com>
>
> <snip>
>
> > @@ -494,9 +608,15 @@ opt_check(void)
> > void
> > set_worker_tx_setup_data(struct setup_data *caps, bool burst)
> > {
> > - if (burst)
> > + uint8_t atq = cdata.all_type_queues ? 1 : 0;
> > +
> > + if (burst && atq)
> > + caps->worker_loop = worker_do_tx_burst_atq;
> > + if (burst && !atq)
> > caps->worker_loop = worker_do_tx_burst;
> > - if (!burst)
> > + if (!burst && atq)
> > + caps->worker_loop = worker_do_tx_atq;
> > + if (!burst && !atq)
> > caps->worker_loop = worker_do_tx;
>
> This doesn't scale - we can't keep &&-ing in new options. Refactoring and calling a function per burst / non-burst suggested, perhaps something like:
>
> if(burst)
> caps->worker_loop = get_worker_loop_burst(atq);
> else
> caps->worker_loop = get_worker_loop_single(atq);
>
Agreed, will refactor worker selection logic.
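One possible shape for that refactor, with one branch per mode (the typedef
and helper names here are illustrative, not from this series):

typedef int (*worker_loop_t)(void *);

static worker_loop_t
get_worker_multi_stage(bool burst, bool atq)
{
	if (burst)
		return atq ? worker_do_tx_burst_atq : worker_do_tx_burst;
	return atq ? worker_do_tx_atq : worker_do_tx;
}

caps->worker_loop = get_worker_multi_stage(burst, atq);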
Cheers,
Pavan.
^ permalink raw reply [flat|nested] 48+ messages in thread
* [dpdk-dev] [PATCH 10/13] examples/eventdev: add single stage pipeline worker
2017-12-07 20:36 [dpdk-dev] [PATCH 00/13] examples/eventdev: add capability based pipeline support Pavan Nikhilesh
` (8 preceding siblings ...)
2017-12-07 20:37 ` [dpdk-dev] [PATCH 09/13] examples/eventdev: add all type queue option Pavan Nikhilesh
@ 2017-12-07 20:37 ` Pavan Nikhilesh
2017-12-11 16:45 ` Eads, Gage
2017-12-07 20:37 ` [dpdk-dev] [PATCH 11/13] examples/eventdev: add atq " Pavan Nikhilesh
` (3 subsequent siblings)
13 siblings, 1 reply; 48+ messages in thread
From: Pavan Nikhilesh @ 2017-12-07 20:37 UTC (permalink / raw)
To: gage.eads, jerin.jacobkollanukkaran, harry.van.haaren,
nikhil.rao, hemant.agrawal, liang.j.ma
Cc: dev, Pavan Nikhilesh
Add optimized eventdev pipeline when ethdev supports thread safe Tx
and number of configured stages is one.
Signed-off-by: Pavan Nikhilesh <pbhagavatula@caviumnetworks.com>
---
.../eventdev_pipeline_sw_pmd/pipeline_worker_tx.c | 109 +++++++++++++++++++--
1 file changed, 101 insertions(+), 8 deletions(-)
diff --git a/examples/eventdev_pipeline_sw_pmd/pipeline_worker_tx.c b/examples/eventdev_pipeline_sw_pmd/pipeline_worker_tx.c
index e25a06027..15df21b7e 100644
--- a/examples/eventdev_pipeline_sw_pmd/pipeline_worker_tx.c
+++ b/examples/eventdev_pipeline_sw_pmd/pipeline_worker_tx.c
@@ -68,6 +68,91 @@ worker_tx_pkt(struct rte_mbuf *mbuf)
rte_pause();
}
+/* Single stage pipeline workers */
+
+static int
+worker_do_tx_single(void *arg)
+{
+ struct worker_data *data = (struct worker_data *)arg;
+ const uint8_t dev = data->dev_id;
+ const uint8_t port = data->port_id;
+ size_t fwd = 0, received = 0, tx = 0;
+ struct rte_event ev;
+
+ while (!fdata->done) {
+
+ if (!rte_event_dequeue_burst(dev, port, &ev, 1, 0)) {
+ rte_pause();
+ continue;
+ }
+
+ received++;
+
+ if (ev.sched_type == RTE_SCHED_TYPE_ATOMIC) {
+ worker_tx_pkt(ev.mbuf);
+ tx++;
+ continue;
+ }
+ work(ev.mbuf);
+ ev.queue_id++;
+ worker_fwd_event(&ev, RTE_SCHED_TYPE_ATOMIC);
+ worker_event_enqueue(dev, port, &ev);
+ fwd++;
+ }
+
+ if (!cdata.quiet)
+ printf(" worker %u thread done. RX=%zu FWD=%zu TX=%zu\n",
+ rte_lcore_id(), received, fwd, tx);
+ return 0;
+}
+
+static int
+worker_do_tx_single_burst(void *arg)
+{
+ struct rte_event ev[BATCH_SIZE + 1];
+
+ struct worker_data *data = (struct worker_data *)arg;
+ const uint8_t dev = data->dev_id;
+ const uint8_t port = data->port_id;
+ size_t fwd = 0, received = 0, tx = 0;
+
+ while (!fdata->done) {
+ uint16_t i;
+ uint16_t nb_rx = rte_event_dequeue_burst(dev, port, ev,
+ BATCH_SIZE, 0);
+
+ if (!nb_rx) {
+ rte_pause();
+ continue;
+ }
+ received += nb_rx;
+
+ for (i = 0; i < nb_rx; i++) {
+ rte_prefetch0(ev[i + 1].mbuf);
+ if (ev[i].sched_type == RTE_SCHED_TYPE_ATOMIC) {
+
+ worker_tx_pkt(ev[i].mbuf);
+ ev[i].op = RTE_EVENT_OP_RELEASE;
+ tx++;
+
+ } else {
+ ev[i].queue_id++;
+ worker_fwd_event(&ev[i],
+ RTE_SCHED_TYPE_ATOMIC);
+ }
+ work(ev[i].mbuf);
+ }
+
+ worker_event_enqueue_burst(dev, port, ev, nb_rx);
+ fwd += nb_rx;
+ }
+
+ if (!cdata.quiet)
+ printf(" worker %u thread done. RX=%zu FWD=%zu TX=%zu\n",
+ rte_lcore_id(), received, fwd, tx);
+ return 0;
+}
+
/* Multi stage Pipeline Workers */
static int
@@ -265,6 +350,7 @@ worker_do_tx_burst_atq(void *arg)
worker_fwd_event(&ev[i],
cdata.queue_type);
}
+ work(ev[i].mbuf);
}
worker_event_enqueue_burst(dev, port, ev, nb_rx);
@@ -610,14 +696,21 @@ set_worker_tx_setup_data(struct setup_data *caps, bool burst)
{
uint8_t atq = cdata.all_type_queues ? 1 : 0;
- if (burst && atq)
- caps->worker_loop = worker_do_tx_burst_atq;
- if (burst && !atq)
- caps->worker_loop = worker_do_tx_burst;
- if (!burst && atq)
- caps->worker_loop = worker_do_tx_atq;
- if (!burst && !atq)
- caps->worker_loop = worker_do_tx;
+ if (cdata.num_stages == 1) {
+ if (burst)
+ caps->worker_loop = worker_do_tx_single_burst;
+ if (!burst)
+ caps->worker_loop = worker_do_tx_single;
+ } else {
+ if (burst && atq)
+ caps->worker_loop = worker_do_tx_burst_atq;
+ if (burst && !atq)
+ caps->worker_loop = worker_do_tx_burst;
+ if (!burst && atq)
+ caps->worker_loop = worker_do_tx_atq;
+ if (!burst && !atq)
+ caps->worker_loop = worker_do_tx;
+ }
caps->opt_check = opt_check;
caps->consumer_loop = NULL;
--
2.14.1
* Re: [dpdk-dev] [PATCH 10/13] examples/eventdev: add single stage pipeline worker
2017-12-07 20:37 ` [dpdk-dev] [PATCH 10/13] examples/eventdev: add single stage pipeline worker Pavan Nikhilesh
@ 2017-12-11 16:45 ` Eads, Gage
0 siblings, 0 replies; 48+ messages in thread
From: Eads, Gage @ 2017-12-11 16:45 UTC (permalink / raw)
To: Pavan Nikhilesh, jerin.jacobkollanukkaran, Van Haaren, Harry,
Rao, Nikhil, hemant.agrawal, Ma, Liang J
Cc: dev
Hi Pavan,
</snip>
> static int
> @@ -265,6 +350,7 @@ worker_do_tx_burst_atq(void *arg)
> worker_fwd_event(&ev[i],
> cdata.queue_type);
> }
> + work(ev[i].mbuf);
Please move this change to the prior patch ("examples/eventdev: add all type queue option")
Thanks,
Gage
* [dpdk-dev] [PATCH 11/13] examples/eventdev: add atq single stage pipeline worker
2017-12-07 20:36 [dpdk-dev] [PATCH 00/13] examples/eventdev: add capability based pipeline support Pavan Nikhilesh
` (9 preceding siblings ...)
2017-12-07 20:37 ` [dpdk-dev] [PATCH 10/13] examples/eventdev: add single stage pipeline worker Pavan Nikhilesh
@ 2017-12-07 20:37 ` Pavan Nikhilesh
2017-12-19 13:34 ` Van Haaren, Harry
2017-12-07 20:37 ` [dpdk-dev] [PATCH 12/13] examples/eventdev_pipeline_sw_pmd: rename example Pavan Nikhilesh
` (2 subsequent siblings)
13 siblings, 1 reply; 48+ messages in thread
From: Pavan Nikhilesh @ 2017-12-07 20:37 UTC (permalink / raw)
To: gage.eads, jerin.jacobkollanukkaran, harry.van.haaren,
nikhil.rao, hemant.agrawal, liang.j.ma
Cc: dev, Pavan Nikhilesh
Add an optimized eventdev pipeline for when the ethdev supports
thread-safe Tx, the number of configured stages is one, and the
all-type queue option is enabled.
Signed-off-by: Pavan Nikhilesh <pbhagavatula@caviumnetworks.com>
---
.../eventdev_pipeline_sw_pmd/pipeline_worker_tx.c | 88 +++++++++++++++++++++-
1 file changed, 86 insertions(+), 2 deletions(-)
diff --git a/examples/eventdev_pipeline_sw_pmd/pipeline_worker_tx.c b/examples/eventdev_pipeline_sw_pmd/pipeline_worker_tx.c
index 15df21b7e..24ffd431f 100644
--- a/examples/eventdev_pipeline_sw_pmd/pipeline_worker_tx.c
+++ b/examples/eventdev_pipeline_sw_pmd/pipeline_worker_tx.c
@@ -106,6 +106,41 @@ worker_do_tx_single(void *arg)
return 0;
}
+static int
+worker_do_tx_single_atq(void *arg)
+{
+ struct worker_data *data = (struct worker_data *)arg;
+ const uint8_t dev = data->dev_id;
+ const uint8_t port = data->port_id;
+ size_t fwd = 0, received = 0, tx = 0;
+ struct rte_event ev;
+
+ while (!fdata->done) {
+
+ if (!rte_event_dequeue_burst(dev, port, &ev, 1, 0)) {
+ rte_pause();
+ continue;
+ }
+
+ received++;
+
+ if (ev.sched_type == RTE_SCHED_TYPE_ATOMIC) {
+ worker_tx_pkt(ev.mbuf);
+ tx++;
+ continue;
+ }
+ work(ev.mbuf);
+ worker_fwd_event(&ev, RTE_SCHED_TYPE_ATOMIC);
+ worker_event_enqueue(dev, port, &ev);
+ fwd++;
+ }
+
+ if (!cdata.quiet)
+ printf(" worker %u thread done. RX=%zu FWD=%zu TX=%zu\n",
+ rte_lcore_id(), received, fwd, tx);
+ return 0;
+}
+
static int
worker_do_tx_single_burst(void *arg)
{
@@ -153,6 +188,51 @@ worker_do_tx_single_burst(void *arg)
return 0;
}
+static int
+worker_do_tx_single_burst_atq(void *arg)
+{
+ struct rte_event ev[BATCH_SIZE + 1];
+
+ struct worker_data *data = (struct worker_data *)arg;
+ const uint8_t dev = data->dev_id;
+ const uint8_t port = data->port_id;
+ size_t fwd = 0, received = 0, tx = 0;
+
+ while (!fdata->done) {
+ uint16_t i;
+ uint16_t nb_rx = rte_event_dequeue_burst(dev, port, ev,
+ BATCH_SIZE, 0);
+
+ if (!nb_rx) {
+ rte_pause();
+ continue;
+ }
+
+ received += nb_rx;
+
+ for (i = 0; i < nb_rx; i++) {
+ rte_prefetch0(ev[i + 1].mbuf);
+ if (ev[i].sched_type == RTE_SCHED_TYPE_ATOMIC) {
+
+ worker_tx_pkt(ev[i].mbuf);
+ ev[i].op = RTE_EVENT_OP_RELEASE;
+ tx++;
+ } else
+ worker_fwd_event(&ev[i],
+ RTE_SCHED_TYPE_ATOMIC);
+ work(ev[i].mbuf);
+ }
+
+ worker_event_enqueue_burst(dev, port, ev, nb_rx);
+ fwd += nb_rx;
+ }
+
+ if (!cdata.quiet)
+ printf(" worker %u thread done. RX=%zu FWD=%zu TX=%zu\n",
+ rte_lcore_id(), received, fwd, tx);
+ return 0;
+}
+
/* Multi stage Pipeline Workers */
static int
@@ -697,9 +777,13 @@ set_worker_tx_setup_data(struct setup_data *caps, bool burst)
uint8_t atq = cdata.all_type_queues ? 1 : 0;
if (cdata.num_stages == 1) {
- if (burst)
+ if (burst && atq)
+ caps->worker_loop = worker_do_tx_single_burst_atq;
+ if (burst && !atq)
caps->worker_loop = worker_do_tx_single_burst;
- if (!burst)
+ if (!burst && atq)
+ caps->worker_loop = worker_do_tx_single_atq;
+ if (!burst && !atq)
caps->worker_loop = worker_do_tx_single;
} else {
if (burst && atq)
--
2.14.1
* Re: [dpdk-dev] [PATCH 11/13] examples/eventdev: add atq single stage pipeline worker
2017-12-07 20:37 ` [dpdk-dev] [PATCH 11/13] examples/eventdev: add atq " Pavan Nikhilesh
@ 2017-12-19 13:34 ` Van Haaren, Harry
0 siblings, 0 replies; 48+ messages in thread
From: Van Haaren, Harry @ 2017-12-19 13:34 UTC (permalink / raw)
To: Pavan Nikhilesh, Eads, Gage, jerin.jacobkollanukkaran, Rao,
Nikhil, hemant.agrawal, Ma, Liang J
Cc: dev
> From: Pavan Nikhilesh [mailto:pbhagavatula@caviumnetworks.com]
> Sent: Thursday, December 7, 2017 8:37 PM
> To: Eads, Gage <gage.eads@intel.com>; jerin.jacobkollanukkaran@cavium.com;
> Van Haaren, Harry <harry.van.haaren@intel.com>; Rao, Nikhil
> <nikhil.rao@intel.com>; hemant.agrawal@nxp.com; Ma, Liang J
> <liang.j.ma@intel.com>
> Cc: dev@dpdk.org; Pavan Nikhilesh <pbhagavatula@caviumnetworks.com>
> Subject: [PATCH 11/13] examples/eventdev: add atq single stage pipeline
> worker
>
> Add optimized eventdev pipeline when ethdev supports thread safe Tx,
> number of configured stages is one and all type queue option is enabled.
>
> Signed-off-by: Pavan Nikhilesh <pbhagavatula@caviumnetworks.com>
> ---
> .../eventdev_pipeline_sw_pmd/pipeline_worker_tx.c | 88
> +++++++++++++++++++++-
> 1 file changed, 86 insertions(+), 2 deletions(-)
> if (cdata.num_stages == 1) {
> - if (burst)
> + if (burst && atq)
> + caps->worker_loop = worker_do_tx_single_burst_atq;
> + if (burst && !atq)
> caps->worker_loop = worker_do_tx_single_burst;
> - if (!burst)
> + if (!burst && atq)
> + caps->worker_loop = worker_do_tx_single_atq;
> + if (!burst && !atq)
> caps->worker_loop = worker_do_tx_single;
> } else {
> if (burst && atq)
As per previous notes, this doesn't scale.
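One scalable alternative, sketched here purely as an illustration and not
taken from the series (the worker_loop_t typedef is assumed, and the table
is populated with the eight worker loops these patches define), is to index
a function-pointer table by the mode bits so that each new mode grows the
table instead of the branch ladder:
typedef int (*worker_loop_t)(void *);
/* Sketch only: dispatch table indexed by [single_stage][burst][atq],
 * filled with the worker loops defined elsewhere in this series.
 */
static worker_loop_t const tx_worker_loops[2][2][2] = {
	[0][0][0] = worker_do_tx,
	[0][0][1] = worker_do_tx_atq,
	[0][1][0] = worker_do_tx_burst,
	[0][1][1] = worker_do_tx_burst_atq,
	[1][0][0] = worker_do_tx_single,
	[1][0][1] = worker_do_tx_single_atq,
	[1][1][0] = worker_do_tx_single_burst,
	[1][1][1] = worker_do_tx_single_burst_atq,
};
static void
set_worker_tx_setup_data(struct setup_data *caps, bool burst)
{
	const uint8_t atq = cdata.all_type_queues ? 1 : 0;
	const uint8_t single = cdata.num_stages == 1 ? 1 : 0;
	/* one lookup replaces the whole if-chain */
	caps->worker_loop = tx_worker_loops[single][burst][atq];
	caps->opt_check = opt_check;
	caps->consumer_loop = NULL;
}
With this shape, set_worker_tx_setup_data stays the same size no matter how
many worker variants later patches add.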
* [dpdk-dev] [PATCH 12/13] examples/eventdev_pipeline_sw_pmd: rename example
2017-12-07 20:36 [dpdk-dev] [PATCH 00/13] examples/eventdev: add capability based pipeline support Pavan Nikhilesh
` (10 preceding siblings ...)
2017-12-07 20:37 ` [dpdk-dev] [PATCH 11/13] examples/eventdev: add atq " Pavan Nikhilesh
@ 2017-12-07 20:37 ` Pavan Nikhilesh
2017-12-07 20:37 ` [dpdk-dev] [PATCH 13/13] doc: update example eventdev_pipeline Pavan Nikhilesh
2018-01-10 11:09 ` [dpdk-dev] [PATCH v2 01/15] examples/eventdev: add Rx adapter support Pavan Nikhilesh
13 siblings, 0 replies; 48+ messages in thread
From: Pavan Nikhilesh @ 2017-12-07 20:37 UTC (permalink / raw)
To: gage.eads, jerin.jacobkollanukkaran, harry.van.haaren,
nikhil.rao, hemant.agrawal, liang.j.ma
Cc: dev, Pavan Nikhilesh
Rename eventdev_pipeline_sw_pmd to eventdev_pipeline as it is no longer
specific to the underlying event device.
Signed-off-by: Pavan Nikhilesh <pbhagavatula@caviumnetworks.com>
---
examples/Makefile | 2 +-
examples/{eventdev_pipeline_sw_pmd => eventdev_pipeline}/Makefile | 2 +-
examples/{eventdev_pipeline_sw_pmd => eventdev_pipeline}/main.c | 0
.../{eventdev_pipeline_sw_pmd => eventdev_pipeline}/pipeline_common.h | 0
.../pipeline_worker_generic.c | 0
.../pipeline_worker_tx.c | 0
6 files changed, 2 insertions(+), 2 deletions(-)
rename examples/{eventdev_pipeline_sw_pmd => eventdev_pipeline}/Makefile (98%)
rename examples/{eventdev_pipeline_sw_pmd => eventdev_pipeline}/main.c (100%)
rename examples/{eventdev_pipeline_sw_pmd => eventdev_pipeline}/pipeline_common.h (100%)
rename examples/{eventdev_pipeline_sw_pmd => eventdev_pipeline}/pipeline_worker_generic.c (100%)
rename examples/{eventdev_pipeline_sw_pmd => eventdev_pipeline}/pipeline_worker_tx.c (100%)
diff --git a/examples/Makefile b/examples/Makefile
index 9f7974a19..a35434d74 100644
--- a/examples/Makefile
+++ b/examples/Makefile
@@ -102,6 +102,6 @@ $(info vm_power_manager requires libvirt >= 0.9.3)
endif
endif
-DIRS-y += eventdev_pipeline_sw_pmd
+DIRS-y += eventdev_pipeline
include $(RTE_SDK)/mk/rte.extsubdir.mk
diff --git a/examples/eventdev_pipeline_sw_pmd/Makefile b/examples/eventdev_pipeline/Makefile
similarity index 98%
rename from examples/eventdev_pipeline_sw_pmd/Makefile
rename to examples/eventdev_pipeline/Makefile
index 59ee9840a..893220d34 100644
--- a/examples/eventdev_pipeline_sw_pmd/Makefile
+++ b/examples/eventdev_pipeline/Makefile
@@ -38,7 +38,7 @@ RTE_TARGET ?= x86_64-native-linuxapp-gcc
include $(RTE_SDK)/mk/rte.vars.mk
# binary name
-APP = eventdev_pipeline_sw_pmd
+APP = eventdev_pipeline
# all source are stored in SRCS-y
SRCS-y := main.c
diff --git a/examples/eventdev_pipeline_sw_pmd/main.c b/examples/eventdev_pipeline/main.c
similarity index 100%
rename from examples/eventdev_pipeline_sw_pmd/main.c
rename to examples/eventdev_pipeline/main.c
diff --git a/examples/eventdev_pipeline_sw_pmd/pipeline_common.h b/examples/eventdev_pipeline/pipeline_common.h
similarity index 100%
rename from examples/eventdev_pipeline_sw_pmd/pipeline_common.h
rename to examples/eventdev_pipeline/pipeline_common.h
diff --git a/examples/eventdev_pipeline_sw_pmd/pipeline_worker_generic.c b/examples/eventdev_pipeline/pipeline_worker_generic.c
similarity index 100%
rename from examples/eventdev_pipeline_sw_pmd/pipeline_worker_generic.c
rename to examples/eventdev_pipeline/pipeline_worker_generic.c
diff --git a/examples/eventdev_pipeline_sw_pmd/pipeline_worker_tx.c b/examples/eventdev_pipeline/pipeline_worker_tx.c
similarity index 100%
rename from examples/eventdev_pipeline_sw_pmd/pipeline_worker_tx.c
rename to examples/eventdev_pipeline/pipeline_worker_tx.c
--
2.14.1
* [dpdk-dev] [PATCH 13/13] doc: update example eventdev_pipeline
2017-12-07 20:36 [dpdk-dev] [PATCH 00/13] examples/eventdev: add capability based pipeline support Pavan Nikhilesh
` (11 preceding siblings ...)
2017-12-07 20:37 ` [dpdk-dev] [PATCH 12/13] examples/eventdev_pipeline_sw_pmd: rename example Pavan Nikhilesh
@ 2017-12-07 20:37 ` Pavan Nikhilesh
2017-12-11 11:29 ` Laatz, Kevin
2018-01-10 11:09 ` [dpdk-dev] [PATCH v2 01/15] examples/eventdev: add Rx adapter support Pavan Nikhilesh
13 siblings, 1 reply; 48+ messages in thread
From: Pavan Nikhilesh @ 2017-12-07 20:37 UTC (permalink / raw)
To: gage.eads, jerin.jacobkollanukkaran, harry.van.haaren,
nikhil.rao, hemant.agrawal, liang.j.ma
Cc: dev, Pavan Nikhilesh
Remove eventdev SW PMD specific information from the document and
rename it from eventdev_pipeline_sw_pmd to eventdev_pipeline.
Signed-off-by: Pavan Nikhilesh <pbhagavatula@caviumnetworks.com>
---
.../{eventdev_pipeline_sw_pmd.rst => eventdev_pipeline.rst} | 6 +++---
doc/guides/sample_app_ug/index.rst | 2 +-
2 files changed, 4 insertions(+), 4 deletions(-)
rename doc/guides/sample_app_ug/{eventdev_pipeline_sw_pmd.rst => eventdev_pipeline.rst} (97%)
diff --git a/doc/guides/sample_app_ug/eventdev_pipeline_sw_pmd.rst b/doc/guides/sample_app_ug/eventdev_pipeline.rst
similarity index 97%
rename from doc/guides/sample_app_ug/eventdev_pipeline_sw_pmd.rst
rename to doc/guides/sample_app_ug/eventdev_pipeline.rst
index 01a5f9b21..ff6d2f0b0 100644
--- a/doc/guides/sample_app_ug/eventdev_pipeline_sw_pmd.rst
+++ b/doc/guides/sample_app_ug/eventdev_pipeline.rst
@@ -29,8 +29,8 @@
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-Eventdev Pipeline SW PMD Sample Application
-===========================================
+Eventdev Pipeline Sample Application
+====================================
The eventdev pipeline sample application is a sample app that demonstrates
the usage of the eventdev API using the software PMD. It shows how an
@@ -74,7 +74,7 @@ these settings is shown below:
.. code-block:: console
- ./build/eventdev_pipeline_sw_pmd --vdev event_sw0 -- -r1 -t1 -e4 -w FF00 -s4 -n0 -c32 -W1000 -D
+ ./build/eventdev_pipeline --vdev event_sw0 -- -r1 -t1 -e4 -w FF00 -s4 -n0 -c32 -W1000 -D
The application has some sanity checking built-in, so if there is a function
(eg; the RX core) which doesn't have a cpu core mask assigned, the application
diff --git a/doc/guides/sample_app_ug/index.rst b/doc/guides/sample_app_ug/index.rst
index db68ef765..6fcdeb0fb 100644
--- a/doc/guides/sample_app_ug/index.rst
+++ b/doc/guides/sample_app_ug/index.rst
@@ -74,7 +74,7 @@ Sample Applications User Guides
netmap_compatibility
ip_pipeline
test_pipeline
- eventdev_pipeline_sw_pmd
+ eventdev_pipeline
dist_app
vm_power_management
tep_termination
--
2.14.1
* Re: [dpdk-dev] [PATCH 13/13] doc: update example eventdev_pipeline
2017-12-07 20:37 ` [dpdk-dev] [PATCH 13/13] doc: update example eventdev_pipeline Pavan Nikhilesh
@ 2017-12-11 11:29 ` Laatz, Kevin
0 siblings, 0 replies; 48+ messages in thread
From: Laatz, Kevin @ 2017-12-11 11:29 UTC (permalink / raw)
To: Pavan Nikhilesh, Eads, Gage, jerin.jacobkollanukkaran,
Van Haaren, Harry, Rao, Nikhil, hemant.agrawal, Ma, Liang J
Cc: dev
> -----Original Message-----
> From: dev [mailto:dev-bounces@dpdk.org] On Behalf Of Pavan Nikhilesh
> Sent: Thursday, December 7, 2017 8:37 PM
> To: Eads, Gage <gage.eads@intel.com>;
> jerin.jacobkollanukkaran@cavium.com; Van Haaren, Harry
> <harry.van.haaren@intel.com>; Rao, Nikhil <nikhil.rao@intel.com>;
> hemant.agrawal@nxp.com; Ma, Liang J <liang.j.ma@intel.com>
> Cc: dev@dpdk.org; Pavan Nikhilesh <pbhagavatula@caviumnetworks.com>
> Subject: [dpdk-dev] [PATCH 13/13] doc: update example eventdev_pipeline
>
> Removed eventdev sw pmd specific information in document, renamed the
> document from eventdev_pipeline_sw_pmd to eventdev_pipeline.
>
> Signed-off-by: Pavan Nikhilesh <pbhagavatula@caviumnetworks.com>
> ---
Acked-by: Kevin Laatz <kevin.laatz@intel.com>
* [dpdk-dev] [PATCH v2 01/15] examples/eventdev: add Rx adapter support
2017-12-07 20:36 [dpdk-dev] [PATCH 00/13] examples/eventdev: add capability based pipeline support Pavan Nikhilesh
` (12 preceding siblings ...)
2017-12-07 20:37 ` [dpdk-dev] [PATCH 13/13] doc: update example eventdev_pipeline Pavan Nikhilesh
@ 2018-01-10 11:09 ` Pavan Nikhilesh
2018-01-10 11:10 ` [dpdk-dev] [PATCH v2 02/15] examples/eventdev: move common data into pipeline common Pavan Nikhilesh
` (14 more replies)
13 siblings, 15 replies; 48+ messages in thread
From: Pavan Nikhilesh @ 2018-01-10 11:09 UTC (permalink / raw)
To: gage.eads, jerin.jacobkollanukkaran, harry.van.haaren,
hemant.agrawal, liang.j.ma, santosh.shukla
Cc: dev, Pavan Nikhilesh
Use event Rx adapter for packets Rx instead of explicit producer logic.
Use service run iter function for granular control instead of using
dedicated service lcore.
Signed-off-by: Pavan Nikhilesh <pbhagavatula@caviumnetworks.com>
---
v2 Changes:
- split work function into delay cycles and exchange_mac
- add option to configure mempool size
- remove prod_data structure (Gage)
- simplify locks used while calling producer and scheduler (Gage)
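A minimal sketch of what the work() split noted above could look like; the
helper names follow the changelog, the bodies mirror the work() shown later
in this series, and the full source/destination MAC swap (rather than the
FIXME'd self-copy in work()) is an assumption:
static __rte_always_inline void
exchange_mac(struct rte_mbuf *m)
{
	struct ether_hdr *eth;
	struct ether_addr addr;
	/* swap source and destination MAC addresses (assumed fix for the
	 * FIXME in work(), which copies d_addr back onto itself)
	 */
	eth = rte_pktmbuf_mtod(m, struct ether_hdr *);
	ether_addr_copy(&eth->d_addr, &addr);
	ether_addr_copy(&eth->s_addr, &eth->d_addr);
	ether_addr_copy(&addr, &eth->s_addr);
}
static __rte_always_inline void
delay_cycles(uint64_t cycles)
{
	/* emulate per-packet work by burning TSC cycles */
	const uint64_t start = rte_rdtsc();
	while (rte_rdtsc() < start + cycles)
		rte_pause();
}
work() would then reduce to exchange_mac(m) followed by
delay_cycles(cdata.worker_cycles).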
examples/eventdev_pipeline_sw_pmd/main.c | 168 +++++++++++++++----------------
1 file changed, 80 insertions(+), 88 deletions(-)
diff --git a/examples/eventdev_pipeline_sw_pmd/main.c b/examples/eventdev_pipeline_sw_pmd/main.c
index 2e9a6d208..111dcb0ea 100644
--- a/examples/eventdev_pipeline_sw_pmd/main.c
+++ b/examples/eventdev_pipeline_sw_pmd/main.c
@@ -46,26 +46,19 @@
#include <rte_cycles.h>
#include <rte_ethdev.h>
#include <rte_eventdev.h>
+#include <rte_event_eth_rx_adapter.h>
#include <rte_service.h>
#define MAX_NUM_STAGES 8
#define BATCH_SIZE 16
#define MAX_NUM_CORE 64
-struct prod_data {
- uint8_t dev_id;
- uint8_t port_id;
- int32_t qid;
- unsigned int num_nic_ports;
-} __rte_cache_aligned;
-
struct cons_data {
uint8_t dev_id;
uint8_t port_id;
uint8_t release;
} __rte_cache_aligned;
-static struct prod_data prod_data;
static struct cons_data cons_data;
struct worker_data {
@@ -75,10 +68,9 @@ struct worker_data {
struct fastpath_data {
volatile int done;
- uint32_t rx_lock;
uint32_t tx_lock;
- uint32_t sched_lock;
uint32_t evdev_service_id;
+ uint32_t rxadptr_service_id;
bool rx_single;
bool tx_single;
bool sched_single;
@@ -106,6 +98,7 @@ struct config_data {
unsigned int worker_cq_depth;
int16_t next_qid[MAX_NUM_STAGES+2];
int16_t qid[MAX_NUM_STAGES];
+ uint8_t rx_adapter_id;
};
static struct config_data cdata = {
@@ -206,64 +199,21 @@ consumer(void)
return 0;
}
-static int
-producer(void)
-{
- static uint8_t eth_port;
- struct rte_mbuf *mbufs[BATCH_SIZE+2];
- struct rte_event ev[BATCH_SIZE+2];
- uint32_t i, num_ports = prod_data.num_nic_ports;
- int32_t qid = prod_data.qid;
- uint8_t dev_id = prod_data.dev_id;
- uint8_t port_id = prod_data.port_id;
- uint32_t prio_idx = 0;
-
- const uint16_t nb_rx = rte_eth_rx_burst(eth_port, 0, mbufs, BATCH_SIZE);
- if (++eth_port == num_ports)
- eth_port = 0;
- if (nb_rx == 0) {
- rte_pause();
- return 0;
- }
-
- for (i = 0; i < nb_rx; i++) {
- ev[i].flow_id = mbufs[i]->hash.rss;
- ev[i].op = RTE_EVENT_OP_NEW;
- ev[i].sched_type = cdata.queue_type;
- ev[i].queue_id = qid;
- ev[i].event_type = RTE_EVENT_TYPE_ETHDEV;
- ev[i].sub_event_type = 0;
- ev[i].priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
- ev[i].mbuf = mbufs[i];
- RTE_SET_USED(prio_idx);
- }
-
- const int nb_tx = rte_event_enqueue_burst(dev_id, port_id, ev, nb_rx);
- if (nb_tx != nb_rx) {
- for (i = nb_tx; i < nb_rx; i++)
- rte_pktmbuf_free(mbufs[i]);
- }
-
- return 0;
-}
-
static inline void
schedule_devices(unsigned int lcore_id)
{
- if (fdata->rx_core[lcore_id] && (fdata->rx_single ||
- rte_atomic32_cmpset(&(fdata->rx_lock), 0, 1))) {
- producer();
- rte_atomic32_clear((rte_atomic32_t *)&(fdata->rx_lock));
+ if (fdata->rx_core[lcore_id]) {
+ rte_service_run_iter_on_app_lcore(fdata->rxadptr_service_id,
+ !fdata->rx_single);
}
- if (fdata->sched_core[lcore_id] && (fdata->sched_single ||
- rte_atomic32_cmpset(&(fdata->sched_lock), 0, 1))) {
- rte_service_run_iter_on_app_lcore(fdata->evdev_service_id, 1);
+ if (fdata->sched_core[lcore_id]) {
+ rte_service_run_iter_on_app_lcore(fdata->evdev_service_id,
+ !fdata->sched_single);
if (cdata.dump_dev_signal) {
rte_event_dev_dump(0, stdout);
cdata.dump_dev_signal = 0;
}
- rte_atomic32_clear((rte_atomic32_t *)&(fdata->sched_lock));
}
if (fdata->tx_core[lcore_id] && (fdata->tx_single ||
@@ -566,6 +516,70 @@ parse_app_args(int argc, char **argv)
}
}
+static inline void
+init_rx_adapter(uint16_t nb_ports)
+{
+ int i;
+ int ret;
+ uint8_t evdev_id = 0;
+ struct rte_event_dev_info dev_info;
+
+ ret = rte_event_dev_info_get(evdev_id, &dev_info);
+
+ struct rte_event_port_conf rx_p_conf = {
+ .dequeue_depth = 8,
+ .enqueue_depth = 8,
+ .new_event_threshold = 1200,
+ };
+
+ if (rx_p_conf.dequeue_depth > dev_info.max_event_port_dequeue_depth)
+ rx_p_conf.dequeue_depth = dev_info.max_event_port_dequeue_depth;
+ if (rx_p_conf.enqueue_depth > dev_info.max_event_port_enqueue_depth)
+ rx_p_conf.enqueue_depth = dev_info.max_event_port_enqueue_depth;
+
+ ret = rte_event_eth_rx_adapter_create(cdata.rx_adapter_id, evdev_id,
+ &rx_p_conf);
+ if (ret)
+ rte_exit(EXIT_FAILURE, "failed to create rx adapter[%d]",
+ cdata.rx_adapter_id);
+
+ struct rte_event_eth_rx_adapter_queue_conf queue_conf = {
+ .ev.sched_type = cdata.queue_type,
+ .ev.queue_id = cdata.qid[0],
+ };
+
+ for (i = 0; i < nb_ports; i++) {
+ uint32_t cap;
+
+ ret = rte_event_eth_rx_adapter_caps_get(evdev_id, i, &cap);
+ if (ret)
+ rte_exit(EXIT_FAILURE,
+ "failed to get event rx adapter "
+ "capabilities");
+
+ ret = rte_event_eth_rx_adapter_queue_add(cdata.rx_adapter_id, i,
+ -1, &queue_conf);
+ if (ret)
+ rte_exit(EXIT_FAILURE,
+ "Failed to add queues to Rx adapter");
+ }
+
+ ret = rte_event_eth_rx_adapter_service_id_get(cdata.rx_adapter_id,
+ &fdata->rxadptr_service_id);
+ if (ret != -ESRCH && ret != 0) {
+ rte_exit(EXIT_FAILURE,
+ "Error getting the service ID for Rx adapter\n");
+ }
+ rte_service_runstate_set(fdata->rxadptr_service_id, 1);
+ rte_service_set_runstate_mapped_check(fdata->rxadptr_service_id, 0);
+
+ ret = rte_event_eth_rx_adapter_start(cdata.rx_adapter_id);
+ if (ret)
+ rte_exit(EXIT_FAILURE, "Rx adapter[%d] start failed",
+ cdata.rx_adapter_id);
+
+}
+
/*
* Initializes a given port using global settings and with the RX buffers
* coming from the mbuf_pool passed as a parameter.
@@ -676,15 +690,14 @@ struct port_link {
};
static int
-setup_eventdev(struct prod_data *prod_data,
- struct cons_data *cons_data,
+setup_eventdev(struct cons_data *cons_data,
struct worker_data *worker_data)
{
const uint8_t dev_id = 0;
/* +1 stages is for a SINGLE_LINK TX stage */
const uint8_t nb_queues = cdata.num_stages + 1;
- /* + 2 is one port for producer and one for consumer */
- const uint8_t nb_ports = cdata.num_workers + 2;
+ /* + 1 for consumer */
+ const uint8_t nb_ports = cdata.num_workers + 1;
struct rte_event_dev_config config = {
.nb_event_queues = nb_queues,
.nb_event_ports = nb_ports,
@@ -838,27 +851,6 @@ setup_eventdev(struct prod_data *prod_data,
__LINE__, i);
return -1;
}
- /* port for producer, no links */
- struct rte_event_port_conf rx_p_conf = {
- .dequeue_depth = 8,
- .enqueue_depth = 8,
- .new_event_threshold = 1200,
- .disable_implicit_release = disable_implicit_release,
- };
-
- if (rx_p_conf.dequeue_depth > config.nb_event_port_dequeue_depth)
- rx_p_conf.dequeue_depth = config.nb_event_port_dequeue_depth;
- if (rx_p_conf.enqueue_depth > config.nb_event_port_enqueue_depth)
- rx_p_conf.enqueue_depth = config.nb_event_port_enqueue_depth;
-
- if (rte_event_port_setup(dev_id, i + 1, &rx_p_conf) < 0) {
- printf("Error setting up port %d\n", i);
- return -1;
- }
-
- *prod_data = (struct prod_data){.dev_id = dev_id,
- .port_id = i + 1,
- .qid = cdata.qid[0] };
*cons_data = (struct cons_data){.dev_id = dev_id,
.port_id = i,
.release = disable_implicit_release };
@@ -962,12 +954,12 @@ main(int argc, char **argv)
if (worker_data == NULL)
rte_panic("rte_calloc failed\n");
- int dev_id = setup_eventdev(&prod_data, &cons_data, worker_data);
+ int dev_id = setup_eventdev(&cons_data, worker_data);
if (dev_id < 0)
rte_exit(EXIT_FAILURE, "Error setting up eventdev\n");
- prod_data.num_nic_ports = num_ports;
init_ports(num_ports);
+ init_rx_adapter(num_ports);
int worker_idx = 0;
RTE_LCORE_FOREACH_SLAVE(lcore_id) {
@@ -982,8 +974,8 @@ main(int argc, char **argv)
if (fdata->rx_core[lcore_id])
printf(
- "[%s()] lcore %d executing NIC Rx, and using eventdev port %u\n",
- __func__, lcore_id, prod_data.port_id);
+ "[%s()] lcore %d executing NIC Rx\n",
+ __func__, lcore_id);
if (fdata->tx_core[lcore_id])
printf(
--
2.15.1
* [dpdk-dev] [PATCH v2 02/15] examples/eventdev: move common data into pipeline common
2018-01-10 11:09 ` [dpdk-dev] [PATCH v2 01/15] examples/eventdev: add Rx adapter support Pavan Nikhilesh
@ 2018-01-10 11:10 ` Pavan Nikhilesh
2018-01-10 11:10 ` [dpdk-dev] [PATCH v2 03/15] examples/eventdev: add framework for caps based pipeline Pavan Nikhilesh
` (13 subsequent siblings)
14 siblings, 0 replies; 48+ messages in thread
From: Pavan Nikhilesh @ 2018-01-10 11:10 UTC (permalink / raw)
To: gage.eads, jerin.jacobkollanukkaran, harry.van.haaren,
hemant.agrawal, liang.j.ma, santosh.shukla
Cc: dev, Pavan Nikhilesh
Move common structures and functions into pipeline_common.h so that they
can be used by different kinds of pipelines.
Signed-off-by: Pavan Nikhilesh <pbhagavatula@caviumnetworks.com>
---
v2 Changes:
- Use SPDX licence tags.
examples/eventdev_pipeline_sw_pmd/main.c | 67 +-------------------
.../eventdev_pipeline_sw_pmd/pipeline_common.h | 73 ++++++++++++++++++++++
2 files changed, 75 insertions(+), 65 deletions(-)
create mode 100644 examples/eventdev_pipeline_sw_pmd/pipeline_common.h
diff --git a/examples/eventdev_pipeline_sw_pmd/main.c b/examples/eventdev_pipeline_sw_pmd/main.c
index 111dcb0ea..2c919b7fa 100644
--- a/examples/eventdev_pipeline_sw_pmd/main.c
+++ b/examples/eventdev_pipeline_sw_pmd/main.c
@@ -35,73 +35,10 @@
#include <stdio.h>
#include <signal.h>
#include <sched.h>
-#include <stdbool.h>
-
-#include <rte_eal.h>
-#include <rte_mempool.h>
-#include <rte_mbuf.h>
-#include <rte_launch.h>
-#include <rte_malloc.h>
-#include <rte_random.h>
-#include <rte_cycles.h>
-#include <rte_ethdev.h>
-#include <rte_eventdev.h>
-#include <rte_event_eth_rx_adapter.h>
-#include <rte_service.h>
-
-#define MAX_NUM_STAGES 8
-#define BATCH_SIZE 16
-#define MAX_NUM_CORE 64
-
-struct cons_data {
- uint8_t dev_id;
- uint8_t port_id;
- uint8_t release;
-} __rte_cache_aligned;
-
-static struct cons_data cons_data;
-
-struct worker_data {
- uint8_t dev_id;
- uint8_t port_id;
-} __rte_cache_aligned;
-
-struct fastpath_data {
- volatile int done;
- uint32_t tx_lock;
- uint32_t evdev_service_id;
- uint32_t rxadptr_service_id;
- bool rx_single;
- bool tx_single;
- bool sched_single;
- unsigned int rx_core[MAX_NUM_CORE];
- unsigned int tx_core[MAX_NUM_CORE];
- unsigned int sched_core[MAX_NUM_CORE];
- unsigned int worker_core[MAX_NUM_CORE];
- struct rte_eth_dev_tx_buffer *tx_buf[RTE_MAX_ETHPORTS];
-};
-static struct fastpath_data *fdata;
-
-struct config_data {
- unsigned int active_cores;
- unsigned int num_workers;
- int64_t num_packets;
- unsigned int num_fids;
- int queue_type;
- int worker_cycles;
- int enable_queue_priorities;
- int quiet;
- int dump_dev;
- int dump_dev_signal;
- unsigned int num_stages;
- unsigned int worker_cq_depth;
- int16_t next_qid[MAX_NUM_STAGES+2];
- int16_t qid[MAX_NUM_STAGES];
- uint8_t rx_adapter_id;
-};
+#include "pipeline_common.h"
-static struct config_data cdata = {
+struct config_data cdata = {
.num_packets = (1L << 25), /* do ~32M packets */
.num_fids = 512,
.queue_type = RTE_SCHED_TYPE_ATOMIC,
diff --git a/examples/eventdev_pipeline_sw_pmd/pipeline_common.h b/examples/eventdev_pipeline_sw_pmd/pipeline_common.h
new file mode 100644
index 000000000..1dbc01f16
--- /dev/null
+++ b/examples/eventdev_pipeline_sw_pmd/pipeline_common.h
@@ -0,0 +1,73 @@
+/*
+ * SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2016 Intel Corporation.
+ * Copyright 2017 Cavium, Inc.
+ */
+
+#include <stdbool.h>
+
+#include <rte_eal.h>
+#include <rte_mempool.h>
+#include <rte_mbuf.h>
+#include <rte_launch.h>
+#include <rte_malloc.h>
+#include <rte_random.h>
+#include <rte_cycles.h>
+#include <rte_ethdev.h>
+#include <rte_eventdev.h>
+#include <rte_event_eth_rx_adapter.h>
+#include <rte_service.h>
+#include <rte_service_component.h>
+
+#define MAX_NUM_STAGES 8
+#define BATCH_SIZE 16
+#define MAX_NUM_CORE 64
+
+struct cons_data {
+ uint8_t dev_id;
+ uint8_t port_id;
+ uint8_t release;
+} __rte_cache_aligned;
+
+struct worker_data {
+ uint8_t dev_id;
+ uint8_t port_id;
+} __rte_cache_aligned;
+
+struct fastpath_data {
+ volatile int done;
+ uint32_t tx_lock;
+ uint32_t evdev_service_id;
+ uint32_t rxadptr_service_id;
+ bool rx_single;
+ bool tx_single;
+ bool sched_single;
+ unsigned int rx_core[MAX_NUM_CORE];
+ unsigned int tx_core[MAX_NUM_CORE];
+ unsigned int sched_core[MAX_NUM_CORE];
+ unsigned int worker_core[MAX_NUM_CORE];
+ struct rte_eth_dev_tx_buffer *tx_buf[RTE_MAX_ETHPORTS];
+} __rte_cache_aligned;
+
+struct config_data {
+ unsigned int active_cores;
+ unsigned int num_workers;
+ int64_t num_packets;
+ unsigned int num_fids;
+ int queue_type;
+ int worker_cycles;
+ int enable_queue_priorities;
+ int quiet;
+ int dump_dev;
+ int dump_dev_signal;
+ unsigned int num_stages;
+ unsigned int worker_cq_depth;
+ int16_t next_qid[MAX_NUM_STAGES+2];
+ int16_t qid[MAX_NUM_STAGES];
+ uint8_t rx_adapter_id;
+};
+
+struct cons_data cons_data;
+
+struct fastpath_data *fdata;
+struct config_data cdata;
--
2.15.1
* [dpdk-dev] [PATCH v2 03/15] examples/eventdev: add framework for caps based pipeline
2018-01-10 11:09 ` [dpdk-dev] [PATCH v2 01/15] examples/eventdev: add Rx adapter support Pavan Nikhilesh
2018-01-10 11:10 ` [dpdk-dev] [PATCH v2 02/15] examples/eventdev: move common data into pipeline common Pavan Nikhilesh
@ 2018-01-10 11:10 ` Pavan Nikhilesh
2018-01-10 11:10 ` [dpdk-dev] [PATCH v2 04/15] examples/eventdev: add generic worker pipeline Pavan Nikhilesh
` (12 subsequent siblings)
14 siblings, 0 replies; 48+ messages in thread
From: Pavan Nikhilesh @ 2018-01-10 11:10 UTC (permalink / raw)
To: gage.eads, jerin.jacobkollanukkaran, harry.van.haaren,
hemant.agrawal, liang.j.ma, santosh.shukla
Cc: dev, Pavan Nikhilesh
Add a framework to support capability-based pipelines.
Based on the capabilities of the event device and the probed ethernet
devices, the optimal pipeline configuration can be chosen.
Signed-off-by: Pavan Nikhilesh <pbhagavatula@caviumnetworks.com>
---
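As an illustration of where this framework is headed, here is a sketch of
how the capability check might select a pipeline; this is not code from
this patch (which only adds the setup_data hooks), and it assumes the
set_worker_generic_setup_data/set_worker_tx_setup_data helpers that later
patches in the series provide:
static void
do_capability_setup(uint16_t nb_ethdev, uint8_t eventdev_id)
{
	struct rte_event_dev_info eventdev_info;
	uint16_t i;
	uint8_t burst;
	uint8_t mt_unsafe = 0;
	for (i = 0; i < nb_ethdev; i++) {
		struct rte_eth_dev_info dev_info;
		rte_eth_dev_info_get(i, &dev_info);
		/* any ethdev without lock-free Tx forces the generic,
		 * single-Tx-core pipeline
		 */
		if (!(dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MT_LOCKFREE))
			mt_unsafe = 1;
	}
	rte_event_dev_info_get(eventdev_id, &eventdev_info);
	burst = (eventdev_info.event_dev_cap & RTE_EVENT_DEV_CAP_BURST_MODE) ?
			1 : 0;
	if (mt_unsafe)
		set_worker_generic_setup_data(&fdata->cap, burst);
	else
		set_worker_tx_setup_data(&fdata->cap, burst);
}
The burst capability of the event device picks between single-event and
burst worker loops in the same way.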
examples/eventdev_pipeline_sw_pmd/pipeline_common.h | 17 +++++++++++++++++
1 file changed, 17 insertions(+)
diff --git a/examples/eventdev_pipeline_sw_pmd/pipeline_common.h b/examples/eventdev_pipeline_sw_pmd/pipeline_common.h
index 1dbc01f16..00721ea94 100644
--- a/examples/eventdev_pipeline_sw_pmd/pipeline_common.h
+++ b/examples/eventdev_pipeline_sw_pmd/pipeline_common.h
@@ -34,6 +34,22 @@ struct worker_data {
uint8_t port_id;
} __rte_cache_aligned;
+typedef int (*worker_loop)(void *);
+typedef int (*consumer_loop)(void);
+typedef void (*schedule_loop)(unsigned int);
+typedef int (*eventdev_setup)(struct cons_data *, struct worker_data *);
+typedef void (*rx_adapter_setup)(uint16_t nb_ports);
+typedef void (*opt_check)(void);
+
+struct setup_data {
+ worker_loop worker;
+ consumer_loop consumer;
+ schedule_loop scheduler;
+ eventdev_setup evdev_setup;
+ rx_adapter_setup adptr_setup;
+ opt_check check_opt;
+};
+
struct fastpath_data {
volatile int done;
uint32_t tx_lock;
@@ -47,6 +63,7 @@ struct fastpath_data {
unsigned int sched_core[MAX_NUM_CORE];
unsigned int worker_core[MAX_NUM_CORE];
struct rte_eth_dev_tx_buffer *tx_buf[RTE_MAX_ETHPORTS];
+ struct setup_data cap;
} __rte_cache_aligned;
struct config_data {
--
2.15.1
* [dpdk-dev] [PATCH v2 04/15] examples/eventdev: add generic worker pipeline
2018-01-10 11:09 ` [dpdk-dev] [PATCH v2 01/15] examples/eventdev: add Rx adapter support Pavan Nikhilesh
2018-01-10 11:10 ` [dpdk-dev] [PATCH v2 02/15] examples/eventdev: move common data into pipeline common Pavan Nikhilesh
2018-01-10 11:10 ` [dpdk-dev] [PATCH v2 03/15] examples/eventdev: add framework for caps based pipeline Pavan Nikhilesh
@ 2018-01-10 11:10 ` Pavan Nikhilesh
2018-01-10 11:10 ` [dpdk-dev] [PATCH v2 05/15] examples/eventdev: add ops to check cmdline args Pavan Nikhilesh
` (11 subsequent siblings)
14 siblings, 0 replies; 48+ messages in thread
From: Pavan Nikhilesh @ 2018-01-10 11:10 UTC (permalink / raw)
To: gage.eads, jerin.jacobkollanukkaran, harry.van.haaren,
hemant.agrawal, liang.j.ma, santosh.shukla
Cc: dev, Pavan Nikhilesh
Rename existing pipeline as generic worker pipeline.
Signed-off-by: Pavan Nikhilesh <pbhagavatula@caviumnetworks.com>
---
v2 Changes:
- Add SPDX licence tags
examples/eventdev_pipeline_sw_pmd/Makefile | 1 +
examples/eventdev_pipeline_sw_pmd/main.c | 440 +--------------------
.../eventdev_pipeline_sw_pmd/pipeline_common.h | 53 +++
.../pipeline_worker_generic.c | 398 +++++++++++++++++++
4 files changed, 466 insertions(+), 426 deletions(-)
create mode 100644 examples/eventdev_pipeline_sw_pmd/pipeline_worker_generic.c
diff --git a/examples/eventdev_pipeline_sw_pmd/Makefile b/examples/eventdev_pipeline_sw_pmd/Makefile
index de4e22c88..5e30556fb 100644
--- a/examples/eventdev_pipeline_sw_pmd/Makefile
+++ b/examples/eventdev_pipeline_sw_pmd/Makefile
@@ -42,6 +42,7 @@ APP = eventdev_pipeline_sw_pmd
# all source are stored in SRCS-y
SRCS-y := main.c
+SRCS-y += pipeline_worker_generic.c
CFLAGS += -O3
CFLAGS += $(WERROR_FLAGS)
diff --git a/examples/eventdev_pipeline_sw_pmd/main.c b/examples/eventdev_pipeline_sw_pmd/main.c
index 2c919b7fa..295c8b692 100644
--- a/examples/eventdev_pipeline_sw_pmd/main.c
+++ b/examples/eventdev_pipeline_sw_pmd/main.c
@@ -68,179 +68,6 @@ eth_tx_buffer_retry(struct rte_mbuf **pkts, uint16_t unsent,
} while (_sent != unsent);
}
-static int
-consumer(void)
-{
- const uint64_t freq_khz = rte_get_timer_hz() / 1000;
- struct rte_event packets[BATCH_SIZE];
-
- static uint64_t received;
- static uint64_t last_pkts;
- static uint64_t last_time;
- static uint64_t start_time;
- unsigned int i, j;
- uint8_t dev_id = cons_data.dev_id;
- uint8_t port_id = cons_data.port_id;
-
- uint16_t n = rte_event_dequeue_burst(dev_id, port_id,
- packets, RTE_DIM(packets), 0);
-
- if (n == 0) {
- for (j = 0; j < rte_eth_dev_count(); j++)
- rte_eth_tx_buffer_flush(j, 0, fdata->tx_buf[j]);
- return 0;
- }
- if (start_time == 0)
- last_time = start_time = rte_get_timer_cycles();
-
- received += n;
- for (i = 0; i < n; i++) {
- uint8_t outport = packets[i].mbuf->port;
- rte_eth_tx_buffer(outport, 0, fdata->tx_buf[outport],
- packets[i].mbuf);
-
- packets[i].op = RTE_EVENT_OP_RELEASE;
- }
-
- if (cons_data.release) {
- uint16_t nb_tx;
-
- nb_tx = rte_event_enqueue_burst(dev_id, port_id, packets, n);
- while (nb_tx < n)
- nb_tx += rte_event_enqueue_burst(dev_id, port_id,
- packets + nb_tx,
- n - nb_tx);
- }
-
- /* Print out mpps every 1<22 packets */
- if (!cdata.quiet && received >= last_pkts + (1<<22)) {
- const uint64_t now = rte_get_timer_cycles();
- const uint64_t total_ms = (now - start_time) / freq_khz;
- const uint64_t delta_ms = (now - last_time) / freq_khz;
- uint64_t delta_pkts = received - last_pkts;
-
- printf("# consumer RX=%"PRIu64", time %"PRIu64 "ms, "
- "avg %.3f mpps [current %.3f mpps]\n",
- received,
- total_ms,
- received / (total_ms * 1000.0),
- delta_pkts / (delta_ms * 1000.0));
- last_pkts = received;
- last_time = now;
- }
-
- cdata.num_packets -= n;
- if (cdata.num_packets <= 0)
- fdata->done = 1;
-
- return 0;
-}
-
-static inline void
-schedule_devices(unsigned int lcore_id)
-{
- if (fdata->rx_core[lcore_id]) {
- rte_service_run_iter_on_app_lcore(fdata->rxadptr_service_id,
- !fdata->rx_single);
- }
-
- if (fdata->sched_core[lcore_id]) {
- rte_service_run_iter_on_app_lcore(fdata->evdev_service_id,
- !fdata->sched_single);
- if (cdata.dump_dev_signal) {
- rte_event_dev_dump(0, stdout);
- cdata.dump_dev_signal = 0;
- }
- }
-
- if (fdata->tx_core[lcore_id] && (fdata->tx_single ||
- rte_atomic32_cmpset(&(fdata->tx_lock), 0, 1))) {
- consumer();
- rte_atomic32_clear((rte_atomic32_t *)&(fdata->tx_lock));
- }
-}
-
-static inline void
-work(struct rte_mbuf *m)
-{
- struct ether_hdr *eth;
- struct ether_addr addr;
-
- /* change mac addresses on packet (to use mbuf data) */
- /*
- * FIXME Swap mac address properly and also handle the
- * case for both odd and even number of stages that the
- * addresses end up the same at the end of the pipeline
- */
- eth = rte_pktmbuf_mtod(m, struct ether_hdr *);
- ether_addr_copy(ð->d_addr, &addr);
- ether_addr_copy(&addr, ð->d_addr);
-
- /* do a number of cycles of work per packet */
- volatile uint64_t start_tsc = rte_rdtsc();
- while (rte_rdtsc() < start_tsc + cdata.worker_cycles)
- rte_pause();
-}
-
-static int
-worker(void *arg)
-{
- struct rte_event events[BATCH_SIZE];
-
- struct worker_data *data = (struct worker_data *)arg;
- uint8_t dev_id = data->dev_id;
- uint8_t port_id = data->port_id;
- size_t sent = 0, received = 0;
- unsigned int lcore_id = rte_lcore_id();
-
- while (!fdata->done) {
- uint16_t i;
-
- schedule_devices(lcore_id);
-
- if (!fdata->worker_core[lcore_id]) {
- rte_pause();
- continue;
- }
-
- const uint16_t nb_rx = rte_event_dequeue_burst(dev_id, port_id,
- events, RTE_DIM(events), 0);
-
- if (nb_rx == 0) {
- rte_pause();
- continue;
- }
- received += nb_rx;
-
- for (i = 0; i < nb_rx; i++) {
-
- /* The first worker stage does classification */
- if (events[i].queue_id == cdata.qid[0])
- events[i].flow_id = events[i].mbuf->hash.rss
- % cdata.num_fids;
-
- events[i].queue_id = cdata.next_qid[events[i].queue_id];
- events[i].op = RTE_EVENT_OP_FORWARD;
- events[i].sched_type = cdata.queue_type;
-
- work(events[i].mbuf);
- }
- uint16_t nb_tx = rte_event_enqueue_burst(dev_id, port_id,
- events, nb_rx);
- while (nb_tx < nb_rx && !fdata->done)
- nb_tx += rte_event_enqueue_burst(dev_id, port_id,
- events + nb_tx,
- nb_rx - nb_tx);
- sent += nb_tx;
- }
-
- if (!cdata.quiet)
- printf(" worker %u thread done. RX=%zu TX=%zu\n",
- rte_lcore_id(), received, sent);
-
- return 0;
-}
-
/*
* Parse the coremask given as argument (hexadecimal string) and fill
* the global configuration (core role and core count) with the parsed
@@ -453,70 +280,6 @@ parse_app_args(int argc, char **argv)
}
}
-static inline void
-init_rx_adapter(uint16_t nb_ports)
-{
- int i;
- int ret;
- uint8_t evdev_id = 0;
- struct rte_event_dev_info dev_info;
-
- ret = rte_event_dev_info_get(evdev_id, &dev_info);
-
- struct rte_event_port_conf rx_p_conf = {
- .dequeue_depth = 8,
- .enqueue_depth = 8,
- .new_event_threshold = 1200,
- };
-
- if (rx_p_conf.dequeue_depth > dev_info.max_event_port_dequeue_depth)
- rx_p_conf.dequeue_depth = dev_info.max_event_port_dequeue_depth;
- if (rx_p_conf.enqueue_depth > dev_info.max_event_port_enqueue_depth)
- rx_p_conf.enqueue_depth = dev_info.max_event_port_enqueue_depth;
-
- ret = rte_event_eth_rx_adapter_create(cdata.rx_adapter_id, evdev_id,
- &rx_p_conf);
- if (ret)
- rte_exit(EXIT_FAILURE, "failed to create rx adapter[%d]",
- cdata.rx_adapter_id);
-
- struct rte_event_eth_rx_adapter_queue_conf queue_conf = {
- .ev.sched_type = cdata.queue_type,
- .ev.queue_id = cdata.qid[0],
- };
-
- for (i = 0; i < nb_ports; i++) {
- uint32_t cap;
-
- ret = rte_event_eth_rx_adapter_caps_get(evdev_id, i, &cap);
- if (ret)
- rte_exit(EXIT_FAILURE,
- "failed to get event rx adapter "
- "capabilities");
-
- ret = rte_event_eth_rx_adapter_queue_add(cdata.rx_adapter_id, i,
- -1, &queue_conf);
- if (ret)
- rte_exit(EXIT_FAILURE,
- "Failed to add queues to Rx adapter");
- }
-
- ret = rte_event_eth_rx_adapter_service_id_get(cdata.rx_adapter_id,
- &fdata->rxadptr_service_id);
- if (ret != -ESRCH && ret != 0) {
- rte_exit(EXIT_FAILURE,
- "Error getting the service ID for Rx adapter\n");
- }
- rte_service_runstate_set(fdata->rxadptr_service_id, 1);
- rte_service_set_runstate_mapped_check(fdata->rxadptr_service_id, 0);
-
- ret = rte_event_eth_rx_adapter_start(cdata.rx_adapter_id);
- if (ret)
- rte_exit(EXIT_FAILURE, "Rx adapter[%d] start failed",
- cdata.rx_adapter_id);
-
-}
-
/*
* Initializes a given port using global settings and with the RX buffers
* coming from the mbuf_pool passed as a parameter.
@@ -621,191 +384,12 @@ init_ports(unsigned int num_ports)
return 0;
}
-struct port_link {
- uint8_t queue_id;
- uint8_t priority;
-};
-
-static int
-setup_eventdev(struct cons_data *cons_data,
- struct worker_data *worker_data)
+static void
+do_capability_setup(uint16_t nb_ethdev, uint8_t eventdev_id)
{
- const uint8_t dev_id = 0;
- /* +1 stages is for a SINGLE_LINK TX stage */
- const uint8_t nb_queues = cdata.num_stages + 1;
- /* + 1 for consumer */
- const uint8_t nb_ports = cdata.num_workers + 1;
- struct rte_event_dev_config config = {
- .nb_event_queues = nb_queues,
- .nb_event_ports = nb_ports,
- .nb_events_limit = 4096,
- .nb_event_queue_flows = 1024,
- .nb_event_port_dequeue_depth = 128,
- .nb_event_port_enqueue_depth = 128,
- };
- struct rte_event_port_conf wkr_p_conf = {
- .dequeue_depth = cdata.worker_cq_depth,
- .enqueue_depth = 64,
- .new_event_threshold = 4096,
- };
- struct rte_event_queue_conf wkr_q_conf = {
- .schedule_type = cdata.queue_type,
- .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
- .nb_atomic_flows = 1024,
- .nb_atomic_order_sequences = 1024,
- };
- struct rte_event_port_conf tx_p_conf = {
- .dequeue_depth = 128,
- .enqueue_depth = 128,
- .new_event_threshold = 4096,
- };
- const struct rte_event_queue_conf tx_q_conf = {
- .priority = RTE_EVENT_DEV_PRIORITY_HIGHEST,
- .event_queue_cfg = RTE_EVENT_QUEUE_CFG_SINGLE_LINK,
- };
-
- struct port_link worker_queues[MAX_NUM_STAGES];
- uint8_t disable_implicit_release;
- struct port_link tx_queue;
- unsigned int i;
-
- int ret, ndev = rte_event_dev_count();
- if (ndev < 1) {
- printf("%d: No Eventdev Devices Found\n", __LINE__);
- return -1;
- }
-
- struct rte_event_dev_info dev_info;
- ret = rte_event_dev_info_get(dev_id, &dev_info);
- printf("\tEventdev %d: %s\n", dev_id, dev_info.driver_name);
-
- disable_implicit_release = (dev_info.event_dev_cap &
- RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE);
-
- wkr_p_conf.disable_implicit_release = disable_implicit_release;
- tx_p_conf.disable_implicit_release = disable_implicit_release;
-
- if (dev_info.max_event_port_dequeue_depth <
- config.nb_event_port_dequeue_depth)
- config.nb_event_port_dequeue_depth =
- dev_info.max_event_port_dequeue_depth;
- if (dev_info.max_event_port_enqueue_depth <
- config.nb_event_port_enqueue_depth)
- config.nb_event_port_enqueue_depth =
- dev_info.max_event_port_enqueue_depth;
-
- ret = rte_event_dev_configure(dev_id, &config);
- if (ret < 0) {
- printf("%d: Error configuring device\n", __LINE__);
- return -1;
- }
-
- /* Q creation - one load balanced per pipeline stage*/
- printf(" Stages:\n");
- for (i = 0; i < cdata.num_stages; i++) {
- if (rte_event_queue_setup(dev_id, i, &wkr_q_conf) < 0) {
- printf("%d: error creating qid %d\n", __LINE__, i);
- return -1;
- }
- cdata.qid[i] = i;
- cdata.next_qid[i] = i+1;
- worker_queues[i].queue_id = i;
- if (cdata.enable_queue_priorities) {
- /* calculate priority stepping for each stage, leaving
- * headroom of 1 for the SINGLE_LINK TX below
- */
- const uint32_t prio_delta =
- (RTE_EVENT_DEV_PRIORITY_LOWEST-1) / nb_queues;
-
- /* higher priority for queues closer to tx */
- wkr_q_conf.priority =
- RTE_EVENT_DEV_PRIORITY_LOWEST - prio_delta * i;
- }
-
- const char *type_str = "Atomic";
- switch (wkr_q_conf.schedule_type) {
- case RTE_SCHED_TYPE_ORDERED:
- type_str = "Ordered";
- break;
- case RTE_SCHED_TYPE_PARALLEL:
- type_str = "Parallel";
- break;
- }
- printf("\tStage %d, Type %s\tPriority = %d\n", i, type_str,
- wkr_q_conf.priority);
- }
- printf("\n");
-
- /* final queue for sending to TX core */
- if (rte_event_queue_setup(dev_id, i, &tx_q_conf) < 0) {
- printf("%d: error creating qid %d\n", __LINE__, i);
- return -1;
- }
- tx_queue.queue_id = i;
- tx_queue.priority = RTE_EVENT_DEV_PRIORITY_HIGHEST;
-
- if (wkr_p_conf.dequeue_depth > config.nb_event_port_dequeue_depth)
- wkr_p_conf.dequeue_depth = config.nb_event_port_dequeue_depth;
- if (wkr_p_conf.enqueue_depth > config.nb_event_port_enqueue_depth)
- wkr_p_conf.enqueue_depth = config.nb_event_port_enqueue_depth;
-
- /* set up one port per worker, linking to all stage queues */
- for (i = 0; i < cdata.num_workers; i++) {
- struct worker_data *w = &worker_data[i];
- w->dev_id = dev_id;
- if (rte_event_port_setup(dev_id, i, &wkr_p_conf) < 0) {
- printf("Error setting up port %d\n", i);
- return -1;
- }
-
- uint32_t s;
- for (s = 0; s < cdata.num_stages; s++) {
- if (rte_event_port_link(dev_id, i,
- &worker_queues[s].queue_id,
- &worker_queues[s].priority,
- 1) != 1) {
- printf("%d: error creating link for port %d\n",
- __LINE__, i);
- return -1;
- }
- }
- w->port_id = i;
- }
-
- if (tx_p_conf.dequeue_depth > config.nb_event_port_dequeue_depth)
- tx_p_conf.dequeue_depth = config.nb_event_port_dequeue_depth;
- if (tx_p_conf.enqueue_depth > config.nb_event_port_enqueue_depth)
- tx_p_conf.enqueue_depth = config.nb_event_port_enqueue_depth;
-
- /* port for consumer, linked to TX queue */
- if (rte_event_port_setup(dev_id, i, &tx_p_conf) < 0) {
- printf("Error setting up port %d\n", i);
- return -1;
- }
- if (rte_event_port_link(dev_id, i, &tx_queue.queue_id,
- &tx_queue.priority, 1) != 1) {
- printf("%d: error creating link for port %d\n",
- __LINE__, i);
- return -1;
- }
- *cons_data = (struct cons_data){.dev_id = dev_id,
- .port_id = i,
- .release = disable_implicit_release };
-
- ret = rte_event_dev_service_id_get(dev_id,
- &fdata->evdev_service_id);
- if (ret != -ESRCH && ret != 0) {
- printf("Error getting the service ID for sw eventdev\n");
- return -1;
- }
- rte_service_runstate_set(fdata->evdev_service_id, 1);
- rte_service_set_runstate_mapped_check(fdata->evdev_service_id, 0);
- if (rte_event_dev_start(dev_id) < 0) {
- printf("Error starting eventdev\n");
- return -1;
- }
-
- return dev_id;
+ RTE_SET_USED(nb_ethdev);
+ RTE_SET_USED(eventdev_id);
+ set_worker_generic_setup_data(&fdata->cap, 1);
}
static void
@@ -886,17 +470,21 @@ main(int argc, char **argv)
if (ndevs > 1)
fprintf(stderr, "Warning: More than one eventdev, using idx 0");
+
+ do_capability_setup(num_ports, 0);
+ fdata->cap.check_opt();
+
worker_data = rte_calloc(0, cdata.num_workers,
sizeof(worker_data[0]), 0);
if (worker_data == NULL)
rte_panic("rte_calloc failed\n");
- int dev_id = setup_eventdev(&cons_data, worker_data);
+ int dev_id = fdata->cap.evdev_setup(&cons_data, worker_data);
if (dev_id < 0)
rte_exit(EXIT_FAILURE, "Error setting up eventdev\n");
init_ports(num_ports);
- init_rx_adapter(num_ports);
+ fdata->cap.adptr_setup(num_ports);
int worker_idx = 0;
RTE_LCORE_FOREACH_SLAVE(lcore_id) {
@@ -929,8 +517,8 @@ main(int argc, char **argv)
__func__, lcore_id,
worker_data[worker_idx].port_id);
- err = rte_eal_remote_launch(worker, &worker_data[worker_idx],
- lcore_id);
+ err = rte_eal_remote_launch(fdata->cap.worker,
+ &worker_data[worker_idx], lcore_id);
if (err) {
rte_panic("Failed to launch worker on core %d\n",
lcore_id);
@@ -943,7 +531,7 @@ main(int argc, char **argv)
lcore_id = rte_lcore_id();
if (core_in_use(lcore_id))
- worker(&worker_data[worker_idx++]);
+ fdata->cap.worker(&worker_data[worker_idx++]);
rte_eal_mp_wait_lcore();
diff --git a/examples/eventdev_pipeline_sw_pmd/pipeline_common.h b/examples/eventdev_pipeline_sw_pmd/pipeline_common.h
index 00721ea94..379ba9d4b 100644
--- a/examples/eventdev_pipeline_sw_pmd/pipeline_common.h
+++ b/examples/eventdev_pipeline_sw_pmd/pipeline_common.h
@@ -84,7 +84,60 @@ struct config_data {
uint8_t rx_adapter_id;
};
+struct port_link {
+ uint8_t queue_id;
+ uint8_t priority;
+};
+
struct cons_data cons_data;
struct fastpath_data *fdata;
struct config_data cdata;
+
+static __rte_always_inline void
+work(struct rte_mbuf *m)
+{
+ struct ether_hdr *eth;
+ struct ether_addr addr;
+
+ /* change mac addresses on packet (to use mbuf data) */
+ /*
+ * FIXME Swap mac address properly and also handle the
+ * case for both odd and even number of stages that the
+ * addresses end up the same at the end of the pipeline
+ */
+ eth = rte_pktmbuf_mtod(m, struct ether_hdr *);
+ ether_addr_copy(ð->d_addr, &addr);
+ ether_addr_copy(&addr, ð->d_addr);
+
+ /* do a number of cycles of work per packet */
+ volatile uint64_t start_tsc = rte_rdtsc();
+ while (rte_rdtsc() < start_tsc + cdata.worker_cycles)
+ rte_pause();
+}
+
+static __rte_always_inline void
+schedule_devices(unsigned int lcore_id)
+{
+ if (fdata->rx_core[lcore_id]) {
+ rte_service_run_iter_on_app_lcore(fdata->rxadptr_service_id,
+ !fdata->rx_single);
+ }
+
+ if (fdata->sched_core[lcore_id]) {
+ rte_service_run_iter_on_app_lcore(fdata->evdev_service_id,
+ !fdata->sched_single);
+ if (cdata.dump_dev_signal) {
+ rte_event_dev_dump(0, stdout);
+ cdata.dump_dev_signal = 0;
+ }
+ }
+
+ if (fdata->tx_core[lcore_id] && (fdata->tx_single ||
+ rte_atomic32_cmpset(&(fdata->tx_lock), 0, 1))) {
+ fdata->cap.consumer();
+ rte_atomic32_clear((rte_atomic32_t *)&(fdata->tx_lock));
+ }
+}
+
+void set_worker_generic_setup_data(struct setup_data *caps, bool burst);
diff --git a/examples/eventdev_pipeline_sw_pmd/pipeline_worker_generic.c b/examples/eventdev_pipeline_sw_pmd/pipeline_worker_generic.c
new file mode 100644
index 000000000..d2bc6d355
--- /dev/null
+++ b/examples/eventdev_pipeline_sw_pmd/pipeline_worker_generic.c
@@ -0,0 +1,398 @@
+/*
+ * SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2016 Intel Corporation.
+ * Copyright 2017 Cavium, Inc.
+ */
+
+#include "pipeline_common.h"
+
+static int
+worker_generic_burst(void *arg)
+{
+ struct rte_event events[BATCH_SIZE];
+
+ struct worker_data *data = (struct worker_data *)arg;
+ uint8_t dev_id = data->dev_id;
+ uint8_t port_id = data->port_id;
+ size_t sent = 0, received = 0;
+ unsigned int lcore_id = rte_lcore_id();
+
+ while (!fdata->done) {
+ uint16_t i;
+
+ if (fdata->cap.scheduler)
+ fdata->cap.scheduler(lcore_id);
+
+ if (!fdata->worker_core[lcore_id]) {
+ rte_pause();
+ continue;
+ }
+
+ const uint16_t nb_rx = rte_event_dequeue_burst(dev_id, port_id,
+ events, RTE_DIM(events), 0);
+
+ if (nb_rx == 0) {
+ rte_pause();
+ continue;
+ }
+ received += nb_rx;
+
+ for (i = 0; i < nb_rx; i++) {
+
+ /* The first worker stage does classification */
+ if (events[i].queue_id == cdata.qid[0])
+ events[i].flow_id = events[i].mbuf->hash.rss
+ % cdata.num_fids;
+
+ events[i].queue_id = cdata.next_qid[events[i].queue_id];
+ events[i].op = RTE_EVENT_OP_FORWARD;
+ events[i].sched_type = cdata.queue_type;
+
+ work(events[i].mbuf);
+ }
+ uint16_t nb_tx = rte_event_enqueue_burst(dev_id, port_id,
+ events, nb_rx);
+ while (nb_tx < nb_rx && !fdata->done)
+ nb_tx += rte_event_enqueue_burst(dev_id, port_id,
+ events + nb_tx,
+ nb_rx - nb_tx);
+ sent += nb_tx;
+ }
+
+ if (!cdata.quiet)
+ printf(" worker %u thread done. RX=%zu TX=%zu\n",
+ rte_lcore_id(), received, sent);
+
+ return 0;
+}
+
+static __rte_always_inline int
+consumer_burst(void)
+{
+ const uint64_t freq_khz = rte_get_timer_hz() / 1000;
+ struct rte_event packets[BATCH_SIZE];
+
+ static uint64_t received;
+ static uint64_t last_pkts;
+ static uint64_t last_time;
+ static uint64_t start_time;
+ unsigned int i, j;
+ uint8_t dev_id = cons_data.dev_id;
+ uint8_t port_id = cons_data.port_id;
+ uint16_t nb_ports = rte_eth_dev_count();
+
+ do {
+ uint16_t n = rte_event_dequeue_burst(dev_id, port_id,
+ packets, RTE_DIM(packets), 0);
+
+ if (n == 0) {
+ for (j = 0; j < nb_ports; j++)
+ rte_eth_tx_buffer_flush(j, 0, fdata->tx_buf[j]);
+ return 0;
+ }
+ if (start_time == 0)
+ last_time = start_time = rte_get_timer_cycles();
+
+ received += n;
+ for (i = 0; i < n; i++) {
+ uint8_t outport = packets[i].mbuf->port;
+ rte_eth_tx_buffer(outport, 0, fdata->tx_buf[outport],
+ packets[i].mbuf);
+
+ packets[i].op = RTE_EVENT_OP_RELEASE;
+ }
+
+ if (cons_data.release) {
+ uint16_t nb_tx;
+
+ nb_tx = rte_event_enqueue_burst(dev_id, port_id,
+ packets, n);
+ while (nb_tx < n)
+ nb_tx += rte_event_enqueue_burst(dev_id,
+ port_id, packets + nb_tx,
+ n - nb_tx);
+ }
+
+ /* Print out mpps every 1<<22 packets */
+ if (!cdata.quiet && received >= last_pkts + (1<<22)) {
+ const uint64_t now = rte_get_timer_cycles();
+ const uint64_t total_ms = (now - start_time) / freq_khz;
+ const uint64_t delta_ms = (now - last_time) / freq_khz;
+ uint64_t delta_pkts = received - last_pkts;
+
+ printf("# consumer RX=%"PRIu64", time %"PRIu64 "ms, "
+ "avg %.3f mpps [current %.3f mpps]\n",
+ received,
+ total_ms,
+ received / (total_ms * 1000.0),
+ delta_pkts / (delta_ms * 1000.0));
+ last_pkts = received;
+ last_time = now;
+ }
+
+ cdata.num_packets -= n;
+ if (cdata.num_packets <= 0)
+ fdata->done = 1;
+ /* Be stuck in this loop if single. */
+ } while (!fdata->done && fdata->tx_single);
+
+ return 0;
+}
+
+static int
+setup_eventdev_generic(struct cons_data *cons_data,
+ struct worker_data *worker_data)
+{
+ const uint8_t dev_id = 0;
+ /* +1 stages is for a SINGLE_LINK TX stage */
+ const uint8_t nb_queues = cdata.num_stages + 1;
+ /* + 1 is one port for consumer */
+ const uint8_t nb_ports = cdata.num_workers + 1;
+ struct rte_event_dev_config config = {
+ .nb_event_queues = nb_queues,
+ .nb_event_ports = nb_ports,
+ .nb_events_limit = 4096,
+ .nb_event_queue_flows = 1024,
+ .nb_event_port_dequeue_depth = 128,
+ .nb_event_port_enqueue_depth = 128,
+ };
+ struct rte_event_port_conf wkr_p_conf = {
+ .dequeue_depth = cdata.worker_cq_depth,
+ .enqueue_depth = 64,
+ .new_event_threshold = 4096,
+ };
+ struct rte_event_queue_conf wkr_q_conf = {
+ .schedule_type = cdata.queue_type,
+ .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
+ .nb_atomic_flows = 1024,
+ .nb_atomic_order_sequences = 1024,
+ };
+ struct rte_event_port_conf tx_p_conf = {
+ .dequeue_depth = 128,
+ .enqueue_depth = 128,
+ .new_event_threshold = 4096,
+ };
+ struct rte_event_queue_conf tx_q_conf = {
+ .priority = RTE_EVENT_DEV_PRIORITY_HIGHEST,
+ .event_queue_cfg = RTE_EVENT_QUEUE_CFG_SINGLE_LINK,
+ };
+
+ struct port_link worker_queues[MAX_NUM_STAGES];
+ uint8_t disable_implicit_release;
+ struct port_link tx_queue;
+ unsigned int i;
+
+ int ret, ndev = rte_event_dev_count();
+ if (ndev < 1) {
+ printf("%d: No Eventdev Devices Found\n", __LINE__);
+ return -1;
+ }
+
+ struct rte_event_dev_info dev_info;
+ ret = rte_event_dev_info_get(dev_id, &dev_info);
+ printf("\tEventdev %d: %s\n", dev_id, dev_info.driver_name);
+
+ disable_implicit_release = (dev_info.event_dev_cap &
+ RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE);
+
+ wkr_p_conf.disable_implicit_release = disable_implicit_release;
+ tx_p_conf.disable_implicit_release = disable_implicit_release;
+
+ if (dev_info.max_event_port_dequeue_depth <
+ config.nb_event_port_dequeue_depth)
+ config.nb_event_port_dequeue_depth =
+ dev_info.max_event_port_dequeue_depth;
+ if (dev_info.max_event_port_enqueue_depth <
+ config.nb_event_port_enqueue_depth)
+ config.nb_event_port_enqueue_depth =
+ dev_info.max_event_port_enqueue_depth;
+
+ ret = rte_event_dev_configure(dev_id, &config);
+ if (ret < 0) {
+ printf("%d: Error configuring device\n", __LINE__);
+ return -1;
+ }
+
+ /* Q creation - one load balanced per pipeline stage*/
+ printf(" Stages:\n");
+ for (i = 0; i < cdata.num_stages; i++) {
+ if (rte_event_queue_setup(dev_id, i, &wkr_q_conf) < 0) {
+ printf("%d: error creating qid %d\n", __LINE__, i);
+ return -1;
+ }
+ cdata.qid[i] = i;
+ cdata.next_qid[i] = i+1;
+ worker_queues[i].queue_id = i;
+ if (cdata.enable_queue_priorities) {
+ /* calculate priority stepping for each stage, leaving
+ * headroom of 1 for the SINGLE_LINK TX below
+ */
+ const uint32_t prio_delta =
+ (RTE_EVENT_DEV_PRIORITY_LOWEST-1) / nb_queues;
+
+ /* higher priority for queues closer to tx */
+ wkr_q_conf.priority =
+ RTE_EVENT_DEV_PRIORITY_LOWEST - prio_delta * i;
+ }
+
+ const char *type_str = "Atomic";
+ switch (wkr_q_conf.schedule_type) {
+ case RTE_SCHED_TYPE_ORDERED:
+ type_str = "Ordered";
+ break;
+ case RTE_SCHED_TYPE_PARALLEL:
+ type_str = "Parallel";
+ break;
+ }
+ printf("\tStage %d, Type %s\tPriority = %d\n", i, type_str,
+ wkr_q_conf.priority);
+ }
+ printf("\n");
+
+ /* final queue for sending to TX core */
+ if (rte_event_queue_setup(dev_id, i, &tx_q_conf) < 0) {
+ printf("%d: error creating qid %d\n", __LINE__, i);
+ return -1;
+ }
+ tx_queue.queue_id = i;
+ tx_queue.priority = RTE_EVENT_DEV_PRIORITY_HIGHEST;
+
+ if (wkr_p_conf.dequeue_depth > config.nb_event_port_dequeue_depth)
+ wkr_p_conf.dequeue_depth = config.nb_event_port_dequeue_depth;
+ if (wkr_p_conf.enqueue_depth > config.nb_event_port_enqueue_depth)
+ wkr_p_conf.enqueue_depth = config.nb_event_port_enqueue_depth;
+
+ /* set up one port per worker, linking to all stage queues */
+ for (i = 0; i < cdata.num_workers; i++) {
+ struct worker_data *w = &worker_data[i];
+ w->dev_id = dev_id;
+ if (rte_event_port_setup(dev_id, i, &wkr_p_conf) < 0) {
+ printf("Error setting up port %d\n", i);
+ return -1;
+ }
+
+ uint32_t s;
+ for (s = 0; s < cdata.num_stages; s++) {
+ if (rte_event_port_link(dev_id, i,
+ &worker_queues[s].queue_id,
+ &worker_queues[s].priority,
+ 1) != 1) {
+ printf("%d: error creating link for port %d\n",
+ __LINE__, i);
+ return -1;
+ }
+ }
+ w->port_id = i;
+ }
+
+ if (tx_p_conf.dequeue_depth > config.nb_event_port_dequeue_depth)
+ tx_p_conf.dequeue_depth = config.nb_event_port_dequeue_depth;
+ if (tx_p_conf.enqueue_depth > config.nb_event_port_enqueue_depth)
+ tx_p_conf.enqueue_depth = config.nb_event_port_enqueue_depth;
+
+ /* port for consumer, linked to TX queue */
+ if (rte_event_port_setup(dev_id, i, &tx_p_conf) < 0) {
+ printf("Error setting up port %d\n", i);
+ return -1;
+ }
+ if (rte_event_port_link(dev_id, i, &tx_queue.queue_id,
+ &tx_queue.priority, 1) != 1) {
+ printf("%d: error creating link for port %d\n",
+ __LINE__, i);
+ return -1;
+ }
+ *cons_data = (struct cons_data){.dev_id = dev_id,
+ .port_id = i,
+ .release = disable_implicit_release };
+
+ ret = rte_event_dev_service_id_get(dev_id,
+ &fdata->evdev_service_id);
+ if (ret != -ESRCH && ret != 0) {
+ printf("Error getting the service ID for sw eventdev\n");
+ return -1;
+ }
+ rte_service_runstate_set(fdata->evdev_service_id, 1);
+ rte_service_set_runstate_mapped_check(fdata->evdev_service_id, 0);
+ if (rte_event_dev_start(dev_id) < 0) {
+ printf("Error starting eventdev\n");
+ return -1;
+ }
+
+ return dev_id;
+}
+
+static void
+init_rx_adapter(uint16_t nb_ports)
+{
+ int i;
+ int ret;
+ uint8_t evdev_id = 0;
+ struct rte_event_dev_info dev_info;
+
+ ret = rte_event_dev_info_get(evdev_id, &dev_info);
+
+ struct rte_event_port_conf rx_p_conf = {
+ .dequeue_depth = 8,
+ .enqueue_depth = 8,
+ .new_event_threshold = 1200,
+ };
+
+ if (rx_p_conf.dequeue_depth > dev_info.max_event_port_dequeue_depth)
+ rx_p_conf.dequeue_depth = dev_info.max_event_port_dequeue_depth;
+ if (rx_p_conf.enqueue_depth > dev_info.max_event_port_enqueue_depth)
+ rx_p_conf.enqueue_depth = dev_info.max_event_port_enqueue_depth;
+
+ /* Create one adapter for all the ethernet ports. */
+ ret = rte_event_eth_rx_adapter_create(cdata.rx_adapter_id, evdev_id,
+ &rx_p_conf);
+ if (ret)
+ rte_exit(EXIT_FAILURE, "failed to create rx adapter[%d]",
+ cdata.rx_adapter_id);
+
+ struct rte_event_eth_rx_adapter_queue_conf queue_conf = {
+ .ev.sched_type = cdata.queue_type,
+ .ev.queue_id = cdata.qid[0],
+ };
+
+ for (i = 0; i < nb_ports; i++) {
+ uint32_t cap;
+
+ ret = rte_event_eth_rx_adapter_caps_get(evdev_id, i, &cap);
+ if (ret)
+ rte_exit(EXIT_FAILURE,
+ "failed to get event rx adapter "
+ "capabilities");
+
+ ret = rte_event_eth_rx_adapter_queue_add(cdata.rx_adapter_id, i,
+ -1, &queue_conf);
+ if (ret)
+ rte_exit(EXIT_FAILURE,
+ "Failed to add queues to Rx adapter");
+ }
+
+ ret = rte_event_eth_rx_adapter_service_id_get(cdata.rx_adapter_id,
+ &fdata->rxadptr_service_id);
+ if (ret != -ESRCH && ret != 0) {
+ rte_exit(EXIT_FAILURE,
+ "Error getting the service ID for sw eventdev\n");
+ }
+ rte_service_runstate_set(fdata->rxadptr_service_id, 1);
+ rte_service_set_runstate_mapped_check(fdata->rxadptr_service_id, 0);
+
+ ret = rte_event_eth_rx_adapter_start(cdata.rx_adapter_id);
+ if (ret)
+ rte_exit(EXIT_FAILURE, "Rx adapter[%d] start failed",
+ cdata.rx_adapter_id);
+}
+
+void
+set_worker_generic_setup_data(struct setup_data *caps, bool burst)
+{
+ RTE_SET_USED(burst);
+ caps->consumer = consumer_burst;
+ caps->worker = worker_generic_burst;
+
+ caps->adptr_setup = init_rx_adapter;
+ caps->scheduler = schedule_devices;
+ caps->evdev_setup = setup_eventdev_generic;
+}
--
2.15.1
^ permalink raw reply [flat|nested] 48+ messages in thread
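For reference, a minimal sketch of the service-driven Rx model used above, assuming the adapter has no internal port and therefore exposes a service (variable names here are illustrative):

    uint32_t svc_id;
    int ret = rte_event_eth_rx_adapter_service_id_get(adapter_id, &svc_id);
    if (ret == 0) {
        /* run the service on application lcores instead of service cores */
        rte_service_runstate_set(svc_id, 1);
        rte_service_set_runstate_mapped_check(svc_id, 0);
        while (!done)
            /* poll the eth Rx queues and enqueue events; the second
             * argument serializes multi-thread unsafe services */
            rte_service_run_iter_on_app_lcore(svc_id, 1);
    }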
* [dpdk-dev] [PATCH v2 05/15] examples/eventdev: add ops to check cmdline args
2018-01-10 11:09 ` [dpdk-dev] [PATCH v2 01/15] examples/eventdev: add Rx adapter support Pavan Nikhilesh
` (2 preceding siblings ...)
2018-01-10 11:10 ` [dpdk-dev] [PATCH v2 04/15] examples/eventdev: add generic worker pipeline Pavan Nikhilesh
@ 2018-01-10 11:10 ` Pavan Nikhilesh
2018-01-10 11:10 ` [dpdk-dev] [PATCH v2 06/15] examples/eventdev: add non burst mode generic worker Pavan Nikhilesh
` (10 subsequent siblings)
14 siblings, 0 replies; 48+ messages in thread
From: Pavan Nikhilesh @ 2018-01-10 11:10 UTC (permalink / raw)
To: gage.eads, jerin.jacobkollanukkaran, harry.van.haaren,
hemant.agrawal, liang.j.ma, santosh.shukla
Cc: dev, Pavan Nikhilesh
Each eventdev pipeline needs to allow a different combination of cmdline
arguments based on the pipeline type.
Signed-off-by: Pavan Nikhilesh <pbhagavatula@caviumnetworks.com>
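A condensed sketch of the hook this patch adds (the main() wiring is assumed from the diffstat below): each pipeline fills a check_opt callback in its setup_data, and main() invokes it after argument parsing, so each pipeline can validate its own argument combinations:

    /* sketch; assumes main() calls the hook after capability setup */
    parse_app_args(argc, argv);
    do_capability_setup(num_ports, 0);
    if (fdata->cap.check_opt)
        fdata->cap.check_opt();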
---
v2 Changes:
- remove redundant split in printf
examples/eventdev_pipeline_sw_pmd/main.c | 16 +++-----
.../eventdev_pipeline_sw_pmd/pipeline_common.h | 4 ++
.../pipeline_worker_generic.c | 43 ++++++++++++++++++++++
3 files changed, 52 insertions(+), 11 deletions(-)
diff --git a/examples/eventdev_pipeline_sw_pmd/main.c b/examples/eventdev_pipeline_sw_pmd/main.c
index 295c8b692..9e6061643 100644
--- a/examples/eventdev_pipeline_sw_pmd/main.c
+++ b/examples/eventdev_pipeline_sw_pmd/main.c
@@ -253,17 +253,11 @@ parse_app_args(int argc, char **argv)
}
}
- if (worker_lcore_mask == 0 || rx_lcore_mask == 0 ||
- sched_lcore_mask == 0 || tx_lcore_mask == 0) {
- printf("Core part of pipeline was not assigned any cores. "
- "This will stall the pipeline, please check core masks "
- "(use -h for details on setting core masks):\n"
- "\trx: %"PRIu64"\n\ttx: %"PRIu64"\n\tsched: %"PRIu64
- "\n\tworkers: %"PRIu64"\n",
- rx_lcore_mask, tx_lcore_mask, sched_lcore_mask,
- worker_lcore_mask);
- rte_exit(-1, "Fix core masks\n");
- }
+ cdata.worker_lcore_mask = worker_lcore_mask;
+ cdata.sched_lcore_mask = sched_lcore_mask;
+ cdata.rx_lcore_mask = rx_lcore_mask;
+ cdata.tx_lcore_mask = tx_lcore_mask;
+
if (cdata.num_stages == 0 || cdata.num_stages > MAX_NUM_STAGES)
usage();
diff --git a/examples/eventdev_pipeline_sw_pmd/pipeline_common.h b/examples/eventdev_pipeline_sw_pmd/pipeline_common.h
index 379ba9d4b..9e1f5e9f0 100644
--- a/examples/eventdev_pipeline_sw_pmd/pipeline_common.h
+++ b/examples/eventdev_pipeline_sw_pmd/pipeline_common.h
@@ -82,6 +82,10 @@ struct config_data {
int16_t next_qid[MAX_NUM_STAGES+2];
int16_t qid[MAX_NUM_STAGES];
uint8_t rx_adapter_id;
+ uint64_t worker_lcore_mask;
+ uint64_t rx_lcore_mask;
+ uint64_t tx_lcore_mask;
+ uint64_t sched_lcore_mask;
};
struct port_link {
diff --git a/examples/eventdev_pipeline_sw_pmd/pipeline_worker_generic.c b/examples/eventdev_pipeline_sw_pmd/pipeline_worker_generic.c
index d2bc6d355..d1b0e1db1 100644
--- a/examples/eventdev_pipeline_sw_pmd/pipeline_worker_generic.c
+++ b/examples/eventdev_pipeline_sw_pmd/pipeline_worker_generic.c
@@ -385,6 +385,48 @@ init_rx_adapter(uint16_t nb_ports)
cdata.rx_adapter_id);
}
+static void
+generic_opt_check(void)
+{
+ int i;
+ int ret;
+ uint32_t cap = 0;
+ uint8_t rx_needed = 0;
+ struct rte_event_dev_info eventdev_info;
+
+ memset(&eventdev_info, 0, sizeof(struct rte_event_dev_info));
+ rte_event_dev_info_get(0, &eventdev_info);
+
+ for (i = 0; i < rte_eth_dev_count(); i++) {
+ ret = rte_event_eth_rx_adapter_caps_get(0, i, &cap);
+ if (ret)
+ rte_exit(EXIT_FAILURE,
+ "failed to get event rx adapter capabilities");
+ rx_needed |=
+ !(cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT);
+ }
+
+ if (cdata.worker_lcore_mask == 0 ||
+ (rx_needed && cdata.rx_lcore_mask == 0) ||
+ cdata.tx_lcore_mask == 0 || (cdata.sched_lcore_mask == 0
+ && !(eventdev_info.event_dev_cap &
+ RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED))) {
+ printf("Core part of pipeline was not assigned any cores. "
+ "This will stall the pipeline, please check core masks "
+ "(use -h for details on setting core masks):\n"
+ "\trx: %"PRIu64"\n\ttx: %"PRIu64"\n\tsched: %"PRIu64
+ "\n\tworkers: %"PRIu64"\n",
+ cdata.rx_lcore_mask, cdata.tx_lcore_mask,
+ cdata.sched_lcore_mask,
+ cdata.worker_lcore_mask);
+ rte_exit(-1, "Fix core masks\n");
+ }
+
+ if (eventdev_info.event_dev_cap & RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED)
+ memset(fdata->sched_core, 0,
+ sizeof(unsigned int) * MAX_NUM_CORE);
+}
+
void
set_worker_generic_setup_data(struct setup_data *caps, bool burst)
{
@@ -395,4 +437,5 @@ set_worker_generic_setup_data(struct setup_data *caps, bool burst)
caps->adptr_setup = init_rx_adapter;
caps->scheduler = schedule_devices;
caps->evdev_setup = setup_eventdev_generic;
+ caps->check_opt = generic_opt_check;
}
--
2.15.1
^ permalink raw reply [flat|nested] 48+ messages in thread
* [dpdk-dev] [PATCH v2 06/15] examples/eventdev: add non burst mode generic worker
2018-01-10 11:09 ` [dpdk-dev] [PATCH v2 01/15] examples/eventdev: add Rx adapter support Pavan Nikhilesh
` (3 preceding siblings ...)
2018-01-10 11:10 ` [dpdk-dev] [PATCH v2 05/15] examples/eventdev: add ops to check cmdline args Pavan Nikhilesh
@ 2018-01-10 11:10 ` Pavan Nikhilesh
2018-01-10 11:10 ` [dpdk-dev] [PATCH v2 07/15] examples/eventdev: modify work cycles Pavan Nikhilesh
` (9 subsequent siblings)
14 siblings, 0 replies; 48+ messages in thread
From: Pavan Nikhilesh @ 2018-01-10 11:10 UTC (permalink / raw)
To: gage.eads, jerin.jacobkollanukkaran, harry.van.haaren,
hemant.agrawal, liang.j.ma, santosh.shukla
Cc: dev, Pavan Nikhilesh
Currently, the worker uses burst dequeue and burst enqueue to forward events.
Add a non-burst mode selected based on the event device capabilities.
Signed-off-by: Pavan Nikhilesh <pbhagavatula@caviumnetworks.com>
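The selection logic, condensed from the diff below: probe the device once and pick the worker variant from its capability bits.

    struct rte_event_dev_info info;

    rte_event_dev_info_get(eventdev_id, &info);
    if (info.event_dev_cap & RTE_EVENT_DEV_CAP_BURST_MODE)
        set_worker_generic_setup_data(&fdata->cap, 1); /* burst dequeue/enqueue */
    else
        set_worker_generic_setup_data(&fdata->cap, 0); /* one event at a time */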
---
examples/eventdev_pipeline_sw_pmd/main.c | 12 +-
.../pipeline_worker_generic.c | 126 ++++++++++++++++++++-
2 files changed, 133 insertions(+), 5 deletions(-)
diff --git a/examples/eventdev_pipeline_sw_pmd/main.c b/examples/eventdev_pipeline_sw_pmd/main.c
index 9e6061643..947c5f786 100644
--- a/examples/eventdev_pipeline_sw_pmd/main.c
+++ b/examples/eventdev_pipeline_sw_pmd/main.c
@@ -382,8 +382,16 @@ static void
do_capability_setup(uint16_t nb_ethdev, uint8_t eventdev_id)
{
RTE_SET_USED(nb_ethdev);
- RTE_SET_USED(eventdev_id);
- set_worker_generic_setup_data(&fdata->cap, 1);
+ uint8_t burst = 0;
+
+ struct rte_event_dev_info eventdev_info;
+ memset(&eventdev_info, 0, sizeof(struct rte_event_dev_info));
+
+ rte_event_dev_info_get(eventdev_id, &eventdev_info);
+ burst = eventdev_info.event_dev_cap & RTE_EVENT_DEV_CAP_BURST_MODE ? 1 :
+ 0;
+
+ set_worker_generic_setup_data(&fdata->cap, burst);
}
static void
diff --git a/examples/eventdev_pipeline_sw_pmd/pipeline_worker_generic.c b/examples/eventdev_pipeline_sw_pmd/pipeline_worker_generic.c
index d1b0e1db1..f4523902b 100644
--- a/examples/eventdev_pipeline_sw_pmd/pipeline_worker_generic.c
+++ b/examples/eventdev_pipeline_sw_pmd/pipeline_worker_generic.c
@@ -6,6 +6,59 @@
#include "pipeline_common.h"
+static __rte_always_inline int
+worker_generic(void *arg)
+{
+ struct rte_event ev;
+
+ struct worker_data *data = (struct worker_data *)arg;
+ uint8_t dev_id = data->dev_id;
+ uint8_t port_id = data->port_id;
+ size_t sent = 0, received = 0;
+ unsigned int lcore_id = rte_lcore_id();
+
+ while (!fdata->done) {
+
+ if (fdata->cap.scheduler)
+ fdata->cap.scheduler(lcore_id);
+
+ if (!fdata->worker_core[lcore_id]) {
+ rte_pause();
+ continue;
+ }
+
+ const uint16_t nb_rx = rte_event_dequeue_burst(dev_id, port_id,
+ &ev, 1, 0);
+
+ if (nb_rx == 0) {
+ rte_pause();
+ continue;
+ }
+ received++;
+
+ /* The first worker stage does classification */
+ if (ev.queue_id == cdata.qid[0])
+ ev.flow_id = ev.mbuf->hash.rss
+ % cdata.num_fids;
+
+ ev.queue_id = cdata.next_qid[ev.queue_id];
+ ev.op = RTE_EVENT_OP_FORWARD;
+ ev.sched_type = cdata.queue_type;
+
+ work(ev.mbuf);
+
+ while (rte_event_enqueue_burst(dev_id, port_id, &ev, 1) != 1)
+ rte_pause();
+ sent++;
+ }
+
+ if (!cdata.quiet)
+ printf(" worker %u thread done. RX=%zu TX=%zu\n",
+ rte_lcore_id(), received, sent);
+
+ return 0;
+}
+
static int
worker_generic_burst(void *arg)
{
@@ -66,6 +119,69 @@ worker_generic_burst(void *arg)
return 0;
}
+static __rte_always_inline int
+consumer(void)
+{
+ const uint64_t freq_khz = rte_get_timer_hz() / 1000;
+ struct rte_event packet;
+
+ static uint64_t received;
+ static uint64_t last_pkts;
+ static uint64_t last_time;
+ static uint64_t start_time;
+ int i;
+ uint8_t dev_id = cons_data.dev_id;
+ uint8_t port_id = cons_data.port_id;
+
+ do {
+ uint16_t n = rte_event_dequeue_burst(dev_id, port_id,
+ &packet, 1, 0);
+
+ if (n == 0) {
+ for (i = 0; i < rte_eth_dev_count(); i++)
+ rte_eth_tx_buffer_flush(i, 0, fdata->tx_buf[i]);
+ return 0;
+ }
+ if (start_time == 0)
+ last_time = start_time = rte_get_timer_cycles();
+
+ received++;
+ uint8_t outport = packet.mbuf->port;
+
+ rte_eth_tx_buffer(outport, 0, fdata->tx_buf[outport],
+ packet.mbuf);
+
+ if (cons_data.release)
+ rte_event_enqueue_burst(dev_id, port_id,
+ &packet, n);
+
+ /* Print out mpps every 1<<22 packets */
+ if (!cdata.quiet && received >= last_pkts + (1<<22)) {
+ const uint64_t now = rte_get_timer_cycles();
+ const uint64_t total_ms = (now - start_time) / freq_khz;
+ const uint64_t delta_ms = (now - last_time) / freq_khz;
+ uint64_t delta_pkts = received - last_pkts;
+
+ printf("# %s RX=%"PRIu64", time %"PRIu64 "ms, "
+ "avg %.3f mpps [current %.3f mpps]\n",
+ __func__,
+ received,
+ total_ms,
+ received / (total_ms * 1000.0),
+ delta_pkts / (delta_ms * 1000.0));
+ last_pkts = received;
+ last_time = now;
+ }
+
+ cdata.num_packets--;
+ if (cdata.num_packets <= 0)
+ fdata->done = 1;
+ /* Stay in this loop if this is the single TX core. */
+ } while (!fdata->done && fdata->tx_single);
+
+ return 0;
+}
+
static __rte_always_inline int
consumer_burst(void)
{
@@ -430,9 +546,13 @@ generic_opt_check(void)
void
set_worker_generic_setup_data(struct setup_data *caps, bool burst)
{
- RTE_SET_USED(burst);
- caps->consumer = consumer_burst;
- caps->worker = worker_generic_burst;
+ if (burst) {
+ caps->consumer = consumer_burst;
+ caps->worker = worker_generic_burst;
+ } else {
+ caps->consumer = consumer;
+ caps->worker = worker_generic;
+ }
caps->adptr_setup = init_rx_adapter;
caps->scheduler = schedule_devices;
--
2.15.1
^ permalink raw reply [flat|nested] 48+ messages in thread
* [dpdk-dev] [PATCH v2 07/15] examples/eventdev: modify work cycles
2018-01-10 11:09 ` [dpdk-dev] [PATCH v2 01/15] examples/eventdev: add Rx adapter support Pavan Nikhilesh
` (4 preceding siblings ...)
2018-01-10 11:10 ` [dpdk-dev] [PATCH v2 06/15] examples/eventdev: add non burst mode generic worker Pavan Nikhilesh
@ 2018-01-10 11:10 ` Pavan Nikhilesh
2018-01-15 10:14 ` Van Haaren, Harry
2018-01-10 11:10 ` [dpdk-dev] [PATCH v2 08/15] examples/eventdev: add thread safe Tx worker pipeline Pavan Nikhilesh
` (8 subsequent siblings)
14 siblings, 1 reply; 48+ messages in thread
From: Pavan Nikhilesh @ 2018-01-10 11:10 UTC (permalink / raw)
To: gage.eads, jerin.jacobkollanukkaran, harry.van.haaren,
hemant.agrawal, liang.j.ma, santosh.shukla
Cc: dev, Pavan Nikhilesh
The current work cycles function exchanges the source and destination MAC
addresses and also pauses the core for the given number of cycles.
This patch splits the function into two parts, i.e. exchange MAC and
pause the core. The pause function is invoked at every stage,
whereas exchange MAC is invoked only when the packet is transmitted.
Signed-off-by: Pavan Nikhilesh <pbhagavatula@caviumnetworks.com>
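Note that the exchange_mac() body carried over below only copies d_addr out to a temporary and back, so the addresses are left unchanged; a conventional three-copy swap, shown here as a sketch rather than as part of the patch, would be:

    struct ether_hdr *eth = rte_pktmbuf_mtod(m, struct ether_hdr *);
    struct ether_addr addr;

    ether_addr_copy(&eth->d_addr, &addr);        /* save destination */
    ether_addr_copy(&eth->s_addr, &eth->d_addr); /* dst <- src */
    ether_addr_copy(&addr, &eth->s_addr);        /* src <- saved dst */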
---
examples/eventdev_pipeline_sw_pmd/pipeline_common.h | 11 +++++------
examples/eventdev_pipeline_sw_pmd/pipeline_worker_generic.c | 7 +++++--
2 files changed, 10 insertions(+), 8 deletions(-)
diff --git a/examples/eventdev_pipeline_sw_pmd/pipeline_common.h b/examples/eventdev_pipeline_sw_pmd/pipeline_common.h
index 9e1f5e9f0..d58059b78 100644
--- a/examples/eventdev_pipeline_sw_pmd/pipeline_common.h
+++ b/examples/eventdev_pipeline_sw_pmd/pipeline_common.h
@@ -99,21 +99,20 @@ struct fastpath_data *fdata;
struct config_data cdata;
static __rte_always_inline void
-work(struct rte_mbuf *m)
+exchange_mac(struct rte_mbuf *m)
{
struct ether_hdr *eth;
struct ether_addr addr;
/* change mac addresses on packet (to use mbuf data) */
- /*
- * FIXME Swap mac address properly and also handle the
- * case for both odd and even number of stages that the
- * addresses end up the same at the end of the pipeline
- */
eth = rte_pktmbuf_mtod(m, struct ether_hdr *);
ether_addr_copy(ð->d_addr, &addr);
ether_addr_copy(&addr, ð->d_addr);
+}
+static __rte_always_inline void
+work(void)
+{
/* do a number of cycles of work per packet */
volatile uint64_t start_tsc = rte_rdtsc();
while (rte_rdtsc() < start_tsc + cdata.worker_cycles)
diff --git a/examples/eventdev_pipeline_sw_pmd/pipeline_worker_generic.c b/examples/eventdev_pipeline_sw_pmd/pipeline_worker_generic.c
index f4523902b..90f87709c 100644
--- a/examples/eventdev_pipeline_sw_pmd/pipeline_worker_generic.c
+++ b/examples/eventdev_pipeline_sw_pmd/pipeline_worker_generic.c
@@ -45,7 +45,7 @@ worker_generic(void *arg)
ev.op = RTE_EVENT_OP_FORWARD;
ev.sched_type = cdata.queue_type;
- work(ev.mbuf);
+ work();
while (rte_event_enqueue_burst(dev_id, port_id, &ev, 1) != 1)
rte_pause();
@@ -101,7 +101,7 @@ worker_generic_burst(void *arg)
events[i].op = RTE_EVENT_OP_FORWARD;
events[i].sched_type = cdata.queue_type;
- work(events[i].mbuf);
+ work();
}
uint16_t nb_tx = rte_event_enqueue_burst(dev_id, port_id,
events, nb_rx);
@@ -148,6 +148,7 @@ consumer(void)
received++;
uint8_t outport = packet.mbuf->port;
+ exchange_mac(packet.mbuf);
rte_eth_tx_buffer(outport, 0, fdata->tx_buf[outport],
packet.mbuf);
@@ -212,6 +213,8 @@ consumer_burst(void)
received += n;
for (i = 0; i < n; i++) {
uint8_t outport = packets[i].mbuf->port;
+
+ exchange_mac(packets[i].mbuf);
rte_eth_tx_buffer(outport, 0, fdata->tx_buf[outport],
packets[i].mbuf);
--
2.15.1
^ permalink raw reply [flat|nested] 48+ messages in thread
* Re: [dpdk-dev] [PATCH v2 07/15] examples/eventdev: modify work cycles
2018-01-10 11:10 ` [dpdk-dev] [PATCH v2 07/15] examples/eventdev: modify work cycles Pavan Nikhilesh
@ 2018-01-15 10:14 ` Van Haaren, Harry
0 siblings, 0 replies; 48+ messages in thread
From: Van Haaren, Harry @ 2018-01-15 10:14 UTC (permalink / raw)
To: Pavan Nikhilesh, Eads, Gage, jerin.jacobkollanukkaran,
hemant.agrawal, Ma, Liang J, santosh.shukla
Cc: dev
> From: Pavan Nikhilesh [mailto:pbhagavatula@caviumnetworks.com]
> Sent: Wednesday, January 10, 2018 11:10 AM
> To: Eads, Gage <gage.eads@intel.com>; jerin.jacobkollanukkaran@cavium.com;
> Van Haaren, Harry <harry.van.haaren@intel.com>; hemant.agrawal@nxp.com; Ma,
> Liang J <liang.j.ma@intel.com>; santosh.shukla@caviumnetworks.com
> Cc: dev@dpdk.org; Pavan Nikhilesh <pbhagavatula@caviumnetworks.com>
> Subject: [dpdk-dev] [PATCH v2 07/15] examples/eventdev: modify work cycles
>
> The current work cycles function exchanges the source and destination MAC
> addresses and also pauses the core for the given number of cycles.
> This patch splits the function into two parts, i.e. exchange MAC and
> pause the core. The pause function is invoked at every stage,
> whereas exchange MAC is invoked only when the packet is transmitted.
>
> Signed-off-by: Pavan Nikhilesh <pbhagavatula@caviumnetworks.com>
Looks good. After this change, the MAC is only swapped once, making it
a "valid" forwarding app, instead of a promiscuous mode performance tester.
Acked-by: Harry van Haaren <harry.van.haaren@intel.com>
^ permalink raw reply [flat|nested] 48+ messages in thread
* [dpdk-dev] [PATCH v2 08/15] examples/eventdev: add thread safe Tx worker pipeline
2018-01-10 11:09 ` [dpdk-dev] [PATCH v2 01/15] examples/eventdev: add Rx adapter support Pavan Nikhilesh
` (5 preceding siblings ...)
2018-01-10 11:10 ` [dpdk-dev] [PATCH v2 07/15] examples/eventdev: modify work cycles Pavan Nikhilesh
@ 2018-01-10 11:10 ` Pavan Nikhilesh
2018-01-10 11:10 ` [dpdk-dev] [PATCH v2 09/15] examples/eventdev: add burst for thread safe pipeline Pavan Nikhilesh
` (7 subsequent siblings)
14 siblings, 0 replies; 48+ messages in thread
From: Pavan Nikhilesh @ 2018-01-10 11:10 UTC (permalink / raw)
To: gage.eads, jerin.jacobkollanukkaran, harry.van.haaren,
hemant.agrawal, liang.j.ma, santosh.shukla
Cc: dev, Pavan Nikhilesh
Add a worker pipeline for when Tx is multi-thread safe.
Probe the Ethernet device capabilities and select this pipeline if it is
supported.
Signed-off-by: Pavan Nikhilesh <pbhagavatula@caviumnetworks.com>
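The gating check, condensed from the diff below: Tx is treated as thread safe only if every ethdev advertises the MT_LOCKFREE Tx offload, in which case worker cores may call rte_eth_tx_burst() concurrently on the same queue and the dedicated consumer stage can be dropped.

    uint8_t mt_unsafe = 0;

    for (i = 0; i < nb_ethdev; i++) {
        struct rte_eth_dev_info dev_info;

        rte_eth_dev_info_get(i, &dev_info);
        mt_unsafe |= !(dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MT_LOCKFREE);
    }
    /* mt_unsafe == 0: select the worker-Tx pipeline */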
---
v2 Changes:
- Redo function names(Harry)
examples/eventdev_pipeline_sw_pmd/Makefile | 1 +
examples/eventdev_pipeline_sw_pmd/main.c | 18 +-
.../eventdev_pipeline_sw_pmd/pipeline_common.h | 5 +
.../eventdev_pipeline_sw_pmd/pipeline_worker_tx.c | 425 +++++++++++++++++++++
4 files changed, 447 insertions(+), 2 deletions(-)
create mode 100644 examples/eventdev_pipeline_sw_pmd/pipeline_worker_tx.c
diff --git a/examples/eventdev_pipeline_sw_pmd/Makefile b/examples/eventdev_pipeline_sw_pmd/Makefile
index 5e30556fb..59ee9840a 100644
--- a/examples/eventdev_pipeline_sw_pmd/Makefile
+++ b/examples/eventdev_pipeline_sw_pmd/Makefile
@@ -43,6 +43,7 @@ APP = eventdev_pipeline_sw_pmd
# all source are stored in SRCS-y
SRCS-y := main.c
SRCS-y += pipeline_worker_generic.c
+SRCS-y += pipeline_worker_tx.c
CFLAGS += -O3
CFLAGS += $(WERROR_FLAGS)
diff --git a/examples/eventdev_pipeline_sw_pmd/main.c b/examples/eventdev_pipeline_sw_pmd/main.c
index 947c5f786..f877e695b 100644
--- a/examples/eventdev_pipeline_sw_pmd/main.c
+++ b/examples/eventdev_pipeline_sw_pmd/main.c
@@ -381,9 +381,20 @@ init_ports(unsigned int num_ports)
static void
do_capability_setup(uint16_t nb_ethdev, uint8_t eventdev_id)
{
- RTE_SET_USED(nb_ethdev);
+ int i;
+ uint8_t mt_unsafe = 0;
uint8_t burst = 0;
+ for (i = 0; i < nb_ethdev; i++) {
+ struct rte_eth_dev_info dev_info;
+ memset(&dev_info, 0, sizeof(struct rte_eth_dev_info));
+
+ rte_eth_dev_info_get(i, &dev_info);
+ /* Check if it is safe to ask the worker to Tx. */
+ mt_unsafe |= !(dev_info.tx_offload_capa &
+ DEV_TX_OFFLOAD_MT_LOCKFREE);
+ }
+
struct rte_event_dev_info eventdev_info;
memset(&eventdev_info, 0, sizeof(struct rte_event_dev_info));
@@ -391,7 +402,10 @@ do_capability_setup(uint16_t nb_ethdev, uint8_t eventdev_id)
burst = eventdev_info.event_dev_cap & RTE_EVENT_DEV_CAP_BURST_MODE ? 1 :
0;
- set_worker_generic_setup_data(&fdata->cap, burst);
+ if (mt_unsafe)
+ set_worker_generic_setup_data(&fdata->cap, burst);
+ else
+ set_worker_tx_setup_data(&fdata->cap, burst);
}
static void
diff --git a/examples/eventdev_pipeline_sw_pmd/pipeline_common.h b/examples/eventdev_pipeline_sw_pmd/pipeline_common.h
index d58059b78..e06320050 100644
--- a/examples/eventdev_pipeline_sw_pmd/pipeline_common.h
+++ b/examples/eventdev_pipeline_sw_pmd/pipeline_common.h
@@ -79,6 +79,10 @@ struct config_data {
int dump_dev_signal;
unsigned int num_stages;
unsigned int worker_cq_depth;
+ unsigned int rx_stride;
+ /* Use the Rx stride value to reduce congestion in the entry queue when
+ * using multiple eth ports, by forming multiple event queue pipelines.
+ */
int16_t next_qid[MAX_NUM_STAGES+2];
int16_t qid[MAX_NUM_STAGES];
uint8_t rx_adapter_id;
@@ -144,3 +148,4 @@ schedule_devices(unsigned int lcore_id)
}
void set_worker_generic_setup_data(struct setup_data *caps, bool burst);
+void set_worker_tx_setup_data(struct setup_data *caps, bool burst);
diff --git a/examples/eventdev_pipeline_sw_pmd/pipeline_worker_tx.c b/examples/eventdev_pipeline_sw_pmd/pipeline_worker_tx.c
new file mode 100644
index 000000000..397b1013f
--- /dev/null
+++ b/examples/eventdev_pipeline_sw_pmd/pipeline_worker_tx.c
@@ -0,0 +1,425 @@
+/*
+ * SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2014 Intel Corporation
+ * Copyright 2017 Cavium, Inc.
+ */
+
+#include "pipeline_common.h"
+
+static __rte_always_inline void
+worker_fwd_event(struct rte_event *ev, uint8_t sched)
+{
+ ev->event_type = RTE_EVENT_TYPE_CPU;
+ ev->op = RTE_EVENT_OP_FORWARD;
+ ev->sched_type = sched;
+}
+
+static __rte_always_inline void
+worker_event_enqueue(const uint8_t dev, const uint8_t port,
+ struct rte_event *ev)
+{
+ while (rte_event_enqueue_burst(dev, port, ev, 1) != 1)
+ rte_pause();
+}
+
+static __rte_always_inline void
+worker_tx_pkt(struct rte_mbuf *mbuf)
+{
+ exchange_mac(mbuf);
+ while (rte_eth_tx_burst(mbuf->port, 0, &mbuf, 1) != 1)
+ rte_pause();
+}
+
+/* Multi stage Pipeline Workers */
+
+static int
+worker_do_tx(void *arg)
+{
+ struct rte_event ev;
+
+ struct worker_data *data = (struct worker_data *)arg;
+ const uint8_t dev = data->dev_id;
+ const uint8_t port = data->port_id;
+ const uint8_t lst_qid = cdata.num_stages - 1;
+ size_t fwd = 0, received = 0, tx = 0;
+
+
+ while (!fdata->done) {
+
+ if (!rte_event_dequeue_burst(dev, port, &ev, 1, 0)) {
+ rte_pause();
+ continue;
+ }
+
+ received++;
+ const uint8_t cq_id = ev.queue_id % cdata.num_stages;
+
+ if (cq_id >= lst_qid) {
+ if (ev.sched_type == RTE_SCHED_TYPE_ATOMIC) {
+ worker_tx_pkt(ev.mbuf);
+ tx++;
+ continue;
+ }
+
+ worker_fwd_event(&ev, RTE_SCHED_TYPE_ATOMIC);
+ ev.queue_id = (cq_id == lst_qid) ?
+ cdata.next_qid[ev.queue_id] : ev.queue_id;
+ } else {
+ ev.queue_id = cdata.next_qid[ev.queue_id];
+ worker_fwd_event(&ev, cdata.queue_type);
+ }
+ work();
+
+ worker_event_enqueue(dev, port, &ev);
+ fwd++;
+ }
+
+ if (!cdata.quiet)
+ printf(" worker %u thread done. RX=%zu FWD=%zu TX=%zu\n",
+ rte_lcore_id(), received, fwd, tx);
+
+ return 0;
+}
+
+static int
+setup_eventdev_worker_tx(struct cons_data *cons_data,
+ struct worker_data *worker_data)
+{
+ RTE_SET_USED(cons_data);
+ uint8_t i;
+ const uint8_t dev_id = 0;
+ const uint8_t nb_ports = cdata.num_workers;
+ uint8_t nb_slots = 0;
+ uint8_t nb_queues = rte_eth_dev_count() * cdata.num_stages;
+ nb_queues += rte_eth_dev_count();
+
+ struct rte_event_dev_config config = {
+ .nb_event_queues = nb_queues,
+ .nb_event_ports = nb_ports,
+ .nb_events_limit = 4096,
+ .nb_event_queue_flows = 1024,
+ .nb_event_port_dequeue_depth = 128,
+ .nb_event_port_enqueue_depth = 128,
+ };
+ struct rte_event_port_conf wkr_p_conf = {
+ .dequeue_depth = cdata.worker_cq_depth,
+ .enqueue_depth = 64,
+ .new_event_threshold = 4096,
+ };
+ struct rte_event_queue_conf wkr_q_conf = {
+ .schedule_type = cdata.queue_type,
+ .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
+ .nb_atomic_flows = 1024,
+ .nb_atomic_order_sequences = 1024,
+ };
+
+ int ret, ndev = rte_event_dev_count();
+
+ if (ndev < 1) {
+ printf("%d: No Eventdev Devices Found\n", __LINE__);
+ return -1;
+ }
+
+
+ struct rte_event_dev_info dev_info;
+ ret = rte_event_dev_info_get(dev_id, &dev_info);
+ printf("\tEventdev %d: %s\n", dev_id, dev_info.driver_name);
+
+ if (dev_info.max_event_port_dequeue_depth <
+ config.nb_event_port_dequeue_depth)
+ config.nb_event_port_dequeue_depth =
+ dev_info.max_event_port_dequeue_depth;
+ if (dev_info.max_event_port_enqueue_depth <
+ config.nb_event_port_enqueue_depth)
+ config.nb_event_port_enqueue_depth =
+ dev_info.max_event_port_enqueue_depth;
+
+ ret = rte_event_dev_configure(dev_id, &config);
+ if (ret < 0) {
+ printf("%d: Error configuring device\n", __LINE__);
+ return -1;
+ }
+
+ printf(" Stages:\n");
+ for (i = 0; i < nb_queues; i++) {
+
+ uint8_t slot;
+
+ nb_slots = cdata.num_stages + 1;
+ slot = i % nb_slots;
+ wkr_q_conf.schedule_type = slot == cdata.num_stages ?
+ RTE_SCHED_TYPE_ATOMIC : cdata.queue_type;
+
+ if (rte_event_queue_setup(dev_id, i, &wkr_q_conf) < 0) {
+ printf("%d: error creating qid %d\n", __LINE__, i);
+ return -1;
+ }
+ cdata.qid[i] = i;
+ cdata.next_qid[i] = i+1;
+ if (cdata.enable_queue_priorities) {
+ const uint32_t prio_delta =
+ (RTE_EVENT_DEV_PRIORITY_LOWEST) /
+ nb_slots;
+
+ /* higher priority for queues closer to tx */
+ wkr_q_conf.priority =
+ RTE_EVENT_DEV_PRIORITY_LOWEST - prio_delta *
+ (i % nb_slots);
+ }
+
+ const char *type_str = "Atomic";
+ switch (wkr_q_conf.schedule_type) {
+ case RTE_SCHED_TYPE_ORDERED:
+ type_str = "Ordered";
+ break;
+ case RTE_SCHED_TYPE_PARALLEL:
+ type_str = "Parallel";
+ break;
+ }
+ printf("\tStage %d, Type %s\tPriority = %d\n", i, type_str,
+ wkr_q_conf.priority);
+ }
+
+ printf("\n");
+ if (wkr_p_conf.dequeue_depth > config.nb_event_port_dequeue_depth)
+ wkr_p_conf.dequeue_depth = config.nb_event_port_dequeue_depth;
+ if (wkr_p_conf.enqueue_depth > config.nb_event_port_enqueue_depth)
+ wkr_p_conf.enqueue_depth = config.nb_event_port_enqueue_depth;
+
+ /* set up one port per worker, linking to all stage queues */
+ for (i = 0; i < cdata.num_workers; i++) {
+ struct worker_data *w = &worker_data[i];
+ w->dev_id = dev_id;
+ if (rte_event_port_setup(dev_id, i, &wkr_p_conf) < 0) {
+ printf("Error setting up port %d\n", i);
+ return -1;
+ }
+
+ if (rte_event_port_link(dev_id, i, NULL, NULL, 0)
+ != nb_queues) {
+ printf("%d: error creating link for port %d\n",
+ __LINE__, i);
+ return -1;
+ }
+ w->port_id = i;
+ }
+ /*
+ * Reduce the load on ingress event queue by splitting the traffic
+ * across multiple event queues.
+ * For example, if nb_stages = 2 and nb_ethdev = 2, then
+ *
+ * nb_queues = (2 * 2) + 2 = 6 (non atq)
+ * rx_stride = 3
+ *
+ * So, traffic is split across queue 0 and queue 3, since the queue id
+ * for the rx adapter is chosen as <ethport_id> * <rx_stride>, i.e. in the
+ * above case eth ports 0 and 1 will inject packets into event queues 0
+ * and 3 respectively.
+ *
+ * This forms two sets of queue pipelines: 0->1->2->tx and 3->4->5->tx.
+ */
+ cdata.rx_stride = nb_slots;
+ ret = rte_event_dev_service_id_get(dev_id,
+ &fdata->evdev_service_id);
+ if (ret != -ESRCH && ret != 0) {
+ printf("Error getting the service ID\n");
+ return -1;
+ }
+ rte_service_runstate_set(fdata->evdev_service_id, 1);
+ rte_service_set_runstate_mapped_check(fdata->evdev_service_id, 0);
+ if (rte_event_dev_start(dev_id) < 0) {
+ printf("Error starting eventdev\n");
+ return -1;
+ }
+
+ return dev_id;
+}
+
+
+struct rx_adptr_services {
+ uint16_t nb_rx_adptrs;
+ uint32_t *rx_adpt_arr;
+};
+
+static int32_t
+service_rx_adapter(void *arg)
+{
+ int i;
+ struct rx_adptr_services *adptr_services = arg;
+
+ for (i = 0; i < adptr_services->nb_rx_adptrs; i++)
+ rte_service_run_iter_on_app_lcore(
+ adptr_services->rx_adpt_arr[i], 1);
+ return 0;
+}
+
+static void
+init_rx_adapter(uint16_t nb_ports)
+{
+ int i;
+ int ret;
+ uint8_t evdev_id = 0;
+ struct rx_adptr_services *adptr_services = NULL;
+ struct rte_event_dev_info dev_info;
+
+ ret = rte_event_dev_info_get(evdev_id, &dev_info);
+ adptr_services = rte_zmalloc(NULL, sizeof(struct rx_adptr_services), 0);
+ if (adptr_services == NULL)
+ rte_exit(EXIT_FAILURE, "failed to allocate rx adapter service data");
+
+ struct rte_event_port_conf rx_p_conf = {
+ .dequeue_depth = 8,
+ .enqueue_depth = 8,
+ .new_event_threshold = 1200,
+ };
+
+ if (rx_p_conf.dequeue_depth > dev_info.max_event_port_dequeue_depth)
+ rx_p_conf.dequeue_depth = dev_info.max_event_port_dequeue_depth;
+ if (rx_p_conf.enqueue_depth > dev_info.max_event_port_enqueue_depth)
+ rx_p_conf.enqueue_depth = dev_info.max_event_port_enqueue_depth;
+
+
+ struct rte_event_eth_rx_adapter_queue_conf queue_conf = {
+ .ev.sched_type = cdata.queue_type,
+ };
+
+ for (i = 0; i < nb_ports; i++) {
+ uint32_t cap;
+ uint32_t service_id;
+
+ ret = rte_event_eth_rx_adapter_create(i, evdev_id, &rx_p_conf);
+ if (ret)
+ rte_exit(EXIT_FAILURE,
+ "failed to create rx adapter[%d]",
+ i);
+
+ ret = rte_event_eth_rx_adapter_caps_get(evdev_id, i, &cap);
+ if (ret)
+ rte_exit(EXIT_FAILURE,
+ "failed to get event rx adapter "
+ "capabilities");
+
+ queue_conf.ev.queue_id = cdata.rx_stride ?
+ (i * cdata.rx_stride)
+ : (uint8_t)cdata.qid[0];
+
+ ret = rte_event_eth_rx_adapter_queue_add(i, i, -1, &queue_conf);
+ if (ret)
+ rte_exit(EXIT_FAILURE,
+ "Failed to add queues to Rx adapter");
+
+
+ /* Producer needs to be scheduled. */
+ if (!(cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT)) {
+ ret = rte_event_eth_rx_adapter_service_id_get(i,
+ &service_id);
+ if (ret != -ESRCH && ret != 0) {
+ rte_exit(EXIT_FAILURE,
+ "Error getting the service ID for rx adptr\n");
+ }
+
+ rte_service_runstate_set(service_id, 1);
+ rte_service_set_runstate_mapped_check(service_id, 0);
+
+ adptr_services->nb_rx_adptrs++;
+ adptr_services->rx_adpt_arr = rte_realloc(
+ adptr_services->rx_adpt_arr,
+ adptr_services->nb_rx_adptrs *
+ sizeof(uint32_t), 0);
+ adptr_services->rx_adpt_arr[
+ adptr_services->nb_rx_adptrs - 1] =
+ service_id;
+ }
+
+ ret = rte_event_eth_rx_adapter_start(i);
+ if (ret)
+ rte_exit(EXIT_FAILURE, "Rx adapter[%d] start failed",
+ i);
+ }
+
+ if (adptr_services->nb_rx_adptrs) {
+ struct rte_service_spec service;
+
+ memset(&service, 0, sizeof(struct rte_service_spec));
+ snprintf(service.name, sizeof(service.name), "rx_service");
+ service.callback = service_rx_adapter;
+ service.callback_userdata = (void *)adptr_services;
+
+ int32_t ret = rte_service_component_register(&service,
+ &fdata->rxadptr_service_id);
+ if (ret)
+ rte_exit(EXIT_FAILURE,
+ "Rx adapter[%d] service register failed",
+ cdata.rx_adapter_id);
+
+ rte_service_runstate_set(fdata->rxadptr_service_id, 1);
+ rte_service_component_runstate_set(fdata->rxadptr_service_id,
+ 1);
+ rte_service_set_runstate_mapped_check(fdata->rxadptr_service_id,
+ 0);
+ } else {
+ memset(fdata->rx_core, 0, sizeof(unsigned int) * MAX_NUM_CORE);
+ }
+
+ if (!adptr_services->nb_rx_adptrs && fdata->cap.consumer == NULL &&
+ (dev_info.event_dev_cap &
+ RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED))
+ fdata->cap.scheduler = NULL;
+
+ /* Free the bookkeeping struct only after its last use above, to
+ * avoid a use after free when no adapter services were created.
+ */
+ if (!adptr_services->nb_rx_adptrs)
+ rte_free(adptr_services);
+
+ if (dev_info.event_dev_cap & RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED)
+ memset(fdata->sched_core, 0,
+ sizeof(unsigned int) * MAX_NUM_CORE);
+}
+
+static void
+worker_tx_opt_check(void)
+{
+ int i;
+ int ret;
+ uint32_t cap = 0;
+ uint8_t rx_needed = 0;
+ struct rte_event_dev_info eventdev_info;
+
+ memset(&eventdev_info, 0, sizeof(struct rte_event_dev_info));
+ rte_event_dev_info_get(0, &eventdev_info);
+
+ for (i = 0; i < rte_eth_dev_count(); i++) {
+ ret = rte_event_eth_rx_adapter_caps_get(0, i, &cap);
+ if (ret)
+ rte_exit(EXIT_FAILURE,
+ "failed to get event rx adapter "
+ "capabilities");
+ rx_needed |=
+ !(cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT);
+ }
+
+ if (cdata.worker_lcore_mask == 0 ||
+ (rx_needed && cdata.rx_lcore_mask == 0) ||
+ (cdata.sched_lcore_mask == 0 &&
+ !(eventdev_info.event_dev_cap &
+ RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED))) {
+ printf("Core part of pipeline was not assigned any cores. "
+ "This will stall the pipeline, please check core masks "
+ "(use -h for details on setting core masks):\n"
+ "\trx: %"PRIu64"\n\ttx: %"PRIu64"\n\tsched: %"PRIu64
+ "\n\tworkers: %"PRIu64"\n",
+ cdata.rx_lcore_mask, cdata.tx_lcore_mask,
+ cdata.sched_lcore_mask,
+ cdata.worker_lcore_mask);
+ rte_exit(-1, "Fix core masks\n");
+ }
+}
+
+void
+set_worker_tx_setup_data(struct setup_data *caps, bool burst)
+{
+ RTE_SET_USED(burst);
+ caps->worker = worker_do_tx;
+
+ memset(fdata->tx_core, 0, sizeof(unsigned int) * MAX_NUM_CORE);
+
+ caps->check_opt = worker_tx_opt_check;
+ caps->consumer = NULL;
+ caps->scheduler = schedule_devices;
+ caps->evdev_setup = setup_eventdev_worker_tx;
+ caps->adptr_setup = init_rx_adapter;
+}
--
2.15.1
^ permalink raw reply [flat|nested] 48+ messages in thread
* [dpdk-dev] [PATCH v2 09/15] examples/eventdev: add burst for thread safe pipeline
2018-01-10 11:09 ` [dpdk-dev] [PATCH v2 01/15] examples/eventdev: add Rx adapter support Pavan Nikhilesh
` (6 preceding siblings ...)
2018-01-10 11:10 ` [dpdk-dev] [PATCH v2 08/15] examples/eventdev: add thread safe Tx worker pipeline Pavan Nikhilesh
@ 2018-01-10 11:10 ` Pavan Nikhilesh
2018-01-10 11:10 ` [dpdk-dev] [PATCH v2 10/15] examples/eventdev: add all type queue option Pavan Nikhilesh
` (6 subsequent siblings)
14 siblings, 0 replies; 48+ messages in thread
From: Pavan Nikhilesh @ 2018-01-10 11:10 UTC (permalink / raw)
To: gage.eads, jerin.jacobkollanukkaran, harry.van.haaren,
hemant.agrawal, liang.j.ma, santosh.shukla
Cc: dev, Pavan Nikhilesh
Add a burst mode worker pipeline for when Tx is multi-thread safe.
Signed-off-by: Pavan Nikhilesh <pbhagavatula@caviumnetworks.com>
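For context on the helper added below: rte_event_enqueue_burst() may accept fewer events than requested (e.g. when the port's enqueue depth or the device's internal credit limit is reached), so forwarded events must be retried rather than dropped:

    uint16_t enq = rte_event_enqueue_burst(dev, port, ev, nb_rx);

    while (enq < nb_rx)
        enq += rte_event_enqueue_burst(dev, port, ev + enq, nb_rx - enq);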
---
.../eventdev_pipeline_sw_pmd/pipeline_worker_tx.c | 74 +++++++++++++++++++++-
1 file changed, 72 insertions(+), 2 deletions(-)
diff --git a/examples/eventdev_pipeline_sw_pmd/pipeline_worker_tx.c b/examples/eventdev_pipeline_sw_pmd/pipeline_worker_tx.c
index 397b1013f..419d8d410 100644
--- a/examples/eventdev_pipeline_sw_pmd/pipeline_worker_tx.c
+++ b/examples/eventdev_pipeline_sw_pmd/pipeline_worker_tx.c
@@ -22,6 +22,19 @@ worker_event_enqueue(const uint8_t dev, const uint8_t port,
rte_pause();
}
+static __rte_always_inline void
+worker_event_enqueue_burst(const uint8_t dev, const uint8_t port,
+ struct rte_event *ev, const uint16_t nb_rx)
+{
+ uint16_t enq;
+
+ enq = rte_event_enqueue_burst(dev, port, ev, nb_rx);
+ while (enq < nb_rx) {
+ enq += rte_event_enqueue_burst(dev, port,
+ ev + enq, nb_rx - enq);
+ }
+}
+
static __rte_always_inline void
worker_tx_pkt(struct rte_mbuf *mbuf)
{
@@ -81,6 +94,61 @@ worker_do_tx(void *arg)
return 0;
}
+static int
+worker_do_tx_burst(void *arg)
+{
+ struct rte_event ev[BATCH_SIZE];
+
+ struct worker_data *data = (struct worker_data *)arg;
+ uint8_t dev = data->dev_id;
+ uint8_t port = data->port_id;
+ uint8_t lst_qid = cdata.num_stages - 1;
+ size_t fwd = 0, received = 0, tx = 0;
+
+ while (!fdata->done) {
+ uint16_t i;
+ const uint16_t nb_rx = rte_event_dequeue_burst(dev, port,
+ ev, BATCH_SIZE, 0);
+
+ if (nb_rx == 0) {
+ rte_pause();
+ continue;
+ }
+ received += nb_rx;
+
+ for (i = 0; i < nb_rx; i++) {
+ const uint8_t cq_id = ev[i].queue_id % cdata.num_stages;
+
+ if (cq_id >= lst_qid) {
+ if (ev[i].sched_type == RTE_SCHED_TYPE_ATOMIC) {
+ worker_tx_pkt(ev[i].mbuf);
+ tx++;
+ ev[i].op = RTE_EVENT_OP_RELEASE;
+ continue;
+ }
+ ev[i].queue_id = (cq_id == lst_qid) ?
+ cdata.next_qid[ev[i].queue_id] :
+ ev[i].queue_id;
+
+ worker_fwd_event(&ev[i], RTE_SCHED_TYPE_ATOMIC);
+ } else {
+ ev[i].queue_id = cdata.next_qid[ev[i].queue_id];
+ worker_fwd_event(&ev[i], cdata.queue_type);
+ }
+ work();
+ }
+ worker_event_enqueue_burst(dev, port, ev, nb_rx);
+
+ fwd += nb_rx;
+ }
+
+ if (!cdata.quiet)
+ printf(" worker %u thread done. RX=%zu FWD=%zu TX=%zu\n",
+ rte_lcore_id(), received, fwd, tx);
+
+ return 0;
+}
+
static int
setup_eventdev_worker_tx(struct cons_data *cons_data,
struct worker_data *worker_data)
@@ -412,8 +480,10 @@ worker_tx_opt_check(void)
void
set_worker_tx_setup_data(struct setup_data *caps, bool burst)
{
- RTE_SET_USED(burst);
- caps->worker = worker_do_tx;
+ if (burst)
+ caps->worker = worker_do_tx_burst;
+ else
+ caps->worker = worker_do_tx;
memset(fdata->tx_core, 0, sizeof(unsigned int) * MAX_NUM_CORE);
--
2.15.1
^ permalink raw reply [flat|nested] 48+ messages in thread
* [dpdk-dev] [PATCH v2 10/15] examples/eventdev: add all type queue option
2018-01-10 11:09 ` [dpdk-dev] [PATCH v2 01/15] examples/eventdev: add Rx adapter support Pavan Nikhilesh
` (7 preceding siblings ...)
2018-01-10 11:10 ` [dpdk-dev] [PATCH v2 09/15] examples/eventdev: add burst for thread safe pipeline Pavan Nikhilesh
@ 2018-01-10 11:10 ` Pavan Nikhilesh
2018-01-10 11:10 ` [dpdk-dev] [PATCH v2 11/15] examples/eventdev: add single stage pipeline worker Pavan Nikhilesh
` (5 subsequent siblings)
14 siblings, 0 replies; 48+ messages in thread
From: Pavan Nikhilesh @ 2018-01-10 11:10 UTC (permalink / raw)
To: gage.eads, jerin.jacobkollanukkaran, harry.van.haaren,
hemant.agrawal, liang.j.ma, santosh.shukla
Cc: dev, Pavan Nikhilesh
Add a configurable option to set the queue type to all type queues, i.e.
RTE_EVENT_QUEUE_CFG_ALL_TYPES, based on the event device capability
RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES.
This can be enabled by supplying '-a' as a cmdline argument.
Signed-off-by: Pavan Nikhilesh <pbhagavatula@caviumnetworks.com>
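An illustrative invocation with all type queues enabled (mask values are examples only; -r/-t/-e/-w set the rx, tx, sched and worker core masks, and -s is assumed here to set the stage count):

    ./eventdev_pipeline_sw_pmd -r 0x1 -t 0x2 -e 0x4 -w 0xf0 -s 2 -a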
---
v2 Changes:
- redo worker selection logic(Harry)
examples/eventdev_pipeline_sw_pmd/main.c | 7 +-
.../eventdev_pipeline_sw_pmd/pipeline_common.h | 1 +
.../pipeline_worker_generic.c | 5 +
.../eventdev_pipeline_sw_pmd/pipeline_worker_tx.c | 162 +++++++++++++++++++--
4 files changed, 164 insertions(+), 11 deletions(-)
diff --git a/examples/eventdev_pipeline_sw_pmd/main.c b/examples/eventdev_pipeline_sw_pmd/main.c
index f877e695b..2c7b02b86 100644
--- a/examples/eventdev_pipeline_sw_pmd/main.c
+++ b/examples/eventdev_pipeline_sw_pmd/main.c
@@ -148,6 +148,7 @@ static struct option long_options[] = {
{"parallel", no_argument, 0, 'p'},
{"ordered", no_argument, 0, 'o'},
{"quiet", no_argument, 0, 'q'},
+ {"use-atq", no_argument, 0, 'a'},
{"dump", no_argument, 0, 'D'},
{0, 0, 0, 0}
};
@@ -171,6 +172,7 @@ usage(void)
" -o, --ordered Use ordered scheduling\n"
" -p, --parallel Use parallel scheduling\n"
" -q, --quiet Minimize printed output\n"
+ " -a, --use-atq Use all type queues\n"
" -D, --dump Print detailed statistics before exit"
"\n";
fprintf(stderr, "%s", usage_str);
@@ -191,7 +193,7 @@ parse_app_args(int argc, char **argv)
int i;
for (;;) {
- c = getopt_long(argc, argv, "r:t:e:c:w:n:f:s:poPqDW:",
+ c = getopt_long(argc, argv, "r:t:e:c:w:n:f:s:paoPqDW:",
long_options, &option_index);
if (c == -1)
break;
@@ -224,6 +226,9 @@ parse_app_args(int argc, char **argv)
case 'p':
cdata.queue_type = RTE_SCHED_TYPE_PARALLEL;
break;
+ case 'a':
+ cdata.all_type_queues = 1;
+ break;
case 'q':
cdata.quiet = 1;
break;
diff --git a/examples/eventdev_pipeline_sw_pmd/pipeline_common.h b/examples/eventdev_pipeline_sw_pmd/pipeline_common.h
index e06320050..66553038c 100644
--- a/examples/eventdev_pipeline_sw_pmd/pipeline_common.h
+++ b/examples/eventdev_pipeline_sw_pmd/pipeline_common.h
@@ -77,6 +77,7 @@ struct config_data {
int quiet;
int dump_dev;
int dump_dev_signal;
+ int all_type_queues;
unsigned int num_stages;
unsigned int worker_cq_depth;
unsigned int rx_stride;
diff --git a/examples/eventdev_pipeline_sw_pmd/pipeline_worker_generic.c b/examples/eventdev_pipeline_sw_pmd/pipeline_worker_generic.c
index 90f87709c..2c51f4a30 100644
--- a/examples/eventdev_pipeline_sw_pmd/pipeline_worker_generic.c
+++ b/examples/eventdev_pipeline_sw_pmd/pipeline_worker_generic.c
@@ -516,6 +516,11 @@ generic_opt_check(void)
memset(&eventdev_info, 0, sizeof(struct rte_event_dev_info));
rte_event_dev_info_get(0, &eventdev_info);
+ if (cdata.all_type_queues && !(eventdev_info.event_dev_cap &
+ RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES))
+ rte_exit(EXIT_FAILURE,
+ "Event dev doesn't support all type queues\n");
+
for (i = 0; i < rte_eth_dev_count(); i++) {
ret = rte_event_eth_rx_adapter_caps_get(0, i, &cap);
if (ret)
diff --git a/examples/eventdev_pipeline_sw_pmd/pipeline_worker_tx.c b/examples/eventdev_pipeline_sw_pmd/pipeline_worker_tx.c
index 419d8d410..25eabbd6c 100644
--- a/examples/eventdev_pipeline_sw_pmd/pipeline_worker_tx.c
+++ b/examples/eventdev_pipeline_sw_pmd/pipeline_worker_tx.c
@@ -94,6 +94,52 @@ worker_do_tx(void *arg)
return 0;
}
+static int
+worker_do_tx_atq(void *arg)
+{
+ struct rte_event ev;
+
+ struct worker_data *data = (struct worker_data *)arg;
+ const uint8_t dev = data->dev_id;
+ const uint8_t port = data->port_id;
+ const uint8_t lst_qid = cdata.num_stages - 1;
+ size_t fwd = 0, received = 0, tx = 0;
+
+ while (!fdata->done) {
+
+ if (!rte_event_dequeue_burst(dev, port, &ev, 1, 0)) {
+ rte_pause();
+ continue;
+ }
+
+ received++;
+ const uint8_t cq_id = ev.sub_event_type % cdata.num_stages;
+
+ if (cq_id == lst_qid) {
+ if (ev.sched_type == RTE_SCHED_TYPE_ATOMIC) {
+ worker_tx_pkt(ev.mbuf);
+ tx++;
+ continue;
+ }
+
+ worker_fwd_event(&ev, RTE_SCHED_TYPE_ATOMIC);
+ } else {
+ ev.sub_event_type++;
+ worker_fwd_event(&ev, cdata.queue_type);
+ }
+ work();
+
+ worker_event_enqueue(dev, port, &ev);
+ fwd++;
+ }
+
+ if (!cdata.quiet)
+ printf(" worker %u thread done. RX=%zu FWD=%zu TX=%zu\n",
+ rte_lcore_id(), received, fwd, tx);
+
+ return 0;
+}
+
static int
worker_do_tx_burst(void *arg)
{
@@ -149,17 +195,81 @@ worker_do_tx_burst(void *arg)
return 0;
}
+static int
+worker_do_tx_burst_atq(void *arg)
+{
+ struct rte_event ev[BATCH_SIZE];
+
+ struct worker_data *data = (struct worker_data *)arg;
+ uint8_t dev = data->dev_id;
+ uint8_t port = data->port_id;
+ uint8_t lst_qid = cdata.num_stages - 1;
+ size_t fwd = 0, received = 0, tx = 0;
+
+ while (!fdata->done) {
+ uint16_t i;
+
+ const uint16_t nb_rx = rte_event_dequeue_burst(dev, port,
+ ev, BATCH_SIZE, 0);
+
+ if (nb_rx == 0) {
+ rte_pause();
+ continue;
+ }
+ received += nb_rx;
+
+ for (i = 0; i < nb_rx; i++) {
+ const uint8_t cq_id = ev[i].sub_event_type %
+ cdata.num_stages;
+
+ if (cq_id == lst_qid) {
+ if (ev[i].sched_type == RTE_SCHED_TYPE_ATOMIC) {
+ worker_tx_pkt(ev[i].mbuf);
+ tx++;
+ ev[i].op = RTE_EVENT_OP_RELEASE;
+ continue;
+ }
+
+ worker_fwd_event(&ev[i], RTE_SCHED_TYPE_ATOMIC);
+ } else {
+ ev[i].sub_event_type++;
+ worker_fwd_event(&ev[i], cdata.queue_type);
+ }
+ work();
+ }
+
+ worker_event_enqueue_burst(dev, port, ev, nb_rx);
+ fwd += nb_rx;
+ }
+
+ if (!cdata.quiet)
+ printf(" worker %u thread done. RX=%zu FWD=%zu TX=%zu\n",
+ rte_lcore_id(), received, fwd, tx);
+
+ return 0;
+}
+
static int
setup_eventdev_worker_tx(struct cons_data *cons_data,
struct worker_data *worker_data)
{
RTE_SET_USED(cons_data);
uint8_t i;
+ const uint8_t atq = cdata.all_type_queues ? 1 : 0;
const uint8_t dev_id = 0;
const uint8_t nb_ports = cdata.num_workers;
uint8_t nb_slots = 0;
- uint8_t nb_queues = rte_eth_dev_count() * cdata.num_stages;
- nb_queues += rte_eth_dev_count();
+ uint8_t nb_queues = rte_eth_dev_count();
+
+ /*
+ * In the case where all type queues are not enabled, use a number of
+ * queues equal to number of stages * eth_dev_count, plus one extra
+ * queue per pipeline for Tx.
+ */
+ if (!atq) {
+ nb_queues *= cdata.num_stages;
+ nb_queues += rte_eth_dev_count();
+ }
struct rte_event_dev_config config = {
.nb_event_queues = nb_queues,
@@ -211,12 +321,19 @@ setup_eventdev_worker_tx(struct cons_data *cons_data,
printf(" Stages:\n");
for (i = 0; i < nb_queues; i++) {
- uint8_t slot;
+ if (atq) {
- nb_slots = cdata.num_stages + 1;
- slot = i % nb_slots;
- wkr_q_conf.schedule_type = slot == cdata.num_stages ?
- RTE_SCHED_TYPE_ATOMIC : cdata.queue_type;
+ nb_slots = cdata.num_stages;
+ wkr_q_conf.event_queue_cfg =
+ RTE_EVENT_QUEUE_CFG_ALL_TYPES;
+ } else {
+ uint8_t slot;
+
+ nb_slots = cdata.num_stages + 1;
+ slot = i % nb_slots;
+ wkr_q_conf.schedule_type = slot == cdata.num_stages ?
+ RTE_SCHED_TYPE_ATOMIC : cdata.queue_type;
+ }
if (rte_event_queue_setup(dev_id, i, &wkr_q_conf) < 0) {
printf("%d: error creating qid %d\n", __LINE__, i);
@@ -286,7 +403,7 @@ setup_eventdev_worker_tx(struct cons_data *cons_data,
*
* This forms two set of queue pipelines 0->1->2->tx and 3->4->5->tx.
*/
- cdata.rx_stride = nb_slots;
+ cdata.rx_stride = atq ? 1 : nb_slots;
ret = rte_event_dev_service_id_get(dev_id,
&fdata->evdev_service_id);
if (ret != -ESRCH && ret != 0) {
@@ -450,6 +567,11 @@ worker_tx_opt_check(void)
memset(&eventdev_info, 0, sizeof(struct rte_event_dev_info));
rte_event_dev_info_get(0, &eventdev_info);
+ if (cdata.all_type_queues && !(eventdev_info.event_dev_cap &
+ RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES))
+ rte_exit(EXIT_FAILURE,
+ "Event dev doesn't support all type queues\n");
+
for (i = 0; i < rte_eth_dev_count(); i++) {
ret = rte_event_eth_rx_adapter_caps_get(0, i, &cap);
if (ret)
@@ -477,13 +599,33 @@ worker_tx_opt_check(void)
}
}
+static worker_loop
+get_worker_loop_burst(uint8_t atq)
+{
+ if (atq)
+ return worker_do_tx_burst_atq;
+
+ return worker_do_tx_burst;
+}
+
+static worker_loop
+get_worker_loop_non_burst(uint8_t atq)
+{
+ if (atq)
+ return worker_do_tx_atq;
+
+ return worker_do_tx;
+}
+
void
set_worker_tx_setup_data(struct setup_data *caps, bool burst)
{
+ uint8_t atq = cdata.all_type_queues ? 1 : 0;
+
if (burst)
- caps->worker = worker_do_tx_burst;
+ caps->worker = get_worker_loop_burst(atq);
else
- caps->worker = worker_do_tx;
+ caps->worker = get_worker_loop_non_burst(atq);
memset(fdata->tx_core, 0, sizeof(unsigned int) * MAX_NUM_CORE);
--
2.15.1
^ permalink raw reply [flat|nested] 48+ messages in thread
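A note on stage tracking in the all type queue variants above: with a single RTE_EVENT_QUEUE_CFG_ALL_TYPES queue per ethdev there is no per-stage queue to advance through, so the stage index is carried in sub_event_type instead (condensed from worker_do_tx_atq above):

    const uint8_t cq_id = ev.sub_event_type % cdata.num_stages;

    if (cq_id == lst_qid && ev.sched_type == RTE_SCHED_TYPE_ATOMIC) {
        worker_tx_pkt(ev.mbuf); /* last stage reached: transmit directly */
    } else {
        if (cq_id != lst_qid)
            ev.sub_event_type++; /* advance to the next stage */
        worker_fwd_event(&ev, cq_id == lst_qid ?
                RTE_SCHED_TYPE_ATOMIC : cdata.queue_type);
        worker_event_enqueue(dev, port, &ev);
    }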
* [dpdk-dev] [PATCH v2 11/15] examples/eventdev: add single stage pipeline worker
2018-01-10 11:09 ` [dpdk-dev] [PATCH v2 01/15] examples/eventdev: add Rx adapter support Pavan Nikhilesh
` (8 preceding siblings ...)
2018-01-10 11:10 ` [dpdk-dev] [PATCH v2 10/15] examples/eventdev: add all type queue option Pavan Nikhilesh
@ 2018-01-10 11:10 ` Pavan Nikhilesh
2018-01-10 11:10 ` [dpdk-dev] [PATCH v2 12/15] examples/eventdev: add atq " Pavan Nikhilesh
` (4 subsequent siblings)
14 siblings, 0 replies; 48+ messages in thread
From: Pavan Nikhilesh @ 2018-01-10 11:10 UTC (permalink / raw)
To: gage.eads, jerin.jacobkollanukkaran, harry.van.haaren,
hemant.agrawal, liang.j.ma, santosh.shukla
Cc: dev, Pavan Nikhilesh
Add an optimized eventdev pipeline for when the ethdev supports thread safe
Tx and the number of configured stages is one.
Signed-off-by: Pavan Nikhilesh <pbhagavatula@caviumnetworks.com>
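A note on the burst variant below: the event array is declared with one spare slot (BATCH_SIZE + 1) so that the software prefetch of the next event's mbuf stays within the array bounds on the final iteration; the spare slot holds an uninitialized pointer, which is tolerable because prefetch instructions do not fault on bad addresses:

    struct rte_event ev[BATCH_SIZE + 1];

    for (i = 0; i < nb_rx; i++) {
        rte_prefetch0(ev[i + 1].mbuf); /* hide the mbuf header miss */
        /* ... process ev[i] ... */
    }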
---
.../eventdev_pipeline_sw_pmd/pipeline_worker_tx.c | 111 ++++++++++++++++++++-
1 file changed, 107 insertions(+), 4 deletions(-)
diff --git a/examples/eventdev_pipeline_sw_pmd/pipeline_worker_tx.c b/examples/eventdev_pipeline_sw_pmd/pipeline_worker_tx.c
index 25eabbd6c..21985b00d 100644
--- a/examples/eventdev_pipeline_sw_pmd/pipeline_worker_tx.c
+++ b/examples/eventdev_pipeline_sw_pmd/pipeline_worker_tx.c
@@ -43,6 +43,90 @@ worker_tx_pkt(struct rte_mbuf *mbuf)
rte_pause();
}
+/* Single stage pipeline workers */
+
+static int
+worker_do_tx_single(void *arg)
+{
+ struct worker_data *data = (struct worker_data *)arg;
+ const uint8_t dev = data->dev_id;
+ const uint8_t port = data->port_id;
+ size_t fwd = 0, received = 0, tx = 0;
+ struct rte_event ev;
+
+ while (!fdata->done) {
+
+ if (!rte_event_dequeue_burst(dev, port, &ev, 1, 0)) {
+ rte_pause();
+ continue;
+ }
+
+ received++;
+
+ if (ev.sched_type == RTE_SCHED_TYPE_ATOMIC) {
+ worker_tx_pkt(ev.mbuf);
+ tx++;
+ continue;
+ }
+ work();
+ ev.queue_id++;
+ worker_fwd_event(&ev, RTE_SCHED_TYPE_ATOMIC);
+ worker_event_enqueue(dev, port, &ev);
+ fwd++;
+ }
+
+ if (!cdata.quiet)
+ printf(" worker %u thread done. RX=%zu FWD=%zu TX=%zu\n",
+ rte_lcore_id(), received, fwd, tx);
+ return 0;
+}
+
+static int
+worker_do_tx_single_burst(void *arg)
+{
+ struct rte_event ev[BATCH_SIZE + 1];
+
+ struct worker_data *data = (struct worker_data *)arg;
+ const uint8_t dev = data->dev_id;
+ const uint8_t port = data->port_id;
+ size_t fwd = 0, received = 0, tx = 0;
+
+ while (!fdata->done) {
+ uint16_t i;
+ uint16_t nb_rx = rte_event_dequeue_burst(dev, port, ev,
+ BATCH_SIZE, 0);
+
+ if (!nb_rx) {
+ rte_pause();
+ continue;
+ }
+ received += nb_rx;
+
+ for (i = 0; i < nb_rx; i++) {
+ rte_prefetch0(ev[i + 1].mbuf);
+ if (ev[i].sched_type == RTE_SCHED_TYPE_ATOMIC) {
+
+ worker_tx_pkt(ev[i].mbuf);
+ ev[i].op = RTE_EVENT_OP_RELEASE;
+ tx++;
+
+ } else {
+ ev[i].queue_id++;
+ worker_fwd_event(&ev[i], RTE_SCHED_TYPE_ATOMIC);
+ }
+ work();
+ }
+
+ worker_event_enqueue_burst(dev, port, ev, nb_rx);
+ fwd += nb_rx;
+ }
+
+ if (!cdata.quiet)
+ printf(" worker %u thread done. RX=%zu FWD=%zu TX=%zu\n",
+ rte_lcore_id(), received, fwd, tx);
+ return 0;
+}
+
/* Multi stage Pipeline Workers */
static int
@@ -617,15 +701,33 @@ get_worker_loop_non_burst(uint8_t atq)
return worker_do_tx;
}
-void
-set_worker_tx_setup_data(struct setup_data *caps, bool burst)
+static worker_loop
+get_worker_single_stage(bool burst)
+{
+ if (burst)
+ return worker_do_tx_single_burst;
+
+ return worker_do_tx_single;
+}
+
+static worker_loop
+get_worker_multi_stage(bool burst)
{
uint8_t atq = cdata.all_type_queues ? 1 : 0;
if (burst)
- caps->worker = get_worker_loop_burst(atq);
+ return get_worker_loop_burst(atq);
+
+ return get_worker_loop_non_burst(atq);
+}
+
+void
+set_worker_tx_setup_data(struct setup_data *caps, bool burst)
+{
+ if (cdata.num_stages == 1)
+ caps->worker = get_worker_single_stage(burst);
else
- caps->worker = get_worker_loop_non_burst(atq);
+ caps->worker = get_worker_multi_stage(burst);
memset(fdata->tx_core, 0, sizeof(unsigned int) * MAX_NUM_CORE);
@@ -634,4 +736,5 @@ set_worker_tx_setup_data(struct setup_data *caps, bool burst)
caps->scheduler = schedule_devices;
caps->evdev_setup = setup_eventdev_worker_tx;
caps->adptr_setup = init_rx_adapter;
+
}
--
2.15.1
^ permalink raw reply [flat|nested] 48+ messages in thread
* [dpdk-dev] [PATCH v2 12/15] examples/eventdev: add atq single stage pipeline worker
2018-01-10 11:09 ` [dpdk-dev] [PATCH v2 01/15] examples/eventdev: add Rx adapter support Pavan Nikhilesh
` (9 preceding siblings ...)
2018-01-10 11:10 ` [dpdk-dev] [PATCH v2 11/15] examples/eventdev: add single stage pipeline worker Pavan Nikhilesh
@ 2018-01-10 11:10 ` Pavan Nikhilesh
2018-01-10 11:10 ` [dpdk-dev] [PATCH v2 13/15] examples/eventdev: add mempool size configuration Pavan Nikhilesh
` (3 subsequent siblings)
14 siblings, 0 replies; 48+ messages in thread
From: Pavan Nikhilesh @ 2018-01-10 11:10 UTC (permalink / raw)
To: gage.eads, jerin.jacobkollanukkaran, harry.van.haaren,
hemant.agrawal, liang.j.ma, santosh.shukla
Cc: dev, Pavan Nikhilesh
Add an optimized eventdev pipeline for when the ethdev supports thread safe
Tx, the number of configured stages is one, and the all type queue option is
enabled.
Signed-off-by: Pavan Nikhilesh <pbhagavatula@caviumnetworks.com>
---
.../eventdev_pipeline_sw_pmd/pipeline_worker_tx.c | 104 ++++++++++++++++++++-
1 file changed, 101 insertions(+), 3 deletions(-)
diff --git a/examples/eventdev_pipeline_sw_pmd/pipeline_worker_tx.c b/examples/eventdev_pipeline_sw_pmd/pipeline_worker_tx.c
index 21985b00d..c0d1bd9fb 100644
--- a/examples/eventdev_pipeline_sw_pmd/pipeline_worker_tx.c
+++ b/examples/eventdev_pipeline_sw_pmd/pipeline_worker_tx.c
@@ -81,6 +81,41 @@ worker_do_tx_single(void *arg)
return 0;
}
+static int
+worker_do_tx_single_atq(void *arg)
+{
+ struct worker_data *data = (struct worker_data *)arg;
+ const uint8_t dev = data->dev_id;
+ const uint8_t port = data->port_id;
+ size_t fwd = 0, received = 0, tx = 0;
+ struct rte_event ev;
+
+ while (!fdata->done) {
+
+ if (!rte_event_dequeue_burst(dev, port, &ev, 1, 0)) {
+ rte_pause();
+ continue;
+ }
+
+ received++;
+
+ if (ev.sched_type == RTE_SCHED_TYPE_ATOMIC) {
+ worker_tx_pkt(ev.mbuf);
+ tx++;
+ continue;
+ }
+ work();
+ worker_fwd_event(&ev, RTE_SCHED_TYPE_ATOMIC);
+ worker_event_enqueue(dev, port, &ev);
+ fwd++;
+ }
+
+ if (!cdata.quiet)
+ printf(" worker %u thread done. RX=%zu FWD=%zu TX=%zu\n",
+ rte_lcore_id(), received, fwd, tx);
+ return 0;
+}
+
static int
worker_do_tx_single_burst(void *arg)
{
@@ -127,6 +162,50 @@ worker_do_tx_single_burst(void *arg)
return 0;
}
+static int
+worker_do_tx_single_burst_atq(void *arg)
+{
+ struct rte_event ev[BATCH_SIZE + 1];
+
+ struct worker_data *data = (struct worker_data *)arg;
+ const uint8_t dev = data->dev_id;
+ const uint8_t port = data->port_id;
+ size_t fwd = 0, received = 0, tx = 0;
+
+ while (!fdata->done) {
+ uint16_t i;
+ uint16_t nb_rx = rte_event_dequeue_burst(dev, port, ev,
+ BATCH_SIZE, 0);
+
+ if (!nb_rx) {
+ rte_pause();
+ continue;
+ }
+
+ received += nb_rx;
+
+ for (i = 0; i < nb_rx; i++) {
+ rte_prefetch0(ev[i + 1].mbuf);
+ if (ev[i].sched_type == RTE_SCHED_TYPE_ATOMIC) {
+
+ worker_tx_pkt(ev[i].mbuf);
+ ev[i].op = RTE_EVENT_OP_RELEASE;
+ tx++;
+ } else
+ worker_fwd_event(&ev[i], RTE_SCHED_TYPE_ATOMIC);
+ work();
+ }
+
+ worker_event_enqueue_burst(dev, port, ev, nb_rx);
+ fwd += nb_rx;
+ }
+
+ if (!cdata.quiet)
+ printf(" worker %u thread done. RX=%zu FWD=%zu TX=%zu\n",
+ rte_lcore_id(), received, fwd, tx);
+ return 0;
+}
+
/* Multi stage Pipeline Workers */
static int
@@ -683,6 +762,24 @@ worker_tx_opt_check(void)
}
}
+static worker_loop
+get_worker_loop_single_burst(uint8_t atq)
+{
+ if (atq)
+ return worker_do_tx_single_burst_atq;
+
+ return worker_do_tx_single_burst;
+}
+
+static worker_loop
+get_worker_loop_single_non_burst(uint8_t atq)
+{
+ if (atq)
+ return worker_do_tx_single_atq;
+
+ return worker_do_tx_single;
+}
+
static worker_loop
get_worker_loop_burst(uint8_t atq)
{
@@ -704,10 +801,12 @@ get_worker_loop_non_burst(uint8_t atq)
static worker_loop
get_worker_single_stage(bool burst)
{
+ uint8_t atq = cdata.all_type_queues ? 1 : 0;
+
if (burst)
- return worker_do_tx_single_burst;
+ return get_worker_loop_single_burst(atq);
- return worker_do_tx_single;
+ return get_worker_loop_single_non_burst(atq);
}
static worker_loop
@@ -736,5 +835,4 @@ set_worker_tx_setup_data(struct setup_data *caps, bool burst)
caps->scheduler = schedule_devices;
caps->evdev_setup = setup_eventdev_worker_tx;
caps->adptr_setup = init_rx_adapter;
-
}
--
2.15.1
^ permalink raw reply [flat|nested] 48+ messages in thread
* [dpdk-dev] [PATCH v2 13/15] examples/eventdev: add mempool size configuration
2018-01-10 11:09 ` [dpdk-dev] [PATCH v2 01/15] examples/eventdev: add Rx adapter support Pavan Nikhilesh
` (10 preceding siblings ...)
2018-01-10 11:10 ` [dpdk-dev] [PATCH v2 12/15] examples/eventdev: add atq " Pavan Nikhilesh
@ 2018-01-10 11:10 ` Pavan Nikhilesh
2018-01-10 11:10 ` [dpdk-dev] [PATCH v2 14/15] examples/eventdev_pipeline_sw_pmd: rename example Pavan Nikhilesh
` (2 subsequent siblings)
14 siblings, 0 replies; 48+ messages in thread
From: Pavan Nikhilesh @ 2018-01-10 11:10 UTC (permalink / raw)
To: gage.eads, jerin.jacobkollanukkaran, harry.van.haaren,
hemant.agrawal, liang.j.ma, santosh.shukla
Cc: dev, Pavan Nikhilesh
Add an option to configure the mempool size at run time instead of
hardcoding it to 16384 * num_ports.
Signed-off-by: Pavan Nikhilesh <pbhagavatula@caviumnetworks.com>
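An illustrative invocation overriding the default pool size with the new -m option (mask values are examples only):

    ./eventdev_pipeline_sw_pmd -r 0x1 -t 0x2 -e 0x4 -w 0xf0 -m 32768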
---
examples/eventdev_pipeline_sw_pmd/main.c | 12 ++++++++++--
examples/eventdev_pipeline_sw_pmd/pipeline_common.h | 1 +
2 files changed, 11 insertions(+), 2 deletions(-)
diff --git a/examples/eventdev_pipeline_sw_pmd/main.c b/examples/eventdev_pipeline_sw_pmd/main.c
index 2c7b02b86..bf2a04edb 100644
--- a/examples/eventdev_pipeline_sw_pmd/main.c
+++ b/examples/eventdev_pipeline_sw_pmd/main.c
@@ -144,6 +144,7 @@ static struct option long_options[] = {
{"sched-mask", required_argument, 0, 'e'},
{"cq-depth", required_argument, 0, 'c'},
{"work-cycles", required_argument, 0, 'W'},
+ {"mempool-size", required_argument, 0, 'm'},
{"queue-priority", no_argument, 0, 'P'},
{"parallel", no_argument, 0, 'p'},
{"ordered", no_argument, 0, 'o'},
@@ -173,6 +174,7 @@ usage(void)
" -p, --parallel Use parallel scheduling\n"
" -q, --quiet Minimize printed output\n"
" -a, --use-atq Use all type queues\n"
+ " -m, --mempool-size=N Dictate the mempool size\n"
" -D, --dump Print detailed statistics before exit"
"\n";
fprintf(stderr, "%s", usage_str);
@@ -193,7 +195,7 @@ parse_app_args(int argc, char **argv)
int i;
for (;;) {
- c = getopt_long(argc, argv, "r:t:e:c:w:n:f:s:paoPqDW:",
+ c = getopt_long(argc, argv, "r:t:e:c:w:n:f:s:m:paoPqDW:",
long_options, &option_index);
if (c == -1)
break;
@@ -253,6 +255,9 @@ parse_app_args(int argc, char **argv)
popcnt = __builtin_popcountll(sched_lcore_mask);
fdata->sched_single = (popcnt == 1);
break;
+ case 'm':
+ cdata.num_mbuf = (uint64_t)atol(optarg);
+ break;
default:
usage();
}
@@ -356,8 +361,11 @@ init_ports(unsigned int num_ports)
uint8_t portid;
unsigned int i;
+ if (!cdata.num_mbuf)
+ cdata.num_mbuf = 16384 * num_ports;
+
struct rte_mempool *mp = rte_pktmbuf_pool_create("packet_pool",
- /* mbufs */ 16384 * num_ports,
+ /* mbufs */ cdata.num_mbuf,
/* cache_size */ 512,
/* priv_size*/ 0,
/* data_room_size */ RTE_MBUF_DEFAULT_BUF_SIZE,
diff --git a/examples/eventdev_pipeline_sw_pmd/pipeline_common.h b/examples/eventdev_pipeline_sw_pmd/pipeline_common.h
index 66553038c..9703396f8 100644
--- a/examples/eventdev_pipeline_sw_pmd/pipeline_common.h
+++ b/examples/eventdev_pipeline_sw_pmd/pipeline_common.h
@@ -70,6 +70,7 @@ struct config_data {
unsigned int active_cores;
unsigned int num_workers;
int64_t num_packets;
+ uint64_t num_mbuf;
unsigned int num_fids;
int queue_type;
int worker_cycles;
--
2.15.1
^ permalink raw reply [flat|nested] 48+ messages in thread
* [dpdk-dev] [PATCH v2 14/15] examples/eventdev_pipeline_sw_pmd: rename example
2018-01-10 11:09 ` [dpdk-dev] [PATCH v2 01/15] examples/eventdev: add Rx adapter support Pavan Nikhilesh
` (11 preceding siblings ...)
2018-01-10 11:10 ` [dpdk-dev] [PATCH v2 13/15] examples/eventdev: add mempool size configuration Pavan Nikhilesh
@ 2018-01-10 11:10 ` Pavan Nikhilesh
2018-01-10 11:10 ` [dpdk-dev] [PATCH v2 15/15] doc: update example eventdev pipeline Pavan Nikhilesh
2018-01-16 10:35 ` [dpdk-dev] [PATCH v2 01/15] examples/eventdev: add Rx adapter support Van Haaren, Harry
14 siblings, 0 replies; 48+ messages in thread
From: Pavan Nikhilesh @ 2018-01-10 11:10 UTC (permalink / raw)
To: gage.eads, jerin.jacobkollanukkaran, harry.van.haaren,
hemant.agrawal, liang.j.ma, santosh.shukla
Cc: dev, Pavan Nikhilesh
Rename eventdev_pipeline_sw_pmd to eventdev_pipeline as it is no longer
specific to the underlying event device.
Signed-off-by: Pavan Nikhilesh <pbhagavatula@caviumnetworks.com>
---
examples/Makefile | 2 +-
examples/{eventdev_pipeline_sw_pmd => eventdev_pipeline}/Makefile | 2 +-
examples/{eventdev_pipeline_sw_pmd => eventdev_pipeline}/main.c | 0
.../{eventdev_pipeline_sw_pmd => eventdev_pipeline}/pipeline_common.h | 0
.../pipeline_worker_generic.c | 0
.../pipeline_worker_tx.c | 0
6 files changed, 2 insertions(+), 2 deletions(-)
rename examples/{eventdev_pipeline_sw_pmd => eventdev_pipeline}/Makefile (98%)
rename examples/{eventdev_pipeline_sw_pmd => eventdev_pipeline}/main.c (100%)
rename examples/{eventdev_pipeline_sw_pmd => eventdev_pipeline}/pipeline_common.h (100%)
rename examples/{eventdev_pipeline_sw_pmd => eventdev_pipeline}/pipeline_worker_generic.c (100%)
rename examples/{eventdev_pipeline_sw_pmd => eventdev_pipeline}/pipeline_worker_tx.c (100%)
diff --git a/examples/Makefile b/examples/Makefile
index 9f7974a19..a35434d74 100644
--- a/examples/Makefile
+++ b/examples/Makefile
@@ -102,6 +102,6 @@ $(info vm_power_manager requires libvirt >= 0.9.3)
endif
endif
-DIRS-y += eventdev_pipeline_sw_pmd
+DIRS-y += eventdev_pipeline
include $(RTE_SDK)/mk/rte.extsubdir.mk
diff --git a/examples/eventdev_pipeline_sw_pmd/Makefile b/examples/eventdev_pipeline/Makefile
similarity index 98%
rename from examples/eventdev_pipeline_sw_pmd/Makefile
rename to examples/eventdev_pipeline/Makefile
index 59ee9840a..893220d34 100644
--- a/examples/eventdev_pipeline_sw_pmd/Makefile
+++ b/examples/eventdev_pipeline/Makefile
@@ -38,7 +38,7 @@ RTE_TARGET ?= x86_64-native-linuxapp-gcc
include $(RTE_SDK)/mk/rte.vars.mk
# binary name
-APP = eventdev_pipeline_sw_pmd
+APP = eventdev_pipeline
# all source are stored in SRCS-y
SRCS-y := main.c
diff --git a/examples/eventdev_pipeline_sw_pmd/main.c b/examples/eventdev_pipeline/main.c
similarity index 100%
rename from examples/eventdev_pipeline_sw_pmd/main.c
rename to examples/eventdev_pipeline/main.c
diff --git a/examples/eventdev_pipeline_sw_pmd/pipeline_common.h b/examples/eventdev_pipeline/pipeline_common.h
similarity index 100%
rename from examples/eventdev_pipeline_sw_pmd/pipeline_common.h
rename to examples/eventdev_pipeline/pipeline_common.h
diff --git a/examples/eventdev_pipeline_sw_pmd/pipeline_worker_generic.c b/examples/eventdev_pipeline/pipeline_worker_generic.c
similarity index 100%
rename from examples/eventdev_pipeline_sw_pmd/pipeline_worker_generic.c
rename to examples/eventdev_pipeline/pipeline_worker_generic.c
diff --git a/examples/eventdev_pipeline_sw_pmd/pipeline_worker_tx.c b/examples/eventdev_pipeline/pipeline_worker_tx.c
similarity index 100%
rename from examples/eventdev_pipeline_sw_pmd/pipeline_worker_tx.c
rename to examples/eventdev_pipeline/pipeline_worker_tx.c
--
2.15.1
^ permalink raw reply [flat|nested] 48+ messages in thread
* [dpdk-dev] [PATCH v2 15/15] doc: update example eventdev pipeline
2018-01-10 11:09 ` [dpdk-dev] [PATCH v2 01/15] examples/eventdev: add Rx adapter support Pavan Nikhilesh
` (12 preceding siblings ...)
2018-01-10 11:10 ` [dpdk-dev] [PATCH v2 14/15] examples/eventdev_pipeline_sw_pmd: rename example Pavan Nikhilesh
@ 2018-01-10 11:10 ` Pavan Nikhilesh
2018-01-16 11:34 ` Kovacevic, Marko
2018-01-16 10:35 ` [dpdk-dev] [PATCH v2 01/15] examples/eventdev: add Rx adapter support Van Haaren, Harry
14 siblings, 1 reply; 48+ messages in thread
From: Pavan Nikhilesh @ 2018-01-10 11:10 UTC (permalink / raw)
To: gage.eads, jerin.jacobkollanukkaran, harry.van.haaren,
hemant.agrawal, liang.j.ma, santosh.shukla
Cc: dev, Pavan Nikhilesh
Remove eventdev SW PMD specific information from the document and rename
the document from eventdev_pipeline_sw_pmd to eventdev_pipeline.
Signed-off-by: Pavan Nikhilesh <pbhagavatula@caviumnetworks.com>
Acked-by: Kevin Laatz <kevin.laatz@intel.com>
---
.../{eventdev_pipeline_sw_pmd.rst => eventdev_pipeline.rst} | 6 +++---
doc/guides/sample_app_ug/index.rst | 2 +-
2 files changed, 4 insertions(+), 4 deletions(-)
rename doc/guides/sample_app_ug/{eventdev_pipeline_sw_pmd.rst => eventdev_pipeline.rst} (97%)
diff --git a/doc/guides/sample_app_ug/eventdev_pipeline_sw_pmd.rst b/doc/guides/sample_app_ug/eventdev_pipeline.rst
similarity index 97%
rename from doc/guides/sample_app_ug/eventdev_pipeline_sw_pmd.rst
rename to doc/guides/sample_app_ug/eventdev_pipeline.rst
index 01a5f9b21..ff6d2f0b0 100644
--- a/doc/guides/sample_app_ug/eventdev_pipeline_sw_pmd.rst
+++ b/doc/guides/sample_app_ug/eventdev_pipeline.rst
@@ -29,8 +29,8 @@
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-Eventdev Pipeline SW PMD Sample Application
-===========================================
+Eventdev Pipeline Sample Application
+====================================
The eventdev pipeline sample application is a sample app that demonstrates
the usage of the eventdev API using the software PMD. It shows how an
@@ -74,7 +74,7 @@ these settings is shown below:
.. code-block:: console
- ./build/eventdev_pipeline_sw_pmd --vdev event_sw0 -- -r1 -t1 -e4 -w FF00 -s4 -n0 -c32 -W1000 -D
+ ./build/eventdev_pipeline --vdev event_sw0 -- -r1 -t1 -e4 -w FF00 -s4 -n0 -c32 -W1000 -D
The application has some sanity checking built-in, so if there is a function
(eg; the RX core) which doesn't have a cpu core mask assigned, the application
diff --git a/doc/guides/sample_app_ug/index.rst b/doc/guides/sample_app_ug/index.rst
index db68ef765..6fcdeb0fb 100644
--- a/doc/guides/sample_app_ug/index.rst
+++ b/doc/guides/sample_app_ug/index.rst
@@ -74,7 +74,7 @@ Sample Applications User Guides
netmap_compatibility
ip_pipeline
test_pipeline
- eventdev_pipeline_sw_pmd
+ eventdev_pipeline
dist_app
vm_power_management
tep_termination
--
2.15.1
^ permalink raw reply [flat|nested] 48+ messages in thread
* Re: [dpdk-dev] [PATCH v2 15/15] doc: update example eventdev pipeline
2018-01-10 11:10 ` [dpdk-dev] [PATCH v2 15/15] doc: update example eventdev pipeline Pavan Nikhilesh
@ 2018-01-16 11:34 ` Kovacevic, Marko
0 siblings, 0 replies; 48+ messages in thread
From: Kovacevic, Marko @ 2018-01-16 11:34 UTC (permalink / raw)
To: 'Pavan Nikhilesh', dev
Cc: Eads, Gage, jerin.jacobkollanukkaran, Van Haaren, Harry,
hemant.agrawal, Ma, Liang J, santosh.shukla
> Signed-off-by: Pavan Nikhilesh <pbhagavatula@caviumnetworks.com>
> Acked-by: Kevin Laatz <kevin.laatz@intel.com>
> .../{eventdev_pipeline_sw_pmd.rst => eventdev_pipeline.rst} | 6 +++---
> doc/guides/sample_app_ug/index.rst | 2 +-
> 2 files changed, 4 insertions(+), 4 deletions(-) rename
> doc/guides/sample_app_ug/{eventdev_pipeline_sw_pmd.rst =>
> eventdev_pipeline.rst} (97%)
Acked-by: Marko Kovacevic <marko.kovacevic@intel.com>
^ permalink raw reply [flat|nested] 48+ messages in thread
* Re: [dpdk-dev] [PATCH v2 01/15] examples/eventdev: add Rx adapter support
2018-01-10 11:09 ` [dpdk-dev] [PATCH v2 01/15] examples/eventdev: add Rx adapter support Pavan Nikhilesh
` (13 preceding siblings ...)
2018-01-10 11:10 ` [dpdk-dev] [PATCH v2 15/15] doc: update example eventdev pipeline Pavan Nikhilesh
@ 2018-01-16 10:35 ` Van Haaren, Harry
2018-01-16 16:12 ` Jerin Jacob
14 siblings, 1 reply; 48+ messages in thread
From: Van Haaren, Harry @ 2018-01-16 10:35 UTC (permalink / raw)
To: Pavan Nikhilesh, Eads, Gage, jerin.jacobkollanukkaran,
hemant.agrawal, Ma, Liang J, santosh.shukla
Cc: dev
> From: Pavan Nikhilesh [mailto:pbhagavatula@caviumnetworks.com]
> Sent: Wednesday, January 10, 2018 11:10 AM
> To: Eads, Gage <gage.eads@intel.com>; jerin.jacobkollanukkaran@cavium.com;
> Van Haaren, Harry <harry.van.haaren@intel.com>; hemant.agrawal@nxp.com; Ma,
> Liang J <liang.j.ma@intel.com>; santosh.shukla@caviumnetworks.com
> Cc: dev@dpdk.org; Pavan Nikhilesh <pbhagavatula@caviumnetworks.com>
> Subject: [dpdk-dev] [PATCH v2 01/15] examples/eventdev: add Rx adapter
> support
>
> Use event Rx adapter for packets Rx instead of explicit producer logic.
> Use service run iter function for granular control instead of using
> dedicated service lcore.
>
> Signed-off-by: Pavan Nikhilesh <pbhagavatula@caviumnetworks.com>
> ---
>
> v2 Changes:
> - split work function into delay cycles and exchange_mac
> - add option to configure mempool size
> - remove prod_data structure (Gage)
> - simplify locks used while calling producer and scheduler (Gage)
Series-Acked-By: Harry van Haaren <harry.van.haaren@intel.com>
^ permalink raw reply [flat|nested] 48+ messages in thread
* Re: [dpdk-dev] [PATCH v2 01/15] examples/eventdev: add Rx adapter support
2018-01-16 10:35 ` [dpdk-dev] [PATCH v2 01/15] examples/eventdev: add Rx adapter support Van Haaren, Harry
@ 2018-01-16 16:12 ` Jerin Jacob
0 siblings, 0 replies; 48+ messages in thread
From: Jerin Jacob @ 2018-01-16 16:12 UTC (permalink / raw)
To: Van Haaren, Harry
Cc: Pavan Nikhilesh, Eads, Gage, jerin.jacobkollanukkaran,
hemant.agrawal, Ma, Liang J, santosh.shukla, dev
-----Original Message-----
> Date: Tue, 16 Jan 2018 10:35:35 +0000
> From: "Van Haaren, Harry" <harry.van.haaren@intel.com>
> To: Pavan Nikhilesh <pbhagavatula@caviumnetworks.com>, "Eads, Gage"
> <gage.eads@intel.com>, "jerin.jacobkollanukkaran@cavium.com"
> <jerin.jacobkollanukkaran@cavium.com>, "hemant.agrawal@nxp.com"
> <hemant.agrawal@nxp.com>, "Ma, Liang J" <liang.j.ma@intel.com>,
> "santosh.shukla@caviumnetworks.com" <santosh.shukla@caviumnetworks.com>
> CC: "dev@dpdk.org" <dev@dpdk.org>
> Subject: RE: [dpdk-dev] [PATCH v2 01/15] examples/eventdev: add Rx adapter
> support
>
> > From: Pavan Nikhilesh [mailto:pbhagavatula@caviumnetworks.com]
> > Sent: Wednesday, January 10, 2018 11:10 AM
> > To: Eads, Gage <gage.eads@intel.com>; jerin.jacobkollanukkaran@cavium.com;
> > Van Haaren, Harry <harry.van.haaren@intel.com>; hemant.agrawal@nxp.com; Ma,
> > Liang J <liang.j.ma@intel.com>; santosh.shukla@caviumnetworks.com
> > Cc: dev@dpdk.org; Pavan Nikhilesh <pbhagavatula@caviumnetworks.com>
> > Subject: [dpdk-dev] [PATCH v2 01/15] examples/eventdev: add Rx adapter
> > support
> >
> > Use event Rx adapter for packets Rx instead of explicit producer logic.
> > Use service run iter function for granular control instead of using
> > dedicated service lcore.
> >
> > Signed-off-by: Pavan Nikhilesh <pbhagavatula@caviumnetworks.com>
> > ---
> >
> > v2 Changes:
> > - split work function into delay cycles and exchange_mac
> > - add option to configure mempool size
> > - remove prod_data structure (Gage)
> > - simplify locks used while calling producer and scheduler (Gage)
>
> Series-Acked-By: Harry van Haaren <harry.van.haaren@intel.com>
Applied series to dpdk-next-eventdev/master. Thanks.
^ permalink raw reply [flat|nested] 48+ messages in thread