DPDK patches and discussions
 help / color / mirror / Atom feed
From: <pbhagavatula@marvell.com>
To: <jerinj@marvell.com>, <akhil.goyal@nxp.com>,
	Marko Kovacevic <marko.kovacevic@intel.com>,
	Ori Kam <orika@mellanox.com>,
	Bruce Richardson <bruce.richardson@intel.com>,
	Radu Nicolau <radu.nicolau@intel.com>,
	"Tomasz Kantecki" <tomasz.kantecki@intel.com>
Cc: <dev@dpdk.org>, Pavan Nikhilesh <pbhagavatula@marvell.com>,
	"Sunil Kumar Kori" <skori@marvell.com>
Subject: [dpdk-dev] [PATCH 06/11] examples/l3fwd: add event eth Rx/Tx adapter setup
Date: Thu, 26 Sep 2019 15:35:53 +0530	[thread overview]
Message-ID: <20190926100558.24348-7-pbhagavatula@marvell.com> (raw)
In-Reply-To: <20190926100558.24348-1-pbhagavatula@marvell.com>

From: Pavan Nikhilesh <pbhagavatula@marvell.com>

Add event eth Rx/Tx adapter setup for both generic and internal port
event device pipelines.

Signed-off-by: Sunil Kumar Kori <skori@marvell.com>
Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
---
 examples/l3fwd/l3fwd_eventdev.c               |  3 +
 examples/l3fwd/l3fwd_eventdev.h               | 13 +++
 examples/l3fwd/l3fwd_eventdev_generic.c       | 99 +++++++++++++++++++
 examples/l3fwd/l3fwd_eventdev_internal_port.c | 80 +++++++++++++++
 4 files changed, 195 insertions(+)

diff --git a/examples/l3fwd/l3fwd_eventdev.c b/examples/l3fwd/l3fwd_eventdev.c
index f5ac3ccce..031705b68 100644
--- a/examples/l3fwd/l3fwd_eventdev.c
+++ b/examples/l3fwd/l3fwd_eventdev.c
@@ -327,4 +327,7 @@ l3fwd_eventdev_resource_setup(struct rte_eth_conf *port_conf)
 
 	/* Event port configuration */
 	evdev_rsrc->ops.event_port_setup();
+
+	/* Rx/Tx adapters configuration */
+	evdev_rsrc->ops.adapter_setup(ethdev_count);
 }
diff --git a/examples/l3fwd/l3fwd_eventdev.h b/examples/l3fwd/l3fwd_eventdev.h
index 2640d6cec..127bb7f42 100644
--- a/examples/l3fwd/l3fwd_eventdev.h
+++ b/examples/l3fwd/l3fwd_eventdev.h
@@ -7,6 +7,7 @@
 
 #include <rte_common.h>
 #include <rte_eventdev.h>
+#include <rte_event_eth_rx_adapter.h>
 #include <rte_event_eth_tx_adapter.h>
 #include <rte_service.h>
 #include <rte_spinlock.h>
@@ -40,6 +41,16 @@ struct l3fwd_eventdev_ports {
 	rte_spinlock_t lock;
 };
 
+struct l3fwd_eventdev_rx_adptr {
+	uint8_t	nb_rx_adptr;
+	uint8_t *rx_adptr;
+};
+
+struct l3fwd_eventdev_tx_adptr {
+	uint8_t	nb_tx_adptr;
+	uint8_t *tx_adptr;
+};
+
 struct l3fwd_eventdev_setup_ops {
 	event_queue_setup_cb event_queue_setup;
 	event_port_setup_cb event_port_setup;
@@ -50,6 +61,8 @@ struct l3fwd_eventdev_setup_ops {
 
 struct l3fwd_eventdev_resources {
 	struct rte_event_port_conf def_p_conf;
+	struct l3fwd_eventdev_rx_adptr rx_adptr;
+	struct l3fwd_eventdev_tx_adptr tx_adptr;
 	uint8_t disable_implicit_release;
 	struct l3fwd_eventdev_setup_ops ops;
 	struct rte_mempool * (*pkt_pool)[NB_SOCKETS];
diff --git a/examples/l3fwd/l3fwd_eventdev_generic.c b/examples/l3fwd/l3fwd_eventdev_generic.c
index 4aec0e403..659a152b6 100644
--- a/examples/l3fwd/l3fwd_eventdev_generic.c
+++ b/examples/l3fwd/l3fwd_eventdev_generic.c
@@ -115,9 +115,108 @@ l3fwd_event_queue_setup_generic(uint16_t ethdev_count,
 	evdev_rsrc->evq.event_q_id[event_q_id] = event_q_id;
 }
 
+static void
+l3fwd_rx_tx_adapter_setup_generic(uint16_t ethdev_count)
+{
+	struct l3fwd_eventdev_resources *evdev_rsrc = l3fwd_get_eventdev_rsrc();
+	struct rte_event_eth_rx_adapter_queue_conf eth_q_conf = {
+		.rx_queue_flags = 0,
+		.ev = {
+			.queue_id = 0,
+			.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
+		}
+	};
+	uint8_t event_d_id = evdev_rsrc->event_d_id;
+	uint8_t rx_adptr_id = 0;
+	uint8_t tx_adptr_id = 0;
+	uint8_t tx_port_id = 0;
+	int32_t ret, i;
+
+	/* Rx adapter setup */
+	evdev_rsrc->rx_adptr.nb_rx_adptr = 1;
+	evdev_rsrc->rx_adptr.rx_adptr = (uint8_t *)malloc(sizeof(uint8_t) *
+					evdev_rsrc->rx_adptr.nb_rx_adptr);
+	if (!evdev_rsrc->rx_adptr.rx_adptr) {
+		free(evdev_rsrc->evp.event_p_id);
+		free(evdev_rsrc->evq.event_q_id);
+		rte_exit(EXIT_FAILURE,
+			 "failed to allocate memory for Rx adapter");
+	}
+
+	ret = rte_event_eth_rx_adapter_create(rx_adptr_id, event_d_id,
+					      &evdev_rsrc->def_p_conf);
+	if (ret)
+		rte_exit(EXIT_FAILURE, "failed to create rx adapter");
+
+	eth_q_conf.ev.sched_type = evdev_rsrc->sync_mode;
+	for (i = 0; i < ethdev_count; i++) {
+		/* Configure user requested sync mode */
+		eth_q_conf.ev.queue_id = evdev_rsrc->evq.event_q_id[i];
+		ret = rte_event_eth_rx_adapter_queue_add(rx_adptr_id, i, -1,
+							 &eth_q_conf);
+		if (ret)
+			rte_exit(EXIT_FAILURE,
+				 "Failed to add queues to Rx adapter");
+	}
+
+	ret = rte_event_eth_rx_adapter_start(rx_adptr_id);
+	if (ret)
+		rte_exit(EXIT_FAILURE, "Rx adapter[%d] start failed",
+			 rx_adptr_id);
+
+	evdev_rsrc->rx_adptr.rx_adptr[0] = rx_adptr_id;
+
+	/* Tx adapter setup */
+	evdev_rsrc->tx_adptr.nb_tx_adptr = 1;
+	evdev_rsrc->tx_adptr.tx_adptr = (uint8_t *)malloc(sizeof(uint8_t) *
+					evdev_rsrc->tx_adptr.nb_tx_adptr);
+	if (!evdev_rsrc->tx_adptr.tx_adptr) {
+		free(evdev_rsrc->rx_adptr.rx_adptr);
+		free(evdev_rsrc->evp.event_p_id);
+		free(evdev_rsrc->evq.event_q_id);
+		rte_exit(EXIT_FAILURE,
+			 "failed to allocate memory for Tx adapter");
+	}
+
+	ret = rte_event_eth_tx_adapter_create(tx_adptr_id, event_d_id,
+					      &evdev_rsrc->def_p_conf);
+	if (ret)
+		rte_exit(EXIT_FAILURE, "failed to create tx adapter[%d]",
+			 tx_adptr_id);
+
+	for (i = 0; i < ethdev_count; i++) {
+		ret = rte_event_eth_tx_adapter_queue_add(tx_adptr_id, i, -1);
+		if (ret)
+			rte_exit(EXIT_FAILURE,
+				 "failed to add queues to Tx adapter");
+	}
+
+	ret = rte_event_eth_tx_adapter_event_port_get(tx_adptr_id, &tx_port_id);
+	if (ret)
+		rte_exit(EXIT_FAILURE,
+			 "Failed to get Tx adapter port id: %d\n", ret);
+
+	ret = rte_event_port_link(event_d_id, tx_port_id,
+				  &evdev_rsrc->evq.event_q_id[
+					evdev_rsrc->evq.nb_queues - 1],
+				  NULL, 1);
+	if (ret != 1)
+		rte_exit(EXIT_FAILURE,
+			 "Unable to link Tx adapter port to Tx queue:err = %d",
+			 ret);
+
+	ret = rte_event_eth_tx_adapter_start(tx_adptr_id);
+	if (ret)
+		rte_exit(EXIT_FAILURE, "Tx adapter[%d] start failed",
+			 tx_adptr_id);
+
+	evdev_rsrc->tx_adptr.tx_adptr[0] = tx_adptr_id;
+}
+
 void
 l3fwd_eventdev_set_generic_ops(struct l3fwd_eventdev_setup_ops *ops)
 {
 	ops->event_queue_setup = l3fwd_event_queue_setup_generic;
 	ops->event_port_setup = l3fwd_event_port_setup_generic;
+	ops->adapter_setup = l3fwd_rx_tx_adapter_setup_generic;
 }
diff --git a/examples/l3fwd/l3fwd_eventdev_internal_port.c b/examples/l3fwd/l3fwd_eventdev_internal_port.c
index 363e37899..811c99983 100644
--- a/examples/l3fwd/l3fwd_eventdev_internal_port.c
+++ b/examples/l3fwd/l3fwd_eventdev_internal_port.c
@@ -109,9 +109,89 @@ l3fwd_event_queue_setup_internal_port(uint16_t ethdev_count,
 	}
 }
 
+static void
+l3fwd_rx_tx_adapter_setup_internal_port(uint16_t ethdev_count)
+{
+	struct l3fwd_eventdev_resources *evdev_rsrc = l3fwd_get_eventdev_rsrc();
+	struct rte_event_eth_rx_adapter_queue_conf eth_q_conf = {
+		.rx_queue_flags = 0,
+		.ev = {
+			.queue_id = 0,
+			.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
+		}
+	};
+	uint8_t event_d_id = evdev_rsrc->event_d_id;
+	int32_t ret, i;
+
+	evdev_rsrc->rx_adptr.nb_rx_adptr = ethdev_count;
+	evdev_rsrc->rx_adptr.rx_adptr = (uint8_t *)malloc(sizeof(uint8_t) *
+					evdev_rsrc->rx_adptr.nb_rx_adptr);
+	if (!evdev_rsrc->rx_adptr.rx_adptr) {
+		free(evdev_rsrc->evp.event_p_id);
+		free(evdev_rsrc->evq.event_q_id);
+		rte_exit(EXIT_FAILURE,
+			 "failed to allocate memory for Rx adapter");
+	}
+
+	for (i = 0; i < ethdev_count; i++) {
+		ret = rte_event_eth_rx_adapter_create(i, event_d_id,
+						&evdev_rsrc->def_p_conf);
+		if (ret)
+			rte_exit(EXIT_FAILURE,
+				 "failed to create rx adapter[%d]", i);
+
+		/* Configure user requested sync mode */
+		eth_q_conf.ev.queue_id = evdev_rsrc->evq.event_q_id[i];
+		eth_q_conf.ev.sched_type = evdev_rsrc->sync_mode;
+		ret = rte_event_eth_rx_adapter_queue_add(i, i, -1, &eth_q_conf);
+		if (ret)
+			rte_exit(EXIT_FAILURE,
+				 "Failed to add queues to Rx adapter");
+
+		ret = rte_event_eth_rx_adapter_start(i);
+		if (ret)
+			rte_exit(EXIT_FAILURE,
+				 "Rx adapter[%d] start failed", i);
+
+		evdev_rsrc->rx_adptr.rx_adptr[i] = i;
+	}
+
+	evdev_rsrc->tx_adptr.nb_tx_adptr = ethdev_count;
+	evdev_rsrc->tx_adptr.tx_adptr = (uint8_t *)malloc(sizeof(uint8_t) *
+					evdev_rsrc->tx_adptr.nb_tx_adptr);
+	if (!evdev_rsrc->tx_adptr.tx_adptr) {
+		free(evdev_rsrc->rx_adptr.rx_adptr);
+		free(evdev_rsrc->evp.event_p_id);
+		free(evdev_rsrc->evq.event_q_id);
+		rte_exit(EXIT_FAILURE,
+			 "failed to allocate memory for Tx adapter");
+	}
+
+	for (i = 0; i < ethdev_count; i++) {
+		ret = rte_event_eth_tx_adapter_create(i, event_d_id,
+						&evdev_rsrc->def_p_conf);
+		if (ret)
+			rte_exit(EXIT_FAILURE,
+				 "failed to create tx adapter[%d]", i);
+
+		ret = rte_event_eth_tx_adapter_queue_add(i, i, -1);
+		if (ret)
+			rte_exit(EXIT_FAILURE,
+				 "failed to add queues to Tx adapter");
+
+		ret = rte_event_eth_tx_adapter_start(i);
+		if (ret)
+			rte_exit(EXIT_FAILURE,
+				 "Tx adapter[%d] start failed", i);
+
+		evdev_rsrc->tx_adptr.tx_adptr[i] = i;
+	}
+}
+
 void
 l3fwd_eventdev_set_internal_port_ops(struct l3fwd_eventdev_setup_ops *ops)
 {
 	ops->event_queue_setup = l3fwd_event_queue_setup_internal_port;
 	ops->event_port_setup = l3fwd_event_port_setup_internal_port;
+	ops->adapter_setup = l3fwd_rx_tx_adapter_setup_internal_port;
 }
-- 
2.17.1


  parent reply	other threads:[~2019-09-26 10:07 UTC|newest]

Thread overview: 18+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2019-09-26 10:05 [dpdk-dev] [PATCH 00/11] example/l3fwd: introduce event device support pbhagavatula
2019-09-26 10:05 ` [dpdk-dev] [PATCH 01/11] examples/l3fwd: add framework for event device pbhagavatula
2019-09-26 10:05 ` [dpdk-dev] [PATCH 02/11] examples/l3fwd: split pipelines based on capability pbhagavatula
2019-09-26 10:05 ` [dpdk-dev] [PATCH 03/11] examples/l3fwd: add event device configuration pbhagavatula
2019-09-26 10:05 ` [dpdk-dev] [PATCH 04/11] examples/l3fwd: add ethdev setup based on eventdev pbhagavatula
2019-09-26 10:05 ` [dpdk-dev] [PATCH 05/11] examples/l3fwd: add event port and queue setup pbhagavatula
2019-09-26 10:05 ` pbhagavatula [this message]
2019-09-26 10:05 ` [dpdk-dev] [PATCH 07/11] examples/l3fwd: add service core setup based on caps pbhagavatula
2019-09-26 10:05 ` [dpdk-dev] [PATCH 08/11] examples/l3fwd: add event lpm main loop pbhagavatula
2019-09-26 10:05 ` [dpdk-dev] [PATCH 09/11] examples/l3fwd: add event em " pbhagavatula
2019-09-27 17:29   ` Stephen Hemminger
2019-09-27 17:30   ` Stephen Hemminger
2019-09-26 10:05 ` [dpdk-dev] [PATCH 10/11] examples/l3fwd: add graceful teardown for eventdevice pbhagavatula
2019-09-26 10:05 ` [dpdk-dev] [PATCH 11/11] doc: update l3fwd user guide to support eventdev pbhagavatula
2019-09-26 10:10 ` [dpdk-dev] [PATCH 00/11] example/l3fwd: introduce event device support Ananyev, Konstantin
2019-09-27  7:28   ` Pavan Nikhilesh Bhagavatula
2019-09-27 12:59     ` Ananyev, Konstantin
2019-11-15  7:00       ` Thomas Monjalon

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20190926100558.24348-7-pbhagavatula@marvell.com \
    --to=pbhagavatula@marvell.com \
    --cc=akhil.goyal@nxp.com \
    --cc=bruce.richardson@intel.com \
    --cc=dev@dpdk.org \
    --cc=jerinj@marvell.com \
    --cc=marko.kovacevic@intel.com \
    --cc=orika@mellanox.com \
    --cc=radu.nicolau@intel.com \
    --cc=skori@marvell.com \
    --cc=tomasz.kantecki@intel.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).