From: Anoob Joseph <anoobj@marvell.com>
To: Akhil Goyal <akhil.goyal@nxp.com>, Radu Nicolau <radu.nicolau@intel.com>
Cc: Anoob Joseph <anoobj@marvell.com>,
	Thomas Monjalon <thomas@monjalon.net>,
	 Jerin Jacob <jerinj@marvell.com>,
	Narayana Prasad <pathreya@marvell.com>, <dev@dpdk.org>,
	Lukasz Bartosik <lbartosik@marvell.com>
Subject: [dpdk-dev] [RFC PATCH 07/13] examples/ipsec-secgw: add support for internal ports
Date: Wed, 9 Oct 2019 20:40:10 +0530
Message-ID: <1570633816-4706-8-git-send-email-anoobj@marvell.com>
In-Reply-To: <1570633816-4706-1-git-send-email-anoobj@marvell.com>

Add support for Rx and Tx internal ports. When internal ports are
available, a packet received on an eth port can be forwarded to an
event queue by HW without any software intervention. The same applies
on the Tx side, where a packet sent to an event queue can be forwarded
by HW to an eth port without any software intervention.

Signed-off-by: Anoob Joseph <anoobj@marvell.com>
Signed-off-by: Lukasz Bartosik <lbartosik@marvell.com>
---
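Note (not part of the patch): below is a minimal, self-contained sketch
of the capability probe described above, i.e. how an application can
decide whether Rx/Tx internal ports can be used with a given event
device. It assumes EAL and the ethdevs are already initialized; the
function name check_internal_ports() and the use of event device id 0
are illustrative only.

#include <stdbool.h>
#include <rte_ethdev.h>
#include <rte_event_eth_rx_adapter.h>
#include <rte_event_eth_tx_adapter.h>

/*
 * Return true only if every available eth port can both deliver packets
 * to (Rx) and accept packets from (Tx) event device 'evdev_id' without a
 * service core, i.e. both adapters report the INTERNAL_PORT capability.
 */
static bool
check_internal_ports(uint8_t evdev_id)
{
	uint16_t port_id;
	uint32_t caps;

	RTE_ETH_FOREACH_DEV(port_id) {
		caps = 0;
		rte_event_eth_rx_adapter_caps_get(evdev_id, port_id, &caps);
		if (!(caps & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT))
			return false;

		caps = 0;
		rte_event_eth_tx_adapter_caps_get(evdev_id, port_id, &caps);
		if (!(caps & RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT))
			return false;
	}

	return true;
}

When check_internal_ports(0) returns true, the helper changes below drop
the dedicated Rx/Tx eth cores and the extra atomic Tx event queue;
otherwise the helper falls back to the service-core based Rx/Tx adapters.
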
 examples/ipsec-secgw/event_helper.c | 234 ++++++++++++++++++++++++++++--------
 examples/ipsec-secgw/event_helper.h |  11 ++
 2 files changed, 195 insertions(+), 50 deletions(-)

diff --git a/examples/ipsec-secgw/event_helper.c b/examples/ipsec-secgw/event_helper.c
index 6993092..858b855 100644
--- a/examples/ipsec-secgw/event_helper.c
+++ b/examples/ipsec-secgw/event_helper.c
@@ -102,7 +102,38 @@ eh_get_eventdev_params(struct eventmode_conf *em_conf,
 	return &(em_conf->eventdev_config[i]);
 }
 static inline bool
-eh_dev_has_burst_mode(uint8_t dev_id)
+eh_dev_has_rx_internal_port(uint8_t eventdev_id)
+{
+	int j;
+	bool flag = true;
+
+	RTE_ETH_FOREACH_DEV(j) {
+		uint32_t caps = 0;
+
+		rte_event_eth_rx_adapter_caps_get(eventdev_id, j, &caps);
+		if (!(caps & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT))
+			flag = false;
+	}
+	return flag;
+}
+
+static inline bool
+eh_dev_has_tx_internal_port(uint8_t eventdev_id)
+{
+	int j;
+	bool flag = true;
+
+	RTE_ETH_FOREACH_DEV(j) {
+		uint32_t caps = 0;
+
+		rte_event_eth_tx_adapter_caps_get(eventdev_id, j, &caps);
+		if (!(caps & RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT))
+			flag = false;
+	}
+	return flag;
+}
+
+static inline bool eh_dev_has_burst_mode(uint8_t dev_id)
 {
 	struct rte_event_dev_info dev_info;
 
@@ -127,6 +158,8 @@ eh_set_default_conf_eventdev(struct eventmode_conf *em_conf)
 {
 	int i, ret;
 	int nb_eventdev;
+	int nb_eth_dev;
+	int lcore_count;
 	struct eventdev_params *eventdev_config;
 	struct rte_event_dev_info dev_info;
 
@@ -138,6 +171,17 @@ eh_set_default_conf_eventdev(struct eventmode_conf *em_conf)
 		return -EINVAL;
 	}
 
+	/* Get the number of eth devs */
+	nb_eth_dev = rte_eth_dev_count_avail();
+
+	if (nb_eth_dev == 0) {
+		EH_LOG_ERR("No eth devices detected");
+		return -EINVAL;
+	}
+
+	/* Get the number of lcores */
+	lcore_count = rte_lcore_count();
+
 	for (i = 0; i < nb_eventdev; i++) {
 
 		/* Get the event dev conf */
@@ -163,13 +207,19 @@ eh_set_default_conf_eventdev(struct eventmode_conf *em_conf)
 		eventdev_config->nb_eventqueue = dev_info.max_event_queues;
 		eventdev_config->nb_eventport = dev_info.max_event_ports;
 		eventdev_config->ev_queue_mode =
-				RTE_EVENT_QUEUE_CFG_SINGLE_LINK;
+				RTE_EVENT_QUEUE_CFG_ALL_TYPES;
 
-		/* One port is required for eth Rx adapter */
-		eventdev_config->nb_eventport -= 1;
+		/* Check if there are more queues than required */
+		if (eventdev_config->nb_eventqueue > nb_eth_dev + 1) {
+			/* One queue is reserved for Tx */
+			eventdev_config->nb_eventqueue = nb_eth_dev + 1;
+		}
 
-		/* One port is reserved for eth Tx adapter */
-		eventdev_config->nb_eventport -= 1;
+		/* Check if there are more ports than required */
+		if (eventdev_config->nb_eventport > lcore_count) {
+			/* One port per lcore is enough */
+			eventdev_config->nb_eventport = lcore_count;
+		}
 
 		/* Update the number of eventdevs */
 		em_conf->nb_eventdev++;
@@ -178,6 +228,42 @@ eh_set_default_conf_eventdev(struct eventmode_conf *em_conf)
 	return 0;
 }
 
+static void
+eh_do_capability_check(struct eventmode_conf *em_conf)
+{
+	struct eventdev_params *eventdev_config;
+	uint32_t eventdev_id;
+	int all_internal_ports = 1;
+	int i;
+
+	for (i = 0; i < em_conf->nb_eventdev; i++) {
+
+		/* Get the event dev conf */
+		eventdev_config = &(em_conf->eventdev_config[i]);
+		eventdev_id = eventdev_config->eventdev_id;
+
+		/* Check if event device has internal port for Rx & Tx */
+		if (eh_dev_has_rx_internal_port(eventdev_id) &&
+		    eh_dev_has_tx_internal_port(eventdev_id)) {
+			eventdev_config->all_internal_ports = 1;
+		} else {
+			all_internal_ports = 0;
+		}
+	}
+
+	/*
+	 * If Rx & Tx internal ports are supported by all event devices then
+	 * eth cores won't be required. Override the eth core mask requested
+	 * and decrement number of event queues by one as it won't be needed
+	 * for Tx.
+	 */
+	if (all_internal_ports) {
+		rte_bitmap_reset(em_conf->eth_core_mask);
+		for (i = 0; i < em_conf->nb_eventdev; i++)
+			em_conf->eventdev_config[i].nb_eventqueue--;
+	}
+}
+
 static int
 eh_set_default_conf_link(struct eventmode_conf *em_conf)
 {
@@ -254,9 +340,12 @@ eh_set_default_conf_rx_adapter(struct eventmode_conf *em_conf)
 	int adapter_id;
 	int eventdev_id;
 	int conn_id;
+	int nb_eventqueue;
 	struct rx_adapter_conf *adapter;
 	struct rx_adapter_connection_info *conn;
 	struct eventdev_params *eventdev_config;
+	bool rx_internal_port = true;
+	uint32_t caps = 0;
 
 	/* Create one adapter with all eth queues mapped to event queues 1:1 */
 
@@ -281,7 +370,14 @@ eh_set_default_conf_rx_adapter(struct eventmode_conf *em_conf)
 	/* Set adapter conf */
 	adapter->eventdev_id = eventdev_id;
 	adapter->adapter_id = adapter_id;
-	adapter->rx_core_id = eh_get_next_eth_core(em_conf);
+
+	/*
+	 * If event device does not have internal ports for passing
+	 * packets then one queue is reserved for Tx path
+	 */
+	nb_eventqueue = eventdev_config->all_internal_ports ?
+			eventdev_config->nb_eventqueue :
+			eventdev_config->nb_eventqueue - 1;
 
 	/*
 	 * All queues of one eth device (port) will be mapped to one event
@@ -290,10 +386,10 @@ eh_set_default_conf_rx_adapter(struct eventmode_conf *em_conf)
 	 */
 
 	/* Make sure there is enough event queues for 1:1 mapping */
-	if (nb_eth_dev > eventdev_config->nb_eventqueue) {
+	if (nb_eth_dev > nb_eventqueue) {
 		EH_LOG_ERR("Not enough event queues for 1:1 mapping "
 			"[eth devs: %d, event queues: %d]\n",
-			nb_eth_dev, eventdev_config->nb_eventqueue);
+			nb_eth_dev, nb_eventqueue);
 		return -EINVAL;
 	}
 
@@ -316,11 +412,24 @@ eh_set_default_conf_rx_adapter(struct eventmode_conf *em_conf)
 		/* Add all eth queues of one eth port to one event queue */
 		conn->ethdev_rx_qid = -1;
 
+		/* Get Rx adapter capabilities */
+		rte_event_eth_rx_adapter_caps_get(eventdev_id, i, &caps);
+		if (!(caps & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT))
+			rx_internal_port = false;
+
 		/* Update no of connections */
 		adapter->nb_connections++;
 
 	}
 
+	if (rx_internal_port) {
+		/* Rx core is not required */
+		adapter->rx_core_id = -1;
+	} else {
+		/* Rx core is required */
+		adapter->rx_core_id = eh_get_next_eth_core(em_conf);
+	}
+
 	/* We have setup one adapter */
 	em_conf->nb_rx_adapter = 1;
 
@@ -338,6 +447,8 @@ eh_set_default_conf_tx_adapter(struct eventmode_conf *em_conf)
 	struct eventdev_params *eventdev_config;
 	struct tx_adapter_conf *tx_adapter;
 	struct tx_adapter_connection_info *conn;
+	bool tx_internal_port = true;
+	uint32_t caps = 0;
 
 	/*
 	 * Create one Tx adapter with all eth queues mapped to event queues
@@ -366,22 +477,6 @@ eh_set_default_conf_tx_adapter(struct eventmode_conf *em_conf)
 	tx_adapter->eventdev_id = eventdev_id;
 	tx_adapter->adapter_id = adapter_id;
 
-	/* TODO: Tx core is required only when internal port is not present */
-
-	tx_adapter->tx_core_id = eh_get_next_eth_core(em_conf);
-
-	/*
-	 * Application would need to use one event queue per adapter for
-	 * submitting packets for Tx. Reserving the last queue available
-	 * and decrementing the total available event queues for this
-	 */
-
-	/* Queue numbers start at 0 */
-	tx_adapter->tx_ev_queue = eventdev_config->nb_eventqueue - 1;
-
-	/* Update the number of event queues available in eventdev */
-	eventdev_config->nb_eventqueue--;
-
 	/*
 	 * All Tx queues of the eth device (port) will be mapped to the event
 	 * device.
@@ -412,10 +507,30 @@ eh_set_default_conf_tx_adapter(struct eventmode_conf *em_conf)
 		/* Add all eth tx queues to adapter */
 		conn->ethdev_tx_qid = -1;
 
+		/* Get Tx adapter capabilities */
+		rte_event_eth_tx_adapter_caps_get(eventdev_id, i, &caps);
+		if (!(caps & RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT))
+			tx_internal_port = false;
+
 		/* Update no of connections */
 		tx_adapter->nb_connections++;
 	}
 
+	if (tx_internal_port) {
+		/* Tx core is not required */
+		tx_adapter->tx_core_id = -1;
+	} else {
+		/* Tx core is required */
+		tx_adapter->tx_core_id = eh_get_next_eth_core(em_conf);
+
+		/*
+		 * Application would need to use one event queue per adapter for
+		 * submitting packets for Tx. Reserving the last queue available
+		 */
+		/* Queue numbers start at 0 */
+		tx_adapter->tx_ev_queue = eventdev_config->nb_eventqueue - 1;
+	}
+
 	/* We have setup one adapter */
 	em_conf->nb_tx_adapter = 1;
 	return 0;
@@ -441,6 +556,9 @@ eh_validate_conf(struct eventmode_conf *em_conf)
 			return ret;
 	}
 
+	/* Perform capability check for the selected event devices */
+	eh_do_capability_check(em_conf);
+
 	/*
 	 * See if links are specified. Else generate a default conf for
 	 * the event ports used.
@@ -500,10 +618,6 @@ eh_initialize_eventdev(struct eventmode_conf *em_conf)
 		/* Get the number of queues */
 		nb_eventqueue = eventdev_config->nb_eventqueue;
 
-		/* One queue is reserved for the final stage (doing eth tx) */
-		/* TODO handles only one Tx adapter. Fix this */
-		nb_eventqueue += 1;
-
 		/* Reset the default conf */
 		memset(&evdev_default_conf, 0,
 			sizeof(struct rte_event_dev_info));
@@ -550,11 +664,13 @@ eh_initialize_eventdev(struct eventmode_conf *em_conf)
 					eventdev_config->ev_queue_mode;
 			/*
 			 * All queues need to be set with sched_type as
-			 * schedule type for the application stage. One queue
-			 * would be reserved for the final eth tx stage. This
-			 * will be an atomic queue.
+			 * schedule type for the application stage. One
+			 * queue would be reserved for the final eth tx
+			 * stage if event device does not have internal
+			 * ports. This will be an atomic queue.
 			 */
-			if (j == nb_eventqueue-1) {
+			if (!eventdev_config->all_internal_ports &&
+			    j == nb_eventqueue-1) {
 				eventq_conf.schedule_type =
 					RTE_SCHED_TYPE_ATOMIC;
 			} else {
@@ -669,10 +785,6 @@ eh_rx_adapter_configure(struct eventmode_conf *em_conf,
 	}
 
 	/* Setup various connections in the adapter */
-
-	queue_conf.rx_queue_flags =
-			RTE_EVENT_ETH_RX_ADAPTER_QUEUE_FLOW_ID_VALID;
-
 	for (j = 0; j < adapter->nb_connections; j++) {
 		/* Get connection */
 		conn = &(adapter->conn[j]);
@@ -680,9 +792,7 @@ eh_rx_adapter_configure(struct eventmode_conf *em_conf,
 		/* Setup queue conf */
 		queue_conf.ev.queue_id = conn->eventq_id;
 		queue_conf.ev.sched_type = em_conf->ext_params.sched_type;
-
-		/* Set flow ID as ethdev ID */
-		queue_conf.ev.flow_id = conn->ethdev_id;
+		queue_conf.ev.event_type = RTE_EVENT_TYPE_ETHDEV;
 
 		/* Add queue to the adapter */
 		ret = rte_event_eth_rx_adapter_queue_add(
@@ -881,6 +991,12 @@ eh_find_worker(uint32_t lcore_id, struct eh_conf *conf,
 
 	/* Populate the curr_conf with the capabilities */
 
+	/* Check for Tx internal port */
+	if (eh_dev_has_tx_internal_port(eventdev_id))
+		curr_conf.cap.tx_internal_port = EH_TX_TYPE_INTERNAL_PORT;
+	else
+		curr_conf.cap.tx_internal_port = EH_TX_TYPE_NO_INTERNAL_PORT;
+
 	/* Check for burst mode */
 	if (eh_dev_has_burst_mode(eventdev_id))
 		curr_conf.cap.burst = EH_RX_TYPE_BURST;
@@ -1057,6 +1173,18 @@ eh_tx_adapter_configure(struct eventmode_conf *em_conf,
 		}
 	}
 
+	/*
+	 * Check if a Tx core is assigned. If no Tx core is assigned, the
+	 * adapter has an internal port for submitting packets for Tx, so
+	 * the Tx event queue & port setup is not required.
+	 */
+	if (adapter->tx_core_id == (uint32_t) (-1)) {
+		/* Internal port is present */
+		goto skip_tx_queue_port_setup;
+	}
+
+	/* Setup Tx queue & port */
+
 	/* Get event port used by the adapter */
 	ret = rte_event_eth_tx_adapter_event_port_get(
 			adapter->adapter_id,
@@ -1067,11 +1195,6 @@ eh_tx_adapter_configure(struct eventmode_conf *em_conf,
 	}
 
 	/*
-	 * TODO: event queue for Tx adapter is required only if the
-	 * INTERNAL PORT is not present.
-	 */
-
-	/*
 	 * Tx event queue would be reserved for Tx adapter. Need to unlink
 	 * this queue from all other ports
 	 *
@@ -1081,6 +1204,7 @@ eh_tx_adapter_configure(struct eventmode_conf *em_conf,
 				      &(adapter->tx_ev_queue), 1);
 	}
 
+	/* Link Tx event queue to Tx port */
 	ret = rte_event_port_link(
 			eventdev_id,
 			tx_port_id,
@@ -1105,6 +1229,7 @@ eh_tx_adapter_configure(struct eventmode_conf *em_conf,
 	 */
 	rte_service_set_runstate_mapped_check(service_id, 0);
 
+skip_tx_queue_port_setup:
 	/* Start adapter */
 	ret = rte_event_eth_tx_adapter_start(adapter->adapter_id);
 	if (ret) {
@@ -1189,13 +1314,22 @@ eh_display_rx_adapter_conf(struct eventmode_conf *em_conf)
 
 	for (i = 0; i < nb_rx_adapter; i++) {
 		adapter = &(em_conf->rx_adapter[i]);
-		EH_LOG_INFO(
-			"\tRx adaper ID: %-2d\tConnections: %-2d\tEvent dev ID: %-2d"
-			"\tRx core: %-2d",
+		sprintf(print_buf,
+			"\tRx adaper ID: %-2d\tConnections: %-2d\tEvent dev ID: %-2d",
 			adapter->adapter_id,
 			adapter->nb_connections,
-			adapter->eventdev_id,
-			adapter->rx_core_id);
+			adapter->eventdev_id);
+		if (adapter->rx_core_id == (uint32_t)-1)
+			sprintf(print_buf + strlen(print_buf),
+				"\tRx core: %-2s", "[INTERNAL PORT]");
+		else if (adapter->rx_core_id == RTE_MAX_LCORE)
+			sprintf(print_buf + strlen(print_buf),
+				"\tRx core: %-2s", "[NONE]");
+		else
+			sprintf(print_buf + strlen(print_buf),
+				"\tRx core: %-2d", adapter->rx_core_id);
+
+		EH_LOG_INFO("%s", print_buf);
 
 		for (j = 0; j < adapter->nb_connections; j++) {
 			conn = &(adapter->conn[j]);
diff --git a/examples/ipsec-secgw/event_helper.h b/examples/ipsec-secgw/event_helper.h
index 1d5a087..a66bcb3 100644
--- a/examples/ipsec-secgw/event_helper.h
+++ b/examples/ipsec-secgw/event_helper.h
@@ -76,12 +76,21 @@ enum eh_rx_types {
 	EH_RX_TYPE_BURST
 };
 
+/**
+ * Event mode packet tx types
+ */
+enum eh_tx_types {
+	EH_TX_TYPE_INTERNAL_PORT = 0,
+	EH_TX_TYPE_NO_INTERNAL_PORT
+};
+
 /* Event dev params */
 struct eventdev_params {
 	uint8_t eventdev_id;
 	uint8_t nb_eventqueue;
 	uint8_t nb_eventport;
 	uint8_t ev_queue_mode;
+	uint8_t all_internal_ports;
 };
 
 /**
@@ -193,6 +202,8 @@ struct eh_app_worker_params {
 		struct {
 			uint64_t burst : 1;
 			/**< Specify status of rx type burst */
+			uint64_t tx_internal_port : 1;
+			/**< Specify whether tx internal port is available */
 		};
 		uint64_t u64;
 	} cap;
-- 
2.7.4

