patches for DPDK stable branches
 help / color / mirror / Atom feed
* [dpdk-stable] [PATCH 1/3] examples/packet_ordering: free resources on exit
@ 2020-06-15  8:41 Sarosh Arif
  2020-06-15  8:41 ` [dpdk-stable] [PATCH 2/3] examples/rxtx_callbacks: " Sarosh Arif
  2020-06-15  8:41 ` [dpdk-stable] [PATCH 3/3] examples/skeleton: " Sarosh Arif
  0 siblings, 2 replies; 3+ messages in thread
From: Sarosh Arif @ 2020-06-15  8:41 UTC (permalink / raw)
  To: dev, bruce.richardson, john.mcnamara, reshma.pattan, vipin.varghese
  Cc: stable, Sarosh Arif

Resources should be freed when the application exits.

Bugzilla ID: 437
Signed-off-by: Sarosh Arif <sarosh.arif@emumba.com>
---
 examples/packet_ordering/main.c | 51 ++++++++++++++++++++++++++-------
 1 file changed, 41 insertions(+), 10 deletions(-)

diff --git a/examples/packet_ordering/main.c b/examples/packet_ordering/main.c
index b5fc6c54b..49326d015 100644
--- a/examples/packet_ordering/main.c
+++ b/examples/packet_ordering/main.c
@@ -653,6 +653,17 @@ tx_thread(struct rte_ring *ring_in)
 	return 0;
 }
 
+static void
+stop_and_close_eth_dev(uint16_t port_id)
+{
+	RTE_ETH_FOREACH_DEV(port_id) {
+		printf("Closing port %d...", port_id);
+		rte_eth_dev_stop(port_id);
+		rte_eth_dev_close(port_id);
+		printf(" Done\n");
+	}
+}
+
 int
 main(int argc, char **argv)
 {
@@ -683,25 +694,31 @@ main(int argc, char **argv)
 		rte_exit(EXIT_FAILURE, "Invalid packet_ordering arguments\n");
 
 	/* Check if we have enought cores */
-	if (rte_lcore_count() < 3)
+	if (rte_lcore_count() < 3) {
+		stop_and_close_eth_dev(port_id);
 		rte_exit(EXIT_FAILURE, "Error, This application needs at "
 				"least 3 logical cores to run:\n"
 				"1 lcore for packet RX\n"
 				"1 lcore for packet TX\n"
 				"and at least 1 lcore for worker threads\n");
+	}
 
 	nb_ports = rte_eth_dev_count_avail();
 	if (nb_ports == 0)
-		rte_exit(EXIT_FAILURE, "Error: no ethernet ports detected\n");
-	if (nb_ports != 1 && (nb_ports & 1))
+		rte_exit(EXIT_FAILURE, "Error: no ethernet ports detected\n");
+	if (nb_ports != 1 && (nb_ports & 1)) {
+		stop_and_close_eth_dev(port_id);
 		rte_exit(EXIT_FAILURE, "Error: number of ports must be even, except "
 				"when using a single port\n");
+	}
 
 	mbuf_pool = rte_pktmbuf_pool_create("mbuf_pool", MBUF_PER_POOL,
 			MBUF_POOL_CACHE_SIZE, 0, RTE_MBUF_DEFAULT_BUF_SIZE,
 			rte_socket_id());
-	if (mbuf_pool == NULL)
+	if (mbuf_pool == NULL) {
+		stop_and_close_eth_dev(port_id);
 		rte_exit(EXIT_FAILURE, "%s\n", rte_strerror(rte_errno));
+	}
 
 	nb_ports_available = nb_ports;
 
@@ -716,12 +733,15 @@ main(int argc, char **argv)
 		/* init port */
 		printf("Initializing port %u... done\n", port_id);
 
-		if (configure_eth_port(port_id) != 0)
+		if (configure_eth_port(port_id) != 0) {
+			stop_and_close_eth_dev(port_id);
 			rte_exit(EXIT_FAILURE, "Cannot initialize port %"PRIu8"\n",
 					port_id);
+		}
 	}
 
 	if (!nb_ports_available) {
+		stop_and_close_eth_dev(port_id);
 		rte_exit(EXIT_FAILURE,
 			"All available ports are disabled. Please set portmask.\n");
 	}
@@ -729,19 +749,25 @@ main(int argc, char **argv)
 	/* Create rings for inter core communication */
 	rx_to_workers = rte_ring_create("rx_to_workers", RING_SIZE, rte_socket_id(),
 			RING_F_SP_ENQ);
-	if (rx_to_workers == NULL)
+	if (rx_to_workers == NULL) {
+		stop_and_close_eth_dev(port_id);
 		rte_exit(EXIT_FAILURE, "%s\n", rte_strerror(rte_errno));
+	}
 
 	workers_to_tx = rte_ring_create("workers_to_tx", RING_SIZE, rte_socket_id(),
 			RING_F_SC_DEQ);
-	if (workers_to_tx == NULL)
+	if (workers_to_tx == NULL) {
+		stop_and_close_eth_dev(port_id);
 		rte_exit(EXIT_FAILURE, "%s\n", rte_strerror(rte_errno));
+	}
 
 	if (!disable_reorder) {
 		send_args.buffer = rte_reorder_create("PKT_RO", rte_socket_id(),
 				REORDER_BUFFER_SIZE);
-		if (send_args.buffer == NULL)
+		if (send_args.buffer == NULL) {
+			stop_and_close_eth_dev(port_id);
 			rte_exit(EXIT_FAILURE, "%s\n", rte_strerror(rte_errno));
+		}
 	}
 
 	last_lcore_id   = get_last_lcore_id();
@@ -769,12 +795,17 @@ main(int argc, char **argv)
 
 	/* Start rx_thread() on the master core */
 	rx_thread(rx_to_workers);
-
+
 	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
-		if (rte_eal_wait_lcore(lcore_id) < 0)
+		if (rte_eal_wait_lcore(lcore_id) < 0) {
+			stop_and_close_eth_dev(port_id);
+			rte_eal_cleanup();
 			return -1;
+		}
 	}
 
 	print_stats();
+	stop_and_close_eth_dev(port_id);
+	rte_eal_cleanup();
 	return 0;
 }
-- 
2.17.1


^ permalink raw reply	[flat|nested] 3+ messages in thread

end of thread, other threads:[~2020-06-15  8:44 UTC | newest]

Thread overview: 3+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2020-06-15  8:41 [dpdk-stable] [PATCH 1/3] examples/packet_ordering: free resources on exit Sarosh Arif
2020-06-15  8:41 ` [dpdk-stable] [PATCH 2/3] examples/rxtx_callbacks: " Sarosh Arif
2020-06-15  8:41 ` [dpdk-stable] [PATCH 3/3] examples/skeleton: " Sarosh Arif

patches for DPDK stable branches

This inbox may be cloned and mirrored by anyone:

	git clone --mirror https://inbox.dpdk.org/stable/0 stable/git/0.git

	# If you have public-inbox 1.1+ installed, you may
	# initialize and index your mirror using the following commands:
	public-inbox-init -V2 stable stable/ https://inbox.dpdk.org/stable \
		stable@dpdk.org
	public-inbox-index stable

Example config snippet for mirrors.
Newsgroup available over NNTP:
	nntp://inbox.dpdk.org/inbox.dpdk.stable


AGPL code for this site: git clone https://public-inbox.org/public-inbox.git