DPDK patches and discussions
From: "Nicolás Pernas Maradei" <nico@emutex.com>
To: dev@dpdk.org
Cc: "Nicolás Pernas Maradei" <nicolas.pernas.maradei@emutex.com>
Subject: [dpdk-dev] [PATCH v2] Fix librte_pmd_pcap driver double stop error
Date: Sat,  4 Oct 2014 23:24:17 +0100	[thread overview]
Message-ID: <1412461457-29061-1-git-send-email-nico@emutex.com> (raw)
In-Reply-To: <005307>

From: Nicolás Pernas Maradei <nicolas.pernas.maradei@emutex.com>

librte_pmd_pcap driver was opening the pcaps/interfaces only at init time and
closing them only when the port was stopped. This behaviour caused problems
(leading to a segfault) if the user stopped the port twice: the first stop
closed the pcaps/interfaces normally, but libpcap would throw an error and
crash if the already-closed pcaps/interfaces were closed again. This is fixed
by re-opening the pcaps/interfaces when the port is started, but only if they
are not already open (for example because they were opened at init time).

Signed-off-by: Nicolás Pernas Maradei <nico@emutex.com>
---
 lib/librte_pmd_pcap/rte_eth_pcap.c | 271 +++++++++++++++++++++++++++++--------
 1 file changed, 212 insertions(+), 59 deletions(-)
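
For context, the core of the fix is to make start/stop idempotent: each queue
now remembers its name/type, every close sets the handle back to NULL, and
start only re-opens a handle that is still NULL. A minimal standalone sketch
of that pattern (hypothetical demo_* names, plain libpcap, not the driver code
itself):

    #include <pcap/pcap.h>
    #include <stddef.h>

    static pcap_t *rx_pcap;                 /* NULL means "not open" */
    static char errbuf[PCAP_ERRBUF_SIZE];

    /* Start: open only if not already open, so a second start is harmless. */
    static int demo_start(const char *fname)
    {
            if (rx_pcap == NULL) {
                    rx_pcap = pcap_open_offline(fname, errbuf);
                    if (rx_pcap == NULL)
                            return -1;
            }
            return 0;
    }

    /* Stop: close and clear the handle, so a second stop never double-closes. */
    static void demo_stop(void)
    {
            if (rx_pcap != NULL) {
                    pcap_close(rx_pcap);
                    rx_pcap = NULL;
            }
    }

Calling demo_stop() twice in a row is then safe, which is exactly the double
stop scenario described above.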

diff --git a/lib/librte_pmd_pcap/rte_eth_pcap.c b/lib/librte_pmd_pcap/rte_eth_pcap.c
index 19492e8..f12d1e7 100644
--- a/lib/librte_pmd_pcap/rte_eth_pcap.c
+++ b/lib/librte_pmd_pcap/rte_eth_pcap.c
@@ -67,6 +67,8 @@ struct pcap_rx_queue {
 	struct rte_mempool *mb_pool;
 	volatile unsigned long rx_pkts;
 	volatile unsigned long err_pkts;
+	const char *name;
+	const char *type;
 };
 
 struct pcap_tx_queue {
@@ -74,27 +76,32 @@ struct pcap_tx_queue {
 	pcap_t *pcap;
 	volatile unsigned long tx_pkts;
 	volatile unsigned long err_pkts;
+	const char *name;
+	const char *type;
 };
 
 struct rx_pcaps {
 	unsigned num_of_rx;
 	pcap_t *pcaps[RTE_PMD_RING_MAX_RX_RINGS];
+	const char *names[RTE_PMD_RING_MAX_RX_RINGS];
+	const char *types[RTE_PMD_RING_MAX_RX_RINGS];
 };
 
 struct tx_pcaps {
 	unsigned num_of_tx;
 	pcap_dumper_t *dumpers[RTE_PMD_RING_MAX_TX_RINGS];
 	pcap_t *pcaps[RTE_PMD_RING_MAX_RX_RINGS];
+	const char *names[RTE_PMD_RING_MAX_RX_RINGS];
+	const char *types[RTE_PMD_RING_MAX_RX_RINGS];
 };
 
 struct pmd_internals {
+	struct pcap_rx_queue rx_queue[RTE_PMD_RING_MAX_RX_RINGS];
+	struct pcap_tx_queue tx_queue[RTE_PMD_RING_MAX_TX_RINGS];
 	unsigned nb_rx_queues;
 	unsigned nb_tx_queues;
-
 	int if_index;
-
-	struct pcap_rx_queue rx_queue[RTE_PMD_RING_MAX_RX_RINGS];
-	struct pcap_tx_queue tx_queue[RTE_PMD_RING_MAX_TX_RINGS];
+	int single_iface;
 };
 
 const char *valid_arguments[] = {
@@ -106,6 +113,10 @@ const char *valid_arguments[] = {
 	NULL
 };
 
+static int open_single_tx_pcap(const char *pcap_filename, pcap_dumper_t **dumper);
+static int open_single_rx_pcap(const char *pcap_filename, pcap_t **pcap);
+static int open_single_iface(const char *iface, pcap_t **pcap);
+
 static struct ether_addr eth_addr = { .addr_bytes = { 0, 0, 0, 0x1, 0x2, 0x3 } };
 static const char *drivername = "Pcap PMD";
 static struct rte_eth_link pmd_link = {
@@ -258,6 +269,59 @@ eth_pcap_tx(void *queue,
 static int
 eth_dev_start(struct rte_eth_dev *dev)
 {
+	unsigned i;
+	struct pmd_internals *internals = dev->data->dev_private;
+	struct pcap_tx_queue *tx;
+	struct pcap_rx_queue *rx;
+
+	/* Special iface case. Single pcap is open and shared between tx/rx. */
+	if (internals->single_iface) {
+		tx = &internals->tx_queue[0];
+		rx = &internals->rx_queue[0];
+
+		if (!tx->pcap && strcmp(tx->type, ETH_PCAP_IFACE_ARG) == 0) {
+			if (open_single_iface(tx->name, &tx->pcap) < 0)
+				return -1;
+			rx->pcap = tx->pcap;
+		}
+		goto status_up;
+	}
+
+	/* If not open already, open tx pcaps/dumpers */
+	for (i = 0; i < internals->nb_tx_queues; i++) {
+		tx = &internals->tx_queue[i];
+
+		if (!tx->dumper && strcmp(tx->type, ETH_PCAP_TX_PCAP_ARG) == 0) {
+			if (open_single_tx_pcap(tx->name, &tx->dumper) < 0)
+				return -1;
+		}
+
+		else if (!tx->pcap && strcmp(tx->type, ETH_PCAP_TX_IFACE_ARG) == 0) {
+			if (open_single_iface(tx->name, &tx->pcap) < 0)
+				return -1;
+		}
+	}
+
+	/* If not open already, open rx pcaps */
+	for (i = 0; i < internals->nb_rx_queues; i++) {
+		rx = &internals->rx_queue[i];
+
+		if (rx->pcap != NULL)
+			continue;
+
+		if (strcmp(rx->type, ETH_PCAP_RX_PCAP_ARG) == 0) {
+			if (open_single_rx_pcap(rx->name, &rx->pcap) < 0)
+				return -1;
+		}
+
+		else if (strcmp(rx->type, ETH_PCAP_RX_IFACE_ARG) == 0) {
+			if (open_single_iface(rx->name, &rx->pcap) < 0)
+				return -1;
+		}
+	}
+
+status_up:
+
 	dev->data->dev_link.link_status = 1;
 	return 0;
 }
@@ -271,19 +335,44 @@ static void
 eth_dev_stop(struct rte_eth_dev *dev)
 {
 	unsigned i;
-	pcap_dumper_t *dumper;
-	pcap_t *pcap;
 	struct pmd_internals *internals = dev->data->dev_private;
+	struct pcap_tx_queue *tx;
+	struct pcap_rx_queue *rx;
+
+	/* Special iface case. Single pcap is open and shared between tx/rx. */
+	if (internals->single_iface) {
+		tx = &internals->tx_queue[0];
+		rx = &internals->rx_queue[0];
+		pcap_close(tx->pcap);
+		tx->pcap = NULL;
+		rx->pcap = NULL;
+		goto status_down;
+	}
 
 	for (i = 0; i < internals->nb_tx_queues; i++) {
-		dumper = internals->tx_queue[i].dumper;
-		if(dumper != NULL)
-			pcap_dump_close(dumper);
-		pcap = internals->tx_queue[i].pcap;
-		if(pcap != NULL)
-			pcap_close(pcap);
+		tx = &internals->tx_queue[i];
+
+		if (tx->dumper != NULL) {
+			pcap_dump_close(tx->dumper);
+			tx->dumper = NULL;
+		}
+
+		if (tx->pcap != NULL) {
+			pcap_close(tx->pcap);
+			tx->pcap = NULL;
+		}
+	}
+
+	for (i = 0; i < internals->nb_rx_queues; i++) {
+		rx = &internals->rx_queue[i];
+
+		if (rx->pcap != NULL) {
+			pcap_close(rx->pcap);
+			rx->pcap = NULL;
+		}
 	}
 
+status_down:
 	dev->data->dev_link.link_status = 0;
 }
 
@@ -415,55 +504,79 @@ static struct eth_dev_ops ops = {
  * reference of it for use it later on.
  */
 static int
-open_rx_pcap(const char *key __rte_unused, const char *value, void *extra_args)
+open_rx_pcap(const char *key, const char *value, void *extra_args)
 {
 	unsigned i;
 	const char *pcap_filename = value;
 	struct rx_pcaps *pcaps = extra_args;
-	pcap_t *rx_pcap;
+	pcap_t *pcap = NULL;
 
 	for (i = 0; i < pcaps->num_of_rx; i++) {
-		if ((rx_pcap = pcap_open_offline(pcap_filename, errbuf)) == NULL) {
-			RTE_LOG(ERR, PMD, "Couldn't open %s: %s\n", pcap_filename, errbuf);
+		if (open_single_rx_pcap(pcap_filename, &pcap) < 0)
 			return -1;
-		}
-		pcaps->pcaps[i] = rx_pcap;
+
+		pcaps->pcaps[i] = pcap;
+		pcaps->names[i] = pcap_filename;
+		pcaps->types[i] = key;
 	}
 
 	return 0;
 }
 
+static int
+open_single_rx_pcap(const char *pcap_filename, pcap_t **pcap)
+{
+	if ((*pcap = pcap_open_offline(pcap_filename, errbuf)) == NULL) {
+		RTE_LOG(ERR, PMD, "Couldn't open %s: %s\n", pcap_filename, errbuf);
+		return -1;
+	}
+	return 0;
+}
+
 /*
  * Opens a pcap file for writing and stores a reference to it
  * for use it later on.
  */
 static int
-open_tx_pcap(const char *key __rte_unused, const char *value, void *extra_args)
+open_tx_pcap(const char *key, const char *value, void *extra_args)
 {
 	unsigned i;
 	const char *pcap_filename = value;
 	struct tx_pcaps *dumpers = extra_args;
-	pcap_t *tx_pcap;
 	pcap_dumper_t *dumper;
 
 	for (i = 0; i < dumpers->num_of_tx; i++) {
-		/*
-		 * We need to create a dummy empty pcap_t to use it
-		 * with pcap_dump_open(). We create big enough an Ethernet
-		 * pcap holder.
-		 */
-		if ((tx_pcap = pcap_open_dead(DLT_EN10MB, RTE_ETH_PCAP_SNAPSHOT_LEN))
-				== NULL) {
-			RTE_LOG(ERR, PMD, "Couldn't create dead pcap\n");
+		if (open_single_tx_pcap(pcap_filename, &dumper) < 0)
 			return -1;
-		}
 
-		/* The dumper is created using the previous pcap_t reference */
-		if ((dumper = pcap_dump_open(tx_pcap, pcap_filename)) == NULL) {
-			RTE_LOG(ERR, PMD, "Couldn't open %s for writing.\n", pcap_filename);
-			return -1;
-		}
 		dumpers->dumpers[i] = dumper;
+		dumpers->names[i] = pcap_filename;
+		dumpers->types[i] = key;
+	}
+
+	return 0;
+}
+
+static int
+open_single_tx_pcap(const char *pcap_filename, pcap_dumper_t **dumper)
+{
+	pcap_t *tx_pcap;
+	/*
+	 * We need to create a dummy empty pcap_t to use it
+	 * with pcap_dump_open(). We create big enough an Ethernet
+	 * pcap holder.
+	 */
+
+	if ((tx_pcap = pcap_open_dead(DLT_EN10MB, RTE_ETH_PCAP_SNAPSHOT_LEN))
+			== NULL) {
+		RTE_LOG(ERR, PMD, "Couldn't create dead pcap\n");
+		return -1;
+	}
+
+	/* The dumper is created using the previous pcap_t reference */
+	if ((*dumper = pcap_dump_open(tx_pcap, pcap_filename)) == NULL) {
+		RTE_LOG(ERR, PMD, "Couldn't open %s for writing.\n", pcap_filename);
+		return -1;
 	}
 
 	return 0;
@@ -488,13 +601,19 @@ open_iface_live(const char *iface, pcap_t **pcap) {
  * Opens an interface for reading and writing
  */
 static inline int
-open_rx_tx_iface(const char *key __rte_unused, const char *value, void *extra_args)
+open_rx_tx_iface(const char *key, const char *value, void *extra_args)
 {
 	const char *iface = value;
-	pcap_t **pcap = extra_args;
+	struct rx_pcaps *pcaps = extra_args;
+	pcap_t *pcap = NULL;
 
-	if(open_iface_live(iface, pcap) < 0)
+	if (open_single_iface(iface, &pcap) < 0)
 		return -1;
+
+	pcaps->pcaps[0] = pcap;
+	pcaps->names[0] = iface;
+	pcaps->types[0] = key;
+
 	return 0;
 }
 
@@ -502,7 +621,7 @@ open_rx_tx_iface(const char *key __rte_unused, const char *value, void *extra_ar
  * Opens a NIC for reading packets from it
  */
 static inline int
-open_rx_iface(const char *key __rte_unused, const char *value, void *extra_args)
+open_rx_iface(const char *key, const char *value, void *extra_args)
 {
 	unsigned i;
 	const char *iface = value;
@@ -510,9 +629,11 @@ open_rx_iface(const char *key __rte_unused, const char *value, void *extra_args)
 	pcap_t *pcap = NULL;
 
 	for (i = 0; i < pcaps->num_of_rx; i++) {
-		if(open_iface_live(iface, &pcap) < 0)
+		if (open_single_iface(iface, &pcap) < 0)
 			return -1;
 		pcaps->pcaps[i] = pcap;
+		pcaps->names[i] = iface;
+		pcaps->types[i] = key;
 	}
 
 	return 0;
@@ -521,8 +642,8 @@ open_rx_iface(const char *key __rte_unused, const char *value, void *extra_args)
 /*
  * Opens a NIC for writing packets to it
  */
-static inline int
-open_tx_iface(const char *key __rte_unused, const char *value, void *extra_args)
+static int
+open_tx_iface(const char *key, const char *value, void *extra_args)
 {
 	unsigned i;
 	const char *iface = value;
@@ -530,14 +651,26 @@ open_tx_iface(const char *key __rte_unused, const char *value, void *extra_args)
 	pcap_t *pcap;
 
 	for (i = 0; i < pcaps->num_of_tx; i++) {
-		if(open_iface_live(iface, &pcap) < 0)
+		if (open_single_iface(iface, &pcap) < 0)
 			return -1;
 		pcaps->pcaps[i] = pcap;
+		pcaps->names[i] = iface;
+		pcaps->types[i] = key;
 	}
 
 	return 0;
 }
 
+static int
+open_single_iface(const char *iface, pcap_t **pcap)
+{
+	if (open_iface_live(iface, pcap) < 0) {
+		RTE_LOG(ERR, PMD, "Couldn't open interface %s\n", iface);
+		return -1;
+	}
+
+	return 0;
+}
 
 static int
 rte_pmd_init_internals(const char *name, const unsigned nb_rx_queues,
@@ -623,9 +756,10 @@ rte_pmd_init_internals(const char *name, const unsigned nb_rx_queues,
 }
 
 static int
-rte_eth_from_pcaps_n_dumpers(const char *name, pcap_t * const rx_queues[],
+rte_eth_from_pcaps_n_dumpers(const char *name,
+		struct rx_pcaps *rx_queues,
 		const unsigned nb_rx_queues,
-		pcap_dumper_t * const tx_queues[],
+		struct tx_pcaps *tx_queues,
 		const unsigned nb_tx_queues,
 		const unsigned numa_node,
 		struct rte_kvargs *kvlist)
@@ -645,25 +779,35 @@ rte_eth_from_pcaps_n_dumpers(const char *name, pcap_t * const rx_queues[],
 		return -1;
 
 	for (i = 0; i < nb_rx_queues; i++) {
-		internals->rx_queue->pcap = rx_queues[i];
+		internals->rx_queue->pcap = rx_queues->pcaps[i];
+		internals->rx_queue->name = rx_queues->names[i];
+		internals->rx_queue->type = rx_queues->types[i];
 	}
 	for (i = 0; i < nb_tx_queues; i++) {
-		internals->tx_queue->dumper = tx_queues[i];
+		internals->tx_queue->dumper = tx_queues->dumpers[i];
+		internals->tx_queue->name = tx_queues->names[i];
+		internals->tx_queue->type = tx_queues->types[i];
 	}
 
+	/* using multiple pcaps/interfaces */
+	internals->single_iface = 0;
+
 	eth_dev->rx_pkt_burst = eth_pcap_rx;
 	eth_dev->tx_pkt_burst = eth_pcap_tx_dumper;
 
 	return 0;
 }
 
+	struct rx_pcaps pcaps;
 static int
-rte_eth_from_pcaps(const char *name, pcap_t * const rx_queues[],
+rte_eth_from_pcaps(const char *name,
+		struct rx_pcaps *rx_queues,
 		const unsigned nb_rx_queues,
-		pcap_t * const tx_queues[],
+		struct tx_pcaps *tx_queues,
 		const unsigned nb_tx_queues,
 		const unsigned numa_node,
-		struct rte_kvargs *kvlist)
+		struct rte_kvargs *kvlist,
+		int single_iface)
 {
 	struct pmd_internals *internals = NULL;
 	struct rte_eth_dev *eth_dev = NULL;
@@ -680,12 +824,19 @@ rte_eth_from_pcaps(const char *name, pcap_t * const rx_queues[],
 		return -1;
 
 	for (i = 0; i < nb_rx_queues; i++) {
-		internals->rx_queue->pcap = rx_queues[i];
+		internals->rx_queue->pcap = rx_queues->pcaps[i];
+		internals->rx_queue->name = rx_queues->names[i];
+		internals->rx_queue->type = rx_queues->types[i];
 	}
 	for (i = 0; i < nb_tx_queues; i++) {
-		internals->tx_queue->pcap = tx_queues[i];
+		internals->tx_queue->pcap = tx_queues->pcaps[i];
+		internals->tx_queue->name = tx_queues->names[i];
+		internals->tx_queue->type = tx_queues->types[i];
 	}
 
+	/* store whether we are using a single interface for rx/tx or not */
+	internals->single_iface = single_iface;
+
 	eth_dev->rx_pkt_burst = eth_pcap_rx;
 	eth_dev->tx_pkt_burst = eth_pcap_tx;
 
@@ -721,12 +872,14 @@ rte_pmd_pcap_devinit(const char *name, const char *params)
 	if (rte_kvargs_count(kvlist, ETH_PCAP_IFACE_ARG) == 1) {
 
 		ret = rte_kvargs_process(kvlist, ETH_PCAP_IFACE_ARG,
-				&open_rx_tx_iface, &pcaps.pcaps[0]);
+				&open_rx_tx_iface, &pcaps);
 		if (ret < 0)
 			return -1;
-
-		return rte_eth_from_pcaps(name, pcaps.pcaps, 1, pcaps.pcaps, 1,
-				numa_node, kvlist);
+		dumpers.pcaps[0] = pcaps.pcaps[0];
+		dumpers.names[0] = pcaps.names[0];
+		dumpers.types[0] = pcaps.types[0];
+		return rte_eth_from_pcaps(name, &pcaps, 1, &dumpers, 1,
+				numa_node, kvlist, 1);
 	}
 
 	/*
@@ -766,11 +919,11 @@ rte_pmd_pcap_devinit(const char *name, const char *params)
 		return -1;
 
 	if (using_dumpers)
-		return rte_eth_from_pcaps_n_dumpers(name, pcaps.pcaps, pcaps.num_of_rx,
-				dumpers.dumpers, dumpers.num_of_tx, numa_node, kvlist);
+		return rte_eth_from_pcaps_n_dumpers(name, &pcaps, pcaps.num_of_rx,
+				&dumpers, dumpers.num_of_tx, numa_node, kvlist);
 
-	return rte_eth_from_pcaps(name, pcaps.pcaps, pcaps.num_of_rx, dumpers.pcaps,
-			dumpers.num_of_tx, numa_node, kvlist);
+	return rte_eth_from_pcaps(name, &pcaps, pcaps.num_of_rx, &dumpers,
+			dumpers.num_of_tx, numa_node, kvlist, 0);
 
 }
 
-- 
1.9.1
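
As an aside on the open_single_tx_pcap() helper added above: libpcap's
pcap_dump_open() needs a pcap_t that only describes the link type and snapshot
length, which is why the driver keeps using pcap_open_dead() before opening
the dumper. A minimal standalone sketch of that idiom (hypothetical demo name;
65535 is an illustrative snaplen, not the driver's RTE_ETH_PCAP_SNAPSHOT_LEN):

    #include <pcap/pcap.h>
    #include <stddef.h>

    /* Open a pcap file for writing Ethernet frames via a "dead" pcap handle. */
    static pcap_dumper_t *demo_open_dumper(const char *fname)
    {
            /* Dummy handle: carries only DLT_EN10MB and the snapshot length. */
            pcap_t *dead = pcap_open_dead(DLT_EN10MB, 65535);

            if (dead == NULL)
                    return NULL;

            return pcap_dump_open(dead, fname);
    }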

Thread overview: 4+ messages
     [not found] <005307>
2014-10-04 22:24 ` Nicolás Pernas Maradei [this message]
2014-10-05 18:37   ` Nicolás Pernas Maradei
2014-10-06 14:50     ` Neil Horman
2014-10-09 19:10       ` Thomas Monjalon
