DPDK patches and discussions
From: Ferruh Yigit <ferruh.yigit@intel.com>
To: dev@dpdk.org
Cc: "Nicolás Pernas Maradei" <nicolas.pernas.maradei@emutex.com>
Subject: [dpdk-dev] [PATCH v3 1/3] pcap: remove duplicate fields in internal data struct
Date: Fri, 26 Feb 2016 15:26:53 +0000
Message-ID: <1456500415-27416-2-git-send-email-ferruh.yigit@intel.com>
In-Reply-To: <1456500415-27416-1-git-send-email-ferruh.yigit@intel.com>

1- Remove the duplicate nb_rx_queues/nb_tx_queues fields from struct
   pmd_internals and use the counts already kept in dev->data instead.
2- Move the queue setup code duplicated between rte_eth_from_pcaps() and
   rte_eth_from_pcaps_n_dumpers() into a common helper,
   rte_eth_from_pcaps_common().

Signed-off-by: Ferruh Yigit <ferruh.yigit@intel.com>
Acked-by: Nicolas Pernas Maradei <nicolas.pernas.maradei@emutex.com>
---
 drivers/net/pcap/rte_eth_pcap.c | 130 +++++++++++++++++++---------------------
 1 file changed, 61 insertions(+), 69 deletions(-)
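
As a reading aid, a minimal self-contained sketch of the pattern behind
change 1 (plain C with simplified stand-in types; not DPDK code and not
part of the patch): the queue counts live only in the shared per-device
data, and the PMD iterates over dev->data->nb_rx_queues instead of a
private copy in its internals struct.

	#include <stdio.h>

	#define MAX_QUEUES 8

	/* Simplified stand-ins for rte_eth_dev_data and pmd_internals. */
	struct dev_data {
		unsigned nb_rx_queues;	/* single source of truth */
	};

	struct internals {
		unsigned long rx_pkts[MAX_QUEUES];
		/* no duplicated nb_rx_queues copy here any more */
	};

	struct eth_dev {
		struct dev_data *data;
		struct internals *priv;
	};

	static void
	stats_reset(struct eth_dev *dev)
	{
		unsigned i;

		/* loop bound comes from the shared device data */
		for (i = 0; i < dev->data->nb_rx_queues; i++)
			dev->priv->rx_pkts[i] = 0;
	}

	int
	main(void)
	{
		struct dev_data data = { .nb_rx_queues = 2 };
		struct internals priv = { .rx_pkts = { 5, 7 } };
		struct eth_dev dev = { .data = &data, .priv = &priv };

		stats_reset(&dev);
		printf("queue 0: %lu, queue 1: %lu\n",
			priv.rx_pkts[0], priv.rx_pkts[1]);
		return 0;
	}

Change 2 applies the same de-duplication at function level: the two
constructors keep only the steps that differ and delegate the shared
parameter checks and queue setup to the new rte_eth_from_pcaps_common()
helper, as the hunks below show.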

diff --git a/drivers/net/pcap/rte_eth_pcap.c b/drivers/net/pcap/rte_eth_pcap.c
index f9230eb..c8b7dbd 100644
--- a/drivers/net/pcap/rte_eth_pcap.c
+++ b/drivers/net/pcap/rte_eth_pcap.c
@@ -103,8 +103,6 @@ struct tx_pcaps {
 struct pmd_internals {
 	struct pcap_rx_queue rx_queue[RTE_PMD_RING_MAX_RX_RINGS];
 	struct pcap_tx_queue tx_queue[RTE_PMD_RING_MAX_TX_RINGS];
-	unsigned nb_rx_queues;
-	unsigned nb_tx_queues;
 	int if_index;
 	int single_iface;
 };
@@ -396,7 +394,7 @@ eth_dev_start(struct rte_eth_dev *dev)
 	}
 
 	/* If not open already, open tx pcaps/dumpers */
-	for (i = 0; i < internals->nb_tx_queues; i++) {
+	for (i = 0; i < dev->data->nb_tx_queues; i++) {
 		tx = &internals->tx_queue[i];
 
 		if (!tx->dumper && strcmp(tx->type, ETH_PCAP_TX_PCAP_ARG) == 0) {
@@ -411,7 +409,7 @@ eth_dev_start(struct rte_eth_dev *dev)
 	}
 
 	/* If not open already, open rx pcaps */
-	for (i = 0; i < internals->nb_rx_queues; i++) {
+	for (i = 0; i < dev->data->nb_rx_queues; i++) {
 		rx = &internals->rx_queue[i];
 
 		if (rx->pcap != NULL)
@@ -457,7 +455,7 @@ eth_dev_stop(struct rte_eth_dev *dev)
 		goto status_down;
 	}
 
-	for (i = 0; i < internals->nb_tx_queues; i++) {
+	for (i = 0; i < dev->data->nb_tx_queues; i++) {
 		tx = &internals->tx_queue[i];
 
 		if (tx->dumper != NULL) {
@@ -471,7 +469,7 @@ eth_dev_stop(struct rte_eth_dev *dev)
 		}
 	}
 
-	for (i = 0; i < internals->nb_rx_queues; i++) {
+	for (i = 0; i < dev->data->nb_rx_queues; i++) {
 		rx = &internals->rx_queue[i];
 
 		if (rx->pcap != NULL) {
@@ -499,8 +497,8 @@ eth_dev_info(struct rte_eth_dev *dev,
 	dev_info->if_index = internals->if_index;
 	dev_info->max_mac_addrs = 1;
 	dev_info->max_rx_pktlen = (uint32_t) -1;
-	dev_info->max_rx_queues = (uint16_t)internals->nb_rx_queues;
-	dev_info->max_tx_queues = (uint16_t)internals->nb_tx_queues;
+	dev_info->max_rx_queues = dev->data->nb_rx_queues;
+	dev_info->max_tx_queues = dev->data->nb_tx_queues;
 	dev_info->min_rx_bufsize = 0;
 	dev_info->pci_dev = NULL;
 }
@@ -515,16 +513,16 @@ eth_stats_get(struct rte_eth_dev *dev,
 	unsigned long tx_packets_err_total = 0;
 	const struct pmd_internals *internal = dev->data->dev_private;
 
-	for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS && i < internal->nb_rx_queues;
-			i++) {
+	for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS &&
+			i < dev->data->nb_rx_queues; i++) {
 		igb_stats->q_ipackets[i] = internal->rx_queue[i].rx_pkts;
 		igb_stats->q_ibytes[i] = internal->rx_queue[i].rx_bytes;
 		rx_packets_total += igb_stats->q_ipackets[i];
 		rx_bytes_total += igb_stats->q_ibytes[i];
 	}
 
-	for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS && i < internal->nb_tx_queues;
-			i++) {
+	for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS &&
+			i < dev->data->nb_tx_queues; i++) {
 		igb_stats->q_opackets[i] = internal->tx_queue[i].tx_pkts;
 		igb_stats->q_obytes[i] = internal->tx_queue[i].tx_bytes;
 		igb_stats->q_errors[i] = internal->tx_queue[i].err_pkts;
@@ -545,11 +543,11 @@ eth_stats_reset(struct rte_eth_dev *dev)
 {
 	unsigned i;
 	struct pmd_internals *internal = dev->data->dev_private;
-	for (i = 0; i < internal->nb_rx_queues; i++) {
+	for (i = 0; i < dev->data->nb_rx_queues; i++) {
 		internal->rx_queue[i].rx_pkts = 0;
 		internal->rx_queue[i].rx_bytes = 0;
 	}
-	for (i = 0; i < internal->nb_tx_queues; i++) {
+	for (i = 0; i < dev->data->nb_tx_queues; i++) {
 		internal->tx_queue[i].tx_pkts = 0;
 		internal->tx_queue[i].tx_bytes = 0;
 		internal->tx_queue[i].err_pkts = 0;
@@ -840,9 +838,6 @@ rte_pmd_init_internals(const char *name, const unsigned nb_rx_queues,
 	/* NOTE: we'll replace the data element, of originally allocated eth_dev
 	 * so the rings are local per-process */
 
-	(*internals)->nb_rx_queues = nb_rx_queues;
-	(*internals)->nb_tx_queues = nb_tx_queues;
-
 	if (pair == NULL)
 		(*internals)->if_index = 0;
 	else
@@ -860,11 +855,11 @@ rte_pmd_init_internals(const char *name, const unsigned nb_rx_queues,
 
 	(*eth_dev)->data = data;
 	(*eth_dev)->dev_ops = &ops;
-	(*eth_dev)->data->dev_flags = RTE_ETH_DEV_DETACHABLE;
 	(*eth_dev)->driver = NULL;
-	(*eth_dev)->data->kdrv = RTE_KDRV_NONE;
-	(*eth_dev)->data->drv_name = drivername;
-	(*eth_dev)->data->numa_node = numa_node;
+	data->dev_flags = RTE_ETH_DEV_DETACHABLE;
+	data->kdrv = RTE_KDRV_NONE;
+	data->drv_name = drivername;
+	data->numa_node = numa_node;
 
 	return 0;
 
@@ -876,16 +871,12 @@ error:
 }
 
 static int
-rte_eth_from_pcaps_n_dumpers(const char *name,
-		struct rx_pcaps *rx_queues,
-		const unsigned nb_rx_queues,
-		struct tx_pcaps *tx_queues,
-		const unsigned nb_tx_queues,
-		const unsigned numa_node,
-		struct rte_kvargs *kvlist)
+rte_eth_from_pcaps_common(const char *name, struct rx_pcaps *rx_queues,
+		const unsigned nb_rx_queues, struct tx_pcaps *tx_queues,
+		const unsigned nb_tx_queues, const unsigned numa_node,
+		struct rte_kvargs *kvlist, struct pmd_internals **internals,
+		struct rte_eth_dev **eth_dev)
 {
-	struct pmd_internals *internals = NULL;
-	struct rte_eth_dev *eth_dev = NULL;
 	unsigned i;
 
 	/* do some parameter checking */
@@ -895,28 +886,51 @@ rte_eth_from_pcaps_n_dumpers(const char *name,
 		return -1;
 
 	if (rte_pmd_init_internals(name, nb_rx_queues, nb_tx_queues, numa_node,
-			&internals, &eth_dev, kvlist) < 0)
+			internals, eth_dev, kvlist) < 0)
 		return -1;
 
 	for (i = 0; i < nb_rx_queues; i++) {
-		internals->rx_queue[i].pcap = rx_queues->pcaps[i];
-		snprintf(internals->rx_queue[i].name,
-			sizeof(internals->rx_queue[i].name), "%s",
+		(*internals)->rx_queue[i].pcap = rx_queues->pcaps[i];
+		snprintf((*internals)->rx_queue[i].name,
+			sizeof((*internals)->rx_queue[i].name), "%s",
 			rx_queues->names[i]);
-		snprintf(internals->rx_queue[i].type,
-			sizeof(internals->rx_queue[i].type), "%s",
+		snprintf((*internals)->rx_queue[i].type,
+			sizeof((*internals)->rx_queue[i].type), "%s",
 			rx_queues->types[i]);
 	}
 	for (i = 0; i < nb_tx_queues; i++) {
-		internals->tx_queue[i].dumper = tx_queues->dumpers[i];
-		snprintf(internals->tx_queue[i].name,
-			sizeof(internals->tx_queue[i].name), "%s",
+		(*internals)->tx_queue[i].dumper = tx_queues->dumpers[i];
+		snprintf((*internals)->tx_queue[i].name,
+			sizeof((*internals)->tx_queue[i].name), "%s",
 			tx_queues->names[i]);
-		snprintf(internals->tx_queue[i].type,
-			sizeof(internals->tx_queue[i].type), "%s",
+		snprintf((*internals)->tx_queue[i].type,
+			sizeof((*internals)->tx_queue[i].type), "%s",
 			tx_queues->types[i]);
 	}
 
+	return 0;
+}
+
+static int
+rte_eth_from_pcaps_n_dumpers(const char *name,
+		struct rx_pcaps *rx_queues,
+		const unsigned nb_rx_queues,
+		struct tx_pcaps *tx_queues,
+		const unsigned nb_tx_queues,
+		const unsigned numa_node,
+		struct rte_kvargs *kvlist)
+{
+	struct pmd_internals *internals = NULL;
+	struct rte_eth_dev *eth_dev = NULL;
+	int ret;
+
+	ret = rte_eth_from_pcaps_common(name, rx_queues, nb_rx_queues,
+			tx_queues, nb_tx_queues, numa_node, kvlist,
+			&internals, &eth_dev);
+
+	if (ret < 0)
+		return ret;
+
 	/* using multiple pcaps/interfaces */
 	internals->single_iface = 0;
 
@@ -938,36 +952,14 @@ rte_eth_from_pcaps(const char *name,
 {
 	struct pmd_internals *internals = NULL;
 	struct rte_eth_dev *eth_dev = NULL;
-	unsigned i;
-
-	/* do some parameter checking */
-	if (rx_queues == NULL && nb_rx_queues > 0)
-		return -1;
-	if (tx_queues == NULL && nb_tx_queues > 0)
-		return -1;
+	int ret;
 
-	if (rte_pmd_init_internals(name, nb_rx_queues, nb_tx_queues, numa_node,
-			&internals, &eth_dev, kvlist) < 0)
-		return -1;
+	ret = rte_eth_from_pcaps_common(name, rx_queues, nb_rx_queues,
+			tx_queues, nb_tx_queues, numa_node, kvlist,
+			&internals, &eth_dev);
 
-	for (i = 0; i < nb_rx_queues; i++) {
-		internals->rx_queue[i].pcap = rx_queues->pcaps[i];
-		snprintf(internals->rx_queue[i].name,
-			sizeof(internals->rx_queue[i].name), "%s",
-			rx_queues->names[i]);
-		snprintf(internals->rx_queue[i].type,
-			sizeof(internals->rx_queue[i].type), "%s",
-			rx_queues->types[i]);
-	}
-	for (i = 0; i < nb_tx_queues; i++) {
-		internals->tx_queue[i].dumper = tx_queues->dumpers[i];
-		snprintf(internals->tx_queue[i].name,
-			sizeof(internals->tx_queue[i].name), "%s",
-			tx_queues->names[i]);
-		snprintf(internals->tx_queue[i].type,
-			sizeof(internals->tx_queue[i].type), "%s",
-			tx_queues->types[i]);
-	}
+	if (ret < 0)
+		return ret;
 
 	/* store wether we are using a single interface for rx/tx or not */
 	internals->single_iface = single_iface;
-- 
2.5.0
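
For completeness, a condensed sketch of the refactor shape introduced by
change 2 (hypothetical names, not the driver's real signatures): two thin
wrappers share one helper that performs the common validation and setup
and returns its results through an out-parameter.

	#include <stdio.h>

	struct ctx {
		int nb_queues;
	};

	/* common checks and setup; fills the out-parameter on success */
	static int
	setup_common(int nb_queues, struct ctx *out)
	{
		if (nb_queues <= 0)
			return -1;
		out->nb_queues = nb_queues;
		return 0;
	}

	static int
	create_variant_a(int nb_queues, struct ctx *out)
	{
		if (setup_common(nb_queues, out) < 0)
			return -1;
		/* only the steps specific to this variant remain here */
		return 0;
	}

	static int
	create_variant_b(int nb_queues, struct ctx *out)
	{
		if (setup_common(nb_queues, out) < 0)
			return -1;
		return 0;
	}

	int
	main(void)
	{
		struct ctx c;

		printf("a: %d, b: %d\n",
			create_variant_a(2, &c), create_variant_b(0, &c));
		return 0;
	}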


Thread overview: 28+ messages
2016-01-29 17:16 [dpdk-dev] [PATCH 0/3] clean-up on virtual PMDs Ferruh Yigit
2016-01-29 17:16 ` [dpdk-dev] [PATCH 1/3] pcap: remove duplicate fields in internal data struct Ferruh Yigit
2016-01-29 17:16 ` [dpdk-dev] [PATCH 2/3] ring: " Ferruh Yigit
2016-02-17 17:36   ` Bruce Richardson
2016-02-18  9:50     ` Ferruh Yigit
2016-01-29 17:16 ` [dpdk-dev] [PATCH 3/3] null: " Ferruh Yigit
2016-02-03  6:21   ` Tetsuya Mukawa
2016-02-18 11:26 ` [dpdk-dev] [PATCH v2 0/3] clean-up on virtual PMDs Ferruh Yigit
2016-02-18 11:26   ` [dpdk-dev] [PATCH v2 1/3] pcap: remove duplicate fields in internal data struct Ferruh Yigit
2016-02-22  9:54     ` Nicolas Pernas Maradei
2016-02-18 11:26   ` [dpdk-dev] [PATCH v2 2/3] ring: " Ferruh Yigit
2016-02-22  9:55     ` Nicolas Pernas Maradei
2016-02-23 15:26     ` Bruce Richardson
2016-02-23 15:58       ` Ferruh Yigit
2016-02-23 16:06         ` Bruce Richardson
2016-02-18 11:26   ` [dpdk-dev] [PATCH v2 3/3] null: " Ferruh Yigit
2016-02-22  9:56     ` Nicolas Pernas Maradei
2016-02-26 15:26   ` [dpdk-dev] [PATCH v3 0/3] clean-up on virtual PMDs Ferruh Yigit
2016-02-26 15:26     ` Ferruh Yigit [this message]
2016-02-26 15:26     ` [dpdk-dev] [PATCH v3 2/3] ring: rename fields in internal data struct Ferruh Yigit
2016-02-26 16:35       ` Bruce Richardson
2016-02-26 15:26     ` [dpdk-dev] [PATCH v3 3/3] null: remove duplicate " Ferruh Yigit
2016-02-26 16:58     ` [dpdk-dev] [PATCH v4 0/3] clean-up on virtual PMDs Ferruh Yigit
2016-02-26 16:58       ` [dpdk-dev] [PATCH v4 1/3] pcap: remove duplicate fields in internal data struct Ferruh Yigit
2016-02-26 16:58       ` [dpdk-dev] [PATCH v4 2/3] ring: variable rename and code cleanup Ferruh Yigit
2016-03-10 11:11         ` Bruce Richardson
2016-02-26 16:58       ` [dpdk-dev] [PATCH v4 3/3] null: remove duplicate fields in internal data struct Ferruh Yigit
2016-03-10 11:12       ` [dpdk-dev] [PATCH v4 0/3] clean-up on virtual PMDs Bruce Richardson
