DPDK patches and discussions
From: David Marchand <david.marchand@6wind.com>
To: dev@dpdk.org
Subject: [dpdk-dev] [PATCH v3 03/20] ixgbe: indent logs sections
Date: Wed, 17 Sep 2014 15:46:35 +0200	[thread overview]
Message-ID: <1410961612-8571-4-git-send-email-david.marchand@6wind.com> (raw)
In-Reply-To: <1410961612-8571-1-git-send-email-david.marchand@6wind.com>

Prepare for the next commit: indent the sections where log messages will be
modified, so that the next patch is only about \n changes.

Signed-off-by: David Marchand <david.marchand@6wind.com>
---
 lib/librte_pmd_ixgbe/ixgbe_ethdev.c |   51 +++++++++++++++++------------------
 lib/librte_pmd_ixgbe/ixgbe_rxtx.c   |   34 ++++++++++++-----------
 2 files changed, 42 insertions(+), 43 deletions(-)
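
Note for reviewers: a minimal before/after sketch of the indentation
convention applied throughout this patch, taken from the first hunk below.
Continuation arguments are re-aligned under the opening parenthesis of
PMD_INIT_LOG(), so that the follow-up patch touching the trailing \n only
needs to change the format-string line.

    /* Before: continuation arguments overflow the line. */
    PMD_INIT_LOG(INFO, "Setting port %d, %s queue_id %d to stat index %d\n",
                 (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX", queue_id, stat_idx);

    /* After: wrapped and aligned with the opening parenthesis. */
    PMD_INIT_LOG(INFO, "Setting port %d, %s queue_id %d to stat index %d\n",
                 (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX",
                 queue_id, stat_idx);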

diff --git a/lib/librte_pmd_ixgbe/ixgbe_ethdev.c b/lib/librte_pmd_ixgbe/ixgbe_ethdev.c
index f914405..71b964a 100644
--- a/lib/librte_pmd_ixgbe/ixgbe_ethdev.c
+++ b/lib/librte_pmd_ixgbe/ixgbe_ethdev.c
@@ -548,7 +548,8 @@ ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
 		return -ENOSYS;
 
 	PMD_INIT_LOG(INFO, "Setting port %d, %s queue_id %d to stat index %d\n",
-		     (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX", queue_id, stat_idx);
+		     (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX",
+		     queue_id, stat_idx);
 
 	n = (uint8_t)(queue_id / NB_QMAP_FIELDS_PER_QSM_REG);
 	if (n >= IXGBE_NB_STAT_MAPPING_REGS) {
@@ -574,8 +575,9 @@ ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
 
 	PMD_INIT_LOG(INFO, "Set port %d, %s queue_id %d to stat index %d\n"
 		     "%s[%d] = 0x%08x\n",
-		     (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX", queue_id, stat_idx,
-		     is_rx ? "RQSMR" : "TQSM",n, is_rx ? stat_mappings->rqsmr[n] : stat_mappings->tqsm[n]);
+		     (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX",
+		     queue_id, stat_idx, is_rx ? "RQSMR" : "TQSM", n,
+		     is_rx ? stat_mappings->rqsmr[n] : stat_mappings->tqsm[n]);
 
 	/* Now write the mapping in the appropriate register */
 	if (is_rx) {
@@ -849,8 +851,7 @@ eth_ixgbe_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
 	IXGBE_WRITE_FLUSH(hw);
 
 	if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present)
-		PMD_INIT_LOG(DEBUG,
-			     "MAC: %d, PHY: %d, SFP+: %d<n",
+		PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d, SFP+: %d<n",
 			     (int) hw->mac.type, (int) hw->phy.type,
 			     (int) hw->phy.sfp_type);
 	else
@@ -1038,8 +1039,8 @@ eth_ixgbevf_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
 	}
 
 	PMD_INIT_LOG(DEBUG, "\nport %d vendorID=0x%x deviceID=0x%x mac.type=%s\n",
-			 eth_dev->data->port_id, pci_dev->id.vendor_id, pci_dev->id.device_id,
-			 "ixgbe_mac_82599_vf");
+		     eth_dev->data->port_id, pci_dev->id.vendor_id,
+		     pci_dev->id.device_id, "ixgbe_mac_82599_vf");
 
 	return 0;
 }
@@ -1418,8 +1419,8 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
 	if ((dev->data->dev_conf.link_duplex != ETH_LINK_AUTONEG_DUPLEX) &&
 			(dev->data->dev_conf.link_duplex != ETH_LINK_FULL_DUPLEX)) {
 		PMD_INIT_LOG(ERR, "Invalid link_duplex (%hu) for port %hhu\n",
-				dev->data->dev_conf.link_duplex,
-				dev->data->port_id);
+			     dev->data->dev_conf.link_duplex,
+			     dev->data->port_id);
 		return -EINVAL;
 	}
 
@@ -1491,8 +1492,8 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
 		break;
 	default:
 		PMD_INIT_LOG(ERR, "Invalid link_speed (%hu) for port %hhu\n",
-				dev->data->dev_conf.link_speed,
-				dev->data->port_id);
+			     dev->data->dev_conf.link_speed,
+			     dev->data->port_id);
 		goto error;
 	}
 
@@ -1598,10 +1599,8 @@ ixgbe_dev_set_link_up(struct rte_eth_dev *dev)
 #ifdef RTE_NIC_BYPASS
 		if (hw->device_id == IXGBE_DEV_ID_82599_BYPASS) {
 			/* Not suported in bypass mode */
-			PMD_INIT_LOG(ERR,
-				"\nSet link up is not supported "
-				"by device id 0x%x\n",
-				hw->device_id);
+			PMD_INIT_LOG(ERR, "\nSet link up is not supported "
+				     "by device id 0x%x\n", hw->device_id);
 			return -ENOTSUP;
 		}
 #endif
@@ -1611,7 +1610,7 @@ ixgbe_dev_set_link_up(struct rte_eth_dev *dev)
 	}
 
 	PMD_INIT_LOG(ERR, "\nSet link up is not supported by device id 0x%x\n",
-		hw->device_id);
+		     hw->device_id);
 	return -ENOTSUP;
 }
 
@@ -1627,10 +1626,8 @@ ixgbe_dev_set_link_down(struct rte_eth_dev *dev)
 #ifdef RTE_NIC_BYPASS
 		if (hw->device_id == IXGBE_DEV_ID_82599_BYPASS) {
 			/* Not suported in bypass mode */
-			PMD_INIT_LOG(ERR,
-				"\nSet link down is not supported "
-				"by device id 0x%x\n",
-				 hw->device_id);
+			PMD_INIT_LOG(ERR, "\nSet link down is not supported "
+				     "by device id 0x%x\n", hw->device_id);
 			return -ENOTSUP;
 		}
 #endif
@@ -1639,9 +1636,8 @@ ixgbe_dev_set_link_down(struct rte_eth_dev *dev)
 		return 0;
 	}
 
-	PMD_INIT_LOG(ERR,
-		"\nSet link down is not supported by device id 0x%x\n",
-		 hw->device_id);
+	PMD_INIT_LOG(ERR, "\nSet link down is not supported by device id 0x%x\n",
+		     hw->device_id);
 	return -ENOTSUP;
 }
 
@@ -2599,7 +2595,7 @@ ixgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_pfc_conf *p
 	 */
 	max_high_water = (rx_buf_size - ETHER_MAX_LEN) >> IXGBE_RXPBSIZE_SHIFT;
 	if ((pfc_conf->fc.high_water > max_high_water) ||
-		(pfc_conf->fc.high_water <= pfc_conf->fc.low_water)) {
+	    (pfc_conf->fc.high_water <= pfc_conf->fc.low_water)) {
 		PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB\n");
 		PMD_INIT_LOG(ERR, "High_water must <=  0x%x\n", max_high_water);
 		return (-EINVAL);
@@ -2778,7 +2774,7 @@ ixgbevf_dev_configure(struct rte_eth_dev *dev)
 	struct rte_eth_conf* conf = &dev->data->dev_conf;
 
 	PMD_INIT_LOG(DEBUG, "\nConfigured Virtual Function port id: %d\n",
-		dev->data->port_id);
+		     dev->data->port_id);
 
 	/*
 	 * VF has no ability to enable/disable HW CRC
@@ -2818,7 +2814,8 @@ ixgbevf_dev_start(struct rte_eth_dev *dev)
 	/* This can fail when allocating mbufs for descriptor rings */
 	err = ixgbevf_dev_rx_init(dev);
 	if (err) {
-		PMD_INIT_LOG(ERR, "Unable to initialize RX hardware (%d)\n", err);
+		PMD_INIT_LOG(ERR, "Unable to initialize RX hardware (%d)\n",
+			     err);
 		ixgbe_dev_clear_queues(dev);
 		return err;
 	}
@@ -3098,7 +3095,7 @@ ixgbe_set_pool_rx_mode(struct rte_eth_dev *dev, uint16_t pool,
 
 	if (hw->mac.type == ixgbe_mac_82598EB) {
 		PMD_INIT_LOG(ERR, "setting VF receive mode set should be done"
-			" on 82599 hardware and newer\n");
+			     " on 82599 hardware and newer\n");
 		return (-ENOTSUP);
 	}
 	if (ixgbe_vmdq_mode_check(hw) < 0)
diff --git a/lib/librte_pmd_ixgbe/ixgbe_rxtx.c b/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
index 765b4e0..8732051 100644
--- a/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
+++ b/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
@@ -1889,8 +1889,14 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
 			dev->tx_pkt_burst = ixgbe_xmit_pkts_simple;
 	} else {
 		PMD_INIT_LOG(INFO, "Using full-featured tx code path\n");
-		PMD_INIT_LOG(INFO, " - txq_flags = %lx [IXGBE_SIMPLE_FLAGS=%lx]\n", (long unsigned)txq->txq_flags, (long unsigned)IXGBE_SIMPLE_FLAGS);
-		PMD_INIT_LOG(INFO, " - tx_rs_thresh = %lu [RTE_PMD_IXGBE_TX_MAX_BURST=%lu]\n", (long unsigned)txq->tx_rs_thresh, (long unsigned)RTE_PMD_IXGBE_TX_MAX_BURST);
+		PMD_INIT_LOG(INFO, " - txq_flags = %lx "
+			     "[IXGBE_SIMPLE_FLAGS=%lx]\n",
+			     (long unsigned)txq->txq_flags,
+			     (long unsigned)IXGBE_SIMPLE_FLAGS);
+		PMD_INIT_LOG(INFO, " - tx_rs_thresh = %lu "
+			     "[RTE_PMD_IXGBE_TX_MAX_BURST=%lu]\n",
+			     (long unsigned)txq->tx_rs_thresh,
+			     (long unsigned)RTE_PMD_IXGBE_TX_MAX_BURST);
 		dev->tx_pkt_burst = ixgbe_xmit_pkts;
 	}
 
@@ -3695,9 +3701,8 @@ ixgbe_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 
 		/* Allocate buffers for descriptor rings */
 		if (ixgbe_alloc_rx_queue_mbufs(rxq) != 0) {
-			PMD_INIT_LOG(ERR,
-				"Could not alloc mbuf for queue:%d\n",
-				rx_queue_id);
+			PMD_INIT_LOG(ERR, "Could not alloc mbuf for queue:%d\n",
+				     rx_queue_id);
 			return -1;
 		}
 		rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
@@ -3711,8 +3716,8 @@ ixgbe_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 			rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
 		} while (--poll_ms && !(rxdctl & IXGBE_RXDCTL_ENABLE));
 		if (!poll_ms)
-			PMD_INIT_LOG(ERR, "Could not enable "
-				     "Rx Queue %d\n", rx_queue_id);
+			PMD_INIT_LOG(ERR, "Could not enable Rx Queue %d\n",
+				     rx_queue_id);
 		rte_wmb();
 		IXGBE_WRITE_REG(hw, IXGBE_RDH(rxq->reg_idx), 0);
 		IXGBE_WRITE_REG(hw, IXGBE_RDT(rxq->reg_idx), rxq->nb_rx_desc - 1);
@@ -3750,8 +3755,8 @@ ixgbe_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 			rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
 		} while (--poll_ms && (rxdctl | IXGBE_RXDCTL_ENABLE));
 		if (!poll_ms)
-			PMD_INIT_LOG(ERR, "Could not disable "
-				     "Rx Queue %d\n", rx_queue_id);
+			PMD_INIT_LOG(ERR, "Could not disable Rx Queue %d\n",
+				     rx_queue_id);
 
 		rte_delay_us(RTE_IXGBE_WAIT_100_US);
 
@@ -3834,9 +3839,8 @@ ixgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 						IXGBE_TDT(txq->reg_idx));
 			} while (--poll_ms && (txtdh != txtdt));
 			if (!poll_ms)
-				PMD_INIT_LOG(ERR,
-				"Tx Queue %d is not empty when stopping.\n",
-				tx_queue_id);
+				PMD_INIT_LOG(ERR, "Tx Queue %d is not empty "
+					     "when stopping.\n", tx_queue_id);
 		}
 
 		txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txq->reg_idx));
@@ -4069,8 +4073,7 @@ ixgbevf_dev_rxtx_start(struct rte_eth_dev *dev)
 			txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
 		} while (--poll_ms && !(txdctl & IXGBE_TXDCTL_ENABLE));
 		if (!poll_ms)
-			PMD_INIT_LOG(ERR, "Could not enable "
-					 "Tx Queue %d\n", i);
+			PMD_INIT_LOG(ERR, "Could not enable Tx Queue %d\n", i);
 	}
 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
 
@@ -4087,8 +4090,7 @@ ixgbevf_dev_rxtx_start(struct rte_eth_dev *dev)
 			rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
 		} while (--poll_ms && !(rxdctl & IXGBE_RXDCTL_ENABLE));
 		if (!poll_ms)
-			PMD_INIT_LOG(ERR, "Could not enable "
-					 "Rx Queue %d\n", i);
+			PMD_INIT_LOG(ERR, "Could not enable Rx Queue %d\n", i);
 		rte_wmb();
 		IXGBE_WRITE_REG(hw, IXGBE_VFRDT(i), rxq->nb_rx_desc - 1);
 
-- 
1.7.10.4

Thread overview: 25+ messages
2014-09-17 13:46 [dpdk-dev] [PATCH v3 00/20] cleanup logs in main PMDs David Marchand
2014-09-17 13:46 ` [dpdk-dev] [PATCH v3 01/20] ixgbe: use the right debug macro David Marchand
2014-09-17 13:46 ` [dpdk-dev] [PATCH v3 02/20] ixgbe/base: add a raw macro for use by shared code David Marchand
2014-09-17 13:46 ` David Marchand [this message]
2014-09-17 13:46 ` [dpdk-dev] [PATCH v3 04/20] ixgbe: clean log messages David Marchand
2014-09-17 13:46 ` [dpdk-dev] [PATCH v3 05/20] ixgbe: always log init messages David Marchand
2014-09-17 13:46 ` [dpdk-dev] [PATCH v3 06/20] ixgbe: add a message when forcing scatter mode David Marchand
2014-09-17 13:46 ` [dpdk-dev] [PATCH v3 07/20] ixgbe: add log messages when rx bulk mode is not usable David Marchand
2014-09-17 13:46 ` [dpdk-dev] [PATCH v3 08/20] i40e: use the right debug macro David Marchand
2014-09-17 13:46 ` [dpdk-dev] [PATCH v3 09/20] i40e/base: add a raw macro for use by shared code David Marchand
2014-09-17 13:46 ` [dpdk-dev] [PATCH v3 10/20] i40e: indent logs sections David Marchand
2014-09-17 13:46 ` [dpdk-dev] [PATCH v3 11/20] i40e: clean log messages David Marchand
2014-09-17 13:46 ` [dpdk-dev] [PATCH v3 12/20] i40e: always log init messages David Marchand
2014-09-17 13:46 ` [dpdk-dev] [PATCH v3 13/20] i40e: add log messages when rx bulk mode is not usable David Marchand
2014-09-17 13:46 ` [dpdk-dev] [PATCH v3 14/20] e1000: use the right debug macro David Marchand
2014-09-17 13:46 ` [dpdk-dev] [PATCH v3 15/20] e1000/base: add a raw macro for use by shared code David Marchand
2014-09-17 13:46 ` [dpdk-dev] [PATCH v3 16/20] e1000: indent logs sections David Marchand
2014-09-17 13:46 ` [dpdk-dev] [PATCH v3 17/20] e1000: clean log messages David Marchand
2014-09-17 13:46 ` [dpdk-dev] [PATCH v3 18/20] e1000: always log init messages David Marchand
2014-09-17 13:46 ` [dpdk-dev] [PATCH v3 19/20] e1000: add a message when forcing scatter mode David Marchand
2014-09-17 13:46 ` [dpdk-dev] [PATCH v3 20/20] eal: set log level from command line David Marchand
2014-09-17 14:45   ` Neil Horman
2014-09-18  7:46     ` David Marchand
2014-09-18 10:27       ` Neil Horman
2014-09-19  7:52 ` [dpdk-dev] [PATCH v3 00/20] cleanup logs in main PMDs Thomas Monjalon
