DPDK patches and discussions
From: David Marchand <david.marchand@6wind.com>
To: dev@dpdk.org
Subject: [dpdk-dev] [PATCH v2 14/17] e1000: clean log messages
Date: Mon,  1 Sep 2014 12:24:37 +0200
Message-ID: <1409567080-27083-15-git-send-email-david.marchand@6wind.com>
In-Reply-To: <1409567080-27083-1-git-send-email-david.marchand@6wind.com>

Clean log messages:
- remove leading \n in some messages,
- remove trailing \n in some messages,
- split multi-line messages,
- replace some PMD_INIT_LOG(DEBUG, "some_func") with PMD_INIT_FUNC_TRACE().

Signed-off-by: David Marchand <david.marchand@6wind.com>
---
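For reference, the gist of the cleanup as a stand-alone C sketch: PMD_INIT_FUNC_TRACE() replaces the open-coded entry logs, and trailing \n are dropped because PMD_INIT_LOG() already appends one (see e1000_logs.h below). The macros are mimicked here with printf, and example_init() is a made-up function, not one from the driver:

#include <stdio.h>

/* Stand-ins for the driver macros from e1000_logs.h in this patch; RTE_LOG is
 * replaced by printf so the sketch compiles on its own. */
#define PMD_INIT_LOG(level, fmt, args...) \
	printf("%s(): " fmt "\n", __func__, ## args)
#define PMD_INIT_FUNC_TRACE() PMD_INIT_LOG(DEBUG, " >>")

/* example_init() is hypothetical, only to show the before/after logging style. */
static void example_init(int port_id)
{
	/* before: PMD_INIT_LOG(DEBUG, "example_init");
	 *         PMD_INIT_LOG(INFO, "port_id %d\n", port_id);
	 * the trailing \n is redundant since the macro already appends one. */
	PMD_INIT_FUNC_TRACE();
	PMD_INIT_LOG(INFO, "port_id %d vendorID=0x%x", port_id, 0x8086);
}

int main(void)
{
	example_init(0);
	return 0;
}
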
 lib/librte_pmd_e1000/e1000_logs.h |    4 +-
 lib/librte_pmd_e1000/em_ethdev.c  |   64 ++++++++++------------
 lib/librte_pmd_e1000/em_rxtx.c    |  109 ++++++++++++++++++-------------------
 lib/librte_pmd_e1000/igb_ethdev.c |   91 +++++++++++++++----------------
 lib/librte_pmd_e1000/igb_pf.c     |    4 +-
 lib/librte_pmd_e1000/igb_rxtx.c   |   45 +++++++--------
 6 files changed, 153 insertions(+), 164 deletions(-)

diff --git a/lib/librte_pmd_e1000/e1000_logs.h b/lib/librte_pmd_e1000/e1000_logs.h
index fe6e023..4dd7208 100644
--- a/lib/librte_pmd_e1000/e1000_logs.h
+++ b/lib/librte_pmd_e1000/e1000_logs.h
@@ -37,8 +37,10 @@
 #ifdef RTE_LIBRTE_E1000_DEBUG_INIT
 #define PMD_INIT_LOG(level, fmt, args...) \
 	RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
+#define PMD_INIT_FUNC_TRACE() PMD_INIT_LOG(DEBUG, " >>")
 #else
-#define PMD_INIT_LOG(level, fmt, args...) do { } while(0)
+#define PMD_INIT_LOG(level, fmt, args...) do { } while (0)
+#define PMD_INIT_FUNC_TRACE() do { } while (0)
 #endif
 
 #ifdef RTE_LIBRTE_E1000_DEBUG_RX
diff --git a/lib/librte_pmd_e1000/em_ethdev.c b/lib/librte_pmd_e1000/em_ethdev.c
index 4555294..fd36b37 100644
--- a/lib/librte_pmd_e1000/em_ethdev.c
+++ b/lib/librte_pmd_e1000/em_ethdev.c
@@ -249,9 +249,9 @@ eth_em_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
 	if (e1000_setup_init_funcs(hw, TRUE) != E1000_SUCCESS ||
 			em_hw_init(hw) != 0) {
 		PMD_INIT_LOG(ERR, "port_id %d vendorID=0x%x deviceID=0x%x: "
-			"failed to init HW",
-			eth_dev->data->port_id, pci_dev->id.vendor_id,
-			pci_dev->id.device_id);
+			     "failed to init HW",
+			     eth_dev->data->port_id, pci_dev->id.vendor_id,
+			     pci_dev->id.device_id);
 		return -(ENODEV);
 	}
 
@@ -260,8 +260,8 @@ eth_em_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
 			hw->mac.rar_entry_count, 0);
 	if (eth_dev->data->mac_addrs == NULL) {
 		PMD_INIT_LOG(ERR, "Failed to allocate %d bytes needed to "
-			"store MAC addresses",
-			ETHER_ADDR_LEN * hw->mac.rar_entry_count);
+			     "store MAC addresses",
+			     ETHER_ADDR_LEN * hw->mac.rar_entry_count);
 		return -(ENOMEM);
 	}
 
@@ -272,9 +272,9 @@ eth_em_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
 	/* initialize the vfta */
 	memset(shadow_vfta, 0, sizeof(*shadow_vfta));
 
-	PMD_INIT_LOG(INFO, "port_id %d vendorID=0x%x deviceID=0x%x\n",
-			eth_dev->data->port_id, pci_dev->id.vendor_id,
-			pci_dev->id.device_id);
+	PMD_INIT_LOG(INFO, "port_id %d vendorID=0x%x deviceID=0x%x",
+		     eth_dev->data->port_id, pci_dev->id.vendor_id,
+		     pci_dev->id.device_id);
 
 	rte_intr_callback_register(&(pci_dev->intr_handle),
 		eth_em_interrupt_handler, (void *)eth_dev);
@@ -306,17 +306,17 @@ em_hw_init(struct e1000_hw *hw)
 
 	diag = hw->mac.ops.init_params(hw);
 	if (diag != 0) {
-		PMD_INIT_LOG(ERR, "MAC Initialization Error\n");
+		PMD_INIT_LOG(ERR, "MAC Initialization Error");
 		return diag;
 	}
 	diag = hw->nvm.ops.init_params(hw);
 	if (diag != 0) {
-		PMD_INIT_LOG(ERR, "NVM Initialization Error\n");
+		PMD_INIT_LOG(ERR, "NVM Initialization Error");
 		return diag;
 	}
 	diag = hw->phy.ops.init_params(hw);
 	if (diag != 0) {
-		PMD_INIT_LOG(ERR, "PHY Initialization Error\n");
+		PMD_INIT_LOG(ERR, "PHY Initialization Error");
 		return diag;
 	}
 	(void) e1000_get_bus_info(hw);
@@ -375,7 +375,7 @@ em_hw_init(struct e1000_hw *hw)
 	diag = e1000_check_reset_block(hw);
 	if (diag < 0) {
 		PMD_INIT_LOG(ERR, "PHY reset is blocked due to "
-			"SOL/IDER session");
+			     "SOL/IDER session");
 	}
 	return (0);
 
@@ -390,11 +390,10 @@ eth_em_configure(struct rte_eth_dev *dev)
 	struct e1000_interrupt *intr =
 		E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
 
-	PMD_INIT_LOG(DEBUG, ">>");
-
+	PMD_INIT_FUNC_TRACE();
 	intr->flags |= E1000_FLAG_NEED_LINK_UPDATE;
+	PMD_INIT_FUNC_TRACE();
 
-	PMD_INIT_LOG(DEBUG, "<<");
 	return (0);
 }
 
@@ -453,7 +452,7 @@ eth_em_start(struct rte_eth_dev *dev)
 		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	int ret, mask;
 
-	PMD_INIT_LOG(DEBUG, ">>");
+	PMD_INIT_FUNC_TRACE();
 
 	eth_em_stop(dev);
 
@@ -573,9 +572,9 @@ eth_em_start(struct rte_eth_dev *dev)
 	return (0);
 
 error_invalid_config:
-	PMD_INIT_LOG(ERR, "Invalid link_speed/link_duplex (%u/%u) for port "
-				"%u\n", dev->data->dev_conf.link_speed,
-			dev->data->dev_conf.link_duplex, dev->data->port_id);
+	PMD_INIT_LOG(ERR, "Invalid link_speed/link_duplex (%u/%u) for port %u",
+		     dev->data->dev_conf.link_speed,
+		     dev->data->dev_conf.link_duplex, dev->data->port_id);
 	em_dev_clear_queues(dev);
 	return (-EINVAL);
 }
@@ -1296,20 +1295,17 @@ eth_em_interrupt_action(struct rte_eth_dev *dev)
 	memset(&link, 0, sizeof(link));
 	rte_em_dev_atomic_read_link_status(dev, &link);
 	if (link.link_status) {
-		PMD_INIT_LOG(INFO,
-			" Port %d: Link Up - speed %u Mbps - %s\n",
-			dev->data->port_id, (unsigned)link.link_speed,
-			link.link_duplex == ETH_LINK_FULL_DUPLEX ?
-				"full-duplex" : "half-duplex");
+		PMD_INIT_LOG(INFO, " Port %d: Link Up - speed %u Mbps - %s",
+			     dev->data->port_id, (unsigned)link.link_speed,
+			     link.link_duplex == ETH_LINK_FULL_DUPLEX ?
+			     "full-duplex" : "half-duplex");
 	} else {
-		PMD_INIT_LOG(INFO, " Port %d: Link Down\n",
-					dev->data->port_id);
+		PMD_INIT_LOG(INFO, " Port %d: Link Down",
+			     dev->data->port_id);
 	}
 	PMD_INIT_LOG(INFO, "PCI Address: %04d:%02d:%02d:%d",
-				dev->pci_dev->addr.domain,
-				dev->pci_dev->addr.bus,
-				dev->pci_dev->addr.devid,
-				dev->pci_dev->addr.function);
+		     dev->pci_dev->addr.domain, dev->pci_dev->addr.bus,
+		     dev->pci_dev->addr.devid, dev->pci_dev->addr.function);
 	tctl = E1000_READ_REG(hw, E1000_TCTL);
 	rctl = E1000_READ_REG(hw, E1000_RCTL);
 	if (link.link_status) {
@@ -1429,14 +1425,14 @@ eth_em_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 	if (fc_conf->autoneg != hw->mac.autoneg)
 		return -ENOTSUP;
 	rx_buf_size = em_get_rx_buffer_size(hw);
-	PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x \n", rx_buf_size);
+	PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);
 
 	/* At least reserve one Ethernet frame for watermark */
 	max_high_water = rx_buf_size - ETHER_MAX_LEN;
 	if ((fc_conf->high_water > max_high_water) ||
 		(fc_conf->high_water < fc_conf->low_water)) {
-		PMD_INIT_LOG(ERR, "e1000 incorrect high/low water value \n");
-		PMD_INIT_LOG(ERR, "high water must <= 0x%x \n", max_high_water);
+		PMD_INIT_LOG(ERR, "e1000 incorrect high/low water value");
+		PMD_INIT_LOG(ERR, "high water must <= 0x%x", max_high_water);
 		return (-EINVAL);
 	}
 
@@ -1466,7 +1462,7 @@ eth_em_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 		return 0;
 	}
 
-	PMD_INIT_LOG(ERR, "e1000_setup_link_generic = 0x%x \n", err);
+	PMD_INIT_LOG(ERR, "e1000_setup_link_generic = 0x%x", err);
 	return (-EIO);
 }
 
diff --git a/lib/librte_pmd_e1000/em_rxtx.c b/lib/librte_pmd_e1000/em_rxtx.c
index 01efa50..83ecb33 100644
--- a/lib/librte_pmd_e1000/em_rxtx.c
+++ b/lib/librte_pmd_e1000/em_rxtx.c
@@ -317,10 +317,8 @@ em_xmit_cleanup(struct em_tx_queue *txq)
 	desc_to_clean_to = sw_ring[desc_to_clean_to].last_id;
 	if (! (txr[desc_to_clean_to].upper.fields.status & E1000_TXD_STAT_DD))
 	{
-		PMD_TX_FREE_LOG(DEBUG,
-				"TX descriptor %4u is not done"
-				"(port=%d queue=%d)",
-				desc_to_clean_to,
+		PMD_TX_FREE_LOG(DEBUG, "TX descriptor %4u is not done"
+				"(port=%d queue=%d)", desc_to_clean_to,
 				txq->port_id, txq->queue_id);
 		/* Failed to clean any descriptors, better luck next time */
 		return -(1);
@@ -334,11 +332,10 @@ em_xmit_cleanup(struct em_tx_queue *txq)
 		nb_tx_to_clean = (uint16_t)(desc_to_clean_to -
 						last_desc_cleaned);
 
-	PMD_TX_FREE_LOG(DEBUG,
-			"Cleaning %4u TX descriptors: %4u to %4u "
-			"(port=%d queue=%d)",
-			nb_tx_to_clean, last_desc_cleaned, desc_to_clean_to,
-			txq->port_id, txq->queue_id);
+	PMD_TX_FREE_LOG(DEBUG, "Cleaning %4u TX descriptors: %4u to %4u "
+			"(port=%d queue=%d)", nb_tx_to_clean,
+			last_desc_cleaned, desc_to_clean_to, txq->port_id,
+			txq->queue_id);
 
 	/*
 	 * The last descriptor to clean is done, so that means all the
@@ -451,12 +448,12 @@ eth_em_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 			tx_last = (uint16_t) (tx_last - txq->nb_tx_desc);
 
 		PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u pktlen=%u"
-			" tx_first=%u tx_last=%u\n",
-			(unsigned) txq->port_id,
-			(unsigned) txq->queue_id,
-			(unsigned) tx_pkt->pkt.pkt_len,
-			(unsigned) tx_id,
-			(unsigned) tx_last);
+			   " tx_first=%u tx_last=%u",
+			   (unsigned) txq->port_id,
+			   (unsigned) txq->queue_id,
+			   (unsigned) tx_pkt->pkt.pkt_len,
+			   (unsigned) tx_id,
+			   (unsigned) tx_last);
 
 		/*
 		 * Make sure there are enough TX descriptors available to
@@ -464,8 +461,7 @@ eth_em_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 		 * nb_used better be less than or equal to txq->tx_rs_thresh
 		 */
 		while (unlikely (nb_used > txq->nb_tx_free)) {
-			PMD_TX_FREE_LOG(DEBUG,
-					"Not enough free TX descriptors "
+			PMD_TX_FREE_LOG(DEBUG, "Not enough free TX descriptors "
 					"nb_used=%4u nb_free=%4u "
 					"(port=%d queue=%d)",
 					nb_used, txq->nb_tx_free,
@@ -588,9 +584,8 @@ eth_em_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 
 		/* Set RS bit only on threshold packets' last descriptor */
 		if (txq->nb_tx_used >= txq->tx_rs_thresh) {
-			PMD_TX_FREE_LOG(DEBUG,
-					"Setting RS bit on TXD id="
-					"%4u (port=%d queue=%d)",
+			PMD_TX_FREE_LOG(DEBUG, "Setting RS bit on TXD id=%4u "
+					"(port=%d queue=%d)",
 					tx_last, txq->port_id, txq->queue_id);
 
 			cmd_type_len |= E1000_TXD_CMD_RS;
@@ -607,8 +602,8 @@ end_of_tx:
 	 * Set the Transmit Descriptor Tail (TDT)
 	 */
 	PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
-		(unsigned) txq->port_id, (unsigned) txq->queue_id,
-		(unsigned) tx_id, (unsigned) nb_tx);
+		   (unsigned) txq->port_id, (unsigned) txq->queue_id,
+		   (unsigned) tx_id, (unsigned) nb_tx);
 	E1000_PCI_REG_WRITE(txq->tdt_reg_addr, tx_id);
 	txq->tx_tail = tx_id;
 
@@ -712,19 +707,19 @@ eth_em_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 		 * to happen by sending specific "back-pressure" flow control
 		 * frames to its peer(s).
 		 */
-		PMD_RX_LOG(DEBUG, "\nport_id=%u queue_id=%u rx_id=%u "
-			"status=0x%x pkt_len=%u\n",
-			(unsigned) rxq->port_id, (unsigned) rxq->queue_id,
-			(unsigned) rx_id, (unsigned) status,
-			(unsigned) rte_le_to_cpu_16(rxd.length));
+		PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
+			   "status=0x%x pkt_len=%u",
+			   (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
+			   (unsigned) rx_id, (unsigned) status,
+			   (unsigned) rte_le_to_cpu_16(rxd.length));
 
 		nmb = rte_rxmbuf_alloc(rxq->mb_pool);
 		if (nmb == NULL) {
 			PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
-				"queue_id=%u\n",
-				(unsigned) rxq->port_id,
-				(unsigned) rxq->queue_id);
-			rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
+				   "queue_id=%u",
+				   (unsigned) rxq->port_id,
+				   (unsigned) rxq->queue_id);
+				rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
 			break;
 		}
 
@@ -806,10 +801,10 @@ eth_em_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 	nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
 	if (nb_hold > rxq->rx_free_thresh) {
 		PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
-			"nb_hold=%u nb_rx=%u\n",
-			(unsigned) rxq->port_id, (unsigned) rxq->queue_id,
-			(unsigned) rx_id, (unsigned) nb_hold,
-			(unsigned) nb_rx);
+			   "nb_hold=%u nb_rx=%u",
+			   (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
+			   (unsigned) rx_id, (unsigned) nb_hold,
+			   (unsigned) nb_rx);
 		rx_id = (uint16_t) ((rx_id == 0) ?
 			(rxq->nb_rx_desc - 1) : (rx_id - 1));
 		E1000_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
@@ -892,17 +887,17 @@ eth_em_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 		 * to happen by sending specific "back-pressure" flow control
 		 * frames to its peer(s).
 		 */
-		PMD_RX_LOG(DEBUG, "\nport_id=%u queue_id=%u rx_id=%u "
-			"status=0x%x data_len=%u\n",
-			(unsigned) rxq->port_id, (unsigned) rxq->queue_id,
-			(unsigned) rx_id, (unsigned) status,
-			(unsigned) rte_le_to_cpu_16(rxd.length));
+		PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
+			   "status=0x%x data_len=%u",
+			   (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
+			   (unsigned) rx_id, (unsigned) status,
+			   (unsigned) rte_le_to_cpu_16(rxd.length));
 
 		nmb = rte_rxmbuf_alloc(rxq->mb_pool);
 		if (nmb == NULL) {
 			PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
-				"queue_id=%u\n", (unsigned) rxq->port_id,
-				(unsigned) rxq->queue_id);
+				   "queue_id=%u", (unsigned) rxq->port_id,
+				   (unsigned) rxq->queue_id);
 			rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
 			break;
 		}
@@ -1050,10 +1045,10 @@ eth_em_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 	nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
 	if (nb_hold > rxq->rx_free_thresh) {
 		PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
-			"nb_hold=%u nb_rx=%u\n",
-			(unsigned) rxq->port_id, (unsigned) rxq->queue_id,
-			(unsigned) rx_id, (unsigned) nb_hold,
-			(unsigned) nb_rx);
+			   "nb_hold=%u nb_rx=%u",
+			   (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
+			   (unsigned) rx_id, (unsigned) nb_hold,
+			   (unsigned) nb_rx);
 		rx_id = (uint16_t) ((rx_id == 0) ?
 			(rxq->nb_rx_desc - 1) : (rx_id - 1));
 		E1000_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
@@ -1213,7 +1208,7 @@ eth_em_tx_queue_setup(struct rte_eth_dev *dev,
 	if (tx_free_thresh >= (nb_desc - 3)) {
 		PMD_INIT_LOG(ERR, "tx_free_thresh must be less than the "
 			     "number of TX descriptors minus 3. "
-			     "(tx_free_thresh=%u port=%d queue=%d)\n",
+			     "(tx_free_thresh=%u port=%d queue=%d)",
 			     (unsigned int)tx_free_thresh,
 			     (int)dev->data->port_id, (int)queue_idx);
 		return -(EINVAL);
@@ -1221,7 +1216,7 @@ eth_em_tx_queue_setup(struct rte_eth_dev *dev,
 	if (tx_rs_thresh > tx_free_thresh) {
 		PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than or equal to "
 			     "tx_free_thresh. (tx_free_thresh=%u "
-			     "tx_rs_thresh=%u port=%d queue=%d)\n",
+			     "tx_rs_thresh=%u port=%d queue=%d)",
 			     (unsigned int)tx_free_thresh,
 			     (unsigned int)tx_rs_thresh,
 			     (int)dev->data->port_id,
@@ -1238,7 +1233,7 @@ eth_em_tx_queue_setup(struct rte_eth_dev *dev,
 	if (tx_conf->tx_thresh.wthresh != 0 && tx_rs_thresh != 1) {
 		PMD_INIT_LOG(ERR, "TX WTHRESH must be set to 0 if "
 			     "tx_rs_thresh is greater than 1. (tx_rs_thresh=%u "
-			     "port=%d queue=%d)\n", (unsigned int)tx_rs_thresh,
+			     "port=%d queue=%d)", (unsigned int)tx_rs_thresh,
 			     (int)dev->data->port_id, (int)queue_idx);
 		return -(EINVAL);
 	}
@@ -1289,8 +1284,8 @@ eth_em_tx_queue_setup(struct rte_eth_dev *dev,
 #endif
 	txq->tx_ring = (struct e1000_data_desc *) tz->addr;
 
-	PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64"\n",
-		txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr);
+	PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
+		     txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr);
 
 	em_reset_tx_queue(txq);
 
@@ -1370,7 +1365,7 @@ eth_em_rx_queue_setup(struct rte_eth_dev *dev,
 	 */
 	if (rx_conf->rx_drop_en) {
 		PMD_INIT_LOG(ERR, "drop_en functionality not supported by "
-			     "device\n");
+			     "device");
 		return (-EINVAL);
 	}
 
@@ -1419,8 +1414,8 @@ eth_em_rx_queue_setup(struct rte_eth_dev *dev,
 #endif
 	rxq->rx_ring = (struct e1000_rx_desc *) rz->addr;
 
-	PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64"\n",
-		rxq->sw_ring, rxq->rx_ring, rxq->rx_ring_phys_addr);
+	PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
+		     rxq->sw_ring, rxq->rx_ring, rxq->rx_ring_phys_addr);
 
 	dev->data->rx_queues[queue_idx] = rxq;
 	em_reset_rx_queue(rxq);
@@ -1437,7 +1432,7 @@ eth_em_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 	uint32_t desc = 0;
 
 	if (rx_queue_id >= dev->data->nb_rx_queues) {
-		PMD_RX_LOG(DEBUG,"Invalid RX queue_id=%d\n", rx_queue_id);
+		PMD_RX_LOG(DEBUG, "Invalid RX queue_id=%d", rx_queue_id);
 		return 0;
 	}
 
@@ -1582,8 +1577,8 @@ em_alloc_rx_queue_mbufs(struct em_rx_queue *rxq)
 		struct rte_mbuf *mbuf = rte_rxmbuf_alloc(rxq->mb_pool);
 
 		if (mbuf == NULL) {
-			PMD_INIT_LOG(ERR, "RX mbuf alloc failed "
-				"queue_id=%hu\n", rxq->queue_id);
+			PMD_INIT_LOG(ERR, "RX mbuf alloc failed queue_id=%hu",
+				     rxq->queue_id);
 			return (-ENOMEM);
 		}
 
diff --git a/lib/librte_pmd_e1000/igb_ethdev.c b/lib/librte_pmd_e1000/igb_ethdev.c
index b45eb24..4dbf059 100644
--- a/lib/librte_pmd_e1000/igb_ethdev.c
+++ b/lib/librte_pmd_e1000/igb_ethdev.c
@@ -528,8 +528,8 @@ eth_igb_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
 		ETHER_ADDR_LEN * hw->mac.rar_entry_count, 0);
 	if (eth_dev->data->mac_addrs == NULL) {
 		PMD_INIT_LOG(ERR, "Failed to allocate %d bytes needed to "
-						"store MAC addresses",
-				ETHER_ADDR_LEN * hw->mac.rar_entry_count);
+			     "store MAC addresses",
+			     ETHER_ADDR_LEN * hw->mac.rar_entry_count);
 		error = -ENOMEM;
 		goto err_late;
 	}
@@ -553,7 +553,7 @@ eth_igb_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
 	/* Indicate SOL/IDER usage */
 	if (e1000_check_reset_block(hw) < 0) {
 		PMD_INIT_LOG(ERR, "PHY reset is blocked due to"
-					"SOL/IDER session");
+			     "SOL/IDER session");
 	}
 
 	/* initialize PF if max_vfs not zero */
@@ -565,7 +565,7 @@ eth_igb_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
 	E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
 	E1000_WRITE_FLUSH(hw);
 
-	PMD_INIT_LOG(INFO, "port_id %d vendorID=0x%x deviceID=0x%x\n",
+	PMD_INIT_LOG(INFO, "port_id %d vendorID=0x%x deviceID=0x%x",
 		     eth_dev->data->port_id, pci_dev->id.vendor_id,
 		     pci_dev->id.device_id);
 
@@ -598,7 +598,7 @@ eth_igbvf_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
 		E1000_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
 	int diag;
 
-	PMD_INIT_LOG(DEBUG, "eth_igbvf_dev_init");
+	PMD_INIT_FUNC_TRACE();
 
 	eth_dev->dev_ops = &igbvf_eth_dev_ops;
 	eth_dev->rx_pkt_burst = &eth_igb_recv_pkts;
@@ -623,7 +623,7 @@ eth_igbvf_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
 	diag = e1000_setup_init_funcs(hw, TRUE);
 	if (diag != 0) {
 		PMD_INIT_LOG(ERR, "Shared code init failed for igbvf: %d",
-			diag);
+			     diag);
 		return -EIO;
 	}
 
@@ -639,10 +639,9 @@ eth_igbvf_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
 	eth_dev->data->mac_addrs = rte_zmalloc("igbvf", ETHER_ADDR_LEN *
 		hw->mac.rar_entry_count, 0);
 	if (eth_dev->data->mac_addrs == NULL) {
-		PMD_INIT_LOG(ERR,
-			"Failed to allocate %d bytes needed to store MAC "
-			"addresses",
-			ETHER_ADDR_LEN * hw->mac.rar_entry_count);
+		PMD_INIT_LOG(ERR, "Failed to allocate %d bytes needed to "
+			     "store MAC addresses",
+			     ETHER_ADDR_LEN * hw->mac.rar_entry_count);
 		return -ENOMEM;
 	}
 
@@ -650,11 +649,9 @@ eth_igbvf_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
 	ether_addr_copy((struct ether_addr *) hw->mac.perm_addr,
 			&eth_dev->data->mac_addrs[0]);
 
-	PMD_INIT_LOG(DEBUG, "\nport %d vendorID=0x%x deviceID=0x%x "
-			"mac.type=%s\n",
-			eth_dev->data->port_id, pci_dev->id.vendor_id,
-			pci_dev->id.device_id,
-			"igb_mac_82576_vf");
+	PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x mac.type=%s",
+		     eth_dev->data->port_id, pci_dev->id.vendor_id,
+		     pci_dev->id.device_id, "igb_mac_82576_vf");
 
 	return 0;
 }
@@ -720,11 +717,9 @@ eth_igb_configure(struct rte_eth_dev *dev)
 	struct e1000_interrupt *intr =
 		E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
 
-	PMD_INIT_LOG(DEBUG, ">>");
-
+	PMD_INIT_FUNC_TRACE();
 	intr->flags |= E1000_FLAG_NEED_LINK_UPDATE;
-
-	PMD_INIT_LOG(DEBUG, "<<");
+	PMD_INIT_FUNC_TRACE();
 
 	return (0);
 }
@@ -737,7 +732,7 @@ eth_igb_start(struct rte_eth_dev *dev)
 	int ret, i, mask;
 	uint32_t ctrl_ext;
 
-	PMD_INIT_LOG(DEBUG, ">>");
+	PMD_INIT_FUNC_TRACE();
 
 	/* Power up the phy. Needed to make the link go Up */
 	e1000_power_up_phy(hw);
@@ -888,9 +883,9 @@ eth_igb_start(struct rte_eth_dev *dev)
 	return (0);
 
 error_invalid_config:
-	PMD_INIT_LOG(ERR, "Invalid link_speed/link_duplex (%u/%u) for port %u\n",
-			dev->data->dev_conf.link_speed,
-			dev->data->dev_conf.link_duplex, dev->data->port_id);
+	PMD_INIT_LOG(ERR, "Invalid link_speed/link_duplex (%u/%u) for port %u",
+		     dev->data->dev_conf.link_speed,
+		     dev->data->dev_conf.link_duplex, dev->data->port_id);
 	igb_dev_clear_queues(dev);
 	return (-EINVAL);
 }
@@ -1789,20 +1784,20 @@ eth_igb_interrupt_action(struct rte_eth_dev *dev)
 		memset(&link, 0, sizeof(link));
 		rte_igb_dev_atomic_read_link_status(dev, &link);
 		if (link.link_status) {
-			PMD_INIT_LOG(INFO,
-				" Port %d: Link Up - speed %u Mbps - %s\n",
-				dev->data->port_id, (unsigned)link.link_speed,
-				link.link_duplex == ETH_LINK_FULL_DUPLEX ?
-					"full-duplex" : "half-duplex");
+			PMD_INIT_LOG(INFO, " Port %d: Link Up - speed %u Mbps "
+				     "- %s", dev->data->port_id,
+				     (unsigned)link.link_speed,
+				     link.link_duplex == ETH_LINK_FULL_DUPLEX ?
+				     "full-duplex" : "half-duplex");
 		} else {
-			PMD_INIT_LOG(INFO, " Port %d: Link Down\n",
-						dev->data->port_id);
+			PMD_INIT_LOG(INFO, " Port %d: Link Down",
+				     dev->data->port_id);
 		}
 		PMD_INIT_LOG(INFO, "PCI Address: %04d:%02d:%02d:%d",
-					dev->pci_dev->addr.domain,
-					dev->pci_dev->addr.bus,
-					dev->pci_dev->addr.devid,
-					dev->pci_dev->addr.function);
+			     dev->pci_dev->addr.domain,
+			     dev->pci_dev->addr.bus,
+			     dev->pci_dev->addr.devid,
+			     dev->pci_dev->addr.function);
 		tctl = E1000_READ_REG(hw, E1000_TCTL);
 		rctl = E1000_READ_REG(hw, E1000_RCTL);
 		if (link.link_status) {
@@ -1923,14 +1918,14 @@ eth_igb_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 	if (fc_conf->autoneg != hw->mac.autoneg)
 		return -ENOTSUP;
 	rx_buf_size = igb_get_rx_buffer_size(hw);
-	PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x \n", rx_buf_size);
+	PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);
 
 	/* At least reserve one Ethernet frame for watermark */
 	max_high_water = rx_buf_size - ETHER_MAX_LEN;
 	if ((fc_conf->high_water > max_high_water) ||
 		(fc_conf->high_water < fc_conf->low_water)) {
-		PMD_INIT_LOG(ERR, "e1000 incorrect high/low water value \n");
-		PMD_INIT_LOG(ERR, "high water must <=  0x%x \n", max_high_water);
+		PMD_INIT_LOG(ERR, "e1000 incorrect high/low water value");
+		PMD_INIT_LOG(ERR, "high water must <= 0x%x", max_high_water);
 		return (-EINVAL);
 	}
 
@@ -1960,7 +1955,7 @@ eth_igb_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 		return 0;
 	}
 
-	PMD_INIT_LOG(ERR, "e1000_setup_link_generic = 0x%x \n", err);
+	PMD_INIT_LOG(ERR, "e1000_setup_link_generic = 0x%x", err);
 	return (-EIO);
 }
 
@@ -1995,7 +1990,7 @@ eth_igb_rar_clear(struct rte_eth_dev *dev, uint32_t index)
 static void
 igbvf_intr_disable(struct e1000_hw *hw)
 {
-	PMD_INIT_LOG(DEBUG, "igbvf_intr_disable");
+	PMD_INIT_FUNC_TRACE();
 
 	/* Clear interrupt mask to stop from interrupts being generated */
 	E1000_WRITE_REG(hw, E1000_EIMC, 0xFFFF);
@@ -2077,8 +2072,8 @@ igbvf_dev_configure(struct rte_eth_dev *dev)
 {
 	struct rte_eth_conf* conf = &dev->data->dev_conf;
 
-	PMD_INIT_LOG(DEBUG, "\nConfigured Virtual Function port id: %d\n",
-		dev->data->port_id);
+	PMD_INIT_LOG(DEBUG, "Configured Virtual Function port id: %d",
+		     dev->data->port_id);
 
 	/*
 	 * VF has no ability to enable/disable HW CRC
@@ -2086,12 +2081,12 @@ igbvf_dev_configure(struct rte_eth_dev *dev)
 	 */
 #ifndef RTE_LIBRTE_E1000_PF_DISABLE_STRIP_CRC
 	if (!conf->rxmode.hw_strip_crc) {
-		PMD_INIT_LOG(INFO, "VF can't disable HW CRC Strip\n");
+		PMD_INIT_LOG(INFO, "VF can't disable HW CRC Strip");
 		conf->rxmode.hw_strip_crc = 1;
 	}
 #else
 	if (conf->rxmode.hw_strip_crc) {
-		PMD_INIT_LOG(INFO, "VF can't enable HW CRC Strip\n");
+		PMD_INIT_LOG(INFO, "VF can't enable HW CRC Strip");
 		conf->rxmode.hw_strip_crc = 0;
 	}
 #endif
@@ -2106,7 +2101,7 @@ igbvf_dev_start(struct rte_eth_dev *dev)
 		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	int ret;
 
-	PMD_INIT_LOG(DEBUG, "igbvf_dev_start");
+	PMD_INIT_FUNC_TRACE();
 
 	hw->mac.ops.reset_hw(hw);
 
@@ -2129,7 +2124,7 @@ igbvf_dev_start(struct rte_eth_dev *dev)
 static void
 igbvf_dev_stop(struct rte_eth_dev *dev)
 {
-	PMD_INIT_LOG(DEBUG, "igbvf_dev_stop");
+	PMD_INIT_FUNC_TRACE();
 
 	igbvf_stop_adapter(dev);
 
@@ -2147,7 +2142,7 @@ igbvf_dev_close(struct rte_eth_dev *dev)
 {
 	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
-	PMD_INIT_LOG(DEBUG, "igbvf_dev_close");
+	PMD_INIT_FUNC_TRACE();
 
 	e1000_reset_hw(hw);
 
@@ -2203,7 +2198,7 @@ igbvf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
 	uint32_t vid_bit = 0;
 	int ret = 0;
 
-	PMD_INIT_LOG(DEBUG, "igbvf_vlan_filter_set");
+	PMD_INIT_FUNC_TRACE();
 
 	/*vind is not used in VF driver, set to 0, check ixgbe_set_vfta_vf*/
 	ret = igbvf_set_vfta(hw, vlan_id, !!on);
@@ -2432,7 +2427,7 @@ eth_igb_add_ethertype_filter(struct rte_eth_dev *dev, uint16_t index,
 
 	if (filter->priority_en) {
 		PMD_INIT_LOG(ERR, "vlan and priority (%d) is not supported"
-			" in E1000.", filter->priority);
+			     " in E1000.", filter->priority);
 		return -EINVAL;
 	}
 
diff --git a/lib/librte_pmd_e1000/igb_pf.c b/lib/librte_pmd_e1000/igb_pf.c
index 76033ad..bc3816a 100644
--- a/lib/librte_pmd_e1000/igb_pf.c
+++ b/lib/librte_pmd_e1000/igb_pf.c
@@ -404,7 +404,7 @@ igb_rcv_msg_from_vf(struct rte_eth_dev *dev, uint16_t vf)
 
 	retval = e1000_read_mbx(hw, msgbuf, mbx_size, vf);
 	if (retval) {
-		PMD_INIT_LOG(ERR, "Error mbx recv msg from VF %d\n", vf);
+		PMD_INIT_LOG(ERR, "Error mbx recv msg from VF %d", vf);
 		return retval;
 	}
 
@@ -432,7 +432,7 @@ igb_rcv_msg_from_vf(struct rte_eth_dev *dev, uint16_t vf)
 		retval = igb_vf_set_vlan(dev, vf, msgbuf);
 		break;
 	default:
-		PMD_INIT_LOG(DEBUG, "Unhandled Msg %8.8x\n",
+		PMD_INIT_LOG(DEBUG, "Unhandled Msg %8.8x",
 			     (unsigned) msgbuf[0]);
 		retval = E1000_ERR_MBX;
 		break;
diff --git a/lib/librte_pmd_e1000/igb_rxtx.c b/lib/librte_pmd_e1000/igb_rxtx.c
index 3aa9609..5ca06c9 100644
--- a/lib/librte_pmd_e1000/igb_rxtx.c
+++ b/lib/librte_pmd_e1000/igb_rxtx.c
@@ -396,7 +396,7 @@ eth_igb_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 			tx_last = (uint16_t) (tx_last - txq->nb_tx_desc);
 
 		PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u pktlen=%u"
-			   " tx_first=%u tx_last=%u\n",
+			   " tx_first=%u tx_last=%u",
 			   (unsigned) txq->port_id,
 			   (unsigned) txq->queue_id,
 			   (unsigned) pkt_len,
@@ -548,7 +548,7 @@ eth_igb_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 		txd->read.cmd_type_len |=
 			rte_cpu_to_le_32(E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS);
 	}
- end_of_tx:
+end_of_tx:
 	rte_wmb();
 
 	/*
@@ -697,8 +697,8 @@ eth_igb_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 		 * to happen by sending specific "back-pressure" flow control
 		 * frames to its peer(s).
 		 */
-		PMD_RX_LOG(DEBUG, "\nport_id=%u queue_id=%u rx_id=%u "
-			   "staterr=0x%x pkt_len=%u\n",
+		PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
+			   "staterr=0x%x pkt_len=%u",
 			   (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
 			   (unsigned) rx_id, (unsigned) staterr,
 			   (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length));
@@ -706,7 +706,7 @@ eth_igb_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 		nmb = rte_rxmbuf_alloc(rxq->mb_pool);
 		if (nmb == NULL) {
 			PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
-				   "queue_id=%u\n", (unsigned) rxq->port_id,
+				   "queue_id=%u", (unsigned) rxq->port_id,
 				   (unsigned) rxq->queue_id);
 			rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
 			break;
@@ -794,7 +794,7 @@ eth_igb_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 	nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
 	if (nb_hold > rxq->rx_free_thresh) {
 		PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
-			   "nb_hold=%u nb_rx=%u\n",
+			   "nb_hold=%u nb_rx=%u",
 			   (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
 			   (unsigned) rx_id, (unsigned) nb_hold,
 			   (unsigned) nb_rx);
@@ -881,8 +881,8 @@ eth_igb_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 		 * to happen by sending specific "back-pressure" flow control
 		 * frames to its peer(s).
 		 */
-		PMD_RX_LOG(DEBUG, "\nport_id=%u queue_id=%u rx_id=%u "
-			   "staterr=0x%x data_len=%u\n",
+		PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
+			   "staterr=0x%x data_len=%u",
 			   (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
 			   (unsigned) rx_id, (unsigned) staterr,
 			   (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length));
@@ -890,7 +890,7 @@ eth_igb_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 		nmb = rte_rxmbuf_alloc(rxq->mb_pool);
 		if (nmb == NULL) {
 			PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
-				   "queue_id=%u\n", (unsigned) rxq->port_id,
+				   "queue_id=%u", (unsigned) rxq->port_id,
 				   (unsigned) rxq->queue_id);
 			rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
 			break;
@@ -1049,7 +1049,7 @@ eth_igb_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 	nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
 	if (nb_hold > rxq->rx_free_thresh) {
 		PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
-			   "nb_hold=%u nb_rx=%u\n",
+			   "nb_hold=%u nb_rx=%u",
 			   (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
 			   (unsigned) rx_id, (unsigned) nb_hold,
 			   (unsigned) nb_rx);
@@ -1211,14 +1211,14 @@ eth_igb_tx_queue_setup(struct rte_eth_dev *dev,
 	 */
 	if (tx_conf->tx_free_thresh != 0)
 		PMD_INIT_LOG(WARNING, "The tx_free_thresh parameter is not "
-			     "used for the 1G driver.\n");
+			     "used for the 1G driver.");
 	if (tx_conf->tx_rs_thresh != 0)
 		PMD_INIT_LOG(WARNING, "The tx_rs_thresh parameter is not "
-			     "used for the 1G driver.\n");
+			     "used for the 1G driver.");
 	if (tx_conf->tx_thresh.wthresh == 0)
 		PMD_INIT_LOG(WARNING, "To improve 1G driver performance, "
 			     "consider setting the TX WTHRESH value to 4, 8, "
-			     "or 16.\n");
+			     "or 16.");
 
 	/* Free memory prior to re-allocation if needed */
 	if (dev->data->tx_queues[queue_idx] != NULL) {
@@ -1271,7 +1271,7 @@ eth_igb_tx_queue_setup(struct rte_eth_dev *dev,
 		igb_tx_queue_release(txq);
 		return (-ENOMEM);
 	}
-	PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64"\n",
+	PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64"",
 		     txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr);
 
 	igb_reset_tx_queue(txq, dev);
@@ -1409,7 +1409,7 @@ eth_igb_rx_queue_setup(struct rte_eth_dev *dev,
 		igb_rx_queue_release(rxq);
 		return (-ENOMEM);
 	}
-	PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64"\n",
+	PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64"",
 		     rxq->sw_ring, rxq->rx_ring, rxq->rx_ring_phys_addr);
 
 	dev->data->rx_queues[queue_idx] = rxq;
@@ -1427,7 +1427,7 @@ eth_igb_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 	uint32_t desc = 0;
 
 	if (rx_queue_id >= dev->data->nb_rx_queues) {
-		PMD_RX_LOG(ERR, "Invalid RX queue id=%d\n", rx_queue_id);
+		PMD_RX_LOG(ERR, "Invalid RX queue id=%d", rx_queue_id);
 		return 0;
 	}
 
@@ -1726,7 +1726,7 @@ igb_is_vmdq_supported(const struct rte_eth_dev *dev)
 	case e1000_i210:
 	case e1000_i211:
 	default:
-		PMD_INIT_LOG(ERR, "Cannot support VMDq feature\n");
+		PMD_INIT_LOG(ERR, "Cannot support VMDq feature");
 		return 0;
 	}
 }
@@ -1739,7 +1739,8 @@ igb_vmdq_rx_hw_configure(struct rte_eth_dev *dev)
 	uint32_t mrqc, vt_ctl, vmolr, rctl;
 	int i;
 
-	PMD_INIT_LOG(DEBUG, ">>");
+	PMD_INIT_FUNC_TRACE();
+
 	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	cfg = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;
 
@@ -1827,8 +1828,8 @@ igb_alloc_rx_queue_mbufs(struct igb_rx_queue *rxq)
 		struct rte_mbuf *mbuf = rte_rxmbuf_alloc(rxq->mb_pool);
 
 		if (mbuf == NULL) {
-			PMD_INIT_LOG(ERR, "RX mbuf alloc failed "
-				"queue_id=%hu\n", rxq->queue_id);
+			PMD_INIT_LOG(ERR, "RX mbuf alloc failed queue_id=%hu",
+				     rxq->queue_id);
 			return (-ENOMEM);
 		}
 		dma_addr =
@@ -2273,7 +2274,7 @@ eth_igbvf_rx_init(struct rte_eth_dev *dev)
 			 * to avoid Write-Back not triggered sometimes
 			 */
 			rxdctl |= 0x10000;
-			PMD_INIT_LOG(DEBUG, "Force set RX WTHRESH to 1 !\n");
+			PMD_INIT_LOG(DEBUG, "Force set RX WTHRESH to 1 !");
 		}
 		else
 			rxdctl |= ((rxq->wthresh & 0x1F) << 16);
@@ -2341,7 +2342,7 @@ eth_igbvf_tx_init(struct rte_eth_dev *dev)
 			 * to avoid Write-Back not triggered sometimes
 			 */
 			txdctl |= 0x10000;
-			PMD_INIT_LOG(DEBUG, "Force set TX WTHRESH to 1 !\n");
+			PMD_INIT_LOG(DEBUG, "Force set TX WTHRESH to 1 !");
 		}
 		else
 			txdctl |= ((txq->wthresh & 0x1F) << 16);
-- 
1.7.10.4

Thread overview: 30+ messages
2014-09-01 10:24 [dpdk-dev] [PATCH v2 00/17] cleanup logs in main PMDs David Marchand
2014-09-01 10:24 ` [dpdk-dev] [PATCH v2 01/17] ixgbe: use the right debug macro David Marchand
2014-09-02 13:43   ` Jay Rolette
2014-09-02 14:16     ` David Marchand
2014-09-02 14:21       ` Thomas Monjalon
2014-09-02 17:57         ` Jay Rolette
2014-09-01 10:24 ` [dpdk-dev] [PATCH v2 02/17] ixgbe/base: add a _RAW macro for use by shared code David Marchand
2014-09-01 10:24 ` [dpdk-dev] [PATCH v2 03/17] ixgbe: clean log messages David Marchand
2014-09-02 15:19   ` Jay Rolette
2014-09-01 10:24 ` [dpdk-dev] [PATCH v2 04/17] ixgbe: always log init messages David Marchand
2014-09-01 10:24 ` [dpdk-dev] [PATCH v2 05/17] ixgbe: add a message when forcing scatter mode David Marchand
2014-09-01 10:24 ` [dpdk-dev] [PATCH v2 06/17] ixgbe: add log messages when rx bulk mode is not usable David Marchand
2014-09-01 10:24 ` [dpdk-dev] [PATCH v2 07/17] i40e: use the right debug macro David Marchand
2014-09-02 18:25   ` Jay Rolette
2014-09-01 10:24 ` [dpdk-dev] [PATCH v2 08/17] i40e/base: add a _RAW macro for use by shared code David Marchand
2014-09-01 10:24 ` [dpdk-dev] [PATCH v2 09/17] i40e: clean log messages David Marchand
2014-09-02 18:20   ` Jay Rolette
2014-09-01 10:24 ` [dpdk-dev] [PATCH v2 10/17] i40e: always log init messages David Marchand
2014-09-01 10:24 ` [dpdk-dev] [PATCH v2 11/17] i40e: add log messages when rx bulk mode is not usable David Marchand
2014-09-01 10:24 ` [dpdk-dev] [PATCH v2 12/17] e1000: use the right debug macro David Marchand
2014-09-02 18:29   ` Jay Rolette
2014-09-01 10:24 ` [dpdk-dev] [PATCH v2 13/17] e1000/base: add a _RAW macro for use by shared code David Marchand
2014-09-01 10:24 ` David Marchand [this message]
2014-09-02 19:05   ` [dpdk-dev] [PATCH v2 14/17] e1000: clean log messages Jay Rolette
2014-09-02 19:19     ` David Marchand
2014-09-01 10:24 ` [dpdk-dev] [PATCH v2 15/17] e1000: always log init messages David Marchand
2014-09-01 10:24 ` [dpdk-dev] [PATCH v2 16/17] e1000: add a message when forcing scatter mode David Marchand
2014-09-02 19:20   ` David Marchand
2014-09-01 10:24 ` [dpdk-dev] [PATCH v2 17/17] eal: set log level from command line David Marchand
2014-09-12 12:32 ` [dpdk-dev] [PATCH v2 00/17] cleanup logs in main PMDs Bruce Richardson
