- * [dpdk-dev] [PATCH v3 01/20] ixgbe: use the right debug macro
  2014-09-17 13:46 [dpdk-dev] [PATCH v3 00/20] cleanup logs in main PMDs David Marchand
@ 2014-09-17 13:46 ` David Marchand
  2014-09-17 13:46 ` [dpdk-dev] [PATCH v3 02/20] ixgbe/base: add a raw macro for use by shared code David Marchand
                   ` (19 subsequent siblings)
  20 siblings, 0 replies; 25+ messages in thread
From: David Marchand @ 2014-09-17 13:46 UTC (permalink / raw)
  To: dev
- We should not use the DEBUGOUT*/DEBUGFUNC macros in non-shared code.
These macros are only compat wrappers provided for the shared code.
- We should avoid calling RTE_LOG directly, as the PMD provides a wrapper for logs.
- Replace some PMD_INIT_LOG(DEBUG, "some_func") calls with PMD_INIT_FUNC_TRACE()
(illustrated in the sketch below).
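[Editor's note: a minimal sketch of the call styles this patch converges on,
assuming only the macros visible in this series; the function example_init()
is hypothetical and not part of the patch.]

	#include "ixgbe_logs.h"	/* PMD_DRV_LOG, PMD_INIT_FUNC_TRACE */

	/* hypothetical PMD-private (non-shared) helper, for illustration only */
	static void
	example_init(void)
	{
		/* function-entry trace, instead of PMD_INIT_LOG(DEBUG, "example_init") */
		PMD_INIT_FUNC_TRACE();

		/* log through the PMD wrapper rather than calling RTE_LOG() directly */
		PMD_DRV_LOG(DEBUG, "SWFW common locks released");

		/* DEBUGOUT()/DEBUGFUNC() stay reserved for the shared base driver code */
	}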
Signed-off-by: David Marchand <david.marchand@6wind.com>
v2 Reviewed-by: Jay Rolette <rolette@infiniteio.com>
v2 Acked-by: Bruce Richardson <bruce.richardson@intel.com>
---
 lib/librte_pmd_ixgbe/ixgbe_82599_bypass.c |   14 ++++----
 lib/librte_pmd_ixgbe/ixgbe_bypass.c       |   26 +++++++-------
 lib/librte_pmd_ixgbe/ixgbe_ethdev.c       |   37 ++++++++++----------
 lib/librte_pmd_ixgbe/ixgbe_pf.c           |    4 +--
 lib/librte_pmd_ixgbe/ixgbe_rxtx.c         |   53 +++++++++++++++--------------
 5 files changed, 68 insertions(+), 66 deletions(-)
diff --git a/lib/librte_pmd_ixgbe/ixgbe_82599_bypass.c b/lib/librte_pmd_ixgbe/ixgbe_82599_bypass.c
index 0f0000c..2623419 100644
--- a/lib/librte_pmd_ixgbe/ixgbe_82599_bypass.c
+++ b/lib/librte_pmd_ixgbe/ixgbe_82599_bypass.c
@@ -63,7 +63,7 @@ ixgbe_set_fiber_fixed_speed(struct ixgbe_hw *hw, ixgbe_link_speed speed)
 		rs = IXGBE_SFF_SOFT_RS_SELECT_1G;
 		break;
 	default:
-		DEBUGOUT("Invalid fixed module speed\n");
+		PMD_DRV_LOG("Invalid fixed module speed");
 		return;
 	}
 
@@ -72,7 +72,7 @@ ixgbe_set_fiber_fixed_speed(struct ixgbe_hw *hw, ixgbe_link_speed speed)
 					   IXGBE_I2C_EEPROM_DEV_ADDR2,
 					   &eeprom_data);
 	if (status) {
-		DEBUGOUT("Failed to read Rx Rate Select RS0\n");
+		PMD_DRV_LOG("Failed to read Rx Rate Select RS0");
 		goto out;
 	}
 
@@ -82,7 +82,7 @@ ixgbe_set_fiber_fixed_speed(struct ixgbe_hw *hw, ixgbe_link_speed speed)
 					    IXGBE_I2C_EEPROM_DEV_ADDR2,
 					    eeprom_data);
 	if (status) {
-		DEBUGOUT("Failed to write Rx Rate Select RS0\n");
+		PMD_DRV_LOG("Failed to write Rx Rate Select RS0");
 		goto out;
 	}
 
@@ -91,7 +91,7 @@ ixgbe_set_fiber_fixed_speed(struct ixgbe_hw *hw, ixgbe_link_speed speed)
 					   IXGBE_I2C_EEPROM_DEV_ADDR2,
 					   &eeprom_data);
 	if (status) {
-		DEBUGOUT("Failed to read Rx Rate Select RS1\n");
+		PMD_DRV_LOG("Failed to read Rx Rate Select RS1");
 		goto out;
 	}
 
@@ -101,7 +101,7 @@ ixgbe_set_fiber_fixed_speed(struct ixgbe_hw *hw, ixgbe_link_speed speed)
 					    IXGBE_I2C_EEPROM_DEV_ADDR2,
 					    eeprom_data);
 	if (status) {
-		DEBUGOUT("Failed to write Rx Rate Select RS1\n");
+		PMD_DRV_LOG("Failed to write Rx Rate Select RS1");
 		goto out;
 	}
 out:
@@ -130,7 +130,7 @@ ixgbe_setup_mac_link_multispeed_fixed_fiber(struct ixgbe_hw *hw,
 	bool link_up = false;
 	bool negotiation;
 
-	DEBUGFUNC("");
+	PMD_INIT_FUNC_TRACE();
 
 	/* Mask off requested but non-supported speeds */
 	status = ixgbe_get_link_capabilities(hw, &link_speed, &negotiation);
@@ -261,7 +261,7 @@ ixgbe_bypass_get_media_type(struct ixgbe_hw *hw)
 {
 	enum ixgbe_media_type media_type;
 
-	DEBUGFUNC("");
+	PMD_INIT_FUNC_TRACE();
 
 	if (hw->device_id == IXGBE_DEV_ID_82599_BYPASS) {
 		media_type = ixgbe_media_type_fiber;
diff --git a/lib/librte_pmd_ixgbe/ixgbe_bypass.c b/lib/librte_pmd_ixgbe/ixgbe_bypass.c
index 1d21dc0..1a980b8 100644
--- a/lib/librte_pmd_ixgbe/ixgbe_bypass.c
+++ b/lib/librte_pmd_ixgbe/ixgbe_bypass.c
@@ -40,20 +40,20 @@
 #define	BYPASS_STATUS_OFF_MASK	3
 
 /* Macros to check for invlaid function pointers. */
-#define	FUNC_PTR_OR_ERR_RET(func, retval) do {             \
-	if ((func) == NULL) {                              \
-		DEBUGOUT("%s:%d function not supported\n", \
-			__func__, __LINE__);               \
-		return (retval);                           \
-	}                                                  \
+#define	FUNC_PTR_OR_ERR_RET(func, retval) do {              \
+	if ((func) == NULL) {                               \
+		PMD_DRV_LOG("%s:%d function not supported", \
+			    __func__, __LINE__);            \
+		return retval;                            \
+	}                                                   \
 } while(0)
 
-#define	FUNC_PTR_OR_RET(func) do {                         \
-	if ((func) == NULL) {                              \
-		DEBUGOUT("%s:%d function not supported\n", \
-			__func__, __LINE__);               \
-		return;                                    \
-	}                                                  \
+#define	FUNC_PTR_OR_RET(func) do {                          \
+	if ((func) == NULL) {                               \
+		PMD_DRV_LOG("%s:%d function not supported", \
+			    __func__, __LINE__);            \
+		return;                                     \
+	}                                                   \
 } while(0)
 
 
@@ -114,7 +114,7 @@ ixgbe_bypass_init(struct rte_eth_dev *dev)
 	/* Only allow BYPASS ops on the first port */
 	if (hw->device_id != IXGBE_DEV_ID_82599_BYPASS ||
 			hw->bus.func != 0) {
-		DEBUGOUT("bypass function is not supported on that device\n");
+		PMD_DRV_LOG("bypass function is not supported on that device");
 		return;
 	}
 
diff --git a/lib/librte_pmd_ixgbe/ixgbe_ethdev.c b/lib/librte_pmd_ixgbe/ixgbe_ethdev.c
index 59122a1..f914405 100644
--- a/lib/librte_pmd_ixgbe/ixgbe_ethdev.c
+++ b/lib/librte_pmd_ixgbe/ixgbe_ethdev.c
@@ -667,7 +667,7 @@ ixgbe_swfw_lock_reset(struct ixgbe_hw *hw)
 	 */
 	mask = IXGBE_GSSR_PHY0_SM << hw->bus.func;
 	if (ixgbe_acquire_swfw_semaphore(hw, mask) < 0) {
-		   DEBUGOUT1("SWFW phy%d lock released", hw->bus.func);
+		PMD_DRV_LOG(DEBUG, "SWFW phy%d lock released", hw->bus.func);
 	}
 	ixgbe_release_swfw_semaphore(hw, mask);
 
@@ -679,7 +679,7 @@ ixgbe_swfw_lock_reset(struct ixgbe_hw *hw)
 	 */
 	mask = IXGBE_GSSR_EEP_SM | IXGBE_GSSR_MAC_CSR_SM | IXGBE_GSSR_SW_MNG_SM;
 	if (ixgbe_acquire_swfw_semaphore(hw, mask) < 0) {
-		   DEBUGOUT("SWFW common locks released");
+		PMD_DRV_LOG(DEBUG, "SWFW common locks released");
 	}
 	ixgbe_release_swfw_semaphore(hw, mask);
 }
@@ -933,7 +933,7 @@ eth_ixgbevf_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
 		IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(eth_dev->data->dev_private);
 	struct ether_addr *perm_addr = (struct ether_addr *) hw->mac.perm_addr;
 
-	PMD_INIT_LOG(DEBUG, "eth_ixgbevf_dev_init");
+	PMD_INIT_FUNC_TRACE();
 
 	eth_dev->dev_ops = &ixgbevf_eth_dev_ops;
 	eth_dev->rx_pkt_burst = &ixgbe_recv_pkts;
@@ -1012,16 +1012,15 @@ eth_ixgbevf_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
 			eth_dev->data->mac_addrs = NULL;
 			return diag;
 		}
-		RTE_LOG(INFO, PMD,
-			"\tVF MAC address not assigned by Host PF\n"
-			"\tAssign randomly generated MAC address "
-			"%02x:%02x:%02x:%02x:%02x:%02x\n",
-			perm_addr->addr_bytes[0],
-			perm_addr->addr_bytes[1],
-			perm_addr->addr_bytes[2],
-			perm_addr->addr_bytes[3],
-			perm_addr->addr_bytes[4],
-			perm_addr->addr_bytes[5]);
+		PMD_INIT_LOG(INFO, "\tVF MAC address not assigned by Host PF");
+		PMD_INIT_LOG(INFO, "\tAssign randomly generated MAC address "
+			     "%02x:%02x:%02x:%02x:%02x:%02x",
+			     perm_addr->addr_bytes[0],
+			     perm_addr->addr_bytes[1],
+			     perm_addr->addr_bytes[2],
+			     perm_addr->addr_bytes[3],
+			     perm_addr->addr_bytes[4],
+			     perm_addr->addr_bytes[5]);
 	}
 
 	/* Copy the permanent MAC address */
@@ -1090,7 +1089,7 @@ rte_ixgbe_pmd_init(const char *name __rte_unused, const char *params __rte_unuse
 static int
 rte_ixgbevf_pmd_init(const char *name __rte_unused, const char *param __rte_unused)
 {
-	DEBUGFUNC("rte_ixgbevf_pmd_init");
+	PMD_INIT_FUNC_TRACE();
 
 	rte_eth_driver_register(&rte_ixgbevf_pmd);
 	return (0);
@@ -2515,7 +2514,7 @@ ixgbe_dcb_pfc_enable_generic(struct ixgbe_hw *hw,uint8_t tc_num)
 		fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY;
 		break;
 	default:
-		DEBUGOUT("Flow control param set incorrectly\n");
+		PMD_DRV_LOG(DEBUG, "Flow control param set incorrectly");
 		ret_val = IXGBE_ERR_CONFIG;
 		goto out;
 		break;
@@ -2765,7 +2764,7 @@ ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
 static void
 ixgbevf_intr_disable(struct ixgbe_hw *hw)
 {
-	PMD_INIT_LOG(DEBUG, "ixgbevf_intr_disable");
+	PMD_INIT_FUNC_TRACE();
 
 	/* Clear interrupt mask to stop from interrupts being generated */
 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, IXGBE_VF_IRQ_CLEAR_MASK);
@@ -2807,7 +2806,7 @@ ixgbevf_dev_start(struct rte_eth_dev *dev)
 		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	int err, mask = 0;
 
-	PMD_INIT_LOG(DEBUG, "ixgbevf_dev_start");
+	PMD_INIT_FUNC_TRACE();
 
 	hw->mac.ops.reset_hw(hw);
 
@@ -2842,7 +2841,7 @@ ixgbevf_dev_stop(struct rte_eth_dev *dev)
 {
 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
-	PMD_INIT_LOG(DEBUG, "ixgbevf_dev_stop");
+	PMD_INIT_FUNC_TRACE();
 
 	hw->adapter_stopped = TRUE;
 	ixgbe_stop_adapter(hw);
@@ -2861,7 +2860,7 @@ ixgbevf_dev_close(struct rte_eth_dev *dev)
 {
 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
-	PMD_INIT_LOG(DEBUG, "ixgbevf_dev_close");
+	PMD_INIT_FUNC_TRACE();
 
 	ixgbe_reset_hw(hw);
 
diff --git a/lib/librte_pmd_ixgbe/ixgbe_pf.c b/lib/librte_pmd_ixgbe/ixgbe_pf.c
index 170944d..59fb58b 100644
--- a/lib/librte_pmd_ixgbe/ixgbe_pf.c
+++ b/lib/librte_pmd_ixgbe/ixgbe_pf.c
@@ -478,7 +478,7 @@ ixgbe_rcv_msg_from_vf(struct rte_eth_dev *dev, uint16_t vf)
 
 	retval = ixgbe_read_mbx(hw, msgbuf, mbx_size, vf);
 	if (retval) {
-		RTE_LOG(ERR, PMD, "Error mbx recv msg from VF %d\n", vf);
+		PMD_DRV_LOG(ERR, "Error mbx recv msg from VF %d", vf);
 		return retval;
 	}
 
@@ -511,7 +511,7 @@ ixgbe_rcv_msg_from_vf(struct rte_eth_dev *dev, uint16_t vf)
 		retval = ixgbe_vf_set_vlan(dev, vf, msgbuf);
 		break;
 	default:
-		RTE_LOG(DEBUG, PMD, "Unhandled Msg %8.8x\n", (unsigned)  msgbuf[0]);
+		PMD_DRV_LOG(DEBUG, "Unhandled Msg %8.8x", (unsigned)msgbuf[0]);
 		retval = IXGBE_ERR_MBX;
 		break;
 	}
diff --git a/lib/librte_pmd_ixgbe/ixgbe_rxtx.c b/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
index 575a014..765b4e0 100644
--- a/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
+++ b/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
@@ -1763,33 +1763,36 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
 	tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ?
 			tx_conf->tx_free_thresh : DEFAULT_TX_FREE_THRESH);
 	if (tx_rs_thresh >= (nb_desc - 2)) {
-		RTE_LOG(ERR, PMD, "tx_rs_thresh must be less than the number "
-			"of TX descriptors minus 2. (tx_rs_thresh=%u port=%d "
-				"queue=%d)\n", (unsigned int)tx_rs_thresh,
-				(int)dev->data->port_id, (int)queue_idx);
+		PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than the number "
+			     "of TX descriptors minus 2. (tx_rs_thresh=%u "
+			     "port=%d queue=%d)", (unsigned int)tx_rs_thresh,
+			     (int)dev->data->port_id, (int)queue_idx);
 		return -(EINVAL);
 	}
 	if (tx_free_thresh >= (nb_desc - 3)) {
-		RTE_LOG(ERR, PMD, "tx_rs_thresh must be less than the "
-			"tx_free_thresh must be less than the number of TX "
-			"descriptors minus 3. (tx_free_thresh=%u port=%d "
-				"queue=%d)\n", (unsigned int)tx_free_thresh,
-				(int)dev->data->port_id, (int)queue_idx);
+		PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than the "
+			     "tx_free_thresh must be less than the number of "
+			     "TX descriptors minus 3. (tx_free_thresh=%u "
+			     "port=%d queue=%d)",
+			     (unsigned int)tx_free_thresh,
+			     (int)dev->data->port_id, (int)queue_idx);
 		return -(EINVAL);
 	}
 	if (tx_rs_thresh > tx_free_thresh) {
-		RTE_LOG(ERR, PMD, "tx_rs_thresh must be less than or equal to "
-			"tx_free_thresh. (tx_free_thresh=%u tx_rs_thresh=%u "
-			"port=%d queue=%d)\n", (unsigned int)tx_free_thresh,
-			(unsigned int)tx_rs_thresh, (int)dev->data->port_id,
-							(int)queue_idx);
+		PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than or equal to "
+			     "tx_free_thresh. (tx_free_thresh=%u "
+			     "tx_rs_thresh=%u port=%d queue=%d)",
+			     (unsigned int)tx_free_thresh,
+			     (unsigned int)tx_rs_thresh,
+			     (int)dev->data->port_id,
+			     (int)queue_idx);
 		return -(EINVAL);
 	}
 	if ((nb_desc % tx_rs_thresh) != 0) {
-		RTE_LOG(ERR, PMD, "tx_rs_thresh must be a divisor of the "
-			"number of TX descriptors. (tx_rs_thresh=%u port=%d "
-				"queue=%d)\n", (unsigned int)tx_rs_thresh,
-				(int)dev->data->port_id, (int)queue_idx);
+		PMD_INIT_LOG(ERR, "tx_rs_thresh must be a divisor of the "
+			     "number of TX descriptors. (tx_rs_thresh=%u "
+			     "port=%d queue=%d)", (unsigned int)tx_rs_thresh,
+			     (int)dev->data->port_id, (int)queue_idx);
 		return -(EINVAL);
 	}
 
@@ -1800,10 +1803,10 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
 	 * accumulates WTHRESH descriptors.
 	 */
 	if ((tx_rs_thresh > 1) && (tx_conf->tx_thresh.wthresh != 0)) {
-		RTE_LOG(ERR, PMD, "TX WTHRESH must be set to 0 if "
-			"tx_rs_thresh is greater than 1. (tx_rs_thresh=%u "
-			"port=%d queue=%d)\n", (unsigned int)tx_rs_thresh,
-				(int)dev->data->port_id, (int)queue_idx);
+		PMD_INIT_LOG(ERR, "TX WTHRESH must be set to 0 if "
+			     "tx_rs_thresh is greater than 1. (tx_rs_thresh=%u "
+			     "port=%d queue=%d)", (unsigned int)tx_rs_thresh,
+			     (int)dev->data->port_id, (int)queue_idx);
 		return -(EINVAL);
 	}
 
@@ -3276,7 +3279,7 @@ ixgbe_dev_mq_rx_configure(struct rte_eth_dev *dev)
 			IXGBE_WRITE_REG(hw, IXGBE_MRQC, IXGBE_MRQC_VMDQRT8TCEN);
 			break;
 		default:
-			RTE_LOG(ERR, PMD, "invalid pool number in IOV mode\n");
+			PMD_INIT_LOG(ERR, "invalid pool number in IOV mode");
 		}
 	}
 
@@ -3329,7 +3332,7 @@ ixgbe_dev_mq_tx_configure(struct rte_eth_dev *dev)
 			break;
 		default:
 			mtqc = IXGBE_MTQC_64Q_1PB;
-			RTE_LOG(ERR, PMD, "invalid pool number in IOV mode\n");
+			PMD_INIT_LOG(ERR, "invalid pool number in IOV mode");
 		}
 		IXGBE_WRITE_REG(hw, IXGBE_MTQC, mtqc);
 	}
@@ -3592,7 +3595,7 @@ ixgbe_dev_tx_init(struct rte_eth_dev *dev)
 static inline void
 ixgbe_setup_loopback_link_82599(struct ixgbe_hw *hw)
 {
-	DEBUGFUNC("ixgbe_setup_loopback_link_82599");
+	PMD_INIT_FUNC_TRACE();
 
 	if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
 		if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM) !=
-- 
1.7.10.4
- * [dpdk-dev] [PATCH v3 02/20] ixgbe/base: add a raw macro for use by shared code
  2014-09-17 13:46 [dpdk-dev] [PATCH v3 00/20] cleanup logs in main PMDs David Marchand
  2014-09-17 13:46 ` [dpdk-dev] [PATCH v3 01/20] ixgbe: use the right debug macro David Marchand
@ 2014-09-17 13:46 ` David Marchand
  2014-09-17 13:46 ` [dpdk-dev] [PATCH v3 03/20] ixgbe: indent logs sections David Marchand
                   ` (18 subsequent siblings)
  20 siblings, 0 replies; 25+ messages in thread
From: David Marchand @ 2014-09-17 13:46 UTC (permalink / raw)
  To: dev
Since shared code always adds a trailing \n, add a PMD_DRV_LOG_RAW macro that
will not add one.
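[Editor's note: a rough expansion sketch of the two macros, following the
definitions in the hunks below; the "link is up" message is a made-up example.]

	/* shared code carries its own trailing \n and goes through the raw macro: */
	DEBUGOUT("link is up\n");
	/* -> PMD_DRV_LOG_RAW(DEBUG, "link is up\n")
	 * -> RTE_LOG(DEBUG, PMD, "%s(): " "link is up\n", __func__) */

	/* PMD-private code keeps using PMD_DRV_LOG, which appends the \n itself: */
	PMD_DRV_LOG(DEBUG, "link is up");
	/* -> PMD_DRV_LOG_RAW(DEBUG, "link is up" "\n")
	 * -> RTE_LOG(DEBUG, PMD, "%s(): " "link is up" "\n", __func__) */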
Signed-off-by: David Marchand <david.marchand@6wind.com>
Acked-by: Bruce Richardson <bruce.richardson@intel.com>
---
 lib/librte_pmd_ixgbe/ixgbe/ixgbe_osdep.h |    4 ++--
 lib/librte_pmd_ixgbe/ixgbe_logs.h        |    9 ++++++---
 2 files changed, 8 insertions(+), 5 deletions(-)
diff --git a/lib/librte_pmd_ixgbe/ixgbe/ixgbe_osdep.h b/lib/librte_pmd_ixgbe/ixgbe/ixgbe_osdep.h
index 2bf1a6d..ae9c280 100644
--- a/lib/librte_pmd_ixgbe/ixgbe/ixgbe_osdep.h
+++ b/lib/librte_pmd_ixgbe/ixgbe/ixgbe_osdep.h
@@ -54,8 +54,8 @@
 #define usec_delay(x) DELAY(x)
 #define msec_delay(x) DELAY(1000*(x))
 
-#define DEBUGFUNC(F)            DEBUGOUT(F);
-#define DEBUGOUT(S, args...)    PMD_DRV_LOG(DEBUG, S, ##args)
+#define DEBUGFUNC(F)            DEBUGOUT(F "\n");
+#define DEBUGOUT(S, args...)    PMD_DRV_LOG_RAW(DEBUG, S, ##args)
 #define DEBUGOUT1(S, args...)   DEBUGOUT(S, ##args)
 #define DEBUGOUT2(S, args...)   DEBUGOUT(S, ##args)
 #define DEBUGOUT3(S, args...)   DEBUGOUT(S, ##args)
diff --git a/lib/librte_pmd_ixgbe/ixgbe_logs.h b/lib/librte_pmd_ixgbe/ixgbe_logs.h
index 9f0a684..4685c18 100644
--- a/lib/librte_pmd_ixgbe/ixgbe_logs.h
+++ b/lib/librte_pmd_ixgbe/ixgbe_logs.h
@@ -65,10 +65,13 @@
 #endif
 
 #ifdef RTE_LIBRTE_IXGBE_DEBUG_DRIVER
-#define PMD_DRV_LOG(level, fmt, args...) \
-	RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
+#define PMD_DRV_LOG_RAW(level, fmt, args...) \
+	RTE_LOG(level, PMD, "%s(): " fmt, __func__, ## args)
 #else
-#define PMD_DRV_LOG(level, fmt, args...) do { } while(0)
+#define PMD_DRV_LOG_RAW(level, fmt, args...) do { } while (0)
 #endif
 
+#define PMD_DRV_LOG(level, fmt, args...) \
+	PMD_DRV_LOG_RAW(level, fmt "\n", ## args)
+
 #endif /* _IXGBE_LOGS_H_ */
-- 
1.7.10.4
- * [dpdk-dev] [PATCH v3 03/20] ixgbe: indent logs sections
  2014-09-17 13:46 [dpdk-dev] [PATCH v3 00/20] cleanup logs in main PMDs David Marchand
  2014-09-17 13:46 ` [dpdk-dev] [PATCH v3 01/20] ixgbe: use the right debug macro David Marchand
  2014-09-17 13:46 ` [dpdk-dev] [PATCH v3 02/20] ixgbe/base: add a raw macro for use by shared code David Marchand
@ 2014-09-17 13:46 ` David Marchand
  2014-09-17 13:46 ` [dpdk-dev] [PATCH v3 04/20] ixgbe: clean log messages David Marchand
                   ` (17 subsequent siblings)
  20 siblings, 0 replies; 25+ messages in thread
From: David Marchand @ 2014-09-17 13:46 UTC (permalink / raw)
  To: dev
Prepare for the next commit: indent the sections where log messages will be
modified, so that the next patch is only about \n changes.
Signed-off-by: David Marchand <david.marchand@6wind.com>
---
 lib/librte_pmd_ixgbe/ixgbe_ethdev.c |   51 +++++++++++++++++------------------
 lib/librte_pmd_ixgbe/ixgbe_rxtx.c   |   34 ++++++++++++-----------
 2 files changed, 42 insertions(+), 43 deletions(-)
diff --git a/lib/librte_pmd_ixgbe/ixgbe_ethdev.c b/lib/librte_pmd_ixgbe/ixgbe_ethdev.c
index f914405..71b964a 100644
--- a/lib/librte_pmd_ixgbe/ixgbe_ethdev.c
+++ b/lib/librte_pmd_ixgbe/ixgbe_ethdev.c
@@ -548,7 +548,8 @@ ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
 		return -ENOSYS;
 
 	PMD_INIT_LOG(INFO, "Setting port %d, %s queue_id %d to stat index %d\n",
-		     (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX", queue_id, stat_idx);
+		     (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX",
+		     queue_id, stat_idx);
 
 	n = (uint8_t)(queue_id / NB_QMAP_FIELDS_PER_QSM_REG);
 	if (n >= IXGBE_NB_STAT_MAPPING_REGS) {
@@ -574,8 +575,9 @@ ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
 
 	PMD_INIT_LOG(INFO, "Set port %d, %s queue_id %d to stat index %d\n"
 		     "%s[%d] = 0x%08x\n",
-		     (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX", queue_id, stat_idx,
-		     is_rx ? "RQSMR" : "TQSM",n, is_rx ? stat_mappings->rqsmr[n] : stat_mappings->tqsm[n]);
+		     (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX",
+		     queue_id, stat_idx, is_rx ? "RQSMR" : "TQSM", n,
+		     is_rx ? stat_mappings->rqsmr[n] : stat_mappings->tqsm[n]);
 
 	/* Now write the mapping in the appropriate register */
 	if (is_rx) {
@@ -849,8 +851,7 @@ eth_ixgbe_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
 	IXGBE_WRITE_FLUSH(hw);
 
 	if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present)
-		PMD_INIT_LOG(DEBUG,
-			     "MAC: %d, PHY: %d, SFP+: %d<n",
+		PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d, SFP+: %d<n",
 			     (int) hw->mac.type, (int) hw->phy.type,
 			     (int) hw->phy.sfp_type);
 	else
@@ -1038,8 +1039,8 @@ eth_ixgbevf_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
 	}
 
 	PMD_INIT_LOG(DEBUG, "\nport %d vendorID=0x%x deviceID=0x%x mac.type=%s\n",
-			 eth_dev->data->port_id, pci_dev->id.vendor_id, pci_dev->id.device_id,
-			 "ixgbe_mac_82599_vf");
+		     eth_dev->data->port_id, pci_dev->id.vendor_id,
+		     pci_dev->id.device_id, "ixgbe_mac_82599_vf");
 
 	return 0;
 }
@@ -1418,8 +1419,8 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
 	if ((dev->data->dev_conf.link_duplex != ETH_LINK_AUTONEG_DUPLEX) &&
 			(dev->data->dev_conf.link_duplex != ETH_LINK_FULL_DUPLEX)) {
 		PMD_INIT_LOG(ERR, "Invalid link_duplex (%hu) for port %hhu\n",
-				dev->data->dev_conf.link_duplex,
-				dev->data->port_id);
+			     dev->data->dev_conf.link_duplex,
+			     dev->data->port_id);
 		return -EINVAL;
 	}
 
@@ -1491,8 +1492,8 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
 		break;
 	default:
 		PMD_INIT_LOG(ERR, "Invalid link_speed (%hu) for port %hhu\n",
-				dev->data->dev_conf.link_speed,
-				dev->data->port_id);
+			     dev->data->dev_conf.link_speed,
+			     dev->data->port_id);
 		goto error;
 	}
 
@@ -1598,10 +1599,8 @@ ixgbe_dev_set_link_up(struct rte_eth_dev *dev)
 #ifdef RTE_NIC_BYPASS
 		if (hw->device_id == IXGBE_DEV_ID_82599_BYPASS) {
 			/* Not suported in bypass mode */
-			PMD_INIT_LOG(ERR,
-				"\nSet link up is not supported "
-				"by device id 0x%x\n",
-				hw->device_id);
+			PMD_INIT_LOG(ERR, "\nSet link up is not supported "
+				     "by device id 0x%x\n", hw->device_id);
 			return -ENOTSUP;
 		}
 #endif
@@ -1611,7 +1610,7 @@ ixgbe_dev_set_link_up(struct rte_eth_dev *dev)
 	}
 
 	PMD_INIT_LOG(ERR, "\nSet link up is not supported by device id 0x%x\n",
-		hw->device_id);
+		     hw->device_id);
 	return -ENOTSUP;
 }
 
@@ -1627,10 +1626,8 @@ ixgbe_dev_set_link_down(struct rte_eth_dev *dev)
 #ifdef RTE_NIC_BYPASS
 		if (hw->device_id == IXGBE_DEV_ID_82599_BYPASS) {
 			/* Not suported in bypass mode */
-			PMD_INIT_LOG(ERR,
-				"\nSet link down is not supported "
-				"by device id 0x%x\n",
-				 hw->device_id);
+			PMD_INIT_LOG(ERR, "\nSet link down is not supported "
+				     "by device id 0x%x\n", hw->device_id);
 			return -ENOTSUP;
 		}
 #endif
@@ -1639,9 +1636,8 @@ ixgbe_dev_set_link_down(struct rte_eth_dev *dev)
 		return 0;
 	}
 
-	PMD_INIT_LOG(ERR,
-		"\nSet link down is not supported by device id 0x%x\n",
-		 hw->device_id);
+	PMD_INIT_LOG(ERR, "\nSet link down is not supported by device id 0x%x\n",
+		     hw->device_id);
 	return -ENOTSUP;
 }
 
@@ -2599,7 +2595,7 @@ ixgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_pfc_conf *p
 	 */
 	max_high_water = (rx_buf_size - ETHER_MAX_LEN) >> IXGBE_RXPBSIZE_SHIFT;
 	if ((pfc_conf->fc.high_water > max_high_water) ||
-		(pfc_conf->fc.high_water <= pfc_conf->fc.low_water)) {
+	    (pfc_conf->fc.high_water <= pfc_conf->fc.low_water)) {
 		PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB\n");
 		PMD_INIT_LOG(ERR, "High_water must <=  0x%x\n", max_high_water);
 		return (-EINVAL);
@@ -2778,7 +2774,7 @@ ixgbevf_dev_configure(struct rte_eth_dev *dev)
 	struct rte_eth_conf* conf = &dev->data->dev_conf;
 
 	PMD_INIT_LOG(DEBUG, "\nConfigured Virtual Function port id: %d\n",
-		dev->data->port_id);
+		     dev->data->port_id);
 
 	/*
 	 * VF has no ability to enable/disable HW CRC
@@ -2818,7 +2814,8 @@ ixgbevf_dev_start(struct rte_eth_dev *dev)
 	/* This can fail when allocating mbufs for descriptor rings */
 	err = ixgbevf_dev_rx_init(dev);
 	if (err) {
-		PMD_INIT_LOG(ERR, "Unable to initialize RX hardware (%d)\n", err);
+		PMD_INIT_LOG(ERR, "Unable to initialize RX hardware (%d)\n",
+			     err);
 		ixgbe_dev_clear_queues(dev);
 		return err;
 	}
@@ -3098,7 +3095,7 @@ ixgbe_set_pool_rx_mode(struct rte_eth_dev *dev, uint16_t pool,
 
 	if (hw->mac.type == ixgbe_mac_82598EB) {
 		PMD_INIT_LOG(ERR, "setting VF receive mode set should be done"
-			" on 82599 hardware and newer\n");
+			     " on 82599 hardware and newer\n");
 		return (-ENOTSUP);
 	}
 	if (ixgbe_vmdq_mode_check(hw) < 0)
diff --git a/lib/librte_pmd_ixgbe/ixgbe_rxtx.c b/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
index 765b4e0..8732051 100644
--- a/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
+++ b/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
@@ -1889,8 +1889,14 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
 			dev->tx_pkt_burst = ixgbe_xmit_pkts_simple;
 	} else {
 		PMD_INIT_LOG(INFO, "Using full-featured tx code path\n");
-		PMD_INIT_LOG(INFO, " - txq_flags = %lx [IXGBE_SIMPLE_FLAGS=%lx]\n", (long unsigned)txq->txq_flags, (long unsigned)IXGBE_SIMPLE_FLAGS);
-		PMD_INIT_LOG(INFO, " - tx_rs_thresh = %lu [RTE_PMD_IXGBE_TX_MAX_BURST=%lu]\n", (long unsigned)txq->tx_rs_thresh, (long unsigned)RTE_PMD_IXGBE_TX_MAX_BURST);
+		PMD_INIT_LOG(INFO, " - txq_flags = %lx "
+			     "[IXGBE_SIMPLE_FLAGS=%lx]\n",
+			     (long unsigned)txq->txq_flags,
+			     (long unsigned)IXGBE_SIMPLE_FLAGS);
+		PMD_INIT_LOG(INFO, " - tx_rs_thresh = %lu "
+			     "[RTE_PMD_IXGBE_TX_MAX_BURST=%lu]\n",
+			     (long unsigned)txq->tx_rs_thresh,
+			     (long unsigned)RTE_PMD_IXGBE_TX_MAX_BURST);
 		dev->tx_pkt_burst = ixgbe_xmit_pkts;
 	}
 
@@ -3695,9 +3701,8 @@ ixgbe_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 
 		/* Allocate buffers for descriptor rings */
 		if (ixgbe_alloc_rx_queue_mbufs(rxq) != 0) {
-			PMD_INIT_LOG(ERR,
-				"Could not alloc mbuf for queue:%d\n",
-				rx_queue_id);
+			PMD_INIT_LOG(ERR, "Could not alloc mbuf for queue:%d\n",
+				     rx_queue_id);
 			return -1;
 		}
 		rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
@@ -3711,8 +3716,8 @@ ixgbe_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 			rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
 		} while (--poll_ms && !(rxdctl & IXGBE_RXDCTL_ENABLE));
 		if (!poll_ms)
-			PMD_INIT_LOG(ERR, "Could not enable "
-				     "Rx Queue %d\n", rx_queue_id);
+			PMD_INIT_LOG(ERR, "Could not enable Rx Queue %d\n",
+				     rx_queue_id);
 		rte_wmb();
 		IXGBE_WRITE_REG(hw, IXGBE_RDH(rxq->reg_idx), 0);
 		IXGBE_WRITE_REG(hw, IXGBE_RDT(rxq->reg_idx), rxq->nb_rx_desc - 1);
@@ -3750,8 +3755,8 @@ ixgbe_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 			rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
 		} while (--poll_ms && (rxdctl | IXGBE_RXDCTL_ENABLE));
 		if (!poll_ms)
-			PMD_INIT_LOG(ERR, "Could not disable "
-				     "Rx Queue %d\n", rx_queue_id);
+			PMD_INIT_LOG(ERR, "Could not disable Rx Queue %d\n",
+				     rx_queue_id);
 
 		rte_delay_us(RTE_IXGBE_WAIT_100_US);
 
@@ -3834,9 +3839,8 @@ ixgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 						IXGBE_TDT(txq->reg_idx));
 			} while (--poll_ms && (txtdh != txtdt));
 			if (!poll_ms)
-				PMD_INIT_LOG(ERR,
-				"Tx Queue %d is not empty when stopping.\n",
-				tx_queue_id);
+				PMD_INIT_LOG(ERR, "Tx Queue %d is not empty "
+					     "when stopping.\n", tx_queue_id);
 		}
 
 		txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txq->reg_idx));
@@ -4069,8 +4073,7 @@ ixgbevf_dev_rxtx_start(struct rte_eth_dev *dev)
 			txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
 		} while (--poll_ms && !(txdctl & IXGBE_TXDCTL_ENABLE));
 		if (!poll_ms)
-			PMD_INIT_LOG(ERR, "Could not enable "
-					 "Tx Queue %d\n", i);
+			PMD_INIT_LOG(ERR, "Could not enable Tx Queue %d\n", i);
 	}
 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
 
@@ -4087,8 +4090,7 @@ ixgbevf_dev_rxtx_start(struct rte_eth_dev *dev)
 			rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
 		} while (--poll_ms && !(rxdctl & IXGBE_RXDCTL_ENABLE));
 		if (!poll_ms)
-			PMD_INIT_LOG(ERR, "Could not enable "
-					 "Rx Queue %d\n", i);
+			PMD_INIT_LOG(ERR, "Could not enable Rx Queue %d\n", i);
 		rte_wmb();
 		IXGBE_WRITE_REG(hw, IXGBE_VFRDT(i), rxq->nb_rx_desc - 1);
 
-- 
1.7.10.4
- * [dpdk-dev] [PATCH v3 04/20] ixgbe: clean log messages
  2014-09-17 13:46 [dpdk-dev] [PATCH v3 00/20] cleanup logs in main PMDs David Marchand
                   ` (2 preceding siblings ...)
  2014-09-17 13:46 ` [dpdk-dev] [PATCH v3 03/20] ixgbe: indent logs sections David Marchand
@ 2014-09-17 13:46 ` David Marchand
  2014-09-17 13:46 ` [dpdk-dev] [PATCH v3 05/20] ixgbe: always log init messages David Marchand
                   ` (16 subsequent siblings)
  20 siblings, 0 replies; 25+ messages in thread
From: David Marchand @ 2014-09-17 13:46 UTC (permalink / raw)
  To: dev
Clean up log messages:
- remove leading \n from some messages,
- remove trailing \n from some messages,
- split multi-line messages (see the sketch below).
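[Editor's note: a condensed before/after sketch of the "split multi-line
messages" item, taken from the queue-stats hunk below; the argument lists are
elided with "..." here.]

 Before (one call with an embedded newline):
	PMD_INIT_LOG(INFO, "Set port %d, %s queue_id %d to stat index %d\n"
		     "%s[%d] = 0x%08x\n", ...);

 After (one call per output line, no explicit \n):
	PMD_INIT_LOG(INFO, "Set port %d, %s queue_id %d to stat index %d", ...);
	PMD_INIT_LOG(INFO, "%s[%d] = 0x%08x", ...);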
Signed-off-by: David Marchand <david.marchand@6wind.com>
v2 Reviewed-by: Jay Rolette <rolette@infiniteio.com>
v2 Acked-by: Bruce Richardson <bruce.richardson@intel.com>
---
 lib/librte_pmd_ixgbe/ixgbe_ethdev.c |   82 +++++++++++++++++------------------
 lib/librte_pmd_ixgbe/ixgbe_fdir.c   |    6 +--
 lib/librte_pmd_ixgbe/ixgbe_rxtx.c   |   64 +++++++++++++--------------
 3 files changed, 76 insertions(+), 76 deletions(-)
diff --git a/lib/librte_pmd_ixgbe/ixgbe_ethdev.c b/lib/librte_pmd_ixgbe/ixgbe_ethdev.c
index 71b964a..18988be 100644
--- a/lib/librte_pmd_ixgbe/ixgbe_ethdev.c
+++ b/lib/librte_pmd_ixgbe/ixgbe_ethdev.c
@@ -547,13 +547,13 @@ ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
 	if ((hw->mac.type != ixgbe_mac_82599EB) && (hw->mac.type != ixgbe_mac_X540))
 		return -ENOSYS;
 
-	PMD_INIT_LOG(INFO, "Setting port %d, %s queue_id %d to stat index %d\n",
+	PMD_INIT_LOG(INFO, "Setting port %d, %s queue_id %d to stat index %d",
 		     (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX",
 		     queue_id, stat_idx);
 
 	n = (uint8_t)(queue_id / NB_QMAP_FIELDS_PER_QSM_REG);
 	if (n >= IXGBE_NB_STAT_MAPPING_REGS) {
-		PMD_INIT_LOG(ERR, "Nb of stat mapping registers exceeded\n");
+		PMD_INIT_LOG(ERR, "Nb of stat mapping registers exceeded");
 		return -EIO;
 	}
 	offset = (uint8_t)(queue_id % NB_QMAP_FIELDS_PER_QSM_REG);
@@ -573,20 +573,20 @@ ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
 	else
 		stat_mappings->rqsmr[n] |= qsmr_mask;
 
-	PMD_INIT_LOG(INFO, "Set port %d, %s queue_id %d to stat index %d\n"
-		     "%s[%d] = 0x%08x\n",
+	PMD_INIT_LOG(INFO, "Set port %d, %s queue_id %d to stat index %d",
 		     (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX",
-		     queue_id, stat_idx, is_rx ? "RQSMR" : "TQSM", n,
+		     queue_id, stat_idx);
+	PMD_INIT_LOG(INFO, "%s[%d] = 0x%08x", is_rx ? "RQSMR" : "TQSM", n,
 		     is_rx ? stat_mappings->rqsmr[n] : stat_mappings->tqsm[n]);
 
 	/* Now write the mapping in the appropriate register */
 	if (is_rx) {
-		PMD_INIT_LOG(INFO, "Write 0x%x to RX IXGBE stat mapping reg:%d\n",
+		PMD_INIT_LOG(INFO, "Write 0x%x to RX IXGBE stat mapping reg:%d",
 			     stat_mappings->rqsmr[n], n);
 		IXGBE_WRITE_REG(hw, IXGBE_RQSMR(n), stat_mappings->rqsmr[n]);
 	}
 	else {
-		PMD_INIT_LOG(INFO, "Write 0x%x to TX IXGBE stat mapping reg:%d\n",
+		PMD_INIT_LOG(INFO, "Write 0x%x to TX IXGBE stat mapping reg:%d",
 			     stat_mappings->tqsm[n], n);
 		IXGBE_WRITE_REG(hw, IXGBE_TQSM(n), stat_mappings->tqsm[n]);
 	}
@@ -793,11 +793,12 @@ eth_ixgbe_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
 	if (diag == IXGBE_ERR_EEPROM_VERSION) {
 		PMD_INIT_LOG(ERR, "This device is a pre-production adapter/"
 		    "LOM.  Please be aware there may be issues associated "
-		    "with your hardware.\n If you are experiencing problems "
+		    "with your hardware.");
+		PMD_INIT_LOG(ERR, "If you are experiencing problems "
 		    "please contact your Intel or hardware representative "
-		    "who provided you with this hardware.\n");
+		    "who provided you with this hardware.");
 	} else if (diag == IXGBE_ERR_SFP_NOT_SUPPORTED)
-		PMD_INIT_LOG(ERR, "Unsupported SFP+ Module\n");
+		PMD_INIT_LOG(ERR, "Unsupported SFP+ Module");
 	if (diag) {
 		PMD_INIT_LOG(ERR, "Hardware Initialization Failure: %d", diag);
 		return -EIO;
@@ -851,11 +852,11 @@ eth_ixgbe_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
 	IXGBE_WRITE_FLUSH(hw);
 
 	if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present)
-		PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d, SFP+: %d<n",
+		PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d, SFP+: %d",
 			     (int) hw->mac.type, (int) hw->phy.type,
 			     (int) hw->phy.sfp_type);
 	else
-		PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d\n",
+		PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d",
 			     (int) hw->mac.type, (int) hw->phy.type);
 
 	PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
@@ -1038,7 +1039,7 @@ eth_ixgbevf_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
 			return (-EIO);
 	}
 
-	PMD_INIT_LOG(DEBUG, "\nport %d vendorID=0x%x deviceID=0x%x mac.type=%s\n",
+	PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x mac.type=%s",
 		     eth_dev->data->port_id, pci_dev->id.vendor_id,
 		     pci_dev->id.device_id, "ixgbe_mac_82599_vf");
 
@@ -1418,7 +1419,7 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
 	/* IXGBE devices don't support half duplex */
 	if ((dev->data->dev_conf.link_duplex != ETH_LINK_AUTONEG_DUPLEX) &&
 			(dev->data->dev_conf.link_duplex != ETH_LINK_FULL_DUPLEX)) {
-		PMD_INIT_LOG(ERR, "Invalid link_duplex (%hu) for port %hhu\n",
+		PMD_INIT_LOG(ERR, "Invalid link_duplex (%hu) for port %hhu",
 			     dev->data->dev_conf.link_duplex,
 			     dev->data->port_id);
 		return -EINVAL;
@@ -1444,7 +1445,7 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
 	/* This can fail when allocating mbufs for descriptor rings */
 	err = ixgbe_dev_rx_init(dev);
 	if (err) {
-		PMD_INIT_LOG(ERR, "Unable to initialize RX hardware\n");
+		PMD_INIT_LOG(ERR, "Unable to initialize RX hardware");
 		goto error;
 	}
 
@@ -1491,7 +1492,7 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
 		speed = IXGBE_LINK_SPEED_10GB_FULL;
 		break;
 	default:
-		PMD_INIT_LOG(ERR, "Invalid link_speed (%hu) for port %hhu\n",
+		PMD_INIT_LOG(ERR, "Invalid link_speed (%hu) for port %hhu",
 			     dev->data->dev_conf.link_speed,
 			     dev->data->port_id);
 		goto error;
@@ -1599,8 +1600,8 @@ ixgbe_dev_set_link_up(struct rte_eth_dev *dev)
 #ifdef RTE_NIC_BYPASS
 		if (hw->device_id == IXGBE_DEV_ID_82599_BYPASS) {
 			/* Not suported in bypass mode */
-			PMD_INIT_LOG(ERR, "\nSet link up is not supported "
-				     "by device id 0x%x\n", hw->device_id);
+			PMD_INIT_LOG(ERR, "Set link up is not supported "
+				     "by device id 0x%x", hw->device_id);
 			return -ENOTSUP;
 		}
 #endif
@@ -1609,7 +1610,7 @@ ixgbe_dev_set_link_up(struct rte_eth_dev *dev)
 		return 0;
 	}
 
-	PMD_INIT_LOG(ERR, "\nSet link up is not supported by device id 0x%x\n",
+	PMD_INIT_LOG(ERR, "Set link up is not supported by device id 0x%x",
 		     hw->device_id);
 	return -ENOTSUP;
 }
@@ -1626,8 +1627,8 @@ ixgbe_dev_set_link_down(struct rte_eth_dev *dev)
 #ifdef RTE_NIC_BYPASS
 		if (hw->device_id == IXGBE_DEV_ID_82599_BYPASS) {
 			/* Not suported in bypass mode */
-			PMD_INIT_LOG(ERR, "\nSet link down is not supported "
-				     "by device id 0x%x\n", hw->device_id);
+			PMD_INIT_LOG(ERR, "Set link down is not supported "
+				     "by device id 0x%x", hw->device_id);
 			return -ENOTSUP;
 		}
 #endif
@@ -1636,7 +1637,7 @@ ixgbe_dev_set_link_down(struct rte_eth_dev *dev)
 		return 0;
 	}
 
-	PMD_INIT_LOG(ERR, "\nSet link down is not supported by device id 0x%x\n",
+	PMD_INIT_LOG(ERR, "Set link down is not supported by device id 0x%x",
 		     hw->device_id);
 	return -ENOTSUP;
 }
@@ -2175,7 +2176,7 @@ ixgbe_dev_interrupt_action(struct rte_eth_dev *dev)
 	struct rte_eth_link link;
 	int intr_enable_delay = false;
 
-	PMD_DRV_LOG(DEBUG, "intr action type %d\n", intr->flags);
+	PMD_DRV_LOG(DEBUG, "intr action type %d", intr->flags);
 
 	if (intr->flags & IXGBE_FLAG_MAILBOX) {
 		ixgbe_pf_mbx_process(dev);
@@ -2252,7 +2253,7 @@ ixgbe_dev_interrupt_delayed_handler(void *param)
 		_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC);
 	}
 
-	PMD_DRV_LOG(DEBUG, "enable intr in delayed handler S[%08x]\n", eicr);
+	PMD_DRV_LOG(DEBUG, "enable intr in delayed handler S[%08x]", eicr);
 	ixgbe_enable_intr(dev);
 	rte_intr_enable(&(dev->pci_dev->intr_handle));
 }
@@ -2366,7 +2367,7 @@ ixgbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 	if (fc_conf->autoneg != !hw->fc.disable_fc_autoneg)
 		return -ENOTSUP;
 	rx_buf_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0));
-	PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x \n", rx_buf_size);
+	PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);
 
 	/*
 	 * At least reserve one Ethernet frame for watermark
@@ -2375,8 +2376,8 @@ ixgbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 	max_high_water = (rx_buf_size - ETHER_MAX_LEN) >> IXGBE_RXPBSIZE_SHIFT;
 	if ((fc_conf->high_water > max_high_water) ||
 		(fc_conf->high_water < fc_conf->low_water)) {
-		PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB\n");
-		PMD_INIT_LOG(ERR, "High_water must <=  0x%x\n", max_high_water);
+		PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB");
+		PMD_INIT_LOG(ERR, "High_water must <= 0x%x", max_high_water);
 		return (-EINVAL);
 	}
 
@@ -2408,7 +2409,7 @@ ixgbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 		return 0;
 	}
 
-	PMD_INIT_LOG(ERR, "ixgbe_fc_enable = 0x%x \n", err);
+	PMD_INIT_LOG(ERR, "ixgbe_fc_enable = 0x%x", err);
 	return -EIO;
 }
 
@@ -2438,13 +2439,13 @@ ixgbe_dcb_pfc_enable_generic(struct ixgbe_hw *hw,uint8_t tc_num)
 	if (hw->fc.current_mode & ixgbe_fc_tx_pause) {
 		 /* High/Low water can not be 0 */
 		if( (!hw->fc.high_water[tc_num])|| (!hw->fc.low_water[tc_num])) {
-			PMD_INIT_LOG(ERR,"Invalid water mark configuration\n");
+			PMD_INIT_LOG(ERR,"Invalid water mark configuration");
 			ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
 			goto out;
 		}
 
 		if(hw->fc.low_water[tc_num] >= hw->fc.high_water[tc_num]) {
-			PMD_INIT_LOG(ERR,"Invalid water mark configuration\n");
+			PMD_INIT_LOG(ERR, "Invalid water mark configuration");
 			ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
 			goto out;
 		}
@@ -2588,7 +2589,7 @@ ixgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_pfc_conf *p
 	ixgbe_dcb_unpack_map_cee(dcb_config, IXGBE_DCB_RX_CONFIG, map);
 	tc_num = map[pfc_conf->priority];
 	rx_buf_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(tc_num));
-	PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x \n", rx_buf_size);
+	PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);
 	/*
 	 * At least reserve one Ethernet frame for watermark
 	 * high_water/low_water in kilo bytes for ixgbe
@@ -2596,8 +2597,8 @@ ixgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_pfc_conf *p
 	max_high_water = (rx_buf_size - ETHER_MAX_LEN) >> IXGBE_RXPBSIZE_SHIFT;
 	if ((pfc_conf->fc.high_water > max_high_water) ||
 	    (pfc_conf->fc.high_water <= pfc_conf->fc.low_water)) {
-		PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB\n");
-		PMD_INIT_LOG(ERR, "High_water must <=  0x%x\n", max_high_water);
+		PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB");
+		PMD_INIT_LOG(ERR, "High_water must <= 0x%x", max_high_water);
 		return (-EINVAL);
 	}
 
@@ -2613,7 +2614,7 @@ ixgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_pfc_conf *p
 	if ((err == IXGBE_SUCCESS) || (err == IXGBE_ERR_FC_NOT_NEGOTIATED))
 		return 0;
 
-	PMD_INIT_LOG(ERR, "ixgbe_dcb_pfc_enable = 0x%x \n", err);
+	PMD_INIT_LOG(ERR, "ixgbe_dcb_pfc_enable = 0x%x", err);
 	return -EIO;
 }
 
@@ -2773,7 +2774,7 @@ ixgbevf_dev_configure(struct rte_eth_dev *dev)
 {
 	struct rte_eth_conf* conf = &dev->data->dev_conf;
 
-	PMD_INIT_LOG(DEBUG, "\nConfigured Virtual Function port id: %d\n",
+	PMD_INIT_LOG(DEBUG, "Configured Virtual Function port id: %d",
 		     dev->data->port_id);
 
 	/*
@@ -2782,12 +2783,12 @@ ixgbevf_dev_configure(struct rte_eth_dev *dev)
 	 */
 #ifndef RTE_LIBRTE_IXGBE_PF_DISABLE_STRIP_CRC
 	if (!conf->rxmode.hw_strip_crc) {
-		PMD_INIT_LOG(INFO, "VF can't disable HW CRC Strip\n");
+		PMD_INIT_LOG(INFO, "VF can't disable HW CRC Strip");
 		conf->rxmode.hw_strip_crc = 1;
 	}
 #else
 	if (conf->rxmode.hw_strip_crc) {
-		PMD_INIT_LOG(INFO, "VF can't enable HW CRC Strip\n");
+		PMD_INIT_LOG(INFO, "VF can't enable HW CRC Strip");
 		conf->rxmode.hw_strip_crc = 0;
 	}
 #endif
@@ -2814,8 +2815,7 @@ ixgbevf_dev_start(struct rte_eth_dev *dev)
 	/* This can fail when allocating mbufs for descriptor rings */
 	err = ixgbevf_dev_rx_init(dev);
 	if (err) {
-		PMD_INIT_LOG(ERR, "Unable to initialize RX hardware (%d)\n",
-			     err);
+		PMD_INIT_LOG(ERR, "Unable to initialize RX hardware (%d)", err);
 		ixgbe_dev_clear_queues(dev);
 		return err;
 	}
@@ -2966,7 +2966,7 @@ ixgbe_vmdq_mode_check(struct ixgbe_hw *hw)
 	/* we only need to do this if VMDq is enabled */
 	reg_val = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
 	if (!(reg_val & IXGBE_VT_CTL_VT_ENABLE)) {
-		PMD_INIT_LOG(ERR, "VMDq must be enabled for this setting\n");
+		PMD_INIT_LOG(ERR, "VMDq must be enabled for this setting");
 		return (-1);
 	}
 
@@ -3095,7 +3095,7 @@ ixgbe_set_pool_rx_mode(struct rte_eth_dev *dev, uint16_t pool,
 
 	if (hw->mac.type == ixgbe_mac_82598EB) {
 		PMD_INIT_LOG(ERR, "setting VF receive mode set should be done"
-			     " on 82599 hardware and newer\n");
+			     " on 82599 hardware and newer");
 		return (-ENOTSUP);
 	}
 	if (ixgbe_vmdq_mode_check(hw) < 0)
diff --git a/lib/librte_pmd_ixgbe/ixgbe_fdir.c b/lib/librte_pmd_ixgbe/ixgbe_fdir.c
index 6c0a530..8819aac 100644
--- a/lib/librte_pmd_ixgbe/ixgbe_fdir.c
+++ b/lib/librte_pmd_ixgbe/ixgbe_fdir.c
@@ -112,7 +112,7 @@ static void fdir_enable_82599(struct ixgbe_hw *hw, u32 fdirctrl)
 	}
 
 	if (i >= IXGBE_FDIR_INIT_DONE_POLL)
-		PMD_INIT_LOG(WARNING, "Flow Director poll time exceeded!\n");
+		PMD_INIT_LOG(WARNING, "Flow Director poll time exceeded!");
 }
 
 /*
@@ -381,7 +381,7 @@ fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
 	fdirhashcmd |= fdirhash;
 	IXGBE_WRITE_REG64(hw, IXGBE_FDIRHASH, fdirhashcmd);
 
-	PMD_INIT_LOG(DEBUG, "Tx Queue=%x hash=%x\n", queue, (u32)fdirhashcmd);
+	PMD_INIT_LOG(DEBUG, "Tx Queue=%x hash=%x", queue, (u32)fdirhashcmd);
 }
 
 /*
@@ -614,7 +614,7 @@ fdir_set_input_mask_82599(struct ixgbe_hw *hw,
 		/* use the L4 protocol mask for raw IPv4/IPv6 traffic */
 		fdirm |= IXGBE_FDIRM_L4P;
 		if (input_mask->dst_port_mask || input_mask->src_port_mask) {
-			PMD_INIT_LOG(ERR, " Error on src/dst port mask\n");
+			PMD_INIT_LOG(ERR, " Error on src/dst port mask");
 			return -EINVAL;
 		}
 	}
diff --git a/lib/librte_pmd_ixgbe/ixgbe_rxtx.c b/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
index 8732051..9a3fd0d 100644
--- a/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
+++ b/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
@@ -615,7 +615,7 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 			tx_last = (uint16_t) (tx_last - txq->nb_tx_desc);
 
 		PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u pktlen=%u"
-			   " tx_first=%u tx_last=%u\n",
+			   " tx_first=%u tx_last=%u",
 			   (unsigned) txq->port_id,
 			   (unsigned) txq->queue_id,
 			   (unsigned) pkt_len,
@@ -1066,7 +1066,7 @@ rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 		if (ixgbe_rx_alloc_bufs(rxq) != 0) {
 			int i, j;
 			PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
-				   "queue_id=%u\n", (unsigned) rxq->port_id,
+				   "queue_id=%u", (unsigned) rxq->port_id,
 				   (unsigned) rxq->queue_id);
 
 			rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed +=
@@ -1193,7 +1193,7 @@ ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 		 * frames to its peer(s).
 		 */
 		PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
-			   "ext_err_stat=0x%08x pkt_len=%u\n",
+			   "ext_err_stat=0x%08x pkt_len=%u",
 			   (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
 			   (unsigned) rx_id, (unsigned) staterr,
 			   (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length));
@@ -1201,7 +1201,7 @@ ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 		nmb = rte_rxmbuf_alloc(rxq->mb_pool);
 		if (nmb == NULL) {
 			PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
-				   "queue_id=%u\n", (unsigned) rxq->port_id,
+				   "queue_id=%u", (unsigned) rxq->port_id,
 				   (unsigned) rxq->queue_id);
 			rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
 			break;
@@ -1295,7 +1295,7 @@ ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 	nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
 	if (nb_hold > rxq->rx_free_thresh) {
 		PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
-			   "nb_hold=%u nb_rx=%u\n",
+			   "nb_hold=%u nb_rx=%u",
 			   (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
 			   (unsigned) rx_id, (unsigned) nb_hold,
 			   (unsigned) nb_rx);
@@ -1382,8 +1382,8 @@ ixgbe_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 		 * to happen by sending specific "back-pressure" flow control
 		 * frames to its peer(s).
 		 */
-		PMD_RX_LOG(DEBUG, "\nport_id=%u queue_id=%u rx_id=%u "
-			   "staterr=0x%x data_len=%u\n",
+		PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
+			   "staterr=0x%x data_len=%u",
 			   (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
 			   (unsigned) rx_id, (unsigned) staterr,
 			   (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length));
@@ -1391,7 +1391,7 @@ ixgbe_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 		nmb = rte_rxmbuf_alloc(rxq->mb_pool);
 		if (nmb == NULL) {
 			PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
-				   "queue_id=%u\n", (unsigned) rxq->port_id,
+				   "queue_id=%u", (unsigned) rxq->port_id,
 				   (unsigned) rxq->queue_id);
 			rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
 			break;
@@ -1559,7 +1559,7 @@ ixgbe_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 	nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
 	if (nb_hold > rxq->rx_free_thresh) {
 		PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
-			   "nb_hold=%u nb_rx=%u\n",
+			   "nb_hold=%u nb_rx=%u",
 			   (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
 			   (unsigned) rx_id, (unsigned) nb_hold,
 			   (unsigned) nb_rx);
@@ -1871,30 +1871,30 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
 		ixgbe_tx_queue_release(txq);
 		return (-ENOMEM);
 	}
-	PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64"\n",
+	PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
 		     txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr);
 
 	/* Use a simple Tx queue (no offloads, no multi segs) if possible */
 	if (((txq->txq_flags & IXGBE_SIMPLE_FLAGS) == IXGBE_SIMPLE_FLAGS) &&
 	    (txq->tx_rs_thresh >= RTE_PMD_IXGBE_TX_MAX_BURST)) {
-		PMD_INIT_LOG(INFO, "Using simple tx code path\n");
+		PMD_INIT_LOG(INFO, "Using simple tx code path");
 #ifdef RTE_IXGBE_INC_VECTOR
 		if (txq->tx_rs_thresh <= RTE_IXGBE_TX_MAX_FREE_BUF_SZ &&
 		    ixgbe_txq_vec_setup(txq, socket_id) == 0) {
-			PMD_INIT_LOG(INFO, "Vector tx enabled.\n");
+			PMD_INIT_LOG(INFO, "Vector tx enabled.");
 			dev->tx_pkt_burst = ixgbe_xmit_pkts_vec;
 		}
 		else
 #endif
 			dev->tx_pkt_burst = ixgbe_xmit_pkts_simple;
 	} else {
-		PMD_INIT_LOG(INFO, "Using full-featured tx code path\n");
+		PMD_INIT_LOG(INFO, "Using full-featured tx code path");
 		PMD_INIT_LOG(INFO, " - txq_flags = %lx "
-			     "[IXGBE_SIMPLE_FLAGS=%lx]\n",
+			     "[IXGBE_SIMPLE_FLAGS=%lx]",
 			     (long unsigned)txq->txq_flags,
 			     (long unsigned)IXGBE_SIMPLE_FLAGS);
 		PMD_INIT_LOG(INFO, " - tx_rs_thresh = %lu "
-			     "[RTE_PMD_IXGBE_TX_MAX_BURST=%lu]\n",
+			     "[RTE_PMD_IXGBE_TX_MAX_BURST=%lu]",
 			     (long unsigned)txq->tx_rs_thresh,
 			     (long unsigned)RTE_PMD_IXGBE_TX_MAX_BURST);
 		dev->tx_pkt_burst = ixgbe_xmit_pkts;
@@ -2156,7 +2156,7 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
 		ixgbe_rx_queue_release(rxq);
 		return (-ENOMEM);
 	}
-	PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64"\n",
+	PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
 		     rxq->sw_ring, rxq->rx_ring, rxq->rx_ring_phys_addr);
 
 	/*
@@ -2170,13 +2170,13 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
 #ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
 		PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
 			     "satisfied. Rx Burst Bulk Alloc function will be "
-			     "used on port=%d, queue=%d.\n",
+			     "used on port=%d, queue=%d.",
 			     rxq->port_id, rxq->queue_id);
 		dev->rx_pkt_burst = ixgbe_recv_pkts_bulk_alloc;
 #ifdef RTE_IXGBE_INC_VECTOR
 		if (!ixgbe_rx_vec_condition_check(dev)) {
 			PMD_INIT_LOG(INFO, "Vector rx enabled, please make "
-				     "sure RX burst size no less than 32.\n");
+				     "sure RX burst size no less than 32.");
 			ixgbe_rxq_vec_setup(rxq, socket_id);
 			dev->rx_pkt_burst = ixgbe_recv_pkts_vec;
 		}
@@ -2186,7 +2186,7 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
 		PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions "
 			     "are not satisfied, Scattered Rx is requested, "
 			     "or RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC is not "
-			     "enabled (port=%d, queue=%d).\n",
+			     "enabled (port=%d, queue=%d).",
 			     rxq->port_id, rxq->queue_id);
 	}
 	dev->data->rx_queues[queue_idx] = rxq;
@@ -2205,7 +2205,7 @@ ixgbe_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 	uint32_t desc = 0;
 
 	if (rx_queue_id >= dev->data->nb_rx_queues) {
-		PMD_RX_LOG(ERR, "Invalid RX queue id=%d\n", rx_queue_id);
+		PMD_RX_LOG(ERR, "Invalid RX queue id=%d", rx_queue_id);
 		return 0;
 	}
 
@@ -2921,7 +2921,7 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
 		ixgbe_dcb_rx_hw_config(hw, dcb_config);
 		break;
 	default:
-		PMD_INIT_LOG(ERR, "Incorrect DCB RX mode configuration\n");
+		PMD_INIT_LOG(ERR, "Incorrect DCB RX mode configuration");
 		break;
 	}
 	switch (dev->data->dev_conf.txmode.mq_mode) {
@@ -2943,7 +2943,7 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
 		ixgbe_dcb_tx_hw_config(hw, dcb_config);
 		break;
 	default:
-		PMD_INIT_LOG(ERR, "Incorrect DCB TX mode configuration\n");
+		PMD_INIT_LOG(ERR, "Incorrect DCB TX mode configuration");
 		break;
 	}
 
@@ -3214,7 +3214,7 @@ ixgbe_alloc_rx_queue_mbufs(struct igb_rx_queue *rxq)
 		volatile union ixgbe_adv_rx_desc *rxd;
 		struct rte_mbuf *mbuf = rte_rxmbuf_alloc(rxq->mb_pool);
 		if (mbuf == NULL) {
-			PMD_INIT_LOG(ERR, "RX mbuf alloc failed queue_id=%u\n",
+			PMD_INIT_LOG(ERR, "RX mbuf alloc failed queue_id=%u",
 				     (unsigned) rxq->queue_id);
 			return (-ENOMEM);
 		}
@@ -3606,7 +3606,7 @@ ixgbe_setup_loopback_link_82599(struct ixgbe_hw *hw)
 	if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
 		if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM) !=
 				IXGBE_SUCCESS) {
-			PMD_INIT_LOG(ERR, "Could not enable loopback mode\n");
+			PMD_INIT_LOG(ERR, "Could not enable loopback mode");
 			/* ignore error */
 			return;
 		}
@@ -3701,7 +3701,7 @@ ixgbe_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 
 		/* Allocate buffers for descriptor rings */
 		if (ixgbe_alloc_rx_queue_mbufs(rxq) != 0) {
-			PMD_INIT_LOG(ERR, "Could not alloc mbuf for queue:%d\n",
+			PMD_INIT_LOG(ERR, "Could not alloc mbuf for queue:%d",
 				     rx_queue_id);
 			return -1;
 		}
@@ -3716,7 +3716,7 @@ ixgbe_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 			rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
 		} while (--poll_ms && !(rxdctl & IXGBE_RXDCTL_ENABLE));
 		if (!poll_ms)
-			PMD_INIT_LOG(ERR, "Could not enable Rx Queue %d\n",
+			PMD_INIT_LOG(ERR, "Could not enable Rx Queue %d",
 				     rx_queue_id);
 		rte_wmb();
 		IXGBE_WRITE_REG(hw, IXGBE_RDH(rxq->reg_idx), 0);
@@ -3755,7 +3755,7 @@ ixgbe_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 			rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
 		} while (--poll_ms && (rxdctl | IXGBE_RXDCTL_ENABLE));
 		if (!poll_ms)
-			PMD_INIT_LOG(ERR, "Could not disable Rx Queue %d\n",
+			PMD_INIT_LOG(ERR, "Could not disable Rx Queue %d",
 				     rx_queue_id);
 
 		rte_delay_us(RTE_IXGBE_WAIT_100_US);
@@ -3799,7 +3799,7 @@ ixgbe_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 			} while (--poll_ms && !(txdctl & IXGBE_TXDCTL_ENABLE));
 			if (!poll_ms)
 				PMD_INIT_LOG(ERR, "Could not enable "
-					     "Tx Queue %d\n", tx_queue_id);
+					     "Tx Queue %d", tx_queue_id);
 		}
 		rte_wmb();
 		IXGBE_WRITE_REG(hw, IXGBE_TDH(txq->reg_idx), 0);
@@ -3840,7 +3840,7 @@ ixgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 			} while (--poll_ms && (txtdh != txtdt));
 			if (!poll_ms)
 				PMD_INIT_LOG(ERR, "Tx Queue %d is not empty "
-					     "when stopping.\n", tx_queue_id);
+					     "when stopping.", tx_queue_id);
 		}
 
 		txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txq->reg_idx));
@@ -3857,7 +3857,7 @@ ixgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 			} while (--poll_ms && (txdctl | IXGBE_TXDCTL_ENABLE));
 			if (!poll_ms)
 				PMD_INIT_LOG(ERR, "Could not disable "
-					     "Tx Queue %d\n", tx_queue_id);
+					     "Tx Queue %d", tx_queue_id);
 		}
 
 		if (txq->ops != NULL) {
@@ -4073,7 +4073,7 @@ ixgbevf_dev_rxtx_start(struct rte_eth_dev *dev)
 			txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
 		} while (--poll_ms && !(txdctl & IXGBE_TXDCTL_ENABLE));
 		if (!poll_ms)
-			PMD_INIT_LOG(ERR, "Could not enable Tx Queue %d\n", i);
+			PMD_INIT_LOG(ERR, "Could not enable Tx Queue %d", i);
 	}
 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
 
@@ -4090,7 +4090,7 @@ ixgbevf_dev_rxtx_start(struct rte_eth_dev *dev)
 			rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
 		} while (--poll_ms && !(rxdctl & IXGBE_RXDCTL_ENABLE));
 		if (!poll_ms)
-			PMD_INIT_LOG(ERR, "Could not enable Rx Queue %d\n", i);
+			PMD_INIT_LOG(ERR, "Could not enable Rx Queue %d", i);
 		rte_wmb();
 		IXGBE_WRITE_REG(hw, IXGBE_VFRDT(i), rxq->nb_rx_desc - 1);
 
-- 
1.7.10.4
- * [dpdk-dev] [PATCH v3 05/20] ixgbe: always log init messages
  2014-09-17 13:46 [dpdk-dev] [PATCH v3 00/20] cleanup logs in main PMDs David Marchand
                   ` (3 preceding siblings ...)
  2014-09-17 13:46 ` [dpdk-dev] [PATCH v3 04/20] ixgbe: clean log messages David Marchand
@ 2014-09-17 13:46 ` David Marchand
  2014-09-17 13:46 ` [dpdk-dev] [PATCH v3 06/20] ixgbe: add a message when forcing scatter mode David Marchand
                   ` (15 subsequent siblings)
  20 siblings, 0 replies; 25+ messages in thread
From: David Marchand @ 2014-09-17 13:46 UTC (permalink / raw)
  To: dev
'init' messages should always be compiled in and filtered at runtime by rte_log,
all the more so as these messages are not in the datapath.
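[Editor's note: a short sketch of what "filtered at runtime" means for an
application, assuming the rte_log API of this DPDK generation; these calls
would run after rte_eal_init().]

	#include <rte_log.h>

	/* keep init messages compiled in, but drop DEBUG/INFO output at runtime */
	rte_set_log_level(RTE_LOG_NOTICE);

	/* or silence the whole PMD log type */
	rte_set_log_type(RTE_LOGTYPE_PMD, 0);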
Signed-off-by: David Marchand <david.marchand@6wind.com>
Acked-by: Bruce Richardson <bruce.richardson@intel.com>
---
 lib/librte_pmd_ixgbe/ixgbe_logs.h |    7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)
diff --git a/lib/librte_pmd_ixgbe/ixgbe_logs.h b/lib/librte_pmd_ixgbe/ixgbe_logs.h
index 4685c18..572e030 100644
--- a/lib/librte_pmd_ixgbe/ixgbe_logs.h
+++ b/lib/librte_pmd_ixgbe/ixgbe_logs.h
@@ -34,12 +34,13 @@
 #ifndef _IXGBE_LOGS_H_
 #define _IXGBE_LOGS_H_
 
-#ifdef RTE_LIBRTE_IXGBE_DEBUG_INIT
 #define PMD_INIT_LOG(level, fmt, args...) \
-	RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
+	rte_log(RTE_LOG_ ## level, RTE_LOGTYPE_PMD, \
+		"PMD: %s(): " fmt "\n", __func__, ##args)
+
+#ifdef RTE_LIBRTE_IXGBE_DEBUG_INIT
 #define PMD_INIT_FUNC_TRACE() PMD_INIT_LOG(DEBUG, " >>")
 #else
-#define PMD_INIT_LOG(level, fmt, args...) do { } while(0)
 #define PMD_INIT_FUNC_TRACE() do { } while(0)
 #endif
 
-- 
1.7.10.4
- * [dpdk-dev] [PATCH v3 06/20] ixgbe: add a message when forcing scatter mode
  2014-09-17 13:46 [dpdk-dev] [PATCH v3 00/20] cleanup logs in main PMDs David Marchand
                   ` (4 preceding siblings ...)
  2014-09-17 13:46 ` [dpdk-dev] [PATCH v3 05/20] ixgbe: always log init messages David Marchand
@ 2014-09-17 13:46 ` David Marchand
  2014-09-17 13:46 ` [dpdk-dev] [PATCH v3 07/20] ixgbe: add log messages when rx bulk mode is not usable David Marchand
                   ` (14 subsequent siblings)
  20 siblings, 0 replies; 25+ messages in thread
From: David Marchand @ 2014-09-17 13:46 UTC (permalink / raw)
  To: dev
Signed-off-by: David Marchand <david.marchand@6wind.com>
Acked-by: Bruce Richardson <bruce.richardson@intel.com>
---
 lib/librte_pmd_ixgbe/ixgbe_rxtx.c |    8 ++++++++
 1 file changed, 8 insertions(+)
diff --git a/lib/librte_pmd_ixgbe/ixgbe_rxtx.c b/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
index 9a3fd0d..98fbd46 100644
--- a/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
+++ b/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
@@ -3488,12 +3488,16 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
 		/* It adds dual VLAN length for supporting dual VLAN */
 		if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
 				2 * IXGBE_VLAN_TAG_SIZE) > buf_size){
+			if (!dev->data->scattered_rx)
+				PMD_INIT_LOG(DEBUG, "forcing scatter mode");
 			dev->data->scattered_rx = 1;
 			dev->rx_pkt_burst = ixgbe_recv_scattered_pkts;
 		}
 	}
 
 	if (dev->data->dev_conf.rxmode.enable_scatter) {
+		if (!dev->data->scattered_rx)
+			PMD_INIT_LOG(DEBUG, "forcing scatter mode");
 		dev->rx_pkt_burst = ixgbe_recv_scattered_pkts;
 		dev->data->scattered_rx = 1;
 	}
@@ -3979,12 +3983,16 @@ ixgbevf_dev_rx_init(struct rte_eth_dev *dev)
 		/* It adds dual VLAN length for supporting dual VLAN */
 		if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
 				2 * IXGBE_VLAN_TAG_SIZE) > buf_size) {
+			if (!dev->data->scattered_rx)
+				PMD_INIT_LOG(DEBUG, "forcing scatter mode");
 			dev->data->scattered_rx = 1;
 			dev->rx_pkt_burst = ixgbe_recv_scattered_pkts;
 		}
 	}
 
 	if (dev->data->dev_conf.rxmode.enable_scatter) {
+		if (!dev->data->scattered_rx)
+			PMD_INIT_LOG(DEBUG, "forcing scatter mode");
 		dev->rx_pkt_burst = ixgbe_recv_scattered_pkts;
 		dev->data->scattered_rx = 1;
 	}
-- 
1.7.10.4
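The two hunks are symmetric (PF and VF paths), and the !dev->data->scattered_rx guard makes the new message fire only on the transition into scatter mode rather than once per queue. Stripped of driver state, the decision being logged is just a size check; an illustrative sketch, where VLAN_TAG_SIZE and buf_size stand in for the driver's IXGBE_VLAN_TAG_SIZE and the per-queue mbuf data room:

    #include <stdbool.h>
    #include <stdint.h>

    #define VLAN_TAG_SIZE 4  /* 802.1Q tag, assumed for the example */

    /* Sketch of the scatter-mode decision; not the driver's actual code. */
    static bool
    needs_scattered_rx(uint32_t max_rx_pkt_len, uint16_t buf_size,
                       bool scatter_requested)
    {
        /* The frame (plus room for two VLAN tags) no longer fits in a
         * single mbuf buffer, or the application asked for scatter. */
        return (max_rx_pkt_len + 2 * VLAN_TAG_SIZE > buf_size) ||
                scatter_requested;
    }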
- * [dpdk-dev] [PATCH v3 07/20] ixgbe: add log messages when rx bulk mode is not usable
  2014-09-17 13:46 [dpdk-dev] [PATCH v3 00/20] cleanup logs in main PMDs David Marchand
                   ` (5 preceding siblings ...)
  2014-09-17 13:46 ` [dpdk-dev] [PATCH v3 06/20] ixgbe: add a message when forcing scatter mode David Marchand
@ 2014-09-17 13:46 ` David Marchand
  2014-09-17 13:46 ` [dpdk-dev] [PATCH v3 08/20] i40e: use the right debug macro David Marchand
                   ` (13 subsequent siblings)
  20 siblings, 0 replies; 25+ messages in thread
From: David Marchand @ 2014-09-17 13:46 UTC (permalink / raw)
  To: dev
Signed-off-by: David Marchand <david.marchand@6wind.com>
Acked-by: Bruce Richardson <bruce.richardson@intel.com>
---
 lib/librte_pmd_ixgbe/ixgbe_rxtx.c |   29 ++++++++++++++++++++++++-----
 1 file changed, 24 insertions(+), 5 deletions(-)
diff --git a/lib/librte_pmd_ixgbe/ixgbe_rxtx.c b/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
index 98fbd46..4876feb 100644
--- a/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
+++ b/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
@@ -1976,15 +1976,34 @@ check_rx_burst_bulk_alloc_preconditions(__rte_unused struct igb_rx_queue *rxq)
 	 * outside of this function.
 	 */
 #ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
-	if (! (rxq->rx_free_thresh >= RTE_PMD_IXGBE_RX_MAX_BURST))
+	if (!(rxq->rx_free_thresh >= RTE_PMD_IXGBE_RX_MAX_BURST)) {
+		PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
+			     "rxq->rx_free_thresh=%d, "
+			     "RTE_PMD_IXGBE_RX_MAX_BURST=%d",
+			     rxq->rx_free_thresh, RTE_PMD_IXGBE_RX_MAX_BURST);
 		ret = -EINVAL;
-	else if (! (rxq->rx_free_thresh < rxq->nb_rx_desc))
+	} else if (!(rxq->rx_free_thresh < rxq->nb_rx_desc)) {
+		PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
+			     "rxq->rx_free_thresh=%d, "
+			     "rxq->nb_rx_desc=%d",
+			     rxq->rx_free_thresh, rxq->nb_rx_desc);
 		ret = -EINVAL;
-	else if (! ((rxq->nb_rx_desc % rxq->rx_free_thresh) == 0))
+	} else if (!((rxq->nb_rx_desc % rxq->rx_free_thresh) == 0)) {
+		PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
+			     "rxq->nb_rx_desc=%d, "
+			     "rxq->rx_free_thresh=%d",
+			     rxq->nb_rx_desc, rxq->rx_free_thresh);
 		ret = -EINVAL;
-	else if (! (rxq->nb_rx_desc <
-	       (IXGBE_MAX_RING_DESC - RTE_PMD_IXGBE_RX_MAX_BURST)))
+	} else if (!(rxq->nb_rx_desc <
+	       (IXGBE_MAX_RING_DESC - RTE_PMD_IXGBE_RX_MAX_BURST))) {
+		PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
+			     "rxq->nb_rx_desc=%d, "
+			     "IXGBE_MAX_RING_DESC=%d, "
+			     "RTE_PMD_IXGBE_RX_MAX_BURST=%d",
+			     rxq->nb_rx_desc, IXGBE_MAX_RING_DESC,
+			     RTE_PMD_IXGBE_RX_MAX_BURST);
 		ret = -EINVAL;
+	}
 #else
 	ret = -EINVAL;
 #endif
-- 
1.7.10.4
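The messages added here name the precondition that failed; read together, the four checks amount to a single predicate on the queue geometry. A standalone sketch, with the driver's constants passed in as parameters (max_burst and max_ring_desc stand in for RTE_PMD_IXGBE_RX_MAX_BURST and IXGBE_MAX_RING_DESC):

    #include <stdbool.h>
    #include <stdint.h>

    /* Sketch of the Rx Burst Bulk Alloc preconditions checked above. */
    static bool
    bulk_alloc_allowed(uint16_t rx_free_thresh, uint16_t nb_rx_desc,
                       uint16_t max_burst, uint16_t max_ring_desc)
    {
        return rx_free_thresh >= max_burst &&          /* refill in full bursts */
               rx_free_thresh < nb_rx_desc &&          /* threshold fits in the ring */
               nb_rx_desc % rx_free_thresh == 0 &&     /* ring divides evenly */
               nb_rx_desc < max_ring_desc - max_burst; /* headroom for look-ahead */
    }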
- * [dpdk-dev] [PATCH v3 08/20] i40e: use the right debug macro
  2014-09-17 13:46 [dpdk-dev] [PATCH v3 00/20] cleanup logs in main PMDs David Marchand
                   ` (6 preceding siblings ...)
  2014-09-17 13:46 ` [dpdk-dev] [PATCH v3 07/20] ixgbe: add log messages when rx bulk mode is not usable David Marchand
@ 2014-09-17 13:46 ` David Marchand
  2014-09-17 13:46 ` [dpdk-dev] [PATCH v3 09/20] i40e/base: add a raw macro for use by shared code David Marchand
                   ` (12 subsequent siblings)
  20 siblings, 0 replies; 25+ messages in thread
From: David Marchand @ 2014-09-17 13:46 UTC (permalink / raw)
  To: dev
- Don't use DEBUGFUNC macro in non-shared code.
- Don't use printf for logs.
- We should avoid calling RTE_LOG directly as pmd provides a wrapper for logs.
- Replace some PMD_INIT_LOG(DEBUG, "some_func") with PMD_INIT_FUNC_TRACE().
Signed-off-by: David Marchand <david.marchand@6wind.com>
v2 Reviewed-by: Jay Rolette <rolette@infiniteio.com>
v2 Acked-by: Bruce Richardson <bruce.richardson@intel.com>
---
 lib/librte_pmd_i40e/i40e_ethdev.c    |  146 +++++++++++++++++-----------------
 lib/librte_pmd_i40e/i40e_ethdev_vf.c |    4 +-
 lib/librte_pmd_i40e/i40e_pf.c        |    6 +-
 lib/librte_pmd_i40e/i40e_rxtx.c      |   64 +++++++--------
 4 files changed, 111 insertions(+), 109 deletions(-)
diff --git a/lib/librte_pmd_i40e/i40e_ethdev.c b/lib/librte_pmd_i40e/i40e_ethdev.c
index 4e65ca4..af2e1cb 100644
--- a/lib/librte_pmd_i40e/i40e_ethdev.c
+++ b/lib/librte_pmd_i40e/i40e_ethdev.c
@@ -1059,24 +1059,23 @@ i40e_update_vsi_stats(struct i40e_vsi *vsi)
 			    &oes->tx_errors, &nes->tx_errors);
 	vsi->offset_loaded = true;
 
-#ifdef RTE_LIBRTE_I40E_DEBUG_DRIVER
-	printf("***************** VSI[%u] stats start *******************\n",
-								vsi->vsi_id);
-	printf("rx_bytes:            %lu\n", nes->rx_bytes);
-	printf("rx_unicast:          %lu\n", nes->rx_unicast);
-	printf("rx_multicast:        %lu\n", nes->rx_multicast);
-	printf("rx_broadcast:        %lu\n", nes->rx_broadcast);
-	printf("rx_discards:         %lu\n", nes->rx_discards);
-	printf("rx_unknown_protocol: %lu\n", nes->rx_unknown_protocol);
-	printf("tx_bytes:            %lu\n", nes->tx_bytes);
-	printf("tx_unicast:          %lu\n", nes->tx_unicast);
-	printf("tx_multicast:        %lu\n", nes->tx_multicast);
-	printf("tx_broadcast:        %lu\n", nes->tx_broadcast);
-	printf("tx_discards:         %lu\n", nes->tx_discards);
-	printf("tx_errors:           %lu\n", nes->tx_errors);
-	printf("***************** VSI[%u] stats end *******************\n",
-								vsi->vsi_id);
-#endif /* RTE_LIBRTE_I40E_DEBUG_DRIVER */
+	PMD_DRV_LOG(DEBUG, "***************** VSI[%u] stats start *******************",
+		    vsi->vsi_id);
+	PMD_DRV_LOG(DEBUG, "rx_bytes:            %lu", nes->rx_bytes);
+	PMD_DRV_LOG(DEBUG, "rx_unicast:          %lu", nes->rx_unicast);
+	PMD_DRV_LOG(DEBUG, "rx_multicast:        %lu", nes->rx_multicast);
+	PMD_DRV_LOG(DEBUG, "rx_broadcast:        %lu", nes->rx_broadcast);
+	PMD_DRV_LOG(DEBUG, "rx_discards:         %lu", nes->rx_discards);
+	PMD_DRV_LOG(DEBUG, "rx_unknown_protocol: %lu",
+		    nes->rx_unknown_protocol);
+	PMD_DRV_LOG(DEBUG, "tx_bytes:            %lu", nes->tx_bytes);
+	PMD_DRV_LOG(DEBUG, "tx_unicast:          %lu", nes->tx_unicast);
+	PMD_DRV_LOG(DEBUG, "tx_multicast:        %lu", nes->tx_multicast);
+	PMD_DRV_LOG(DEBUG, "tx_broadcast:        %lu", nes->tx_broadcast);
+	PMD_DRV_LOG(DEBUG, "tx_discards:         %lu", nes->tx_discards);
+	PMD_DRV_LOG(DEBUG, "tx_errors:           %lu", nes->tx_errors);
+	PMD_DRV_LOG(DEBUG, "***************** VSI[%u] stats end *******************",
+		    vsi->vsi_id);
 }
 
 /* Get all statistics of a port */
@@ -1277,69 +1276,74 @@ i40e_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
 	if (pf->main_vsi)
 		i40e_update_vsi_stats(pf->main_vsi);
 
-#ifdef RTE_LIBRTE_I40E_DEBUG_DRIVER
-	printf("***************** PF stats start *******************\n");
-	printf("rx_bytes:            %lu\n", ns->eth.rx_bytes);
-	printf("rx_unicast:          %lu\n", ns->eth.rx_unicast);
-	printf("rx_multicast:        %lu\n", ns->eth.rx_multicast);
-	printf("rx_broadcast:        %lu\n", ns->eth.rx_broadcast);
-	printf("rx_discards:         %lu\n", ns->eth.rx_discards);
-	printf("rx_unknown_protocol: %lu\n", ns->eth.rx_unknown_protocol);
-	printf("tx_bytes:            %lu\n", ns->eth.tx_bytes);
-	printf("tx_unicast:          %lu\n", ns->eth.tx_unicast);
-	printf("tx_multicast:        %lu\n", ns->eth.tx_multicast);
-	printf("tx_broadcast:        %lu\n", ns->eth.tx_broadcast);
-	printf("tx_discards:         %lu\n", ns->eth.tx_discards);
-	printf("tx_errors:           %lu\n", ns->eth.tx_errors);
-
-	printf("tx_dropped_link_down:     %lu\n", ns->tx_dropped_link_down);
-	printf("crc_errors:               %lu\n", ns->crc_errors);
-	printf("illegal_bytes:            %lu\n", ns->illegal_bytes);
-	printf("error_bytes:              %lu\n", ns->error_bytes);
-	printf("mac_local_faults:         %lu\n", ns->mac_local_faults);
-	printf("mac_remote_faults:        %lu\n", ns->mac_remote_faults);
-	printf("rx_length_errors:         %lu\n", ns->rx_length_errors);
-	printf("link_xon_rx:              %lu\n", ns->link_xon_rx);
-	printf("link_xoff_rx:             %lu\n", ns->link_xoff_rx);
+	PMD_DRV_LOG(DEBUG, "***************** PF stats start *******************");
+	PMD_DRV_LOG(DEBUG, "rx_bytes:            %lu", ns->eth.rx_bytes);
+	PMD_DRV_LOG(DEBUG, "rx_unicast:          %lu", ns->eth.rx_unicast);
+	PMD_DRV_LOG(DEBUG, "rx_multicast:        %lu", ns->eth.rx_multicast);
+	PMD_DRV_LOG(DEBUG, "rx_broadcast:        %lu", ns->eth.rx_broadcast);
+	PMD_DRV_LOG(DEBUG, "rx_discards:         %lu", ns->eth.rx_discards);
+	PMD_DRV_LOG(DEBUG, "rx_unknown_protocol: %lu",
+		    ns->eth.rx_unknown_protocol);
+	PMD_DRV_LOG(DEBUG, "tx_bytes:            %lu", ns->eth.tx_bytes);
+	PMD_DRV_LOG(DEBUG, "tx_unicast:          %lu", ns->eth.tx_unicast);
+	PMD_DRV_LOG(DEBUG, "tx_multicast:        %lu", ns->eth.tx_multicast);
+	PMD_DRV_LOG(DEBUG, "tx_broadcast:        %lu", ns->eth.tx_broadcast);
+	PMD_DRV_LOG(DEBUG, "tx_discards:         %lu", ns->eth.tx_discards);
+	PMD_DRV_LOG(DEBUG, "tx_errors:           %lu", ns->eth.tx_errors);
+
+	PMD_DRV_LOG(DEBUG, "tx_dropped_link_down:     %lu",
+		    ns->tx_dropped_link_down);
+	PMD_DRV_LOG(DEBUG, "crc_errors:               %lu", ns->crc_errors);
+	PMD_DRV_LOG(DEBUG, "illegal_bytes:            %lu",
+		    ns->illegal_bytes);
+	PMD_DRV_LOG(DEBUG, "error_bytes:              %lu", ns->error_bytes);
+	PMD_DRV_LOG(DEBUG, "mac_local_faults:         %lu",
+		    ns->mac_local_faults);
+	PMD_DRV_LOG(DEBUG, "mac_remote_faults:        %lu",
+		    ns->mac_remote_faults);
+	PMD_DRV_LOG(DEBUG, "rx_length_errors:         %lu",
+		    ns->rx_length_errors);
+	PMD_DRV_LOG(DEBUG, "link_xon_rx:              %lu", ns->link_xon_rx);
+	PMD_DRV_LOG(DEBUG, "link_xoff_rx:             %lu", ns->link_xoff_rx);
 	for (i = 0; i < 8; i++) {
-		printf("priority_xon_rx[%d]:      %lu\n",
+		PMD_DRV_LOG(DEBUG, "priority_xon_rx[%d]:      %lu",
 				i, ns->priority_xon_rx[i]);
-		printf("priority_xoff_rx[%d]:     %lu\n",
+		PMD_DRV_LOG(DEBUG, "priority_xoff_rx[%d]:     %lu",
 				i, ns->priority_xoff_rx[i]);
 	}
-	printf("link_xon_tx:              %lu\n", ns->link_xon_tx);
-	printf("link_xoff_tx:             %lu\n", ns->link_xoff_tx);
+	PMD_DRV_LOG(DEBUG, "link_xon_tx:              %lu", ns->link_xon_tx);
+	PMD_DRV_LOG(DEBUG, "link_xoff_tx:             %lu", ns->link_xoff_tx);
 	for (i = 0; i < 8; i++) {
-		printf("priority_xon_tx[%d]:      %lu\n",
+		PMD_DRV_LOG(DEBUG, "priority_xon_tx[%d]:      %lu",
 				i, ns->priority_xon_tx[i]);
-		printf("priority_xoff_tx[%d]:     %lu\n",
+		PMD_DRV_LOG(DEBUG, "priority_xoff_tx[%d]:     %lu",
 				i, ns->priority_xoff_tx[i]);
-		printf("priority_xon_2_xoff[%d]:  %lu\n",
+		PMD_DRV_LOG(DEBUG, "priority_xon_2_xoff[%d]:  %lu",
 				i, ns->priority_xon_2_xoff[i]);
 	}
-	printf("rx_size_64:               %lu\n", ns->rx_size_64);
-	printf("rx_size_127:              %lu\n", ns->rx_size_127);
-	printf("rx_size_255:              %lu\n", ns->rx_size_255);
-	printf("rx_size_511:              %lu\n", ns->rx_size_511);
-	printf("rx_size_1023:             %lu\n", ns->rx_size_1023);
-	printf("rx_size_1522:             %lu\n", ns->rx_size_1522);
-	printf("rx_size_big:              %lu\n", ns->rx_size_big);
-	printf("rx_undersize:             %lu\n", ns->rx_undersize);
-	printf("rx_fragments:             %lu\n", ns->rx_fragments);
-	printf("rx_oversize:              %lu\n", ns->rx_oversize);
-	printf("rx_jabber:                %lu\n", ns->rx_jabber);
-	printf("tx_size_64:               %lu\n", ns->tx_size_64);
-	printf("tx_size_127:              %lu\n", ns->tx_size_127);
-	printf("tx_size_255:              %lu\n", ns->tx_size_255);
-	printf("tx_size_511:              %lu\n", ns->tx_size_511);
-	printf("tx_size_1023:             %lu\n", ns->tx_size_1023);
-	printf("tx_size_1522:             %lu\n", ns->tx_size_1522);
-	printf("tx_size_big:              %lu\n", ns->tx_size_big);
-	printf("mac_short_packet_dropped: %lu\n",
+	PMD_DRV_LOG(DEBUG, "rx_size_64:               %lu", ns->rx_size_64);
+	PMD_DRV_LOG(DEBUG, "rx_size_127:              %lu", ns->rx_size_127);
+	PMD_DRV_LOG(DEBUG, "rx_size_255:              %lu", ns->rx_size_255);
+	PMD_DRV_LOG(DEBUG, "rx_size_511:              %lu", ns->rx_size_511);
+	PMD_DRV_LOG(DEBUG, "rx_size_1023:             %lu", ns->rx_size_1023);
+	PMD_DRV_LOG(DEBUG, "rx_size_1522:             %lu", ns->rx_size_1522);
+	PMD_DRV_LOG(DEBUG, "rx_size_big:              %lu", ns->rx_size_big);
+	PMD_DRV_LOG(DEBUG, "rx_undersize:             %lu", ns->rx_undersize);
+	PMD_DRV_LOG(DEBUG, "rx_fragments:             %lu", ns->rx_fragments);
+	PMD_DRV_LOG(DEBUG, "rx_oversize:              %lu", ns->rx_oversize);
+	PMD_DRV_LOG(DEBUG, "rx_jabber:                %lu", ns->rx_jabber);
+	PMD_DRV_LOG(DEBUG, "tx_size_64:               %lu", ns->tx_size_64);
+	PMD_DRV_LOG(DEBUG, "tx_size_127:              %lu", ns->tx_size_127);
+	PMD_DRV_LOG(DEBUG, "tx_size_255:              %lu", ns->tx_size_255);
+	PMD_DRV_LOG(DEBUG, "tx_size_511:              %lu", ns->tx_size_511);
+	PMD_DRV_LOG(DEBUG, "tx_size_1023:             %lu", ns->tx_size_1023);
+	PMD_DRV_LOG(DEBUG, "tx_size_1522:             %lu", ns->tx_size_1522);
+	PMD_DRV_LOG(DEBUG, "tx_size_big:              %lu", ns->tx_size_big);
+	PMD_DRV_LOG(DEBUG, "mac_short_packet_dropped: %lu",
 			ns->mac_short_packet_dropped);
-	printf("checksum_error:           %lu\n", ns->checksum_error);
-	printf("***************** PF stats end ********************\n");
-#endif /* RTE_LIBRTE_I40E_DEBUG_DRIVER */
+	PMD_DRV_LOG(DEBUG, "checksum_error:           %lu",
+		    ns->checksum_error);
+	PMD_DRV_LOG(DEBUG, "***************** PF stats end ********************");
 }
 
 /* Reset the statistics */
diff --git a/lib/librte_pmd_i40e/i40e_ethdev_vf.c b/lib/librte_pmd_i40e/i40e_ethdev_vf.c
index d8552ad..ed62668 100644
--- a/lib/librte_pmd_i40e/i40e_ethdev_vf.c
+++ b/lib/librte_pmd_i40e/i40e_ethdev_vf.c
@@ -1132,7 +1132,7 @@ static int
 rte_i40evf_pmd_init(const char *name __rte_unused,
 		    const char *params __rte_unused)
 {
-	DEBUGFUNC("rte_i40evf_pmd_init");
+	PMD_INIT_FUNC_TRACE();
 
 	rte_eth_driver_register(&rte_i40evf_pmd);
 
@@ -1384,7 +1384,7 @@ i40evf_dev_start(struct rte_eth_dev *dev)
 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	struct ether_addr mac_addr;
 
-	PMD_DRV_LOG(DEBUG, "i40evf_dev_start");
+	PMD_INIT_FUNC_TRACE();
 
 	vf->max_pkt_len = dev->data->dev_conf.rxmode.max_rx_pkt_len;
 	if (dev->data->dev_conf.rxmode.jumbo_frame == 1) {
diff --git a/lib/librte_pmd_i40e/i40e_pf.c b/lib/librte_pmd_i40e/i40e_pf.c
index e8b154d..4e1e043 100644
--- a/lib/librte_pmd_i40e/i40e_pf.c
+++ b/lib/librte_pmd_i40e/i40e_pf.c
@@ -253,10 +253,8 @@ i40e_pf_host_send_msg_to_vf(struct i40e_pf_vf *vf,
 	ret = i40e_aq_send_msg_to_vf(hw, abs_vf_id, opcode, retval,
 						msg, msglen, NULL);
 	if (ret) {
-		PMD_DRV_LOG(ERR, "Fail to send message to VF, err %u\n",
-			hw->aq.asq_last_status);
-		printf("Fail to send message to VF, err %u\n",
-					hw->aq.asq_last_status);
+		PMD_INIT_LOG(ERR, "Fail to send message to VF, err %u",
+			     hw->aq.asq_last_status);
 	}
 
 	return ret;
diff --git a/lib/librte_pmd_i40e/i40e_rxtx.c b/lib/librte_pmd_i40e/i40e_rxtx.c
index e41e8d0..70e2bd4 100644
--- a/lib/librte_pmd_i40e/i40e_rxtx.c
+++ b/lib/librte_pmd_i40e/i40e_rxtx.c
@@ -1787,50 +1787,50 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
 	tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ?
 		tx_conf->tx_free_thresh : DEFAULT_TX_FREE_THRESH);
 	if (tx_rs_thresh >= (nb_desc - 2)) {
-		RTE_LOG(ERR, PMD, "tx_rs_thresh must be less than the "
-				"number of TX descriptors minus 2. "
-				"(tx_rs_thresh=%u port=%d queue=%d)\n",
-					(unsigned int)tx_rs_thresh,
-					(int)dev->data->port_id,
-						(int)queue_idx);
+		PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than the "
+			     "number of TX descriptors minus 2. "
+			     "(tx_rs_thresh=%u port=%d queue=%d)",
+			     (unsigned int)tx_rs_thresh,
+			     (int)dev->data->port_id,
+			     (int)queue_idx);
 		return I40E_ERR_PARAM;
 	}
 	if (tx_free_thresh >= (nb_desc - 3)) {
-		RTE_LOG(ERR, PMD, "tx_rs_thresh must be less than the "
-				"tx_free_thresh must be less than the "
-				"number of TX descriptors minus 3. "
-				"(tx_free_thresh=%u port=%d queue=%d)\n",
-					(unsigned int)tx_free_thresh,
-						(int)dev->data->port_id,
-							(int)queue_idx);
+		PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than the "
+			     "tx_free_thresh must be less than the "
+			     "number of TX descriptors minus 3. "
+			     "(tx_free_thresh=%u port=%d queue=%d)",
+			     (unsigned int)tx_free_thresh,
+			     (int)dev->data->port_id,
+			     (int)queue_idx);
 		return I40E_ERR_PARAM;
 	}
 	if (tx_rs_thresh > tx_free_thresh) {
-		RTE_LOG(ERR, PMD, "tx_rs_thresh must be less than or "
-				"equal to tx_free_thresh. (tx_free_thresh=%u"
-				" tx_rs_thresh=%u port=%d queue=%d)\n",
-						(unsigned int)tx_free_thresh,
-						(unsigned int)tx_rs_thresh,
-						(int)dev->data->port_id,
-							(int)queue_idx);
+		PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than or "
+			     "equal to tx_free_thresh. (tx_free_thresh=%u"
+			     " tx_rs_thresh=%u port=%d queue=%d)",
+			     (unsigned int)tx_free_thresh,
+			     (unsigned int)tx_rs_thresh,
+			     (int)dev->data->port_id,
+			     (int)queue_idx);
 		return I40E_ERR_PARAM;
 	}
 	if ((nb_desc % tx_rs_thresh) != 0) {
-		RTE_LOG(ERR, PMD, "tx_rs_thresh must be a divisor of the "
-				"number of TX descriptors. (tx_rs_thresh=%u"
-						" port=%d queue=%d)\n",
-						(unsigned int)tx_rs_thresh,
-						(int)dev->data->port_id,
-							(int)queue_idx);
+		PMD_INIT_LOG(ERR, "tx_rs_thresh must be a divisor of the "
+			     "number of TX descriptors. (tx_rs_thresh=%u"
+			     " port=%d queue=%d)",
+			     (unsigned int)tx_rs_thresh,
+			     (int)dev->data->port_id,
+			     (int)queue_idx);
 		return I40E_ERR_PARAM;
 	}
 	if ((tx_rs_thresh > 1) && (tx_conf->tx_thresh.wthresh != 0)) {
-		RTE_LOG(ERR, PMD, "TX WTHRESH must be set to 0 if "
-				"tx_rs_thresh is greater than 1. "
-				"(tx_rs_thresh=%u port=%d queue=%d)\n",
-					(unsigned int)tx_rs_thresh,
-					(int)dev->data->port_id,
-						(int)queue_idx);
+		PMD_INIT_LOG(ERR, "TX WTHRESH must be set to 0 if "
+			     "tx_rs_thresh is greater than 1. "
+			     "(tx_rs_thresh=%u port=%d queue=%d)",
+			     (unsigned int)tx_rs_thresh,
+			     (int)dev->data->port_id,
+			     (int)queue_idx);
 		return I40E_ERR_PARAM;
 	}
 
-- 
1.7.10.4
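One of the replacements deserves a note: PMD_INIT_LOG(DEBUG, "some_func") hardcodes a function name that can silently go stale, while PMD_INIT_FUNC_TRACE() gets the name from __func__ via the log wrapper. A sketch of the pattern, reusing the ixgbe-style macro from patch 05 purely for illustration (the i40e headers define equivalent wrappers of their own):

    #include <rte_log.h>

    /* Wrapper as introduced for ixgbe in patch 05; shown here only to
     * illustrate the trace pattern. */
    #define PMD_INIT_LOG(level, fmt, args...) \
        rte_log(RTE_LOG_ ## level, RTE_LOGTYPE_PMD, \
                "PMD: %s(): " fmt "\n", __func__, ##args)

    /* No hardcoded name: __func__ supplies it. */
    #define PMD_INIT_FUNC_TRACE() PMD_INIT_LOG(DEBUG, " >>")

    static int
    example_dev_start(void)
    {
        PMD_INIT_FUNC_TRACE();  /* logs "PMD: example_dev_start():  >>" */
        return 0;
    }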
- * [dpdk-dev] [PATCH v3 09/20] i40e/base: add a raw macro for use by shared code
  2014-09-17 13:46 [dpdk-dev] [PATCH v3 00/20] cleanup logs in main PMDs David Marchand
                   ` (7 preceding siblings ...)
  2014-09-17 13:46 ` [dpdk-dev] [PATCH v3 08/20] i40e: use the right debug macro David Marchand
@ 2014-09-17 13:46 ` David Marchand
  2014-09-17 13:46 ` [dpdk-dev] [PATCH v3 10/20] i40e: indent logs sections David Marchand
                   ` (11 subsequent siblings)
  20 siblings, 0 replies; 25+ messages in thread
From: David Marchand @ 2014-09-17 13:46 UTC (permalink / raw)
  To: dev
Since shared code always adds a trailing \n, add a PMD_DRV_LOG_RAW macro that does
not append one.
Signed-off-by: David Marchand <david.marchand@6wind.com>
Acked-by: Bruce Richardson <bruce.richardson@intel.com>
---
 lib/librte_pmd_i40e/i40e/i40e_osdep.h |    8 ++++----
 lib/librte_pmd_i40e/i40e_logs.h       |    9 ++++++---
 2 files changed, 10 insertions(+), 7 deletions(-)
diff --git a/lib/librte_pmd_i40e/i40e/i40e_osdep.h b/lib/librte_pmd_i40e/i40e/i40e_osdep.h
index 0ed4b65..de71b0d 100644
--- a/lib/librte_pmd_i40e/i40e/i40e_osdep.h
+++ b/lib/librte_pmd_i40e/i40e/i40e_osdep.h
@@ -100,10 +100,10 @@ typedef enum i40e_status_code i40e_status;
 #define FIELD_SIZEOF(t, f) (sizeof(((t*)0)->f))
 #define ASSERT(x) if(!(x)) rte_panic("IXGBE: x")
 
-#define DEBUGOUT(S)        PMD_DRV_LOG(DEBUG, S)
-#define DEBUGOUT1(S, A...) PMD_DRV_LOG(DEBUG, S, ##A)
+#define DEBUGOUT(S)        PMD_DRV_LOG_RAW(DEBUG, S)
+#define DEBUGOUT1(S, A...) PMD_DRV_LOG_RAW(DEBUG, S, ##A)
 
-#define DEBUGFUNC(F) DEBUGOUT(F)
+#define DEBUGFUNC(F) DEBUGOUT(F "\n")
 #define DEBUGOUT2 DEBUGOUT1
 #define DEBUGOUT3 DEBUGOUT2
 #define DEBUGOUT6 DEBUGOUT3
@@ -112,7 +112,7 @@ typedef enum i40e_status_code i40e_status;
 #define i40e_debug(h, m, s, ...)                                \
 do {                                                            \
 	if (((m) & (h)->debug_mask))                            \
-		PMD_DRV_LOG(DEBUG, "i40e %02x.%x " s,           \
+		PMD_DRV_LOG_RAW(DEBUG, "i40e %02x.%x " s,       \
 			(h)->bus.device, (h)->bus.func,         \
 					##__VA_ARGS__);         \
 } while (0)
diff --git a/lib/librte_pmd_i40e/i40e_logs.h b/lib/librte_pmd_i40e/i40e_logs.h
index f991dd2..043ecba 100644
--- a/lib/librte_pmd_i40e/i40e_logs.h
+++ b/lib/librte_pmd_i40e/i40e_logs.h
@@ -65,10 +65,13 @@
 #endif
 
 #ifdef RTE_LIBRTE_I40E_DEBUG_DRIVER
-#define PMD_DRV_LOG(level, fmt, args...) \
-	RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
+#define PMD_DRV_LOG_RAW(level, fmt, args...) \
+	RTE_LOG(level, PMD, "%s(): " fmt, __func__, ## args)
 #else
-#define PMD_DRV_LOG(level, fmt, args...) do { } while(0)
+#define PMD_DRV_LOG_RAW(level, fmt, args...) do { } while (0)
 #endif
 
+#define PMD_DRV_LOG(level, fmt, args...) \
+	PMD_DRV_LOG_RAW(level, fmt "\n", ## args)
+
 #endif /* _I40E_LOGS_H_ */
-- 
1.7.10.4
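The net effect is that exactly one newline ends up on each message no matter which side formats it: shared code keeps its literal \n, driver code drops it and lets the wrapper append one. A self-contained sketch of the layering, restating the macros from this patch with the RTE_LIBRTE_I40E_DEBUG_DRIVER conditional dropped for brevity:

    #include <rte_log.h>

    /* RAW primitive: emits exactly what the caller formatted. */
    #define PMD_DRV_LOG_RAW(level, fmt, args...) \
        RTE_LOG(level, PMD, "%s(): " fmt, __func__, ## args)

    /* Driver-facing wrapper: appends the newline itself. */
    #define PMD_DRV_LOG(level, fmt, args...) \
        PMD_DRV_LOG_RAW(level, fmt "\n", ## args)

    /* Shared-code compat wrapper maps to the RAW variant, since the
     * shared code's format strings already end in \n. */
    #define DEBUGOUT(S) PMD_DRV_LOG_RAW(DEBUG, S)

    static void
    log_examples(void)
    {
        DEBUGOUT("init shared code\n");            /* newline from the caller */
        PMD_DRV_LOG(DEBUG, "Failed to init VSI");  /* newline from the macro */
    }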
- * [dpdk-dev] [PATCH v3 10/20] i40e: indent logs sections
  2014-09-17 13:46 [dpdk-dev] [PATCH v3 00/20] cleanup logs in main PMDs David Marchand
                   ` (8 preceding siblings ...)
  2014-09-17 13:46 ` [dpdk-dev] [PATCH v3 09/20] i40e/base: add a raw macro for use by shared code David Marchand
@ 2014-09-17 13:46 ` David Marchand
  2014-09-17 13:46 ` [dpdk-dev] [PATCH v3 11/20] i40e: clean log messages David Marchand
                   ` (10 subsequent siblings)
  20 siblings, 0 replies; 25+ messages in thread
From: David Marchand @ 2014-09-17 13:46 UTC (permalink / raw)
  To: dev
Prepare for the next commit: indent the sections where log messages will be modified,
so that the next patch is only about \n.
Signed-off-by: David Marchand <david.marchand@6wind.com>
---
 lib/librte_pmd_i40e/i40e_ethdev.c    |  101 +++++++++++++++++-----------------
 lib/librte_pmd_i40e/i40e_ethdev_vf.c |   76 ++++++++++++-------------
 lib/librte_pmd_i40e/i40e_pf.c        |    3 +-
 lib/librte_pmd_i40e/i40e_rxtx.c      |   54 +++++++++---------
 4 files changed, 113 insertions(+), 121 deletions(-)
diff --git a/lib/librte_pmd_i40e/i40e_ethdev.c b/lib/librte_pmd_i40e/i40e_ethdev.c
index af2e1cb..aadb548 100644
--- a/lib/librte_pmd_i40e/i40e_ethdev.c
+++ b/lib/librte_pmd_i40e/i40e_ethdev.c
@@ -371,7 +371,7 @@ eth_i40e_dev_init(__rte_unused struct eth_driver *eth_drv,
 	hw->hw_addr = (uint8_t *)(pci_dev->mem_resource[0].addr);
 	if (!hw->hw_addr) {
 		PMD_INIT_LOG(ERR, "Hardware is not available, "
-					"as address is NULL\n");
+			     "as address is NULL\n");
 		return -ENODEV;
 	}
 
@@ -406,13 +406,12 @@ eth_i40e_dev_init(__rte_unused struct eth_driver *eth_drv,
 		PMD_INIT_LOG(ERR, "Failed to init adminq: %d", ret);
 		return -EIO;
 	}
-	PMD_INIT_LOG(INFO, "FW %d.%d API %d.%d NVM "
-			"%02d.%02d.%02d eetrack %04x\n",
-			hw->aq.fw_maj_ver, hw->aq.fw_min_ver,
-			hw->aq.api_maj_ver, hw->aq.api_min_ver,
-			((hw->nvm.version >> 12) & 0xf),
-			((hw->nvm.version >> 4) & 0xff),
-			(hw->nvm.version & 0xf), hw->nvm.eetrack);
+	PMD_INIT_LOG(INFO, "FW %d.%d API %d.%d NVM %02d.%02d.%02d eetrack %04x\n",
+		     hw->aq.fw_maj_ver, hw->aq.fw_min_ver,
+		     hw->aq.api_maj_ver, hw->aq.api_min_ver,
+		     ((hw->nvm.version >> 12) & 0xf),
+		     ((hw->nvm.version >> 4) & 0xff),
+		     (hw->nvm.version & 0xf), hw->nvm.eetrack);
 
 	/* Disable LLDP */
 	ret = i40e_aq_stop_lldp(hw, true, NULL);
@@ -764,8 +763,8 @@ i40e_dev_start(struct rte_eth_dev *dev)
 	if ((dev->data->dev_conf.link_duplex != ETH_LINK_AUTONEG_DUPLEX) &&
 		(dev->data->dev_conf.link_duplex != ETH_LINK_FULL_DUPLEX)) {
 		PMD_INIT_LOG(ERR, "Invalid link_duplex (%hu) for port %hhu\n",
-				dev->data->dev_conf.link_duplex,
-				dev->data->port_id);
+			     dev->data->dev_conf.link_duplex,
+			     dev->data->port_id);
 		return -EINVAL;
 	}
 
@@ -1844,21 +1843,22 @@ i40e_pf_parameter_init(struct rte_eth_dev *dev)
 		pf->vf_nb_qps = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF;
 		if (dev->pci_dev->max_vfs > hw->func_caps.num_vfs) {
 			PMD_INIT_LOG(ERR, "Config VF number %u, "
-				"max supported %u.\n", dev->pci_dev->max_vfs,
-						hw->func_caps.num_vfs);
+				     "max supported %u.\n",
+				     dev->pci_dev->max_vfs,
+				     hw->func_caps.num_vfs);
 			return -EINVAL;
 		}
 		if (pf->vf_nb_qps > I40E_MAX_QP_NUM_PER_VF) {
 			PMD_INIT_LOG(ERR, "FVL VF queue %u, "
-				"max support %u queues.\n", pf->vf_nb_qps,
-						I40E_MAX_QP_NUM_PER_VF);
+				     "max support %u queues.\n",
+				     pf->vf_nb_qps, I40E_MAX_QP_NUM_PER_VF);
 			return -EINVAL;
 		}
 		pf->vf_num = dev->pci_dev->max_vfs;
 		sum_queues += pf->vf_nb_qps * pf->vf_num;
 		sum_vsis   += pf->vf_num;
 		PMD_INIT_LOG(INFO, "Max VF num:%u each has queue pairs:%u\n",
-						pf->vf_num, pf->vf_nb_qps);
+			     pf->vf_num, pf->vf_nb_qps);
 	} else
 		pf->vf_num = 0;
 
@@ -1883,16 +1883,17 @@ i40e_pf_parameter_init(struct rte_eth_dev *dev)
 		sum_queues > hw->func_caps.num_rx_qp) {
 		PMD_INIT_LOG(ERR, "VSI/QUEUE setting can't be satisfied\n");
 		PMD_INIT_LOG(ERR, "Max VSIs: %u, asked:%u\n",
-				pf->max_num_vsi, sum_vsis);
+			     pf->max_num_vsi, sum_vsis);
 		PMD_INIT_LOG(ERR, "Total queue pairs:%u, asked:%u\n",
-				hw->func_caps.num_rx_qp, sum_queues);
+			     hw->func_caps.num_rx_qp, sum_queues);
 		return -EINVAL;
 	}
 
 	/* Each VSI occupy 1 MSIX interrupt at least, plus IRQ0 for misc intr cause */
 	if (sum_vsis > hw->func_caps.num_msix_vectors - 1) {
-		PMD_INIT_LOG(ERR, "Too many VSIs(%u), MSIX intr(%u) not enough\n",
-				sum_vsis, hw->func_caps.num_msix_vectors);
+		PMD_INIT_LOG(ERR, "Too many VSIs(%u), "
+			     "MSIX intr(%u) not enough\n",
+			     sum_vsis, hw->func_caps.num_msix_vectors);
 		return -EINVAL;
 	}
 	return I40E_SUCCESS;
@@ -1952,8 +1953,7 @@ i40e_res_pool_init (struct i40e_res_pool_info *pool, uint32_t base,
 
 	entry = rte_zmalloc("i40e", sizeof(*entry), 0);
 	if (entry == NULL) {
-		PMD_DRV_LOG(ERR, "Failed to allocate memory for "
-						"resource pool\n");
+		PMD_DRV_LOG(ERR, "Failed to allocate memory for resource pool\n");
 		return -ENOMEM;
 	}
 
@@ -2097,7 +2097,7 @@ i40e_res_pool_alloc(struct i40e_res_pool_info *pool,
 
 	if (pool->num_free < num) {
 		PMD_DRV_LOG(ERR, "No resource. ask:%u, available:%u\n",
-				num, pool->num_free);
+			    num, pool->num_free);
 		return -ENOMEM;
 	}
 
@@ -2135,7 +2135,7 @@ i40e_res_pool_alloc(struct i40e_res_pool_info *pool,
 		entry = rte_zmalloc("res_pool", sizeof(*entry), 0);
 		if (entry == NULL) {
 			PMD_DRV_LOG(ERR, "Failed to allocate memory for "
-					"resource pool\n");
+				    "resource pool\n");
 			return -ENOMEM;
 		}
 		entry->base = valid_entry->base;
@@ -2170,15 +2170,14 @@ validate_tcmap_parameter(struct i40e_vsi *vsi, uint8_t enabled_tcmap)
 
 	/* If DCB is not supported, only default TC is supported */
 	if (!hw->func_caps.dcb && enabled_tcmap != I40E_DEFAULT_TCMAP) {
-		PMD_DRV_LOG(ERR, "DCB is not enabled, "
-				"only TC0 is supported\n");
+		PMD_DRV_LOG(ERR, "DCB is not enabled, only TC0 is supported\n");
 		return -EINVAL;
 	}
 
 	if (!bitmap_is_subset(hw->func_caps.enabled_tcmap, enabled_tcmap)) {
 		PMD_DRV_LOG(ERR, "Enabled TC map 0x%x not applicable to "
-			"HW support 0x%x\n", hw->func_caps.enabled_tcmap,
-							enabled_tcmap);
+			    "HW support 0x%x\n", hw->func_caps.enabled_tcmap,
+			    enabled_tcmap);
 		return -EINVAL;
 	}
 	return I40E_SUCCESS;
@@ -2357,7 +2356,7 @@ i40e_veb_setup(struct i40e_pf *pf, struct i40e_vsi *vsi)
 
 	if (NULL == pf || vsi == NULL) {
 		PMD_DRV_LOG(ERR, "veb setup failed, "
-			"associated VSI shouldn't null\n");
+			    "associated VSI shouldn't null\n");
 		return NULL;
 	}
 	hw = I40E_PF_TO_HW(pf);
@@ -2377,7 +2376,7 @@ i40e_veb_setup(struct i40e_pf *pf, struct i40e_vsi *vsi)
 
 	if (ret != I40E_SUCCESS) {
 		PMD_DRV_LOG(ERR, "Add veb failed, aq_err: %d\n",
-					hw->aq.asq_last_status);
+			    hw->aq.asq_last_status);
 		goto fail;
 	}
 
@@ -2386,7 +2385,7 @@ i40e_veb_setup(struct i40e_pf *pf, struct i40e_vsi *vsi)
 				&veb->stats_idx, NULL, NULL, NULL);
 	if (ret != I40E_SUCCESS) {
 		PMD_DRV_LOG(ERR, "Get veb statics index failed, aq_err: %d\n",
-						hw->aq.asq_last_status);
+			    hw->aq.asq_last_status);
 		goto fail;
 	}
 
@@ -2473,7 +2472,7 @@ i40e_update_default_filter_setting(struct i40e_vsi *vsi)
 		struct i40e_mac_filter *f;
 
 		PMD_DRV_LOG(WARNING, "Cannot remove the default "
-						"macvlan filter\n");
+			    "macvlan filter\n");
 		/* It needs to add the permanent mac into mac list */
 		f = rte_zmalloc("macv_filter", sizeof(*f), 0);
 		if (f == NULL) {
@@ -2503,8 +2502,8 @@ i40e_vsi_dump_bw_config(struct i40e_vsi *vsi)
 	memset(&bw_config, 0, sizeof(bw_config));
 	ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
 	if (ret != I40E_SUCCESS) {
-		PMD_DRV_LOG(ERR, "VSI failed to get bandwidth "
-			"configuration %u\n", hw->aq.asq_last_status);
+		PMD_DRV_LOG(ERR, "VSI failed to get bandwidth configuration %u\n",
+			    hw->aq.asq_last_status);
 		return ret;
 	}
 
@@ -2513,7 +2512,7 @@ i40e_vsi_dump_bw_config(struct i40e_vsi *vsi)
 					&ets_sla_config, NULL);
 	if (ret != I40E_SUCCESS) {
 		PMD_DRV_LOG(ERR, "VSI failed to get TC bandwdith "
-			"configuration %u\n", hw->aq.asq_last_status);
+			    "configuration %u\n", hw->aq.asq_last_status);
 		return ret;
 	}
 
@@ -2522,12 +2521,12 @@ i40e_vsi_dump_bw_config(struct i40e_vsi *vsi)
 	PMD_DRV_LOG(INFO, "VSI max_bw:%u\n", bw_config.max_bw);
 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
 		PMD_DRV_LOG(INFO, "\tVSI TC%u:share credits %u\n", i,
-					ets_sla_config.share_credits[i]);
+			    ets_sla_config.share_credits[i]);
 		PMD_DRV_LOG(INFO, "\tVSI TC%u:credits %u\n", i,
-			rte_le_to_cpu_16(ets_sla_config.credits[i]));
+			    rte_le_to_cpu_16(ets_sla_config.credits[i]));
 		PMD_DRV_LOG(INFO, "\tVSI TC%u: max credits: %u", i,
-			rte_le_to_cpu_16(ets_sla_config.credits[i / 4]) >>
-								(i * 4));
+			    rte_le_to_cpu_16(ets_sla_config.credits[i / 4]) >>
+			    (i * 4));
 	}
 
 	return 0;
@@ -2549,13 +2548,13 @@ i40e_vsi_setup(struct i40e_pf *pf,
 
 	if (type != I40E_VSI_MAIN && uplink_vsi == NULL) {
 		PMD_DRV_LOG(ERR, "VSI setup failed, "
-			"VSI link shouldn't be NULL\n");
+			    "VSI link shouldn't be NULL\n");
 		return NULL;
 	}
 
 	if (type == I40E_VSI_MAIN && uplink_vsi != NULL) {
 		PMD_DRV_LOG(ERR, "VSI setup failed, MAIN VSI "
-				"uplink VSI should be NULL\n");
+			    "uplink VSI should be NULL\n");
 		return NULL;
 	}
 
@@ -2656,7 +2655,7 @@ i40e_vsi_setup(struct i40e_pf *pf,
 						I40E_DEFAULT_TCMAP);
 		if (ret != I40E_SUCCESS) {
 			PMD_DRV_LOG(ERR, "Failed to configure "
-					"TC queue mapping\n");
+				    "TC queue mapping\n");
 			goto fail_msix_alloc;
 		}
 		ctxt.seid = vsi->seid;
@@ -2719,7 +2718,7 @@ i40e_vsi_setup(struct i40e_pf *pf,
 						I40E_DEFAULT_TCMAP);
 		if (ret != I40E_SUCCESS) {
 			PMD_DRV_LOG(ERR, "Failed to configure "
-					"TC queue mapping\n");
+				    "TC queue mapping\n");
 			goto fail_msix_alloc;
 		}
 		ctxt.info.up_enable_bits = I40E_DEFAULT_TCMAP;
@@ -2739,7 +2738,7 @@ i40e_vsi_setup(struct i40e_pf *pf,
 		ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
 		if (ret) {
 			PMD_DRV_LOG(ERR, "add vsi failed, aq_err=%d\n",
-				 hw->aq.asq_last_status);
+				    hw->aq.asq_last_status);
 			goto fail_msix_alloc;
 		}
 		memcpy(&vsi->info, &ctxt.info, sizeof(ctxt.info));
@@ -2807,7 +2806,7 @@ i40e_vsi_config_vlan_stripping(struct i40e_vsi *vsi, bool on)
 	ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
 	if (ret)
 		PMD_DRV_LOG(INFO, "Update VSI failed to %s vlan stripping\n",
-						on ? "enable" : "disable");
+			    on ? "enable" : "disable");
 
 	return ret;
 }
@@ -2997,7 +2996,7 @@ i40e_switch_tx_queue(struct i40e_hw *hw, uint16_t q_idx, bool on)
 	/* Check if it is timeout */
 	if (j >= I40E_CHK_Q_ENA_COUNT) {
 		PMD_DRV_LOG(ERR, "Failed to %s tx queue[%u]\n",
-			(on ? "enable" : "disable"), q_idx);
+			    (on ? "enable" : "disable"), q_idx);
 		return I40E_ERR_TIMEOUT;
 	}
 
@@ -3076,7 +3075,7 @@ i40e_switch_rx_queue(struct i40e_hw *hw, uint16_t q_idx, bool on)
 	/* Check if it is timeout */
 	if (j >= I40E_CHK_Q_ENA_COUNT) {
 		PMD_DRV_LOG(ERR, "Failed to %s rx queue[%u]\n",
-			(on ? "enable" : "disable"), q_idx);
+			    (on ? "enable" : "disable"), q_idx);
 		return I40E_ERR_TIMEOUT;
 	}
 
@@ -3168,7 +3167,7 @@ i40e_vsi_rx_init(struct i40e_vsi *vsi)
 		ret = i40e_rx_queue_init(data->rx_queues[i]);
 		if (ret != I40E_SUCCESS) {
 			PMD_DRV_LOG(ERR, "Failed to do RX queue "
-					"initialization\n");
+				    "initialization\n");
 			break;
 		}
 	}
@@ -3351,7 +3350,7 @@ i40e_dev_handle_aq_msg(struct rte_eth_dev *dev)
 
 		if (ret != I40E_SUCCESS) {
 			PMD_DRV_LOG(INFO, "Failed to read msg from AdminQ, "
-				"aq_err: %u\n", hw->aq.asq_last_status);
+				    "aq_err: %u\n", hw->aq.asq_last_status);
 			break;
 		}
 		opcode = rte_le_to_cpu_16(info.desc.opcode);
@@ -3368,7 +3367,7 @@ i40e_dev_handle_aq_msg(struct rte_eth_dev *dev)
 			break;
 		default:
 			PMD_DRV_LOG(ERR, "Request %u is not supported yet\n",
-				opcode);
+				    opcode);
 			break;
 		}
 		/* Reset the buffer after processing one */
@@ -3405,7 +3404,7 @@ i40e_dev_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
 	/* Shared IRQ case, return */
 	if (!(cause & I40E_PFINT_ICR0_INTEVENT_MASK)) {
 		PMD_DRV_LOG(INFO, "Port%d INT0:share IRQ case, "
-			"no INT event to process\n", hw->pf_id);
+			    "no INT event to process\n", hw->pf_id);
 		goto done;
 	}
 
@@ -3626,7 +3625,7 @@ i40e_find_all_vlan_for_mac(struct i40e_vsi *vsi,
 				if (vsi->vfta[j] & (1 << k)) {
 					if (i > num - 1) {
 						PMD_DRV_LOG(ERR, "vlan number "
-								"not match\n");
+							    "not match\n");
 						return I40E_ERR_PARAM;
 					}
 					(void)rte_memcpy(&mv_f[i].macaddr,
diff --git a/lib/librte_pmd_i40e/i40e_ethdev_vf.c b/lib/librte_pmd_i40e/i40e_ethdev_vf.c
index ed62668..17009bd 100644
--- a/lib/librte_pmd_i40e/i40e_ethdev_vf.c
+++ b/lib/librte_pmd_i40e/i40e_ethdev_vf.c
@@ -206,7 +206,7 @@ i40evf_parse_pfmsg(struct i40e_vf *vf,
 				vpe->event_data.link_event.link_status;
 			vf->pend_msg |= PFMSG_LINK_CHANGE;
 			PMD_DRV_LOG(INFO, "Link status update:%s\n",
-					vf->link_up ? "up" : "down");
+				    vf->link_up ? "up" : "down");
 			break;
 		case I40E_VIRTCHNL_EVENT_RESET_IMPENDING:
 			vf->vf_reset = true;
@@ -219,9 +219,8 @@ i40evf_parse_pfmsg(struct i40e_vf *vf,
 			PMD_DRV_LOG(INFO, "PF driver closed\n");
 			break;
 		default:
-			PMD_DRV_LOG(ERR,
-				"%s: Unknown event %d from pf\n",
-				__func__, vpe->event);
+			PMD_DRV_LOG(ERR, "%s: Unknown event %d from pf\n",
+				    __func__, vpe->event);
 		}
 	} else {
 		/* async reply msg on command issued by vf previously */
@@ -351,7 +350,7 @@ i40evf_execute_vf_cmd(struct rte_eth_dev *dev, struct vf_cmd_info *args)
 		PMD_DRV_LOG(ERR, "Failed to read message from AdminQ\n");
 	else if (args->ops != info.ops)
 		PMD_DRV_LOG(ERR, "command mismatch, expect %u, get %u\n",
-				args->ops, info.ops);
+			    args->ops, info.ops);
 
 	return (err | info.result);
 }
@@ -392,8 +391,8 @@ i40evf_check_api_version(struct rte_eth_dev *dev)
 	else if ((pver->major != version.major) ||
 	    (pver->minor != version.minor)) {
 		PMD_INIT_LOG(ERR, "pf/vf API version mismatch. "
-			"(%u.%u)-(%u.%u)\n", pver->major, pver->minor,
-					version.major, version.minor);
+			     "(%u.%u)-(%u.%u)\n", pver->major, pver->minor,
+			     version.major, version.minor);
 		return -1;
 	}
 
@@ -418,8 +417,7 @@ i40evf_get_vf_resource(struct rte_eth_dev *dev)
 	err = i40evf_execute_vf_cmd(dev, &args);
 
 	if (err) {
-		PMD_DRV_LOG(ERR, "fail to execute command "
-					"OP_GET_VF_RESOURCE\n");
+		PMD_DRV_LOG(ERR, "fail to execute command OP_GET_VF_RESOURCE\n");
 		return err;
 	}
 
@@ -462,7 +460,7 @@ i40evf_config_promisc(struct rte_eth_dev *dev,
 
 	if (err)
 		PMD_DRV_LOG(ERR, "fail to execute command "
-				"CONFIG_PROMISCUOUS_MODE\n");
+			    "CONFIG_PROMISCUOUS_MODE\n");
 	return err;
 }
 
@@ -595,7 +593,7 @@ i40evf_configure_queues(struct rte_eth_dev *dev)
 	err = i40evf_execute_vf_cmd(dev, &args);
 	if (err)
 		PMD_DRV_LOG(ERR, "fail to execute command "
-				"OP_CONFIG_VSI_QUEUES\n");
+			    "OP_CONFIG_VSI_QUEUES\n");
 	rte_free(queue_info);
 
 	return err;
@@ -661,8 +659,8 @@ i40evf_switch_queue(struct rte_eth_dev *dev, bool isrx, uint16_t qid,
 	args.out_size = I40E_AQ_BUF_SZ;
 	err = i40evf_execute_vf_cmd(dev, &args);
 	if (err)
-		PMD_DRV_LOG(ERR, "fail to switch %s %u %s\n", isrx ? "RX" : "TX",
-			qid, on ? "on" : "off");
+		PMD_DRV_LOG(ERR, "fail to switch %s %u %s\n",
+			    isrx ? "RX" : "TX", qid, on ? "on" : "off");
 
 	return err;
 }
@@ -680,8 +678,7 @@ i40evf_start_queues(struct rte_eth_dev *dev)
 		if (rxq->start_rx_per_q)
 			continue;
 		if (i40evf_dev_rx_queue_start(dev, i) != 0) {
-			PMD_DRV_LOG(ERR, "Fail to start queue %u\n",
-				i);
+			PMD_DRV_LOG(ERR, "Fail to start queue %u\n", i);
 			return -1;
 		}
 	}
@@ -691,8 +688,7 @@ i40evf_start_queues(struct rte_eth_dev *dev)
 		if (txq->start_tx_per_q)
 			continue;
 		if (i40evf_dev_tx_queue_start(dev, i) != 0) {
-			PMD_DRV_LOG(ERR, "Fail to start queue %u\n",
-				i);
+			PMD_DRV_LOG(ERR, "Fail to start queue %u\n", i);
 			return -1;
 		}
 	}
@@ -708,8 +704,7 @@ i40evf_stop_queues(struct rte_eth_dev *dev)
 	/* Stop TX queues first */
 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
 		if (i40evf_dev_tx_queue_stop(dev, i) != 0) {
-			PMD_DRV_LOG(ERR, "Fail to start queue %u\n",
-				i);
+			PMD_DRV_LOG(ERR, "Fail to start queue %u\n", i);
 			return -1;
 		}
 	}
@@ -717,8 +712,7 @@ i40evf_stop_queues(struct rte_eth_dev *dev)
 	/* Then stop RX queues */
 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
 		if (i40evf_dev_rx_queue_stop(dev, i) != 0) {
-			PMD_DRV_LOG(ERR, "Fail to start queue %u\n",
-				i);
+			PMD_DRV_LOG(ERR, "Fail to start queue %u\n", i);
 			return -1;
 		}
 	}
@@ -738,9 +732,9 @@ i40evf_add_mac_addr(struct rte_eth_dev *dev, struct ether_addr *addr)
 
 	if (i40e_validate_mac_addr(addr->addr_bytes) != I40E_SUCCESS) {
 		PMD_DRV_LOG(ERR, "Invalid mac:%x:%x:%x:%x:%x:%x\n",
-			addr->addr_bytes[0], addr->addr_bytes[1],
-			addr->addr_bytes[2], addr->addr_bytes[3],
-			addr->addr_bytes[4], addr->addr_bytes[5]);
+			    addr->addr_bytes[0], addr->addr_bytes[1],
+			    addr->addr_bytes[2], addr->addr_bytes[3],
+			    addr->addr_bytes[4], addr->addr_bytes[5]);
 		return -1;
 	}
 
@@ -758,7 +752,7 @@ i40evf_add_mac_addr(struct rte_eth_dev *dev, struct ether_addr *addr)
 	err = i40evf_execute_vf_cmd(dev, &args);
 	if (err)
 		PMD_DRV_LOG(ERR, "fail to execute command "
-				"OP_ADD_ETHER_ADDRESS\n");
+			    "OP_ADD_ETHER_ADDRESS\n");
 
 	return err;
 }
@@ -775,9 +769,9 @@ i40evf_del_mac_addr(struct rte_eth_dev *dev, struct ether_addr *addr)
 
 	if (i40e_validate_mac_addr(addr->addr_bytes) != I40E_SUCCESS) {
 		PMD_DRV_LOG(ERR, "Invalid mac:%x-%x-%x-%x-%x-%x\n",
-			addr->addr_bytes[0], addr->addr_bytes[1],
-			addr->addr_bytes[2], addr->addr_bytes[3],
-			addr->addr_bytes[4], addr->addr_bytes[5]);
+			    addr->addr_bytes[0], addr->addr_bytes[1],
+			    addr->addr_bytes[2], addr->addr_bytes[3],
+			    addr->addr_bytes[4], addr->addr_bytes[5]);
 		return -1;
 	}
 
@@ -795,7 +789,7 @@ i40evf_del_mac_addr(struct rte_eth_dev *dev, struct ether_addr *addr)
 	err = i40evf_execute_vf_cmd(dev, &args);
 	if (err)
 		PMD_DRV_LOG(ERR, "fail to execute command "
-				"OP_DEL_ETHER_ADDRESS\n");
+			    "OP_DEL_ETHER_ADDRESS\n");
 
 	return err;
 }
@@ -1244,7 +1238,7 @@ i40evf_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 
 		if (err)
 			PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on\n",
-				rx_queue_id);
+				    rx_queue_id);
 	}
 
 	return err;
@@ -1263,7 +1257,7 @@ i40evf_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 
 		if (err) {
 			PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off\n",
-				rx_queue_id);
+				    rx_queue_id);
 			return err;
 		}
 
@@ -1288,7 +1282,7 @@ i40evf_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 
 		if (err)
 			PMD_DRV_LOG(ERR, "Failed to switch TX queue %u on\n",
-				tx_queue_id);
+				    tx_queue_id);
 	}
 
 	return err;
@@ -1307,7 +1301,7 @@ i40evf_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 
 		if (err) {
 			PMD_DRV_LOG(ERR, "Failed to switch TX queue %u of\n",
-				tx_queue_id);
+				    tx_queue_id);
 			return err;
 		}
 
@@ -1391,20 +1385,20 @@ i40evf_dev_start(struct rte_eth_dev *dev)
 		if (vf->max_pkt_len <= ETHER_MAX_LEN ||
 			vf->max_pkt_len > I40E_FRAME_SIZE_MAX) {
 			PMD_DRV_LOG(ERR, "maximum packet length must "
-				"be larger than %u and smaller than %u,"
-					"as jumbo frame is enabled\n",
-						(uint32_t)ETHER_MAX_LEN,
-					(uint32_t)I40E_FRAME_SIZE_MAX);
+				    "be larger than %u and smaller than %u,"
+				    "as jumbo frame is enabled\n",
+				    (uint32_t)ETHER_MAX_LEN,
+				    (uint32_t)I40E_FRAME_SIZE_MAX);
 			return I40E_ERR_CONFIG;
 		}
 	} else {
 		if (vf->max_pkt_len < ETHER_MIN_LEN ||
 			vf->max_pkt_len > ETHER_MAX_LEN) {
 			PMD_DRV_LOG(ERR, "maximum packet length must be "
-					"larger than %u and smaller than %u, "
-					"as jumbo frame is disabled\n",
-						(uint32_t)ETHER_MIN_LEN,
-						(uint32_t)ETHER_MAX_LEN);
+				    "larger than %u and smaller than %u, "
+				    "as jumbo frame is disabled\n",
+				    (uint32_t)ETHER_MIN_LEN,
+				    (uint32_t)ETHER_MAX_LEN);
 			return I40E_ERR_CONFIG;
 		}
 	}
diff --git a/lib/librte_pmd_i40e/i40e_pf.c b/lib/librte_pmd_i40e/i40e_pf.c
index 4e1e043..ed9773a 100644
--- a/lib/librte_pmd_i40e/i40e_pf.c
+++ b/lib/librte_pmd_i40e/i40e_pf.c
@@ -930,8 +930,7 @@ i40e_pf_host_handle_vf_msg(struct rte_eth_dev *dev,
 	case I40E_VIRTCHNL_OP_FCOE:
 		PMD_DRV_LOG(ERR, "OP_FCOE received, not supported\n");
 	default:
-		PMD_DRV_LOG(ERR, "%u received, not supported\n",
-							opcode);
+		PMD_DRV_LOG(ERR, "%u received, not supported\n", opcode);
 		i40e_pf_host_send_msg_to_vf(vf, opcode,
 				I40E_ERR_PARAM, NULL, 0);
 		break;
diff --git a/lib/librte_pmd_i40e/i40e_rxtx.c b/lib/librte_pmd_i40e/i40e_rxtx.c
index 70e2bd4..99a6572 100644
--- a/lib/librte_pmd_i40e/i40e_rxtx.c
+++ b/lib/librte_pmd_i40e/i40e_rxtx.c
@@ -728,8 +728,8 @@ rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 			uint16_t i, j;
 
 			PMD_RX_LOG(DEBUG, "Rx mbuf alloc failed for "
-					"port_id=%u, queue_id=%u\n",
-					rxq->port_id, rxq->queue_id);
+				   "port_id=%u, queue_id=%u\n",
+				   rxq->port_id, rxq->queue_id);
 			rxq->rx_nb_avail = 0;
 			rxq->rx_tail = (uint16_t)(rxq->rx_tail - nb_rx);
 			for (i = 0, j = rxq->rx_tail; i < nb_rx; i++, j++)
@@ -1453,7 +1453,7 @@ i40e_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 
 		if (err) {
 			PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on\n",
-				rx_queue_id);
+				    rx_queue_id);
 
 			i40e_rx_queue_release_mbufs(rxq);
 			i40e_reset_rx_queue(rxq);
@@ -1479,7 +1479,7 @@ i40e_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 
 		if (err) {
 			PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off\n",
-				rx_queue_id);
+				    rx_queue_id);
 			return err;
 		}
 		i40e_rx_queue_release_mbufs(rxq);
@@ -1503,7 +1503,7 @@ i40e_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 		err = i40e_switch_tx_queue(hw, tx_queue_id + q_base, TRUE);
 		if (err)
 			PMD_DRV_LOG(ERR, "Failed to switch TX queue %u on\n",
-				tx_queue_id);
+				    tx_queue_id);
 	}
 
 	return err;
@@ -1525,7 +1525,7 @@ i40e_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 
 		if (err) {
 			PMD_DRV_LOG(ERR, "Failed to switch TX queue %u of\n",
-				tx_queue_id);
+				    tx_queue_id);
 			return err;
 		}
 
@@ -1553,14 +1553,14 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
 
 	if (!vsi || queue_idx >= vsi->nb_qps) {
 		PMD_DRV_LOG(ERR, "VSI not available or queue "
-				"index exceeds the maximum\n");
+			    "index exceeds the maximum\n");
 		return I40E_ERR_PARAM;
 	}
 	if (((nb_desc * sizeof(union i40e_rx_desc)) % I40E_ALIGN) != 0 ||
 					(nb_desc > I40E_MAX_RING_DESC) ||
 					(nb_desc < I40E_MIN_RING_DESC)) {
 		PMD_DRV_LOG(ERR, "Number (%u) of receive descriptors is "
-						"invalid\n", nb_desc);
+			    "invalid\n", nb_desc);
 		return I40E_ERR_PARAM;
 	}
 
@@ -1577,7 +1577,7 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
 				 socket_id);
 	if (!rxq) {
 		PMD_DRV_LOG(ERR, "Failed to allocate memory for "
-					"rx queue data structure\n");
+			    "rx queue data structure\n");
 		return (-ENOMEM);
 	}
 	rxq->mp = mp;
@@ -1644,17 +1644,17 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
 	if (!use_def_burst_func && !dev->data->scattered_rx) {
 #ifdef RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC
 		PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
-			"satisfied. Rx Burst Bulk Alloc function will be "
-					"used on port=%d, queue=%d.\n",
-					rxq->port_id, rxq->queue_id);
+			     "satisfied. Rx Burst Bulk Alloc function will be "
+			     "used on port=%d, queue=%d.\n",
+			     rxq->port_id, rxq->queue_id);
 		dev->rx_pkt_burst = i40e_recv_pkts_bulk_alloc;
 #endif /* RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC */
 	} else {
 		PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
-				"not satisfied, Scattered Rx is requested, "
-				"or RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC is "
-					"not enabled on port=%d, queue=%d.\n",
-						rxq->port_id, rxq->queue_id);
+			     "not satisfied, Scattered Rx is requested, "
+			     "or RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC is "
+			     "not enabled on port=%d, queue=%d.\n",
+			     rxq->port_id, rxq->queue_id);
 	}
 
 	return 0;
@@ -1750,7 +1750,7 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
 
 	if (!vsi || queue_idx >= vsi->nb_qps) {
 		PMD_DRV_LOG(ERR, "VSI is NULL, or queue index (%u) "
-				"exceeds the maximum\n", queue_idx);
+			    "exceeds the maximum\n", queue_idx);
 		return I40E_ERR_PARAM;
 	}
 
@@ -1758,7 +1758,7 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
 					(nb_desc > I40E_MAX_RING_DESC) ||
 					(nb_desc < I40E_MIN_RING_DESC)) {
 		PMD_DRV_LOG(ERR, "Number (%u) of transmit descriptors is "
-                                                "invalid\n", nb_desc);
+			    "invalid\n", nb_desc);
 		return I40E_ERR_PARAM;
 	}
 
@@ -1847,7 +1847,7 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
 				  socket_id);
 	if (!txq) {
 		PMD_DRV_LOG(ERR, "Failed to allocate memory for "
-					"tx queue structure\n");
+			    "tx queue structure\n");
 		return (-ENOMEM);
 	}
 
@@ -2192,20 +2192,20 @@ i40e_rx_queue_config(struct i40e_rx_queue *rxq)
 		if (rxq->max_pkt_len <= ETHER_MAX_LEN ||
 			rxq->max_pkt_len > I40E_FRAME_SIZE_MAX) {
 			PMD_DRV_LOG(ERR, "maximum packet length must "
-				"be larger than %u and smaller than %u,"
-					"as jumbo frame is enabled\n",
-						(uint32_t)ETHER_MAX_LEN,
-					(uint32_t)I40E_FRAME_SIZE_MAX);
+				    "be larger than %u and smaller than %u,"
+				    "as jumbo frame is enabled\n",
+				    (uint32_t)ETHER_MAX_LEN,
+				    (uint32_t)I40E_FRAME_SIZE_MAX);
 			return I40E_ERR_CONFIG;
 		}
 	} else {
 		if (rxq->max_pkt_len < ETHER_MIN_LEN ||
 			rxq->max_pkt_len > ETHER_MAX_LEN) {
 			PMD_DRV_LOG(ERR, "maximum packet length must be "
-					"larger than %u and smaller than %u, "
-					"as jumbo frame is disabled\n",
-						(uint32_t)ETHER_MIN_LEN,
-						(uint32_t)ETHER_MAX_LEN);
+				    "larger than %u and smaller than %u, "
+				    "as jumbo frame is disabled\n",
+				    (uint32_t)ETHER_MIN_LEN,
+				    (uint32_t)ETHER_MAX_LEN);
 			return I40E_ERR_CONFIG;
 		}
 	}
-- 
1.7.10.4
- * [dpdk-dev] [PATCH v3 11/20] i40e: clean log messages
  2014-09-17 13:46 [dpdk-dev] [PATCH v3 00/20] cleanup logs in main PMDs David Marchand
                   ` (9 preceding siblings ...)
  2014-09-17 13:46 ` [dpdk-dev] [PATCH v3 10/20] i40e: indent logs sections David Marchand
@ 2014-09-17 13:46 ` David Marchand
  2014-09-17 13:46 ` [dpdk-dev] [PATCH v3 12/20] i40e: always log init messages David Marchand
                   ` (9 subsequent siblings)
  20 siblings, 0 replies; 25+ messages in thread
From: David Marchand @ 2014-09-17 13:46 UTC (permalink / raw)
  To: dev
Clean log messages:
- remove leading \n in some messages,
- remove trailing \n in some messages,
- split multi-line messages.
Signed-off-by: David Marchand <david.marchand@6wind.com>
v2 Reviewed-by: Jay Rolette <rolette@infiniteio.com>
v2 Acked-by: Bruce Richardson <bruce.richardson@intel.com>
---
 lib/librte_pmd_i40e/i40e_ethdev.c    |  248 +++++++++++++++++-----------------
 lib/librte_pmd_i40e/i40e_ethdev_vf.c |  114 ++++++++--------
 lib/librte_pmd_i40e/i40e_pf.c        |   72 +++++-----
 lib/librte_pmd_i40e/i40e_rxtx.c      |   76 +++++------
 4 files changed, 255 insertions(+), 255 deletions(-)
diff --git a/lib/librte_pmd_i40e/i40e_ethdev.c b/lib/librte_pmd_i40e/i40e_ethdev.c
index aadb548..a00d6ca 100644
--- a/lib/librte_pmd_i40e/i40e_ethdev.c
+++ b/lib/librte_pmd_i40e/i40e_ethdev.c
@@ -371,7 +371,7 @@ eth_i40e_dev_init(__rte_unused struct eth_driver *eth_drv,
 	hw->hw_addr = (uint8_t *)(pci_dev->mem_resource[0].addr);
 	if (!hw->hw_addr) {
 		PMD_INIT_LOG(ERR, "Hardware is not available, "
-			     "as address is NULL\n");
+			     "as address is NULL");
 		return -ENODEV;
 	}
 
@@ -406,7 +406,7 @@ eth_i40e_dev_init(__rte_unused struct eth_driver *eth_drv,
 		PMD_INIT_LOG(ERR, "Failed to init adminq: %d", ret);
 		return -EIO;
 	}
-	PMD_INIT_LOG(INFO, "FW %d.%d API %d.%d NVM %02d.%02d.%02d eetrack %04x\n",
+	PMD_INIT_LOG(INFO, "FW %d.%d API %d.%d NVM %02d.%02d.%02d eetrack %04x",
 		     hw->aq.fw_maj_ver, hw->aq.fw_min_ver,
 		     hw->aq.api_maj_ver, hw->aq.api_min_ver,
 		     ((hw->nvm.version >> 12) & 0xf),
@@ -416,7 +416,7 @@ eth_i40e_dev_init(__rte_unused struct eth_driver *eth_drv,
 	/* Disable LLDP */
 	ret = i40e_aq_stop_lldp(hw, true, NULL);
 	if (ret != I40E_SUCCESS) /* Its failure can be ignored */
-		PMD_INIT_LOG(INFO, "Failed to stop lldp\n");
+		PMD_INIT_LOG(INFO, "Failed to stop lldp");
 
 	/* Clear PXE mode */
 	i40e_clear_pxe_mode(hw);
@@ -438,13 +438,13 @@ eth_i40e_dev_init(__rte_unused struct eth_driver *eth_drv,
 	/* Initialize the queue management */
 	ret = i40e_res_pool_init(&pf->qp_pool, 0, hw->func_caps.num_tx_qp);
 	if (ret < 0) {
-		PMD_INIT_LOG(ERR, "Failed to init queue pool\n");
+		PMD_INIT_LOG(ERR, "Failed to init queue pool");
 		goto err_qp_pool_init;
 	}
 	ret = i40e_res_pool_init(&pf->msix_pool, 1,
 				hw->func_caps.num_msix_vectors - 1);
 	if (ret < 0) {
-		PMD_INIT_LOG(ERR, "Failed to init MSIX pool\n");
+		PMD_INIT_LOG(ERR, "Failed to init MSIX pool");
 		goto err_msix_pool_init;
 	}
 
@@ -722,9 +722,9 @@ i40e_phy_conf_link(struct i40e_hw *hw, uint8_t abilities, uint8_t force_speed)
 	phy_conf.eeer = phy_ab.eeer_val;
 	phy_conf.low_power_ctrl = phy_ab.d3_lpan;
 
-	PMD_DRV_LOG(DEBUG, "\n\tCurrent: abilities %x, link_speed %x\n"
-		    "\tConfig:  abilities %x, link_speed %x",
-		    phy_ab.abilities, phy_ab.link_speed,
+	PMD_DRV_LOG(DEBUG, "\tCurrent: abilities %x, link_speed %x",
+		    phy_ab.abilities, phy_ab.link_speed);
+	PMD_DRV_LOG(DEBUG, "\tConfig:  abilities %x, link_speed %x",
 		    phy_conf.abilities, phy_conf.link_speed);
 
 	status = i40e_aq_set_phy_config(hw, &phy_conf, NULL);
@@ -762,7 +762,7 @@ i40e_dev_start(struct rte_eth_dev *dev)
 
 	if ((dev->data->dev_conf.link_duplex != ETH_LINK_AUTONEG_DUPLEX) &&
 		(dev->data->dev_conf.link_duplex != ETH_LINK_FULL_DUPLEX)) {
-		PMD_INIT_LOG(ERR, "Invalid link_duplex (%hu) for port %hhu\n",
+		PMD_INIT_LOG(ERR, "Invalid link_duplex (%hu) for port %hhu",
 			     dev->data->dev_conf.link_duplex,
 			     dev->data->port_id);
 		return -EINVAL;
@@ -771,7 +771,7 @@ i40e_dev_start(struct rte_eth_dev *dev)
 	/* Initialize VSI */
 	ret = i40e_vsi_init(vsi);
 	if (ret != I40E_SUCCESS) {
-		PMD_DRV_LOG(ERR, "Failed to init VSI\n");
+		PMD_DRV_LOG(ERR, "Failed to init VSI");
 		goto err_up;
 	}
 
@@ -782,7 +782,7 @@ i40e_dev_start(struct rte_eth_dev *dev)
 	/* Enable all queues which have been configured */
 	ret = i40e_vsi_switch_queues(vsi, TRUE);
 	if (ret != I40E_SUCCESS) {
-		PMD_DRV_LOG(ERR, "Failed to enable VSI\n");
+		PMD_DRV_LOG(ERR, "Failed to enable VSI");
 		goto err_up;
 	}
 
@@ -790,13 +790,13 @@ i40e_dev_start(struct rte_eth_dev *dev)
 	if ((vsi->type == I40E_VSI_MAIN) || (vsi->type == I40E_VSI_VMDQ2)) {
 		ret = i40e_aq_set_vsi_broadcast(hw, vsi->seid, true, NULL);
 		if (ret != I40E_SUCCESS)
-			PMD_DRV_LOG(INFO, "fail to set vsi broadcast\n");
+			PMD_DRV_LOG(INFO, "fail to set vsi broadcast");
 	}
 
 	/* Apply link configure */
 	ret = i40e_apply_link_speed(dev);
 	if (I40E_SUCCESS != ret) {
-		PMD_DRV_LOG(ERR, "Fail to apply link setting\n");
+		PMD_DRV_LOG(ERR, "Fail to apply link setting");
 		goto err_up;
 	}
 
@@ -871,12 +871,12 @@ i40e_dev_promiscuous_enable(struct rte_eth_dev *dev)
 	status = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
 							true, NULL);
 	if (status != I40E_SUCCESS)
-		PMD_DRV_LOG(ERR, "Failed to enable unicast promiscuous\n");
+		PMD_DRV_LOG(ERR, "Failed to enable unicast promiscuous");
 
 	status = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
 							TRUE, NULL);
 	if (status != I40E_SUCCESS)
-		PMD_DRV_LOG(ERR, "Failed to enable multicast promiscuous\n");
+		PMD_DRV_LOG(ERR, "Failed to enable multicast promiscuous");
 
 }
 
@@ -891,12 +891,12 @@ i40e_dev_promiscuous_disable(struct rte_eth_dev *dev)
 	status = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
 							false, NULL);
 	if (status != I40E_SUCCESS)
-		PMD_DRV_LOG(ERR, "Failed to disable unicast promiscuous\n");
+		PMD_DRV_LOG(ERR, "Failed to disable unicast promiscuous");
 
 	status = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
 							false, NULL);
 	if (status != I40E_SUCCESS)
-		PMD_DRV_LOG(ERR, "Failed to disable multicast promiscuous\n");
+		PMD_DRV_LOG(ERR, "Failed to disable multicast promiscuous");
 }
 
 static void
@@ -909,7 +909,7 @@ i40e_dev_allmulticast_enable(struct rte_eth_dev *dev)
 
 	ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid, TRUE, NULL);
 	if (ret != I40E_SUCCESS)
-		PMD_DRV_LOG(ERR, "Failed to enable multicast promiscuous\n");
+		PMD_DRV_LOG(ERR, "Failed to enable multicast promiscuous");
 }
 
 static void
@@ -926,7 +926,7 @@ i40e_dev_allmulticast_disable(struct rte_eth_dev *dev)
 	ret = i40e_aq_set_vsi_multicast_promiscuous(hw,
 				vsi->seid, FALSE, NULL);
 	if (ret != I40E_SUCCESS)
-		PMD_DRV_LOG(ERR, "Failed to disable multicast promiscuous\n");
+		PMD_DRV_LOG(ERR, "Failed to disable multicast promiscuous");
 }
 
 /*
@@ -971,7 +971,7 @@ i40e_dev_link_update(struct rte_eth_dev *dev,
 	if (status != I40E_SUCCESS) {
 		link.link_speed = ETH_LINK_SPEED_100;
 		link.link_duplex = ETH_LINK_FULL_DUPLEX;
-		PMD_DRV_LOG(ERR, "Failed to get link info\n");
+		PMD_DRV_LOG(ERR, "Failed to get link info");
 		goto out;
 	}
 
@@ -1519,12 +1519,12 @@ i40e_macaddr_add(struct rte_eth_dev *dev,
 	int ret;
 
 	if (!is_valid_assigned_ether_addr(mac_addr)) {
-		PMD_DRV_LOG(ERR, "Invalid ethernet address\n");
+		PMD_DRV_LOG(ERR, "Invalid ethernet address");
 		return;
 	}
 
 	if (is_same_ether_addr(mac_addr, &(pf->dev_addr))) {
-		PMD_DRV_LOG(INFO, "Ignore adding permanent mac address\n");
+		PMD_DRV_LOG(INFO, "Ignore adding permanent mac address");
 		return;
 	}
 
@@ -1532,7 +1532,7 @@ i40e_macaddr_add(struct rte_eth_dev *dev,
 	ret = i40e_aq_mac_address_write(hw, I40E_AQC_WRITE_TYPE_LAA_ONLY,
 					mac_addr->addr_bytes, NULL);
 	if (ret != I40E_SUCCESS) {
-		PMD_DRV_LOG(ERR, "Failed to write mac address\n");
+		PMD_DRV_LOG(ERR, "Failed to write mac address");
 		return;
 	}
 
@@ -1542,7 +1542,7 @@ i40e_macaddr_add(struct rte_eth_dev *dev,
 
 	ret = i40e_vsi_add_mac(vsi, mac_addr);
 	if (ret != I40E_SUCCESS) {
-		PMD_DRV_LOG(ERR, "Failed to add MACVLAN filter\n");
+		PMD_DRV_LOG(ERR, "Failed to add MACVLAN filter");
 		return;
 	}
 
@@ -1572,7 +1572,7 @@ i40e_macaddr_remove(struct rte_eth_dev *dev, uint32_t index)
 	ret = i40e_aq_mac_address_write(hw, I40E_AQC_WRITE_TYPE_LAA_ONLY,
 					hw->mac.perm_addr, NULL);
 	if (ret != I40E_SUCCESS) {
-		PMD_DRV_LOG(ERR, "Failed to write mac address\n");
+		PMD_DRV_LOG(ERR, "Failed to write mac address");
 		return;
 	}
 
@@ -1793,7 +1793,7 @@ i40e_get_cap(struct i40e_hw *hw)
 						I40E_MAX_CAP_ELE_NUM;
 	buf = rte_zmalloc("i40e", len, 0);
 	if (!buf) {
-		PMD_DRV_LOG(ERR, "Failed to allocate memory\n");
+		PMD_DRV_LOG(ERR, "Failed to allocate memory");
 		return I40E_ERR_NO_MEMORY;
 	}
 
@@ -1801,7 +1801,7 @@ i40e_get_cap(struct i40e_hw *hw)
 	ret = i40e_aq_discover_capabilities(hw, buf, len, &size,
 			i40e_aqc_opc_list_func_capabilities, NULL);
 	if (ret != I40E_SUCCESS)
-		PMD_DRV_LOG(ERR, "Failed to discover capabilities\n");
+		PMD_DRV_LOG(ERR, "Failed to discover capabilities");
 
 	/* Free the temporary buffer after being used */
 	rte_free(buf);
@@ -1818,13 +1818,13 @@ i40e_pf_parameter_init(struct rte_eth_dev *dev)
 
 	/* First check if FW support SRIOV */
 	if (dev->pci_dev->max_vfs && !hw->func_caps.sr_iov_1_1) {
-		PMD_INIT_LOG(ERR, "HW configuration doesn't support SRIOV\n");
+		PMD_INIT_LOG(ERR, "HW configuration doesn't support SRIOV");
 		return -EINVAL;
 	}
 
 	pf->flags = I40E_FLAG_HEADER_SPLIT_DISABLED;
 	pf->max_num_vsi = RTE_MIN(hw->func_caps.num_vsis, I40E_MAX_NUM_VSIS);
-	PMD_INIT_LOG(INFO, "Max supported VSIs:%u\n", pf->max_num_vsi);
+	PMD_INIT_LOG(INFO, "Max supported VSIs:%u", pf->max_num_vsi);
 	/* Allocate queues for pf */
 	if (hw->func_caps.rss) {
 		pf->flags |= I40E_FLAG_RSS;
@@ -1836,28 +1836,28 @@ i40e_pf_parameter_init(struct rte_eth_dev *dev)
 	sum_queues = pf->lan_nb_qps;
 	/* Default VSI is not counted in */
 	sum_vsis = 0;
-	PMD_INIT_LOG(INFO, "PF queue pairs:%u\n", pf->lan_nb_qps);
+	PMD_INIT_LOG(INFO, "PF queue pairs:%u", pf->lan_nb_qps);
 
 	if (hw->func_caps.sr_iov_1_1 && dev->pci_dev->max_vfs) {
 		pf->flags |= I40E_FLAG_SRIOV;
 		pf->vf_nb_qps = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF;
 		if (dev->pci_dev->max_vfs > hw->func_caps.num_vfs) {
 			PMD_INIT_LOG(ERR, "Config VF number %u, "
-				     "max supported %u.\n",
+				     "max supported %u.",
 				     dev->pci_dev->max_vfs,
 				     hw->func_caps.num_vfs);
 			return -EINVAL;
 		}
 		if (pf->vf_nb_qps > I40E_MAX_QP_NUM_PER_VF) {
 			PMD_INIT_LOG(ERR, "FVL VF queue %u, "
-				     "max support %u queues.\n",
+				     "max support %u queues.",
 				     pf->vf_nb_qps, I40E_MAX_QP_NUM_PER_VF);
 			return -EINVAL;
 		}
 		pf->vf_num = dev->pci_dev->max_vfs;
 		sum_queues += pf->vf_nb_qps * pf->vf_num;
 		sum_vsis   += pf->vf_num;
-		PMD_INIT_LOG(INFO, "Max VF num:%u each has queue pairs:%u\n",
+		PMD_INIT_LOG(INFO, "Max VF num:%u each has queue pairs:%u",
 			     pf->vf_num, pf->vf_nb_qps);
 	} else
 		pf->vf_num = 0;
@@ -1867,7 +1867,7 @@ i40e_pf_parameter_init(struct rte_eth_dev *dev)
 		pf->vmdq_nb_qps = I40E_DEFAULT_QP_NUM_VMDQ;
 		sum_queues += pf->vmdq_nb_qps;
 		sum_vsis += 1;
-		PMD_INIT_LOG(INFO, "VMDQ queue pairs:%u\n", pf->vmdq_nb_qps);
+		PMD_INIT_LOG(INFO, "VMDQ queue pairs:%u", pf->vmdq_nb_qps);
 	}
 
 	if (hw->func_caps.fd) {
@@ -1881,18 +1881,18 @@ i40e_pf_parameter_init(struct rte_eth_dev *dev)
 
 	if (sum_vsis > pf->max_num_vsi ||
 		sum_queues > hw->func_caps.num_rx_qp) {
-		PMD_INIT_LOG(ERR, "VSI/QUEUE setting can't be satisfied\n");
-		PMD_INIT_LOG(ERR, "Max VSIs: %u, asked:%u\n",
+		PMD_INIT_LOG(ERR, "VSI/QUEUE setting can't be satisfied");
+		PMD_INIT_LOG(ERR, "Max VSIs: %u, asked:%u",
 			     pf->max_num_vsi, sum_vsis);
-		PMD_INIT_LOG(ERR, "Total queue pairs:%u, asked:%u\n",
+		PMD_INIT_LOG(ERR, "Total queue pairs:%u, asked:%u",
 			     hw->func_caps.num_rx_qp, sum_queues);
 		return -EINVAL;
 	}
 
-	/* Each VSI occupy 1 MSIX interrupt at least, plus IRQ0 for misc intr cause */
+	/* Each VSI occupies at least 1 MSIX interrupt, plus IRQ0 for misc intr
+	 * cause */
 	if (sum_vsis > hw->func_caps.num_msix_vectors - 1) {
-		PMD_INIT_LOG(ERR, "Too many VSIs(%u), "
-			     "MSIX intr(%u) not enough\n",
+		PMD_INIT_LOG(ERR, "Too many VSIs(%u), MSIX intr(%u) not enough",
 			     sum_vsis, hw->func_caps.num_msix_vectors);
 		return -EINVAL;
 	}
@@ -1911,7 +1911,7 @@ i40e_pf_get_switch_config(struct i40e_pf *pf)
 	switch_config = (struct i40e_aqc_get_switch_config_resp *)\
 			rte_zmalloc("i40e", I40E_AQ_LARGE_BUF, 0);
 	if (!switch_config) {
-		PMD_DRV_LOG(ERR, "Failed to allocated memory\n");
+		PMD_DRV_LOG(ERR, "Failed to allocate memory");
 		return -ENOMEM;
 	}
 
@@ -1919,12 +1919,12 @@ i40e_pf_get_switch_config(struct i40e_pf *pf)
 	ret = i40e_aq_get_switch_config(hw, switch_config,
 		I40E_AQ_LARGE_BUF, &start_seid, NULL);
 	if (ret != I40E_SUCCESS) {
-		PMD_DRV_LOG(ERR, "Failed to get switch configurations\n");
+		PMD_DRV_LOG(ERR, "Failed to get switch configurations");
 		goto fail;
 	}
 	num_reported = rte_le_to_cpu_16(switch_config->header.num_reported);
 	if (num_reported != 1) { /* The number should be 1 */
-		PMD_DRV_LOG(ERR, "Wrong number of switch config reported\n");
+		PMD_DRV_LOG(ERR, "Wrong number of switch config reported");
 		goto fail;
 	}
 
@@ -1934,7 +1934,7 @@ i40e_pf_get_switch_config(struct i40e_pf *pf)
 		pf->mac_seid = rte_le_to_cpu_16(element->uplink_seid);
 		pf->main_vsi_seid = rte_le_to_cpu_16(element->seid);
 	} else
-		PMD_DRV_LOG(INFO, "Unknown element type\n");
+		PMD_DRV_LOG(INFO, "Unknown element type");
 
 fail:
 	rte_free(switch_config);
@@ -1953,7 +1953,7 @@ i40e_res_pool_init (struct i40e_res_pool_info *pool, uint32_t base,
 
 	entry = rte_zmalloc("i40e", sizeof(*entry), 0);
 	if (entry == NULL) {
-		PMD_DRV_LOG(ERR, "Failed to allocate memory for resource pool\n");
+		PMD_DRV_LOG(ERR, "Failed to allocate memory for resource pool");
 		return -ENOMEM;
 	}
 
@@ -2006,7 +2006,7 @@ i40e_res_pool_free(struct i40e_res_pool_info *pool,
 	int insert;
 
 	if (pool == NULL) {
-		PMD_DRV_LOG(ERR, "Invalid parameter\n");
+		PMD_DRV_LOG(ERR, "Invalid parameter");
 		return -EINVAL;
 	}
 
@@ -2022,7 +2022,7 @@ i40e_res_pool_free(struct i40e_res_pool_info *pool,
 
 	/* Not find, return */
 	if (valid_entry == NULL) {
-		PMD_DRV_LOG(ERR, "Failed to find entry\n");
+		PMD_DRV_LOG(ERR, "Failed to find entry");
 		return -EINVAL;
 	}
 
@@ -2091,12 +2091,12 @@ i40e_res_pool_alloc(struct i40e_res_pool_info *pool,
 	struct pool_entry *entry, *valid_entry;
 
 	if (pool == NULL || num == 0) {
-		PMD_DRV_LOG(ERR, "Invalid parameter\n");
+		PMD_DRV_LOG(ERR, "Invalid parameter");
 		return -EINVAL;
 	}
 
 	if (pool->num_free < num) {
-		PMD_DRV_LOG(ERR, "No resource. ask:%u, available:%u\n",
+		PMD_DRV_LOG(ERR, "No resource. ask:%u, available:%u",
 			    num, pool->num_free);
 		return -ENOMEM;
 	}
@@ -2117,7 +2117,7 @@ i40e_res_pool_alloc(struct i40e_res_pool_info *pool,
 
 	/* Not find one to satisfy the request, return */
 	if (valid_entry == NULL) {
-		PMD_DRV_LOG(ERR, "No valid entry found\n");
+		PMD_DRV_LOG(ERR, "No valid entry found");
 		return -ENOMEM;
 	}
 	/**
@@ -2135,7 +2135,7 @@ i40e_res_pool_alloc(struct i40e_res_pool_info *pool,
 		entry = rte_zmalloc("res_pool", sizeof(*entry), 0);
 		if (entry == NULL) {
 			PMD_DRV_LOG(ERR, "Failed to allocate memory for "
-				    "resource pool\n");
+				    "resource pool");
 			return -ENOMEM;
 		}
 		entry->base = valid_entry->base;
@@ -2170,13 +2170,13 @@ validate_tcmap_parameter(struct i40e_vsi *vsi, uint8_t enabled_tcmap)
 
 	/* If DCB is not supported, only default TC is supported */
 	if (!hw->func_caps.dcb && enabled_tcmap != I40E_DEFAULT_TCMAP) {
-		PMD_DRV_LOG(ERR, "DCB is not enabled, only TC0 is supported\n");
+		PMD_DRV_LOG(ERR, "DCB is not enabled, only TC0 is supported");
 		return -EINVAL;
 	}
 
 	if (!bitmap_is_subset(hw->func_caps.enabled_tcmap, enabled_tcmap)) {
 		PMD_DRV_LOG(ERR, "Enabled TC map 0x%x not applicable to "
-			    "HW support 0x%x\n", hw->func_caps.enabled_tcmap,
+			    "HW support 0x%x", hw->func_caps.enabled_tcmap,
 			    enabled_tcmap);
 		return -EINVAL;
 	}
@@ -2193,7 +2193,7 @@ i40e_vsi_vlan_pvid_set(struct i40e_vsi *vsi,
 	int ret;
 
 	if (vsi == NULL || info == NULL) {
-		PMD_DRV_LOG(ERR, "invalid parameters\n");
+		PMD_DRV_LOG(ERR, "invalid parameters");
 		return I40E_ERR_PARAM;
 	}
 
@@ -2225,7 +2225,7 @@ i40e_vsi_vlan_pvid_set(struct i40e_vsi *vsi,
 	hw = I40E_VSI_TO_HW(vsi);
 	ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
 	if (ret != I40E_SUCCESS)
-		PMD_DRV_LOG(ERR, "Failed to update VSI params\n");
+		PMD_DRV_LOG(ERR, "Failed to update VSI params");
 
 	return ret;
 }
@@ -2242,7 +2242,7 @@ i40e_vsi_update_tc_bandwidth(struct i40e_vsi *vsi, uint8_t enabled_tcmap)
 		return ret;
 
 	if (!vsi->seid) {
-		PMD_DRV_LOG(ERR, "seid not valid\n");
+		PMD_DRV_LOG(ERR, "seid not valid");
 		return -EINVAL;
 	}
 
@@ -2254,7 +2254,7 @@ i40e_vsi_update_tc_bandwidth(struct i40e_vsi *vsi, uint8_t enabled_tcmap)
 
 	ret = i40e_aq_config_vsi_tc_bw(hw, vsi->seid, &tc_bw_data, NULL);
 	if (ret != I40E_SUCCESS) {
-		PMD_DRV_LOG(ERR, "Failed to configure TC BW\n");
+		PMD_DRV_LOG(ERR, "Failed to configure TC BW");
 		return ret;
 	}
 
@@ -2332,7 +2332,7 @@ i40e_veb_release(struct i40e_veb *veb)
 		return -EINVAL;
 
 	if (!TAILQ_EMPTY(&veb->head)) {
-		PMD_DRV_LOG(ERR, "VEB still has VSI attached, can't remove\n");
+		PMD_DRV_LOG(ERR, "VEB still has VSI attached, can't remove");
 		return -EACCES;
 	}
 
@@ -2356,14 +2356,14 @@ i40e_veb_setup(struct i40e_pf *pf, struct i40e_vsi *vsi)
 
 	if (NULL == pf || vsi == NULL) {
 		PMD_DRV_LOG(ERR, "veb setup failed, "
-			    "associated VSI shouldn't null\n");
+			    "associated VSI shouldn't be NULL");
 		return NULL;
 	}
 	hw = I40E_PF_TO_HW(pf);
 
 	veb = rte_zmalloc("i40e_veb", sizeof(struct i40e_veb), 0);
 	if (!veb) {
-		PMD_DRV_LOG(ERR, "Failed to allocate memory for veb\n");
+		PMD_DRV_LOG(ERR, "Failed to allocate memory for veb");
 		goto fail;
 	}
 
@@ -2375,7 +2375,7 @@ i40e_veb_setup(struct i40e_pf *pf, struct i40e_vsi *vsi)
 		I40E_DEFAULT_TCMAP, false, false, &veb->seid, NULL);
 
 	if (ret != I40E_SUCCESS) {
-		PMD_DRV_LOG(ERR, "Add veb failed, aq_err: %d\n",
+		PMD_DRV_LOG(ERR, "Add veb failed, aq_err: %d",
 			    hw->aq.asq_last_status);
 		goto fail;
 	}
@@ -2384,7 +2384,7 @@ i40e_veb_setup(struct i40e_pf *pf, struct i40e_vsi *vsi)
 	ret = i40e_aq_get_veb_parameters(hw, veb->seid, NULL, NULL,
 				&veb->stats_idx, NULL, NULL, NULL);
 	if (ret != I40E_SUCCESS) {
-		PMD_DRV_LOG(ERR, "Get veb statics index failed, aq_err: %d\n",
+		PMD_DRV_LOG(ERR, "Get veb statistics index failed, aq_err: %d",
 			    hw->aq.asq_last_status);
 		goto fail;
 	}
@@ -2432,7 +2432,7 @@ i40e_vsi_release(struct i40e_vsi *vsi)
 	if (vsi->type != I40E_VSI_MAIN) {
 		/* Remove vsi from parent's sibling list */
 		if (vsi->parent_vsi == NULL || vsi->parent_vsi->veb == NULL) {
-			PMD_DRV_LOG(ERR, "VSI's parent VSI is NULL\n");
+			PMD_DRV_LOG(ERR, "VSI's parent VSI is NULL");
 			return I40E_ERR_PARAM;
 		}
 		TAILQ_REMOVE(&vsi->parent_vsi->veb->head,
@@ -2441,7 +2441,7 @@ i40e_vsi_release(struct i40e_vsi *vsi)
 		/* Remove all switch element of the VSI */
 		ret = i40e_aq_delete_element(hw, vsi->seid, NULL);
 		if (ret != I40E_SUCCESS)
-			PMD_DRV_LOG(ERR, "Failed to delete element\n");
+			PMD_DRV_LOG(ERR, "Failed to delete element");
 	}
 	i40e_res_pool_free(&pf->qp_pool, vsi->base_queue);
 
@@ -2472,11 +2472,11 @@ i40e_update_default_filter_setting(struct i40e_vsi *vsi)
 		struct i40e_mac_filter *f;
 
 		PMD_DRV_LOG(WARNING, "Cannot remove the default "
-			    "macvlan filter\n");
+			    "macvlan filter");
 		/* It needs to add the permanent mac into mac list */
 		f = rte_zmalloc("macv_filter", sizeof(*f), 0);
 		if (f == NULL) {
-			PMD_DRV_LOG(ERR, "failed to allocate memory\n");
+			PMD_DRV_LOG(ERR, "failed to allocate memory");
 			return I40E_ERR_NO_MEMORY;
 		}
 		(void)rte_memcpy(&f->macaddr.addr_bytes, hw->mac.perm_addr,
@@ -2502,7 +2502,7 @@ i40e_vsi_dump_bw_config(struct i40e_vsi *vsi)
 	memset(&bw_config, 0, sizeof(bw_config));
 	ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
 	if (ret != I40E_SUCCESS) {
-		PMD_DRV_LOG(ERR, "VSI failed to get bandwidth configuration %u\n",
+		PMD_DRV_LOG(ERR, "VSI failed to get bandwidth configuration %u",
 			    hw->aq.asq_last_status);
 		return ret;
 	}
@@ -2512,17 +2512,17 @@ i40e_vsi_dump_bw_config(struct i40e_vsi *vsi)
 					&ets_sla_config, NULL);
 	if (ret != I40E_SUCCESS) {
 		PMD_DRV_LOG(ERR, "VSI failed to get TC bandwdith "
-			    "configuration %u\n", hw->aq.asq_last_status);
+			    "configuration %u", hw->aq.asq_last_status);
 		return ret;
 	}
 
 	/* Not store the info yet, just print out */
-	PMD_DRV_LOG(INFO, "VSI bw limit:%u\n", bw_config.port_bw_limit);
-	PMD_DRV_LOG(INFO, "VSI max_bw:%u\n", bw_config.max_bw);
+	PMD_DRV_LOG(INFO, "VSI bw limit:%u", bw_config.port_bw_limit);
+	PMD_DRV_LOG(INFO, "VSI max_bw:%u", bw_config.max_bw);
 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
-		PMD_DRV_LOG(INFO, "\tVSI TC%u:share credits %u\n", i,
+		PMD_DRV_LOG(INFO, "\tVSI TC%u:share credits %u", i,
 			    ets_sla_config.share_credits[i]);
-		PMD_DRV_LOG(INFO, "\tVSI TC%u:credits %u\n", i,
+		PMD_DRV_LOG(INFO, "\tVSI TC%u:credits %u", i,
 			    rte_le_to_cpu_16(ets_sla_config.credits[i]));
 		PMD_DRV_LOG(INFO, "\tVSI TC%u: max credits: %u", i,
 			    rte_le_to_cpu_16(ets_sla_config.credits[i / 4]) >>
@@ -2548,13 +2548,13 @@ i40e_vsi_setup(struct i40e_pf *pf,
 
 	if (type != I40E_VSI_MAIN && uplink_vsi == NULL) {
 		PMD_DRV_LOG(ERR, "VSI setup failed, "
-			    "VSI link shouldn't be NULL\n");
+			    "VSI link shouldn't be NULL");
 		return NULL;
 	}
 
 	if (type == I40E_VSI_MAIN && uplink_vsi != NULL) {
 		PMD_DRV_LOG(ERR, "VSI setup failed, MAIN VSI "
-			    "uplink VSI should be NULL\n");
+			    "uplink VSI should be NULL");
 		return NULL;
 	}
 
@@ -2563,14 +2563,14 @@ i40e_vsi_setup(struct i40e_pf *pf,
 		uplink_vsi->veb = i40e_veb_setup(pf, uplink_vsi);
 
 		if (NULL == uplink_vsi->veb) {
-			PMD_DRV_LOG(ERR, "VEB setup failed\n");
+			PMD_DRV_LOG(ERR, "VEB setup failed");
 			return NULL;
 		}
 	}
 
 	vsi = rte_zmalloc("i40e_vsi", sizeof(struct i40e_vsi), 0);
 	if (!vsi) {
-		PMD_DRV_LOG(ERR, "Failed to allocate memory for vsi\n");
+		PMD_DRV_LOG(ERR, "Failed to allocate memory for vsi");
 		return NULL;
 	}
 	TAILQ_INIT(&vsi->mac_list);
@@ -2628,7 +2628,7 @@ i40e_vsi_setup(struct i40e_pf *pf,
 		ctxt.vf_num = 0;
 		ret = i40e_aq_get_vsi_params(hw, &ctxt, NULL);
 		if (ret != I40E_SUCCESS) {
-			PMD_DRV_LOG(ERR, "Failed to get VSI params\n");
+			PMD_DRV_LOG(ERR, "Failed to get VSI params");
 			goto fail_msix_alloc;
 		}
 		(void)rte_memcpy(&vsi->info, &ctxt.info,
@@ -2639,7 +2639,7 @@ i40e_vsi_setup(struct i40e_pf *pf,
 		/* Configure tc, enabled TC0 only */
 		if (i40e_vsi_update_tc_bandwidth(vsi, I40E_DEFAULT_TCMAP) !=
 			I40E_SUCCESS) {
-			PMD_DRV_LOG(ERR, "Failed to update TC bandwidth\n");
+			PMD_DRV_LOG(ERR, "Failed to update TC bandwidth");
 			goto fail_msix_alloc;
 		}
 
@@ -2655,7 +2655,7 @@ i40e_vsi_setup(struct i40e_pf *pf,
 						I40E_DEFAULT_TCMAP);
 		if (ret != I40E_SUCCESS) {
 			PMD_DRV_LOG(ERR, "Failed to configure "
-				    "TC queue mapping\n");
+				    "TC queue mapping");
 			goto fail_msix_alloc;
 		}
 		ctxt.seid = vsi->seid;
@@ -2666,7 +2666,7 @@ i40e_vsi_setup(struct i40e_pf *pf,
 		/* Update VSI parameters */
 		ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
 		if (ret != I40E_SUCCESS) {
-			PMD_DRV_LOG(ERR, "Failed to update VSI params\n");
+			PMD_DRV_LOG(ERR, "Failed to update VSI params");
 			goto fail_msix_alloc;
 		}
 
@@ -2718,7 +2718,7 @@ i40e_vsi_setup(struct i40e_pf *pf,
 						I40E_DEFAULT_TCMAP);
 		if (ret != I40E_SUCCESS) {
 			PMD_DRV_LOG(ERR, "Failed to configure "
-				    "TC queue mapping\n");
+				    "TC queue mapping");
 			goto fail_msix_alloc;
 		}
 		ctxt.info.up_enable_bits = I40E_DEFAULT_TCMAP;
@@ -2730,14 +2730,14 @@ i40e_vsi_setup(struct i40e_pf *pf,
 		 */
 	}
 	else {
-		PMD_DRV_LOG(ERR, "VSI: Not support other type VSI yet\n");
+		PMD_DRV_LOG(ERR, "VSI: other VSI types not supported yet");
 		goto fail_msix_alloc;
 	}
 
 	if (vsi->type != I40E_VSI_MAIN) {
 		ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
 		if (ret) {
-			PMD_DRV_LOG(ERR, "add vsi failed, aq_err=%d\n",
+			PMD_DRV_LOG(ERR, "add vsi failed, aq_err=%d",
 				    hw->aq.asq_last_status);
 			goto fail_msix_alloc;
 		}
@@ -2753,7 +2753,7 @@ i40e_vsi_setup(struct i40e_pf *pf,
 	/* MAC/VLAN configuration */
 	ret = i40e_vsi_add_mac(vsi, &broadcast);
 	if (ret != I40E_SUCCESS) {
-		PMD_DRV_LOG(ERR, "Failed to add MACVLAN filter\n");
+		PMD_DRV_LOG(ERR, "Failed to add MACVLAN filter");
 		goto fail_msix_alloc;
 	}
 
@@ -2805,7 +2805,7 @@ i40e_vsi_config_vlan_stripping(struct i40e_vsi *vsi, bool on)
 	(void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
 	ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
 	if (ret)
-		PMD_DRV_LOG(INFO, "Update VSI failed to %s vlan stripping\n",
+		PMD_DRV_LOG(INFO, "Update VSI failed to %s vlan stripping",
 			    on ? "enable" : "disable");
 
 	return ret;
@@ -2826,7 +2826,7 @@ i40e_dev_init_vlan(struct rte_eth_dev *dev)
 	ret = i40e_vlan_pvid_set(dev, data->dev_conf.txmode.pvid,
 				data->dev_conf.txmode.hw_vlan_insert_pvid);
 	if (ret)
-		PMD_DRV_LOG(INFO, "Failed to update VSI params\n");
+		PMD_DRV_LOG(INFO, "Failed to update VSI params");
 
 	return ret;
 }
@@ -2851,13 +2851,13 @@ i40e_update_flow_control(struct i40e_hw *hw)
 	memset(&link_status, 0, sizeof(link_status));
 	ret = i40e_aq_get_link_info(hw, FALSE, &link_status, NULL);
 	if (ret != I40E_SUCCESS) {
-		PMD_DRV_LOG(ERR, "Failed to get link status information\n");
+		PMD_DRV_LOG(ERR, "Failed to get link status information");
 		goto write_reg; /* Disable flow control */
 	}
 
 	an_info = hw->phy.link_info.an_info;
 	if (!(an_info & I40E_AQ_AN_COMPLETED)) {
-		PMD_DRV_LOG(INFO, "Link auto negotiation not completed\n");
+		PMD_DRV_LOG(INFO, "Link auto negotiation not completed");
 		ret = I40E_ERR_NOT_READY;
 		goto write_reg; /* Disable flow control */
 	}
@@ -2995,7 +2995,7 @@ i40e_switch_tx_queue(struct i40e_hw *hw, uint16_t q_idx, bool on)
 	}
 	/* Check if it is timeout */
 	if (j >= I40E_CHK_Q_ENA_COUNT) {
-		PMD_DRV_LOG(ERR, "Failed to %s tx queue[%u]\n",
+		PMD_DRV_LOG(ERR, "Failed to %s tx queue[%u]",
 			    (on ? "enable" : "disable"), q_idx);
 		return I40E_ERR_TIMEOUT;
 	}
@@ -3074,7 +3074,7 @@ i40e_switch_rx_queue(struct i40e_hw *hw, uint16_t q_idx, bool on)
 
 	/* Check if it is timeout */
 	if (j >= I40E_CHK_Q_ENA_COUNT) {
-		PMD_DRV_LOG(ERR, "Failed to %s rx queue[%u]\n",
+		PMD_DRV_LOG(ERR, "Failed to %s rx queue[%u]",
 			    (on ? "enable" : "disable"), q_idx);
 		return I40E_ERR_TIMEOUT;
 	}
@@ -3118,7 +3118,7 @@ i40e_vsi_switch_queues(struct i40e_vsi *vsi, bool on)
 		/* enable rx queues before enabling tx queues */
 		ret = i40e_vsi_switch_rx_queues(vsi, on);
 		if (ret) {
-			PMD_DRV_LOG(ERR, "Failed to switch rx queues\n");
+			PMD_DRV_LOG(ERR, "Failed to switch rx queues");
 			return ret;
 		}
 		ret = i40e_vsi_switch_tx_queues(vsi, on);
@@ -3126,7 +3126,7 @@ i40e_vsi_switch_queues(struct i40e_vsi *vsi, bool on)
 		/* Stop tx queues before stopping rx queues */
 		ret = i40e_vsi_switch_tx_queues(vsi, on);
 		if (ret) {
-			PMD_DRV_LOG(ERR, "Failed to switch tx queues\n");
+			PMD_DRV_LOG(ERR, "Failed to switch tx queues");
 			return ret;
 		}
 		ret = i40e_vsi_switch_rx_queues(vsi, on);
@@ -3167,7 +3167,7 @@ i40e_vsi_rx_init(struct i40e_vsi *vsi)
 		ret = i40e_rx_queue_init(data->rx_queues[i]);
 		if (ret != I40E_SUCCESS) {
 			PMD_DRV_LOG(ERR, "Failed to do RX queue "
-				    "initialization\n");
+				    "initialization");
 			break;
 		}
 	}
@@ -3183,12 +3183,12 @@ i40e_vsi_init(struct i40e_vsi *vsi)
 
 	err = i40e_vsi_tx_init(vsi);
 	if (err) {
-		PMD_DRV_LOG(ERR, "Failed to do vsi TX initialization\n");
+		PMD_DRV_LOG(ERR, "Failed to do vsi TX initialization");
 		return err;
 	}
 	err = i40e_vsi_rx_init(vsi);
 	if (err) {
-		PMD_DRV_LOG(ERR, "Failed to do vsi RX initialization\n");
+		PMD_DRV_LOG(ERR, "Failed to do vsi RX initialization");
 		return err;
 	}
 
@@ -3317,14 +3317,14 @@ i40e_dev_handle_vfr_event(struct rte_eth_dev *dev)
 			/* Clear the event first */
 			I40E_WRITE_REG(hw, I40E_GLGEN_VFLRSTAT(index),
 							(0x1 << offset));
-			PMD_DRV_LOG(INFO, "VF %u reset occured\n", abs_vf_id);
+			PMD_DRV_LOG(INFO, "VF %u reset occurred", abs_vf_id);
 			/**
 			 * Only notify a VF reset event occured,
 			 * don't trigger another SW reset
 			 */
 			ret = i40e_pf_host_vf_reset(&pf->vfs[i], 0);
 			if (ret != I40E_SUCCESS)
-				PMD_DRV_LOG(ERR, "Failed to do VF reset\n");
+				PMD_DRV_LOG(ERR, "Failed to do VF reset");
 		}
 	}
 }
@@ -3340,7 +3340,7 @@ i40e_dev_handle_aq_msg(struct rte_eth_dev *dev)
 	info.msg_size = I40E_AQ_BUF_SZ;
 	info.msg_buf = rte_zmalloc("msg_buffer", I40E_AQ_BUF_SZ, 0);
 	if (!info.msg_buf) {
-		PMD_DRV_LOG(ERR, "Failed to allocate mem\n");
+		PMD_DRV_LOG(ERR, "Failed to allocate mem");
 		return;
 	}
 
@@ -3350,7 +3350,7 @@ i40e_dev_handle_aq_msg(struct rte_eth_dev *dev)
 
 		if (ret != I40E_SUCCESS) {
 			PMD_DRV_LOG(INFO, "Failed to read msg from AdminQ, "
-				    "aq_err: %u\n", hw->aq.asq_last_status);
+				    "aq_err: %u", hw->aq.asq_last_status);
 			break;
 		}
 		opcode = rte_le_to_cpu_16(info.desc.opcode);
@@ -3366,7 +3366,7 @@ i40e_dev_handle_aq_msg(struct rte_eth_dev *dev)
 					info.msg_size);
 			break;
 		default:
-			PMD_DRV_LOG(ERR, "Request %u is not supported yet\n",
+			PMD_DRV_LOG(ERR, "Request %u is not supported yet",
 				    opcode);
 			break;
 		}
@@ -3404,38 +3404,38 @@ i40e_dev_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
 	/* Shared IRQ case, return */
 	if (!(cause & I40E_PFINT_ICR0_INTEVENT_MASK)) {
 		PMD_DRV_LOG(INFO, "Port%d INT0:share IRQ case, "
-			    "no INT event to process\n", hw->pf_id);
+			    "no INT event to process", hw->pf_id);
 		goto done;
 	}
 
 	if (cause & I40E_PFINT_ICR0_LINK_STAT_CHANGE_MASK) {
-		PMD_DRV_LOG(INFO, "INT:Link status changed\n");
+		PMD_DRV_LOG(INFO, "INT:Link status changed");
 		i40e_dev_link_update(dev, 0);
 	}
 
 	if (cause & I40E_PFINT_ICR0_ECC_ERR_MASK)
-		PMD_DRV_LOG(INFO, "INT:Unrecoverable ECC Error\n");
+		PMD_DRV_LOG(INFO, "INT:Unrecoverable ECC Error");
 
 	if (cause & I40E_PFINT_ICR0_MAL_DETECT_MASK)
-		PMD_DRV_LOG(INFO, "INT:Malicious programming detected\n");
+		PMD_DRV_LOG(INFO, "INT:Malicious programming detected");
 
 	if (cause & I40E_PFINT_ICR0_GRST_MASK)
-		PMD_DRV_LOG(INFO, "INT:Global Resets Requested\n");
+		PMD_DRV_LOG(INFO, "INT:Global Resets Requested");
 
 	if (cause & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK)
-		PMD_DRV_LOG(INFO, "INT:PCI EXCEPTION occured\n");
+		PMD_DRV_LOG(INFO, "INT:PCI EXCEPTION occurred");
 
 	if (cause & I40E_PFINT_ICR0_HMC_ERR_MASK)
-		PMD_DRV_LOG(INFO, "INT:HMC error occured\n");
+		PMD_DRV_LOG(INFO, "INT:HMC error occurred");
 
 	/* Add processing func to deal with VF reset vent */
 	if (cause & I40E_PFINT_ICR0_VFLR_MASK) {
-		PMD_DRV_LOG(INFO, "INT:VF reset detected\n");
+		PMD_DRV_LOG(INFO, "INT:VF reset detected");
 		i40e_dev_handle_vfr_event(dev);
 	}
 	/* Find admin queue event */
 	if (cause & I40E_PFINT_ICR0_ADMINQ_MASK) {
-		PMD_DRV_LOG(INFO, "INT:ADMINQ event\n");
+		PMD_DRV_LOG(INFO, "INT:ADMINQ event");
 		i40e_dev_handle_aq_msg(dev);
 	}
 
@@ -3465,7 +3465,7 @@ i40e_add_macvlan_filters(struct i40e_vsi *vsi,
 
 	req_list = rte_zmalloc("macvlan_add", ele_buff_size, 0);
 	if (req_list == NULL) {
-		PMD_DRV_LOG(ERR, "Fail to allocate memory\n");
+		PMD_DRV_LOG(ERR, "Fail to allocate memory");
 		return I40E_ERR_NO_MEMORY;
 	}
 
@@ -3487,7 +3487,7 @@ i40e_add_macvlan_filters(struct i40e_vsi *vsi,
 		ret = i40e_aq_add_macvlan(hw, vsi->seid, req_list,
 						actual_num, NULL);
 		if (ret != I40E_SUCCESS) {
-			PMD_DRV_LOG(ERR, "Failed to add macvlan filter\n");
+			PMD_DRV_LOG(ERR, "Failed to add macvlan filter");
 			goto DONE;
 		}
 		num += actual_num;
@@ -3517,7 +3517,7 @@ i40e_remove_macvlan_filters(struct i40e_vsi *vsi,
 
 	req_list = rte_zmalloc("macvlan_remove", ele_buff_size, 0);
 	if (req_list == NULL) {
-		PMD_DRV_LOG(ERR, "Fail to allocate memory\n");
+		PMD_DRV_LOG(ERR, "Fail to allocate memory");
 		return I40E_ERR_NO_MEMORY;
 	}
 
@@ -3537,7 +3537,7 @@ i40e_remove_macvlan_filters(struct i40e_vsi *vsi,
 		ret = i40e_aq_remove_macvlan(hw, vsi->seid, req_list,
 						actual_num, NULL);
 		if (ret != I40E_SUCCESS) {
-			PMD_DRV_LOG(ERR, "Failed to remove macvlan filter\n");
+			PMD_DRV_LOG(ERR, "Failed to remove macvlan filter");
 			goto DONE;
 		}
 		num += actual_num;
@@ -3625,7 +3625,7 @@ i40e_find_all_vlan_for_mac(struct i40e_vsi *vsi,
 				if (vsi->vfta[j] & (1 << k)) {
 					if (i > num - 1) {
 						PMD_DRV_LOG(ERR, "vlan number "
-							    "not match\n");
+							    "not match");
 						return I40E_ERR_PARAM;
 					}
 					(void)rte_memcpy(&mv_f[i].macaddr,
@@ -3654,7 +3654,7 @@ i40e_find_all_mac_for_vlan(struct i40e_vsi *vsi,
 
 	TAILQ_FOREACH(f, &vsi->mac_list, next) {
 		if (i > num - 1) {
-			PMD_DRV_LOG(ERR, "buffer number not match\n");
+			PMD_DRV_LOG(ERR, "buffer number does not match");
 			return I40E_ERR_PARAM;
 		}
 		(void)rte_memcpy(&mv_f[i].macaddr, &f->macaddr, ETH_ADDR_LEN);
@@ -3684,7 +3684,7 @@ i40e_vsi_remove_all_macvlan_filter(struct i40e_vsi *vsi)
 
 	mv_f = rte_zmalloc("macvlan_data", num * sizeof(*mv_f), 0);
 	if (mv_f == NULL) {
-		PMD_DRV_LOG(ERR, "failed to allocate memory\n");
+		PMD_DRV_LOG(ERR, "failed to allocate memory");
 		return I40E_ERR_NO_MEMORY;
 	}
 
@@ -3730,14 +3730,14 @@ i40e_vsi_add_vlan(struct i40e_vsi *vsi, uint16_t vlan)
 	mac_num = vsi->mac_num;
 
 	if (mac_num == 0) {
-		PMD_DRV_LOG(ERR, "Error! VSI doesn't have a mac addr\n");
+		PMD_DRV_LOG(ERR, "Error! VSI doesn't have a mac addr");
 		return I40E_ERR_PARAM;
 	}
 
 	mv_f = rte_zmalloc("macvlan_data", mac_num * sizeof(*mv_f), 0);
 
 	if (mv_f == NULL) {
-		PMD_DRV_LOG(ERR, "failed to allocate memory\n");
+		PMD_DRV_LOG(ERR, "failed to allocate memory");
 		return I40E_ERR_NO_MEMORY;
 	}
 
@@ -3781,14 +3781,14 @@ i40e_vsi_delete_vlan(struct i40e_vsi *vsi, uint16_t vlan)
 	mac_num = vsi->mac_num;
 
 	if (mac_num == 0) {
-		PMD_DRV_LOG(ERR, "Error! VSI doesn't have a mac addr\n");
+		PMD_DRV_LOG(ERR, "Error! VSI doesn't have a mac addr");
 		return I40E_ERR_PARAM;
 	}
 
 	mv_f = rte_zmalloc("macvlan_data", mac_num * sizeof(*mv_f), 0);
 
 	if (mv_f == NULL) {
-		PMD_DRV_LOG(ERR, "failed to allocate memory\n");
+		PMD_DRV_LOG(ERR, "failed to allocate memory");
 		return I40E_ERR_NO_MEMORY;
 	}
 
@@ -3848,7 +3848,7 @@ i40e_vsi_add_mac(struct i40e_vsi *vsi, struct ether_addr *addr)
 
 	mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0);
 	if (mv_f == NULL) {
-		PMD_DRV_LOG(ERR, "failed to allocate memory\n");
+		PMD_DRV_LOG(ERR, "failed to allocate memory");
 		return I40E_ERR_NO_MEMORY;
 	}
 
@@ -3863,7 +3863,7 @@ i40e_vsi_add_mac(struct i40e_vsi *vsi, struct ether_addr *addr)
 	/* Add the mac addr into mac list */
 	f = rte_zmalloc("macv_filter", sizeof(*f), 0);
 	if (f == NULL) {
-		PMD_DRV_LOG(ERR, "failed to allocate memory\n");
+		PMD_DRV_LOG(ERR, "failed to allocate memory");
 		ret = I40E_ERR_NO_MEMORY;
 		goto DONE;
 	}
@@ -3893,12 +3893,12 @@ i40e_vsi_delete_mac(struct i40e_vsi *vsi, struct ether_addr *addr)
 
 	vlan_num = vsi->vlan_num;
 	if (vlan_num == 0) {
-		PMD_DRV_LOG(ERR, "VLAN number shouldn't be 0\n");
+		PMD_DRV_LOG(ERR, "VLAN number shouldn't be 0");
 		return I40E_ERR_PARAM;
 	}
 	mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0);
 	if (mv_f == NULL) {
-		PMD_DRV_LOG(ERR, "failed to allocate memory\n");
+		PMD_DRV_LOG(ERR, "failed to allocate memory");
 		return I40E_ERR_NO_MEMORY;
 	}
 
diff --git a/lib/librte_pmd_i40e/i40e_ethdev_vf.c b/lib/librte_pmd_i40e/i40e_ethdev_vf.c
index 17009bd..f6c4873 100644
--- a/lib/librte_pmd_i40e/i40e_ethdev_vf.c
+++ b/lib/librte_pmd_i40e/i40e_ethdev_vf.c
@@ -205,21 +205,21 @@ i40evf_parse_pfmsg(struct i40e_vf *vf,
 			vf->link_up =
 				vpe->event_data.link_event.link_status;
 			vf->pend_msg |= PFMSG_LINK_CHANGE;
-			PMD_DRV_LOG(INFO, "Link status update:%s\n",
+			PMD_DRV_LOG(INFO, "Link status update:%s",
 				    vf->link_up ? "up" : "down");
 			break;
 		case I40E_VIRTCHNL_EVENT_RESET_IMPENDING:
 			vf->vf_reset = true;
 			vf->pend_msg |= PFMSG_RESET_IMPENDING;
-			PMD_DRV_LOG(INFO, "vf is reseting\n");
+			PMD_DRV_LOG(INFO, "vf is resetting");
 			break;
 		case I40E_VIRTCHNL_EVENT_PF_DRIVER_CLOSE:
 			vf->dev_closed = true;
 			vf->pend_msg |= PFMSG_DRIVER_CLOSE;
-			PMD_DRV_LOG(INFO, "PF driver closed\n");
+			PMD_DRV_LOG(INFO, "PF driver closed");
 			break;
 		default:
-			PMD_DRV_LOG(ERR, "%s: Unknown event %d from pf\n",
+			PMD_DRV_LOG(ERR, "%s: Unknown event %d from pf",
 				    __func__, vpe->event);
 		}
 	} else {
@@ -314,7 +314,7 @@ _atomic_set_cmd(struct i40e_vf *vf, enum i40e_virtchnl_ops ops)
 			I40E_VIRTCHNL_OP_UNKNOWN, ops);
 
 	if (!ret)
-		PMD_DRV_LOG(ERR, "There is incomplete cmd %d\n", vf->pend_cmd);
+		PMD_DRV_LOG(ERR, "There is incomplete cmd %d", vf->pend_cmd);
 
 	return !ret;
 }
@@ -338,7 +338,7 @@ i40evf_execute_vf_cmd(struct rte_eth_dev *dev, struct vf_cmd_info *args)
 	err = i40e_aq_send_msg_to_pf(hw, args->ops, I40E_SUCCESS,
 		     args->in_args, args->in_args_size, NULL);
 	if (err) {
-		PMD_DRV_LOG(ERR, "fail to send cmd %d\n", args->ops);
+		PMD_DRV_LOG(ERR, "fail to send cmd %d", args->ops);
 		return err;
 	}
 
@@ -347,9 +347,9 @@ i40evf_execute_vf_cmd(struct rte_eth_dev *dev, struct vf_cmd_info *args)
 	if (!err && args->ops == info.ops)
 		_clear_cmd(vf);
 	else if (err)
-		PMD_DRV_LOG(ERR, "Failed to read message from AdminQ\n");
+		PMD_DRV_LOG(ERR, "Failed to read message from AdminQ");
 	else if (args->ops != info.ops)
-		PMD_DRV_LOG(ERR, "command mismatch, expect %u, get %u\n",
+		PMD_DRV_LOG(ERR, "command mismatch, expect %u, get %u",
 			    args->ops, info.ops);
 
 	return (err | info.result);
@@ -377,7 +377,7 @@ i40evf_check_api_version(struct rte_eth_dev *dev)
 
 	err = i40evf_execute_vf_cmd(dev, &args);
 	if (err) {
-		PMD_INIT_LOG(ERR, "fail to execute command OP_VERSION\n");
+		PMD_INIT_LOG(ERR, "fail to execute command OP_VERSION");
 		return err;
 	}
 
@@ -385,13 +385,13 @@ i40evf_check_api_version(struct rte_eth_dev *dev)
 	/* We are talking with DPDK host */
 	if (pver->major == I40E_DPDK_VERSION_MAJOR) {
 		vf->host_is_dpdk = TRUE;
-		PMD_DRV_LOG(INFO, "Detect PF host is DPDK app\n");
+		PMD_DRV_LOG(INFO, "Detect PF host is DPDK app");
 	}
 	/* It's linux host driver */
 	else if ((pver->major != version.major) ||
 	    (pver->minor != version.minor)) {
 		PMD_INIT_LOG(ERR, "pf/vf API version mismatch. "
-			     "(%u.%u)-(%u.%u)\n", pver->major, pver->minor,
+			     "(%u.%u)-(%u.%u)", pver->major, pver->minor,
 			     version.major, version.minor);
 		return -1;
 	}
@@ -417,7 +417,7 @@ i40evf_get_vf_resource(struct rte_eth_dev *dev)
 	err = i40evf_execute_vf_cmd(dev, &args);
 
 	if (err) {
-		PMD_DRV_LOG(ERR, "fail to execute command OP_GET_VF_RESOURCE\n");
+		PMD_DRV_LOG(ERR, "fail to execute command OP_GET_VF_RESOURCE");
 		return err;
 	}
 
@@ -460,7 +460,7 @@ i40evf_config_promisc(struct rte_eth_dev *dev,
 
 	if (err)
 		PMD_DRV_LOG(ERR, "fail to execute command "
-			    "CONFIG_PROMISCUOUS_MODE\n");
+			    "CONFIG_PROMISCUOUS_MODE");
 	return err;
 }
 
@@ -485,7 +485,7 @@ i40evf_config_vlan_offload(struct rte_eth_dev *dev,
 
 	err = i40evf_execute_vf_cmd(dev, &args);
 	if (err)
-		PMD_DRV_LOG(ERR, "fail to execute command CFG_VLAN_OFFLOAD\n");
+		PMD_DRV_LOG(ERR, "fail to execute command CFG_VLAN_OFFLOAD");
 
 	return err;
 }
@@ -500,7 +500,7 @@ i40evf_config_vlan_pvid(struct rte_eth_dev *dev,
 	struct i40e_virtchnl_pvid_info tpid_info;
 
 	if (dev == NULL || info == NULL) {
-		PMD_DRV_LOG(ERR, "invalid parameters\n");
+		PMD_DRV_LOG(ERR, "invalid parameters");
 		return I40E_ERR_PARAM;
 	}
 
@@ -516,7 +516,7 @@ i40evf_config_vlan_pvid(struct rte_eth_dev *dev,
 
 	err = i40evf_execute_vf_cmd(dev, &args);
 	if (err)
-		PMD_DRV_LOG(ERR, "fail to execute command CFG_VLAN_PVID\n");
+		PMD_DRV_LOG(ERR, "fail to execute command CFG_VLAN_PVID");
 
 	return err;
 }
@@ -540,7 +540,7 @@ i40evf_configure_queues(struct rte_eth_dev *dev)
 	len = sizeof(*queue_info) + sizeof(*queue_cfg) * nb_qpairs;
 	queue_info = rte_zmalloc("queue_info", len, 0);
 	if (queue_info == NULL) {
-		PMD_INIT_LOG(ERR, "failed alloc memory for queue_info\n");
+		PMD_INIT_LOG(ERR, "failed alloc memory for queue_info");
 		return -1;
 	}
 	queue_info->vsi_id = vf->vsi_res->vsi_id;
@@ -593,7 +593,7 @@ i40evf_configure_queues(struct rte_eth_dev *dev)
 	err = i40evf_execute_vf_cmd(dev, &args);
 	if (err)
 		PMD_DRV_LOG(ERR, "fail to execute command "
-			    "OP_CONFIG_VSI_QUEUES\n");
+			    "OP_CONFIG_VSI_QUEUES");
 	rte_free(queue_info);
 
 	return err;
@@ -628,7 +628,7 @@ i40evf_config_irq_map(struct rte_eth_dev *dev)
 	args.out_size = I40E_AQ_BUF_SZ;
 	err = i40evf_execute_vf_cmd(dev, &args);
 	if (err)
-		PMD_DRV_LOG(ERR, "fail to execute command OP_ENABLE_QUEUES\n");
+		PMD_DRV_LOG(ERR, "fail to execute command OP_ENABLE_QUEUES");
 
 	return err;
 }
@@ -659,7 +659,7 @@ i40evf_switch_queue(struct rte_eth_dev *dev, bool isrx, uint16_t qid,
 	args.out_size = I40E_AQ_BUF_SZ;
 	err = i40evf_execute_vf_cmd(dev, &args);
 	if (err)
-		PMD_DRV_LOG(ERR, "fail to switch %s %u %s\n",
+		PMD_DRV_LOG(ERR, "fail to switch %s %u %s",
 			    isrx ? "RX" : "TX", qid, on ? "on" : "off");
 
 	return err;
@@ -678,7 +678,7 @@ i40evf_start_queues(struct rte_eth_dev *dev)
 		if (rxq->start_rx_per_q)
 			continue;
 		if (i40evf_dev_rx_queue_start(dev, i) != 0) {
-			PMD_DRV_LOG(ERR, "Fail to start queue %u\n", i);
+			PMD_DRV_LOG(ERR, "Fail to start queue %u", i);
 			return -1;
 		}
 	}
@@ -688,7 +688,7 @@ i40evf_start_queues(struct rte_eth_dev *dev)
 		if (txq->start_tx_per_q)
 			continue;
 		if (i40evf_dev_tx_queue_start(dev, i) != 0) {
-			PMD_DRV_LOG(ERR, "Fail to start queue %u\n", i);
+			PMD_DRV_LOG(ERR, "Fail to start queue %u", i);
 			return -1;
 		}
 	}
@@ -704,7 +704,7 @@ i40evf_stop_queues(struct rte_eth_dev *dev)
 	/* Stop TX queues first */
 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
 		if (i40evf_dev_tx_queue_stop(dev, i) != 0) {
-			PMD_DRV_LOG(ERR, "Fail to start queue %u\n", i);
+			PMD_DRV_LOG(ERR, "Fail to start queue %u", i);
 			return -1;
 		}
 	}
@@ -712,7 +712,7 @@ i40evf_stop_queues(struct rte_eth_dev *dev)
 	/* Then stop RX queues */
 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
 		if (i40evf_dev_rx_queue_stop(dev, i) != 0) {
-			PMD_DRV_LOG(ERR, "Fail to start queue %u\n", i);
+			PMD_DRV_LOG(ERR, "Fail to start queue %u", i);
 			return -1;
 		}
 	}
@@ -731,7 +731,7 @@ i40evf_add_mac_addr(struct rte_eth_dev *dev, struct ether_addr *addr)
 	struct vf_cmd_info args;
 
 	if (i40e_validate_mac_addr(addr->addr_bytes) != I40E_SUCCESS) {
-		PMD_DRV_LOG(ERR, "Invalid mac:%x:%x:%x:%x:%x:%x\n",
+		PMD_DRV_LOG(ERR, "Invalid mac:%x:%x:%x:%x:%x:%x",
 			    addr->addr_bytes[0], addr->addr_bytes[1],
 			    addr->addr_bytes[2], addr->addr_bytes[3],
 			    addr->addr_bytes[4], addr->addr_bytes[5]);
@@ -752,7 +752,7 @@ i40evf_add_mac_addr(struct rte_eth_dev *dev, struct ether_addr *addr)
 	err = i40evf_execute_vf_cmd(dev, &args);
 	if (err)
 		PMD_DRV_LOG(ERR, "fail to execute command "
-			    "OP_ADD_ETHER_ADDRESS\n");
+			    "OP_ADD_ETHER_ADDRESS");
 
 	return err;
 }
@@ -768,7 +768,7 @@ i40evf_del_mac_addr(struct rte_eth_dev *dev, struct ether_addr *addr)
 	struct vf_cmd_info args;
 
 	if (i40e_validate_mac_addr(addr->addr_bytes) != I40E_SUCCESS) {
-		PMD_DRV_LOG(ERR, "Invalid mac:%x-%x-%x-%x-%x-%x\n",
+		PMD_DRV_LOG(ERR, "Invalid mac:%x-%x-%x-%x-%x-%x",
 			    addr->addr_bytes[0], addr->addr_bytes[1],
 			    addr->addr_bytes[2], addr->addr_bytes[3],
 			    addr->addr_bytes[4], addr->addr_bytes[5]);
@@ -789,7 +789,7 @@ i40evf_del_mac_addr(struct rte_eth_dev *dev, struct ether_addr *addr)
 	err = i40evf_execute_vf_cmd(dev, &args);
 	if (err)
 		PMD_DRV_LOG(ERR, "fail to execute command "
-			    "OP_DEL_ETHER_ADDRESS\n");
+			    "OP_DEL_ETHER_ADDRESS");
 
 	return err;
 }
@@ -813,7 +813,7 @@ i40evf_get_statics(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
 
 	err = i40evf_execute_vf_cmd(dev, &args);
 	if (err) {
-		PMD_DRV_LOG(ERR, "fail to execute command OP_GET_STATS\n");
+		PMD_DRV_LOG(ERR, "fail to execute command OP_GET_STATS");
 		return err;
 	}
 	pstats = (struct i40e_eth_stats *)args.out_buffer;
@@ -851,7 +851,7 @@ i40evf_add_vlan(struct rte_eth_dev *dev, uint16_t vlanid)
 	args.out_size = I40E_AQ_BUF_SZ;
 	err = i40evf_execute_vf_cmd(dev, &args);
 	if (err)
-		PMD_DRV_LOG(ERR, "fail to execute command OP_ADD_VLAN\n");
+		PMD_DRV_LOG(ERR, "fail to execute command OP_ADD_VLAN");
 
 	return err;
 }
@@ -878,7 +878,7 @@ i40evf_del_vlan(struct rte_eth_dev *dev, uint16_t vlanid)
 	args.out_size = I40E_AQ_BUF_SZ;
 	err = i40evf_execute_vf_cmd(dev, &args);
 	if (err)
-		PMD_DRV_LOG(ERR, "fail to execute command OP_DEL_VLAN\n");
+		PMD_DRV_LOG(ERR, "fail to execute command OP_DEL_VLAN");
 
 	return err;
 }
@@ -897,7 +897,7 @@ i40evf_get_link_status(struct rte_eth_dev *dev, struct rte_eth_link *link)
 	args.out_size = I40E_AQ_BUF_SZ;
 	err = i40evf_execute_vf_cmd(dev, &args);
 	if (err) {
-		PMD_DRV_LOG(ERR, "fail to execute command OP_GET_LINK_STAT\n");
+		PMD_DRV_LOG(ERR, "fail to execute command OP_GET_LINK_STAT");
 		return err;
 	}
 
@@ -933,7 +933,7 @@ i40evf_reset_vf(struct i40e_hw *hw)
 	int i, reset;
 
 	if (i40e_vf_reset(hw) != I40E_SUCCESS) {
-		PMD_INIT_LOG(ERR, "Reset VF NIC failed\n");
+		PMD_INIT_LOG(ERR, "Reset VF NIC failed");
 		return -1;
 	}
 	/**
@@ -958,7 +958,7 @@ i40evf_reset_vf(struct i40e_hw *hw)
 	}
 
 	if (i >= MAX_RESET_WAIT_CNT) {
-		PMD_INIT_LOG(ERR, "Reset VF NIC failed\n");
+		PMD_INIT_LOG(ERR, "Reset VF NIC failed");
 		return -1;
 	}
 
@@ -974,49 +974,49 @@ i40evf_init_vf(struct rte_eth_dev *dev)
 
 	err = i40evf_set_mac_type(hw);
 	if (err) {
-		PMD_INIT_LOG(ERR, "set_mac_type failed: %d\n", err);
+		PMD_INIT_LOG(ERR, "set_mac_type failed: %d", err);
 		goto err;
 	}
 
 	i40e_init_adminq_parameter(hw);
 	err = i40e_init_adminq(hw);
 	if (err) {
-		PMD_INIT_LOG(ERR, "init_adminq failed: %d\n", err);
+		PMD_INIT_LOG(ERR, "init_adminq failed: %d", err);
 		goto err;
 	}
 
 
 	/* Reset VF and wait until it's complete */
 	if (i40evf_reset_vf(hw)) {
-		PMD_INIT_LOG(ERR, "reset NIC failed\n");
+		PMD_INIT_LOG(ERR, "reset NIC failed");
 		goto err_aq;
 	}
 
 	/* VF reset, shutdown admin queue and initialize again */
 	if (i40e_shutdown_adminq(hw) != I40E_SUCCESS) {
-		PMD_INIT_LOG(ERR, "i40e_shutdown_adminq failed\n");
+		PMD_INIT_LOG(ERR, "i40e_shutdown_adminq failed");
 		return -1;
 	}
 
 	i40e_init_adminq_parameter(hw);
 	if (i40e_init_adminq(hw) != I40E_SUCCESS) {
-		PMD_INIT_LOG(ERR, "init_adminq failed\n");
+		PMD_INIT_LOG(ERR, "init_adminq failed");
 		return -1;
 	}
 	if (i40evf_check_api_version(dev) != 0) {
-		PMD_INIT_LOG(ERR, "check_api version failed\n");
+		PMD_INIT_LOG(ERR, "check_api version failed");
 		goto err_aq;
 	}
 	bufsz = sizeof(struct i40e_virtchnl_vf_resource) +
 		(I40E_MAX_VF_VSI * sizeof(struct i40e_virtchnl_vsi_resource));
 	vf->vf_res = rte_zmalloc("vf_res", bufsz, 0);
 	if (!vf->vf_res) {
-		PMD_INIT_LOG(ERR, "unable to allocate vf_res memory\n");
+		PMD_INIT_LOG(ERR, "unable to allocate vf_res memory");
 			goto err_aq;
 	}
 
 	if (i40evf_get_vf_resource(dev) != 0) {
-		PMD_INIT_LOG(ERR, "i40evf_get_vf_config failed\n");
+		PMD_INIT_LOG(ERR, "i40evf_get_vf_config failed");
 		goto err_alloc;
 	}
 
@@ -1027,7 +1027,7 @@ i40evf_init_vf(struct rte_eth_dev *dev)
 	}
 
 	if (!vf->vsi_res) {
-		PMD_INIT_LOG(ERR, "no LAN VSI found\n");
+		PMD_INIT_LOG(ERR, "no LAN VSI found");
 		goto err_alloc;
 	}
 
@@ -1086,7 +1086,7 @@ i40evf_dev_init(__rte_unused struct eth_driver *eth_drv,
 	hw->hw_addr = (void *)eth_dev->pci_dev->mem_resource[0].addr;
 
 	if(i40evf_init_vf(eth_dev) != 0) {
-		PMD_INIT_LOG(ERR, "Init vf failed\n");
+		PMD_INIT_LOG(ERR, "Init vf failed");
 		return -1;
 	}
 
@@ -1223,7 +1223,7 @@ i40evf_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 
 		err = i40e_alloc_rx_queue_mbufs(rxq);
 		if (err) {
-			PMD_DRV_LOG(ERR, "Failed to allocate RX queue mbuf\n");
+			PMD_DRV_LOG(ERR, "Failed to allocate RX queue mbuf");
 			return err;
 		}
 
@@ -1237,7 +1237,7 @@ i40evf_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 		err = i40evf_switch_queue(dev, TRUE, rx_queue_id, TRUE);
 
 		if (err)
-			PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on\n",
+			PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on",
 				    rx_queue_id);
 	}
 
@@ -1256,7 +1256,7 @@ i40evf_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 		err = i40evf_switch_queue(dev, TRUE, rx_queue_id, FALSE);
 
 		if (err) {
-			PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off\n",
+			PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off",
 				    rx_queue_id);
 			return err;
 		}
@@ -1281,7 +1281,7 @@ i40evf_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 		err = i40evf_switch_queue(dev, FALSE, tx_queue_id, TRUE);
 
 		if (err)
-			PMD_DRV_LOG(ERR, "Failed to switch TX queue %u on\n",
+			PMD_DRV_LOG(ERR, "Failed to switch TX queue %u on",
 				    tx_queue_id);
 	}
 
@@ -1300,7 +1300,7 @@ i40evf_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 		err = i40evf_switch_queue(dev, FALSE, tx_queue_id, FALSE);
 
 		if (err) {
-			PMD_DRV_LOG(ERR, "Failed to switch TX queue %u of\n",
+			PMD_DRV_LOG(ERR, "Failed to switch TX queue %u off",
 				    tx_queue_id);
 			return err;
 		}
@@ -1386,7 +1386,7 @@ i40evf_dev_start(struct rte_eth_dev *dev)
 			vf->max_pkt_len > I40E_FRAME_SIZE_MAX) {
 			PMD_DRV_LOG(ERR, "maximum packet length must "
 				    "be larger than %u and smaller than %u,"
-				    "as jumbo frame is enabled\n",
+				    " as jumbo frame is enabled",
 				    (uint32_t)ETHER_MAX_LEN,
 				    (uint32_t)I40E_FRAME_SIZE_MAX);
 			return I40E_ERR_CONFIG;
@@ -1396,7 +1396,7 @@ i40evf_dev_start(struct rte_eth_dev *dev)
 			vf->max_pkt_len > ETHER_MAX_LEN) {
 			PMD_DRV_LOG(ERR, "maximum packet length must be "
 				    "larger than %u and smaller than %u, "
-				    "as jumbo frame is disabled\n",
+				    "as jumbo frame is disabled",
 				    (uint32_t)ETHER_MIN_LEN,
 				    (uint32_t)ETHER_MAX_LEN);
 			return I40E_ERR_CONFIG;
@@ -1407,18 +1407,18 @@ i40evf_dev_start(struct rte_eth_dev *dev)
 					dev->data->nb_tx_queues);
 
 	if (i40evf_rx_init(dev) != 0){
-		PMD_DRV_LOG(ERR, "failed to do RX init\n");
+		PMD_DRV_LOG(ERR, "failed to do RX init");
 		return -1;
 	}
 
 	i40evf_tx_init(dev);
 
 	if (i40evf_configure_queues(dev) != 0) {
-		PMD_DRV_LOG(ERR, "configure queues failed\n");
+		PMD_DRV_LOG(ERR, "configure queues failed");
 		goto err_queue;
 	}
 	if (i40evf_config_irq_map(dev)) {
-		PMD_DRV_LOG(ERR, "config_irq_map failed\n");
+		PMD_DRV_LOG(ERR, "config_irq_map failed");
 		goto err_queue;
 	}
 
@@ -1426,12 +1426,12 @@ i40evf_dev_start(struct rte_eth_dev *dev)
 	(void)rte_memcpy(mac_addr.addr_bytes, hw->mac.addr,
 				sizeof(mac_addr.addr_bytes));
 	if (i40evf_add_mac_addr(dev, &mac_addr)) {
-		PMD_DRV_LOG(ERR, "Failed to add mac addr\n");
+		PMD_DRV_LOG(ERR, "Failed to add mac addr");
 		goto err_queue;
 	}
 
 	if (i40evf_start_queues(dev) != 0) {
-		PMD_DRV_LOG(ERR, "enable queues failed\n");
+		PMD_DRV_LOG(ERR, "enable queues failed");
 		goto err_mac;
 	}
 
@@ -1555,7 +1555,7 @@ i40evf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
 {
 	memset(stats, 0, sizeof(*stats));
 	if (i40evf_get_statics(dev, stats))
-		PMD_DRV_LOG(ERR, "Get statics failed\n");
+		PMD_DRV_LOG(ERR, "Get statics failed");
 }
 
 static void
diff --git a/lib/librte_pmd_i40e/i40e_pf.c b/lib/librte_pmd_i40e/i40e_pf.c
index ed9773a..682ff44 100644
--- a/lib/librte_pmd_i40e/i40e_pf.c
+++ b/lib/librte_pmd_i40e/i40e_pf.c
@@ -158,7 +158,7 @@ i40e_pf_host_vf_reset(struct i40e_pf_vf *vf, bool do_hw_reset)
 	}
 
 	if (i >= VFRESET_MAX_WAIT_CNT) {
-		PMD_DRV_LOG(ERR, "VF reset timeout\n");
+		PMD_DRV_LOG(ERR, "VF reset timeout");
 		return -ETIMEDOUT;
 	}
 
@@ -171,7 +171,7 @@ i40e_pf_host_vf_reset(struct i40e_pf_vf *vf, bool do_hw_reset)
 		qsel.tx_queues = qsel.rx_queues;
 		ret = i40e_pf_host_switch_queues(vf, &qsel, false);
 		if (ret != I40E_SUCCESS) {
-			PMD_DRV_LOG(ERR, "Disable VF queues failed\n");
+			PMD_DRV_LOG(ERR, "Disable VF queues failed");
 			return -EFAULT;
 		}
 
@@ -190,7 +190,7 @@ i40e_pf_host_vf_reset(struct i40e_pf_vf *vf, bool do_hw_reset)
 		/* remove VSI */
 		ret = i40e_vsi_release(vf->vsi);
 		if (ret != I40E_SUCCESS) {
-			PMD_DRV_LOG(ERR, "Release VSI failed\n");
+			PMD_DRV_LOG(ERR, "Release VSI failed");
 			return -EFAULT;
 		}
 	}
@@ -209,7 +209,7 @@ i40e_pf_host_vf_reset(struct i40e_pf_vf *vf, bool do_hw_reset)
 	}
 
 	if (i >= VFRESET_MAX_WAIT_CNT) {
-		PMD_DRV_LOG(ERR, "Wait VF PCI transaction end timeout\n");
+		PMD_DRV_LOG(ERR, "Wait VF PCI transaction end timeout");
 		return -ETIMEDOUT;
 	}
 
@@ -225,13 +225,13 @@ i40e_pf_host_vf_reset(struct i40e_pf_vf *vf, bool do_hw_reset)
 	vf->vsi = i40e_vsi_setup(vf->pf, I40E_VSI_SRIOV,
 			vf->pf->main_vsi, vf->vf_idx);
 	if (vf->vsi == NULL) {
-		PMD_DRV_LOG(ERR, "Add vsi failed\n");
+		PMD_DRV_LOG(ERR, "Add vsi failed");
 		return -EFAULT;
 	}
 
 	ret = i40e_pf_vf_queues_mapping(vf);
 	if (ret != I40E_SUCCESS) {
-		PMD_DRV_LOG(ERR, "queue mapping error\n");
+		PMD_DRV_LOG(ERR, "queue mapping error");
 		i40e_vsi_release(vf->vsi);
 		return -EFAULT;
 	}
@@ -295,7 +295,7 @@ i40e_pf_host_process_cmd_get_vf_resource(struct i40e_pf_vf *vf)
 
 	vf_res = rte_zmalloc("i40e_vf_res", len, 0);
 	if (vf_res == NULL) {
-		PMD_DRV_LOG(ERR, "failed to allocate mem\n");
+		PMD_DRV_LOG(ERR, "failed to allocate mem");
 		ret = I40E_ERR_NO_MEMORY;
 		vf_res = NULL;
 		len = 0;
@@ -421,7 +421,7 @@ i40e_pf_host_process_cmd_config_vsi_queues(struct i40e_pf_vf *vf,
 
 	if (msg == NULL || msglen <= sizeof(*qconfig) ||
 		qconfig->num_queue_pairs > vsi->nb_qps) {
-		PMD_DRV_LOG(ERR, "vsi_queue_config_info argument wrong\n");
+		PMD_DRV_LOG(ERR, "vsi_queue_config_info argument wrong");
 		ret = I40E_ERR_PARAM;
 		goto send_msg;
 	}
@@ -466,20 +466,20 @@ i40e_pf_host_process_cmd_config_irq_map(struct i40e_pf_vf *vf,
 	    (struct i40e_virtchnl_irq_map_info *)msg;
 
 	if (msg == NULL || msglen < sizeof(struct i40e_virtchnl_irq_map_info)) {
-		PMD_DRV_LOG(ERR, "buffer too short\n");
+		PMD_DRV_LOG(ERR, "buffer too short");
 		ret = I40E_ERR_PARAM;
 		goto send_msg;
 	}
 
 	/* Assume VF only have 1 vector to bind all queues */
 	if (irqmap->num_vectors != 1) {
-		PMD_DRV_LOG(ERR, "DKDK host only support 1 vector\n");
+		PMD_DRV_LOG(ERR, "DPDK host only supports 1 vector");
 		ret = I40E_ERR_PARAM;
 		goto send_msg;
 	}
 
 	if (irqmap->vecmap[0].vector_id == 0) {
-		PMD_DRV_LOG(ERR, "DPDK host don't support use IRQ0\n");
+		PMD_DRV_LOG(ERR, "DPDK host doesn't support using IRQ0");
 		ret = I40E_ERR_PARAM;
 		goto send_msg;
 	}
@@ -601,7 +601,7 @@ i40e_pf_host_process_cmd_add_ether_address(struct i40e_pf_vf *vf,
 	struct ether_addr *mac;
 
 	if (msg == NULL || msglen <= sizeof(*addr_list)) {
-		PMD_DRV_LOG(ERR, "add_ether_address argument too short\n");
+		PMD_DRV_LOG(ERR, "add_ether_address argument too short");
 		ret = I40E_ERR_PARAM;
 		goto send_msg;
 	}
@@ -634,7 +634,7 @@ i40e_pf_host_process_cmd_del_ether_address(struct i40e_pf_vf *vf,
 	struct ether_addr *mac;
 
 	if (msg == NULL || msglen <= sizeof(*addr_list)) {
-		PMD_DRV_LOG(ERR, "delete_ether_address argument too short\n");
+		PMD_DRV_LOG(ERR, "delete_ether_address argument too short");
 		ret = I40E_ERR_PARAM;
 		goto send_msg;
 	}
@@ -667,7 +667,7 @@ i40e_pf_host_process_cmd_add_vlan(struct i40e_pf_vf *vf,
 	uint16_t *vid;
 
 	if (msg == NULL || msglen <= sizeof(*vlan_filter_list)) {
-		PMD_DRV_LOG(ERR, "add_vlan argument too short\n");
+		PMD_DRV_LOG(ERR, "add_vlan argument too short");
 		ret = I40E_ERR_PARAM;
 		goto send_msg;
 	}
@@ -699,7 +699,7 @@ i40e_pf_host_process_cmd_del_vlan(struct i40e_pf_vf *vf,
 	uint16_t *vid;
 
 	if (msg == NULL || msglen <= sizeof(*vlan_filter_list)) {
-		PMD_DRV_LOG(ERR, "delete_vlan argument too short\n");
+		PMD_DRV_LOG(ERR, "delete_vlan argument too short");
 		ret = I40E_ERR_PARAM;
 		goto send_msg;
 	}
@@ -796,7 +796,7 @@ i40e_pf_host_process_cmd_cfg_vlan_offload(
 	ret = i40e_vsi_config_vlan_stripping(vf->vsi,
 						!!offload->enable_vlan_strip);
 	if (ret != 0)
-		PMD_DRV_LOG(ERR, "Failed to configure vlan stripping\n");
+		PMD_DRV_LOG(ERR, "Failed to configure vlan stripping");
 
 send_msg:
 	i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_CFG_VLAN_OFFLOAD,
@@ -842,13 +842,13 @@ i40e_pf_host_handle_vf_msg(struct rte_eth_dev *dev,
 	uint16_t vf_id = abs_vf_id - hw->func_caps.vf_base_id;
 
 	if (!dev || vf_id > pf->vf_num - 1 || !pf->vfs) {
-		PMD_DRV_LOG(ERR, "invalid argument\n");
+		PMD_DRV_LOG(ERR, "invalid argument");
 		return;
 	}
 
 	vf = &pf->vfs[vf_id];
 	if (!vf->vsi) {
-		PMD_DRV_LOG(ERR, "NO VSI associated with VF found\n");
+		PMD_DRV_LOG(ERR, "NO VSI associated with VF found");
 		i40e_pf_host_send_msg_to_vf(vf, opcode,
 			I40E_ERR_NO_AVAILABLE_VSI, NULL, 0);
 		return;
@@ -856,81 +856,81 @@ i40e_pf_host_handle_vf_msg(struct rte_eth_dev *dev,
 
 	switch (opcode) {
 	case I40E_VIRTCHNL_OP_VERSION :
-		PMD_DRV_LOG(INFO, "OP_VERSION received\n");
+		PMD_DRV_LOG(INFO, "OP_VERSION received");
 		i40e_pf_host_process_cmd_version(vf);
 		break;
 	case I40E_VIRTCHNL_OP_RESET_VF :
-		PMD_DRV_LOG(INFO, "OP_RESET_VF received\n");
+		PMD_DRV_LOG(INFO, "OP_RESET_VF received");
 		i40e_pf_host_process_cmd_reset_vf(vf);
 		break;
 	case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
-		PMD_DRV_LOG(INFO, "OP_GET_VF_RESOURCES received\n");
+		PMD_DRV_LOG(INFO, "OP_GET_VF_RESOURCES received");
 		i40e_pf_host_process_cmd_get_vf_resource(vf);
 		break;
 	case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
-		PMD_DRV_LOG(INFO, "OP_CONFIG_VSI_QUEUES received\n");
+		PMD_DRV_LOG(INFO, "OP_CONFIG_VSI_QUEUES received");
 		i40e_pf_host_process_cmd_config_vsi_queues(vf,
 						msg, msglen);
 		break;
 	case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
-		PMD_DRV_LOG(INFO, "OP_CONFIG_IRQ_MAP received\n");
+		PMD_DRV_LOG(INFO, "OP_CONFIG_IRQ_MAP received");
 		i40e_pf_host_process_cmd_config_irq_map(vf, msg, msglen);
 		break;
 	case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
-		PMD_DRV_LOG(INFO, "OP_ENABLE_QUEUES received\n");
+		PMD_DRV_LOG(INFO, "OP_ENABLE_QUEUES received");
 		i40e_pf_host_process_cmd_enable_queues(vf,
 						msg, msglen);
 		break;
 	case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
-		PMD_DRV_LOG(INFO, "OP_DISABLE_QUEUE received\n");
+		PMD_DRV_LOG(INFO, "OP_DISABLE_QUEUE received");
 		i40e_pf_host_process_cmd_disable_queues(vf,
 						msg, msglen);
 		break;
 	case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
-		PMD_DRV_LOG(INFO, "OP_ADD_ETHER_ADDRESS received\n");
+		PMD_DRV_LOG(INFO, "OP_ADD_ETHER_ADDRESS received");
 		i40e_pf_host_process_cmd_add_ether_address(vf,
 						msg, msglen);
 		break;
 	case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
-		PMD_DRV_LOG(INFO, "OP_DEL_ETHER_ADDRESS received\n");
+		PMD_DRV_LOG(INFO, "OP_DEL_ETHER_ADDRESS received");
 		i40e_pf_host_process_cmd_del_ether_address(vf,
 						msg, msglen);
 		break;
 	case I40E_VIRTCHNL_OP_ADD_VLAN:
-		PMD_DRV_LOG(INFO, "OP_ADD_VLAN received\n");
+		PMD_DRV_LOG(INFO, "OP_ADD_VLAN received");
 		i40e_pf_host_process_cmd_add_vlan(vf, msg, msglen);
 		break;
 	case I40E_VIRTCHNL_OP_DEL_VLAN:
-		PMD_DRV_LOG(INFO, "OP_DEL_VLAN received\n");
+		PMD_DRV_LOG(INFO, "OP_DEL_VLAN received");
 		i40e_pf_host_process_cmd_del_vlan(vf, msg, msglen);
 		break;
 	case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
-		PMD_DRV_LOG(INFO, "OP_CONFIG_PROMISCUOUS_MODE received\n");
+		PMD_DRV_LOG(INFO, "OP_CONFIG_PROMISCUOUS_MODE received");
 		i40e_pf_host_process_cmd_config_promisc_mode(vf, msg, msglen);
 		break;
 	case I40E_VIRTCHNL_OP_GET_STATS:
-		PMD_DRV_LOG(INFO, "OP_GET_STATS received\n");
+		PMD_DRV_LOG(INFO, "OP_GET_STATS received");
 		i40e_pf_host_process_cmd_get_stats(vf);
 		break;
 	case I40E_VIRTCHNL_OP_GET_LINK_STAT:
-		PMD_DRV_LOG(INFO, "OP_GET_LINK_STAT received\n");
+		PMD_DRV_LOG(INFO, "OP_GET_LINK_STAT received");
 		i40e_pf_host_process_cmd_get_link_status(vf);
 		break;
 	case I40E_VIRTCHNL_OP_CFG_VLAN_OFFLOAD:
-		PMD_DRV_LOG(INFO, "OP_CFG_VLAN_OFFLOAD received\n");
+		PMD_DRV_LOG(INFO, "OP_CFG_VLAN_OFFLOAD received");
 		i40e_pf_host_process_cmd_cfg_vlan_offload(vf, msg, msglen);
 		break;
 	case I40E_VIRTCHNL_OP_CFG_VLAN_PVID:
-		PMD_DRV_LOG(INFO, "OP_CFG_VLAN_PVID received\n");
+		PMD_DRV_LOG(INFO, "OP_CFG_VLAN_PVID received");
 		i40e_pf_host_process_cmd_cfg_pvid(vf, msg, msglen);
 		break;
 	 /* Don't add command supported below, which will
 	 *  return an error code.
 	 */
 	case I40E_VIRTCHNL_OP_FCOE:
-		PMD_DRV_LOG(ERR, "OP_FCOE received, not supported\n");
+		PMD_DRV_LOG(ERR, "OP_FCOE received, not supported");
 	default:
-		PMD_DRV_LOG(ERR, "%u received, not supported\n", opcode);
+		PMD_DRV_LOG(ERR, "%u received, not supported", opcode);
 		i40e_pf_host_send_msg_to_vf(vf, opcode,
 				I40E_ERR_PARAM, NULL, 0);
 		break;
diff --git a/lib/librte_pmd_i40e/i40e_rxtx.c b/lib/librte_pmd_i40e/i40e_rxtx.c
index 99a6572..22f9fd5 100644
--- a/lib/librte_pmd_i40e/i40e_rxtx.c
+++ b/lib/librte_pmd_i40e/i40e_rxtx.c
@@ -420,13 +420,13 @@ i40e_txd_enable_checksum(uint32_t ol_flags,
 			uint8_t l3_len)
 {
 	if (!l2_len) {
-		PMD_DRV_LOG(DEBUG, "L2 length set to 0\n");
+		PMD_DRV_LOG(DEBUG, "L2 length set to 0");
 		return;
 	}
 	*td_offset |= (l2_len >> 1) << I40E_TX_DESC_LENGTH_MACLEN_SHIFT;
 
 	if (!l3_len) {
-		PMD_DRV_LOG(DEBUG, "L3 length set to 0\n");
+		PMD_DRV_LOG(DEBUG, "L3 length set to 0");
 		return;
 	}
 
@@ -676,7 +676,7 @@ i40e_rx_alloc_bufs(struct i40e_rx_queue *rxq)
 	diag = rte_mempool_get_bulk(rxq->mp, (void *)rxep,
 					rxq->rx_free_thresh);
 	if (unlikely(diag != 0)) {
-		PMD_DRV_LOG(ERR, "Failed to get mbufs in bulk\n");
+		PMD_DRV_LOG(ERR, "Failed to get mbufs in bulk");
 		return -ENOMEM;
 	}
 
@@ -728,7 +728,7 @@ rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 			uint16_t i, j;
 
 			PMD_RX_LOG(DEBUG, "Rx mbuf alloc failed for "
-				   "port_id=%u, queue_id=%u\n",
+				   "port_id=%u, queue_id=%u",
 				   rxq->port_id, rxq->queue_id);
 			rxq->rx_nb_avail = 0;
 			rxq->rx_tail = (uint16_t)(rxq->rx_tail - nb_rx);
@@ -1440,7 +1440,7 @@ i40e_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 
 		err = i40e_alloc_rx_queue_mbufs(rxq);
 		if (err) {
-			PMD_DRV_LOG(ERR, "Failed to allocate RX queue mbuf\n");
+			PMD_DRV_LOG(ERR, "Failed to allocate RX queue mbuf");
 			return err;
 		}
 
@@ -1452,7 +1452,7 @@ i40e_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 		err = i40e_switch_rx_queue(hw, rx_queue_id + q_base, TRUE);
 
 		if (err) {
-			PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on\n",
+			PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on",
 				    rx_queue_id);
 
 			i40e_rx_queue_release_mbufs(rxq);
@@ -1478,7 +1478,7 @@ i40e_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 		err = i40e_switch_rx_queue(hw, rx_queue_id + q_base, FALSE);
 
 		if (err) {
-			PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off\n",
+			PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off",
 				    rx_queue_id);
 			return err;
 		}
@@ -1502,7 +1502,7 @@ i40e_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 	if (tx_queue_id < dev->data->nb_tx_queues) {
 		err = i40e_switch_tx_queue(hw, tx_queue_id + q_base, TRUE);
 		if (err)
-			PMD_DRV_LOG(ERR, "Failed to switch TX queue %u on\n",
+			PMD_DRV_LOG(ERR, "Failed to switch TX queue %u on",
 				    tx_queue_id);
 	}
 
@@ -1524,7 +1524,7 @@ i40e_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 		err = i40e_switch_tx_queue(hw, tx_queue_id + q_base, FALSE);
 
 		if (err) {
-			PMD_DRV_LOG(ERR, "Failed to switch TX queue %u of\n",
+			PMD_DRV_LOG(ERR, "Failed to switch TX queue %u off",
 				    tx_queue_id);
 			return err;
 		}
@@ -1553,14 +1553,14 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
 
 	if (!vsi || queue_idx >= vsi->nb_qps) {
 		PMD_DRV_LOG(ERR, "VSI not available or queue "
-			    "index exceeds the maximum\n");
+			    "index exceeds the maximum");
 		return I40E_ERR_PARAM;
 	}
 	if (((nb_desc * sizeof(union i40e_rx_desc)) % I40E_ALIGN) != 0 ||
 					(nb_desc > I40E_MAX_RING_DESC) ||
 					(nb_desc < I40E_MIN_RING_DESC)) {
 		PMD_DRV_LOG(ERR, "Number (%u) of receive descriptors is "
-			    "invalid\n", nb_desc);
+			    "invalid", nb_desc);
 		return I40E_ERR_PARAM;
 	}
 
@@ -1577,7 +1577,7 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
 				 socket_id);
 	if (!rxq) {
 		PMD_DRV_LOG(ERR, "Failed to allocate memory for "
-			    "rx queue data structure\n");
+			    "rx queue data structure");
 		return (-ENOMEM);
 	}
 	rxq->mp = mp;
@@ -1602,7 +1602,7 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
 					socket_id);
 	if (!rz) {
 		i40e_dev_rx_queue_release(rxq);
-		PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for RX\n");
+		PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for RX");
 		return (-ENOMEM);
 	}
 
@@ -1631,7 +1631,7 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
 				   socket_id);
 	if (!rxq->sw_ring) {
 		i40e_dev_rx_queue_release(rxq);
-		PMD_DRV_LOG(ERR, "Failed to allocate memory for SW ring\n");
+		PMD_DRV_LOG(ERR, "Failed to allocate memory for SW ring");
 		return (-ENOMEM);
 	}
 
@@ -1645,7 +1645,7 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
 #ifdef RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC
 		PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
 			     "satisfied. Rx Burst Bulk Alloc function will be "
-			     "used on port=%d, queue=%d.\n",
+			     "used on port=%d, queue=%d.",
 			     rxq->port_id, rxq->queue_id);
 		dev->rx_pkt_burst = i40e_recv_pkts_bulk_alloc;
 #endif /* RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC */
@@ -1653,7 +1653,7 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
 		PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
 			     "not satisfied, Scattered Rx is requested, "
 			     "or RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC is "
-			     "not enabled on port=%d, queue=%d.\n",
+			     "not enabled on port=%d, queue=%d.",
 			     rxq->port_id, rxq->queue_id);
 	}
 
@@ -1666,7 +1666,7 @@ i40e_dev_rx_queue_release(void *rxq)
 	struct i40e_rx_queue *q = (struct i40e_rx_queue *)rxq;
 
 	if (!q) {
-		PMD_DRV_LOG(DEBUG, "Pointer to rxq is NULL\n");
+		PMD_DRV_LOG(DEBUG, "Pointer to rxq is NULL");
 		return;
 	}
 
@@ -1684,7 +1684,7 @@ i40e_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 	uint16_t desc = 0;
 
 	if (unlikely(rx_queue_id >= dev->data->nb_rx_queues)) {
-		PMD_DRV_LOG(ERR, "Invalid RX queue id %u\n", rx_queue_id);
+		PMD_DRV_LOG(ERR, "Invalid RX queue id %u", rx_queue_id);
 		return 0;
 	}
 
@@ -1718,7 +1718,7 @@ i40e_dev_rx_descriptor_done(void *rx_queue, uint16_t offset)
 	int ret;
 
 	if (unlikely(offset >= rxq->nb_rx_desc)) {
-		PMD_DRV_LOG(ERR, "Invalid RX queue id %u\n", offset);
+		PMD_DRV_LOG(ERR, "Invalid RX queue id %u", offset);
 		return 0;
 	}
 
@@ -1750,7 +1750,7 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
 
 	if (!vsi || queue_idx >= vsi->nb_qps) {
 		PMD_DRV_LOG(ERR, "VSI is NULL, or queue index (%u) "
-			    "exceeds the maximum\n", queue_idx);
+			    "exceeds the maximum", queue_idx);
 		return I40E_ERR_PARAM;
 	}
 
@@ -1758,7 +1758,7 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
 					(nb_desc > I40E_MAX_RING_DESC) ||
 					(nb_desc < I40E_MIN_RING_DESC)) {
 		PMD_DRV_LOG(ERR, "Number (%u) of transmit descriptors is "
-			    "invalid\n", nb_desc);
+			    "invalid", nb_desc);
 		return I40E_ERR_PARAM;
 	}
 
@@ -1847,7 +1847,7 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
 				  socket_id);
 	if (!txq) {
 		PMD_DRV_LOG(ERR, "Failed to allocate memory for "
-			    "tx queue structure\n");
+			    "tx queue structure");
 		return (-ENOMEM);
 	}
 
@@ -1861,7 +1861,7 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
 					socket_id);
 	if (!tz) {
 		i40e_dev_tx_queue_release(txq);
-		PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for TX\n");
+		PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for TX");
 		return (-ENOMEM);
 	}
 
@@ -1893,7 +1893,7 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
 				   socket_id);
 	if (!txq->sw_ring) {
 		i40e_dev_tx_queue_release(txq);
-		PMD_DRV_LOG(ERR, "Failed to allocate memory for SW TX ring\n");
+		PMD_DRV_LOG(ERR, "Failed to allocate memory for SW TX ring");
 		return (-ENOMEM);
 	}
 
@@ -1904,10 +1904,10 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
 	/* Use a simple TX queue without offloads or multi segs if possible */
 	if (((txq->txq_flags & I40E_SIMPLE_FLAGS) == I40E_SIMPLE_FLAGS) &&
 				(txq->tx_rs_thresh >= I40E_TX_MAX_BURST)) {
-		PMD_INIT_LOG(INFO, "Using simple tx path\n");
+		PMD_INIT_LOG(INFO, "Using simple tx path");
 		dev->tx_pkt_burst = i40e_xmit_pkts_simple;
 	} else {
-		PMD_INIT_LOG(INFO, "Using full-featured tx path\n");
+		PMD_INIT_LOG(INFO, "Using full-featured tx path");
 		dev->tx_pkt_burst = i40e_xmit_pkts;
 	}
 
@@ -1920,7 +1920,7 @@ i40e_dev_tx_queue_release(void *txq)
 	struct i40e_tx_queue *q = (struct i40e_tx_queue *)txq;
 
 	if (!q) {
-		PMD_DRV_LOG(DEBUG, "Pointer to TX queue is NULL\n");
+		PMD_DRV_LOG(DEBUG, "Pointer to TX queue is NULL");
 		return;
 	}
 
@@ -1961,7 +1961,7 @@ i40e_rx_queue_release_mbufs(struct i40e_rx_queue *rxq)
 	uint16_t i;
 
 	if (!rxq || !rxq->sw_ring) {
-		PMD_DRV_LOG(DEBUG, "Pointer to rxq or sw_ring is NULL\n");
+		PMD_DRV_LOG(DEBUG, "Pointer to rxq or sw_ring is NULL");
 		return;
 	}
 
@@ -2021,7 +2021,7 @@ i40e_tx_queue_release_mbufs(struct i40e_tx_queue *txq)
 	uint16_t i;
 
 	if (!txq || !txq->sw_ring) {
-		PMD_DRV_LOG(DEBUG, "Pointer to rxq or sw_ring is NULL\n");
+		PMD_DRV_LOG(DEBUG, "Pointer to txq or sw_ring is NULL");
 		return;
 	}
 
@@ -2040,7 +2040,7 @@ i40e_reset_tx_queue(struct i40e_tx_queue *txq)
 	uint16_t i, prev, size;
 
 	if (!txq) {
-		PMD_DRV_LOG(DEBUG, "Pointer to txq is NULL\n");
+		PMD_DRV_LOG(DEBUG, "Pointer to txq is NULL");
 		return;
 	}
 
@@ -2091,13 +2091,13 @@ i40e_tx_queue_init(struct i40e_tx_queue *txq)
 
 	err = i40e_clear_lan_tx_queue_context(hw, pf_q);
 	if (err != I40E_SUCCESS) {
-		PMD_DRV_LOG(ERR, "Failure of clean lan tx queue context\n");
+		PMD_DRV_LOG(ERR, "Failure of clean lan tx queue context");
 		return err;
 	}
 
 	err = i40e_set_lan_tx_queue_context(hw, pf_q, &tx_ctx);
 	if (err != I40E_SUCCESS) {
-		PMD_DRV_LOG(ERR, "Failure of set lan tx queue context\n");
+		PMD_DRV_LOG(ERR, "Failure of set lan tx queue context");
 		return err;
 	}
 
@@ -2125,7 +2125,7 @@ i40e_alloc_rx_queue_mbufs(struct i40e_rx_queue *rxq)
 		struct rte_mbuf *mbuf = rte_rxmbuf_alloc(rxq->mp);
 
 		if (unlikely(!mbuf)) {
-			PMD_DRV_LOG(ERR, "Failed to allocate mbuf for RX\n");
+			PMD_DRV_LOG(ERR, "Failed to allocate mbuf for RX");
 			return -ENOMEM;
 		}
 
@@ -2193,7 +2193,7 @@ i40e_rx_queue_config(struct i40e_rx_queue *rxq)
 			rxq->max_pkt_len > I40E_FRAME_SIZE_MAX) {
 			PMD_DRV_LOG(ERR, "maximum packet length must "
 				    "be larger than %u and smaller than %u,"
-				    "as jumbo frame is enabled\n",
+				    "as jumbo frame is enabled",
 				    (uint32_t)ETHER_MAX_LEN,
 				    (uint32_t)I40E_FRAME_SIZE_MAX);
 			return I40E_ERR_CONFIG;
@@ -2203,7 +2203,7 @@ i40e_rx_queue_config(struct i40e_rx_queue *rxq)
 			rxq->max_pkt_len > ETHER_MAX_LEN) {
 			PMD_DRV_LOG(ERR, "maximum packet length must be "
 				    "larger than %u and smaller than %u, "
-				    "as jumbo frame is disabled\n",
+				    "as jumbo frame is disabled",
 				    (uint32_t)ETHER_MIN_LEN,
 				    (uint32_t)ETHER_MAX_LEN);
 			return I40E_ERR_CONFIG;
@@ -2228,7 +2228,7 @@ i40e_rx_queue_init(struct i40e_rx_queue *rxq)
 
 	err = i40e_rx_queue_config(rxq);
 	if (err < 0) {
-		PMD_DRV_LOG(ERR, "Failed to config RX queue\n");
+		PMD_DRV_LOG(ERR, "Failed to config RX queue");
 		return err;
 	}
 
@@ -2260,12 +2260,12 @@ i40e_rx_queue_init(struct i40e_rx_queue *rxq)
 
 	err = i40e_clear_lan_rx_queue_context(hw, pf_q);
 	if (err != I40E_SUCCESS) {
-		PMD_DRV_LOG(ERR, "Failed to clear LAN RX queue context\n");
+		PMD_DRV_LOG(ERR, "Failed to clear LAN RX queue context");
 		return err;
 	}
 	err = i40e_set_lan_rx_queue_context(hw, pf_q, &rx_ctx);
 	if (err != I40E_SUCCESS) {
-		PMD_DRV_LOG(ERR, "Failed to set LAN RX queue context\n");
+		PMD_DRV_LOG(ERR, "Failed to set LAN RX queue context");
 		return err;
 	}
 
-- 
1.7.10.4
- * [dpdk-dev] [PATCH v3 12/20] i40e: always log init messages
  2014-09-17 13:46 [dpdk-dev] [PATCH v3 00/20] cleanup logs in main PMDs David Marchand
                   ` (10 preceding siblings ...)
  2014-09-17 13:46 ` [dpdk-dev] [PATCH v3 11/20] i40e: clean log messages David Marchand
@ 2014-09-17 13:46 ` David Marchand
  2014-09-17 13:46 ` [dpdk-dev] [PATCH v3 13/20] i40e: add log messages when rx bulk mode is not usable David Marchand
                   ` (8 subsequent siblings)
  20 siblings, 0 replies; 25+ messages in thread
From: David Marchand @ 2014-09-17 13:46 UTC (permalink / raw)
  To: dev
'init' messages should always be logged and filtered at runtime by rte_log.
All the more so as these messages are not in the datapath.
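For illustration only (not part of the patch): a minimal sketch of how an application
could then filter these always-compiled init messages at runtime, assuming the rte_log
API of this DPDK generation:

    #include <rte_log.h>

    /* keep the PMD log type enabled... */
    rte_set_log_type(RTE_LOGTYPE_PMD, 1);
    /* ...but raise the threshold so DEBUG-level init traces are dropped */
    rte_set_log_level(RTE_LOG_INFO);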
Signed-off-by: David Marchand <david.marchand@6wind.com>
Acked-by: Bruce Richardson <bruce.richardson@intel.com>
---
 lib/librte_pmd_i40e/i40e_logs.h |    7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)
diff --git a/lib/librte_pmd_i40e/i40e_logs.h b/lib/librte_pmd_i40e/i40e_logs.h
index 043ecba..399867f 100644
--- a/lib/librte_pmd_i40e/i40e_logs.h
+++ b/lib/librte_pmd_i40e/i40e_logs.h
@@ -34,12 +34,13 @@
 #ifndef _I40E_LOGS_H_
 #define _I40E_LOGS_H_
 
-#ifdef RTE_LIBRTE_I40E_DEBUG_INIT
 #define PMD_INIT_LOG(level, fmt, args...) \
-	RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
+	rte_log(RTE_LOG_ ## level, RTE_LOGTYPE_PMD, \
+		"PMD: %s(): " fmt "\n", __func__, ##args)
+
+#ifdef RTE_LIBRTE_I40E_DEBUG_INIT
 #define PMD_INIT_FUNC_TRACE() PMD_INIT_LOG(DEBUG, " >>")
 #else
-#define PMD_INIT_LOG(level, fmt, args...) do { } while(0)
 #define PMD_INIT_FUNC_TRACE() do { } while(0)
 #endif
 
-- 
1.7.10.4
- * [dpdk-dev] [PATCH v3 13/20] i40e: add log messages when rx bulk mode is not usable
  2014-09-17 13:46 [dpdk-dev] [PATCH v3 00/20] cleanup logs in main PMDs David Marchand
                   ` (11 preceding siblings ...)
  2014-09-17 13:46 ` [dpdk-dev] [PATCH v3 12/20] i40e: always log init messages David Marchand
@ 2014-09-17 13:46 ` David Marchand
  2014-09-17 13:46 ` [dpdk-dev] [PATCH v3 14/20] e1000: use the right debug macro David Marchand
                   ` (7 subsequent siblings)
  20 siblings, 0 replies; 25+ messages in thread
From: David Marchand @ 2014-09-17 13:46 UTC (permalink / raw)
  To: dev
Signed-off-by: David Marchand <david.marchand@6wind.com>
Acked-by: Bruce Richardson <bruce.richardson@intel.com>
---
 lib/librte_pmd_i40e/i40e_rxtx.c |   29 ++++++++++++++++++++++++-----
 1 file changed, 24 insertions(+), 5 deletions(-)
diff --git a/lib/librte_pmd_i40e/i40e_rxtx.c b/lib/librte_pmd_i40e/i40e_rxtx.c
index 22f9fd5..481ebed 100644
--- a/lib/librte_pmd_i40e/i40e_rxtx.c
+++ b/lib/librte_pmd_i40e/i40e_rxtx.c
@@ -537,15 +537,34 @@ check_rx_burst_bulk_alloc_preconditions(__rte_unused struct i40e_rx_queue *rxq)
 	int ret = 0;
 
 #ifdef RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC
-	if (!(rxq->rx_free_thresh >= RTE_PMD_I40E_RX_MAX_BURST))
+	if (!(rxq->rx_free_thresh >= RTE_PMD_I40E_RX_MAX_BURST)) {
+		PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
+			     "rxq->rx_free_thresh=%d, "
+			     "RTE_PMD_I40E_RX_MAX_BURST=%d",
+			     rxq->rx_free_thresh, RTE_PMD_I40E_RX_MAX_BURST);
 		ret = -EINVAL;
-	else if (!(rxq->rx_free_thresh < rxq->nb_rx_desc))
+	} else if (!(rxq->rx_free_thresh < rxq->nb_rx_desc)) {
+		PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
+			     "rxq->rx_free_thresh=%d, "
+			     "rxq->nb_rx_desc=%d",
+			     rxq->rx_free_thresh, rxq->nb_rx_desc);
 		ret = -EINVAL;
-	else if (!(rxq->nb_rx_desc % rxq->rx_free_thresh) == 0)
+	} else if (!(rxq->nb_rx_desc % rxq->rx_free_thresh) == 0) {
+		PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
+			     "rxq->nb_rx_desc=%d, "
+			     "rxq->rx_free_thresh=%d",
+			     rxq->nb_rx_desc, rxq->rx_free_thresh);
 		ret = -EINVAL;
-	else if (!(rxq->nb_rx_desc < (I40E_MAX_RING_DESC -
-				RTE_PMD_I40E_RX_MAX_BURST)))
+	} else if (!(rxq->nb_rx_desc < (I40E_MAX_RING_DESC -
+				RTE_PMD_I40E_RX_MAX_BURST))) {
+		PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
+			     "rxq->nb_rx_desc=%d, "
+			     "I40E_MAX_RING_DESC=%d, "
+			     "RTE_PMD_I40E_RX_MAX_BURST=%d",
+			     rxq->nb_rx_desc, I40E_MAX_RING_DESC,
+			     RTE_PMD_I40E_RX_MAX_BURST);
 		ret = -EINVAL;
+	}
 #else
 	ret = -EINVAL;
 #endif
-- 
1.7.10.4
- * [dpdk-dev] [PATCH v3 14/20] e1000: use the right debug macro
  2014-09-17 13:46 [dpdk-dev] [PATCH v3 00/20] cleanup logs in main PMDs David Marchand
                   ` (12 preceding siblings ...)
  2014-09-17 13:46 ` [dpdk-dev] [PATCH v3 13/20] i40e: add log messages when rx bulk mode is not usable David Marchand
@ 2014-09-17 13:46 ` David Marchand
  2014-09-17 13:46 ` [dpdk-dev] [PATCH v3 15/20] e1000/base: add a raw macro for use by shared code David Marchand
                   ` (6 subsequent siblings)
  20 siblings, 0 replies; 25+ messages in thread
From: David Marchand @ 2014-09-17 13:46 UTC (permalink / raw)
  To: dev
- We should not use DEBUGOUT* / DEBUGFUNC macros in non-shared code.
These macros come as compat wrappers for shared code.
- We should avoid calling RTE_LOG directly, as the PMD provides a wrapper for logs
  (a minimal before/after sketch follows).
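For illustration only, a hedged before/after sketch of the conversion this patch
applies (the lines mirror the em_rxtx.c hunk below):

    /* before: direct RTE_LOG call, with an explicit log type and trailing \n */
    RTE_LOG(ERR, PMD, "drop_en functionality not supported by device\n");

    /* after: the PMD wrapper adds the "%s(): " prefix and the newline itself */
    PMD_INIT_LOG(ERR, "drop_en functionality not supported by device");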
Signed-off-by: David Marchand <david.marchand@6wind.com>
v2 Reviewed-by: Jay Rolette <rolette@infiniteio.com>
v2 Acked-by: Bruce Richardson <bruce.richardson@intel.com>
---
 lib/librte_pmd_e1000/em_rxtx.c    |   32 ++++++++++++++++++--------------
 lib/librte_pmd_e1000/igb_ethdev.c |    9 +++++----
 lib/librte_pmd_e1000/igb_pf.c     |    5 +++--
 lib/librte_pmd_e1000/igb_rxtx.c   |   16 +++++++---------
 4 files changed, 33 insertions(+), 29 deletions(-)
diff --git a/lib/librte_pmd_e1000/em_rxtx.c b/lib/librte_pmd_e1000/em_rxtx.c
index ba7e3a9..ce0c115 100644
--- a/lib/librte_pmd_e1000/em_rxtx.c
+++ b/lib/librte_pmd_e1000/em_rxtx.c
@@ -1233,18 +1233,21 @@ eth_em_tx_queue_setup(struct rte_eth_dev *dev,
 					DEFAULT_TX_RS_THRESH);
 
 	if (tx_free_thresh >= (nb_desc - 3)) {
-		RTE_LOG(ERR, PMD, "tx_free_thresh must be less than the "
-			"number of TX descriptors minus 3. (tx_free_thresh=%u "
-			"port=%d queue=%d)\n", (unsigned int)tx_free_thresh,
-				(int)dev->data->port_id, (int)queue_idx);
+		PMD_INIT_LOG(ERR, "tx_free_thresh must be less than the "
+			     "number of TX descriptors minus 3. "
+			     "(tx_free_thresh=%u port=%d queue=%d)",
+			     (unsigned int)tx_free_thresh,
+			     (int)dev->data->port_id, (int)queue_idx);
 		return -(EINVAL);
 	}
 	if (tx_rs_thresh > tx_free_thresh) {
-		RTE_LOG(ERR, PMD, "tx_rs_thresh must be less than or equal to "
-			"tx_free_thresh. (tx_free_thresh=%u tx_rs_thresh=%u "
-			"port=%d queue=%d)\n", (unsigned int)tx_free_thresh,
-			(unsigned int)tx_rs_thresh, (int)dev->data->port_id,
-							(int)queue_idx);
+		PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than or equal to "
+			     "tx_free_thresh. (tx_free_thresh=%u "
+			     "tx_rs_thresh=%u port=%d queue=%d)",
+			     (unsigned int)tx_free_thresh,
+			     (unsigned int)tx_rs_thresh,
+			     (int)dev->data->port_id,
+			     (int)queue_idx);
 		return -(EINVAL);
 	}
 
@@ -1255,10 +1258,10 @@ eth_em_tx_queue_setup(struct rte_eth_dev *dev,
 	 * accumulates WTHRESH descriptors.
 	 */
 	if (tx_conf->tx_thresh.wthresh != 0 && tx_rs_thresh != 1) {
-		RTE_LOG(ERR, PMD, "TX WTHRESH must be set to 0 if "
-			"tx_rs_thresh is greater than 1. (tx_rs_thresh=%u "
-			"port=%d queue=%d)\n", (unsigned int)tx_rs_thresh,
-				(int)dev->data->port_id, (int)queue_idx);
+		PMD_INIT_LOG(ERR, "TX WTHRESH must be set to 0 if "
+			     "tx_rs_thresh is greater than 1. (tx_rs_thresh=%u "
+			     "port=%d queue=%d)", (unsigned int)tx_rs_thresh,
+			     (int)dev->data->port_id, (int)queue_idx);
 		return -(EINVAL);
 	}
 
@@ -1388,7 +1391,8 @@ eth_em_rx_queue_setup(struct rte_eth_dev *dev,
 	 * EM devices don't support drop_en functionality
 	 */
 	if (rx_conf->rx_drop_en) {
-		RTE_LOG(ERR, PMD, "drop_en functionality not supported by device\n");
+		PMD_INIT_LOG(ERR, "drop_en functionality not supported by "
+			     "device");
 		return (-EINVAL);
 	}
 
diff --git a/lib/librte_pmd_e1000/igb_ethdev.c b/lib/librte_pmd_e1000/igb_ethdev.c
index 3187d92..b45eb24 100644
--- a/lib/librte_pmd_e1000/igb_ethdev.c
+++ b/lib/librte_pmd_e1000/igb_ethdev.c
@@ -400,7 +400,7 @@ igb_reset_swfw_lock(struct e1000_hw *hw)
 	 * So force the release of the faulty lock.
 	 */
 	if (e1000_get_hw_semaphore_generic(hw) < 0) {
-		DEBUGOUT("SMBI lock released");
+		PMD_DRV_LOG(DEBUG, "SMBI lock released");
 	}
 	e1000_put_hw_semaphore_generic(hw);
 
@@ -416,7 +416,8 @@ igb_reset_swfw_lock(struct e1000_hw *hw)
 		if (hw->bus.func > E1000_FUNC_1)
 			mask <<= 2;
 		if (hw->mac.ops.acquire_swfw_sync(hw, mask) < 0) {
-			DEBUGOUT1("SWFW phy%d lock released", hw->bus.func);
+			PMD_DRV_LOG(DEBUG, "SWFW phy%d lock released",
+				    hw->bus.func);
 		}
 		hw->mac.ops.release_swfw_sync(hw, mask);
 
@@ -428,7 +429,7 @@ igb_reset_swfw_lock(struct e1000_hw *hw)
 		 */
 		mask = E1000_SWFW_EEP_SM;
 		if (hw->mac.ops.acquire_swfw_sync(hw, mask) < 0) {
-			DEBUGOUT("SWFW common locks released");
+			PMD_DRV_LOG(DEBUG, "SWFW common locks released");
 		}
 		hw->mac.ops.release_swfw_sync(hw, mask);
 	}
@@ -707,7 +708,7 @@ igb_vmdq_vlan_hw_filter_enable(struct rte_eth_dev *dev)
 static int
 rte_igbvf_pmd_init(const char *name __rte_unused, const char *params __rte_unused)
 {
-	DEBUGFUNC("rte_igbvf_pmd_init");
+	PMD_INIT_FUNC_TRACE();
 
 	rte_eth_driver_register(&rte_igbvf_pmd);
 	return (0);
diff --git a/lib/librte_pmd_e1000/igb_pf.c b/lib/librte_pmd_e1000/igb_pf.c
index 3d405f0..bc3816a 100644
--- a/lib/librte_pmd_e1000/igb_pf.c
+++ b/lib/librte_pmd_e1000/igb_pf.c
@@ -404,7 +404,7 @@ igb_rcv_msg_from_vf(struct rte_eth_dev *dev, uint16_t vf)
 
 	retval = e1000_read_mbx(hw, msgbuf, mbx_size, vf);
 	if (retval) {
-		RTE_LOG(ERR, PMD, "Error mbx recv msg from VF %d\n", vf);
+		PMD_INIT_LOG(ERR, "Error mbx recv msg from VF %d", vf);
 		return retval;
 	}
 
@@ -432,7 +432,8 @@ igb_rcv_msg_from_vf(struct rte_eth_dev *dev, uint16_t vf)
 		retval = igb_vf_set_vlan(dev, vf, msgbuf);
 		break;
 	default:
-		RTE_LOG(DEBUG, PMD, "Unhandled Msg %8.8x\n", (unsigned) msgbuf[0]);
+		PMD_INIT_LOG(DEBUG, "Unhandled Msg %8.8x",
+			     (unsigned) msgbuf[0]);
 		retval = E1000_ERR_MBX;
 		break;
 	}
diff --git a/lib/librte_pmd_e1000/igb_rxtx.c b/lib/librte_pmd_e1000/igb_rxtx.c
index d4a803e..5600d02 100644
--- a/lib/librte_pmd_e1000/igb_rxtx.c
+++ b/lib/librte_pmd_e1000/igb_rxtx.c
@@ -1229,17 +1229,15 @@ eth_igb_tx_queue_setup(struct rte_eth_dev *dev,
 	 * driver.
 	 */
 	if (tx_conf->tx_free_thresh != 0)
-		RTE_LOG(WARNING, PMD,
-			"The tx_free_thresh parameter is not "
-			"used for the 1G driver.\n");
+		PMD_INIT_LOG(WARNING, "The tx_free_thresh parameter is not "
+			     "used for the 1G driver.");
 	if (tx_conf->tx_rs_thresh != 0)
-		RTE_LOG(WARNING, PMD,
-			"The tx_rs_thresh parameter is not "
-			"used for the 1G driver.\n");
+		PMD_INIT_LOG(WARNING, "The tx_rs_thresh parameter is not "
+			     "used for the 1G driver.");
 	if (tx_conf->tx_thresh.wthresh == 0)
-		RTE_LOG(WARNING, PMD,
-			"To improve 1G driver performance, consider setting "
-			"the TX WTHRESH value to 4, 8, or 16.\n");
+		PMD_INIT_LOG(WARNING, "To improve 1G driver performance, "
+			     "consider setting the TX WTHRESH value to 4, 8, "
+			     "or 16.");
 
 	/* Free memory prior to re-allocation if needed */
 	if (dev->data->tx_queues[queue_idx] != NULL) {
-- 
1.7.10.4
- * [dpdk-dev] [PATCH v3 15/20] e1000/base: add a raw macro for use by shared code
  2014-09-17 13:46 [dpdk-dev] [PATCH v3 00/20] cleanup logs in main PMDs David Marchand
                   ` (13 preceding siblings ...)
  2014-09-17 13:46 ` [dpdk-dev] [PATCH v3 14/20] e1000: use the right debug macro David Marchand
@ 2014-09-17 13:46 ` David Marchand
  2014-09-17 13:46 ` [dpdk-dev] [PATCH v3 16/20] e1000: indent logs sections David Marchand
                   ` (5 subsequent siblings)
  20 siblings, 0 replies; 25+ messages in thread
From: David Marchand @ 2014-09-17 13:46 UTC (permalink / raw)
  To: dev
Since shared code always adds a trailing \n, add a PMD_DRV_LOG_RAW macro that
does not add one.
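For illustration only, a minimal sketch of how the macros compose after this patch
(definitions taken from the diff below; the example message strings are hypothetical):

    /* shared code keeps its own trailing \n and goes through the raw macro: */
    #define DEBUGOUT(S, args...)  PMD_DRV_LOG_RAW(DEBUG, S, ##args)
    /* e.g. DEBUGOUT("NVM read failed\n") logs exactly one newline */

    /* non-shared code uses PMD_DRV_LOG, which appends the newline itself: */
    #define PMD_DRV_LOG(level, fmt, args...) \
        PMD_DRV_LOG_RAW(level, fmt "\n", ## args)
    /* e.g. PMD_DRV_LOG(DEBUG, "SMBI lock released") also logs one newline */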
Signed-off-by: David Marchand <david.marchand@6wind.com>
Acked-by: Bruce Richardson <bruce.richardson@intel.com>
---
 lib/librte_pmd_e1000/e1000/e1000_osdep.h |    4 ++--
 lib/librte_pmd_e1000/e1000_logs.h        |    9 ++++++---
 2 files changed, 8 insertions(+), 5 deletions(-)
diff --git a/lib/librte_pmd_e1000/e1000/e1000_osdep.h b/lib/librte_pmd_e1000/e1000/e1000_osdep.h
index b083a82..438641e 100644
--- a/lib/librte_pmd_e1000/e1000/e1000_osdep.h
+++ b/lib/librte_pmd_e1000/e1000/e1000_osdep.h
@@ -52,8 +52,8 @@
 #define msec_delay(x) DELAY(1000*(x))
 #define msec_delay_irq(x) DELAY(1000*(x))
 
-#define DEBUGFUNC(F)            DEBUGOUT(F);
-#define DEBUGOUT(S, args...)    PMD_DRV_LOG(DEBUG, S, ##args)
+#define DEBUGFUNC(F)            DEBUGOUT(F "\n");
+#define DEBUGOUT(S, args...)    PMD_DRV_LOG_RAW(DEBUG, S, ##args)
 #define DEBUGOUT1(S, args...)   DEBUGOUT(S, ##args)
 #define DEBUGOUT2(S, args...)   DEBUGOUT(S, ##args)
 #define DEBUGOUT3(S, args...)   DEBUGOUT(S, ##args)
diff --git a/lib/librte_pmd_e1000/e1000_logs.h b/lib/librte_pmd_e1000/e1000_logs.h
index b6b3bb7..fe6e023 100644
--- a/lib/librte_pmd_e1000/e1000_logs.h
+++ b/lib/librte_pmd_e1000/e1000_logs.h
@@ -63,10 +63,13 @@
 #endif
 
 #ifdef RTE_LIBRTE_E1000_DEBUG_DRIVER
-#define PMD_DRV_LOG(level, fmt, args...) \
-	RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
+#define PMD_DRV_LOG_RAW(level, fmt, args...) \
+	RTE_LOG(level, PMD, "%s(): " fmt, __func__, ## args)
 #else
-#define PMD_DRV_LOG(level, fmt, args...) do { } while(0)
+#define PMD_DRV_LOG_RAW(level, fmt, args...) do { } while (0)
 #endif
 
+#define PMD_DRV_LOG(level, fmt, args...) \
+	PMD_DRV_LOG_RAW(level, fmt "\n", ## args)
+
 #endif /* _E1000_LOGS_H_ */
-- 
1.7.10.4
- * [dpdk-dev] [PATCH v3 16/20] e1000: indent logs sections
  2014-09-17 13:46 [dpdk-dev] [PATCH v3 00/20] cleanup logs in main PMDs David Marchand
                   ` (14 preceding siblings ...)
  2014-09-17 13:46 ` [dpdk-dev] [PATCH v3 15/20] e1000/base: add a raw macro for use by shared code David Marchand
@ 2014-09-17 13:46 ` David Marchand
  2014-09-17 13:46 ` [dpdk-dev] [PATCH v3 17/20] e1000: clean log messages David Marchand
                   ` (4 subsequent siblings)
  20 siblings, 0 replies; 25+ messages in thread
From: David Marchand @ 2014-09-17 13:46 UTC (permalink / raw)
  To: dev
Prepare for the next commit: indent the sections where log messages will be modified,
so that the next patch is only about \n changes.
Signed-off-by: David Marchand <david.marchand@6wind.com>
---
 lib/librte_pmd_e1000/em_ethdev.c  |   28 ++++++++---------
 lib/librte_pmd_e1000/em_rxtx.c    |   60 ++++++++++++++++++-------------------
 lib/librte_pmd_e1000/igb_ethdev.c |   34 ++++++++++-----------
 lib/librte_pmd_e1000/igb_rxtx.c   |    2 +-
 4 files changed, 60 insertions(+), 64 deletions(-)
diff --git a/lib/librte_pmd_e1000/em_ethdev.c b/lib/librte_pmd_e1000/em_ethdev.c
index 4555294..71aee67 100644
--- a/lib/librte_pmd_e1000/em_ethdev.c
+++ b/lib/librte_pmd_e1000/em_ethdev.c
@@ -273,8 +273,8 @@ eth_em_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
 	memset(shadow_vfta, 0, sizeof(*shadow_vfta));
 
 	PMD_INIT_LOG(INFO, "port_id %d vendorID=0x%x deviceID=0x%x\n",
-			eth_dev->data->port_id, pci_dev->id.vendor_id,
-			pci_dev->id.device_id);
+		     eth_dev->data->port_id, pci_dev->id.vendor_id,
+		     pci_dev->id.device_id);
 
 	rte_intr_callback_register(&(pci_dev->intr_handle),
 		eth_em_interrupt_handler, (void *)eth_dev);
@@ -574,8 +574,8 @@ eth_em_start(struct rte_eth_dev *dev)
 
 error_invalid_config:
 	PMD_INIT_LOG(ERR, "Invalid link_speed/link_duplex (%u/%u) for port "
-				"%u\n", dev->data->dev_conf.link_speed,
-			dev->data->dev_conf.link_duplex, dev->data->port_id);
+		     "%u\n", dev->data->dev_conf.link_speed,
+		     dev->data->dev_conf.link_duplex, dev->data->port_id);
 	em_dev_clear_queues(dev);
 	return (-EINVAL);
 }
@@ -1296,20 +1296,16 @@ eth_em_interrupt_action(struct rte_eth_dev *dev)
 	memset(&link, 0, sizeof(link));
 	rte_em_dev_atomic_read_link_status(dev, &link);
 	if (link.link_status) {
-		PMD_INIT_LOG(INFO,
-			" Port %d: Link Up - speed %u Mbps - %s\n",
-			dev->data->port_id, (unsigned)link.link_speed,
-			link.link_duplex == ETH_LINK_FULL_DUPLEX ?
-				"full-duplex" : "half-duplex");
+		PMD_INIT_LOG(INFO, " Port %d: Link Up - speed %u Mbps - %s\n",
+			     dev->data->port_id, (unsigned)link.link_speed,
+			     link.link_duplex == ETH_LINK_FULL_DUPLEX ?
+			     "full-duplex" : "half-duplex");
 	} else {
-		PMD_INIT_LOG(INFO, " Port %d: Link Down\n",
-					dev->data->port_id);
+		PMD_INIT_LOG(INFO, " Port %d: Link Down\n", dev->data->port_id);
 	}
 	PMD_INIT_LOG(INFO, "PCI Address: %04d:%02d:%02d:%d",
-				dev->pci_dev->addr.domain,
-				dev->pci_dev->addr.bus,
-				dev->pci_dev->addr.devid,
-				dev->pci_dev->addr.function);
+		     dev->pci_dev->addr.domain, dev->pci_dev->addr.bus,
+		     dev->pci_dev->addr.devid, dev->pci_dev->addr.function);
 	tctl = E1000_READ_REG(hw, E1000_TCTL);
 	rctl = E1000_READ_REG(hw, E1000_RCTL);
 	if (link.link_status) {
@@ -1434,7 +1430,7 @@ eth_em_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 	/* At least reserve one Ethernet frame for watermark */
 	max_high_water = rx_buf_size - ETHER_MAX_LEN;
 	if ((fc_conf->high_water > max_high_water) ||
-		(fc_conf->high_water < fc_conf->low_water)) {
+	    (fc_conf->high_water < fc_conf->low_water)) {
 		PMD_INIT_LOG(ERR, "e1000 incorrect high/low water value \n");
 		PMD_INIT_LOG(ERR, "high water must <= 0x%x \n", max_high_water);
 		return (-EINVAL);
diff --git a/lib/librte_pmd_e1000/em_rxtx.c b/lib/librte_pmd_e1000/em_rxtx.c
index ce0c115..278b7ee 100644
--- a/lib/librte_pmd_e1000/em_rxtx.c
+++ b/lib/librte_pmd_e1000/em_rxtx.c
@@ -474,12 +474,12 @@ eth_em_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 			tx_last = (uint16_t) (tx_last - txq->nb_tx_desc);
 
 		PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u pktlen=%u"
-			" tx_first=%u tx_last=%u\n",
-			(unsigned) txq->port_id,
-			(unsigned) txq->queue_id,
-			(unsigned) tx_pkt->pkt_len,
-			(unsigned) tx_id,
-			(unsigned) tx_last);
+			   " tx_first=%u tx_last=%u\n",
+			   (unsigned) txq->port_id,
+			   (unsigned) txq->queue_id,
+			   (unsigned) tx_pkt->pkt_len,
+			   (unsigned) tx_id,
+			   (unsigned) tx_last);
 
 		/*
 		 * Make sure there are enough TX descriptors available to
@@ -735,17 +735,17 @@ eth_em_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 		 * frames to its peer(s).
 		 */
 		PMD_RX_LOG(DEBUG, "\nport_id=%u queue_id=%u rx_id=%u "
-			"status=0x%x pkt_len=%u\n",
-			(unsigned) rxq->port_id, (unsigned) rxq->queue_id,
-			(unsigned) rx_id, (unsigned) status,
-			(unsigned) rte_le_to_cpu_16(rxd.length));
+			   "status=0x%x pkt_len=%u\n",
+			   (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
+			   (unsigned) rx_id, (unsigned) status,
+			   (unsigned) rte_le_to_cpu_16(rxd.length));
 
 		nmb = rte_rxmbuf_alloc(rxq->mb_pool);
 		if (nmb == NULL) {
 			PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
-				"queue_id=%u\n",
-				(unsigned) rxq->port_id,
-				(unsigned) rxq->queue_id);
+				   "queue_id=%u\n",
+				   (unsigned) rxq->port_id,
+				   (unsigned) rxq->queue_id);
 			rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
 			break;
 		}
@@ -828,10 +828,10 @@ eth_em_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 	nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
 	if (nb_hold > rxq->rx_free_thresh) {
 		PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
-			"nb_hold=%u nb_rx=%u\n",
-			(unsigned) rxq->port_id, (unsigned) rxq->queue_id,
-			(unsigned) rx_id, (unsigned) nb_hold,
-			(unsigned) nb_rx);
+			   "nb_hold=%u nb_rx=%u\n",
+			   (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
+			   (unsigned) rx_id, (unsigned) nb_hold,
+			   (unsigned) nb_rx);
 		rx_id = (uint16_t) ((rx_id == 0) ?
 			(rxq->nb_rx_desc - 1) : (rx_id - 1));
 		E1000_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
@@ -915,16 +915,16 @@ eth_em_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 		 * frames to its peer(s).
 		 */
 		PMD_RX_LOG(DEBUG, "\nport_id=%u queue_id=%u rx_id=%u "
-			"status=0x%x data_len=%u\n",
-			(unsigned) rxq->port_id, (unsigned) rxq->queue_id,
-			(unsigned) rx_id, (unsigned) status,
-			(unsigned) rte_le_to_cpu_16(rxd.length));
+			   "status=0x%x data_len=%u\n",
+			   (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
+			   (unsigned) rx_id, (unsigned) status,
+			   (unsigned) rte_le_to_cpu_16(rxd.length));
 
 		nmb = rte_rxmbuf_alloc(rxq->mb_pool);
 		if (nmb == NULL) {
 			PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
-				"queue_id=%u\n", (unsigned) rxq->port_id,
-				(unsigned) rxq->queue_id);
+				   "queue_id=%u\n", (unsigned) rxq->port_id,
+				   (unsigned) rxq->queue_id);
 			rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
 			break;
 		}
@@ -1072,10 +1072,10 @@ eth_em_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 	nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
 	if (nb_hold > rxq->rx_free_thresh) {
 		PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
-			"nb_hold=%u nb_rx=%u\n",
-			(unsigned) rxq->port_id, (unsigned) rxq->queue_id,
-			(unsigned) rx_id, (unsigned) nb_hold,
-			(unsigned) nb_rx);
+			   "nb_hold=%u nb_rx=%u\n",
+			   (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
+			   (unsigned) rx_id, (unsigned) nb_hold,
+			   (unsigned) nb_rx);
 		rx_id = (uint16_t) ((rx_id == 0) ?
 			(rxq->nb_rx_desc - 1) : (rx_id - 1));
 		E1000_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
@@ -1312,7 +1312,7 @@ eth_em_tx_queue_setup(struct rte_eth_dev *dev,
 	txq->tx_ring = (struct e1000_data_desc *) tz->addr;
 
 	PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64"\n",
-		txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr);
+		     txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr);
 
 	em_reset_tx_queue(txq);
 
@@ -1442,7 +1442,7 @@ eth_em_rx_queue_setup(struct rte_eth_dev *dev,
 	rxq->rx_ring = (struct e1000_rx_desc *) rz->addr;
 
 	PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64"\n",
-		rxq->sw_ring, rxq->rx_ring, rxq->rx_ring_phys_addr);
+		     rxq->sw_ring, rxq->rx_ring, rxq->rx_ring_phys_addr);
 
 	dev->data->rx_queues[queue_idx] = rxq;
 	em_reset_rx_queue(rxq);
@@ -1605,7 +1605,7 @@ em_alloc_rx_queue_mbufs(struct em_rx_queue *rxq)
 
 		if (mbuf == NULL) {
 			PMD_INIT_LOG(ERR, "RX mbuf alloc failed "
-				"queue_id=%hu\n", rxq->queue_id);
+				     "queue_id=%hu\n", rxq->queue_id);
 			return (-ENOMEM);
 		}
 
diff --git a/lib/librte_pmd_e1000/igb_ethdev.c b/lib/librte_pmd_e1000/igb_ethdev.c
index b45eb24..7cdea10 100644
--- a/lib/librte_pmd_e1000/igb_ethdev.c
+++ b/lib/librte_pmd_e1000/igb_ethdev.c
@@ -651,10 +651,9 @@ eth_igbvf_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
 			ð_dev->data->mac_addrs[0]);
 
 	PMD_INIT_LOG(DEBUG, "\nport %d vendorID=0x%x deviceID=0x%x "
-			"mac.type=%s\n",
-			eth_dev->data->port_id, pci_dev->id.vendor_id,
-			pci_dev->id.device_id,
-			"igb_mac_82576_vf");
+		     "mac.type=%s\n",
+		     eth_dev->data->port_id, pci_dev->id.vendor_id,
+		     pci_dev->id.device_id, "igb_mac_82576_vf");
 
 	return 0;
 }
@@ -889,8 +888,8 @@ eth_igb_start(struct rte_eth_dev *dev)
 
 error_invalid_config:
 	PMD_INIT_LOG(ERR, "Invalid link_speed/link_duplex (%u/%u) for port %u\n",
-			dev->data->dev_conf.link_speed,
-			dev->data->dev_conf.link_duplex, dev->data->port_id);
+		     dev->data->dev_conf.link_speed,
+		     dev->data->dev_conf.link_duplex, dev->data->port_id);
 	igb_dev_clear_queues(dev);
 	return (-EINVAL);
 }
@@ -1790,19 +1789,20 @@ eth_igb_interrupt_action(struct rte_eth_dev *dev)
 		rte_igb_dev_atomic_read_link_status(dev, &link);
 		if (link.link_status) {
 			PMD_INIT_LOG(INFO,
-				" Port %d: Link Up - speed %u Mbps - %s\n",
-				dev->data->port_id, (unsigned)link.link_speed,
-				link.link_duplex == ETH_LINK_FULL_DUPLEX ?
-					"full-duplex" : "half-duplex");
+				     " Port %d: Link Up - speed %u Mbps - %s\n",
+				     dev->data->port_id,
+				     (unsigned)link.link_speed,
+				     link.link_duplex == ETH_LINK_FULL_DUPLEX ?
+				     "full-duplex" : "half-duplex");
 		} else {
 			PMD_INIT_LOG(INFO, " Port %d: Link Down\n",
-						dev->data->port_id);
+				     dev->data->port_id);
 		}
 		PMD_INIT_LOG(INFO, "PCI Address: %04d:%02d:%02d:%d",
-					dev->pci_dev->addr.domain,
-					dev->pci_dev->addr.bus,
-					dev->pci_dev->addr.devid,
-					dev->pci_dev->addr.function);
+			     dev->pci_dev->addr.domain,
+			     dev->pci_dev->addr.bus,
+			     dev->pci_dev->addr.devid,
+			     dev->pci_dev->addr.function);
 		tctl = E1000_READ_REG(hw, E1000_TCTL);
 		rctl = E1000_READ_REG(hw, E1000_RCTL);
 		if (link.link_status) {
@@ -1928,7 +1928,7 @@ eth_igb_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 	/* At least reserve one Ethernet frame for watermark */
 	max_high_water = rx_buf_size - ETHER_MAX_LEN;
 	if ((fc_conf->high_water > max_high_water) ||
-		(fc_conf->high_water < fc_conf->low_water)) {
+	    (fc_conf->high_water < fc_conf->low_water)) {
 		PMD_INIT_LOG(ERR, "e1000 incorrect high/low water value \n");
 		PMD_INIT_LOG(ERR, "high water must <=  0x%x \n", max_high_water);
 		return (-EINVAL);
@@ -2078,7 +2078,7 @@ igbvf_dev_configure(struct rte_eth_dev *dev)
 	struct rte_eth_conf* conf = &dev->data->dev_conf;
 
 	PMD_INIT_LOG(DEBUG, "\nConfigured Virtual Function port id: %d\n",
-		dev->data->port_id);
+		     dev->data->port_id);
 
 	/*
 	 * VF has no ability to enable/disable HW CRC
diff --git a/lib/librte_pmd_e1000/igb_rxtx.c b/lib/librte_pmd_e1000/igb_rxtx.c
index 5600d02..4946b8d 100644
--- a/lib/librte_pmd_e1000/igb_rxtx.c
+++ b/lib/librte_pmd_e1000/igb_rxtx.c
@@ -1847,7 +1847,7 @@ igb_alloc_rx_queue_mbufs(struct igb_rx_queue *rxq)
 
 		if (mbuf == NULL) {
 			PMD_INIT_LOG(ERR, "RX mbuf alloc failed "
-				"queue_id=%hu\n", rxq->queue_id);
+				     "queue_id=%hu\n", rxq->queue_id);
 			return (-ENOMEM);
 		}
 		dma_addr =
-- 
1.7.10.4
- * [dpdk-dev] [PATCH v3 17/20] e1000: clean log messages
  2014-09-17 13:46 [dpdk-dev] [PATCH v3 00/20] cleanup logs in main PMDs David Marchand
                   ` (15 preceding siblings ...)
  2014-09-17 13:46 ` [dpdk-dev] [PATCH v3 16/20] e1000: indent logs sections David Marchand
@ 2014-09-17 13:46 ` David Marchand
  2014-09-17 13:46 ` [dpdk-dev] [PATCH v3 18/20] e1000: always log init messages David Marchand
                   ` (3 subsequent siblings)
  20 siblings, 0 replies; 25+ messages in thread
From: David Marchand @ 2014-09-17 13:46 UTC (permalink / raw)
  To: dev
Clean log messages:
- remove leading \n in some messages,
- remove trailing \n in some messages,
- split multi-line messages,
- introduce a PMD_INIT_FUNC_TRACE macro and use it instead of
  PMD_INIT_LOG(DEBUG, "some_func") (a minimal sketch follows).
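For illustration only, a minimal sketch of the trace-macro replacement described in
the last point (mirroring the igb_ethdev.c hunks below):

    /* before: the function name was duplicated by hand in the message */
    PMD_INIT_LOG(DEBUG, "eth_igbvf_dev_init");

    /* after: the macro expands to PMD_INIT_LOG(DEBUG, " >>"), and
     * PMD_INIT_LOG already prefixes the message with __func__ */
    PMD_INIT_FUNC_TRACE();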
Signed-off-by: David Marchand <david.marchand@6wind.com>
v2 Reviewed-by: Jay Rolette <rolette@infiniteio.com>
v2 Acked-by: Bruce Richardson <bruce.richardson@intel.com>
---
 lib/librte_pmd_e1000/e1000_logs.h |    4 +++-
 lib/librte_pmd_e1000/em_ethdev.c  |   31 ++++++++++++-------------
 lib/librte_pmd_e1000/em_rxtx.c    |   42 ++++++++++++++++-----------------
 lib/librte_pmd_e1000/igb_ethdev.c |   46 ++++++++++++++++++-------------------
 lib/librte_pmd_e1000/igb_rxtx.c   |   35 ++++++++++++++--------------
 5 files changed, 78 insertions(+), 80 deletions(-)
diff --git a/lib/librte_pmd_e1000/e1000_logs.h b/lib/librte_pmd_e1000/e1000_logs.h
index fe6e023..4dd7208 100644
--- a/lib/librte_pmd_e1000/e1000_logs.h
+++ b/lib/librte_pmd_e1000/e1000_logs.h
@@ -37,8 +37,10 @@
 #ifdef RTE_LIBRTE_E1000_DEBUG_INIT
 #define PMD_INIT_LOG(level, fmt, args...) \
 	RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
+#define PMD_INIT_FUNC_TRACE() PMD_INIT_LOG(DEBUG, " >>")
 #else
-#define PMD_INIT_LOG(level, fmt, args...) do { } while(0)
+#define PMD_INIT_LOG(level, fmt, args...) do { } while (0)
+#define PMD_INIT_FUNC_TRACE() do { } while (0)
 #endif
 
 #ifdef RTE_LIBRTE_E1000_DEBUG_RX
diff --git a/lib/librte_pmd_e1000/em_ethdev.c b/lib/librte_pmd_e1000/em_ethdev.c
index 71aee67..3f2897e 100644
--- a/lib/librte_pmd_e1000/em_ethdev.c
+++ b/lib/librte_pmd_e1000/em_ethdev.c
@@ -272,7 +272,7 @@ eth_em_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
 	/* initialize the vfta */
 	memset(shadow_vfta, 0, sizeof(*shadow_vfta));
 
-	PMD_INIT_LOG(INFO, "port_id %d vendorID=0x%x deviceID=0x%x\n",
+	PMD_INIT_LOG(INFO, "port_id %d vendorID=0x%x deviceID=0x%x",
 		     eth_dev->data->port_id, pci_dev->id.vendor_id,
 		     pci_dev->id.device_id);
 
@@ -306,17 +306,17 @@ em_hw_init(struct e1000_hw *hw)
 
 	diag = hw->mac.ops.init_params(hw);
 	if (diag != 0) {
-		PMD_INIT_LOG(ERR, "MAC Initialization Error\n");
+		PMD_INIT_LOG(ERR, "MAC Initialization Error");
 		return diag;
 	}
 	diag = hw->nvm.ops.init_params(hw);
 	if (diag != 0) {
-		PMD_INIT_LOG(ERR, "NVM Initialization Error\n");
+		PMD_INIT_LOG(ERR, "NVM Initialization Error");
 		return diag;
 	}
 	diag = hw->phy.ops.init_params(hw);
 	if (diag != 0) {
-		PMD_INIT_LOG(ERR, "PHY Initialization Error\n");
+		PMD_INIT_LOG(ERR, "PHY Initialization Error");
 		return diag;
 	}
 	(void) e1000_get_bus_info(hw);
@@ -390,11 +390,10 @@ eth_em_configure(struct rte_eth_dev *dev)
 	struct e1000_interrupt *intr =
 		E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
 
-	PMD_INIT_LOG(DEBUG, ">>");
-
+	PMD_INIT_FUNC_TRACE();
 	intr->flags |= E1000_FLAG_NEED_LINK_UPDATE;
+	PMD_INIT_FUNC_TRACE();
 
-	PMD_INIT_LOG(DEBUG, "<<");
 	return (0);
 }
 
@@ -453,7 +452,7 @@ eth_em_start(struct rte_eth_dev *dev)
 		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	int ret, mask;
 
-	PMD_INIT_LOG(DEBUG, ">>");
+	PMD_INIT_FUNC_TRACE();
 
 	eth_em_stop(dev);
 
@@ -573,8 +572,8 @@ eth_em_start(struct rte_eth_dev *dev)
 	return (0);
 
 error_invalid_config:
-	PMD_INIT_LOG(ERR, "Invalid link_speed/link_duplex (%u/%u) for port "
-		     "%u\n", dev->data->dev_conf.link_speed,
+	PMD_INIT_LOG(ERR, "Invalid link_speed/link_duplex (%u/%u) for port %u",
+		     dev->data->dev_conf.link_speed,
 		     dev->data->dev_conf.link_duplex, dev->data->port_id);
 	em_dev_clear_queues(dev);
 	return (-EINVAL);
@@ -1296,12 +1295,12 @@ eth_em_interrupt_action(struct rte_eth_dev *dev)
 	memset(&link, 0, sizeof(link));
 	rte_em_dev_atomic_read_link_status(dev, &link);
 	if (link.link_status) {
-		PMD_INIT_LOG(INFO, " Port %d: Link Up - speed %u Mbps - %s\n",
+		PMD_INIT_LOG(INFO, " Port %d: Link Up - speed %u Mbps - %s",
 			     dev->data->port_id, (unsigned)link.link_speed,
 			     link.link_duplex == ETH_LINK_FULL_DUPLEX ?
 			     "full-duplex" : "half-duplex");
 	} else {
-		PMD_INIT_LOG(INFO, " Port %d: Link Down\n", dev->data->port_id);
+		PMD_INIT_LOG(INFO, " Port %d: Link Down", dev->data->port_id);
 	}
 	PMD_INIT_LOG(INFO, "PCI Address: %04d:%02d:%02d:%d",
 		     dev->pci_dev->addr.domain, dev->pci_dev->addr.bus,
@@ -1425,14 +1424,14 @@ eth_em_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 	if (fc_conf->autoneg != hw->mac.autoneg)
 		return -ENOTSUP;
 	rx_buf_size = em_get_rx_buffer_size(hw);
-	PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x \n", rx_buf_size);
+	PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);
 
 	/* At least reserve one Ethernet frame for watermark */
 	max_high_water = rx_buf_size - ETHER_MAX_LEN;
 	if ((fc_conf->high_water > max_high_water) ||
 	    (fc_conf->high_water < fc_conf->low_water)) {
-		PMD_INIT_LOG(ERR, "e1000 incorrect high/low water value \n");
-		PMD_INIT_LOG(ERR, "high water must <= 0x%x \n", max_high_water);
+		PMD_INIT_LOG(ERR, "e1000 incorrect high/low water value");
+		PMD_INIT_LOG(ERR, "high water must <= 0x%x", max_high_water);
 		return (-EINVAL);
 	}
 
@@ -1462,7 +1461,7 @@ eth_em_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 		return 0;
 	}
 
-	PMD_INIT_LOG(ERR, "e1000_setup_link_generic = 0x%x \n", err);
+	PMD_INIT_LOG(ERR, "e1000_setup_link_generic = 0x%x", err);
 	return (-EIO);
 }
 
diff --git a/lib/librte_pmd_e1000/em_rxtx.c b/lib/librte_pmd_e1000/em_rxtx.c
index 278b7ee..444fd02 100644
--- a/lib/librte_pmd_e1000/em_rxtx.c
+++ b/lib/librte_pmd_e1000/em_rxtx.c
@@ -340,8 +340,7 @@ em_xmit_cleanup(struct em_tx_queue *txq)
 	{
 		PMD_TX_FREE_LOG(DEBUG,
 				"TX descriptor %4u is not done"
-				"(port=%d queue=%d)",
-				desc_to_clean_to,
+				"(port=%d queue=%d)", desc_to_clean_to,
 				txq->port_id, txq->queue_id);
 		/* Failed to clean any descriptors, better luck next time */
 		return -(1);
@@ -357,9 +356,9 @@ em_xmit_cleanup(struct em_tx_queue *txq)
 
 	PMD_TX_FREE_LOG(DEBUG,
 			"Cleaning %4u TX descriptors: %4u to %4u "
-			"(port=%d queue=%d)",
-			nb_tx_to_clean, last_desc_cleaned, desc_to_clean_to,
-			txq->port_id, txq->queue_id);
+			"(port=%d queue=%d)", nb_tx_to_clean,
+			last_desc_cleaned, desc_to_clean_to, txq->port_id,
+			txq->queue_id);
 
 	/*
 	 * The last descriptor to clean is done, so that means all the
@@ -474,7 +473,7 @@ eth_em_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 			tx_last = (uint16_t) (tx_last - txq->nb_tx_desc);
 
 		PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u pktlen=%u"
-			   " tx_first=%u tx_last=%u\n",
+			   " tx_first=%u tx_last=%u",
 			   (unsigned) txq->port_id,
 			   (unsigned) txq->queue_id,
 			   (unsigned) tx_pkt->pkt_len,
@@ -487,8 +486,7 @@ eth_em_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 		 * nb_used better be less than or equal to txq->tx_rs_thresh
 		 */
 		while (unlikely (nb_used > txq->nb_tx_free)) {
-			PMD_TX_FREE_LOG(DEBUG,
-					"Not enough free TX descriptors "
+			PMD_TX_FREE_LOG(DEBUG, "Not enough free TX descriptors "
 					"nb_used=%4u nb_free=%4u "
 					"(port=%d queue=%d)",
 					nb_used, txq->nb_tx_free,
@@ -611,8 +609,8 @@ eth_em_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 		/* Set RS bit only on threshold packets' last descriptor */
 		if (txq->nb_tx_used >= txq->tx_rs_thresh) {
 			PMD_TX_FREE_LOG(DEBUG,
-					"Setting RS bit on TXD id="
-					"%4u (port=%d queue=%d)",
+					"Setting RS bit on TXD id=%4u "
+					"(port=%d queue=%d)",
 					tx_last, txq->port_id, txq->queue_id);
 
 			cmd_type_len |= E1000_TXD_CMD_RS;
@@ -734,8 +732,8 @@ eth_em_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 		 * to happen by sending specific "back-pressure" flow control
 		 * frames to its peer(s).
 		 */
-		PMD_RX_LOG(DEBUG, "\nport_id=%u queue_id=%u rx_id=%u "
-			   "status=0x%x pkt_len=%u\n",
+		PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
+			   "status=0x%x pkt_len=%u",
 			   (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
 			   (unsigned) rx_id, (unsigned) status,
 			   (unsigned) rte_le_to_cpu_16(rxd.length));
@@ -743,7 +741,7 @@ eth_em_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 		nmb = rte_rxmbuf_alloc(rxq->mb_pool);
 		if (nmb == NULL) {
 			PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
-				   "queue_id=%u\n",
+				   "queue_id=%u",
 				   (unsigned) rxq->port_id,
 				   (unsigned) rxq->queue_id);
 			rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
@@ -828,7 +826,7 @@ eth_em_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 	nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
 	if (nb_hold > rxq->rx_free_thresh) {
 		PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
-			   "nb_hold=%u nb_rx=%u\n",
+			   "nb_hold=%u nb_rx=%u",
 			   (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
 			   (unsigned) rx_id, (unsigned) nb_hold,
 			   (unsigned) nb_rx);
@@ -914,8 +912,8 @@ eth_em_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 		 * to happen by sending specific "back-pressure" flow control
 		 * frames to its peer(s).
 		 */
-		PMD_RX_LOG(DEBUG, "\nport_id=%u queue_id=%u rx_id=%u "
-			   "status=0x%x data_len=%u\n",
+		PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
+			   "status=0x%x data_len=%u",
 			   (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
 			   (unsigned) rx_id, (unsigned) status,
 			   (unsigned) rte_le_to_cpu_16(rxd.length));
@@ -923,7 +921,7 @@ eth_em_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 		nmb = rte_rxmbuf_alloc(rxq->mb_pool);
 		if (nmb == NULL) {
 			PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
-				   "queue_id=%u\n", (unsigned) rxq->port_id,
+				   "queue_id=%u", (unsigned) rxq->port_id,
 				   (unsigned) rxq->queue_id);
 			rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
 			break;
@@ -1072,7 +1070,7 @@ eth_em_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 	nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
 	if (nb_hold > rxq->rx_free_thresh) {
 		PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
-			   "nb_hold=%u nb_rx=%u\n",
+			   "nb_hold=%u nb_rx=%u",
 			   (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
 			   (unsigned) rx_id, (unsigned) nb_hold,
 			   (unsigned) nb_rx);
@@ -1311,7 +1309,7 @@ eth_em_tx_queue_setup(struct rte_eth_dev *dev,
 #endif
 	txq->tx_ring = (struct e1000_data_desc *) tz->addr;
 
-	PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64"\n",
+	PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
 		     txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr);
 
 	em_reset_tx_queue(txq);
@@ -1441,7 +1439,7 @@ eth_em_rx_queue_setup(struct rte_eth_dev *dev,
 #endif
 	rxq->rx_ring = (struct e1000_rx_desc *) rz->addr;
 
-	PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64"\n",
+	PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
 		     rxq->sw_ring, rxq->rx_ring, rxq->rx_ring_phys_addr);
 
 	dev->data->rx_queues[queue_idx] = rxq;
@@ -1459,7 +1457,7 @@ eth_em_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 	uint32_t desc = 0;
 
 	if (rx_queue_id >= dev->data->nb_rx_queues) {
-		PMD_RX_LOG(DEBUG,"Invalid RX queue_id=%d\n", rx_queue_id);
+		PMD_RX_LOG(DEBUG, "Invalid RX queue_id=%d", rx_queue_id);
 		return 0;
 	}
 
@@ -1605,7 +1603,7 @@ em_alloc_rx_queue_mbufs(struct em_rx_queue *rxq)
 
 		if (mbuf == NULL) {
 			PMD_INIT_LOG(ERR, "RX mbuf alloc failed "
-				     "queue_id=%hu\n", rxq->queue_id);
+				     "queue_id=%hu", rxq->queue_id);
 			return (-ENOMEM);
 		}
 
diff --git a/lib/librte_pmd_e1000/igb_ethdev.c b/lib/librte_pmd_e1000/igb_ethdev.c
index 7cdea10..c9acdc5 100644
--- a/lib/librte_pmd_e1000/igb_ethdev.c
+++ b/lib/librte_pmd_e1000/igb_ethdev.c
@@ -565,7 +565,7 @@ eth_igb_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
 	E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
 	E1000_WRITE_FLUSH(hw);
 
-	PMD_INIT_LOG(INFO, "port_id %d vendorID=0x%x deviceID=0x%x\n",
+	PMD_INIT_LOG(INFO, "port_id %d vendorID=0x%x deviceID=0x%x",
 		     eth_dev->data->port_id, pci_dev->id.vendor_id,
 		     pci_dev->id.device_id);
 
@@ -598,7 +598,7 @@ eth_igbvf_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
 		E1000_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
 	int diag;
 
-	PMD_INIT_LOG(DEBUG, "eth_igbvf_dev_init");
+	PMD_INIT_FUNC_TRACE();
 
 	eth_dev->dev_ops = &igbvf_eth_dev_ops;
 	eth_dev->rx_pkt_burst = ð_igb_recv_pkts;
@@ -650,8 +650,8 @@ eth_igbvf_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
 	ether_addr_copy((struct ether_addr *) hw->mac.perm_addr,
 			ð_dev->data->mac_addrs[0]);
 
-	PMD_INIT_LOG(DEBUG, "\nport %d vendorID=0x%x deviceID=0x%x "
-		     "mac.type=%s\n",
+	PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x "
+		     "mac.type=%s",
 		     eth_dev->data->port_id, pci_dev->id.vendor_id,
 		     pci_dev->id.device_id, "igb_mac_82576_vf");
 
@@ -719,11 +719,9 @@ eth_igb_configure(struct rte_eth_dev *dev)
 	struct e1000_interrupt *intr =
 		E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
 
-	PMD_INIT_LOG(DEBUG, ">>");
-
+	PMD_INIT_FUNC_TRACE();
 	intr->flags |= E1000_FLAG_NEED_LINK_UPDATE;
-
-	PMD_INIT_LOG(DEBUG, "<<");
+	PMD_INIT_FUNC_TRACE();
 
 	return (0);
 }
@@ -736,7 +734,7 @@ eth_igb_start(struct rte_eth_dev *dev)
 	int ret, i, mask;
 	uint32_t ctrl_ext;
 
-	PMD_INIT_LOG(DEBUG, ">>");
+	PMD_INIT_FUNC_TRACE();
 
 	/* Power up the phy. Needed to make the link go Up */
 	e1000_power_up_phy(hw);
@@ -887,7 +885,7 @@ eth_igb_start(struct rte_eth_dev *dev)
 	return (0);
 
 error_invalid_config:
-	PMD_INIT_LOG(ERR, "Invalid link_speed/link_duplex (%u/%u) for port %u\n",
+	PMD_INIT_LOG(ERR, "Invalid link_speed/link_duplex (%u/%u) for port %u",
 		     dev->data->dev_conf.link_speed,
 		     dev->data->dev_conf.link_duplex, dev->data->port_id);
 	igb_dev_clear_queues(dev);
@@ -1789,13 +1787,13 @@ eth_igb_interrupt_action(struct rte_eth_dev *dev)
 		rte_igb_dev_atomic_read_link_status(dev, &link);
 		if (link.link_status) {
 			PMD_INIT_LOG(INFO,
-				     " Port %d: Link Up - speed %u Mbps - %s\n",
+				     " Port %d: Link Up - speed %u Mbps - %s",
 				     dev->data->port_id,
 				     (unsigned)link.link_speed,
 				     link.link_duplex == ETH_LINK_FULL_DUPLEX ?
 				     "full-duplex" : "half-duplex");
 		} else {
-			PMD_INIT_LOG(INFO, " Port %d: Link Down\n",
+			PMD_INIT_LOG(INFO, " Port %d: Link Down",
 				     dev->data->port_id);
 		}
 		PMD_INIT_LOG(INFO, "PCI Address: %04d:%02d:%02d:%d",
@@ -1923,14 +1921,14 @@ eth_igb_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 	if (fc_conf->autoneg != hw->mac.autoneg)
 		return -ENOTSUP;
 	rx_buf_size = igb_get_rx_buffer_size(hw);
-	PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x \n", rx_buf_size);
+	PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);
 
 	/* At least reserve one Ethernet frame for watermark */
 	max_high_water = rx_buf_size - ETHER_MAX_LEN;
 	if ((fc_conf->high_water > max_high_water) ||
 	    (fc_conf->high_water < fc_conf->low_water)) {
-		PMD_INIT_LOG(ERR, "e1000 incorrect high/low water value \n");
-		PMD_INIT_LOG(ERR, "high water must <=  0x%x \n", max_high_water);
+		PMD_INIT_LOG(ERR, "e1000 incorrect high/low water value");
+		PMD_INIT_LOG(ERR, "high water must <=  0x%x", max_high_water);
 		return (-EINVAL);
 	}
 
@@ -1960,7 +1958,7 @@ eth_igb_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 		return 0;
 	}
 
-	PMD_INIT_LOG(ERR, "e1000_setup_link_generic = 0x%x \n", err);
+	PMD_INIT_LOG(ERR, "e1000_setup_link_generic = 0x%x", err);
 	return (-EIO);
 }
 
@@ -1995,7 +1993,7 @@ eth_igb_rar_clear(struct rte_eth_dev *dev, uint32_t index)
 static void
 igbvf_intr_disable(struct e1000_hw *hw)
 {
-	PMD_INIT_LOG(DEBUG, "igbvf_intr_disable");
+	PMD_INIT_FUNC_TRACE();
 
 	/* Clear interrupt mask to stop from interrupts being generated */
 	E1000_WRITE_REG(hw, E1000_EIMC, 0xFFFF);
@@ -2077,7 +2075,7 @@ igbvf_dev_configure(struct rte_eth_dev *dev)
 {
 	struct rte_eth_conf* conf = &dev->data->dev_conf;
 
-	PMD_INIT_LOG(DEBUG, "\nConfigured Virtual Function port id: %d\n",
+	PMD_INIT_LOG(DEBUG, "Configured Virtual Function port id: %d",
 		     dev->data->port_id);
 
 	/*
@@ -2086,12 +2084,12 @@ igbvf_dev_configure(struct rte_eth_dev *dev)
 	 */
 #ifndef RTE_LIBRTE_E1000_PF_DISABLE_STRIP_CRC
 	if (!conf->rxmode.hw_strip_crc) {
-		PMD_INIT_LOG(INFO, "VF can't disable HW CRC Strip\n");
+		PMD_INIT_LOG(INFO, "VF can't disable HW CRC Strip");
 		conf->rxmode.hw_strip_crc = 1;
 	}
 #else
 	if (conf->rxmode.hw_strip_crc) {
-		PMD_INIT_LOG(INFO, "VF can't enable HW CRC Strip\n");
+		PMD_INIT_LOG(INFO, "VF can't enable HW CRC Strip");
 		conf->rxmode.hw_strip_crc = 0;
 	}
 #endif
@@ -2106,7 +2104,7 @@ igbvf_dev_start(struct rte_eth_dev *dev)
 		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	int ret;
 
-	PMD_INIT_LOG(DEBUG, "igbvf_dev_start");
+	PMD_INIT_FUNC_TRACE();
 
 	hw->mac.ops.reset_hw(hw);
 
@@ -2129,7 +2127,7 @@ igbvf_dev_start(struct rte_eth_dev *dev)
 static void
 igbvf_dev_stop(struct rte_eth_dev *dev)
 {
-	PMD_INIT_LOG(DEBUG, "igbvf_dev_stop");
+	PMD_INIT_FUNC_TRACE();
 
 	igbvf_stop_adapter(dev);
 
@@ -2147,7 +2145,7 @@ igbvf_dev_close(struct rte_eth_dev *dev)
 {
 	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
-	PMD_INIT_LOG(DEBUG, "igbvf_dev_close");
+	PMD_INIT_FUNC_TRACE();
 
 	e1000_reset_hw(hw);
 
@@ -2203,7 +2201,7 @@ igbvf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
 	uint32_t vid_bit = 0;
 	int ret = 0;
 
-	PMD_INIT_LOG(DEBUG, "igbvf_vlan_filter_set");
+	PMD_INIT_FUNC_TRACE();
 
 	/*vind is not used in VF driver, set to 0, check ixgbe_set_vfta_vf*/
 	ret = igbvf_set_vfta(hw, vlan_id, !!on);
diff --git a/lib/librte_pmd_e1000/igb_rxtx.c b/lib/librte_pmd_e1000/igb_rxtx.c
index 4946b8d..d615d3e 100644
--- a/lib/librte_pmd_e1000/igb_rxtx.c
+++ b/lib/librte_pmd_e1000/igb_rxtx.c
@@ -417,7 +417,7 @@ eth_igb_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 			tx_last = (uint16_t) (tx_last - txq->nb_tx_desc);
 
 		PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u pktlen=%u"
-			   " tx_first=%u tx_last=%u\n",
+			   " tx_first=%u tx_last=%u",
 			   (unsigned) txq->port_id,
 			   (unsigned) txq->queue_id,
 			   (unsigned) pkt_len,
@@ -718,8 +718,8 @@ eth_igb_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 		 * to happen by sending specific "back-pressure" flow control
 		 * frames to its peer(s).
 		 */
-		PMD_RX_LOG(DEBUG, "\nport_id=%u queue_id=%u rx_id=%u "
-			   "staterr=0x%x pkt_len=%u\n",
+		PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
+			   "staterr=0x%x pkt_len=%u",
 			   (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
 			   (unsigned) rx_id, (unsigned) staterr,
 			   (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length));
@@ -727,7 +727,7 @@ eth_igb_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 		nmb = rte_rxmbuf_alloc(rxq->mb_pool);
 		if (nmb == NULL) {
 			PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
-				   "queue_id=%u\n", (unsigned) rxq->port_id,
+				   "queue_id=%u", (unsigned) rxq->port_id,
 				   (unsigned) rxq->queue_id);
 			rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
 			break;
@@ -814,7 +814,7 @@ eth_igb_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 	nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
 	if (nb_hold > rxq->rx_free_thresh) {
 		PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
-			   "nb_hold=%u nb_rx=%u\n",
+			   "nb_hold=%u nb_rx=%u",
 			   (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
 			   (unsigned) rx_id, (unsigned) nb_hold,
 			   (unsigned) nb_rx);
@@ -901,8 +901,8 @@ eth_igb_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 		 * to happen by sending specific "back-pressure" flow control
 		 * frames to its peer(s).
 		 */
-		PMD_RX_LOG(DEBUG, "\nport_id=%u queue_id=%u rx_id=%u "
-			   "staterr=0x%x data_len=%u\n",
+		PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
+			   "staterr=0x%x data_len=%u",
 			   (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
 			   (unsigned) rx_id, (unsigned) staterr,
 			   (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length));
@@ -910,7 +910,7 @@ eth_igb_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 		nmb = rte_rxmbuf_alloc(rxq->mb_pool);
 		if (nmb == NULL) {
 			PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
-				   "queue_id=%u\n", (unsigned) rxq->port_id,
+				   "queue_id=%u", (unsigned) rxq->port_id,
 				   (unsigned) rxq->queue_id);
 			rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
 			break;
@@ -1068,7 +1068,7 @@ eth_igb_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 	nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
 	if (nb_hold > rxq->rx_free_thresh) {
 		PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
-			   "nb_hold=%u nb_rx=%u\n",
+			   "nb_hold=%u nb_rx=%u",
 			   (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
 			   (unsigned) rx_id, (unsigned) nb_hold,
 			   (unsigned) nb_rx);
@@ -1290,7 +1290,7 @@ eth_igb_tx_queue_setup(struct rte_eth_dev *dev,
 		igb_tx_queue_release(txq);
 		return (-ENOMEM);
 	}
-	PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64"\n",
+	PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
 		     txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr);
 
 	igb_reset_tx_queue(txq, dev);
@@ -1428,7 +1428,7 @@ eth_igb_rx_queue_setup(struct rte_eth_dev *dev,
 		igb_rx_queue_release(rxq);
 		return (-ENOMEM);
 	}
-	PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64"\n",
+	PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
 		     rxq->sw_ring, rxq->rx_ring, rxq->rx_ring_phys_addr);
 
 	dev->data->rx_queues[queue_idx] = rxq;
@@ -1446,7 +1446,7 @@ eth_igb_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 	uint32_t desc = 0;
 
 	if (rx_queue_id >= dev->data->nb_rx_queues) {
-		PMD_RX_LOG(ERR, "Invalid RX queue id=%d\n", rx_queue_id);
+		PMD_RX_LOG(ERR, "Invalid RX queue id=%d", rx_queue_id);
 		return 0;
 	}
 
@@ -1745,7 +1745,7 @@ igb_is_vmdq_supported(const struct rte_eth_dev *dev)
 	case e1000_i210:
 	case e1000_i211:
 	default:
-		PMD_INIT_LOG(ERR, "Cannot support VMDq feature\n");
+		PMD_INIT_LOG(ERR, "Cannot support VMDq feature");
 		return 0;
 	}
 }
@@ -1758,7 +1758,8 @@ igb_vmdq_rx_hw_configure(struct rte_eth_dev *dev)
 	uint32_t mrqc, vt_ctl, vmolr, rctl;
 	int i;
 
-	PMD_INIT_LOG(DEBUG, ">>");
+	PMD_INIT_FUNC_TRACE();
+
 	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	cfg = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;
 
@@ -1847,7 +1848,7 @@ igb_alloc_rx_queue_mbufs(struct igb_rx_queue *rxq)
 
 		if (mbuf == NULL) {
 			PMD_INIT_LOG(ERR, "RX mbuf alloc failed "
-				     "queue_id=%hu\n", rxq->queue_id);
+				     "queue_id=%hu", rxq->queue_id);
 			return (-ENOMEM);
 		}
 		dma_addr =
@@ -2292,7 +2293,7 @@ eth_igbvf_rx_init(struct rte_eth_dev *dev)
 			 * to avoid Write-Back not triggered sometimes
 			 */
 			rxdctl |= 0x10000;
-			PMD_INIT_LOG(DEBUG, "Force set RX WTHRESH to 1 !\n");
+			PMD_INIT_LOG(DEBUG, "Force set RX WTHRESH to 1 !");
 		}
 		else
 			rxdctl |= ((rxq->wthresh & 0x1F) << 16);
@@ -2360,7 +2361,7 @@ eth_igbvf_tx_init(struct rte_eth_dev *dev)
 			 * to avoid Write-Back not triggered sometimes
 			 */
 			txdctl |= 0x10000;
-			PMD_INIT_LOG(DEBUG, "Force set TX WTHRESH to 1 !\n");
+			PMD_INIT_LOG(DEBUG, "Force set TX WTHRESH to 1 !");
 		}
 		else
 			txdctl |= ((txq->wthresh & 0x1F) << 16);
-- 
1.7.10.4
- * [dpdk-dev] [PATCH v3 18/20] e1000: always log init messages
  2014-09-17 13:46 [dpdk-dev] [PATCH v3 00/20] cleanup logs in main PMDs David Marchand
                   ` (16 preceding siblings ...)
  2014-09-17 13:46 ` [dpdk-dev] [PATCH v3 17/20] e1000: clean log messages David Marchand
@ 2014-09-17 13:46 ` David Marchand
  2014-09-17 13:46 ` [dpdk-dev] [PATCH v3 19/20] e1000: add a message when forcing scatter mode David Marchand
                   ` (2 subsequent siblings)
  20 siblings, 0 replies; 25+ messages in thread
From: David Marchand @ 2014-09-17 13:46 UTC (permalink / raw)
  To: dev
'init' messages should always be logged and filtered at runtime by rte_log.
All the more so as these messages are not in the datapath.
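(Illustration only, not part of the patch: with PMD_INIT_LOG compiled in unconditionally, an application can still silence these messages at runtime through the rte_log level, for instance with rte_set_log_level() as used by the --log-level patch at the end of this series. A minimal sketch, assuming the standard rte_log API:)

#include <rte_log.h>

/* Sketch: discard messages less important than WARNING (INFO, DEBUG, ...)
 * at runtime, which also hides the DEBUG-level PMD init messages. */
static void
quiet_init_logs(void)
{
	rte_set_log_level(RTE_LOG_WARNING);
}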
Signed-off-by: David Marchand <david.marchand@6wind.com>
Acked-by: Bruce Richardson <bruce.richardson@intel.com>
---
 lib/librte_pmd_e1000/e1000_logs.h |    7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)
diff --git a/lib/librte_pmd_e1000/e1000_logs.h b/lib/librte_pmd_e1000/e1000_logs.h
index 4dd7208..4a92804 100644
--- a/lib/librte_pmd_e1000/e1000_logs.h
+++ b/lib/librte_pmd_e1000/e1000_logs.h
@@ -34,12 +34,13 @@
 #ifndef _E1000_LOGS_H_
 #define _E1000_LOGS_H_
 
-#ifdef RTE_LIBRTE_E1000_DEBUG_INIT
 #define PMD_INIT_LOG(level, fmt, args...) \
-	RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
+	rte_log(RTE_LOG_ ## level, RTE_LOGTYPE_PMD, \
+		"PMD: %s(): " fmt "\n", __func__, ##args)
+
+#ifdef RTE_LIBRTE_E1000_DEBUG_INIT
 #define PMD_INIT_FUNC_TRACE() PMD_INIT_LOG(DEBUG, " >>")
 #else
-#define PMD_INIT_LOG(level, fmt, args...) do { } while (0)
 #define PMD_INIT_FUNC_TRACE() do { } while (0)
 #endif
 
-- 
1.7.10.4
- * [dpdk-dev] [PATCH v3 19/20] e1000: add a message when forcing scatter mode
  2014-09-17 13:46 [dpdk-dev] [PATCH v3 00/20] cleanup logs in main PMDs David Marchand
                   ` (17 preceding siblings ...)
  2014-09-17 13:46 ` [dpdk-dev] [PATCH v3 18/20] e1000: always log init messages David Marchand
@ 2014-09-17 13:46 ` David Marchand
  2014-09-17 13:46 ` [dpdk-dev] [PATCH v3 20/20] eal: set log level from command line David Marchand
  2014-09-19  7:52 ` [dpdk-dev] [PATCH v3 00/20] cleanup logs in main PMDs Thomas Monjalon
  20 siblings, 0 replies; 25+ messages in thread
From: David Marchand @ 2014-09-17 13:46 UTC (permalink / raw)
  To: dev
Signed-off-by: David Marchand <david.marchand@6wind.com>
Acked-by: Bruce Richardson <bruce.richardson@intel.com>
---
 lib/librte_pmd_e1000/em_rxtx.c  |    4 ++++
 lib/librte_pmd_e1000/igb_rxtx.c |   14 ++++++++++++++
 2 files changed, 18 insertions(+)
diff --git a/lib/librte_pmd_e1000/em_rxtx.c b/lib/librte_pmd_e1000/em_rxtx.c
index 444fd02..f7a9c3a 100644
--- a/lib/librte_pmd_e1000/em_rxtx.c
+++ b/lib/librte_pmd_e1000/em_rxtx.c
@@ -1732,6 +1732,8 @@ eth_em_rx_init(struct rte_eth_dev *dev)
 		 */
 		if (dev->data->dev_conf.rxmode.jumbo_frame ||
 				rctl_bsize < ETHER_MAX_LEN) {
+			if (!dev->data->scattered_rx)
+				PMD_INIT_LOG(DEBUG, "forcing scatter mode");
 			dev->rx_pkt_burst =
 				(eth_rx_burst_t)eth_em_recv_scattered_pkts;
 			dev->data->scattered_rx = 1;
@@ -1739,6 +1741,8 @@ eth_em_rx_init(struct rte_eth_dev *dev)
 	}
 
 	if (dev->data->dev_conf.rxmode.enable_scatter) {
+		if (!dev->data->scattered_rx)
+			PMD_INIT_LOG(DEBUG, "forcing scatter mode");
 		dev->rx_pkt_burst = eth_em_recv_scattered_pkts;
 		dev->data->scattered_rx = 1;
 	}
diff --git a/lib/librte_pmd_e1000/igb_rxtx.c b/lib/librte_pmd_e1000/igb_rxtx.c
index d615d3e..768cc0e 100644
--- a/lib/librte_pmd_e1000/igb_rxtx.c
+++ b/lib/librte_pmd_e1000/igb_rxtx.c
@@ -1997,6 +1997,9 @@ eth_igb_rx_init(struct rte_eth_dev *dev)
 			/* It adds dual VLAN length for supporting dual VLAN */
 			if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
 						2 * VLAN_TAG_SIZE) > buf_size){
+				if (!dev->data->scattered_rx)
+					PMD_INIT_LOG(DEBUG,
+						     "forcing scatter mode");
 				dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
 				dev->data->scattered_rx = 1;
 			}
@@ -2006,6 +2009,8 @@ eth_igb_rx_init(struct rte_eth_dev *dev)
 			 */
 			if ((rctl_bsize == 0) || (rctl_bsize > buf_size))
 				rctl_bsize = buf_size;
+			if (!dev->data->scattered_rx)
+				PMD_INIT_LOG(DEBUG, "forcing scatter mode");
 			dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
 			dev->data->scattered_rx = 1;
 		}
@@ -2027,6 +2032,8 @@ eth_igb_rx_init(struct rte_eth_dev *dev)
 	}
 
 	if (dev->data->dev_conf.rxmode.enable_scatter) {
+		if (!dev->data->scattered_rx)
+			PMD_INIT_LOG(DEBUG, "forcing scatter mode");
 		dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
 		dev->data->scattered_rx = 1;
 	}
@@ -2261,6 +2268,9 @@ eth_igbvf_rx_init(struct rte_eth_dev *dev)
 			/* It adds dual VLAN length for supporting dual VLAN */
 			if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
 						2 * VLAN_TAG_SIZE) > buf_size){
+				if (!dev->data->scattered_rx)
+					PMD_INIT_LOG(DEBUG,
+						     "forcing scatter mode");
 				dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
 				dev->data->scattered_rx = 1;
 			}
@@ -2270,6 +2280,8 @@ eth_igbvf_rx_init(struct rte_eth_dev *dev)
 			 */
 			if ((rctl_bsize == 0) || (rctl_bsize > buf_size))
 				rctl_bsize = buf_size;
+			if (!dev->data->scattered_rx)
+				PMD_INIT_LOG(DEBUG, "forcing scatter mode");
 			dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
 			dev->data->scattered_rx = 1;
 		}
@@ -2301,6 +2313,8 @@ eth_igbvf_rx_init(struct rte_eth_dev *dev)
 	}
 
 	if (dev->data->dev_conf.rxmode.enable_scatter) {
+		if (!dev->data->scattered_rx)
+			PMD_INIT_LOG(DEBUG, "forcing scatter mode");
 		dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
 		dev->data->scattered_rx = 1;
 	}
-- 
1.7.10.4
- * [dpdk-dev] [PATCH v3 20/20] eal: set log level from command line
  2014-09-17 13:46 [dpdk-dev] [PATCH v3 00/20] cleanup logs in main PMDs David Marchand
                   ` (18 preceding siblings ...)
  2014-09-17 13:46 ` [dpdk-dev] [PATCH v3 19/20] e1000: add a message when forcing scatter mode David Marchand
@ 2014-09-17 13:46 ` David Marchand
  2014-09-17 14:45   ` Neil Horman
  2014-09-19  7:52 ` [dpdk-dev] [PATCH v3 00/20] cleanup logs in main PMDs Thomas Monjalon
  20 siblings, 1 reply; 25+ messages in thread
From: David Marchand @ 2014-09-17 13:46 UTC (permalink / raw)
  To: dev
Add a --log-level option to set the default eal log level.
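(Illustration, not part of the commit message: an application would then be launched with something like "--log-level 8" appended to its usual EAL arguments, 8 corresponding to RTE_LOG_DEBUG in rte_log.h; the value is parsed with strtoul() using base 0, so decimal, octal and hexadecimal input are all accepted.)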
Signed-off-by: David Marchand <david.marchand@6wind.com>
Acked-by: Bruce Richardson <bruce.richardson@intel.com>
---
 lib/librte_eal/bsdapp/eal/eal.c                    |   42 +++++++++++++++++++
 .../bsdapp/eal/include/eal_internal_cfg.h          |    1 +
 lib/librte_eal/linuxapp/eal/eal.c                  |   44 +++++++++++++++++++-
 .../linuxapp/eal/include/eal_internal_cfg.h        |    1 +
 4 files changed, 87 insertions(+), 1 deletion(-)
diff --git a/lib/librte_eal/bsdapp/eal/eal.c b/lib/librte_eal/bsdapp/eal/eal.c
index 71f93e0..2f84742 100644
--- a/lib/librte_eal/bsdapp/eal/eal.c
+++ b/lib/librte_eal/bsdapp/eal/eal.c
@@ -94,6 +94,7 @@
 #define OPT_PCI_BLACKLIST "pci-blacklist"
 #define OPT_VDEV        "vdev"
 #define OPT_SYSLOG      "syslog"
+#define OPT_LOG_LEVEL   "log-level"
 
 #define MEMSIZE_IF_NO_HUGE_PAGE (64ULL * 1024ULL * 1024ULL)
 
@@ -293,6 +294,7 @@ eal_usage(const char *prgname)
 	       "  -v           : Display version information on startup\n"
 	       "  -m MB        : memory to allocate\n"
 	       "  -r NUM       : force number of memory ranks (don't detect)\n"
+	       "  --"OPT_LOG_LEVEL"  : set default log level\n"
 	       "  --"OPT_PROC_TYPE"  : type of this process\n"
 	       "  --"OPT_PCI_BLACKLIST", -b: add a PCI device in black list.\n"
 	       "               Prevent EAL from using this PCI device. The argument\n"
@@ -440,6 +442,28 @@ eal_parse_syslog(const char *facility)
 	return -1;
 }
 
+static int
+eal_parse_log_level(const char *level, uint32_t *log_level)
+{
+	char *end;
+	unsigned long tmp;
+
+	errno = 0;
+	tmp = strtoul(level, &end, 0);
+
+	/* check for errors */
+	if ((errno != 0) || (level[0] == '\0') ||
+	    end == NULL || (*end != '\0'))
+		return -1;
+
+	/* log_level is a uint32_t */
+	if (tmp >= UINT32_MAX)
+		return -1;
+
+	*log_level = tmp;
+	return 0;
+}
+
 static inline size_t
 eal_get_hugepage_mem_size(void)
 {
@@ -494,6 +518,7 @@ eal_parse_args(int argc, char **argv)
 		{OPT_PCI_BLACKLIST, 1, 0, 0},
 		{OPT_VDEV, 1, 0, 0},
 		{OPT_SYSLOG, 1, NULL, 0},
+		{OPT_LOG_LEVEL, 1, NULL, 0},
 		{0, 0, 0, 0}
 	};
 
@@ -506,6 +531,8 @@ eal_parse_args(int argc, char **argv)
 	internal_config.hugepage_dir = NULL;
 	internal_config.force_sockets = 0;
 	internal_config.syslog_facility = LOG_DAEMON;
+	/* default value from build option */
+	internal_config.log_level = RTE_LOG_LEVEL;
 #ifdef RTE_LIBEAL_USE_HPET
 	internal_config.no_hpet = 0;
 #else
@@ -652,6 +679,18 @@ eal_parse_args(int argc, char **argv)
 					eal_usage(prgname);
 					return -1;
 				}
+			} else if (!strcmp(lgopts[option_index].name,
+					 OPT_LOG_LEVEL)) {
+				uint32_t log;
+
+				if (eal_parse_log_level(optarg, &log) < 0) {
+					RTE_LOG(ERR, EAL,
+						"invalid parameters for --"
+						OPT_LOG_LEVEL "\n");
+					eal_usage(prgname);
+					return -1;
+				}
+				internal_config.log_level = log;
 			}
 			break;
 
@@ -793,6 +832,9 @@ rte_eal_init(int argc, char **argv)
 	if (fctret < 0)
 		exit(1);
 
+	/* set log level as early as possible */
+	rte_set_log_level(internal_config.log_level);
+
 	if (internal_config.no_hugetlbfs == 0 &&
 			internal_config.process_type != RTE_PROC_SECONDARY &&
 			eal_hugepage_info_init() < 0)
diff --git a/lib/librte_eal/bsdapp/eal/include/eal_internal_cfg.h b/lib/librte_eal/bsdapp/eal/include/eal_internal_cfg.h
index 2d06c7f..24cefc2 100644
--- a/lib/librte_eal/bsdapp/eal/include/eal_internal_cfg.h
+++ b/lib/librte_eal/bsdapp/eal/include/eal_internal_cfg.h
@@ -75,6 +75,7 @@ struct internal_config {
 	volatile uint64_t socket_mem[RTE_MAX_NUMA_NODES]; /**< amount of memory per socket */
 	uintptr_t base_virtaddr;          /**< base address to try and reserve memory from */
 	volatile int syslog_facility;	  /**< facility passed to openlog() */
+	volatile uint32_t log_level;	  /**< default log level */
 	const char *hugefile_prefix;      /**< the base filename of hugetlbfs files */
 	const char *hugepage_dir;         /**< specific hugetlbfs directory to use */
 
diff --git a/lib/librte_eal/linuxapp/eal/eal.c b/lib/librte_eal/linuxapp/eal/eal.c
index 4869e7c..38cace6 100644
--- a/lib/librte_eal/linuxapp/eal/eal.c
+++ b/lib/librte_eal/linuxapp/eal/eal.c
@@ -97,6 +97,7 @@
 #define OPT_PCI_BLACKLIST "pci-blacklist"
 #define OPT_VDEV        "vdev"
 #define OPT_SYSLOG      "syslog"
+#define OPT_LOG_LEVEL   "log-level"
 #define OPT_BASE_VIRTADDR   "base-virtaddr"
 #define OPT_XEN_DOM0    "xen-dom0"
 #define OPT_CREATE_UIO_DEV "create-uio-dev"
@@ -384,7 +385,8 @@ eal_usage(const char *prgname)
 	       "  --"OPT_XEN_DOM0" : support application running on Xen Domain0 "
 			   "without hugetlbfs\n"
 	       "  --"OPT_SYSLOG"     : set syslog facility\n"
-	       "  --"OPT_SOCKET_MEM" : memory to allocate on specific \n"
+	       "  --"OPT_LOG_LEVEL"  : set default log level\n"
+	       "  --"OPT_SOCKET_MEM" : memory to allocate on specific\n"
 		   "                 sockets (use comma separated values)\n"
 	       "  --"OPT_HUGE_DIR"   : directory where hugetlbfs is mounted\n"
 	       "  --"OPT_PROC_TYPE"  : type of this process\n"
@@ -548,6 +550,28 @@ eal_parse_syslog(const char *facility)
 }
 
 static int
+eal_parse_log_level(const char *level, uint32_t *log_level)
+{
+	char *end;
+	unsigned long tmp;
+
+	errno = 0;
+	tmp = strtoul(level, &end, 0);
+
+	/* check for errors */
+	if ((errno != 0) || (level[0] == '\0') ||
+	    end == NULL || (*end != '\0'))
+		return -1;
+
+	/* log_level is a uint32_t */
+	if (tmp >= UINT32_MAX)
+		return -1;
+
+	*log_level = tmp;
+	return 0;
+}
+
+static int
 eal_parse_socket_mem(char *socket_mem)
 {
 	char * arg[RTE_MAX_NUMA_NODES];
@@ -699,6 +723,7 @@ eal_parse_args(int argc, char **argv)
 		{OPT_PCI_BLACKLIST, 1, 0, 0},
 		{OPT_VDEV, 1, 0, 0},
 		{OPT_SYSLOG, 1, NULL, 0},
+		{OPT_LOG_LEVEL, 1, NULL, 0},
 		{OPT_VFIO_INTR, 1, NULL, 0},
 		{OPT_BASE_VIRTADDR, 1, 0, 0},
 		{OPT_XEN_DOM0, 0, 0, 0},
@@ -716,6 +741,8 @@ eal_parse_args(int argc, char **argv)
 	internal_config.hugepage_dir = NULL;
 	internal_config.force_sockets = 0;
 	internal_config.syslog_facility = LOG_DAEMON;
+	/* default value from build option */
+	internal_config.log_level = RTE_LOG_LEVEL;
 	internal_config.xen_dom0_support = 0;
 	/* if set to NONE, interrupt mode is determined automatically */
 	internal_config.vfio_intr_mode = RTE_INTR_MODE_NONE;
@@ -887,6 +914,18 @@ eal_parse_args(int argc, char **argv)
 					eal_usage(prgname);
 					return -1;
 				}
+			} else if (!strcmp(lgopts[option_index].name,
+					 OPT_LOG_LEVEL)) {
+				uint32_t log;
+
+				if (eal_parse_log_level(optarg, &log) < 0) {
+					RTE_LOG(ERR, EAL,
+						"invalid parameters for --"
+						OPT_LOG_LEVEL "\n");
+					eal_usage(prgname);
+					return -1;
+				}
+				internal_config.log_level = log;
 			}
 			else if (!strcmp(lgopts[option_index].name, OPT_BASE_VIRTADDR)) {
 				if (eal_parse_base_virtaddr(optarg) < 0) {
@@ -1054,6 +1093,9 @@ rte_eal_init(int argc, char **argv)
 	if (fctret < 0)
 		exit(1);
 
+	/* set log level as early as possible */
+	rte_set_log_level(internal_config.log_level);
+
 	if (internal_config.no_hugetlbfs == 0 &&
 			internal_config.process_type != RTE_PROC_SECONDARY &&
 			internal_config.xen_dom0_support == 0 &&
diff --git a/lib/librte_eal/linuxapp/eal/include/eal_internal_cfg.h b/lib/librte_eal/linuxapp/eal/include/eal_internal_cfg.h
index 498ade2..8749390 100644
--- a/lib/librte_eal/linuxapp/eal/include/eal_internal_cfg.h
+++ b/lib/librte_eal/linuxapp/eal/include/eal_internal_cfg.h
@@ -77,6 +77,7 @@ struct internal_config {
 	volatile uint64_t socket_mem[RTE_MAX_NUMA_NODES]; /**< amount of memory per socket */
 	uintptr_t base_virtaddr;          /**< base address to try and reserve memory from */
 	volatile int syslog_facility;	  /**< facility passed to openlog() */
+	volatile uint32_t log_level;	  /**< default log level */
 	/** default interrupt mode for VFIO */
 	volatile enum rte_intr_mode vfio_intr_mode;
 	const char *hugefile_prefix;      /**< the base filename of hugetlbfs files */
-- 
1.7.10.4
- * Re: [dpdk-dev] [PATCH v3 20/20] eal: set log level from command line
  2014-09-17 13:46 ` [dpdk-dev] [PATCH v3 20/20] eal: set log level from command line David Marchand
@ 2014-09-17 14:45   ` Neil Horman
  2014-09-18  7:46     ` David Marchand
  0 siblings, 1 reply; 25+ messages in thread
From: Neil Horman @ 2014-09-17 14:45 UTC (permalink / raw)
  To: David Marchand; +Cc: dev
On Wed, Sep 17, 2014 at 03:46:52PM +0200, David Marchand wrote:
> Add a --log-level option to set the default eal log level.
> 
> Signed-off-by: David Marchand <david.marchand@6wind.com>
> Acked-by: Bruce Richardson <bruce.richardson@intel.com>
> ---
>  #else
> @@ -652,6 +679,18 @@ eal_parse_args(int argc, char **argv)
>  					eal_usage(prgname);
>  					return -1;
>  				}
> +			} else if (!strcmp(lgopts[option_index].name,
> +					 OPT_LOG_LEVEL)) {
> +				uint32_t log;
> +
> +				if (eal_parse_log_level(optarg, &log) < 0) {
> +					RTE_LOG(ERR, EAL,
> +						"invalid parameters for --"
> +						OPT_LOG_LEVEL "\n");
> +					eal_usage(prgname);
> +					return -1;
> +				}
> +				internal_config.log_level = log;
This is a nit, but since you're working in this code anyway, would you mind
fixing the long options parsing please?  Instead of having a single case
statement that just does a never ending if..else..if series of strcmps that
could possibly cause a stack overflow, you can set the val value in the lgopts
array to a unique value for each option and just have a set of case statements.
It would look a lot more readable and execute more safely.
Thanks
Neil
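A minimal sketch of the pattern described above, with hypothetical option names and identifiers (this is not code from the patchset): each long option gets a unique 'val' in the lgopts array, and the parser switches on it instead of strcmp()ing option names in a long if/else chain.

#include <getopt.h>
#include <stddef.h>

/* Hypothetical identifiers, chosen above the ASCII range so they cannot
 * collide with single-character short options. */
enum {
	OPT_SYSLOG_NUM = 256,
	OPT_LOG_LEVEL_NUM,
};

static const struct option lgopts[] = {
	{"syslog",    required_argument, NULL, OPT_SYSLOG_NUM},
	{"log-level", required_argument, NULL, OPT_LOG_LEVEL_NUM},
	{NULL, 0, NULL, 0},
};

static int
parse_args(int argc, char **argv)
{
	int opt;

	while ((opt = getopt_long(argc, argv, "", lgopts, NULL)) != -1) {
		switch (opt) {
		case OPT_SYSLOG_NUM:
			/* optarg holds the requested syslog facility */
			break;
		case OPT_LOG_LEVEL_NUM:
			/* optarg holds the requested log level */
			break;
		default:
			return -1;
		}
	}
	return 0;
}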
- * Re: [dpdk-dev] [PATCH v3 20/20] eal: set log level from command line
  2014-09-17 14:45   ` Neil Horman
@ 2014-09-18  7:46     ` David Marchand
  2014-09-18 10:27       ` Neil Horman
  0 siblings, 1 reply; 25+ messages in thread
From: David Marchand @ 2014-09-18  7:46 UTC (permalink / raw)
  To: Neil Horman; +Cc: dev
Hello Neil,
On Wed, Sep 17, 2014 at 4:45 PM, Neil Horman <nhorman@tuxdriver.com> wrote:
>
> This is a nit, but since you're working in this code anyway, would you mind
> fixing the long options parsing please?  Instead of having a single case
> statement that just does a never ending if..else..if series of strcmps that
> could possibly cause a stack overflow, you can set the val value in the
> lgopts
> array to a unique value for each option and just have a set of case
> statements.
> It would look a lot more readable and execute more safely.
>
I agree, but I will send it in a different patchset: already made some
changes, it builds fine (at least on my linux), but I want to check it
actually works and maybe more cleanups can be done there (merging bsd and
linux code).
-- 
David Marchand
- * Re: [dpdk-dev] [PATCH v3 20/20] eal: set log level from command line
  2014-09-18  7:46     ` David Marchand
@ 2014-09-18 10:27       ` Neil Horman
  0 siblings, 0 replies; 25+ messages in thread
From: Neil Horman @ 2014-09-18 10:27 UTC (permalink / raw)
  To: David Marchand; +Cc: dev
On Thu, Sep 18, 2014 at 09:46:54AM +0200, David Marchand wrote:
> Hello Neil,
> 
> 
> On Wed, Sep 17, 2014 at 4:45 PM, Neil Horman <nhorman@tuxdriver.com> wrote:
> 
> >
> > This is a nit, but since you're working in this code anyway, would you mind
> > fixing the long options parsing please?  Instead of having a single case
> > statement that just does a never ending if..else..if series of strcmps that
> > could possibly cause a stack overflow, you can set the val value in the
> > lgopts
> > array to a unique value for each option and just have a set of case
> > statements.
> > It would look a lot more readable and execute more safely.
> >
> 
> I agree, but I will send it in a different patchset: already made some
> changes, it builds fine (at least on my linux), but I want to check it
> actually works and maybe more cleanups can be done there (merging bsd and
> linux code).
> 
That's fine with me, thanks!
Neil
> -- 
> David Marchand
- * Re: [dpdk-dev] [PATCH v3 00/20] cleanup logs in main PMDs
  2014-09-17 13:46 [dpdk-dev] [PATCH v3 00/20] cleanup logs in main PMDs David Marchand
                   ` (19 preceding siblings ...)
  2014-09-17 13:46 ` [dpdk-dev] [PATCH v3 20/20] eal: set log level from command line David Marchand
@ 2014-09-19  7:52 ` Thomas Monjalon
  20 siblings, 0 replies; 25+ messages in thread
From: Thomas Monjalon @ 2014-09-19  7:52 UTC (permalink / raw)
  To: David Marchand; +Cc: dev
> Here is a patchset that reworks the log macro in e1000, ixgbe and i40e PMDs.
> The idea behind this is to make it easier to debug some init failures and to be
> sure of the datapath selected in these PMDs (rx / tx handlers selection).
> 
> The PMDs changes involve adding more debug messages in the default build.
> A new eal option has been added to set the default log level, so that you can
> render the eal a little less noisy.
> 
> I did not change the default log level for now, as some eal log messages are
> marked as DEBUG while being interesting (from my point of view).
> I suppose we can change the default log level later once the eal has been
> cleaned up.
> 
> Changes since v2:
> - just a respin with Jay comments in mind
> * don't introduce \n in one commit then remove them
> * indent only the impacted parts before removing \n (so split previous patches)
> * remove some "" garbage
> 
> Changes since v1:
> - continue clean up by always using PMD_*_LOG when logging something in
>   PMD (i.e. no more printf, RTE_LOG, DEBUGOUT)
> - introduce PMD_DRV_LOG_RAW macro for use by shared driver code
> - adopt 'second approach': no more \n in PMD_*_LOG callers. This means that we
>   will enforce a 'no \n' policy in logs for PMD.
> 
> David Marchand (20):
>   ixgbe: use the right debug macro
>   ixgbe/base: add a raw macro for use by shared code
>   ixgbe: indent logs sections
>   ixgbe: clean log messages
>   ixgbe: always log init messages
>   ixgbe: add a message when forcing scatter mode
>   ixgbe: add log messages when rx bulk mode is not usable
>   i40e: use the right debug macro
>   i40e/base: add a raw macro for use by shared code
>   i40e: indent logs sections
>   i40e: clean log messages
>   i40e: always log init messages
>   i40e: add log messages when rx bulk mode is not usable
>   e1000: use the right debug macro
>   e1000/base: add a raw macro for use by shared code
>   e1000: indent logs sections
>   e1000: clean log messages
>   e1000: always log init messages
>   e1000: add a message when forcing scatter mode
>   eal: set log level from command line
Applied for version 1.8.0.
Thanks
-- 
Thomas