From mboxrd@z Thu Jan  1 00:00:00 1970
Return-Path: <david.marchand@6wind.com>
Received: from mail-we0-f170.google.com (mail-we0-f170.google.com
 [74.125.82.170]) by dpdk.org (Postfix) with ESMTP id 367C3B3CC
 for <dev@dpdk.org>; Wed, 17 Sep 2014 15:41:38 +0200 (CEST)
Received: by mail-we0-f170.google.com with SMTP id u57so1470193wes.1
 for <dev@dpdk.org>; Wed, 17 Sep 2014 06:47:19 -0700 (PDT)
X-Google-DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed;
 d=1e100.net; s=20130820;
 h=x-gm-message-state:from:to:cc:subject:date:message-id:in-reply-to
 :references;
 bh=yn2FUQsF7JKDBeHixldnleFvRHEbs/Y6HLgs74zdZjA=;
 b=caX3eSFPbCgUFtcXvnkL2On7K9L0suQHOTv6Q15QG5rKJY3+S9/DxooLoJEtmklcQR
 Asy1M2nydzJsNd6gBS18DNExTM3KN3Aa6w6p5dmCck2nHtCVLG0bFRk7ghdce7tzwCkH
 sQPo9/LxwPV2x4AHCtDUtQTK9tXNQnfC44jKb4zmTHo08xJUOtEBbaC5IvAPvtb4tL86
 2Gjy+xwiN3bfRyFWpRIoiSQ97csUdNNIFAkSOKwAhfEo3A61BSfz/plCcV/ZJMqIms9R
 cY7AS+anrgFNHbAdO0kXL5FouSarhMWdlIG2bisr1STnl3ZV1BELsCBhTWEFjp1qi97b
 L8Rg==
X-Gm-Message-State: ALoCoQkkzQ5svWY52P4zin1HzyQz1TC3bkwcU2w5qEZM78ZGYaOB5MaKDPeKeqKJ6glT1l25ZqUm
X-Received: by 10.181.27.197 with SMTP id ji5mr29650055wid.54.1410961639361;
 Wed, 17 Sep 2014 06:47:19 -0700 (PDT)
Received: from alcyon.dev.6wind.com (guy78-3-82-239-227-177.fbx.proxad.net.
 [82.239.227.177])
 by mx.google.com with ESMTPSA id fa20sm5712872wic.1.2014.09.17.06.47.17
 for <multiple recipients>
 (version=TLSv1.2 cipher=ECDHE-RSA-AES128-GCM-SHA256 bits=128/128);
 Wed, 17 Sep 2014 06:47:18 -0700 (PDT)
From: David Marchand <david.marchand@6wind.com>
To: dev@dpdk.org
Date: Wed, 17 Sep 2014 15:46:42 +0200
Message-Id: <1410961612-8571-11-git-send-email-david.marchand@6wind.com>
X-Mailer: git-send-email 1.7.10.4
In-Reply-To: <1410961612-8571-1-git-send-email-david.marchand@6wind.com>
References: <1410961612-8571-1-git-send-email-david.marchand@6wind.com>
Subject: [dpdk-dev] [PATCH v3 10/20] i40e: indent log sections
X-BeenThere: dev@dpdk.org
X-Mailman-Version: 2.1.15
Precedence: list
List-Id: patches and discussions about DPDK <dev.dpdk.org>
List-Unsubscribe: <http://dpdk.org/ml/options/dev>,
 <mailto:dev-request@dpdk.org?subject=unsubscribe>
List-Archive: <http://dpdk.org/ml/archives/dev/>
List-Post: <mailto:dev@dpdk.org>
List-Help: <mailto:dev-request@dpdk.org?subject=help>
List-Subscribe: <http://dpdk.org/ml/listinfo/dev>,
 <mailto:dev-request@dpdk.org?subject=subscribe>
X-List-Received-Date: Wed, 17 Sep 2014 13:41:39 -0000

Prepare for the next commit: indent the sections where log messages will be
modified, so that the next patch is only about the \n changes.
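As a sketch of the convention applied throughout (this example is lifted from
one of the hunks below), continuation arguments of a log call are aligned just
after its opening parenthesis:

	PMD_DRV_LOG(ERR, "No resource. ask:%u, available:%u\n",
		    num, pool->num_free);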

Signed-off-by: David Marchand <david.marchand@6wind.com>
---
 lib/librte_pmd_i40e/i40e_ethdev.c    |  101 +++++++++++++++++-----------------
 lib/librte_pmd_i40e/i40e_ethdev_vf.c |   76 ++++++++++++-------------
 lib/librte_pmd_i40e/i40e_pf.c        |    3 +-
 lib/librte_pmd_i40e/i40e_rxtx.c      |   54 +++++++++---------
 4 files changed, 113 insertions(+), 121 deletions(-)

diff --git a/lib/librte_pmd_i40e/i40e_ethdev.c b/lib/librte_pmd_i40e/i40e_ethdev.c
index af2e1cb..aadb548 100644
--- a/lib/librte_pmd_i40e/i40e_ethdev.c
+++ b/lib/librte_pmd_i40e/i40e_ethdev.c
@@ -371,7 +371,7 @@ eth_i40e_dev_init(__rte_unused struct eth_driver *eth_drv,
 	hw->hw_addr = (uint8_t *)(pci_dev->mem_resource[0].addr);
 	if (!hw->hw_addr) {
 		PMD_INIT_LOG(ERR, "Hardware is not available, "
-					"as address is NULL\n");
+			     "as address is NULL\n");
 		return -ENODEV;
 	}
 
@@ -406,13 +406,12 @@ eth_i40e_dev_init(__rte_unused struct eth_driver *eth_drv,
 		PMD_INIT_LOG(ERR, "Failed to init adminq: %d", ret);
 		return -EIO;
 	}
-	PMD_INIT_LOG(INFO, "FW %d.%d API %d.%d NVM "
-			"%02d.%02d.%02d eetrack %04x\n",
-			hw->aq.fw_maj_ver, hw->aq.fw_min_ver,
-			hw->aq.api_maj_ver, hw->aq.api_min_ver,
-			((hw->nvm.version >> 12) & 0xf),
-			((hw->nvm.version >> 4) & 0xff),
-			(hw->nvm.version & 0xf), hw->nvm.eetrack);
+	PMD_INIT_LOG(INFO, "FW %d.%d API %d.%d NVM %02d.%02d.%02d eetrack %04x\n",
+		     hw->aq.fw_maj_ver, hw->aq.fw_min_ver,
+		     hw->aq.api_maj_ver, hw->aq.api_min_ver,
+		     ((hw->nvm.version >> 12) & 0xf),
+		     ((hw->nvm.version >> 4) & 0xff),
+		     (hw->nvm.version & 0xf), hw->nvm.eetrack);
 
 	/* Disable LLDP */
 	ret = i40e_aq_stop_lldp(hw, true, NULL);
@@ -764,8 +763,8 @@ i40e_dev_start(struct rte_eth_dev *dev)
 	if ((dev->data->dev_conf.link_duplex != ETH_LINK_AUTONEG_DUPLEX) &&
 		(dev->data->dev_conf.link_duplex != ETH_LINK_FULL_DUPLEX)) {
 		PMD_INIT_LOG(ERR, "Invalid link_duplex (%hu) for port %hhu\n",
-				dev->data->dev_conf.link_duplex,
-				dev->data->port_id);
+			     dev->data->dev_conf.link_duplex,
+			     dev->data->port_id);
 		return -EINVAL;
 	}
 
@@ -1844,21 +1843,22 @@ i40e_pf_parameter_init(struct rte_eth_dev *dev)
 		pf->vf_nb_qps = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF;
 		if (dev->pci_dev->max_vfs > hw->func_caps.num_vfs) {
 			PMD_INIT_LOG(ERR, "Config VF number %u, "
-				"max supported %u.\n", dev->pci_dev->max_vfs,
-						hw->func_caps.num_vfs);
+				     "max supported %u.\n",
+				     dev->pci_dev->max_vfs,
+				     hw->func_caps.num_vfs);
 			return -EINVAL;
 		}
 		if (pf->vf_nb_qps > I40E_MAX_QP_NUM_PER_VF) {
 			PMD_INIT_LOG(ERR, "FVL VF queue %u, "
-				"max support %u queues.\n", pf->vf_nb_qps,
-						I40E_MAX_QP_NUM_PER_VF);
+				     "max support %u queues.\n",
+				     pf->vf_nb_qps, I40E_MAX_QP_NUM_PER_VF);
 			return -EINVAL;
 		}
 		pf->vf_num = dev->pci_dev->max_vfs;
 		sum_queues += pf->vf_nb_qps * pf->vf_num;
 		sum_vsis   += pf->vf_num;
 		PMD_INIT_LOG(INFO, "Max VF num:%u each has queue pairs:%u\n",
-						pf->vf_num, pf->vf_nb_qps);
+			     pf->vf_num, pf->vf_nb_qps);
 	} else
 		pf->vf_num = 0;
 
@@ -1883,16 +1883,17 @@ i40e_pf_parameter_init(struct rte_eth_dev *dev)
 		sum_queues > hw->func_caps.num_rx_qp) {
 		PMD_INIT_LOG(ERR, "VSI/QUEUE setting can't be satisfied\n");
 		PMD_INIT_LOG(ERR, "Max VSIs: %u, asked:%u\n",
-				pf->max_num_vsi, sum_vsis);
+			     pf->max_num_vsi, sum_vsis);
 		PMD_INIT_LOG(ERR, "Total queue pairs:%u, asked:%u\n",
-				hw->func_caps.num_rx_qp, sum_queues);
+			     hw->func_caps.num_rx_qp, sum_queues);
 		return -EINVAL;
 	}
 
 	/* Each VSI occupy 1 MSIX interrupt at least, plus IRQ0 for misc intr cause */
 	if (sum_vsis > hw->func_caps.num_msix_vectors - 1) {
-		PMD_INIT_LOG(ERR, "Too many VSIs(%u), MSIX intr(%u) not enough\n",
-				sum_vsis, hw->func_caps.num_msix_vectors);
+		PMD_INIT_LOG(ERR, "Too many VSIs(%u), "
+			     "MSIX intr(%u) not enough\n",
+			     sum_vsis, hw->func_caps.num_msix_vectors);
 		return -EINVAL;
 	}
 	return I40E_SUCCESS;
@@ -1952,8 +1953,7 @@ i40e_res_pool_init (struct i40e_res_pool_info *pool, uint32_t base,
 
 	entry = rte_zmalloc("i40e", sizeof(*entry), 0);
 	if (entry == NULL) {
-		PMD_DRV_LOG(ERR, "Failed to allocate memory for "
-						"resource pool\n");
+		PMD_DRV_LOG(ERR, "Failed to allocate memory for resource pool\n");
 		return -ENOMEM;
 	}
 
@@ -2097,7 +2097,7 @@ i40e_res_pool_alloc(struct i40e_res_pool_info *pool,
 
 	if (pool->num_free < num) {
 		PMD_DRV_LOG(ERR, "No resource. ask:%u, available:%u\n",
-				num, pool->num_free);
+			    num, pool->num_free);
 		return -ENOMEM;
 	}
 
@@ -2135,7 +2135,7 @@ i40e_res_pool_alloc(struct i40e_res_pool_info *pool,
 		entry = rte_zmalloc("res_pool", sizeof(*entry), 0);
 		if (entry == NULL) {
 			PMD_DRV_LOG(ERR, "Failed to allocate memory for "
-					"resource pool\n");
+				    "resource pool\n");
 			return -ENOMEM;
 		}
 		entry->base = valid_entry->base;
@@ -2170,15 +2170,14 @@ validate_tcmap_parameter(struct i40e_vsi *vsi, uint8_t enabled_tcmap)
 
 	/* If DCB is not supported, only default TC is supported */
 	if (!hw->func_caps.dcb && enabled_tcmap != I40E_DEFAULT_TCMAP) {
-		PMD_DRV_LOG(ERR, "DCB is not enabled, "
-				"only TC0 is supported\n");
+		PMD_DRV_LOG(ERR, "DCB is not enabled, only TC0 is supported\n");
 		return -EINVAL;
 	}
 
 	if (!bitmap_is_subset(hw->func_caps.enabled_tcmap, enabled_tcmap)) {
 		PMD_DRV_LOG(ERR, "Enabled TC map 0x%x not applicable to "
-			"HW support 0x%x\n", hw->func_caps.enabled_tcmap,
-							enabled_tcmap);
+			    "HW support 0x%x\n", hw->func_caps.enabled_tcmap,
+			    enabled_tcmap);
 		return -EINVAL;
 	}
 	return I40E_SUCCESS;
@@ -2357,7 +2356,7 @@ i40e_veb_setup(struct i40e_pf *pf, struct i40e_vsi *vsi)
 
 	if (NULL == pf || vsi == NULL) {
 		PMD_DRV_LOG(ERR, "veb setup failed, "
-			"associated VSI shouldn't null\n");
+			    "associated VSI shouldn't null\n");
 		return NULL;
 	}
 	hw = I40E_PF_TO_HW(pf);
@@ -2377,7 +2376,7 @@ i40e_veb_setup(struct i40e_pf *pf, struct i40e_vsi *vsi)
 
 	if (ret != I40E_SUCCESS) {
 		PMD_DRV_LOG(ERR, "Add veb failed, aq_err: %d\n",
-					hw->aq.asq_last_status);
+			    hw->aq.asq_last_status);
 		goto fail;
 	}
 
@@ -2386,7 +2385,7 @@ i40e_veb_setup(struct i40e_pf *pf, struct i40e_vsi *vsi)
 				&veb->stats_idx, NULL, NULL, NULL);
 	if (ret != I40E_SUCCESS) {
 		PMD_DRV_LOG(ERR, "Get veb statics index failed, aq_err: %d\n",
-						hw->aq.asq_last_status);
+			    hw->aq.asq_last_status);
 		goto fail;
 	}
 
@@ -2473,7 +2472,7 @@ i40e_update_default_filter_setting(struct i40e_vsi *vsi)
 		struct i40e_mac_filter *f;
 
 		PMD_DRV_LOG(WARNING, "Cannot remove the default "
-						"macvlan filter\n");
+			    "macvlan filter\n");
 		/* It needs to add the permanent mac into mac list */
 		f = rte_zmalloc("macv_filter", sizeof(*f), 0);
 		if (f == NULL) {
@@ -2503,8 +2502,8 @@ i40e_vsi_dump_bw_config(struct i40e_vsi *vsi)
 	memset(&bw_config, 0, sizeof(bw_config));
 	ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
 	if (ret != I40E_SUCCESS) {
-		PMD_DRV_LOG(ERR, "VSI failed to get bandwidth "
-			"configuration %u\n", hw->aq.asq_last_status);
+		PMD_DRV_LOG(ERR, "VSI failed to get bandwidth configuration %u\n",
+			    hw->aq.asq_last_status);
 		return ret;
 	}
 
@@ -2513,7 +2512,7 @@ i40e_vsi_dump_bw_config(struct i40e_vsi *vsi)
 					&ets_sla_config, NULL);
 	if (ret != I40E_SUCCESS) {
 		PMD_DRV_LOG(ERR, "VSI failed to get TC bandwdith "
-			"configuration %u\n", hw->aq.asq_last_status);
+			    "configuration %u\n", hw->aq.asq_last_status);
 		return ret;
 	}
 
@@ -2522,12 +2521,12 @@ i40e_vsi_dump_bw_config(struct i40e_vsi *vsi)
 	PMD_DRV_LOG(INFO, "VSI max_bw:%u\n", bw_config.max_bw);
 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
 		PMD_DRV_LOG(INFO, "\tVSI TC%u:share credits %u\n", i,
-					ets_sla_config.share_credits[i]);
+			    ets_sla_config.share_credits[i]);
 		PMD_DRV_LOG(INFO, "\tVSI TC%u:credits %u\n", i,
-			rte_le_to_cpu_16(ets_sla_config.credits[i]));
+			    rte_le_to_cpu_16(ets_sla_config.credits[i]));
 		PMD_DRV_LOG(INFO, "\tVSI TC%u: max credits: %u", i,
-			rte_le_to_cpu_16(ets_sla_config.credits[i / 4]) >>
-								(i * 4));
+			    rte_le_to_cpu_16(ets_sla_config.credits[i / 4]) >>
+			    (i * 4));
 	}
 
 	return 0;
@@ -2549,13 +2548,13 @@ i40e_vsi_setup(struct i40e_pf *pf,
 
 	if (type != I40E_VSI_MAIN && uplink_vsi == NULL) {
 		PMD_DRV_LOG(ERR, "VSI setup failed, "
-			"VSI link shouldn't be NULL\n");
+			    "VSI link shouldn't be NULL\n");
 		return NULL;
 	}
 
 	if (type == I40E_VSI_MAIN && uplink_vsi != NULL) {
 		PMD_DRV_LOG(ERR, "VSI setup failed, MAIN VSI "
-				"uplink VSI should be NULL\n");
+			    "uplink VSI should be NULL\n");
 		return NULL;
 	}
 
@@ -2656,7 +2655,7 @@ i40e_vsi_setup(struct i40e_pf *pf,
 						I40E_DEFAULT_TCMAP);
 		if (ret != I40E_SUCCESS) {
 			PMD_DRV_LOG(ERR, "Failed to configure "
-					"TC queue mapping\n");
+				    "TC queue mapping\n");
 			goto fail_msix_alloc;
 		}
 		ctxt.seid = vsi->seid;
@@ -2719,7 +2718,7 @@ i40e_vsi_setup(struct i40e_pf *pf,
 						I40E_DEFAULT_TCMAP);
 		if (ret != I40E_SUCCESS) {
 			PMD_DRV_LOG(ERR, "Failed to configure "
-					"TC queue mapping\n");
+				    "TC queue mapping\n");
 			goto fail_msix_alloc;
 		}
 		ctxt.info.up_enable_bits = I40E_DEFAULT_TCMAP;
@@ -2739,7 +2738,7 @@ i40e_vsi_setup(struct i40e_pf *pf,
 		ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
 		if (ret) {
 			PMD_DRV_LOG(ERR, "add vsi failed, aq_err=%d\n",
-				 hw->aq.asq_last_status);
+				    hw->aq.asq_last_status);
 			goto fail_msix_alloc;
 		}
 		memcpy(&vsi->info, &ctxt.info, sizeof(ctxt.info));
@@ -2807,7 +2806,7 @@ i40e_vsi_config_vlan_stripping(struct i40e_vsi *vsi, bool on)
 	ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
 	if (ret)
 		PMD_DRV_LOG(INFO, "Update VSI failed to %s vlan stripping\n",
-						on ? "enable" : "disable");
+			    on ? "enable" : "disable");
 
 	return ret;
 }
@@ -2997,7 +2996,7 @@ i40e_switch_tx_queue(struct i40e_hw *hw, uint16_t q_idx, bool on)
 	/* Check if it is timeout */
 	if (j >= I40E_CHK_Q_ENA_COUNT) {
 		PMD_DRV_LOG(ERR, "Failed to %s tx queue[%u]\n",
-			(on ? "enable" : "disable"), q_idx);
+			    (on ? "enable" : "disable"), q_idx);
 		return I40E_ERR_TIMEOUT;
 	}
 
@@ -3076,7 +3075,7 @@ i40e_switch_rx_queue(struct i40e_hw *hw, uint16_t q_idx, bool on)
 	/* Check if it is timeout */
 	if (j >= I40E_CHK_Q_ENA_COUNT) {
 		PMD_DRV_LOG(ERR, "Failed to %s rx queue[%u]\n",
-			(on ? "enable" : "disable"), q_idx);
+			    (on ? "enable" : "disable"), q_idx);
 		return I40E_ERR_TIMEOUT;
 	}
 
@@ -3168,7 +3167,7 @@ i40e_vsi_rx_init(struct i40e_vsi *vsi)
 		ret = i40e_rx_queue_init(data->rx_queues[i]);
 		if (ret != I40E_SUCCESS) {
 			PMD_DRV_LOG(ERR, "Failed to do RX queue "
-					"initialization\n");
+				    "initialization\n");
 			break;
 		}
 	}
@@ -3351,7 +3350,7 @@ i40e_dev_handle_aq_msg(struct rte_eth_dev *dev)
 
 		if (ret != I40E_SUCCESS) {
 			PMD_DRV_LOG(INFO, "Failed to read msg from AdminQ, "
-				"aq_err: %u\n", hw->aq.asq_last_status);
+				    "aq_err: %u\n", hw->aq.asq_last_status);
 			break;
 		}
 		opcode = rte_le_to_cpu_16(info.desc.opcode);
@@ -3368,7 +3367,7 @@ i40e_dev_handle_aq_msg(struct rte_eth_dev *dev)
 			break;
 		default:
 			PMD_DRV_LOG(ERR, "Request %u is not supported yet\n",
-				opcode);
+				    opcode);
 			break;
 		}
 		/* Reset the buffer after processing one */
@@ -3405,7 +3404,7 @@ i40e_dev_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
 	/* Shared IRQ case, return */
 	if (!(cause & I40E_PFINT_ICR0_INTEVENT_MASK)) {
 		PMD_DRV_LOG(INFO, "Port%d INT0:share IRQ case, "
-			"no INT event to process\n", hw->pf_id);
+			    "no INT event to process\n", hw->pf_id);
 		goto done;
 	}
 
@@ -3626,7 +3625,7 @@ i40e_find_all_vlan_for_mac(struct i40e_vsi *vsi,
 				if (vsi->vfta[j] & (1 << k)) {
 					if (i > num - 1) {
 						PMD_DRV_LOG(ERR, "vlan number "
-								"not match\n");
+							    "not match\n");
 						return I40E_ERR_PARAM;
 					}
 					(void)rte_memcpy(&mv_f[i].macaddr,
diff --git a/lib/librte_pmd_i40e/i40e_ethdev_vf.c b/lib/librte_pmd_i40e/i40e_ethdev_vf.c
index ed62668..17009bd 100644
--- a/lib/librte_pmd_i40e/i40e_ethdev_vf.c
+++ b/lib/librte_pmd_i40e/i40e_ethdev_vf.c
@@ -206,7 +206,7 @@ i40evf_parse_pfmsg(struct i40e_vf *vf,
 				vpe->event_data.link_event.link_status;
 			vf->pend_msg |= PFMSG_LINK_CHANGE;
 			PMD_DRV_LOG(INFO, "Link status update:%s\n",
-					vf->link_up ? "up" : "down");
+				    vf->link_up ? "up" : "down");
 			break;
 		case I40E_VIRTCHNL_EVENT_RESET_IMPENDING:
 			vf->vf_reset = true;
@@ -219,9 +219,8 @@ i40evf_parse_pfmsg(struct i40e_vf *vf,
 			PMD_DRV_LOG(INFO, "PF driver closed\n");
 			break;
 		default:
-			PMD_DRV_LOG(ERR,
-				"%s: Unknown event %d from pf\n",
-				__func__, vpe->event);
+			PMD_DRV_LOG(ERR, "%s: Unknown event %d from pf\n",
+				    __func__, vpe->event);
 		}
 	} else {
 		/* async reply msg on command issued by vf previously */
@@ -351,7 +350,7 @@ i40evf_execute_vf_cmd(struct rte_eth_dev *dev, struct vf_cmd_info *args)
 		PMD_DRV_LOG(ERR, "Failed to read message from AdminQ\n");
 	else if (args->ops != info.ops)
 		PMD_DRV_LOG(ERR, "command mismatch, expect %u, get %u\n",
-				args->ops, info.ops);
+			    args->ops, info.ops);
 
 	return (err | info.result);
 }
@@ -392,8 +391,8 @@ i40evf_check_api_version(struct rte_eth_dev *dev)
 	else if ((pver->major != version.major) ||
 	    (pver->minor != version.minor)) {
 		PMD_INIT_LOG(ERR, "pf/vf API version mismatch. "
-			"(%u.%u)-(%u.%u)\n", pver->major, pver->minor,
-					version.major, version.minor);
+			     "(%u.%u)-(%u.%u)\n", pver->major, pver->minor,
+			     version.major, version.minor);
 		return -1;
 	}
 
@@ -418,8 +417,7 @@ i40evf_get_vf_resource(struct rte_eth_dev *dev)
 	err = i40evf_execute_vf_cmd(dev, &args);
 
 	if (err) {
-		PMD_DRV_LOG(ERR, "fail to execute command "
-					"OP_GET_VF_RESOURCE\n");
+		PMD_DRV_LOG(ERR, "fail to execute command OP_GET_VF_RESOURCE\n");
 		return err;
 	}
 
@@ -462,7 +460,7 @@ i40evf_config_promisc(struct rte_eth_dev *dev,
 
 	if (err)
 		PMD_DRV_LOG(ERR, "fail to execute command "
-				"CONFIG_PROMISCUOUS_MODE\n");
+			    "CONFIG_PROMISCUOUS_MODE\n");
 	return err;
 }
 
@@ -595,7 +593,7 @@ i40evf_configure_queues(struct rte_eth_dev *dev)
 	err = i40evf_execute_vf_cmd(dev, &args);
 	if (err)
 		PMD_DRV_LOG(ERR, "fail to execute command "
-				"OP_CONFIG_VSI_QUEUES\n");
+			    "OP_CONFIG_VSI_QUEUES\n");
 	rte_free(queue_info);
 
 	return err;
@@ -661,8 +659,8 @@ i40evf_switch_queue(struct rte_eth_dev *dev, bool isrx, uint16_t qid,
 	args.out_size = I40E_AQ_BUF_SZ;
 	err = i40evf_execute_vf_cmd(dev, &args);
 	if (err)
-		PMD_DRV_LOG(ERR, "fail to switch %s %u %s\n", isrx ? "RX" : "TX",
-			qid, on ? "on" : "off");
+		PMD_DRV_LOG(ERR, "fail to switch %s %u %s\n",
+			    isrx ? "RX" : "TX", qid, on ? "on" : "off");
 
 	return err;
 }
@@ -680,8 +678,7 @@ i40evf_start_queues(struct rte_eth_dev *dev)
 		if (rxq->start_rx_per_q)
 			continue;
 		if (i40evf_dev_rx_queue_start(dev, i) != 0) {
-			PMD_DRV_LOG(ERR, "Fail to start queue %u\n",
-				i);
+			PMD_DRV_LOG(ERR, "Fail to start queue %u\n", i);
 			return -1;
 		}
 	}
@@ -691,8 +688,7 @@ i40evf_start_queues(struct rte_eth_dev *dev)
 		if (txq->start_tx_per_q)
 			continue;
 		if (i40evf_dev_tx_queue_start(dev, i) != 0) {
-			PMD_DRV_LOG(ERR, "Fail to start queue %u\n",
-				i);
+			PMD_DRV_LOG(ERR, "Fail to start queue %u\n", i);
 			return -1;
 		}
 	}
@@ -708,8 +704,7 @@ i40evf_stop_queues(struct rte_eth_dev *dev)
 	/* Stop TX queues first */
 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
 		if (i40evf_dev_tx_queue_stop(dev, i) != 0) {
-			PMD_DRV_LOG(ERR, "Fail to start queue %u\n",
-				i);
+			PMD_DRV_LOG(ERR, "Fail to start queue %u\n", i);
 			return -1;
 		}
 	}
@@ -717,8 +712,7 @@ i40evf_stop_queues(struct rte_eth_dev *dev)
 	/* Then stop RX queues */
 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
 		if (i40evf_dev_rx_queue_stop(dev, i) != 0) {
-			PMD_DRV_LOG(ERR, "Fail to start queue %u\n",
-				i);
+			PMD_DRV_LOG(ERR, "Fail to start queue %u\n", i);
 			return -1;
 		}
 	}
@@ -738,9 +732,9 @@ i40evf_add_mac_addr(struct rte_eth_dev *dev, struct ether_addr *addr)
 
 	if (i40e_validate_mac_addr(addr->addr_bytes) != I40E_SUCCESS) {
 		PMD_DRV_LOG(ERR, "Invalid mac:%x:%x:%x:%x:%x:%x\n",
-			addr->addr_bytes[0], addr->addr_bytes[1],
-			addr->addr_bytes[2], addr->addr_bytes[3],
-			addr->addr_bytes[4], addr->addr_bytes[5]);
+			    addr->addr_bytes[0], addr->addr_bytes[1],
+			    addr->addr_bytes[2], addr->addr_bytes[3],
+			    addr->addr_bytes[4], addr->addr_bytes[5]);
 		return -1;
 	}
 
@@ -758,7 +752,7 @@ i40evf_add_mac_addr(struct rte_eth_dev *dev, struct ether_addr *addr)
 	err = i40evf_execute_vf_cmd(dev, &args);
 	if (err)
 		PMD_DRV_LOG(ERR, "fail to execute command "
-				"OP_ADD_ETHER_ADDRESS\n");
+			    "OP_ADD_ETHER_ADDRESS\n");
 
 	return err;
 }
@@ -775,9 +769,9 @@ i40evf_del_mac_addr(struct rte_eth_dev *dev, struct ether_addr *addr)
 
 	if (i40e_validate_mac_addr(addr->addr_bytes) != I40E_SUCCESS) {
 		PMD_DRV_LOG(ERR, "Invalid mac:%x-%x-%x-%x-%x-%x\n",
-			addr->addr_bytes[0], addr->addr_bytes[1],
-			addr->addr_bytes[2], addr->addr_bytes[3],
-			addr->addr_bytes[4], addr->addr_bytes[5]);
+			    addr->addr_bytes[0], addr->addr_bytes[1],
+			    addr->addr_bytes[2], addr->addr_bytes[3],
+			    addr->addr_bytes[4], addr->addr_bytes[5]);
 		return -1;
 	}
 
@@ -795,7 +789,7 @@ i40evf_del_mac_addr(struct rte_eth_dev *dev, struct ether_addr *addr)
 	err = i40evf_execute_vf_cmd(dev, &args);
 	if (err)
 		PMD_DRV_LOG(ERR, "fail to execute command "
-				"OP_DEL_ETHER_ADDRESS\n");
+			    "OP_DEL_ETHER_ADDRESS\n");
 
 	return err;
 }
@@ -1244,7 +1238,7 @@ i40evf_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 
 		if (err)
 			PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on\n",
-				rx_queue_id);
+				    rx_queue_id);
 	}
 
 	return err;
@@ -1263,7 +1257,7 @@ i40evf_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 
 		if (err) {
 			PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off\n",
-				rx_queue_id);
+				    rx_queue_id);
 			return err;
 		}
 
@@ -1288,7 +1282,7 @@ i40evf_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 
 		if (err)
 			PMD_DRV_LOG(ERR, "Failed to switch TX queue %u on\n",
-				tx_queue_id);
+				    tx_queue_id);
 	}
 
 	return err;
@@ -1307,7 +1301,7 @@ i40evf_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 
 		if (err) {
 			PMD_DRV_LOG(ERR, "Failed to switch TX queue %u of\n",
-				tx_queue_id);
+				    tx_queue_id);
 			return err;
 		}
 
@@ -1391,20 +1385,20 @@ i40evf_dev_start(struct rte_eth_dev *dev)
 		if (vf->max_pkt_len <= ETHER_MAX_LEN ||
 			vf->max_pkt_len > I40E_FRAME_SIZE_MAX) {
 			PMD_DRV_LOG(ERR, "maximum packet length must "
-				"be larger than %u and smaller than %u,"
-					"as jumbo frame is enabled\n",
-						(uint32_t)ETHER_MAX_LEN,
-					(uint32_t)I40E_FRAME_SIZE_MAX);
+				    "be larger than %u and smaller than %u,"
+				    "as jumbo frame is enabled\n",
+				    (uint32_t)ETHER_MAX_LEN,
+				    (uint32_t)I40E_FRAME_SIZE_MAX);
 			return I40E_ERR_CONFIG;
 		}
 	} else {
 		if (vf->max_pkt_len < ETHER_MIN_LEN ||
 			vf->max_pkt_len > ETHER_MAX_LEN) {
 			PMD_DRV_LOG(ERR, "maximum packet length must be "
-					"larger than %u and smaller than %u, "
-					"as jumbo frame is disabled\n",
-						(uint32_t)ETHER_MIN_LEN,
-						(uint32_t)ETHER_MAX_LEN);
+				    "larger than %u and smaller than %u, "
+				    "as jumbo frame is disabled\n",
+				    (uint32_t)ETHER_MIN_LEN,
+				    (uint32_t)ETHER_MAX_LEN);
 			return I40E_ERR_CONFIG;
 		}
 	}
diff --git a/lib/librte_pmd_i40e/i40e_pf.c b/lib/librte_pmd_i40e/i40e_pf.c
index 4e1e043..ed9773a 100644
--- a/lib/librte_pmd_i40e/i40e_pf.c
+++ b/lib/librte_pmd_i40e/i40e_pf.c
@@ -930,8 +930,7 @@ i40e_pf_host_handle_vf_msg(struct rte_eth_dev *dev,
 	case I40E_VIRTCHNL_OP_FCOE:
 		PMD_DRV_LOG(ERR, "OP_FCOE received, not supported\n");
 	default:
-		PMD_DRV_LOG(ERR, "%u received, not supported\n",
-							opcode);
+		PMD_DRV_LOG(ERR, "%u received, not supported\n", opcode);
 		i40e_pf_host_send_msg_to_vf(vf, opcode,
 				I40E_ERR_PARAM, NULL, 0);
 		break;
diff --git a/lib/librte_pmd_i40e/i40e_rxtx.c b/lib/librte_pmd_i40e/i40e_rxtx.c
index 70e2bd4..99a6572 100644
--- a/lib/librte_pmd_i40e/i40e_rxtx.c
+++ b/lib/librte_pmd_i40e/i40e_rxtx.c
@@ -728,8 +728,8 @@ rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 			uint16_t i, j;
 
 			PMD_RX_LOG(DEBUG, "Rx mbuf alloc failed for "
-					"port_id=%u, queue_id=%u\n",
-					rxq->port_id, rxq->queue_id);
+				   "port_id=%u, queue_id=%u\n",
+				   rxq->port_id, rxq->queue_id);
 			rxq->rx_nb_avail = 0;
 			rxq->rx_tail = (uint16_t)(rxq->rx_tail - nb_rx);
 			for (i = 0, j = rxq->rx_tail; i < nb_rx; i++, j++)
@@ -1453,7 +1453,7 @@ i40e_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 
 		if (err) {
 			PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on\n",
-				rx_queue_id);
+				    rx_queue_id);
 
 			i40e_rx_queue_release_mbufs(rxq);
 			i40e_reset_rx_queue(rxq);
@@ -1479,7 +1479,7 @@ i40e_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 
 		if (err) {
 			PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off\n",
-				rx_queue_id);
+				    rx_queue_id);
 			return err;
 		}
 		i40e_rx_queue_release_mbufs(rxq);
@@ -1503,7 +1503,7 @@ i40e_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 		err = i40e_switch_tx_queue(hw, tx_queue_id + q_base, TRUE);
 		if (err)
 			PMD_DRV_LOG(ERR, "Failed to switch TX queue %u on\n",
-				tx_queue_id);
+				    tx_queue_id);
 	}
 
 	return err;
@@ -1525,7 +1525,7 @@ i40e_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 
 		if (err) {
 			PMD_DRV_LOG(ERR, "Failed to switch TX queue %u of\n",
-				tx_queue_id);
+				    tx_queue_id);
 			return err;
 		}
 
@@ -1553,14 +1553,14 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
 
 	if (!vsi || queue_idx >= vsi->nb_qps) {
 		PMD_DRV_LOG(ERR, "VSI not available or queue "
-				"index exceeds the maximum\n");
+			    "index exceeds the maximum\n");
 		return I40E_ERR_PARAM;
 	}
 	if (((nb_desc * sizeof(union i40e_rx_desc)) % I40E_ALIGN) != 0 ||
 					(nb_desc > I40E_MAX_RING_DESC) ||
 					(nb_desc < I40E_MIN_RING_DESC)) {
 		PMD_DRV_LOG(ERR, "Number (%u) of receive descriptors is "
-						"invalid\n", nb_desc);
+			    "invalid\n", nb_desc);
 		return I40E_ERR_PARAM;
 	}
 
@@ -1577,7 +1577,7 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
 				 socket_id);
 	if (!rxq) {
 		PMD_DRV_LOG(ERR, "Failed to allocate memory for "
-					"rx queue data structure\n");
+			    "rx queue data structure\n");
 		return (-ENOMEM);
 	}
 	rxq->mp = mp;
@@ -1644,17 +1644,17 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
 	if (!use_def_burst_func && !dev->data->scattered_rx) {
 #ifdef RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC
 		PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
-			"satisfied. Rx Burst Bulk Alloc function will be "
-					"used on port=%d, queue=%d.\n",
-					rxq->port_id, rxq->queue_id);
+			     "satisfied. Rx Burst Bulk Alloc function will be "
+			     "used on port=%d, queue=%d.\n",
+			     rxq->port_id, rxq->queue_id);
 		dev->rx_pkt_burst = i40e_recv_pkts_bulk_alloc;
 #endif /* RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC */
 	} else {
 		PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
-				"not satisfied, Scattered Rx is requested, "
-				"or RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC is "
-					"not enabled on port=%d, queue=%d.\n",
-						rxq->port_id, rxq->queue_id);
+			     "not satisfied, Scattered Rx is requested, "
+			     "or RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC is "
+			     "not enabled on port=%d, queue=%d.\n",
+			     rxq->port_id, rxq->queue_id);
 	}
 
 	return 0;
@@ -1750,7 +1750,7 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
 
 	if (!vsi || queue_idx >= vsi->nb_qps) {
 		PMD_DRV_LOG(ERR, "VSI is NULL, or queue index (%u) "
-				"exceeds the maximum\n", queue_idx);
+			    "exceeds the maximum\n", queue_idx);
 		return I40E_ERR_PARAM;
 	}
 
@@ -1758,7 +1758,7 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
 					(nb_desc > I40E_MAX_RING_DESC) ||
 					(nb_desc < I40E_MIN_RING_DESC)) {
 		PMD_DRV_LOG(ERR, "Number (%u) of transmit descriptors is "
-                                                "invalid\n", nb_desc);
+			    "invalid\n", nb_desc);
 		return I40E_ERR_PARAM;
 	}
 
@@ -1847,7 +1847,7 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
 				  socket_id);
 	if (!txq) {
 		PMD_DRV_LOG(ERR, "Failed to allocate memory for "
-					"tx queue structure\n");
+			    "tx queue structure\n");
 		return (-ENOMEM);
 	}
 
@@ -2192,20 +2192,20 @@ i40e_rx_queue_config(struct i40e_rx_queue *rxq)
 		if (rxq->max_pkt_len <= ETHER_MAX_LEN ||
 			rxq->max_pkt_len > I40E_FRAME_SIZE_MAX) {
 			PMD_DRV_LOG(ERR, "maximum packet length must "
-				"be larger than %u and smaller than %u,"
-					"as jumbo frame is enabled\n",
-						(uint32_t)ETHER_MAX_LEN,
-					(uint32_t)I40E_FRAME_SIZE_MAX);
+				    "be larger than %u and smaller than %u,"
+				    "as jumbo frame is enabled\n",
+				    (uint32_t)ETHER_MAX_LEN,
+				    (uint32_t)I40E_FRAME_SIZE_MAX);
 			return I40E_ERR_CONFIG;
 		}
 	} else {
 		if (rxq->max_pkt_len < ETHER_MIN_LEN ||
 			rxq->max_pkt_len > ETHER_MAX_LEN) {
 			PMD_DRV_LOG(ERR, "maximum packet length must be "
-					"larger than %u and smaller than %u, "
-					"as jumbo frame is disabled\n",
-						(uint32_t)ETHER_MIN_LEN,
-						(uint32_t)ETHER_MAX_LEN);
+				    "larger than %u and smaller than %u, "
+				    "as jumbo frame is disabled\n",
+				    (uint32_t)ETHER_MIN_LEN,
+				    (uint32_t)ETHER_MAX_LEN);
 			return I40E_ERR_CONFIG;
 		}
 	}
-- 
1.7.10.4