DPDK patches and discussions
* [dpdk-dev] [PATCH 2/5] vmxnet3: cleanup style and indentation
       [not found] <20140612183347.07074830@nehalam.linuxnetplumber.net>
@ 2014-06-13  1:37 ` Stephen Hemminger
  2014-06-13  1:37 ` [dpdk-dev] [PATCH 3/5] vmxnet3: fix double spacing of log messages Stephen Hemminger
  1 sibling, 0 replies; 2+ messages in thread
From: Stephen Hemminger @ 2014-06-13  1:37 UTC (permalink / raw)
  To: Stephen Hemminger; +Cc: dev

This driver had several style problems, the worst of which
was botched indentation.

Fix almost all the problems reported by checkpatch.

Signed-off-by: Stephen Hemminger <stephen@networkplumber.org>


---
 lib/librte_pmd_vmxnet3/vmxnet3_ethdev.c |  158 ++++++++--------
 lib/librte_pmd_vmxnet3/vmxnet3_ethdev.h |   24 +-
 lib/librte_pmd_vmxnet3/vmxnet3_ring.h   |   12 -
 lib/librte_pmd_vmxnet3/vmxnet3_rxtx.c   |  300 ++++++++++++++++----------------
 4 files changed, 257 insertions(+), 237 deletions(-)

--- a/lib/librte_pmd_vmxnet3/vmxnet3_ethdev.c	2014-06-12 17:51:52.277119629 -0700
+++ b/lib/librte_pmd_vmxnet3/vmxnet3_ethdev.c	2014-06-12 18:10:05.495161130 -0700
@@ -216,24 +216,24 @@ eth_vmxnet3_dev_init(__attribute__((unus
 	hw->bufs_per_pkt = 1;
 
 	/* Check h/w version compatibility with driver. */
-    ver = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_VRRS);
-    PMD_INIT_LOG(DEBUG, "Harware version : %d\n", ver);
-    if (ver & 0x1)
+	ver = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_VRRS);
+	PMD_INIT_LOG(DEBUG, "Harware version : %d\n", ver);
+	if (ver & 0x1)
 		VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_VRRS, 1);
-    else {
+	else {
 		PMD_INIT_LOG(ERR, "Uncompatiable h/w version, should be 0x1\n");
 		return -EIO;
-    }
+	}
 
-    /* Check UPT version compatibility with driver. */
-    ver = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_UVRS);
-    PMD_INIT_LOG(DEBUG, "UPT harware version : %d\n", ver);
-    if (ver & 0x1)
+	/* Check UPT version compatibility with driver. */
+	ver = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_UVRS);
+	PMD_INIT_LOG(DEBUG, "UPT harware version : %d\n", ver);
+	if (ver & 0x1)
 		VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_UVRS, 1);
-    else {
+	else {
 		PMD_INIT_LOG(ERR, "Incompatiable UPT version.\n");
 		return -EIO;
-    }
+	}
 
 	/* Getting MAC Address */
 	mac_lo = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_MACL);
@@ -243,20 +243,20 @@ eth_vmxnet3_dev_init(__attribute__((unus
 
 	/* Allocate memory for storing MAC addresses */
 	eth_dev->data->mac_addrs = rte_zmalloc("vmxnet3", ETHER_ADDR_LEN *
-			VMXNET3_MAX_MAC_ADDRS, 0);
+					       VMXNET3_MAX_MAC_ADDRS, 0);
 	if (eth_dev->data->mac_addrs == NULL) {
 		PMD_INIT_LOG(ERR,
-			"Failed to allocate %d bytes needed to store MAC addresses",
-			ETHER_ADDR_LEN * VMXNET3_MAX_MAC_ADDRS);
+			     "Failed to allocate %d bytes needed to store MAC addresses",
+			     ETHER_ADDR_LEN * VMXNET3_MAX_MAC_ADDRS);
 		return -ENOMEM;
 	}
 	/* Copy the permanent MAC address */
 	ether_addr_copy((struct ether_addr *) hw->perm_addr,
 			&eth_dev->data->mac_addrs[0]);
 
-	PMD_INIT_LOG(DEBUG, "MAC Address : %02x:%02x:%02x:%02x:%02x:%02x \n",
-	               hw->perm_addr[0], hw->perm_addr[1], hw->perm_addr[2],
-	               hw->perm_addr[3], hw->perm_addr[4], hw->perm_addr[5]);
+	PMD_INIT_LOG(DEBUG, "MAC Address : %02x:%02x:%02x:%02x:%02x:%02x\n",
+		     hw->perm_addr[0], hw->perm_addr[1], hw->perm_addr[2],
+		     hw->perm_addr[3], hw->perm_addr[4], hw->perm_addr[5]);
 
 	/* Put device in Quiesce Mode */
 	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_QUIESCE_DEV);
@@ -293,20 +293,20 @@ vmxnet3_dev_configure(struct rte_eth_dev
 {
 	const struct rte_memzone *mz;
 	struct vmxnet3_hw *hw =
-			VMXNET3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+		VMXNET3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	size_t size;
 
 	PMD_INIT_FUNC_TRACE();
 
 	if (dev->data->nb_rx_queues > UINT8_MAX ||
-			dev->data->nb_tx_queues > UINT8_MAX)
-		return (-EINVAL);
+	    dev->data->nb_tx_queues > UINT8_MAX)
+		return -EINVAL;
 
 	size = dev->data->nb_rx_queues * sizeof(struct Vmxnet3_TxQueueDesc) +
-	          dev->data->nb_tx_queues * sizeof (struct Vmxnet3_RxQueueDesc);
+		dev->data->nb_tx_queues * sizeof(struct Vmxnet3_RxQueueDesc);
 
 	if (size > UINT16_MAX)
-		return (-EINVAL);
+		return -EINVAL;
 
 	hw->num_rx_queues = (uint8_t)dev->data->nb_rx_queues;
 	hw->num_tx_queues = (uint8_t)dev->data->nb_tx_queues;
@@ -315,12 +315,12 @@ vmxnet3_dev_configure(struct rte_eth_dev
 	 * Allocate a memzone for Vmxnet3_DriverShared - Vmxnet3_DSDevRead
 	 * on current socket
 	 */
-	mz = gpa_zone_reserve(dev, sizeof (struct Vmxnet3_DriverShared),
-		"shared", rte_socket_id(), 8);
+	mz = gpa_zone_reserve(dev, sizeof(struct Vmxnet3_DriverShared),
+			      "shared", rte_socket_id(), 8);
 
 	if (mz == NULL) {
 		PMD_INIT_LOG(ERR, "ERROR: Creating shared zone\n");
-		return (-ENOMEM);
+		return -ENOMEM;
 	}
 	memset(mz->addr, 0, mz->len);
 
@@ -328,14 +328,14 @@ vmxnet3_dev_configure(struct rte_eth_dev
 	hw->sharedPA = mz->phys_addr;
 
 	/*
-	* Allocate a memzone for Vmxnet3_RxQueueDesc - Vmxnet3_TxQueueDesc
-	* on current socket
-	*/
+	 * Allocate a memzone for Vmxnet3_RxQueueDesc - Vmxnet3_TxQueueDesc
+	 * on current socket
+	 */
 	mz = gpa_zone_reserve(dev, size, "queuedesc",
-					rte_socket_id(), VMXNET3_QUEUE_DESC_ALIGN);
+			      rte_socket_id(), VMXNET3_QUEUE_DESC_ALIGN);
 	if (mz == NULL) {
 		PMD_INIT_LOG(ERR, "ERROR: Creating queue descriptors zone\n");
-		return (-ENOMEM);
+		return -ENOMEM;
 	}
 	memset(mz->addr, 0, mz->len);
 
@@ -345,14 +345,15 @@ vmxnet3_dev_configure(struct rte_eth_dev
 	hw->queueDescPA = mz->phys_addr;
 	hw->queue_desc_len = (uint16_t)size;
 
-	if(dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS) {
+	if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS) {
 
 		/* Allocate memory structure for UPT1_RSSConf and configure */
-		mz = gpa_zone_reserve(dev, sizeof (struct VMXNET3_RSSConf), "rss_conf",
-				rte_socket_id(), CACHE_LINE_SIZE);
+		mz = gpa_zone_reserve(dev, sizeof(struct VMXNET3_RSSConf), "rss_conf",
+				      rte_socket_id(), CACHE_LINE_SIZE);
 		if (mz == NULL) {
-			PMD_INIT_LOG(ERR, "ERROR: Creating rss_conf structure zone\n");
-			return (-ENOMEM);
+			PMD_INIT_LOG(ERR,
+				     "ERROR: Creating rss_conf structure zone\n");
+			return -ENOMEM;
 		}
 		memset(mz->addr, 0, mz->len);
 
@@ -379,8 +380,8 @@ vmxnet3_setup_driver_shared(struct rte_e
 
 	/* Setting up Guest OS information */
 	devRead->misc.driverInfo.gos.gosBits   = sizeof(void *) == 4 ?
-											VMXNET3_GOS_BITS_32 :
-											VMXNET3_GOS_BITS_64;
+		VMXNET3_GOS_BITS_32 :
+		VMXNET3_GOS_BITS_64;
 	devRead->misc.driverInfo.gos.gosType   = VMXNET3_GOS_TYPE_LINUX;
 	devRead->misc.driverInfo.vmxnet3RevSpt = 1;
 	devRead->misc.driverInfo.uptVerSpt     = 1;
@@ -392,11 +393,11 @@ vmxnet3_setup_driver_shared(struct rte_e
 	devRead->misc.numRxQueues  = hw->num_rx_queues;
 
 	/*
-	* Set number of interrupts to 1
-	* PMD disables all the interrupts but this is MUST to activate device
-	* It needs at least one interrupt for link events to handle
-	* So we'll disable it later after device activation if needed
-	*/
+	 * Set number of interrupts to 1
+	 * PMD disables all the interrupts but this is MUST to activate device
+	 * It needs at least one interrupt for link events to handle
+	 * So we'll disable it later after device activation if needed
+	 */
 	devRead->intrConf.numIntrs = 1;
 	devRead->intrConf.intrCtrl |= VMXNET3_IC_DISABLE_ALL;
 
@@ -438,35 +439,33 @@ vmxnet3_setup_driver_shared(struct rte_e
 	devRead->rxFilterConf.rxMode = 0;
 
 	/* Setting up feature flags */
-	if(dev->data->dev_conf.rxmode.hw_ip_checksum) {
+	if (dev->data->dev_conf.rxmode.hw_ip_checksum)
 		devRead->misc.uptFeatures |= VMXNET3_F_RXCSUM;
-	}
 
-	if(dev->data->dev_conf.rxmode.hw_vlan_strip) {
+	if (dev->data->dev_conf.rxmode.hw_vlan_strip)
 		devRead->misc.uptFeatures |= VMXNET3_F_RXVLAN;
-	}
 
-	if(port_conf.rxmode.mq_mode == ETH_MQ_RX_RSS) {
+	if (port_conf.rxmode.mq_mode == ETH_MQ_RX_RSS) {
 		ret = vmxnet3_rss_configure(dev);
-		if(ret != VMXNET3_SUCCESS) {
+		if (ret != VMXNET3_SUCCESS)
 			return ret;
-		}
+
 		devRead->misc.uptFeatures |= VMXNET3_F_RSS;
 		devRead->rssConfDesc.confVer = 1;
 		devRead->rssConfDesc.confLen = sizeof(struct VMXNET3_RSSConf);
 		devRead->rssConfDesc.confPA  = hw->rss_confPA;
 	}
 
-	if(dev->data->dev_conf.rxmode.hw_vlan_filter) {
+	if (dev->data->dev_conf.rxmode.hw_vlan_filter) {
 		ret = vmxnet3_vlan_configure(dev);
-		if(ret != VMXNET3_SUCCESS) {
+		if (ret != VMXNET3_SUCCESS)
 			return ret;
-		}
 	}
 
-	PMD_INIT_LOG(DEBUG, "Writing MAC Address : %02x:%02x:%02x:%02x:%02x:%02x \n",
-					hw->perm_addr[0], hw->perm_addr[1], hw->perm_addr[2],
-					hw->perm_addr[3], hw->perm_addr[4], hw->perm_addr[5]);
+	PMD_INIT_LOG(DEBUG,
+		     "Writing MAC Address : %02x:%02x:%02x:%02x:%02x:%02x\n",
+		     hw->perm_addr[0], hw->perm_addr[1], hw->perm_addr[2],
+		     hw->perm_addr[3], hw->perm_addr[4], hw->perm_addr[5]);
 
 	/* Write MAC Address back to device */
 	mac_ptr = (uint32_t *)hw->perm_addr;
@@ -493,17 +492,16 @@ vmxnet3_dev_start(struct rte_eth_dev *de
 	PMD_INIT_FUNC_TRACE();
 
 	ret = vmxnet3_setup_driver_shared(dev);
-	if(ret != VMXNET3_SUCCESS) {
+	if (ret != VMXNET3_SUCCESS)
 		return ret;
-	}
 
 	/* Exchange shared data with device */
-	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_DSAL, VMXNET3_GET_ADDR_LO(
-	                      hw->sharedPA));
-    VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_DSAH, VMXNET3_GET_ADDR_HI(
-						  hw->sharedPA));
+	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_DSAL,
+			       VMXNET3_GET_ADDR_LO(hw->sharedPA));
+	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_DSAH,
+			       VMXNET3_GET_ADDR_HI(hw->sharedPA));
 
-    /* Activate device by register write */
+	/* Activate device by register write */
 	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_ACTIVATE_DEV);
 	status = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_CMD);
 
@@ -520,7 +518,7 @@ vmxnet3_dev_start(struct rte_eth_dev *de
 	 * Update RxMode of the device
 	 */
 	ret = vmxnet3_dev_rxtx_init(dev);
-	if(ret != VMXNET3_SUCCESS) {
+	if (ret != VMXNET3_SUCCESS) {
 		PMD_INIT_LOG(ERR, "Device receive init in %s: UNSUCCESSFUL\n", __func__);
 		return ret;
 	}
@@ -551,7 +549,7 @@ vmxnet3_dev_stop(struct rte_eth_dev *dev
 
 	PMD_INIT_FUNC_TRACE();
 
-	if(hw->adapter_stopped == TRUE) {
+	if (hw->adapter_stopped == TRUE) {
 		PMD_INIT_LOG(DEBUG, "Device already closed.\n");
 		return;
 	}
@@ -593,7 +591,7 @@ vmxnet3_dev_close(struct rte_eth_dev *de
 }
 
 static void
-vmxnet3_dev_stats_get( struct rte_eth_dev *dev, struct rte_eth_stats *stats)
+vmxnet3_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
 {
 	unsigned int i;
 	struct vmxnet3_hw *hw = VMXNET3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
@@ -679,7 +677,8 @@ static void
 vmxnet3_dev_set_rxmode(struct vmxnet3_hw *hw, uint32_t feature, int set) {
 
 	struct Vmxnet3_RxFilterConf *rxConf = &hw->shared->devRead.rxFilterConf;
-	if(set)
+
+	if (set)
 		rxConf->rxMode = rxConf->rxMode | feature;
 	else
 		rxConf->rxMode = rxConf->rxMode & (~feature);
@@ -692,6 +691,7 @@ static void
 vmxnet3_dev_promiscuous_enable(struct rte_eth_dev *dev)
 {
 	struct vmxnet3_hw *hw = VMXNET3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
 	vmxnet3_dev_set_rxmode(hw, VMXNET3_RXM_PROMISC, 1);
 }
 
@@ -700,6 +700,7 @@ static void
 vmxnet3_dev_promiscuous_disable(struct rte_eth_dev *dev)
 {
 	struct vmxnet3_hw *hw = VMXNET3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
 	vmxnet3_dev_set_rxmode(hw, VMXNET3_RXM_PROMISC, 0);
 }
 
@@ -708,6 +709,7 @@ static void
 vmxnet3_dev_allmulticast_enable(struct rte_eth_dev *dev)
 {
 	struct vmxnet3_hw *hw = VMXNET3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
 	vmxnet3_dev_set_rxmode(hw, VMXNET3_RXM_ALL_MULTI, 1);
 }
 
@@ -716,6 +718,7 @@ static void
 vmxnet3_dev_allmulticast_disable(struct rte_eth_dev *dev)
 {
 	struct vmxnet3_hw *hw = VMXNET3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
 	vmxnet3_dev_set_rxmode(hw, VMXNET3_RXM_ALL_MULTI, 0);
 }
 
@@ -724,21 +727,22 @@ static void
 vmxnet3_process_events(struct vmxnet3_hw *hw)
 {
 	uint32_t events = hw->shared->ecr;
-	if (!events){
+
+	if (!events) {
 		PMD_INIT_LOG(ERR, "No events to process in %s()\n", __func__);
 		return;
 	}
 
 	/*
-	* ECR bits when written with 1b are cleared. Hence write
-	* events back to ECR so that the bits which were set will be reset.
-	*/
+	 * ECR bits when written with 1b are cleared. Hence write
+	 * events back to ECR so that the bits which were set will be reset.
+	 */
 	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_ECR, events);
 
 	/* Check if link state has changed */
-   if (events & VMXNET3_ECR_LINK){
-	   PMD_INIT_LOG(ERR, "Process events in %s(): VMXNET3_ECR_LINK event\n", __func__);
-   }
+	if (events & VMXNET3_ECR_LINK)
+		PMD_INIT_LOG(ERR,
+			     "Process events in %s(): VMXNET3_ECR_LINK event\n", __func__);
 
 	/* Check if there is an error on xmit/recv queues */
 	if (events & (VMXNET3_ECR_TQERR | VMXNET3_ECR_RQERR)) {
@@ -746,14 +750,14 @@ vmxnet3_process_events(struct vmxnet3_hw
 
 		if (hw->tqd_start->status.stopped)
 			PMD_INIT_LOG(ERR, "tq error 0x%x\n",
-                     hw->tqd_start->status.error);
+				     hw->tqd_start->status.error);
 
 		if (hw->rqd_start->status.stopped)
 			PMD_INIT_LOG(ERR, "rq error 0x%x\n",
-                     hw->rqd_start->status.error);
+				     hw->rqd_start->status.error);
 
-      /* Reset the device */
-      /* Have to reset the device */
+		/* Reset the device */
+		/* Have to reset the device */
 	}
 
 	if (events & VMXNET3_ECR_DIC)
--- a/lib/librte_pmd_vmxnet3/vmxnet3_rxtx.c	2014-06-12 17:38:02.847929385 -0700
+++ b/lib/librte_pmd_vmxnet3/vmxnet3_rxtx.c	2014-06-12 18:09:13.122889523 -0700
@@ -102,7 +102,7 @@ rte_rxmbuf_alloc(struct rte_mempool *mp)
 
 	m = __rte_mbuf_raw_alloc(mp);
 	__rte_mbuf_sanity_check_raw(m, RTE_MBUF_PKT, 0);
-	return (m);
+	return m;
 }
 
 #ifdef RTE_LIBRTE_VMXNET3_DEBUG_DRIVER
@@ -110,24 +110,30 @@ static void
 vmxnet3_rxq_dump(struct vmxnet3_rx_queue *rxq)
 {
 	uint32_t avail = 0;
+
 	if (rxq == NULL)
 		return;
 
-	PMD_RX_LOG(DEBUG, "RXQ: cmd0 base : 0x%p cmd1 base : 0x%p comp ring base : 0x%p.\n",
-		        rxq->cmd_ring[0].base, rxq->cmd_ring[1].base, rxq->comp_ring.base);
-	PMD_RX_LOG(DEBUG, "RXQ: cmd0 basePA : 0x%lx cmd1 basePA : 0x%lx comp ring basePA : 0x%lx.\n",
-				(unsigned long)rxq->cmd_ring[0].basePA, (unsigned long)rxq->cmd_ring[1].basePA,
-		        (unsigned long)rxq->comp_ring.basePA);
+	PMD_RX_LOG(DEBUG,
+		   "RXQ: cmd0 base : 0x%p cmd1 base : 0x%p comp ring base : 0x%p.\n",
+		   rxq->cmd_ring[0].base, rxq->cmd_ring[1].base, rxq->comp_ring.base);
+	PMD_RX_LOG(DEBUG,
+		   "RXQ: cmd0 basePA : 0x%lx cmd1 basePA : 0x%lx comp ring basePA : 0x%lx.\n",
+		   (unsigned long)rxq->cmd_ring[0].basePA,
+		   (unsigned long)rxq->cmd_ring[1].basePA,
+		   (unsigned long)rxq->comp_ring.basePA);
 
 	avail = vmxnet3_cmd_ring_desc_avail(&rxq->cmd_ring[0]);
-	PMD_RX_LOG(DEBUG, "RXQ:cmd0: size=%u; free=%u; next2proc=%u; queued=%u\n",
-		    (uint32_t)rxq->cmd_ring[0].size, avail, rxq->comp_ring.next2proc,
-		    rxq->cmd_ring[0].size - avail);
+	PMD_RX_LOG(DEBUG,
+		   "RXQ:cmd0: size=%u; free=%u; next2proc=%u; queued=%u\n",
+		   (uint32_t)rxq->cmd_ring[0].size, avail,
+		   rxq->comp_ring.next2proc,
+		   rxq->cmd_ring[0].size - avail);
 
 	avail = vmxnet3_cmd_ring_desc_avail(&rxq->cmd_ring[1]);
 	PMD_RX_LOG(DEBUG, "RXQ:cmd1 size=%u; free=%u; next2proc=%u; queued=%u\n",
-			(uint32_t)rxq->cmd_ring[1].size, avail, rxq->comp_ring.next2proc,
-			rxq->cmd_ring[1].size - avail);
+		   (uint32_t)rxq->cmd_ring[1].size, avail, rxq->comp_ring.next2proc,
+		   rxq->cmd_ring[1].size - avail);
 
 }
 
@@ -135,18 +141,20 @@ static void
 vmxnet3_txq_dump(struct vmxnet3_tx_queue *txq)
 {
 	uint32_t avail = 0;
+
 	if (txq == NULL)
 		return;
 
 	PMD_TX_LOG(DEBUG, "TXQ: cmd base : 0x%p comp ring base : 0x%p.\n",
-		                txq->cmd_ring.base, txq->comp_ring.base);
+		   txq->cmd_ring.base, txq->comp_ring.base);
 	PMD_TX_LOG(DEBUG, "TXQ: cmd basePA : 0x%lx comp ring basePA : 0x%lx.\n",
-		                (unsigned long)txq->cmd_ring.basePA, (unsigned long)txq->comp_ring.basePA);
+		   (unsigned long)txq->cmd_ring.basePA,
+		   (unsigned long)txq->comp_ring.basePA);
 
 	avail = vmxnet3_cmd_ring_desc_avail(&txq->cmd_ring);
 	PMD_TX_LOG(DEBUG, "TXQ: size=%u; free=%u; next2proc=%u; queued=%u\n",
-			(uint32_t)txq->cmd_ring.size, avail,
-			txq->comp_ring.next2proc, txq->cmd_ring.size - avail);
+		   (uint32_t)txq->cmd_ring.size, avail,
+		   txq->comp_ring.next2proc, txq->cmd_ring.size - avail);
 }
 #endif
 
@@ -156,7 +164,8 @@ vmxnet3_cmd_ring_release(vmxnet3_cmd_rin
 	while (ring->next2comp != ring->next2fill) {
 		/* No need to worry about tx desc ownership, device is quiesced by now. */
 		vmxnet3_buf_info_t *buf_info = ring->buf_info + ring->next2comp;
-		if(buf_info->m) {
+
+		if (buf_info->m) {
 			rte_pktmbuf_free(buf_info->m);
 			buf_info->m = NULL;
 			buf_info->bufPA = 0;
@@ -171,6 +180,7 @@ void
 vmxnet3_dev_tx_queue_release(void *txq)
 {
 	vmxnet3_tx_queue_t *tq = txq;
+
 	if (txq != NULL) {
 		/* Release the cmd_ring */
 		vmxnet3_cmd_ring_release(&tq->cmd_ring);
@@ -182,6 +192,7 @@ vmxnet3_dev_rx_queue_release(void *rxq)
 {
 	int i;
 	vmxnet3_rx_queue_t *rq = rxq;
+
 	if (rxq != NULL) {
 		/* Release both the cmd_rings */
 		for (i = 0; i < VMXNET3_RX_CMDRING_SIZE; i++)
@@ -198,6 +209,7 @@ vmxnet3_dev_clear_queues(struct rte_eth_
 
 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
 		struct vmxnet3_tx_queue *txq = dev->data->tx_queues[i];
+
 		if (txq != NULL) {
 			txq->stopped = TRUE;
 			vmxnet3_dev_tx_queue_release(txq);
@@ -206,7 +218,8 @@ vmxnet3_dev_clear_queues(struct rte_eth_
 
 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
 		struct vmxnet3_rx_queue *rxq = dev->data->rx_queues[i];
-		if(rxq != NULL) {
+
+		if (rxq != NULL) {
 			rxq->stopped = TRUE;
 			vmxnet3_dev_rx_queue_release(rxq);
 		}
@@ -216,19 +229,19 @@ vmxnet3_dev_clear_queues(struct rte_eth_
 static inline void
 vmxnet3_tq_tx_complete(vmxnet3_tx_queue_t *txq)
 {
-   int completed = 0;
-   struct rte_mbuf *mbuf;
-   vmxnet3_comp_ring_t *comp_ring = &txq->comp_ring;
-   struct Vmxnet3_TxCompDesc *tcd = (struct Vmxnet3_TxCompDesc *)
-                                    (comp_ring->base + comp_ring->next2proc);
+	int completed = 0;
+	struct rte_mbuf *mbuf;
+	vmxnet3_comp_ring_t *comp_ring = &txq->comp_ring;
+	struct Vmxnet3_TxCompDesc *tcd = (struct Vmxnet3_TxCompDesc *)
+		(comp_ring->base + comp_ring->next2proc);
 
-   while (tcd->gen == comp_ring->gen) {
+	while (tcd->gen == comp_ring->gen) {
 
-	   /* Release cmd_ring descriptor and free mbuf */
+		/* Release cmd_ring descriptor and free mbuf */
 #ifdef RTE_LIBRTE_VMXNET3_DEBUG_DRIVER
-	    VMXNET3_ASSERT(txq->cmd_ring.base[tcd->txdIdx].txd.eop == 1);
+		VMXNET3_ASSERT(txq->cmd_ring.base[tcd->txdIdx].txd.eop == 1);
 #endif
-	    mbuf = txq->cmd_ring.buf_info[tcd->txdIdx].m;
+		mbuf = txq->cmd_ring.buf_info[tcd->txdIdx].m;
 		if (unlikely(mbuf == NULL))
 			rte_panic("EOP desc does not point to a valid mbuf");
 		else
@@ -241,16 +254,16 @@ vmxnet3_tq_tx_complete(vmxnet3_tx_queue_
 
 		vmxnet3_comp_ring_adv_next2proc(comp_ring);
 		tcd = (struct Vmxnet3_TxCompDesc *)(comp_ring->base +
-										  comp_ring->next2proc);
+						    comp_ring->next2proc);
 		completed++;
-   }
+	}
 
-   PMD_TX_LOG(DEBUG, "Processed %d tx comps & command descs.\n", completed);
+	PMD_TX_LOG(DEBUG, "Processed %d tx comps & command descs.\n", completed);
 }
 
 uint16_t
-vmxnet3_xmit_pkts( void *tx_queue, struct rte_mbuf **tx_pkts,
-		uint16_t nb_pkts)
+vmxnet3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+		  uint16_t nb_pkts)
 {
 	uint16_t nb_tx;
 	Vmxnet3_TxDesc *txd = NULL;
@@ -261,7 +274,7 @@ vmxnet3_xmit_pkts( void *tx_queue, struc
 
 	hw = txq->hw;
 
-	if(txq->stopped) {
+	if (txq->stopped) {
 		PMD_TX_LOG(DEBUG, "Tx queue is stopped.\n");
 		return 0;
 	}
@@ -270,9 +283,9 @@ vmxnet3_xmit_pkts( void *tx_queue, struc
 	vmxnet3_tq_tx_complete(txq);
 
 	nb_tx = 0;
-	while(nb_tx < nb_pkts) {
+	while (nb_tx < nb_pkts) {
 
-		if(vmxnet3_cmd_ring_desc_avail(&txq->cmd_ring)) {
+		if (vmxnet3_cmd_ring_desc_avail(&txq->cmd_ring)) {
 
 			txm = tx_pkts[nb_tx];
 			/* Don't support scatter packets yet, free them if met */
@@ -286,7 +299,7 @@ vmxnet3_xmit_pkts( void *tx_queue, struc
 			}
 
 			/* Needs to minus ether header len */
-			if(txm->pkt.data_len > (hw->cur_mtu + ETHER_HDR_LEN)) {
+			if (txm->pkt.data_len > (hw->cur_mtu + ETHER_HDR_LEN)) {
 				PMD_TX_LOG(DEBUG, "Packet data_len higher than MTU\n");
 				rte_pktmbuf_free(tx_pkts[nb_tx]);
 				txq->stats.drop_total++;
@@ -340,10 +353,10 @@ vmxnet3_xmit_pkts( void *tx_queue, struc
 		txq->shared->ctrl.txNumDeferred = 0;
 		/* Notify vSwitch that packets are available. */
 		VMXNET3_WRITE_BAR0_REG(hw, (VMXNET3_REG_TXPROD + txq->queue_id * VMXNET3_REG_ALIGN),
-				txq->cmd_ring.next2fill);
+				       txq->cmd_ring.next2fill);
 	}
 
-	return (nb_tx);
+	return nb_tx;
 }
 
 /*
@@ -358,24 +371,24 @@ vmxnet3_xmit_pkts( void *tx_queue, struc
  *
  */
 static inline int
-vmxnet3_post_rx_bufs(vmxnet3_rx_queue_t* rxq, uint8_t ring_id)
+vmxnet3_post_rx_bufs(vmxnet3_rx_queue_t *rxq, uint8_t ring_id)
 {
-   int err = 0;
-   uint32_t i = 0, val = 0;
-   struct vmxnet3_cmd_ring *ring = &rxq->cmd_ring[ring_id];
-
-   while (vmxnet3_cmd_ring_desc_avail(ring) > 0) {
+	int err = 0;
+	uint32_t i = 0, val = 0;
+	struct vmxnet3_cmd_ring *ring = &rxq->cmd_ring[ring_id];
 
+	while (vmxnet3_cmd_ring_desc_avail(ring) > 0) {
 		struct Vmxnet3_RxDesc *rxd;
 		struct rte_mbuf *mbuf;
 		vmxnet3_buf_info_t *buf_info = &ring->buf_info[ring->next2fill];
+
 		rxd = (struct Vmxnet3_RxDesc *)(ring->base + ring->next2fill);
 
 		if (ring->rid == 0) {
-			 /* Usually: One HEAD type buf per packet
-			   * val = (ring->next2fill % rxq->hw->bufs_per_pkt) ?
-			   * VMXNET3_RXD_BTYPE_BODY : VMXNET3_RXD_BTYPE_HEAD;
-			   */
+			/* Usually: One HEAD type buf per packet
+			 * val = (ring->next2fill % rxq->hw->bufs_per_pkt) ?
+			 * VMXNET3_RXD_BTYPE_BODY : VMXNET3_RXD_BTYPE_HEAD;
+			 */
 
 			/* We use single packet buffer so all heads here */
 			val = VMXNET3_RXD_BTYPE_HEAD;
@@ -399,7 +412,7 @@ vmxnet3_post_rx_bufs(vmxnet3_rx_queue_t*
 		 */
 		buf_info->m = mbuf;
 		buf_info->len = (uint16_t)(mbuf->buf_len -
-			RTE_PKTMBUF_HEADROOM);
+					   RTE_PKTMBUF_HEADROOM);
 		buf_info->bufPA = RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mbuf);
 
 		/* Load Rx Descriptor with the buffer's GPA */
@@ -413,13 +426,13 @@ vmxnet3_post_rx_bufs(vmxnet3_rx_queue_t*
 
 		vmxnet3_cmd_ring_adv_next2fill(ring);
 		i++;
-   }
+	}
 
-   /* Return error only if no buffers are posted at present */
-   if (vmxnet3_cmd_ring_desc_avail(ring) >= (ring->size -1))
-      return -err;
-   else
-      return i;
+	/* Return error only if no buffers are posted at present */
+	if (vmxnet3_cmd_ring_desc_avail(ring) >= (ring->size - 1))
+		return -err;
+	else
+		return i;
 }
 
 /*
@@ -449,21 +462,21 @@ vmxnet3_recv_pkts(void *rx_queue, struct
 
 	rcd = &rxq->comp_ring.base[rxq->comp_ring.next2proc].rcd;
 
-	if(rxq->stopped) {
+	if (rxq->stopped) {
 		PMD_RX_LOG(DEBUG, "Rx queue is stopped.\n");
 		return 0;
 	}
 
 	while (rcd->gen == rxq->comp_ring.gen) {
 
-		if(nb_rx >= nb_pkts)
+		if (nb_rx >= nb_pkts)
 			break;
 		idx = rcd->rxdIdx;
 		ring_idx = (uint8_t)((rcd->rqID == rxq->qid1) ? 0 : 1);
 		rxd = (Vmxnet3_RxDesc *)rxq->cmd_ring[ring_idx].base + idx;
 		rbi = rxq->cmd_ring[ring_idx].buf_info + idx;
 
-		if(rcd->sop !=1 || rcd->eop != 1) {
+		if (rcd->sop != 1 || rcd->eop != 1) {
 			rte_pktmbuf_free_seg(rbi->m);
 
 			PMD_RX_LOG(DEBUG, "Packet spread across multiple buffers\n)");
@@ -479,7 +492,7 @@ vmxnet3_recv_pkts(void *rx_queue, struct
 #endif
 			if (rcd->len == 0) {
 				PMD_RX_LOG(DEBUG, "Rx buf was skipped. rxring[%d][%d]\n)",
-							 ring_idx, idx);
+					   ring_idx, idx);
 #ifdef RTE_LIBRTE_VMXNET3_DEBUG_DRIVER
 				VMXNET3_ASSERT(rcd->sop && rcd->eop);
 #endif
@@ -490,8 +503,9 @@ vmxnet3_recv_pkts(void *rx_queue, struct
 
 			/* Assuming a packet is coming in a single packet buffer */
 			if (rxd->btype != VMXNET3_RXD_BTYPE_HEAD) {
-				PMD_RX_LOG(DEBUG, "Alert : Misbehaving device, incorrect "
-						  " buffer type used. iPacket dropped.\n");
+				PMD_RX_LOG(DEBUG,
+					   "Alert : Misbehaving device, incorrect "
+					   " buffer type used. iPacket dropped.\n");
 				rte_pktmbuf_free_seg(rbi->m);
 				goto rcd_done;
 			}
@@ -513,13 +527,13 @@ vmxnet3_recv_pkts(void *rx_queue, struct
 				rxq->stats.drop_total++;
 				rxq->stats.drop_err++;
 
-				if(!rcd->fcs) {
+				if (!rcd->fcs) {
 					rxq->stats.drop_fcs++;
 					PMD_RX_LOG(ERR, "Recv packet dropped due to frame err.\n");
 				}
 				PMD_RX_LOG(ERR, "Error in received packet rcd#:%d rxd:%d\n",
-						 (int)(rcd - (struct Vmxnet3_RxCompDesc *)
-							   rxq->comp_ring.base), rcd->rxdIdx);
+					   (int)(rcd - (struct Vmxnet3_RxCompDesc *)
+						 rxq->comp_ring.base), rcd->rxdIdx);
 				rte_pktmbuf_free_seg(rxm);
 
 				goto rcd_done;
@@ -529,14 +543,14 @@ vmxnet3_recv_pkts(void *rx_queue, struct
 			if (rcd->ts) {
 
 				PMD_RX_LOG(ERR, "Received packet with vlan ID: %d.\n",
-						 rcd->tci);
+					   rcd->tci);
 				rxm->ol_flags = PKT_RX_VLAN_PKT;
 
 #ifdef RTE_LIBRTE_VMXNET3_DEBUG_DRIVER
 				VMXNET3_ASSERT(rxm &&
-					rte_pktmbuf_mtod(rxm, void *));
+					       rte_pktmbuf_mtod(rxm, void *));
 #endif
-				//Copy vlan tag in packet buffer
+				/* Copy vlan tag in packet buffer */
 				rxm->pkt.vlan_macip.f.vlan_tci =
 					rte_le_to_cpu_16((uint16_t)rcd->tci);
 
@@ -563,7 +577,7 @@ rcd_done:
 			vmxnet3_post_rx_bufs(rxq, ring_idx);
 			if (unlikely(rxq->shared->ctrl.updateRxProd)) {
 				VMXNET3_WRITE_BAR0_REG(hw, rxprod_reg[ring_idx] + (rxq->queue_id * VMXNET3_REG_ALIGN),
-								  rxq->cmd_ring[ring_idx].next2fill);
+						       rxq->cmd_ring[ring_idx].next2fill);
 			}
 
 			/* Advance to the next descriptor in comp_ring */
@@ -572,14 +586,15 @@ rcd_done:
 			rcd = &rxq->comp_ring.base[rxq->comp_ring.next2proc].rcd;
 			nb_rxd++;
 			if (nb_rxd > rxq->cmd_ring[0].size) {
-				PMD_RX_LOG(ERR, "Used up quota of receiving packets,"
-						 " relinquish control.\n");
+				PMD_RX_LOG(ERR,
+					   "Used up quota of receiving packets,"
+					   " relinquish control.\n");
 				break;
 			}
 		}
 	}
 
-	return (nb_rx);
+	return nb_rx;
 }
 
 /*
@@ -608,78 +623,78 @@ ring_dma_zone_reserve(struct rte_eth_dev
 
 int
 vmxnet3_dev_tx_queue_setup(struct rte_eth_dev *dev,
-			 uint16_t queue_idx,
-			 uint16_t nb_desc,
-			 unsigned int socket_id,
-			 __attribute__((unused)) const struct rte_eth_txconf *tx_conf)
+			   uint16_t queue_idx,
+			   uint16_t nb_desc,
+			   unsigned int socket_id,
+			   __attribute__((unused)) const struct rte_eth_txconf *tx_conf)
 {
 	const struct rte_memzone *mz;
 	struct vmxnet3_tx_queue *txq;
 	struct vmxnet3_hw     *hw;
-    struct vmxnet3_cmd_ring *ring;
-    struct vmxnet3_comp_ring *comp_ring;
-    int size;
+	struct vmxnet3_cmd_ring *ring;
+	struct vmxnet3_comp_ring *comp_ring;
+	int size;
 
 	PMD_INIT_FUNC_TRACE();
 	hw = VMXNET3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
 	if ((tx_conf->txq_flags & ETH_TXQ_FLAGS_NOMULTSEGS) !=
-		ETH_TXQ_FLAGS_NOMULTSEGS) {
+	    ETH_TXQ_FLAGS_NOMULTSEGS) {
 		PMD_INIT_LOG(ERR, "TX Multi segment not support yet\n");
-		return (-EINVAL);
+		return -EINVAL;
 	}
 
 	if ((tx_conf->txq_flags & ETH_TXQ_FLAGS_NOOFFLOADS) !=
-		ETH_TXQ_FLAGS_NOOFFLOADS) {
+	    ETH_TXQ_FLAGS_NOOFFLOADS) {
 		PMD_INIT_LOG(ERR, "TX not support offload function yet\n");
-		return (-EINVAL);
+		return -EINVAL;
 	}
 
 	txq = rte_zmalloc("ethdev_tx_queue", sizeof(struct vmxnet3_tx_queue), CACHE_LINE_SIZE);
 	if (txq == NULL) {
 		PMD_INIT_LOG(ERR, "Can not allocate tx queue structure\n");
-		return (-ENOMEM);
+		return -ENOMEM;
 	}
 
 	txq->queue_id = queue_idx;
 	txq->port_id = dev->data->port_id;
 	txq->shared = &hw->tqd_start[queue_idx];
-    txq->hw = hw;
-    txq->qid = queue_idx;
-    txq->stopped = TRUE;
+	txq->hw = hw;
+	txq->qid = queue_idx;
+	txq->stopped = TRUE;
 
-    ring = &txq->cmd_ring;
-    comp_ring = &txq->comp_ring;
+	ring = &txq->cmd_ring;
+	comp_ring = &txq->comp_ring;
 
-    /* Tx vmxnet ring length should be between 512-4096 */
-    if (nb_desc < VMXNET3_DEF_TX_RING_SIZE) {
+	/* Tx vmxnet ring length should be between 512-4096 */
+	if (nb_desc < VMXNET3_DEF_TX_RING_SIZE) {
 		PMD_INIT_LOG(ERR, "VMXNET3 Tx Ring Size Min: %u\n",
-					VMXNET3_DEF_TX_RING_SIZE);
+			     VMXNET3_DEF_TX_RING_SIZE);
 		return -EINVAL;
 	} else if (nb_desc > VMXNET3_TX_RING_MAX_SIZE) {
 		PMD_INIT_LOG(ERR, "VMXNET3 Tx Ring Size Max: %u\n",
-					VMXNET3_TX_RING_MAX_SIZE);
+			     VMXNET3_TX_RING_MAX_SIZE);
 		return -EINVAL;
-    } else {
+	} else {
 		ring->size = nb_desc;
 		ring->size &= ~VMXNET3_RING_SIZE_MASK;
-    }
-    comp_ring->size = ring->size;
+	}
+	comp_ring->size = ring->size;
 
-    /* Tx vmxnet rings structure initialization*/
-    ring->next2fill = 0;
-    ring->next2comp = 0;
-    ring->gen = VMXNET3_INIT_GEN;
-    comp_ring->next2proc = 0;
-    comp_ring->gen = VMXNET3_INIT_GEN;
+	/* Tx vmxnet rings structure initialization*/
+	ring->next2fill = 0;
+	ring->next2comp = 0;
+	ring->gen = VMXNET3_INIT_GEN;
+	comp_ring->next2proc = 0;
+	comp_ring->gen = VMXNET3_INIT_GEN;
 
-    size = sizeof(struct Vmxnet3_TxDesc) * ring->size;
-    size += sizeof(struct Vmxnet3_TxCompDesc) * comp_ring->size;
+	size = sizeof(struct Vmxnet3_TxDesc) * ring->size;
+	size += sizeof(struct Vmxnet3_TxCompDesc) * comp_ring->size;
 
-    mz = ring_dma_zone_reserve(dev, "txdesc", queue_idx, size, socket_id);
+	mz = ring_dma_zone_reserve(dev, "txdesc", queue_idx, size, socket_id);
 	if (mz == NULL) {
 		PMD_INIT_LOG(ERR, "ERROR: Creating queue descriptors zone\n");
-		return (-ENOMEM);
+		return -ENOMEM;
 	}
 	memset(mz->addr, 0, mz->len);
 
@@ -688,16 +703,16 @@ vmxnet3_dev_tx_queue_setup(struct rte_et
 	ring->basePA = mz->phys_addr;
 
 	/* comp_ring initialization */
-    comp_ring->base = ring->base + ring->size;
-    comp_ring->basePA = ring->basePA +
-				(sizeof(struct Vmxnet3_TxDesc) * ring->size);
+	comp_ring->base = ring->base + ring->size;
+	comp_ring->basePA = ring->basePA +
+		(sizeof(struct Vmxnet3_TxDesc) * ring->size);
 
-    /* cmd_ring0 buf_info allocation */
+	/* cmd_ring0 buf_info allocation */
 	ring->buf_info = rte_zmalloc("tx_ring_buf_info",
-				ring->size * sizeof(vmxnet3_buf_info_t), CACHE_LINE_SIZE);
+				     ring->size * sizeof(vmxnet3_buf_info_t), CACHE_LINE_SIZE);
 	if (ring->buf_info == NULL) {
 		PMD_INIT_LOG(ERR, "ERROR: Creating tx_buf_info structure\n");
-		return (-ENOMEM);
+		return -ENOMEM;
 	}
 
 	/* Update the data portion with txq */
@@ -708,11 +723,11 @@ vmxnet3_dev_tx_queue_setup(struct rte_et
 
 int
 vmxnet3_dev_rx_queue_setup(struct rte_eth_dev *dev,
-			 uint16_t queue_idx,
-			 uint16_t nb_desc,
-			 unsigned int socket_id,
-			 __attribute__((unused)) const struct rte_eth_rxconf *rx_conf,
-			 struct rte_mempool *mp)
+			   uint16_t queue_idx,
+			   uint16_t nb_desc,
+			   unsigned int socket_id,
+			   __attribute__((unused)) const struct rte_eth_rxconf *rx_conf,
+			   struct rte_mempool *mp)
 {
 	const struct rte_memzone *mz;
 	struct vmxnet3_rx_queue *rxq;
@@ -729,21 +744,21 @@ vmxnet3_dev_rx_queue_setup(struct rte_et
 	hw = VMXNET3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
 	mbp_priv = (struct rte_pktmbuf_pool_private *)
-				rte_mempool_get_priv(mp);
+		rte_mempool_get_priv(mp);
 	buf_size = (uint16_t) (mbp_priv->mbuf_data_room_size -
-				   RTE_PKTMBUF_HEADROOM);
+			       RTE_PKTMBUF_HEADROOM);
 
 	if (dev->data->dev_conf.rxmode.max_rx_pkt_len > buf_size) {
 		PMD_INIT_LOG(ERR, "buf_size = %u, max_pkt_len = %u, "
-				"VMXNET3 don't support scatter packets yet\n",
-				buf_size, dev->data->dev_conf.rxmode.max_rx_pkt_len);
-		return (-EINVAL);
+			     "VMXNET3 don't support scatter packets yet\n",
+			     buf_size, dev->data->dev_conf.rxmode.max_rx_pkt_len);
+		return -EINVAL;
 	}
 
 	rxq = rte_zmalloc("ethdev_rx_queue", sizeof(struct vmxnet3_rx_queue), CACHE_LINE_SIZE);
 	if (rxq == NULL) {
 		PMD_INIT_LOG(ERR, "Can not allocate rx queue structure\n");
-		return (-ENOMEM);
+		return -ENOMEM;
 	}
 
 	rxq->mp = mp;
@@ -760,10 +775,10 @@ vmxnet3_dev_rx_queue_setup(struct rte_et
 	comp_ring = &rxq->comp_ring;
 
 	/* Rx vmxnet rings length should be between 256-4096 */
-	if(nb_desc < VMXNET3_DEF_RX_RING_SIZE) {
+	if (nb_desc < VMXNET3_DEF_RX_RING_SIZE) {
 		PMD_INIT_LOG(ERR, "VMXNET3 Rx Ring Size Min: 256\n");
 		return -EINVAL;
-	} else if(nb_desc > VMXNET3_RX_RING_MAX_SIZE) {
+	} else if (nb_desc > VMXNET3_RX_RING_MAX_SIZE) {
 		PMD_INIT_LOG(ERR, "VMXNET3 Rx Ring Size Max: 4096\n");
 		return -EINVAL;
 	} else {
@@ -790,7 +805,7 @@ vmxnet3_dev_rx_queue_setup(struct rte_et
 	mz = ring_dma_zone_reserve(dev, "rxdesc", queue_idx, size, socket_id);
 	if (mz == NULL) {
 		PMD_INIT_LOG(ERR, "ERROR: Creating queue descriptors zone\n");
-		return (-ENOMEM);
+		return -ENOMEM;
 	}
 	memset(mz->addr, 0, mz->len);
 
@@ -805,24 +820,24 @@ vmxnet3_dev_rx_queue_setup(struct rte_et
 	/* comp_ring initialization */
 	comp_ring->base = ring1->base +  ring1->size;
 	comp_ring->basePA = ring1->basePA + sizeof(struct Vmxnet3_RxDesc) *
-					   ring1->size;
+		ring1->size;
 
 	/* cmd_ring0-cmd_ring1 buf_info allocation */
-	for(i = 0; i < VMXNET3_RX_CMDRING_SIZE; i++) {
-
-	  ring = &rxq->cmd_ring[i];
-	  ring->rid = i;
-	  rte_snprintf(mem_name, sizeof(mem_name), "rx_ring_%d_buf_info", i);
+	for (i = 0; i < VMXNET3_RX_CMDRING_SIZE; i++) {
 
-	  ring->buf_info = rte_zmalloc(mem_name, ring->size * sizeof(vmxnet3_buf_info_t), CACHE_LINE_SIZE);
-	  if (ring->buf_info == NULL) {
-		  PMD_INIT_LOG(ERR, "ERROR: Creating rx_buf_info structure\n");
-		  return (-ENOMEM);
-	  }
+		ring = &rxq->cmd_ring[i];
+		ring->rid = i;
+		rte_snprintf(mem_name, sizeof(mem_name), "rx_ring_%d_buf_info", i);
+
+		ring->buf_info = rte_zmalloc(mem_name, ring->size * sizeof(vmxnet3_buf_info_t), CACHE_LINE_SIZE);
+		if (ring->buf_info == NULL) {
+			PMD_INIT_LOG(ERR, "ERROR: Creating rx_buf_info structure\n");
+			return -ENOMEM;
+		}
 	}
 
-    /* Update the data portion with rxq */
-    dev->data->rx_queues[queue_idx] = rxq;
+	/* Update the data portion with rxq */
+	dev->data->rx_queues[queue_idx] = rxq;
 
 	return 0;
 }
@@ -842,19 +857,19 @@ vmxnet3_dev_rxtx_init(struct rte_eth_dev
 	hw = VMXNET3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
 	for (i = 0; i < hw->num_rx_queues; i++) {
-
 		vmxnet3_rx_queue_t *rxq = dev->data->rx_queues[i];
-		for(j = 0;j < VMXNET3_RX_CMDRING_SIZE;j++) {
+
+		for (j = 0; j < VMXNET3_RX_CMDRING_SIZE; j++) {
 			/* Passing 0 as alloc_num will allocate full ring */
 			ret = vmxnet3_post_rx_bufs(rxq, j);
 			if (ret <= 0) {
-			  PMD_INIT_LOG(ERR, "ERROR: Posting Rxq: %d buffers ring: %d\n", i, j);
-			  return (-ret);
+				PMD_INIT_LOG(ERR, "ERROR: Posting Rxq: %d buffers ring: %d\n", i, j);
+				return -ret;
 			}
 			/* Updating device with the index:next2fill to fill the mbufs for coming packets */
 			if (unlikely(rxq->shared->ctrl.updateRxProd)) {
 				VMXNET3_WRITE_BAR0_REG(hw, rxprod_reg[j] + (rxq->queue_id * VMXNET3_REG_ALIGN),
-						rxq->cmd_ring[j].next2fill);
+						       rxq->cmd_ring[j].next2fill);
 			}
 		}
 		rxq->stopped = FALSE;
@@ -862,6 +877,7 @@ vmxnet3_dev_rxtx_init(struct rte_eth_dev
 
 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
 		struct vmxnet3_tx_queue *txq = dev->data->tx_queues[i];
+
 		txq->stopped = FALSE;
 	}
 
--- a/lib/librte_pmd_vmxnet3/vmxnet3_ethdev.h	2014-06-12 17:38:02.843929358 -0700
+++ b/lib/librte_pmd_vmxnet3/vmxnet3_ethdev.h	2014-06-12 17:53:46.013791850 -0700
@@ -36,7 +36,7 @@
 
 #ifdef RTE_LIBRTE_VMXNET3_DEBUG_DRIVER
 #define VMXNET3_ASSERT(x) do { \
-	if(!(x)) rte_panic("VMXNET3: x"); \
+	if (!(x)) rte_panic("VMXNET3: x"); \
 } while(0)
 #endif
 
@@ -64,16 +64,16 @@
 /* RSS configuration structure - shared with device through GPA */
 typedef
 struct VMXNET3_RSSConf {
-   uint16_t   hashType;
-   uint16_t   hashFunc;
-   uint16_t   hashKeySize;
-   uint16_t   indTableSize;
-   uint8_t    hashKey[VMXNET3_RSS_MAX_KEY_SIZE];
-   /*
-    * indTable is only element that can be changed without
-    * device quiesce-reset-update-activation cycle
-    */
-   uint8_t    indTable[VMXNET3_RSS_MAX_IND_TABLE_SIZE];
+	uint16_t   hashType;
+	uint16_t   hashFunc;
+	uint16_t   hashKeySize;
+	uint16_t   indTableSize;
+	uint8_t    hashKey[VMXNET3_RSS_MAX_KEY_SIZE];
+	/*
+	 * indTable is only element that can be changed without
+	 * device quiesce-reset-update-activation cycle
+	 */
+	uint8_t    indTable[VMXNET3_RSS_MAX_IND_TABLE_SIZE];
 } VMXNET3_RSSConf;
 
 typedef
@@ -134,7 +134,7 @@ struct vmxnet3_adapter {
 
 #define VMXNET3_PCI_REG(reg) (*((volatile uint32_t *)(reg)))
 
-static inline uint32_t vmxnet3_read_addr(volatile void* addr)
+static inline uint32_t vmxnet3_read_addr(volatile void *addr)
 {
 	return VMXNET3_PCI_REG(addr);
 }
--- a/lib/librte_pmd_vmxnet3/vmxnet3_ring.h	2014-06-12 17:52:14.121248549 -0700
+++ b/lib/librte_pmd_vmxnet3/vmxnet3_ring.h	2014-06-12 17:52:54.669488093 -0700
@@ -79,7 +79,7 @@ vmxnet3_cmd_ring_adv_next2fill(struct vm
 static inline void
 vmxnet3_cmd_ring_adv_next2comp(struct vmxnet3_cmd_ring *ring)
 {
-   VMXNET3_INC_RING_IDX_ONLY(ring->next2comp, ring->size);
+	VMXNET3_INC_RING_IDX_ONLY(ring->next2comp, ring->size);
 }
 
 static inline uint32_t
@@ -96,12 +96,12 @@ vmxnet3_cmd_ring_desc_empty(struct vmxne
 }
 
 typedef struct vmxnet3_comp_ring {
-	uint32_t               size;
-	uint32_t               next2proc;
-	uint8_t                gen;
-	uint8_t                intr_idx;
+	uint32_t	       size;
+	uint32_t	       next2proc;
+	uint8_t		       gen;
+	uint8_t		       intr_idx;
 	Vmxnet3_GenericDesc    *base;
-	uint64_t               basePA;
+	uint64_t	       basePA;
 } vmxnet3_comp_ring_t;
 
 static inline void


* [dpdk-dev] [PATCH 3/5] vmxnet3: fix double spacing of log messages
       [not found] <20140612183347.07074830@nehalam.linuxnetplumber.net>
  2014-06-13  1:37 ` [dpdk-dev] [PATCH 2/5] vmxnet3: cleanup style and indentation Stephen Hemminger
@ 2014-06-13  1:37 ` Stephen Hemminger
  1 sibling, 0 replies; 2+ messages in thread
From: Stephen Hemminger @ 2014-06-13  1:37 UTC (permalink / raw)
  To: Stephen Hemminger; +Cc: dev

The debug log macros already include a newline, so there is no need
to double-space the output.

Note: other drivers have the same problem.
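
For context, the effect can be reproduced with a minimal stand-alone sketch.
This is not the driver's actual macro (which wraps RTE_LOG); it only models
the trailing-newline behaviour described above:

    #include <stdio.h>

    /* Sketch of a log macro that, like the PMD_*_LOG macros, appends
     * its own newline to every message. */
    #define PMD_INIT_LOG(level, fmt, ...) \
            fprintf(stderr, #level ": " fmt "\n", ##__VA_ARGS__)

    int main(void)
    {
            PMD_INIT_LOG(DEBUG, "Device reset.");    /* one line of output */
            PMD_INIT_LOG(DEBUG, "Device reset.\n");  /* extra blank line follows */
            return 0;
    }

Dropping the "\n" from the call sites, as this patch does, removes the blank
lines from the log output.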

Signed-off-by: Stephen Hemminger <stephen@networkplumber.org>

---
 lib/librte_pmd_vmxnet3/vmxnet3_ethdev.c |   42 +++++++++----------
 lib/librte_pmd_vmxnet3/vmxnet3_rxtx.c   |   70 ++++++++++++++++----------------
 2 files changed, 56 insertions(+), 56 deletions(-)

--- a/lib/librte_pmd_vmxnet3/vmxnet3_ethdev.c	2014-06-12 18:10:45.367368561 -0700
+++ b/lib/librte_pmd_vmxnet3/vmxnet3_ethdev.c	2014-06-12 18:11:29.251597504 -0700
@@ -217,21 +217,21 @@ eth_vmxnet3_dev_init(__attribute__((unus
 
 	/* Check h/w version compatibility with driver. */
 	ver = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_VRRS);
-	PMD_INIT_LOG(DEBUG, "Harware version : %d\n", ver);
+	PMD_INIT_LOG(DEBUG, "Harware version : %d", ver);
 	if (ver & 0x1)
 		VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_VRRS, 1);
 	else {
-		PMD_INIT_LOG(ERR, "Uncompatiable h/w version, should be 0x1\n");
+		PMD_INIT_LOG(ERR, "Uncompatiable h/w version, should be 0x1");
 		return -EIO;
 	}
 
 	/* Check UPT version compatibility with driver. */
 	ver = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_UVRS);
-	PMD_INIT_LOG(DEBUG, "UPT harware version : %d\n", ver);
+	PMD_INIT_LOG(DEBUG, "UPT harware version : %d", ver);
 	if (ver & 0x1)
 		VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_UVRS, 1);
 	else {
-		PMD_INIT_LOG(ERR, "Incompatiable UPT version.\n");
+		PMD_INIT_LOG(ERR, "Incompatiable UPT version.");
 		return -EIO;
 	}
 
@@ -254,7 +254,7 @@ eth_vmxnet3_dev_init(__attribute__((unus
 	ether_addr_copy((struct ether_addr *) hw->perm_addr,
 			&eth_dev->data->mac_addrs[0]);
 
-	PMD_INIT_LOG(DEBUG, "MAC Address : %02x:%02x:%02x:%02x:%02x:%02x\n",
+	PMD_INIT_LOG(DEBUG, "MAC Address : %02x:%02x:%02x:%02x:%02x:%02x",
 		     hw->perm_addr[0], hw->perm_addr[1], hw->perm_addr[2],
 		     hw->perm_addr[3], hw->perm_addr[4], hw->perm_addr[5]);
 
@@ -319,7 +319,7 @@ vmxnet3_dev_configure(struct rte_eth_dev
 			      "shared", rte_socket_id(), 8);
 
 	if (mz == NULL) {
-		PMD_INIT_LOG(ERR, "ERROR: Creating shared zone\n");
+		PMD_INIT_LOG(ERR, "ERROR: Creating shared zone");
 		return -ENOMEM;
 	}
 	memset(mz->addr, 0, mz->len);
@@ -334,7 +334,7 @@ vmxnet3_dev_configure(struct rte_eth_dev
 	mz = gpa_zone_reserve(dev, size, "queuedesc",
 			      rte_socket_id(), VMXNET3_QUEUE_DESC_ALIGN);
 	if (mz == NULL) {
-		PMD_INIT_LOG(ERR, "ERROR: Creating queue descriptors zone\n");
+		PMD_INIT_LOG(ERR, "ERROR: Creating queue descriptors zone");
 		return -ENOMEM;
 	}
 	memset(mz->addr, 0, mz->len);
@@ -352,7 +352,7 @@ vmxnet3_dev_configure(struct rte_eth_dev
 				      rte_socket_id(), CACHE_LINE_SIZE);
 		if (mz == NULL) {
 			PMD_INIT_LOG(ERR,
-				     "ERROR: Creating rss_conf structure zone\n");
+				     "ERROR: Creating rss_conf structure zone");
 			return -ENOMEM;
 		}
 		memset(mz->addr, 0, mz->len);
@@ -463,7 +463,7 @@ vmxnet3_setup_driver_shared(struct rte_e
 	}
 
 	PMD_INIT_LOG(DEBUG,
-		     "Writing MAC Address : %02x:%02x:%02x:%02x:%02x:%02x\n",
+		     "Writing MAC Address : %02x:%02x:%02x:%02x:%02x:%02x",
 		     hw->perm_addr[0], hw->perm_addr[1], hw->perm_addr[2],
 		     hw->perm_addr[3], hw->perm_addr[4], hw->perm_addr[5]);
 
@@ -506,7 +506,7 @@ vmxnet3_dev_start(struct rte_eth_dev *de
 	status = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_CMD);
 
 	if (status != 0) {
-		PMD_INIT_LOG(ERR, "Device activation in %s(): UNSUCCESSFUL\n", __func__);
+		PMD_INIT_LOG(ERR, "Device activation in %s(): UNSUCCESSFUL", __func__);
 		return -1;
 	}
 
@@ -519,7 +519,7 @@ vmxnet3_dev_start(struct rte_eth_dev *de
 	 */
 	ret = vmxnet3_dev_rxtx_init(dev);
 	if (ret != VMXNET3_SUCCESS) {
-		PMD_INIT_LOG(ERR, "Device receive init in %s: UNSUCCESSFUL\n", __func__);
+		PMD_INIT_LOG(ERR, "Device receive init in %s: UNSUCCESSFUL", __func__);
 		return ret;
 	}
 
@@ -531,7 +531,7 @@ vmxnet3_dev_start(struct rte_eth_dev *de
 	 */
 #if PROCESS_SYS_EVENTS == 1
 	events = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_ECR);
-	PMD_INIT_LOG(DEBUG, "Reading events: 0x%X\n\n", events);
+	PMD_INIT_LOG(DEBUG, "Reading events: 0x%X", events);
 	vmxnet3_process_events(hw);
 #endif
 	return status;
@@ -550,7 +550,7 @@ vmxnet3_dev_stop(struct rte_eth_dev *dev
 	PMD_INIT_FUNC_TRACE();
 
 	if (hw->adapter_stopped == TRUE) {
-		PMD_INIT_LOG(DEBUG, "Device already closed.\n");
+		PMD_INIT_LOG(DEBUG, "Device already closed.");
 		return;
 	}
 
@@ -564,7 +564,7 @@ vmxnet3_dev_stop(struct rte_eth_dev *dev
 
 	/* reset the device */
 	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_RESET_DEV);
-	PMD_INIT_LOG(DEBUG, "Device reset.\n");
+	PMD_INIT_LOG(DEBUG, "Device reset.");
 	hw->adapter_stopped = FALSE;
 
 	vmxnet3_dev_clear_queues(dev);
@@ -655,7 +655,7 @@ vmxnet3_dev_link_update(struct rte_eth_d
 	ret = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_CMD);
 
 	if (!ret) {
-		PMD_INIT_LOG(ERR, "Link Status Negative : %s()\n", __func__);
+		PMD_INIT_LOG(ERR, "Link Status Negative : %s()", __func__);
 		return -1;
 	}
 
@@ -729,7 +729,7 @@ vmxnet3_process_events(struct vmxnet3_hw
 	uint32_t events = hw->shared->ecr;
 
 	if (!events) {
-		PMD_INIT_LOG(ERR, "No events to process in %s()\n", __func__);
+		PMD_INIT_LOG(ERR, "No events to process in %s()", __func__);
 		return;
 	}
 
@@ -742,18 +742,18 @@ vmxnet3_process_events(struct vmxnet3_hw
 	/* Check if link state has changed */
 	if (events & VMXNET3_ECR_LINK)
 		PMD_INIT_LOG(ERR,
-			     "Process events in %s(): VMXNET3_ECR_LINK event\n", __func__);
+			     "Process events in %s(): VMXNET3_ECR_LINK event", __func__);
 
 	/* Check if there is an error on xmit/recv queues */
 	if (events & (VMXNET3_ECR_TQERR | VMXNET3_ECR_RQERR)) {
 		VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_GET_QUEUE_STATUS);
 
 		if (hw->tqd_start->status.stopped)
-			PMD_INIT_LOG(ERR, "tq error 0x%x\n",
+			PMD_INIT_LOG(ERR, "tq error 0x%x",
 				     hw->tqd_start->status.error);
 
 		if (hw->rqd_start->status.stopped)
-			PMD_INIT_LOG(ERR, "rq error 0x%x\n",
+			PMD_INIT_LOG(ERR, "rq error 0x%x",
 				     hw->rqd_start->status.error);
 
 		/* Reset the device */
@@ -761,10 +761,10 @@ vmxnet3_process_events(struct vmxnet3_hw
 	}
 
 	if (events & VMXNET3_ECR_DIC)
-		PMD_INIT_LOG(ERR, "Device implementation change event.\n");
+		PMD_INIT_LOG(ERR, "Device implementation change event.");
 
 	if (events & VMXNET3_ECR_DEBUG)
-		PMD_INIT_LOG(ERR, "Debug event generated by device.\n");
+		PMD_INIT_LOG(ERR, "Debug event generated by device.");
 
 }
 #endif
--- a/lib/librte_pmd_vmxnet3/vmxnet3_rxtx.c	2014-06-12 18:10:45.367368561 -0700
+++ b/lib/librte_pmd_vmxnet3/vmxnet3_rxtx.c	2014-06-12 18:11:57.211743719 -0700
@@ -115,23 +115,23 @@ vmxnet3_rxq_dump(struct vmxnet3_rx_queue
 		return;
 
 	PMD_RX_LOG(DEBUG,
-		   "RXQ: cmd0 base : 0x%p cmd1 base : 0x%p comp ring base : 0x%p.\n",
+		   "RXQ: cmd0 base : 0x%p cmd1 base : 0x%p comp ring base : 0x%p.",
 		   rxq->cmd_ring[0].base, rxq->cmd_ring[1].base, rxq->comp_ring.base);
 	PMD_RX_LOG(DEBUG,
-		   "RXQ: cmd0 basePA : 0x%lx cmd1 basePA : 0x%lx comp ring basePA : 0x%lx.\n",
+		   "RXQ: cmd0 basePA : 0x%lx cmd1 basePA : 0x%lx comp ring basePA : 0x%lx.",
 		   (unsigned long)rxq->cmd_ring[0].basePA,
 		   (unsigned long)rxq->cmd_ring[1].basePA,
 		   (unsigned long)rxq->comp_ring.basePA);
 
 	avail = vmxnet3_cmd_ring_desc_avail(&rxq->cmd_ring[0]);
 	PMD_RX_LOG(DEBUG,
-		   "RXQ:cmd0: size=%u; free=%u; next2proc=%u; queued=%u\n",
+		   "RXQ:cmd0: size=%u; free=%u; next2proc=%u; queued=%u",
 		   (uint32_t)rxq->cmd_ring[0].size, avail,
 		   rxq->comp_ring.next2proc,
 		   rxq->cmd_ring[0].size - avail);
 
 	avail = vmxnet3_cmd_ring_desc_avail(&rxq->cmd_ring[1]);
-	PMD_RX_LOG(DEBUG, "RXQ:cmd1 size=%u; free=%u; next2proc=%u; queued=%u\n",
+	PMD_RX_LOG(DEBUG, "RXQ:cmd1 size=%u; free=%u; next2proc=%u; queued=%u",
 		   (uint32_t)rxq->cmd_ring[1].size, avail, rxq->comp_ring.next2proc,
 		   rxq->cmd_ring[1].size - avail);
 
@@ -145,14 +145,14 @@ vmxnet3_txq_dump(struct vmxnet3_tx_queue
 	if (txq == NULL)
 		return;
 
-	PMD_TX_LOG(DEBUG, "TXQ: cmd base : 0x%p comp ring base : 0x%p.\n",
+	PMD_TX_LOG(DEBUG, "TXQ: cmd base : 0x%p comp ring base : 0x%p.",
 		   txq->cmd_ring.base, txq->comp_ring.base);
-	PMD_TX_LOG(DEBUG, "TXQ: cmd basePA : 0x%lx comp ring basePA : 0x%lx.\n",
+	PMD_TX_LOG(DEBUG, "TXQ: cmd basePA : 0x%lx comp ring basePA : 0x%lx.",
 		   (unsigned long)txq->cmd_ring.basePA,
 		   (unsigned long)txq->comp_ring.basePA);
 
 	avail = vmxnet3_cmd_ring_desc_avail(&txq->cmd_ring);
-	PMD_TX_LOG(DEBUG, "TXQ: size=%u; free=%u; next2proc=%u; queued=%u\n",
+	PMD_TX_LOG(DEBUG, "TXQ: size=%u; free=%u; next2proc=%u; queued=%u",
 		   (uint32_t)txq->cmd_ring.size, avail,
 		   txq->comp_ring.next2proc, txq->cmd_ring.size - avail);
 }
@@ -258,7 +258,7 @@ vmxnet3_tq_tx_complete(vmxnet3_tx_queue_
 		completed++;
 	}
 
-	PMD_TX_LOG(DEBUG, "Processed %d tx comps & command descs.\n", completed);
+	PMD_TX_LOG(DEBUG, "Processed %d tx comps & command descs.", completed);
 }
 
 uint16_t
@@ -275,7 +275,7 @@ vmxnet3_xmit_pkts(void *tx_queue, struct
 	hw = txq->hw;
 
 	if (txq->stopped) {
-		PMD_TX_LOG(DEBUG, "Tx queue is stopped.\n");
+		PMD_TX_LOG(DEBUG, "Tx queue is stopped.");
 		return 0;
 	}
 
@@ -290,7 +290,7 @@ vmxnet3_xmit_pkts(void *tx_queue, struct
 			txm = tx_pkts[nb_tx];
 			/* Don't support scatter packets yet, free them if met */
 			if (txm->pkt.nb_segs != 1) {
-				PMD_TX_LOG(DEBUG, "Don't support scatter packets yet, drop!\n");
+				PMD_TX_LOG(DEBUG, "Don't support scatter packets yet, drop!");
 				rte_pktmbuf_free(tx_pkts[nb_tx]);
 				txq->stats.drop_total++;
 
@@ -300,7 +300,7 @@ vmxnet3_xmit_pkts(void *tx_queue, struct
 
 			/* Needs to minus ether header len */
 			if (txm->pkt.data_len > (hw->cur_mtu + ETHER_HDR_LEN)) {
-				PMD_TX_LOG(DEBUG, "Packet data_len higher than MTU\n");
+				PMD_TX_LOG(DEBUG, "Packet data_len higher than MTU");
 				rte_pktmbuf_free(tx_pkts[nb_tx]);
 				txq->stats.drop_total++;
 
@@ -340,7 +340,7 @@ vmxnet3_xmit_pkts(void *tx_queue, struct
 			nb_tx++;
 
 		} else {
-			PMD_TX_LOG(DEBUG, "No free tx cmd desc(s)\n");
+			PMD_TX_LOG(DEBUG, "No free tx cmd desc(s)");
 			txq->stats.drop_total += (nb_pkts - nb_tx);
 			break;
 		}
@@ -400,7 +400,7 @@ vmxnet3_post_rx_bufs(vmxnet3_rx_queue_t
 		/* Allocate blank mbuf for the current Rx Descriptor */
 		mbuf = rte_rxmbuf_alloc(rxq->mp);
 		if (mbuf == NULL) {
-			PMD_RX_LOG(ERR, "Error allocating mbuf in %s\n", __func__);
+			PMD_RX_LOG(ERR, "Error allocating mbuf in %s", __func__);
 			rxq->stats.rx_buf_alloc_failure++;
 			err = ENOMEM;
 			break;
@@ -463,7 +463,7 @@ vmxnet3_recv_pkts(void *rx_queue, struct
 	rcd = &rxq->comp_ring.base[rxq->comp_ring.next2proc].rcd;
 
 	if (rxq->stopped) {
-		PMD_RX_LOG(DEBUG, "Rx queue is stopped.\n");
+		PMD_RX_LOG(DEBUG, "Rx queue is stopped.");
 		return 0;
 	}
 
@@ -484,7 +484,7 @@ vmxnet3_recv_pkts(void *rx_queue, struct
 
 		} else {
 
-			PMD_RX_LOG(DEBUG, "rxd idx: %d ring idx: %d.\n", idx, ring_idx);
+			PMD_RX_LOG(DEBUG, "rxd idx: %d ring idx: %d.", idx, ring_idx);
 
 #ifdef RTE_LIBRTE_VMXNET3_DEBUG_DRIVER
 			VMXNET3_ASSERT(rcd->len <= rxd->len);
@@ -505,7 +505,7 @@ vmxnet3_recv_pkts(void *rx_queue, struct
 			if (rxd->btype != VMXNET3_RXD_BTYPE_HEAD) {
 				PMD_RX_LOG(DEBUG,
 					   "Alert : Misbehaving device, incorrect "
-					   " buffer type used. iPacket dropped.\n");
+					   " buffer type used. iPacket dropped.");
 				rte_pktmbuf_free_seg(rbi->m);
 				goto rcd_done;
 			}
@@ -529,9 +529,9 @@ vmxnet3_recv_pkts(void *rx_queue, struct
 
 				if (!rcd->fcs) {
 					rxq->stats.drop_fcs++;
-					PMD_RX_LOG(ERR, "Recv packet dropped due to frame err.\n");
+					PMD_RX_LOG(ERR, "Recv packet dropped due to frame err.");
 				}
-				PMD_RX_LOG(ERR, "Error in received packet rcd#:%d rxd:%d\n",
+				PMD_RX_LOG(ERR, "Error in received packet rcd#:%d rxd:%d",
 					   (int)(rcd - (struct Vmxnet3_RxCompDesc *)
 						 rxq->comp_ring.base), rcd->rxdIdx);
 				rte_pktmbuf_free_seg(rxm);
@@ -542,7 +542,7 @@ vmxnet3_recv_pkts(void *rx_queue, struct
 			/* Check for hardware stripped VLAN tag */
 			if (rcd->ts) {
 
-				PMD_RX_LOG(ERR, "Received packet with vlan ID: %d.\n",
+				PMD_RX_LOG(ERR, "Received packet with vlan ID: %d.",
 					   rcd->tci);
 				rxm->ol_flags = PKT_RX_VLAN_PKT;
 
@@ -588,7 +588,7 @@ rcd_done:
 			if (nb_rxd > rxq->cmd_ring[0].size) {
 				PMD_RX_LOG(ERR,
 					   "Used up quota of receiving packets,"
-					   " relinquish control.\n");
+					   " relinquish control.");
 				break;
 			}
 		}
@@ -640,19 +640,19 @@ vmxnet3_dev_tx_queue_setup(struct rte_et
 
 	if ((tx_conf->txq_flags & ETH_TXQ_FLAGS_NOMULTSEGS) !=
 	    ETH_TXQ_FLAGS_NOMULTSEGS) {
-		PMD_INIT_LOG(ERR, "TX Multi segment not support yet\n");
+		PMD_INIT_LOG(ERR, "TX Multi segment not support yet");
 		return -EINVAL;
 	}
 
 	if ((tx_conf->txq_flags & ETH_TXQ_FLAGS_NOOFFLOADS) !=
 	    ETH_TXQ_FLAGS_NOOFFLOADS) {
-		PMD_INIT_LOG(ERR, "TX not support offload function yet\n");
+		PMD_INIT_LOG(ERR, "TX not support offload function yet");
 		return -EINVAL;
 	}
 
 	txq = rte_zmalloc("ethdev_tx_queue", sizeof(struct vmxnet3_tx_queue), CACHE_LINE_SIZE);
 	if (txq == NULL) {
-		PMD_INIT_LOG(ERR, "Can not allocate tx queue structure\n");
+		PMD_INIT_LOG(ERR, "Can not allocate tx queue structure");
 		return -ENOMEM;
 	}
 
@@ -668,11 +668,11 @@ vmxnet3_dev_tx_queue_setup(struct rte_et
 
 	/* Tx vmxnet ring length should be between 512-4096 */
 	if (nb_desc < VMXNET3_DEF_TX_RING_SIZE) {
-		PMD_INIT_LOG(ERR, "VMXNET3 Tx Ring Size Min: %u\n",
+		PMD_INIT_LOG(ERR, "VMXNET3 Tx Ring Size Min: %u",
 			     VMXNET3_DEF_TX_RING_SIZE);
 		return -EINVAL;
 	} else if (nb_desc > VMXNET3_TX_RING_MAX_SIZE) {
-		PMD_INIT_LOG(ERR, "VMXNET3 Tx Ring Size Max: %u\n",
+		PMD_INIT_LOG(ERR, "VMXNET3 Tx Ring Size Max: %u",
 			     VMXNET3_TX_RING_MAX_SIZE);
 		return -EINVAL;
 	} else {
@@ -693,7 +693,7 @@ vmxnet3_dev_tx_queue_setup(struct rte_et
 
 	mz = ring_dma_zone_reserve(dev, "txdesc", queue_idx, size, socket_id);
 	if (mz == NULL) {
-		PMD_INIT_LOG(ERR, "ERROR: Creating queue descriptors zone\n");
+		PMD_INIT_LOG(ERR, "ERROR: Creating queue descriptors zone");
 		return -ENOMEM;
 	}
 	memset(mz->addr, 0, mz->len);
@@ -711,7 +711,7 @@ vmxnet3_dev_tx_queue_setup(struct rte_et
 	ring->buf_info = rte_zmalloc("tx_ring_buf_info",
 				     ring->size * sizeof(vmxnet3_buf_info_t), CACHE_LINE_SIZE);
 	if (ring->buf_info == NULL) {
-		PMD_INIT_LOG(ERR, "ERROR: Creating tx_buf_info structure\n");
+		PMD_INIT_LOG(ERR, "ERROR: Creating tx_buf_info structure");
 		return -ENOMEM;
 	}
 
@@ -750,14 +750,14 @@ vmxnet3_dev_rx_queue_setup(struct rte_et
 
 	if (dev->data->dev_conf.rxmode.max_rx_pkt_len > buf_size) {
 		PMD_INIT_LOG(ERR, "buf_size = %u, max_pkt_len = %u, "
-			     "VMXNET3 don't support scatter packets yet\n",
+			     "VMXNET3 don't support scatter packets yet",
 			     buf_size, dev->data->dev_conf.rxmode.max_rx_pkt_len);
 		return -EINVAL;
 	}
 
 	rxq = rte_zmalloc("ethdev_rx_queue", sizeof(struct vmxnet3_rx_queue), CACHE_LINE_SIZE);
 	if (rxq == NULL) {
-		PMD_INIT_LOG(ERR, "Can not allocate rx queue structure\n");
+		PMD_INIT_LOG(ERR, "Can not allocate rx queue structure");
 		return -ENOMEM;
 	}
 
@@ -776,10 +776,10 @@ vmxnet3_dev_rx_queue_setup(struct rte_et
 
 	/* Rx vmxnet rings length should be between 256-4096 */
 	if (nb_desc < VMXNET3_DEF_RX_RING_SIZE) {
-		PMD_INIT_LOG(ERR, "VMXNET3 Rx Ring Size Min: 256\n");
+		PMD_INIT_LOG(ERR, "VMXNET3 Rx Ring Size Min: 256");
 		return -EINVAL;
 	} else if (nb_desc > VMXNET3_RX_RING_MAX_SIZE) {
-		PMD_INIT_LOG(ERR, "VMXNET3 Rx Ring Size Max: 4096\n");
+		PMD_INIT_LOG(ERR, "VMXNET3 Rx Ring Size Max: 4096");
 		return -EINVAL;
 	} else {
 		ring0->size = nb_desc;
@@ -804,7 +804,7 @@ vmxnet3_dev_rx_queue_setup(struct rte_et
 
 	mz = ring_dma_zone_reserve(dev, "rxdesc", queue_idx, size, socket_id);
 	if (mz == NULL) {
-		PMD_INIT_LOG(ERR, "ERROR: Creating queue descriptors zone\n");
+		PMD_INIT_LOG(ERR, "ERROR: Creating queue descriptors zone");
 		return -ENOMEM;
 	}
 	memset(mz->addr, 0, mz->len);
@@ -831,7 +831,7 @@ vmxnet3_dev_rx_queue_setup(struct rte_et
 
 		ring->buf_info = rte_zmalloc(mem_name, ring->size * sizeof(vmxnet3_buf_info_t), CACHE_LINE_SIZE);
 		if (ring->buf_info == NULL) {
-			PMD_INIT_LOG(ERR, "ERROR: Creating rx_buf_info structure\n");
+			PMD_INIT_LOG(ERR, "ERROR: Creating rx_buf_info structure");
 			return -ENOMEM;
 		}
 	}
@@ -863,7 +863,7 @@ vmxnet3_dev_rxtx_init(struct rte_eth_dev
 			/* Passing 0 as alloc_num will allocate full ring */
 			ret = vmxnet3_post_rx_bufs(rxq, j);
 			if (ret <= 0) {
-				PMD_INIT_LOG(ERR, "ERROR: Posting Rxq: %d buffers ring: %d\n", i, j);
+				PMD_INIT_LOG(ERR, "ERROR: Posting Rxq: %d buffers ring: %d", i, j);
 				return -ret;
 			}
 			/* Updating device with the index:next2fill to fill the mbufs for coming packets */
@@ -962,7 +962,7 @@ vmxnet3_vlan_configure(struct rte_eth_de
 		vf_table[i] = 0;
 		/* To-Do: Provide another routine in dev_ops for user config */
 
-		PMD_INIT_LOG(DEBUG, "Registering VLAN portid: %"PRIu8" tag %u\n",
+		PMD_INIT_LOG(DEBUG, "Registering VLAN portid: %"PRIu8" tag %u",
 					dev->data->port_id, vf_table[i]);
 	}
 
