DPDK patches and discussions
* [dpdk-dev] [PATCH v3] Move rte_mbuf macros to common header file
@ 2015-09-30 21:55 Ravi Kerur
  2015-09-30 22:13 ` Stephen Hemminger
From: Ravi Kerur @ 2015-09-30 21:55 UTC
  To: dev

The macros RTE_MBUF_DATA_DMA_ADDR and RTE_MBUF_DATA_DMA_ADDR_DEFAULT
are defined separately in each PMD driver file. Convert the macros to
inline functions and move them to the common lib/librte_mbuf/rte_mbuf.h
file. PMD drivers already include rte_mbuf.h directly or indirectly, so
no additional header file inclusion is necessary.
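
For reference, a minimal usage sketch of the new helpers (the
fill_tx_desc_addr()/fill_rx_desc_addr() wrappers and the bare uint64_t
descriptor field are illustrative only; the rte_mbuf_* helpers and the
rte_cpu_to_le_64() conversion follow the driver changes below):

	#include <stdint.h>
	#include <rte_mbuf.h>
	#include <rte_byteorder.h>

	/* TX path: the descriptor gets the DMA address of the segment's
	 * current data, i.e. buf_physaddr + data_off. */
	static inline void
	fill_tx_desc_addr(volatile uint64_t *desc_addr, struct rte_mbuf *m)
	{
		*desc_addr = rte_cpu_to_le_64(rte_mbuf_data_dma_addr(m));
	}

	/* RX refill path: a freshly allocated mbuf carries no data yet, so
	 * the descriptor gets the default address,
	 * buf_physaddr + RTE_PKTMBUF_HEADROOM. */
	static inline void
	fill_rx_desc_addr(volatile uint64_t *desc_addr, struct rte_mbuf *m)
	{
		*desc_addr = rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(m));
	}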

v3:
	> Changed the converted macro->inline function names to lower-case
	> Fixed checkpatch.pl errors and warnings
		(camelcase warning is not fixed)
	> Compiled following targets
		> x86_64-native-linuxapp-clang
		> x86_64-native-linuxapp-gcc
		> i686-native-linuxapp-gcc
		> x86_64-native-bsdapp-gcc
		> x86_64-native-bsdapp-clang

	> Tested x86_64 ubuntu 14.04
		> make test
v2:
	> Changed both macros to inline functions in all PMDs
	> Changed the macro to rte_pktmbuf_mtod in the xenvirt module

	> Compiled following targets
		> x86_64-native-linuxapp-clang
		> x86_64-native-linuxapp-gcc
		> i686-native-linuxapp-gcc

	> Tested x86_64 ubuntu 14.04
		> testpmd and make test
v1:
	> Moved macros into the common rte_mbuf header file.

	> Compiled for:
		> x86_64-native-linuxapp-clang
		> x86_64-native-linuxapp-gcc
		> i686-native-linuxapp-gcc
		> x86_64-native-bsdapp-gcc
		> x86_64-native-bsdapp-clang

	Tested on:
		> x86_64 Ubuntu 14.04, testpmd and 'make test'
		> FreeBSD 10.1, testpmd

Signed-off-by: Ravi Kerur <rkerur@gmail.com>
---
 drivers/net/bnx2x/bnx2x.c          |  2 +-
 drivers/net/bnx2x/bnx2x.h          |  3 ---
 drivers/net/cxgbe/sge.c            |  3 ---
 drivers/net/e1000/em_rxtx.c        | 15 +++++----------
 drivers/net/e1000/igb_rxtx.c       | 14 ++++----------
 drivers/net/i40e/i40e_rxtx.c       | 20 +++++++-------------
 drivers/net/ixgbe/ixgbe_rxtx.c     | 14 +++++++-------
 drivers/net/ixgbe/ixgbe_rxtx.h     |  6 ------
 drivers/net/virtio/virtio_rxtx.c   |  2 +-
 drivers/net/virtio/virtqueue.h     |  3 ---
 drivers/net/vmxnet3/vmxnet3_rxtx.c | 11 +++--------
 drivers/net/xenvirt/virtqueue.h    |  5 +----
 lib/librte_mbuf/rte_mbuf.h         | 10 ++++++++++
 13 files changed, 39 insertions(+), 69 deletions(-)

diff --git a/drivers/net/bnx2x/bnx2x.c b/drivers/net/bnx2x/bnx2x.c
index fed7a06..a3f118c 100644
--- a/drivers/net/bnx2x/bnx2x.c
+++ b/drivers/net/bnx2x/bnx2x.c
@@ -2147,7 +2147,7 @@ int bnx2x_tx_encap(struct bnx2x_tx_queue *txq, struct rte_mbuf **m_head, int m_p
 		tx_start_bd = &txq->tx_ring[TX_BD(bd_prod, txq)].start_bd;
 
 		tx_start_bd->addr =
-		    rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR(m0));
+		    rte_cpu_to_le_64(rte_mbuf_data_dma_addr(m0));
 		tx_start_bd->nbytes = rte_cpu_to_le_16(m0->data_len);
 		tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
 		tx_start_bd->general_data =
diff --git a/drivers/net/bnx2x/bnx2x.h b/drivers/net/bnx2x/bnx2x.h
index 867b92a..28bd83f 100644
--- a/drivers/net/bnx2x/bnx2x.h
+++ b/drivers/net/bnx2x/bnx2x.h
@@ -141,9 +141,6 @@ struct bnx2x_device_type {
 	char     *bnx2x_name;
 };
 
-#define RTE_MBUF_DATA_DMA_ADDR(mb) \
-	((uint64_t)((mb)->buf_physaddr + (mb)->data_off))
-
 #define BNX2X_PAGE_SHIFT       12
 #define BNX2X_PAGE_SIZE        (1 << BNX2X_PAGE_SHIFT)
 #define BNX2X_PAGE_MASK        (~(BNX2X_PAGE_SIZE - 1))
diff --git a/drivers/net/cxgbe/sge.c b/drivers/net/cxgbe/sge.c
index 6eb1244..8f4c025 100644
--- a/drivers/net/cxgbe/sge.c
+++ b/drivers/net/cxgbe/sge.c
@@ -1267,9 +1267,6 @@ static struct rte_mbuf *t4_pktgl_to_mbuf(const struct pkt_gl *gl)
 	return t4_pktgl_to_mbuf_usembufs(gl);
 }
 
-#define RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mb) \
-	((dma_addr_t) ((mb)->buf_physaddr + (mb)->data_off))
-
 /**
  * t4_ethrx_handler - process an ingress ethernet packet
  * @q: the response queue that received the packet
diff --git a/drivers/net/e1000/em_rxtx.c b/drivers/net/e1000/em_rxtx.c
index 3b8776d..39c9744 100644
--- a/drivers/net/e1000/em_rxtx.c
+++ b/drivers/net/e1000/em_rxtx.c
@@ -88,12 +88,6 @@ rte_rxmbuf_alloc(struct rte_mempool *mp)
 	return (m);
 }
 
-#define RTE_MBUF_DATA_DMA_ADDR(mb)             \
-	(uint64_t) ((mb)->buf_physaddr + (mb)->data_off)
-
-#define RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mb) \
-	(uint64_t) ((mb)->buf_physaddr + RTE_PKTMBUF_HEADROOM)
-
 /**
  * Structure associated with each descriptor of the RX ring of a RX queue.
  */
@@ -585,7 +579,7 @@ eth_em_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 			 * Set up Transmit Data Descriptor.
 			 */
 			slen = m_seg->data_len;
-			buf_dma_addr = RTE_MBUF_DATA_DMA_ADDR(m_seg);
+			buf_dma_addr = rte_mbuf_data_dma_addr(m_seg);
 
 			txd->buffer_addr = rte_cpu_to_le_64(buf_dma_addr);
 			txd->lower.data = rte_cpu_to_le_32(cmd_type_len | slen);
@@ -769,7 +763,7 @@ eth_em_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 		rxm = rxe->mbuf;
 		rxe->mbuf = nmb;
 		dma_addr =
-			rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(nmb));
+			rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(nmb));
 		rxdp->buffer_addr = dma_addr;
 		rxdp->status = 0;
 
@@ -949,7 +943,7 @@ eth_em_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 		 */
 		rxm = rxe->mbuf;
 		rxe->mbuf = nmb;
-		dma = rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(nmb));
+		dma = rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(nmb));
 		rxdp->buffer_addr = dma;
 		rxdp->status = 0;
 
@@ -1623,7 +1617,8 @@ em_alloc_rx_queue_mbufs(struct em_rx_queue *rxq)
 			return (-ENOMEM);
 		}
 
-		dma_addr = rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mbuf));
+		dma_addr =
+			rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(mbuf));
 
 		/* Clear HW ring memory */
 		rxq->rx_ring[i] = rxd_init;
diff --git a/drivers/net/e1000/igb_rxtx.c b/drivers/net/e1000/igb_rxtx.c
index 19905fd..eb09bca 100644
--- a/drivers/net/e1000/igb_rxtx.c
+++ b/drivers/net/e1000/igb_rxtx.c
@@ -88,12 +88,6 @@ rte_rxmbuf_alloc(struct rte_mempool *mp)
 	return (m);
 }
 
-#define RTE_MBUF_DATA_DMA_ADDR(mb) \
-	(uint64_t) ((mb)->buf_physaddr + (mb)->data_off)
-
-#define RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mb) \
-	(uint64_t) ((mb)->buf_physaddr + RTE_PKTMBUF_HEADROOM)
-
 /**
  * Structure associated with each descriptor of the RX ring of a RX queue.
  */
@@ -550,7 +544,7 @@ eth_igb_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 			 * Set up transmit descriptor.
 			 */
 			slen = (uint16_t) m_seg->data_len;
-			buf_dma_addr = RTE_MBUF_DATA_DMA_ADDR(m_seg);
+			buf_dma_addr = rte_mbuf_data_dma_addr(m_seg);
 			txd->read.buffer_addr =
 				rte_cpu_to_le_64(buf_dma_addr);
 			txd->read.cmd_type_len =
@@ -820,7 +814,7 @@ eth_igb_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 		rxm = rxe->mbuf;
 		rxe->mbuf = nmb;
 		dma_addr =
-			rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(nmb));
+			rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(nmb));
 		rxdp->read.hdr_addr = 0;
 		rxdp->read.pkt_addr = dma_addr;
 
@@ -1006,7 +1000,7 @@ eth_igb_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 		 */
 		rxm = rxe->mbuf;
 		rxe->mbuf = nmb;
-		dma = rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(nmb));
+		dma = rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(nmb));
 		rxdp->read.pkt_addr = dma;
 		rxdp->read.hdr_addr = 0;
 
@@ -1954,7 +1948,7 @@ igb_alloc_rx_queue_mbufs(struct igb_rx_queue *rxq)
 			return (-ENOMEM);
 		}
 		dma_addr =
-			rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mbuf));
+			rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(mbuf));
 		rxd = &rxq->rx_ring[i];
 		rxd->read.hdr_addr = 0;
 		rxd->read.pkt_addr = dma_addr;
diff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c
index fd656d5..1abe898 100644
--- a/drivers/net/i40e/i40e_rxtx.c
+++ b/drivers/net/i40e/i40e_rxtx.c
@@ -78,12 +78,6 @@
 		PKT_TX_L4_MASK |		 \
 		PKT_TX_OUTER_IP_CKSUM)
 
-#define RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mb) \
-	(uint64_t) ((mb)->buf_physaddr + RTE_PKTMBUF_HEADROOM)
-
-#define RTE_MBUF_DATA_DMA_ADDR(mb) \
-	((uint64_t)((mb)->buf_physaddr + (mb)->data_off))
-
 static const struct rte_memzone *
 i40e_ring_dma_zone_reserve(struct rte_eth_dev *dev,
 			   const char *ring_name,
@@ -1104,7 +1098,7 @@ i40e_rx_alloc_bufs(struct i40e_rx_queue *rxq)
 		mb->nb_segs = 1;
 		mb->port = rxq->port_id;
 		dma_addr = rte_cpu_to_le_64(\
-			RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mb));
+			rte_mbuf_data_dma_addr_default(mb));
 		rxdp[i].read.hdr_addr = 0;
 		rxdp[i].read.pkt_addr = dma_addr;
 	}
@@ -1251,7 +1245,7 @@ i40e_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 		rxm = rxe->mbuf;
 		rxe->mbuf = nmb;
 		dma_addr =
-			rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(nmb));
+			rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(nmb));
 		rxdp->read.hdr_addr = 0;
 		rxdp->read.pkt_addr = dma_addr;
 
@@ -1362,7 +1356,7 @@ i40e_recv_scattered_pkts(void *rx_queue,
 		rxm = rxe->mbuf;
 		rxe->mbuf = nmb;
 		dma_addr =
-			rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(nmb));
+			rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(nmb));
 
 		/* Set data buffer address and data length of the mbuf */
 		rxdp->read.hdr_addr = 0;
@@ -1697,7 +1691,7 @@ i40e_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 
 			/* Setup TX Descriptor */
 			slen = m_seg->data_len;
-			buf_dma_addr = RTE_MBUF_DATA_DMA_ADDR(m_seg);
+			buf_dma_addr = rte_mbuf_data_dma_addr(m_seg);
 
 			PMD_TX_LOG(DEBUG, "mbuf: %p, TDD[%u]:\n"
 				"buf_dma_addr: %#"PRIx64";\n"
@@ -1799,7 +1793,7 @@ tx4(volatile struct i40e_tx_desc *txdp, struct rte_mbuf **pkts)
 	uint32_t i;
 
 	for (i = 0; i < 4; i++, txdp++, pkts++) {
-		dma_addr = RTE_MBUF_DATA_DMA_ADDR(*pkts);
+		dma_addr = rte_mbuf_data_dma_addr(*pkts);
 		txdp->buffer_addr = rte_cpu_to_le_64(dma_addr);
 		txdp->cmd_type_offset_bsz =
 			i40e_build_ctob((uint32_t)I40E_TD_CMD, 0,
@@ -1813,7 +1807,7 @@ tx1(volatile struct i40e_tx_desc *txdp, struct rte_mbuf **pkts)
 {
 	uint64_t dma_addr;
 
-	dma_addr = RTE_MBUF_DATA_DMA_ADDR(*pkts);
+	dma_addr = rte_mbuf_data_dma_addr(*pkts);
 	txdp->buffer_addr = rte_cpu_to_le_64(dma_addr);
 	txdp->cmd_type_offset_bsz =
 		i40e_build_ctob((uint32_t)I40E_TD_CMD, 0,
@@ -2754,7 +2748,7 @@ i40e_alloc_rx_queue_mbufs(struct i40e_rx_queue *rxq)
 		mbuf->port = rxq->port_id;
 
 		dma_addr =
-			rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mbuf));
+			rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(mbuf));
 
 		rxd = &rxq->rx_ring[i];
 		rxd->read.pkt_addr = dma_addr;
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c
index a598a72..a1483a6 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx.c
@@ -171,7 +171,7 @@ tx4(volatile union ixgbe_adv_tx_desc *txdp, struct rte_mbuf **pkts)
 	int i;
 
 	for (i = 0; i < 4; ++i, ++txdp, ++pkts) {
-		buf_dma_addr = RTE_MBUF_DATA_DMA_ADDR(*pkts);
+		buf_dma_addr = rte_mbuf_data_dma_addr(*pkts);
 		pkt_len = (*pkts)->data_len;
 
 		/* write data to descriptor */
@@ -194,7 +194,7 @@ tx1(volatile union ixgbe_adv_tx_desc *txdp, struct rte_mbuf **pkts)
 	uint64_t buf_dma_addr;
 	uint32_t pkt_len;
 
-	buf_dma_addr = RTE_MBUF_DATA_DMA_ADDR(*pkts);
+	buf_dma_addr = rte_mbuf_data_dma_addr(*pkts);
 	pkt_len = (*pkts)->data_len;
 
 	/* write data to descriptor */
@@ -810,7 +810,7 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 			 * Set up Transmit Data Descriptor.
 			 */
 			slen = m_seg->data_len;
-			buf_dma_addr = RTE_MBUF_DATA_DMA_ADDR(m_seg);
+			buf_dma_addr = rte_mbuf_data_dma_addr(m_seg);
 			txd->read.buffer_addr =
 				rte_cpu_to_le_64(buf_dma_addr);
 			txd->read.cmd_type_len =
@@ -1138,7 +1138,7 @@ ixgbe_rx_alloc_bufs(struct ixgbe_rx_queue *rxq, bool reset_mbuf)
 		mb->data_off = RTE_PKTMBUF_HEADROOM;
 
 		/* populate the descriptors */
-		dma_addr = rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mb));
+		dma_addr = rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(mb));
 		rxdp[i].read.hdr_addr = 0;
 		rxdp[i].read.pkt_addr = dma_addr;
 	}
@@ -1365,7 +1365,7 @@ ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 		rxm = rxe->mbuf;
 		rxe->mbuf = nmb;
 		dma_addr =
-			rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(nmb));
+			rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(nmb));
 		rxdp->read.hdr_addr = 0;
 		rxdp->read.pkt_addr = dma_addr;
 
@@ -1658,7 +1658,7 @@ next_desc:
 
 		if (!bulk_alloc) {
 			__le64 dma =
-			  rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(nmb));
+			  rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(nmb));
 			/*
 			 * Update RX descriptor with the physical address of the
 			 * new data buffer of the new allocated mbuf.
@@ -3596,7 +3596,7 @@ ixgbe_alloc_rx_queue_mbufs(struct ixgbe_rx_queue *rxq)
 		mbuf->port = rxq->port_id;
 
 		dma_addr =
-			rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mbuf));
+			rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(mbuf));
 		rxd = &rxq->rx_ring[i];
 		rxd->read.hdr_addr = 0;
 		rxd->read.pkt_addr = dma_addr;
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.h b/drivers/net/ixgbe/ixgbe_rxtx.h
index b9eca67..dbb9f00 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.h
+++ b/drivers/net/ixgbe/ixgbe_rxtx.h
@@ -40,12 +40,6 @@
 
 #define RTE_IXGBE_DESCS_PER_LOOP    4
 
-#define RTE_MBUF_DATA_DMA_ADDR(mb) \
-	(uint64_t) ((mb)->buf_physaddr + (mb)->data_off)
-
-#define RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mb) \
-	(uint64_t) ((mb)->buf_physaddr + RTE_PKTMBUF_HEADROOM)
-
 #ifdef RTE_IXGBE_INC_VECTOR
 #define RTE_IXGBE_RXQ_REARM_THRESH      32
 #define RTE_IXGBE_MAX_RX_BURST          RTE_IXGBE_RXQ_REARM_THRESH
diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c
index c5b53bb..09f5d9d 100644
--- a/drivers/net/virtio/virtio_rxtx.c
+++ b/drivers/net/virtio/virtio_rxtx.c
@@ -229,7 +229,7 @@ virtqueue_enqueue_xmit(struct virtqueue *txvq, struct rte_mbuf *cookie)
 
 	for (; ((seg_num > 0) && (cookie != NULL)); seg_num--) {
 		idx = start_dp[idx].next;
-		start_dp[idx].addr  = RTE_MBUF_DATA_DMA_ADDR(cookie);
+		start_dp[idx].addr  = rte_mbuf_data_dma_addr(cookie);
 		start_dp[idx].len   = cookie->data_len;
 		start_dp[idx].flags = VRING_DESC_F_NEXT;
 		cookie = cookie->next;
diff --git a/drivers/net/virtio/virtqueue.h b/drivers/net/virtio/virtqueue.h
index 7789411..9ea9b96 100644
--- a/drivers/net/virtio/virtqueue.h
+++ b/drivers/net/virtio/virtqueue.h
@@ -68,9 +68,6 @@ struct rte_mbuf;
 
 #define VIRTQUEUE_MAX_NAME_SZ 32
 
-#define RTE_MBUF_DATA_DMA_ADDR(mb) \
-	(uint64_t) ((mb)->buf_physaddr + (mb)->data_off)
-
 #define VTNET_SQ_RQ_QUEUE_IDX 0
 #define VTNET_SQ_TQ_QUEUE_IDX 1
 #define VTNET_SQ_CQ_QUEUE_IDX 2
diff --git a/drivers/net/vmxnet3/vmxnet3_rxtx.c b/drivers/net/vmxnet3/vmxnet3_rxtx.c
index 4de5d89..8385478 100644
--- a/drivers/net/vmxnet3/vmxnet3_rxtx.c
+++ b/drivers/net/vmxnet3/vmxnet3_rxtx.c
@@ -77,12 +77,6 @@
 #include "vmxnet3_logs.h"
 #include "vmxnet3_ethdev.h"
 
-#define RTE_MBUF_DATA_DMA_ADDR(mb) \
-	(uint64_t) ((mb)->buf_physaddr + (mb)->data_off)
-
-#define RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mb) \
-	(uint64_t) ((mb)->buf_physaddr + RTE_PKTMBUF_HEADROOM)
-
 static const uint32_t rxprod_reg[2] = {VMXNET3_REG_RXPROD, VMXNET3_REG_RXPROD2};
 
 static int vmxnet3_post_rx_bufs(vmxnet3_rx_queue_t*, uint8_t);
@@ -377,7 +371,7 @@ vmxnet3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 			   transmit buffer size (16K) is greater than
 			   maximum sizeof mbuf segment size. */
 			gdesc = txq->cmd_ring.base + txq->cmd_ring.next2fill;
-			gdesc->txd.addr = RTE_MBUF_DATA_DMA_ADDR(m_seg);
+			gdesc->txd.addr = rte_mbuf_data_dma_addr(m_seg);
 			gdesc->dword[2] = dw2 | m_seg->data_len;
 			gdesc->dword[3] = 0;
 
@@ -475,7 +469,8 @@ vmxnet3_post_rx_bufs(vmxnet3_rx_queue_t *rxq, uint8_t ring_id)
 		buf_info->m = mbuf;
 		buf_info->len = (uint16_t)(mbuf->buf_len -
 					   RTE_PKTMBUF_HEADROOM);
-		buf_info->bufPA = RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mbuf);
+		buf_info->bufPA =
+			rte_mbuf_data_dma_addr_default(mbuf);
 
 		/* Load Rx Descriptor with the buffer's GPA */
 		rxd->addr = buf_info->bufPA;
diff --git a/drivers/net/xenvirt/virtqueue.h b/drivers/net/xenvirt/virtqueue.h
index eff6208..c38288e 100644
--- a/drivers/net/xenvirt/virtqueue.h
+++ b/drivers/net/xenvirt/virtqueue.h
@@ -54,9 +54,6 @@ struct rte_mbuf;
  * Address translatio is between gva<->hva,
  * rather than gpa<->hva in virito spec.
  */
-#define RTE_MBUF_DATA_DMA_ADDR(mb) \
-	rte_pktmbuf_mtod(mb, uint64_t)
-
 enum { VTNET_RQ = 0, VTNET_TQ = 1, VTNET_CQ = 2 };
 
 /**
@@ -238,7 +235,7 @@ virtqueue_enqueue_xmit(struct virtqueue *txvq, struct rte_mbuf *cookie)
 	start_dp[idx].flags = VRING_DESC_F_NEXT;
 	start_dp[idx].addr  = (uintptr_t)NULL;
 	idx = start_dp[idx].next;
-	start_dp[idx].addr  = RTE_MBUF_DATA_DMA_ADDR(cookie);
+	start_dp[idx].addr  = rte_pktmbuf_mtod(cookie, uint64_t);
 	start_dp[idx].len   = cookie->data_len;
 	start_dp[idx].flags = 0;
 	idx = start_dp[idx].next;
diff --git a/lib/librte_mbuf/rte_mbuf.h b/lib/librte_mbuf/rte_mbuf.h
index d7c9030..8e4f485 100644
--- a/lib/librte_mbuf/rte_mbuf.h
+++ b/lib/librte_mbuf/rte_mbuf.h
@@ -843,6 +843,16 @@ struct rte_mbuf {
 	uint16_t timesync;
 } __rte_cache_aligned;
 
+static inline uint64_t rte_mbuf_data_dma_addr(struct rte_mbuf *mb)
+{
+	return ((uint64_t)((mb)->buf_physaddr + (mb)->data_off));
+}
+
+static inline uint64_t rte_mbuf_data_dma_addr_default(struct rte_mbuf *mb)
+{
+	return ((uint64_t)((mb)->buf_physaddr + RTE_PKTMBUF_HEADROOM));
+}
+
 static inline uint16_t rte_pktmbuf_priv_size(struct rte_mempool *mp);
 
 /**
-- 
1.9.1
