From: Keith Wiles <keith.wiles@intel.com>
To: dev@dpdk.org
Subject: [dpdk-dev] [RFC PATCH 2/4 v2] Add the ethdev changes for multiple device support
Date: Mon, 13 Apr 2015 14:44:32 -0500
Message-ID: <1428954274-26944-3-git-send-email-keith.wiles@intel.com>
In-Reply-To: <1428954274-26944-1-git-send-email-keith.wiles@intel.com>

Signed-off-by: Keith Wiles <keith.wiles@intel.com>
---
 lib/librte_ether/rte_ethdev.c | 944 +++++++++++++++++-------------------------
 lib/librte_ether/rte_ethdev.h | 340 ++++-----------
 2 files changed, 466 insertions(+), 818 deletions(-)
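
The hunks below fold the old static globals (rte_eth_dev_data, nb_ports, the
MZ_RTE_ETH_DEV_DATA memzone name) into a shared struct eth_dev_global and
replace direct dev->data->... and (*dev->dev_ops->...) accesses with accessor
macros (_DD(), _DD_PRIVATE(), ETH_DATA(), ETH_CONF(), ETH_SRIOV(), ETH_OPS(),
ETH_INFO()).  The macro definitions live in the rte_ethdev.h half of this
patch, which is not included in this excerpt; the lines below are only a
minimal sketch of what those accessors are assumed to expand to, inferred from
how the hunks use them, not the real definitions from the header:

/*
 * Assumed expansions, for illustration only.  "dd" is taken to be a common
 * device-data sub-struct embedded in struct rte_eth_dev_data, so that generic
 * fields (name, unit_id, mtu, queue arrays, ...) can be shared with other
 * device classes.
 */
#define ETH_DATA(dev)      ((dev)->data)                 /* ethdev-specific data  */
#define _DD(dev, field)    (ETH_DATA(dev)->dd.field)     /* common device fields  */
#define _DD_PRIVATE(dev)   _DD(dev, dev_private)         /* PMD private data      */
#define ETH_CONF(dev)      (ETH_DATA(dev)->dev_conf)     /* current configuration */
#define ETH_SRIOV(dev)     (ETH_DATA(dev)->sriov)        /* SR-IOV info           */
#define ETH_OPS(dev, op)   ((dev)->dev_ops->op)          /* driver callback       */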

diff --git a/lib/librte_ether/rte_ethdev.c b/lib/librte_ether/rte_ethdev.c
index e20cca5..0c68d8d 100644
--- a/lib/librte_ether/rte_ethdev.c
+++ b/lib/librte_ether/rte_ethdev.c
@@ -77,38 +77,20 @@
 #define PMD_DEBUG_TRACE(fmt, args...)
 #endif
 
-/* Macros for checking for restricting functions to primary instance only */
-#define PROC_PRIMARY_OR_ERR_RET(retval) do { \
-	if (rte_eal_process_type() != RTE_PROC_PRIMARY) { \
-		PMD_DEBUG_TRACE("Cannot run in secondary processes\n"); \
-		return (retval); \
-	} \
-} while(0)
-#define PROC_PRIMARY_OR_RET() do { \
-	if (rte_eal_process_type() != RTE_PROC_PRIMARY) { \
-		PMD_DEBUG_TRACE("Cannot run in secondary processes\n"); \
-		return; \
-	} \
-} while(0)
-
-/* Macros to check for invlaid function pointers in dev_ops structure */
-#define FUNC_PTR_OR_ERR_RET(func, retval) do { \
-	if ((func) == NULL) { \
-		PMD_DEBUG_TRACE("Function not supported\n"); \
-		return (retval); \
-	} \
-} while(0)
-#define FUNC_PTR_OR_RET(func) do { \
-	if ((func) == NULL) { \
-		PMD_DEBUG_TRACE("Function not supported\n"); \
-		return; \
-	} \
-} while(0)
-
-static const char *MZ_RTE_ETH_DEV_DATA = "rte_eth_dev_data";
 struct rte_eth_dev rte_eth_devices[RTE_MAX_ETHPORTS];
-static struct rte_eth_dev_data *rte_eth_dev_data = NULL;
-static uint8_t nb_ports = 0;
+
+static struct eth_dev_global eth_globals = {
+        .devs           = &rte_eth_devices[0],
+        .data           = NULL,
+        .nb_ports       = 0,
+        .max_ports      = RTE_MAX_ETHPORTS,
+        .dflt_mtu       = ETHER_MTU,
+        .dev_size       = sizeof(struct rte_eth_dev),
+        .data_size      = sizeof(struct rte_eth_dev_data),
+        .mz_dev_data    = "rte_eth_dev_data"
+};
+
+struct eth_dev_global * rte_eth_globals = &eth_globals;
 
 /* spinlock for eth device callbacks */
 static rte_spinlock_t rte_eth_dev_cb_lock = RTE_SPINLOCK_INITIALIZER;
@@ -155,49 +137,30 @@ static struct rte_eth_xstats_name_off rte_txq_stats_strings[] = {
 		sizeof(rte_txq_stats_strings[0]))
 
 
-/**
- * The user application callback description.
- *
- * It contains callback address to be registered by user application,
- * the pointer to the parameters for callback, and the event type.
- */
-struct rte_eth_dev_callback {
-	TAILQ_ENTRY(rte_eth_dev_callback) next; /**< Callbacks list */
-	rte_eth_dev_cb_fn cb_fn;                /**< Callback address */
-	void *cb_arg;                           /**< Parameter for callback */
-	enum rte_eth_event_type event;          /**< Interrupt event type */
-	uint32_t active;                        /**< Callback is executing */
-};
-
 enum {
 	STAT_QMAP_TX = 0,
 	STAT_QMAP_RX
 };
 
-enum {
-	DEV_DETACHED = 0,
-	DEV_ATTACHED
-};
-
 static inline void
 rte_eth_dev_data_alloc(void)
 {
 	const unsigned flags = 0;
 	const struct rte_memzone *mz;
 
-	if (rte_eal_process_type() == RTE_PROC_PRIMARY){
-		mz = rte_memzone_reserve(MZ_RTE_ETH_DEV_DATA,
-				RTE_MAX_ETHPORTS * sizeof(*rte_eth_dev_data),
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+		mz = rte_memzone_reserve(eth_globals.mz_dev_data,
+				eth_globals.max_ports * eth_globals.data_size,
 				rte_socket_id(), flags);
 	} else
-		mz = rte_memzone_lookup(MZ_RTE_ETH_DEV_DATA);
+		mz = rte_memzone_lookup(eth_globals.mz_dev_data);
 	if (mz == NULL)
 		rte_panic("Cannot allocate memzone for ethernet port data\n");
 
-	rte_eth_dev_data = mz->addr;
+	eth_globals.data = mz->addr;
 	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
-		memset(rte_eth_dev_data, 0,
-				RTE_MAX_ETHPORTS * sizeof(*rte_eth_dev_data));
+		memset(eth_globals.data, 0,
+				eth_globals.max_ports * eth_globals.data_size);
 }
 
 struct rte_eth_dev *
@@ -205,9 +168,9 @@ rte_eth_dev_allocated(const char *name)
 {
 	unsigned i;
 
-	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
+	for (i = 0; i < eth_globals.max_ports; i++) {
 		if ((rte_eth_devices[i].attached == DEV_ATTACHED) &&
-		    strcmp(rte_eth_devices[i].data->name, name) == 0)
+		    strcmp(_DD(&rte_eth_devices[i], name), name) == 0)
 			return &rte_eth_devices[i];
 	}
 	return NULL;
@@ -218,26 +181,26 @@ rte_eth_dev_find_free_port(void)
 {
 	unsigned i;
 
-	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
+	for (i = 0; i < eth_globals.max_ports; i++) {
 		if (rte_eth_devices[i].attached == DEV_DETACHED)
 			return i;
 	}
-	return RTE_MAX_ETHPORTS;
+	return eth_globals.max_ports;
 }
 
 struct rte_eth_dev *
-rte_eth_dev_allocate(const char *name, enum rte_eth_dev_type type)
+rte_eth_dev_allocate(const char *name, enum rte_dev_type type)
 {
 	uint8_t port_id;
-	struct rte_eth_dev *eth_dev;
+	struct rte_eth_dev *dev;
 
 	port_id = rte_eth_dev_find_free_port();
-	if (port_id == RTE_MAX_ETHPORTS) {
+	if (port_id == eth_globals.max_ports) {
 		PMD_DEBUG_TRACE("Reached maximum number of Ethernet ports\n");
 		return NULL;
 	}
 
-	if (rte_eth_dev_data == NULL)
+	if (eth_globals.data == NULL)
 		rte_eth_dev_data_alloc();
 
 	if (rte_eth_dev_allocated(name) != NULL) {
@@ -245,14 +208,15 @@ rte_eth_dev_allocate(const char *name, enum rte_eth_dev_type type)
 		return NULL;
 	}
 
-	eth_dev = &rte_eth_devices[port_id];
-	eth_dev->data = &rte_eth_dev_data[port_id];
-	snprintf(eth_dev->data->name, sizeof(eth_dev->data->name), "%s", name);
-	eth_dev->data->port_id = port_id;
-	eth_dev->attached = DEV_ATTACHED;
-	eth_dev->dev_type = type;
-	nb_ports++;
-	return eth_dev;
+	dev = &rte_eth_devices[port_id];
+	dev->data = &eth_globals.data[port_id];
+	snprintf(_DD(dev, name), sizeof(_DD(dev, name)), "%s", name);
+	_DD(dev, unit_id) = port_id;
+	dev->attached = DEV_ATTACHED;
+	dev->dev_info = NULL;
+	dev->dev_type = type;
+	eth_globals.nb_ports++;
+	return dev;
 }
 
 static inline int
@@ -273,13 +237,13 @@ rte_eth_dev_create_unique_device_name(char *name, size_t size,
 }
 
 int
-rte_eth_dev_release_port(struct rte_eth_dev *eth_dev)
+rte_eth_dev_release_port(struct rte_eth_dev *dev)
 {
-	if (eth_dev == NULL)
+	if (dev == NULL)
 		return -EINVAL;
 
-	eth_dev->attached = 0;
-	nb_ports--;
+	dev->attached = 0;
+	eth_globals.nb_ports--;
 	return 0;
 }
 
@@ -287,8 +251,8 @@ static int
 rte_eth_dev_init(struct rte_pci_driver *pci_drv,
 		 struct rte_pci_device *pci_dev)
 {
-	struct eth_driver    *eth_drv;
-	struct rte_eth_dev *eth_dev;
+	struct eth_driver   *eth_drv;
+	struct rte_eth_dev	*dev;
 	char ethdev_name[RTE_ETH_NAME_MAX_LEN];
 
 	int diag;
@@ -299,31 +263,31 @@ rte_eth_dev_init(struct rte_pci_driver *pci_drv,
 	rte_eth_dev_create_unique_device_name(ethdev_name,
 			sizeof(ethdev_name), pci_dev);
 
-	eth_dev = rte_eth_dev_allocate(ethdev_name, RTE_ETH_DEV_PCI);
-	if (eth_dev == NULL)
+	dev = rte_eth_dev_allocate(ethdev_name, RTE_DEV_PCI);
+	if (dev == NULL)
 		return -ENOMEM;
 
 	if (rte_eal_process_type() == RTE_PROC_PRIMARY){
-		eth_dev->data->dev_private = rte_zmalloc("ethdev private structure",
+		_DD_PRIVATE(dev) = rte_zmalloc("ethdev private structure",
 				  eth_drv->dev_private_size,
 				  RTE_CACHE_LINE_SIZE);
-		if (eth_dev->data->dev_private == NULL)
+		if (_DD_PRIVATE(dev) == NULL)
 			rte_panic("Cannot allocate memzone for private port data\n");
 	}
-	eth_dev->pci_dev = pci_dev;
-	eth_dev->driver = eth_drv;
-	eth_dev->data->rx_mbuf_alloc_failed = 0;
+	dev->pci_dev = pci_dev;
+	dev->driver = (struct rte_dev_drv *)eth_drv;
+	_DD(dev, rx_mbuf_alloc_failed) = 0;
 
 	/* init user callbacks */
-	TAILQ_INIT(&(eth_dev->link_intr_cbs));
+	TAILQ_INIT(&(dev->link_intr_cbs));
 
 	/*
 	 * Set the default MTU.
 	 */
-	eth_dev->data->mtu = ETHER_MTU;
+	_DD(dev, mtu) = ETHER_MTU;
 
 	/* Invoke PMD device initialization function */
-	diag = (*eth_drv->eth_dev_init)(eth_dev);
+	diag = (*eth_drv->dev_init)(dev);
 	if (diag == 0)
 		return (0);
 
@@ -332,9 +296,9 @@ rte_eth_dev_init(struct rte_pci_driver *pci_drv,
 			(unsigned) pci_dev->id.vendor_id,
 			(unsigned) pci_dev->id.device_id);
 	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
-		rte_free(eth_dev->data->dev_private);
-	eth_dev->attached = DEV_DETACHED;
-	nb_ports--;
+		rte_free(_DD_PRIVATE(dev));
+	dev->attached = DEV_DETACHED;
+	eth_globals.nb_ports--;
 	return diag;
 }
 
@@ -342,7 +306,7 @@ static int
 rte_eth_dev_uninit(struct rte_pci_device *pci_dev)
 {
 	const struct eth_driver *eth_drv;
-	struct rte_eth_dev *eth_dev;
+	struct rte_eth_dev *dev;
 	char ethdev_name[RTE_ETH_NAME_MAX_LEN];
 	int ret;
 
@@ -353,28 +317,28 @@ rte_eth_dev_uninit(struct rte_pci_device *pci_dev)
 	rte_eth_dev_create_unique_device_name(ethdev_name,
 			sizeof(ethdev_name), pci_dev);
 
-	eth_dev = rte_eth_dev_allocated(ethdev_name);
-	if (eth_dev == NULL)
+	dev = rte_eth_dev_allocated(ethdev_name);
+	if (dev == NULL)
 		return -ENODEV;
 
 	eth_drv = (const struct eth_driver *)pci_dev->driver;
 
 	/* Invoke PMD device uninit function */
-	if (*eth_drv->eth_dev_uninit) {
-		ret = (*eth_drv->eth_dev_uninit)(eth_dev);
+	if (*eth_drv->dev_uninit) {
+		ret = (*eth_drv->dev_uninit)(dev);
 		if (ret)
 			return ret;
 	}
 
 	/* free ether device */
-	rte_eth_dev_release_port(eth_dev);
+	rte_eth_dev_release_port(dev);
 
 	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
-		rte_free(eth_dev->data->dev_private);
+		rte_free(_DD_PRIVATE(dev));
 
-	eth_dev->pci_dev = NULL;
-	eth_dev->driver = NULL;
-	eth_dev->data = NULL;
+	dev->pci_dev = NULL;
+	dev->driver = NULL;
+	dev->data = NULL;
 
 	return 0;
 }
@@ -401,16 +365,6 @@ rte_eth_driver_register(struct eth_driver *eth_drv)
 	rte_eal_pci_register(&eth_drv->pci_drv);
 }
 
-static int
-rte_eth_dev_is_valid_port(uint8_t port_id)
-{
-	if (port_id >= RTE_MAX_ETHPORTS ||
-	    rte_eth_devices[port_id].attached != DEV_ATTACHED)
-		return 0;
-	else
-		return 1;
-}
-
 int
 rte_eth_dev_socket_id(uint8_t port_id)
 {
@@ -419,20 +373,14 @@ rte_eth_dev_socket_id(uint8_t port_id)
 	return rte_eth_devices[port_id].pci_dev->numa_node;
 }
 
-uint8_t
-rte_eth_dev_count(void)
-{
-	return (nb_ports);
-}
-
 /* So far, DPDK hotplug function only supports linux */
 #ifdef RTE_LIBRTE_EAL_HOTPLUG
 
-static enum rte_eth_dev_type
+static enum rte_dev_type
 rte_eth_dev_get_device_type(uint8_t port_id)
 {
 	if (!rte_eth_dev_is_valid_port(port_id))
-		return RTE_ETH_DEV_UNKNOWN;
+		return RTE_DEV_UNKNOWN;
 	return rte_eth_devices[port_id].dev_type;
 }
 
@@ -440,7 +388,7 @@ static int
 rte_eth_dev_save(struct rte_eth_dev *devs, size_t size)
 {
 	if ((devs == NULL) ||
-	    (size != sizeof(struct rte_eth_dev) * RTE_MAX_ETHPORTS))
+	    (size != sizeof(struct rte_eth_dev) * eth_globals.max_ports))
 		return -EINVAL;
 
 	/* save current rte_eth_devices */
@@ -455,7 +403,7 @@ rte_eth_dev_get_changed_port(struct rte_eth_dev *devs, uint8_t *port_id)
 		return -EINVAL;
 
 	/* check which port was attached or detached */
-	for (*port_id = 0; *port_id < RTE_MAX_ETHPORTS; (*port_id)++, devs++) {
+	for (*port_id = 0; *port_id < eth_globals.max_ports; (*port_id)++, devs++) {
 		if (rte_eth_devices[*port_id].attached ^ devs->attached)
 			return 0;
 	}
@@ -496,7 +444,7 @@ rte_eth_dev_get_name_by_port(uint8_t port_id, char *name)
 
 	/* shouldn't check 'rte_eth_devices[i].data',
 	 * because it might be overwritten by VDEV PMD */
-	tmp = rte_eth_dev_data[port_id].name;
+	tmp = eth_globals.data[port_id].name;
 	strcpy(name, tmp);
 	return 0;
 }
@@ -506,12 +454,12 @@ rte_eth_dev_is_detachable(uint8_t port_id)
 {
 	uint32_t drv_flags;
 
-	if (port_id >= RTE_MAX_ETHPORTS) {
+	if (port_id >= eth_globals.max_ports) {
 		PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
 		return -EINVAL;
 	}
 
-	if (rte_eth_devices[port_id].dev_type == RTE_ETH_DEV_PCI) {
+	if (rte_eth_devices[port_id].dev_type == RTE_DEV_PCI) {
 		switch (rte_eth_devices[port_id].pci_dev->kdrv) {
 		case RTE_KDRV_IGB_UIO:
 		case RTE_KDRV_UIO_GENERIC:
@@ -691,7 +639,7 @@ rte_eth_dev_detach(uint8_t port_id, char *name)
 	if (name == NULL)
 		return -EINVAL;
 
-	if (rte_eth_dev_get_device_type(port_id) == RTE_ETH_DEV_PCI) {
+	if (rte_eth_dev_get_device_type(port_id) == RTE_DEV_PCI) {
 		ret = rte_eth_dev_get_addr_by_port(port_id, &addr);
 		if (ret < 0)
 			return ret;
@@ -729,25 +677,25 @@ rte_eth_dev_detach(uint8_t port_id __rte_unused,
 static int
 rte_eth_dev_rx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
 {
-	uint16_t old_nb_queues = dev->data->nb_rx_queues;
+	uint16_t old_nb_queues = _DD(dev, nb_rx_queues);
 	void **rxq;
 	unsigned i;
 
-	if (dev->data->rx_queues == NULL) { /* first time configuration */
-		dev->data->rx_queues = rte_zmalloc("ethdev->rx_queues",
-				sizeof(dev->data->rx_queues[0]) * nb_queues,
+	if (_DD(dev, rx_queues) == NULL) { /* first time configuration */
+		_DD(dev, rx_queues) = rte_zmalloc("ethdev->rx_queues",
+				sizeof(_DD(dev, rx_queues[0])) * nb_queues,
 				RTE_CACHE_LINE_SIZE);
-		if (dev->data->rx_queues == NULL) {
-			dev->data->nb_rx_queues = 0;
+		if (_DD(dev, rx_queues) == NULL) {
+			_DD(dev, nb_rx_queues) = 0;
 			return -(ENOMEM);
 		}
 	} else { /* re-configure */
-		FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);
+		FUNC_PTR_OR_ERR_RET(ETH_OPS(dev, rx_queue_release), -ENOTSUP);
 
-		rxq = dev->data->rx_queues;
+		rxq = _DD(dev, rx_queues);
 
 		for (i = nb_queues; i < old_nb_queues; i++)
-			(*dev->dev_ops->rx_queue_release)(rxq[i]);
+			ETH_OPS(dev, rx_queue_release)(rxq[i]);
 		rxq = rte_realloc(rxq, sizeof(rxq[0]) * nb_queues,
 				RTE_CACHE_LINE_SIZE);
 		if (rxq == NULL)
@@ -758,10 +706,10 @@ rte_eth_dev_rx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
 				sizeof(rxq[0]) * new_qs);
 		}
 
-		dev->data->rx_queues = rxq;
+		_DD(dev, rx_queues) = rxq;
 
 	}
-	dev->data->nb_rx_queues = nb_queues;
+	_DD(dev, nb_rx_queues) = nb_queues;
 	return (0);
 }
 
@@ -780,14 +728,14 @@ rte_eth_dev_rx_queue_start(uint8_t port_id, uint16_t rx_queue_id)
 	}
 
 	dev = &rte_eth_devices[port_id];
-	if (rx_queue_id >= dev->data->nb_rx_queues) {
+	if (rx_queue_id >= _DD(dev, nb_rx_queues)) {
 		PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
 		return -EINVAL;
 	}
 
-	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_start, -ENOTSUP);
+	FUNC_PTR_OR_ERR_RET(ETH_OPS(dev, rx_queue_start), -ENOTSUP);
 
-	return dev->dev_ops->rx_queue_start(dev, rx_queue_id);
+	return ETH_OPS(dev, rx_queue_start)(dev, rx_queue_id);
 
 }
 
@@ -806,14 +754,14 @@ rte_eth_dev_rx_queue_stop(uint8_t port_id, uint16_t rx_queue_id)
 	}
 
 	dev = &rte_eth_devices[port_id];
-	if (rx_queue_id >= dev->data->nb_rx_queues) {
+	if (rx_queue_id >= _DD(dev, nb_rx_queues)) {
 		PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
 		return -EINVAL;
 	}
 
-	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_stop, -ENOTSUP);
+	FUNC_PTR_OR_ERR_RET(ETH_OPS(dev, rx_queue_stop), -ENOTSUP);
 
-	return dev->dev_ops->rx_queue_stop(dev, rx_queue_id);
+	return ETH_OPS(dev, rx_queue_stop)(dev, rx_queue_id);
 
 }
 
@@ -832,14 +780,14 @@ rte_eth_dev_tx_queue_start(uint8_t port_id, uint16_t tx_queue_id)
 	}
 
 	dev = &rte_eth_devices[port_id];
-	if (tx_queue_id >= dev->data->nb_tx_queues) {
+	if (tx_queue_id >= _DD(dev, nb_tx_queues)) {
 		PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
 		return -EINVAL;
 	}
 
-	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_start, -ENOTSUP);
+	FUNC_PTR_OR_ERR_RET(ETH_OPS(dev, tx_queue_start), -ENOTSUP);
 
-	return dev->dev_ops->tx_queue_start(dev, tx_queue_id);
+	return ETH_OPS(dev, tx_queue_start)(dev, tx_queue_id);
 
 }
 
@@ -858,39 +806,39 @@ rte_eth_dev_tx_queue_stop(uint8_t port_id, uint16_t tx_queue_id)
 	}
 
 	dev = &rte_eth_devices[port_id];
-	if (tx_queue_id >= dev->data->nb_tx_queues) {
+	if (tx_queue_id >= _DD(dev, nb_tx_queues)) {
 		PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
 		return -EINVAL;
 	}
 
-	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_stop, -ENOTSUP);
+	FUNC_PTR_OR_ERR_RET(ETH_OPS(dev, tx_queue_stop), -ENOTSUP);
 
-	return dev->dev_ops->tx_queue_stop(dev, tx_queue_id);
+	return ETH_OPS(dev, tx_queue_stop)(dev, tx_queue_id);
 
 }
 
 static int
 rte_eth_dev_tx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
 {
-	uint16_t old_nb_queues = dev->data->nb_tx_queues;
+	uint16_t old_nb_queues = _DD(dev, nb_tx_queues);
 	void **txq;
 	unsigned i;
 
-	if (dev->data->tx_queues == NULL) { /* first time configuration */
-		dev->data->tx_queues = rte_zmalloc("ethdev->tx_queues",
-				sizeof(dev->data->tx_queues[0]) * nb_queues,
+	if (_DD(dev, tx_queues) == NULL) { /* first time configuration */
+		_DD(dev, tx_queues) = rte_zmalloc("ethdev->tx_queues",
+				sizeof(_DD(dev, tx_queues[0])) * nb_queues,
 				RTE_CACHE_LINE_SIZE);
-		if (dev->data->tx_queues == NULL) {
-			dev->data->nb_tx_queues = 0;
+		if (_DD(dev, tx_queues) == NULL) {
+			_DD(dev, nb_tx_queues) = 0;
 			return -(ENOMEM);
 		}
 	} else { /* re-configure */
-		FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);
+		FUNC_PTR_OR_ERR_RET(ETH_OPS(dev, tx_queue_release), -ENOTSUP);
 
-		txq = dev->data->tx_queues;
+		txq = _DD(dev, tx_queues);
 
 		for (i = nb_queues; i < old_nb_queues; i++)
-			(*dev->dev_ops->tx_queue_release)(txq[i]);
+			ETH_OPS(dev, tx_queue_release)(txq[i]);
 		txq = rte_realloc(txq, sizeof(txq[0]) * nb_queues,
 				RTE_CACHE_LINE_SIZE);
 		if (txq == NULL)
@@ -901,10 +849,10 @@ rte_eth_dev_tx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
 				sizeof(txq[0]) * new_qs);
 		}
 
-		dev->data->tx_queues = txq;
+		_DD(dev, tx_queues) = txq;
 
 	}
-	dev->data->nb_tx_queues = nb_queues;
+	_DD(dev, nb_tx_queues) = nb_queues;
 	return (0);
 }
 
@@ -915,19 +863,19 @@ rte_eth_dev_check_vf_rss_rxq_num(uint8_t port_id, uint16_t nb_rx_q)
 	switch (nb_rx_q) {
 	case 1:
 	case 2:
-		RTE_ETH_DEV_SRIOV(dev).active =
+		ETH_SRIOV(dev).active =
 			ETH_64_POOLS;
 		break;
 	case 4:
-		RTE_ETH_DEV_SRIOV(dev).active =
+		ETH_SRIOV(dev).active =
 			ETH_32_POOLS;
 		break;
 	default:
 		return -EINVAL;
 	}
 
-	RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = nb_rx_q;
-	RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx =
+	ETH_SRIOV(dev).nb_q_per_pool = nb_rx_q;
+	ETH_SRIOV(dev).def_pool_q_idx =
 		dev->pci_dev->max_vfs * nb_rx_q;
 
 	return 0;
@@ -939,7 +887,7 @@ rte_eth_dev_check_mq_mode(uint8_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
 {
 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
 
-	if (RTE_ETH_DEV_SRIOV(dev).active != 0) {
+	if (ETH_SRIOV(dev).active != 0) {
 		/* check multi-queue mode */
 		if ((dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB) ||
 		    (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB_RSS) ||
@@ -970,10 +918,10 @@ rte_eth_dev_check_mq_mode(uint8_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
 					"mq_mode %u into VMDQ mq_mode %u\n",
 					port_id,
 					dev_conf->rxmode.mq_mode,
-					dev->data->dev_conf.rxmode.mq_mode);
+					ETH_DATA(dev)->dd.dev_conf.rxmode.mq_mode);
 		case ETH_MQ_RX_VMDQ_RSS:
-			dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_RSS;
-			if (nb_rx_q <= RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)
+			ETH_CONF(dev).rxmode.mq_mode = ETH_MQ_RX_VMDQ_RSS;
+			if (nb_rx_q <= ETH_SRIOV(dev).nb_q_per_pool)
 				if (rte_eth_dev_check_vf_rss_rxq_num(port_id, nb_rx_q) != 0) {
 					PMD_DEBUG_TRACE("ethdev port_id=%d"
 						" SRIOV active, invalid queue"
@@ -985,9 +933,9 @@ rte_eth_dev_check_mq_mode(uint8_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
 			break;
 		default: /* ETH_MQ_RX_VMDQ_ONLY or ETH_MQ_RX_NONE */
 			/* if nothing mq mode configure, use default scheme */
-			dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_ONLY;
-			if (RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool > 1)
-				RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = 1;
+			ETH_CONF(dev).rxmode.mq_mode = ETH_MQ_RX_VMDQ_ONLY;
+			if (ETH_SRIOV(dev).nb_q_per_pool > 1)
+				ETH_SRIOV(dev).nb_q_per_pool = 1;
 			break;
 		}
 
@@ -1001,16 +949,16 @@ rte_eth_dev_check_mq_mode(uint8_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
 			return (-EINVAL);
 		default: /* ETH_MQ_TX_VMDQ_ONLY or ETH_MQ_TX_NONE */
 			/* if nothing mq mode configure, use default scheme */
-			dev->data->dev_conf.txmode.mq_mode = ETH_MQ_TX_VMDQ_ONLY;
+			ETH_CONF(dev).txmode.mq_mode = ETH_MQ_TX_VMDQ_ONLY;
 			break;
 		}
 
 		/* check valid queue number */
-		if ((nb_rx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool) ||
-		    (nb_tx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)) {
+		if ((nb_rx_q > ETH_SRIOV(dev).nb_q_per_pool) ||
+		    (nb_tx_q > ETH_SRIOV(dev).nb_q_per_pool)) {
 			PMD_DEBUG_TRACE("ethdev port_id=%d SRIOV active, "
 				    "queue number must less equal to %d\n",
-					port_id, RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool);
+					port_id, ETH_SRIOV(dev).nb_q_per_pool);
 			return (-EINVAL);
 		}
 	} else {
@@ -1130,10 +1078,10 @@ rte_eth_dev_configure(uint8_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
 
 	dev = &rte_eth_devices[port_id];
 
-	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
-	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);
+	FUNC_PTR_OR_ERR_RET(ETH_OPS(dev, dev_infos_get), -ENOTSUP);
+	FUNC_PTR_OR_ERR_RET(ETH_OPS(dev, dev_configure), -ENOTSUP);
 
-	if (dev->data->dev_started) {
+	if (ETH_DATA(dev)->dev_started) {
 		PMD_DEBUG_TRACE(
 		    "port %d must be stopped to allow configuration\n", port_id);
 		return (-EBUSY);
@@ -1144,10 +1092,10 @@ rte_eth_dev_configure(uint8_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
 	 * than the maximum number of RX and TX queues supported by the
 	 * configured device.
 	 */
-	(*dev->dev_ops->dev_infos_get)(dev, &dev_info);
+	ETH_OPS(dev, dev_infos_get)(dev, &dev_info);
 	if (nb_rx_q > dev_info.max_rx_queues) {
 		PMD_DEBUG_TRACE("ethdev port_id=%d nb_rx_queues=%d > %d\n",
-				port_id, nb_rx_q, dev_info.max_rx_queues);
+				port_id, nb_rx_q, ETH_INFO(dev_info)->max_rx_queues);
 		return (-EINVAL);
 	}
 	if (nb_rx_q == 0) {
@@ -1157,7 +1105,7 @@ rte_eth_dev_configure(uint8_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
 
 	if (nb_tx_q > dev_info.max_tx_queues) {
 		PMD_DEBUG_TRACE("ethdev port_id=%d nb_tx_queues=%d > %d\n",
-				port_id, nb_tx_q, dev_info.max_tx_queues);
+				port_id, nb_tx_q, ETH_INFO(dev_info)->max_tx_queues);
 		return (-EINVAL);
 	}
 	if (nb_tx_q == 0) {
@@ -1166,7 +1114,7 @@ rte_eth_dev_configure(uint8_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
 	}
 
 	/* Copy the dev_conf parameter into the dev structure */
-	memcpy(&dev->data->dev_conf, dev_conf, sizeof(dev->data->dev_conf));
+	memcpy(&ETH_CONF(dev), dev_conf, sizeof(ETH_CONF(dev)));
 
 	/*
 	 * If link state interrupt is enabled, check that the
@@ -1208,7 +1156,7 @@ rte_eth_dev_configure(uint8_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
 		if (dev_conf->rxmode.max_rx_pkt_len < ETHER_MIN_LEN ||
 			dev_conf->rxmode.max_rx_pkt_len > ETHER_MAX_LEN)
 			/* Use default value */
-			dev->data->dev_conf.rxmode.max_rx_pkt_len =
+			ETH_CONF(dev).rxmode.max_rx_pkt_len =
 							ETHER_MAX_LEN;
 	}
 
@@ -1238,7 +1186,7 @@ rte_eth_dev_configure(uint8_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
 		return diag;
 	}
 
-	diag = (*dev->dev_ops->dev_configure)(dev);
+	diag = ETH_OPS(dev, dev_configure)(dev);
 	if (diag != 0) {
 		PMD_DEBUG_TRACE("port%d dev_configure = %d\n",
 				port_id, diag);
@@ -1263,21 +1211,21 @@ rte_eth_dev_config_restore(uint8_t port_id)
 
 	rte_eth_dev_info_get(port_id, &dev_info);
 
-	if (RTE_ETH_DEV_SRIOV(dev).active)
-		pool = RTE_ETH_DEV_SRIOV(dev).def_vmdq_idx;
+	if (ETH_SRIOV(dev).active)
+		pool = ETH_SRIOV(dev).def_vmdq_idx;
 
 	/* replay MAC address configuration */
 	for (i = 0; i < dev_info.max_mac_addrs; i++) {
-		addr = dev->data->mac_addrs[i];
+		addr = ETH_DATA(dev)->mac_addrs[i];
 
 		/* skip zero address */
 		if (is_zero_ether_addr(&addr))
 			continue;
 
 		/* add address to the hardware */
-		if  (*dev->dev_ops->mac_addr_add &&
-			(dev->data->mac_pool_sel[i] & (1ULL << pool)))
-			(*dev->dev_ops->mac_addr_add)(dev, &addr, i, pool);
+		if  (ETH_OPS(dev, mac_addr_add) &&
+			(ETH_DATA(dev)->mac_pool_sel[i] & (1ULL << pool)))
+			ETH_OPS(dev, mac_addr_add)(dev, &addr, i, pool);
 		else {
 			PMD_DEBUG_TRACE("port %d: MAC address array not supported\n",
 					port_id);
@@ -1316,26 +1264,26 @@ rte_eth_dev_start(uint8_t port_id)
 
 	dev = &rte_eth_devices[port_id];
 
-	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);
+	FUNC_PTR_OR_ERR_RET(ETH_OPS(dev, dev_start), -ENOTSUP);
 
-	if (dev->data->dev_started != 0) {
+	if (ETH_DATA(dev)->dev_started != 0) {
 		PMD_DEBUG_TRACE("Device with port_id=%" PRIu8
 			" already started\n",
 			port_id);
 		return (0);
 	}
 
-	diag = (*dev->dev_ops->dev_start)(dev);
+	diag = ETH_OPS(dev, dev_start)(dev);
 	if (diag == 0)
-		dev->data->dev_started = 1;
+		ETH_DATA(dev)->dev_started = 1;
 	else
 		return diag;
 
 	rte_eth_dev_config_restore(port_id);
 
-	if (dev->data->dev_conf.intr_conf.lsc != 0) {
-		FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
-		(*dev->dev_ops->link_update)(dev, 0);
+	if (ETH_DATA(dev)->dev_conf.intr_conf.lsc != 0) {
+		FUNC_PTR_OR_ERR_RET(ETH_OPS(dev, link_update), -ENOTSUP);
+		ETH_OPS(dev, link_update)(dev, 0);
 	}
 	return 0;
 }
@@ -1356,17 +1304,17 @@ rte_eth_dev_stop(uint8_t port_id)
 
 	dev = &rte_eth_devices[port_id];
 
-	FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);
+	FUNC_PTR_OR_RET(ETH_OPS(dev, dev_stop));
 
-	if (dev->data->dev_started == 0) {
+	if (ETH_DATA(dev)->dev_started == 0) {
 		PMD_DEBUG_TRACE("Device with port_id=%" PRIu8
 			" already stopped\n",
 			port_id);
 		return;
 	}
 
-	dev->data->dev_started = 0;
-	(*dev->dev_ops->dev_stop)(dev);
+	ETH_DATA(dev)->dev_started = 0;
+	ETH_OPS(dev, dev_stop)(dev);
 }
 
 int
@@ -1385,8 +1333,8 @@ rte_eth_dev_set_link_up(uint8_t port_id)
 
 	dev = &rte_eth_devices[port_id];
 
-	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_up, -ENOTSUP);
-	return (*dev->dev_ops->dev_set_link_up)(dev);
+	FUNC_PTR_OR_ERR_RET(ETH_OPS(dev, dev_set_link_up), -ENOTSUP);
+	return ETH_OPS(dev, dev_set_link_up)(dev);
 }
 
 int
@@ -1405,8 +1353,8 @@ rte_eth_dev_set_link_down(uint8_t port_id)
 
 	dev = &rte_eth_devices[port_id];
 
-	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_down, -ENOTSUP);
-	return (*dev->dev_ops->dev_set_link_down)(dev);
+	FUNC_PTR_OR_ERR_RET(ETH_OPS(dev, dev_set_link_down), -ENOTSUP);
+	return ETH_OPS(dev, dev_set_link_down)(dev);
 }
 
 void
@@ -1425,9 +1373,9 @@ rte_eth_dev_close(uint8_t port_id)
 
 	dev = &rte_eth_devices[port_id];
 
-	FUNC_PTR_OR_RET(*dev->dev_ops->dev_close);
-	dev->data->dev_started = 0;
-	(*dev->dev_ops->dev_close)(dev);
+	FUNC_PTR_OR_RET(ETH_OPS(dev, dev_close));
+	ETH_DATA(dev)->dev_started = 0;
+	ETH_OPS(dev, dev_close)(dev);
 }
 
 int
@@ -1452,19 +1400,19 @@ rte_eth_rx_queue_setup(uint8_t port_id, uint16_t rx_queue_id,
 	}
 
 	dev = &rte_eth_devices[port_id];
-	if (rx_queue_id >= dev->data->nb_rx_queues) {
+	if (rx_queue_id >= ETH_DATA(dev)->dd.nb_rx_queues) {
 		PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
 		return (-EINVAL);
 	}
 
-	if (dev->data->dev_started) {
+	if (ETH_DATA(dev)->dev_started) {
 		PMD_DEBUG_TRACE(
 		    "port %d must be stopped to allow configuration\n", port_id);
 		return -EBUSY;
 	}
 
-	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
-	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_setup, -ENOTSUP);
+	FUNC_PTR_OR_ERR_RET(ETH_OPS(dev, dev_infos_get), -ENOTSUP);
+	FUNC_PTR_OR_ERR_RET(ETH_OPS(dev, rx_queue_setup), -ENOTSUP);
 
 	/*
 	 * Check the size of the mbuf data buffer.
@@ -1497,12 +1445,12 @@ rte_eth_rx_queue_setup(uint8_t port_id, uint16_t rx_queue_id,
 	if (rx_conf == NULL)
 		rx_conf = &dev_info.default_rxconf;
 
-	ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc,
+	ret = ETH_OPS(dev, rx_queue_setup)(dev, rx_queue_id, nb_rx_desc,
 					      socket_id, rx_conf, mp);
 	if (!ret) {
-		if (!dev->data->min_rx_buf_size ||
-		    dev->data->min_rx_buf_size > mbp_buf_size)
-			dev->data->min_rx_buf_size = mbp_buf_size;
+		if (!ETH_DATA(dev)->dd.min_rx_buf_size ||
+		    ETH_DATA(dev)->dd.min_rx_buf_size > mbp_buf_size)
+			ETH_DATA(dev)->dd.min_rx_buf_size = mbp_buf_size;
 	}
 
 	return ret;
@@ -1526,26 +1474,26 @@ rte_eth_tx_queue_setup(uint8_t port_id, uint16_t tx_queue_id,
 	}
 
 	dev = &rte_eth_devices[port_id];
-	if (tx_queue_id >= dev->data->nb_tx_queues) {
+	if (tx_queue_id >= ETH_DATA(dev)->dd.nb_tx_queues) {
 		PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
 		return (-EINVAL);
 	}
 
-	if (dev->data->dev_started) {
+	if (ETH_DATA(dev)->dev_started) {
 		PMD_DEBUG_TRACE(
 		    "port %d must be stopped to allow configuration\n", port_id);
 		return -EBUSY;
 	}
 
-	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
-	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_setup, -ENOTSUP);
+	FUNC_PTR_OR_ERR_RET(ETH_OPS(dev, dev_infos_get), -ENOTSUP);
+	FUNC_PTR_OR_ERR_RET(ETH_OPS(dev, tx_queue_setup), -ENOTSUP);
 
 	rte_eth_dev_info_get(port_id, &dev_info);
 
 	if (tx_conf == NULL)
 		tx_conf = &dev_info.default_txconf;
 
-	return (*dev->dev_ops->tx_queue_setup)(dev, tx_queue_id, nb_tx_desc,
+	return ETH_OPS(dev, tx_queue_setup)(dev, tx_queue_id, nb_tx_desc,
 					       socket_id, tx_conf);
 }
 
@@ -1561,9 +1509,9 @@ rte_eth_promiscuous_enable(uint8_t port_id)
 
 	dev = &rte_eth_devices[port_id];
 
-	FUNC_PTR_OR_RET(*dev->dev_ops->promiscuous_enable);
-	(*dev->dev_ops->promiscuous_enable)(dev);
-	dev->data->promiscuous = 1;
+	FUNC_PTR_OR_RET(ETH_OPS(dev, promiscuous_enable));
+	ETH_OPS(dev, promiscuous_enable)(dev);
+	ETH_DATA(dev)->promiscuous = 1;
 }
 
 void
@@ -1578,9 +1526,9 @@ rte_eth_promiscuous_disable(uint8_t port_id)
 
 	dev = &rte_eth_devices[port_id];
 
-	FUNC_PTR_OR_RET(*dev->dev_ops->promiscuous_disable);
-	dev->data->promiscuous = 0;
-	(*dev->dev_ops->promiscuous_disable)(dev);
+	FUNC_PTR_OR_RET(ETH_OPS(dev, promiscuous_disable));
+	ETH_DATA(dev)->promiscuous = 0;
+	ETH_OPS(dev, promiscuous_disable)(dev);
 }
 
 int
@@ -1594,7 +1542,7 @@ rte_eth_promiscuous_get(uint8_t port_id)
 	}
 
 	dev = &rte_eth_devices[port_id];
-	return dev->data->promiscuous;
+	return ETH_DATA(dev)->promiscuous;
 }
 
 void
@@ -1609,9 +1557,9 @@ rte_eth_allmulticast_enable(uint8_t port_id)
 
 	dev = &rte_eth_devices[port_id];
 
-	FUNC_PTR_OR_RET(*dev->dev_ops->allmulticast_enable);
-	(*dev->dev_ops->allmulticast_enable)(dev);
-	dev->data->all_multicast = 1;
+	FUNC_PTR_OR_RET(ETH_OPS(dev, allmulticast_enable));
+	ETH_OPS(dev, allmulticast_enable)(dev);
+	ETH_DATA(dev)->all_multicast = 1;
 }
 
 void
@@ -1626,9 +1574,9 @@ rte_eth_allmulticast_disable(uint8_t port_id)
 
 	dev = &rte_eth_devices[port_id];
 
-	FUNC_PTR_OR_RET(*dev->dev_ops->allmulticast_disable);
-	dev->data->all_multicast = 0;
-	(*dev->dev_ops->allmulticast_disable)(dev);
+	FUNC_PTR_OR_RET(ETH_OPS(dev, allmulticast_disable));
+	ETH_DATA(dev)->all_multicast = 0;
+	ETH_OPS(dev, allmulticast_disable)(dev);
 }
 
 int
@@ -1642,7 +1590,7 @@ rte_eth_allmulticast_get(uint8_t port_id)
 	}
 
 	dev = &rte_eth_devices[port_id];
-	return dev->data->all_multicast;
+	return ETH_DATA(dev)->all_multicast;
 }
 
 static inline int
@@ -1650,7 +1598,7 @@ rte_eth_dev_atomic_read_link_status(struct rte_eth_dev *dev,
 				struct rte_eth_link *link)
 {
 	struct rte_eth_link *dst = link;
-	struct rte_eth_link *src = &(dev->data->dev_link);
+	struct rte_eth_link *src = &(ETH_DATA(dev)->dev_link);
 
 	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
 					*(uint64_t *)src) == 0)
@@ -1671,12 +1619,12 @@ rte_eth_link_get(uint8_t port_id, struct rte_eth_link *eth_link)
 
 	dev = &rte_eth_devices[port_id];
 
-	if (dev->data->dev_conf.intr_conf.lsc != 0)
+	if (ETH_DATA(dev)->dev_conf.intr_conf.lsc != 0)
 		rte_eth_dev_atomic_read_link_status(dev, eth_link);
 	else {
-		FUNC_PTR_OR_RET(*dev->dev_ops->link_update);
-		(*dev->dev_ops->link_update)(dev, 1);
-		*eth_link = dev->data->dev_link;
+		FUNC_PTR_OR_RET(ETH_OPS(dev, link_update));
+		ETH_OPS(dev, link_update)(dev, 1);
+		*eth_link = ETH_DATA(dev)->dev_link;
 	}
 }
 
@@ -1692,12 +1640,12 @@ rte_eth_link_get_nowait(uint8_t port_id, struct rte_eth_link *eth_link)
 
 	dev = &rte_eth_devices[port_id];
 
-	if (dev->data->dev_conf.intr_conf.lsc != 0)
+	if (ETH_DATA(dev)->dev_conf.intr_conf.lsc != 0)
 		rte_eth_dev_atomic_read_link_status(dev, eth_link);
 	else {
-		FUNC_PTR_OR_RET(*dev->dev_ops->link_update);
-		(*dev->dev_ops->link_update)(dev, 0);
-		*eth_link = dev->data->dev_link;
+		FUNC_PTR_OR_RET(ETH_OPS(dev, link_update));
+		ETH_OPS(dev, link_update)(dev, 0);
+		*eth_link = ETH_DATA(dev)->dev_link;
 	}
 }
 
@@ -1714,9 +1662,9 @@ rte_eth_stats_get(uint8_t port_id, struct rte_eth_stats *stats)
 	dev = &rte_eth_devices[port_id];
 	memset(stats, 0, sizeof(*stats));
 
-	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
-	(*dev->dev_ops->stats_get)(dev, stats);
-	stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
+	FUNC_PTR_OR_ERR_RET(ETH_OPS(dev, stats_get), -ENOTSUP);
+	ETH_OPS(dev, stats_get)(dev, stats);
+	stats->rx_nombuf = ETH_DATA(dev)->dd.rx_mbuf_alloc_failed;
 	return 0;
 }
 
@@ -1732,8 +1680,8 @@ rte_eth_stats_reset(uint8_t port_id)
 
 	dev = &rte_eth_devices[port_id];
 
-	FUNC_PTR_OR_RET(*dev->dev_ops->stats_reset);
-	(*dev->dev_ops->stats_reset)(dev);
+	FUNC_PTR_OR_RET(ETH_OPS(dev, stats_reset));
+	ETH_OPS(dev, stats_reset)(dev);
 }
 
 /* retrieve ethdev extended statistics */
@@ -1755,13 +1703,13 @@ rte_eth_xstats_get(uint8_t port_id, struct rte_eth_xstats *xstats,
 	dev = &rte_eth_devices[port_id];
 
 	/* implemented by the driver */
-	if (dev->dev_ops->xstats_get != NULL)
-		return (*dev->dev_ops->xstats_get)(dev, xstats, n);
+	if (ETH_OPS(dev, xstats_get) != NULL)
+		return ETH_OPS(dev, xstats_get)(dev, xstats, n);
 
 	/* else, return generic statistics */
 	count = RTE_NB_STATS;
-	count += dev->data->nb_rx_queues * RTE_NB_RXQ_STATS;
-	count += dev->data->nb_tx_queues * RTE_NB_TXQ_STATS;
+	count += ETH_DATA(dev)->dd.nb_rx_queues * RTE_NB_RXQ_STATS;
+	count += ETH_DATA(dev)->dd.nb_tx_queues * RTE_NB_TXQ_STATS;
 	if (n < count)
 		return count;
 
@@ -1781,7 +1729,7 @@ rte_eth_xstats_get(uint8_t port_id, struct rte_eth_xstats *xstats,
 	}
 
 	/* per-rxq stats */
-	for (q = 0; q < dev->data->nb_rx_queues; q++) {
+	for (q = 0; q < ETH_DATA(dev)->dd.nb_rx_queues; q++) {
 		for (i = 0; i < RTE_NB_RXQ_STATS; i++) {
 			stats_ptr = (char *)&eth_stats;
 			stats_ptr += rte_rxq_stats_strings[i].offset;
@@ -1795,7 +1743,7 @@ rte_eth_xstats_get(uint8_t port_id, struct rte_eth_xstats *xstats,
 	}
 
 	/* per-txq stats */
-	for (q = 0; q < dev->data->nb_tx_queues; q++) {
+	for (q = 0; q < ETH_DATA(dev)->dd.nb_tx_queues; q++) {
 		for (i = 0; i < RTE_NB_TXQ_STATS; i++) {
 			stats_ptr = (char *)&eth_stats;
 			stats_ptr += rte_txq_stats_strings[i].offset;
@@ -1825,8 +1773,8 @@ rte_eth_xstats_reset(uint8_t port_id)
 	dev = &rte_eth_devices[port_id];
 
 	/* implemented by the driver */
-	if (dev->dev_ops->xstats_reset != NULL) {
-		(*dev->dev_ops->xstats_reset)(dev);
+	if (ETH_OPS(dev, xstats_reset) != NULL) {
+		ETH_OPS(dev, xstats_reset)(dev);
 		return;
 	}
 
@@ -1847,9 +1795,8 @@ set_queue_stats_mapping(uint8_t port_id, uint16_t queue_id, uint8_t stat_idx,
 
 	dev = &rte_eth_devices[port_id];
 
-	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_stats_mapping_set, -ENOTSUP);
-	return (*dev->dev_ops->queue_stats_mapping_set)
-			(dev, queue_id, stat_idx, is_rx);
+	FUNC_PTR_OR_ERR_RET(ETH_OPS(dev, queue_stats_mapping_set), -ENOTSUP);
+	return ETH_OPS(dev, queue_stats_mapping_set)(dev, queue_id, stat_idx, is_rx);
 }
 
 
@@ -1885,11 +1832,11 @@ rte_eth_dev_info_get(uint8_t port_id, struct rte_eth_dev_info *dev_info)
 
 	memset(dev_info, 0, sizeof(struct rte_eth_dev_info));
 
-	FUNC_PTR_OR_RET(*dev->dev_ops->dev_infos_get);
-	(*dev->dev_ops->dev_infos_get)(dev, dev_info);
-	dev_info->pci_dev = dev->pci_dev;
+	FUNC_PTR_OR_RET(ETH_OPS(dev, dev_infos_get));
+	ETH_OPS(dev, dev_infos_get)(dev, dev_info);
+	dev_info->di.pci_dev = dev->pci_dev;
 	if (dev->driver)
-		dev_info->driver_name = dev->driver->pci_drv.name;
+		dev_info->di.driver_name = dev->driver->pci_drv.name;
 }
 
 void
@@ -1903,7 +1850,7 @@ rte_eth_macaddr_get(uint8_t port_id, struct ether_addr *mac_addr)
 	}
 
 	dev = &rte_eth_devices[port_id];
-	ether_addr_copy(&dev->data->mac_addrs[0], mac_addr);
+	ether_addr_copy(&ETH_DATA(dev)->mac_addrs[0], mac_addr);
 }
 
 
@@ -1918,7 +1865,7 @@ rte_eth_dev_get_mtu(uint8_t port_id, uint16_t *mtu)
 	}
 
 	dev = &rte_eth_devices[port_id];
-	*mtu = dev->data->mtu;
+	*mtu = ETH_DATA(dev)->dd.mtu;
 	return 0;
 }
 
@@ -1934,11 +1881,11 @@ rte_eth_dev_set_mtu(uint8_t port_id, uint16_t mtu)
 	}
 
 	dev = &rte_eth_devices[port_id];
-	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mtu_set, -ENOTSUP);
+	FUNC_PTR_OR_ERR_RET(ETH_OPS(dev, mtu_set), -ENOTSUP);
 
-	ret = (*dev->dev_ops->mtu_set)(dev, mtu);
+	ret = ETH_OPS(dev, mtu_set)(dev, mtu);
 	if (!ret)
-		dev->data->mtu = mtu;
+		ETH_DATA(dev)->dd.mtu = mtu;
 
 	return ret;
 }
@@ -1954,7 +1901,7 @@ rte_eth_dev_vlan_filter(uint8_t port_id, uint16_t vlan_id, int on)
 	}
 
 	dev = &rte_eth_devices[port_id];
-	if (! (dev->data->dev_conf.rxmode.hw_vlan_filter)) {
+	if (! (ETH_DATA(dev)->dev_conf.rxmode.hw_vlan_filter)) {
 		PMD_DEBUG_TRACE("port %d: vlan-filtering disabled\n", port_id);
 		return (-ENOSYS);
 	}
@@ -1964,9 +1911,9 @@ rte_eth_dev_vlan_filter(uint8_t port_id, uint16_t vlan_id, int on)
 				port_id, (unsigned) vlan_id);
 		return (-EINVAL);
 	}
-	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_filter_set, -ENOTSUP);
+	FUNC_PTR_OR_ERR_RET(ETH_OPS(dev, vlan_filter_set), -ENOTSUP);
 
-	return (*dev->dev_ops->vlan_filter_set)(dev, vlan_id, on);
+	return ETH_OPS(dev, vlan_filter_set)(dev, vlan_id, on);
 }
 
 int
@@ -1980,13 +1927,13 @@ rte_eth_dev_set_vlan_strip_on_queue(uint8_t port_id, uint16_t rx_queue_id, int o
 	}
 
 	dev = &rte_eth_devices[port_id];
-	if (rx_queue_id >= dev->data->nb_rx_queues) {
+	if (rx_queue_id >= ETH_DATA(dev)->dd.nb_rx_queues) {
 		PMD_DEBUG_TRACE("Invalid rx_queue_id=%d\n", port_id);
 		return (-EINVAL);
 	}
 
-	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_strip_queue_set, -ENOTSUP);
-	(*dev->dev_ops->vlan_strip_queue_set)(dev, rx_queue_id, on);
+	FUNC_PTR_OR_ERR_RET(ETH_OPS(dev, vlan_strip_queue_set), -ENOTSUP);
+	ETH_OPS(dev, vlan_strip_queue_set)(dev, rx_queue_id, on);
 
 	return (0);
 }
@@ -2002,8 +1949,8 @@ rte_eth_dev_set_vlan_ether_type(uint8_t port_id, uint16_t tpid)
 	}
 
 	dev = &rte_eth_devices[port_id];
-	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_tpid_set, -ENOTSUP);
-	(*dev->dev_ops->vlan_tpid_set)(dev, tpid);
+	FUNC_PTR_OR_ERR_RET(ETH_OPS(dev, vlan_tpid_set), -ENOTSUP);
+	ETH_OPS(dev, vlan_tpid_set)(dev, tpid);
 
 	return (0);
 }
@@ -2025,23 +1972,23 @@ rte_eth_dev_set_vlan_offload(uint8_t port_id, int offload_mask)
 
 	/*check which option changed by application*/
 	cur = !!(offload_mask & ETH_VLAN_STRIP_OFFLOAD);
-	org = !!(dev->data->dev_conf.rxmode.hw_vlan_strip);
+	org = !!(ETH_DATA(dev)->dev_conf.rxmode.hw_vlan_strip);
 	if (cur != org){
-		dev->data->dev_conf.rxmode.hw_vlan_strip = (uint8_t)cur;
+		ETH_DATA(dev)->dev_conf.rxmode.hw_vlan_strip = (uint8_t)cur;
 		mask |= ETH_VLAN_STRIP_MASK;
 	}
 
 	cur = !!(offload_mask & ETH_VLAN_FILTER_OFFLOAD);
-	org = !!(dev->data->dev_conf.rxmode.hw_vlan_filter);
+	org = !!(ETH_DATA(dev)->dev_conf.rxmode.hw_vlan_filter);
 	if (cur != org){
-		dev->data->dev_conf.rxmode.hw_vlan_filter = (uint8_t)cur;
+		ETH_DATA(dev)->dev_conf.rxmode.hw_vlan_filter = (uint8_t)cur;
 		mask |= ETH_VLAN_FILTER_MASK;
 	}
 
 	cur = !!(offload_mask & ETH_VLAN_EXTEND_OFFLOAD);
-	org = !!(dev->data->dev_conf.rxmode.hw_vlan_extend);
+	org = !!(ETH_DATA(dev)->dev_conf.rxmode.hw_vlan_extend);
 	if (cur != org){
-		dev->data->dev_conf.rxmode.hw_vlan_extend = (uint8_t)cur;
+		ETH_DATA(dev)->dev_conf.rxmode.hw_vlan_extend = (uint8_t)cur;
 		mask |= ETH_VLAN_EXTEND_MASK;
 	}
 
@@ -2049,8 +1996,8 @@ rte_eth_dev_set_vlan_offload(uint8_t port_id, int offload_mask)
 	if(mask == 0)
 		return ret;
 
-	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_offload_set, -ENOTSUP);
-	(*dev->dev_ops->vlan_offload_set)(dev, mask);
+	FUNC_PTR_OR_ERR_RET(ETH_OPS(dev, vlan_offload_set), -ENOTSUP);
+	ETH_OPS(dev, vlan_offload_set)(dev, mask);
 
 	return ret;
 }
@@ -2068,13 +2015,13 @@ rte_eth_dev_get_vlan_offload(uint8_t port_id)
 
 	dev = &rte_eth_devices[port_id];
 
-	if (dev->data->dev_conf.rxmode.hw_vlan_strip)
+	if (ETH_DATA(dev)->dev_conf.rxmode.hw_vlan_strip)
 		ret |= ETH_VLAN_STRIP_OFFLOAD ;
 
-	if (dev->data->dev_conf.rxmode.hw_vlan_filter)
+	if (ETH_DATA(dev)->dev_conf.rxmode.hw_vlan_filter)
 		ret |= ETH_VLAN_FILTER_OFFLOAD ;
 
-	if (dev->data->dev_conf.rxmode.hw_vlan_extend)
+	if (ETH_DATA(dev)->dev_conf.rxmode.hw_vlan_extend)
 		ret |= ETH_VLAN_EXTEND_OFFLOAD ;
 
 	return ret;
@@ -2091,8 +2038,8 @@ rte_eth_dev_set_vlan_pvid(uint8_t port_id, uint16_t pvid, int on)
 	}
 
 	dev = &rte_eth_devices[port_id];
-	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_pvid_set, -ENOTSUP);
-	(*dev->dev_ops->vlan_pvid_set)(dev, pvid, on);
+	FUNC_PTR_OR_ERR_RET(ETH_OPS(dev, vlan_pvid_set), -ENOTSUP);
+	ETH_OPS(dev, vlan_pvid_set)(dev, pvid, on);
 
 	return 0;
 }
@@ -2111,9 +2058,9 @@ rte_eth_dev_fdir_add_signature_filter(uint8_t port_id,
 
 	dev = &rte_eth_devices[port_id];
 
-	if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_SIGNATURE) {
+	if (ETH_DATA(dev)->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_SIGNATURE) {
 		PMD_DEBUG_TRACE("port %d: invalid FDIR mode=%u\n",
-				port_id, dev->data->dev_conf.fdir_conf.mode);
+				port_id, ETH_DATA(dev)->dd.dev_conf.fdir_conf.mode);
 		return (-ENOSYS);
 	}
 
@@ -2126,9 +2073,8 @@ rte_eth_dev_fdir_add_signature_filter(uint8_t port_id,
 		return (-EINVAL);
 	}
 
-	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_add_signature_filter, -ENOTSUP);
-	return (*dev->dev_ops->fdir_add_signature_filter)(dev, fdir_filter,
-								queue);
+	FUNC_PTR_OR_ERR_RET(ETH_OPS(dev, fdir_add_signature_filter), -ENOTSUP);
+	return ETH_OPS(dev, fdir_add_signature_filter)(dev, fdir_filter, queue);
 }
 
 int
@@ -2145,9 +2091,9 @@ rte_eth_dev_fdir_update_signature_filter(uint8_t port_id,
 
 	dev = &rte_eth_devices[port_id];
 
-	if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_SIGNATURE) {
+	if (ETH_DATA(dev)->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_SIGNATURE) {
 		PMD_DEBUG_TRACE("port %d: invalid FDIR mode=%u\n",
-				port_id, dev->data->dev_conf.fdir_conf.mode);
+				port_id, ETH_DATA(dev)->dev_conf.fdir_conf.mode);
 		return (-ENOSYS);
 	}
 
@@ -2160,9 +2106,8 @@ rte_eth_dev_fdir_update_signature_filter(uint8_t port_id,
 		return (-EINVAL);
 	}
 
-	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_update_signature_filter, -ENOTSUP);
-	return (*dev->dev_ops->fdir_update_signature_filter)(dev, fdir_filter,
-								queue);
+	FUNC_PTR_OR_ERR_RET(ETH_OPS(dev, fdir_update_signature_filter), -ENOTSUP);
+	return ETH_OPS(dev, fdir_update_signature_filter)(dev, fdir_filter, queue);
 
 }
 
@@ -2179,9 +2124,9 @@ rte_eth_dev_fdir_remove_signature_filter(uint8_t port_id,
 
 	dev = &rte_eth_devices[port_id];
 
-	if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_SIGNATURE) {
+	if (ETH_DATA(dev)->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_SIGNATURE) {
 		PMD_DEBUG_TRACE("port %d: invalid FDIR mode=%u\n",
-				port_id, dev->data->dev_conf.fdir_conf.mode);
+				port_id, ETH_DATA(dev)->dev_conf.fdir_conf.mode);
 		return (-ENOSYS);
 	}
 
@@ -2194,8 +2139,8 @@ rte_eth_dev_fdir_remove_signature_filter(uint8_t port_id,
 		return (-EINVAL);
 	}
 
-	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_remove_signature_filter, -ENOTSUP);
-	return (*dev->dev_ops->fdir_remove_signature_filter)(dev, fdir_filter);
+	FUNC_PTR_OR_ERR_RET(ETH_OPS(dev, fdir_remove_signature_filter), -ENOTSUP);
+	return ETH_OPS(dev, fdir_remove_signature_filter)(dev, fdir_filter);
 }
 
 int
@@ -2209,14 +2154,14 @@ rte_eth_dev_fdir_get_infos(uint8_t port_id, struct rte_eth_fdir *fdir)
 	}
 
 	dev = &rte_eth_devices[port_id];
-	if (! (dev->data->dev_conf.fdir_conf.mode)) {
+	if (! (ETH_DATA(dev)->dev_conf.fdir_conf.mode)) {
 		PMD_DEBUG_TRACE("port %d: pkt-filter disabled\n", port_id);
 		return (-ENOSYS);
 	}
 
-	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_infos_get, -ENOTSUP);
+	FUNC_PTR_OR_ERR_RET(ETH_OPS(dev, fdir_infos_get), -ENOTSUP);
 
-	(*dev->dev_ops->fdir_infos_get)(dev, fdir);
+	ETH_OPS(dev, fdir_infos_get)(dev, fdir);
 	return (0);
 }
 
@@ -2235,9 +2180,9 @@ rte_eth_dev_fdir_add_perfect_filter(uint8_t port_id,
 
 	dev = &rte_eth_devices[port_id];
 
-	if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_PERFECT) {
+	if (ETH_DATA(dev)->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_PERFECT) {
 		PMD_DEBUG_TRACE("port %d: invalid FDIR mode=%u\n",
-				port_id, dev->data->dev_conf.fdir_conf.mode);
+				port_id, ETH_DATA(dev)->dev_conf.fdir_conf.mode);
 		return (-ENOSYS);
 	}
 
@@ -2254,10 +2199,9 @@ rte_eth_dev_fdir_add_perfect_filter(uint8_t port_id,
 	if (fdir_filter->iptype == RTE_FDIR_IPTYPE_IPV6)
 		return (-ENOTSUP);
 
-	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_add_perfect_filter, -ENOTSUP);
-	return (*dev->dev_ops->fdir_add_perfect_filter)(dev, fdir_filter,
-								soft_id, queue,
-								drop);
+	FUNC_PTR_OR_ERR_RET(ETH_OPS(dev, fdir_add_perfect_filter), -ENOTSUP);
+	return ETH_OPS(dev, fdir_add_perfect_filter)(dev, fdir_filter,
+								soft_id, queue, drop);
 }
 
 int
@@ -2275,9 +2219,9 @@ rte_eth_dev_fdir_update_perfect_filter(uint8_t port_id,
 
 	dev = &rte_eth_devices[port_id];
 
-	if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_PERFECT) {
+	if (ETH_DATA(dev)->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_PERFECT) {
 		PMD_DEBUG_TRACE("port %d: invalid FDIR mode=%u\n",
-				port_id, dev->data->dev_conf.fdir_conf.mode);
+				port_id, ETH_DATA(dev)->dd.dev_conf.fdir_conf.mode);
 		return (-ENOSYS);
 	}
 
@@ -2294,8 +2238,8 @@ rte_eth_dev_fdir_update_perfect_filter(uint8_t port_id,
 	if (fdir_filter->iptype == RTE_FDIR_IPTYPE_IPV6)
 		return (-ENOTSUP);
 
-	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_update_perfect_filter, -ENOTSUP);
-	return (*dev->dev_ops->fdir_update_perfect_filter)(dev, fdir_filter,
+	FUNC_PTR_OR_ERR_RET(ETH_OPS(dev, fdir_update_perfect_filter), -ENOTSUP);
+	return ETH_OPS(dev, fdir_update_perfect_filter)(dev, fdir_filter,
 							soft_id, queue, drop);
 }
 
@@ -2313,9 +2257,9 @@ rte_eth_dev_fdir_remove_perfect_filter(uint8_t port_id,
 
 	dev = &rte_eth_devices[port_id];
 
-	if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_PERFECT) {
+	if (ETH_DATA(dev)->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_PERFECT) {
 		PMD_DEBUG_TRACE("port %d: invalid FDIR mode=%u\n",
-				port_id, dev->data->dev_conf.fdir_conf.mode);
+				port_id, ETH_DATA(dev)->dev_conf.fdir_conf.mode);
 		return (-ENOSYS);
 	}
 
@@ -2332,9 +2276,8 @@ rte_eth_dev_fdir_remove_perfect_filter(uint8_t port_id,
 	if (fdir_filter->iptype == RTE_FDIR_IPTYPE_IPV6)
 		return (-ENOTSUP);
 
-	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_remove_perfect_filter, -ENOTSUP);
-	return (*dev->dev_ops->fdir_remove_perfect_filter)(dev, fdir_filter,
-								soft_id);
+	FUNC_PTR_OR_ERR_RET(ETH_OPS(dev, fdir_remove_perfect_filter), -ENOTSUP);
+	return ETH_OPS(dev, fdir_remove_perfect_filter)(dev, fdir_filter, soft_id);
 }
 
 int
@@ -2348,13 +2291,13 @@ rte_eth_dev_fdir_set_masks(uint8_t port_id, struct rte_fdir_masks *fdir_mask)
 	}
 
 	dev = &rte_eth_devices[port_id];
-	if (! (dev->data->dev_conf.fdir_conf.mode)) {
+	if (! (ETH_DATA(dev)->dev_conf.fdir_conf.mode)) {
 		PMD_DEBUG_TRACE("port %d: pkt-filter disabled\n", port_id);
 		return (-ENOSYS);
 	}
 
-	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_set_masks, -ENOTSUP);
-	return (*dev->dev_ops->fdir_set_masks)(dev, fdir_mask);
+	FUNC_PTR_OR_ERR_RET(ETH_OPS(dev, fdir_set_masks), -ENOTSUP);
+	return ETH_OPS(dev, fdir_set_masks)(dev, fdir_mask);
 }
 
 int
@@ -2368,9 +2311,9 @@ rte_eth_dev_flow_ctrl_get(uint8_t port_id, struct rte_eth_fc_conf *fc_conf)
 	}
 
 	dev = &rte_eth_devices[port_id];
-	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_get, -ENOTSUP);
+	FUNC_PTR_OR_ERR_RET(ETH_OPS(dev, flow_ctrl_get), -ENOTSUP);
 	memset(fc_conf, 0, sizeof(*fc_conf));
-	return (*dev->dev_ops->flow_ctrl_get)(dev, fc_conf);
+	return ETH_OPS(dev, flow_ctrl_get)(dev, fc_conf);
 }
 
 int
@@ -2389,8 +2332,8 @@ rte_eth_dev_flow_ctrl_set(uint8_t port_id, struct rte_eth_fc_conf *fc_conf)
 	}
 
 	dev = &rte_eth_devices[port_id];
-	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_set, -ENOTSUP);
-	return (*dev->dev_ops->flow_ctrl_set)(dev, fc_conf);
+	FUNC_PTR_OR_ERR_RET(ETH_OPS(dev, flow_ctrl_set), -ENOTSUP);
+	return ETH_OPS(dev, flow_ctrl_set)(dev, fc_conf);
 }
 
 int
@@ -2410,8 +2353,8 @@ rte_eth_dev_priority_flow_ctrl_set(uint8_t port_id, struct rte_eth_pfc_conf *pfc
 
 	dev = &rte_eth_devices[port_id];
 	/* High water, low water validation are device specific */
-	if  (*dev->dev_ops->priority_flow_ctrl_set)
-		return (*dev->dev_ops->priority_flow_ctrl_set)(dev, pfc_conf);
+	if  (ETH_OPS(dev, priority_flow_ctrl_set))
+		return ETH_OPS(dev, priority_flow_ctrl_set)(dev, pfc_conf);
 	return (-ENOTSUP);
 }
 
@@ -2491,12 +2434,12 @@ rte_eth_dev_rss_reta_update(uint8_t port_id,
 
 	/* Check entry value */
 	ret = rte_eth_check_reta_entry(reta_conf, reta_size,
-				dev->data->nb_rx_queues);
+				ETH_DATA(dev)->dd.nb_rx_queues);
 	if (ret < 0)
 		return ret;
 
-	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_update, -ENOTSUP);
-	return (*dev->dev_ops->reta_update)(dev, reta_conf, reta_size);
+	FUNC_PTR_OR_ERR_RET(ETH_OPS(dev, reta_update), -ENOTSUP);
+	return ETH_OPS(dev, reta_update)(dev, reta_conf, reta_size);
 }
 
 int
@@ -2507,7 +2450,7 @@ rte_eth_dev_rss_reta_query(uint8_t port_id,
 	struct rte_eth_dev *dev;
 	int ret;
 
-	if (port_id >= nb_ports) {
+	if (port_id >= eth_globals.nb_ports) {
 		PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
 		return -ENODEV;
 	}
@@ -2518,8 +2461,8 @@ rte_eth_dev_rss_reta_query(uint8_t port_id,
 		return ret;
 
 	dev = &rte_eth_devices[port_id];
-	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_query, -ENOTSUP);
-	return (*dev->dev_ops->reta_query)(dev, reta_conf, reta_size);
+	FUNC_PTR_OR_ERR_RET(ETH_OPS(dev, reta_query), -ENOTSUP);
+	return ETH_OPS(dev, reta_query)(dev, reta_conf, reta_size);
 }
 
 int
@@ -2541,8 +2484,8 @@ rte_eth_dev_rss_hash_update(uint8_t port_id, struct rte_eth_rss_conf *rss_conf)
 		return (-EINVAL);
 	}
 	dev = &rte_eth_devices[port_id];
-	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_update, -ENOTSUP);
-	return (*dev->dev_ops->rss_hash_update)(dev, rss_conf);
+	FUNC_PTR_OR_ERR_RET(ETH_OPS(dev, rss_hash_update), -ENOTSUP);
+	return ETH_OPS(dev, rss_hash_update)(dev, rss_conf);
 }
 
 int
@@ -2557,8 +2500,8 @@ rte_eth_dev_rss_hash_conf_get(uint8_t port_id,
 	}
 
 	dev = &rte_eth_devices[port_id];
-	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_conf_get, -ENOTSUP);
-	return (*dev->dev_ops->rss_hash_conf_get)(dev, rss_conf);
+	FUNC_PTR_OR_ERR_RET(ETH_OPS(dev, rss_hash_conf_get), -ENOTSUP);
+	return ETH_OPS(dev, rss_hash_conf_get)(dev, rss_conf);
 }
 
 int
@@ -2583,8 +2526,8 @@ rte_eth_dev_udp_tunnel_add(uint8_t port_id,
 	}
 
 	dev = &rte_eth_devices[port_id];
-	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_add, -ENOTSUP);
-	return (*dev->dev_ops->udp_tunnel_add)(dev, udp_tunnel);
+	FUNC_PTR_OR_ERR_RET(ETH_OPS(dev, udp_tunnel_add), -ENOTSUP);
+	return ETH_OPS(dev, udp_tunnel_add)(dev, udp_tunnel);
 }
 
 int
@@ -2610,8 +2553,8 @@ rte_eth_dev_udp_tunnel_delete(uint8_t port_id,
 		return -EINVAL;
 	}
 
-	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_del, -ENOTSUP);
-	return (*dev->dev_ops->udp_tunnel_del)(dev, udp_tunnel);
+	FUNC_PTR_OR_ERR_RET(ETH_OPS(dev, udp_tunnel_del), -ENOTSUP);
+	return ETH_OPS(dev, udp_tunnel_del)(dev, udp_tunnel);
 }
 
 int
@@ -2625,8 +2568,8 @@ rte_eth_led_on(uint8_t port_id)
 	}
 
 	dev = &rte_eth_devices[port_id];
-	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_on, -ENOTSUP);
-	return ((*dev->dev_ops->dev_led_on)(dev));
+	FUNC_PTR_OR_ERR_RET(ETH_OPS(dev, dev_led_on), -ENOTSUP);
+	return ETH_OPS(dev, dev_led_on)(dev);
 }
 
 int
@@ -2640,8 +2583,8 @@ rte_eth_led_off(uint8_t port_id)
 	}
 
 	dev = &rte_eth_devices[port_id];
-	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_off, -ENOTSUP);
-	return ((*dev->dev_ops->dev_led_off)(dev));
+	FUNC_PTR_OR_ERR_RET(ETH_OPS(dev, dev_led_off), -ENOTSUP);
+	return ETH_OPS(dev, dev_led_off)(dev);
 }
 
 /*
@@ -2658,7 +2601,7 @@ get_mac_addr_index(uint8_t port_id, struct ether_addr *addr)
 	rte_eth_dev_info_get(port_id, &dev_info);
 
 	for (i = 0; i < dev_info.max_mac_addrs; i++)
-		if (memcmp(addr, &dev->data->mac_addrs[i], ETHER_ADDR_LEN) == 0)
+		if (memcmp(addr, &ETH_DATA(dev)->mac_addrs[i], ETHER_ADDR_LEN) == 0)
 			return i;
 
 	return -1;
@@ -2680,7 +2623,7 @@ rte_eth_dev_mac_addr_add(uint8_t port_id, struct ether_addr *addr,
 	}
 
 	dev = &rte_eth_devices[port_id];
-	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_add, -ENOTSUP);
+	FUNC_PTR_OR_ERR_RET(ETH_OPS(dev, mac_addr_add), -ENOTSUP);
 
 	if (is_zero_ether_addr(addr)) {
 		PMD_DEBUG_TRACE("port %d: Cannot add NULL MAC address\n",
@@ -2701,7 +2644,7 @@ rte_eth_dev_mac_addr_add(uint8_t port_id, struct ether_addr *addr,
 			return (-ENOSPC);
 		}
 	} else {
-		pool_mask = dev->data->mac_pool_sel[index];
+		pool_mask = ETH_DATA(dev)->mac_pool_sel[index];
 
 		/* Check if both MAC address and pool is alread there, and do nothing */
 		if (pool_mask & (1ULL << pool))
@@ -2709,13 +2652,13 @@ rte_eth_dev_mac_addr_add(uint8_t port_id, struct ether_addr *addr,
 	}
 
 	/* Update NIC */
-	(*dev->dev_ops->mac_addr_add)(dev, addr, index, pool);
+	ETH_OPS(dev, mac_addr_add)(dev, addr, index, pool);
 
 	/* Update address in NIC data structure */
-	ether_addr_copy(addr, &dev->data->mac_addrs[index]);
+	ether_addr_copy(addr, &ETH_DATA(dev)->mac_addrs[index]);
 
 	/* Update pool bitmap in NIC data structure */
-	dev->data->mac_pool_sel[index] |= (1ULL << pool);
+	ETH_DATA(dev)->mac_pool_sel[index] |= (1ULL << pool);
 
 	return 0;
 }
@@ -2732,7 +2675,7 @@ rte_eth_dev_mac_addr_remove(uint8_t port_id, struct ether_addr *addr)
 	}
 
 	dev = &rte_eth_devices[port_id];
-	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_remove, -ENOTSUP);
+	FUNC_PTR_OR_ERR_RET(ETH_OPS(dev, mac_addr_remove), -ENOTSUP);
 
 	index = get_mac_addr_index(port_id, addr);
 	if (index == 0) {
@@ -2742,13 +2685,13 @@ rte_eth_dev_mac_addr_remove(uint8_t port_id, struct ether_addr *addr)
 		return 0;  /* Do nothing if address wasn't found */
 
 	/* Update NIC */
-	(*dev->dev_ops->mac_addr_remove)(dev, index);
+	ETH_OPS(dev, mac_addr_remove)(dev, index);
 
 	/* Update address in NIC data structure */
-	ether_addr_copy(&null_mac_addr, &dev->data->mac_addrs[index]);
+	ether_addr_copy(&null_mac_addr, &ETH_DATA(dev)->mac_addrs[index]);
 
 	/* reset pool bitmap */
-	dev->data->mac_pool_sel[index] = 0;
+	ETH_DATA(dev)->mac_pool_sel[index] = 0;
 
 	return 0;
 }
@@ -2781,8 +2724,8 @@ rte_eth_dev_set_vf_rxmode(uint8_t port_id,  uint16_t vf,
 		PMD_DEBUG_TRACE("set VF RX mode:mode mask ca not be zero\n");
 		return (-EINVAL);
 	}
-	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_rx_mode, -ENOTSUP);
-	return (*dev->dev_ops->set_vf_rx_mode)(dev, vf, rx_mode, on);
+	FUNC_PTR_OR_ERR_RET(ETH_OPS(dev, set_vf_rx_mode), -ENOTSUP);
+	return ETH_OPS(dev, set_vf_rx_mode)(dev, vf, rx_mode, on);
 }
 
 /*
@@ -2797,11 +2740,11 @@ get_hash_mac_addr_index(uint8_t port_id, struct ether_addr *addr)
 	unsigned i;
 
 	rte_eth_dev_info_get(port_id, &dev_info);
-	if (!dev->data->hash_mac_addrs)
+	if (!ETH_DATA(dev)->hash_mac_addrs)
 		return -1;
 
 	for (i = 0; i < dev_info.max_hash_mac_addrs; i++)
-		if (memcmp(addr, &dev->data->hash_mac_addrs[i],
+		if (memcmp(addr, &ETH_DATA(dev)->hash_mac_addrs[i],
 			ETHER_ADDR_LEN) == 0)
 			return i;
 
@@ -2849,16 +2792,16 @@ rte_eth_dev_uc_hash_table_set(uint8_t port_id, struct ether_addr *addr,
 		}
 	}
 
-	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_hash_table_set, -ENOTSUP);
-	ret = (*dev->dev_ops->uc_hash_table_set)(dev, addr, on);
+	FUNC_PTR_OR_ERR_RET(ETH_OPS(dev, uc_hash_table_set), -ENOTSUP);
+	ret = ETH_OPS(dev, uc_hash_table_set)(dev, addr, on);
 	if (ret == 0) {
 		/* Update address in NIC data structure */
 		if (on)
 			ether_addr_copy(addr,
-					&dev->data->hash_mac_addrs[index]);
+					&ETH_DATA(dev)->hash_mac_addrs[index]);
 		else
 			ether_addr_copy(&null_mac_addr,
-					&dev->data->hash_mac_addrs[index]);
+					&ETH_DATA(dev)->hash_mac_addrs[index]);
 	}
 
 	return ret;
@@ -2877,8 +2820,8 @@ rte_eth_dev_uc_all_hash_table_set(uint8_t port_id, uint8_t on)
 
 	dev = &rte_eth_devices[port_id];
 
-	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_all_hash_table_set, -ENOTSUP);
-	return (*dev->dev_ops->uc_all_hash_table_set)(dev, on);
+	FUNC_PTR_OR_ERR_RET(ETH_OPS(dev, uc_all_hash_table_set), -ENOTSUP);
+	return ETH_OPS(dev, uc_all_hash_table_set)(dev, on);
 }
 
 int
@@ -2903,8 +2846,8 @@ rte_eth_dev_set_vf_rx(uint8_t port_id,uint16_t vf, uint8_t on)
 		return (-EINVAL);
 	}
 
-	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_rx, -ENOTSUP);
-	return (*dev->dev_ops->set_vf_rx)(dev, vf,on);
+	FUNC_PTR_OR_ERR_RET(ETH_OPS(dev, set_vf_rx), -ENOTSUP);
+	return ETH_OPS(dev, set_vf_rx)(dev, vf, on);
 }
 
 int
@@ -2929,8 +2872,8 @@ rte_eth_dev_set_vf_tx(uint8_t port_id,uint16_t vf, uint8_t on)
 		return (-EINVAL);
 	}
 
-	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_tx, -ENOTSUP);
-	return (*dev->dev_ops->set_vf_tx)(dev, vf,on);
+	FUNC_PTR_OR_ERR_RET(ETH_OPS(dev, set_vf_tx), -ENOTSUP);
+	return ETH_OPS(dev, set_vf_tx)(dev, vf, on);
 }
 
 int
@@ -2958,8 +2901,8 @@ rte_eth_dev_set_vf_vlan_filter(uint8_t port_id, uint16_t vlan_id,
 		return (-EINVAL);
 	}
 
-	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_vlan_filter, -ENOTSUP);
-	return (*dev->dev_ops->set_vf_vlan_filter)(dev, vlan_id,
+	FUNC_PTR_OR_ERR_RET(ETH_OPS(dev, set_vf_vlan_filter), -ENOTSUP);
+	return ETH_OPS(dev, set_vf_vlan_filter)(dev, vlan_id,
 						vf_mask,vlan_on);
 }
 
@@ -2978,7 +2921,7 @@ int rte_eth_set_queue_rate_limit(uint8_t port_id, uint16_t queue_idx,
 
 	dev = &rte_eth_devices[port_id];
 	rte_eth_dev_info_get(port_id, &dev_info);
-	link = dev->data->dev_link;
+	link = ETH_DATA(dev)->dev_link;
 
 	if (queue_idx > dev_info.max_tx_queues) {
 		PMD_DEBUG_TRACE("set queue rate limit:port %d: "
@@ -2993,8 +2936,8 @@ int rte_eth_set_queue_rate_limit(uint8_t port_id, uint16_t queue_idx,
 		return -EINVAL;
 	}
 
-	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_queue_rate_limit, -ENOTSUP);
-	return (*dev->dev_ops->set_queue_rate_limit)(dev, queue_idx, tx_rate);
+	FUNC_PTR_OR_ERR_RET(ETH_OPS(dev, set_queue_rate_limit), -ENOTSUP);
+	return ETH_OPS(dev, set_queue_rate_limit)(dev, queue_idx, tx_rate);
 }
 
 int rte_eth_set_vf_rate_limit(uint8_t port_id, uint16_t vf, uint16_t tx_rate,
@@ -3015,7 +2958,7 @@ int rte_eth_set_vf_rate_limit(uint8_t port_id, uint16_t vf, uint16_t tx_rate,
 
 	dev = &rte_eth_devices[port_id];
 	rte_eth_dev_info_get(port_id, &dev_info);
-	link = dev->data->dev_link;
+	link = ETH_DATA(dev)->dev_link;
 
 	if (vf > dev_info.max_vfs) {
 		PMD_DEBUG_TRACE("set VF rate limit:port %d: "
@@ -3030,8 +2973,8 @@ int rte_eth_set_vf_rate_limit(uint8_t port_id, uint16_t vf, uint16_t tx_rate,
 		return -EINVAL;
 	}
 
-	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_rate_limit, -ENOTSUP);
-	return (*dev->dev_ops->set_vf_rate_limit)(dev, vf, tx_rate, q_msk);
+	FUNC_PTR_OR_ERR_RET(ETH_OPS(dev, set_vf_rate_limit), -ENOTSUP);
+	return ETH_OPS(dev, set_vf_rate_limit)(dev, vf, tx_rate, q_msk);
 }
 
 int
@@ -3072,9 +3015,9 @@ rte_eth_mirror_rule_set(uint8_t port_id,
 	}
 
 	dev = &rte_eth_devices[port_id];
-	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_set, -ENOTSUP);
+	FUNC_PTR_OR_ERR_RET(ETH_OPS(dev, mirror_rule_set), -ENOTSUP);
 
-	return (*dev->dev_ops->mirror_rule_set)(dev, mirror_conf, rule_id, on);
+	return ETH_OPS(dev, mirror_rule_set)(dev, mirror_conf, rule_id, on);
 }
 
 int
@@ -3095,9 +3038,9 @@ rte_eth_mirror_rule_reset(uint8_t port_id, uint8_t rule_id)
 	}
 
 	dev = &rte_eth_devices[port_id];
-	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_reset, -ENOTSUP);
+	FUNC_PTR_OR_ERR_RET(ETH_OPS(dev, mirror_rule_reset), -ENOTSUP);
 
-	return (*dev->dev_ops->mirror_rule_reset)(dev, rule_id);
+	return ETH_OPS(dev, mirror_rule_reset)(dev, rule_id);
 }
 
 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
@@ -3113,12 +3056,12 @@ rte_eth_rx_burst(uint8_t port_id, uint16_t queue_id,
 	}
 
 	dev = &rte_eth_devices[port_id];
-	FUNC_PTR_OR_ERR_RET(*dev->rx_pkt_burst, 0);
-	if (queue_id >= dev->data->nb_rx_queues) {
+	FUNC_PTR_OR_ERR_RET(dev->rx_pkt_burst, 0);
+	if (queue_id >= ETH_DATA(dev)->dd.nb_rx_queues) {
 		PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", queue_id);
 		return 0;
 	}
-	return (*dev->rx_pkt_burst)(dev->data->rx_queues[queue_id],
+	return (*dev->rx_pkt_burst)(ETH_DATA(dev)->dd.rx_queues[queue_id],
 						rx_pkts, nb_pkts);
 }
 
@@ -3136,11 +3079,11 @@ rte_eth_tx_burst(uint8_t port_id, uint16_t queue_id,
 	dev = &rte_eth_devices[port_id];
 
 	FUNC_PTR_OR_ERR_RET(*dev->tx_pkt_burst, 0);
-	if (queue_id >= dev->data->nb_tx_queues) {
+	if (queue_id >= ETH_DATA(dev)->dd.nb_tx_queues) {
 		PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", queue_id);
 		return 0;
 	}
-	return (*dev->tx_pkt_burst)(dev->data->tx_queues[queue_id],
+	return (*dev->tx_pkt_burst)(ETH_DATA(dev)->dd.tx_queues[queue_id],
 						tx_pkts, nb_pkts);
 }
 
@@ -3155,8 +3098,8 @@ rte_eth_rx_queue_count(uint8_t port_id, uint16_t queue_id)
 	}
 
 	dev = &rte_eth_devices[port_id];
-	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_count, 0);
-	return (*dev->dev_ops->rx_queue_count)(dev, queue_id);
+	FUNC_PTR_OR_ERR_RET(ETH_OPS(dev, rx_queue_count), 0);
+	return ETH_OPS(dev, rx_queue_count)(dev, queue_id);
 }
 
 int
@@ -3170,19 +3113,18 @@ rte_eth_rx_descriptor_done(uint8_t port_id, uint16_t queue_id, uint16_t offset)
 	}
 
 	dev = &rte_eth_devices[port_id];
-	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_descriptor_done, -ENOTSUP);
-	return (*dev->dev_ops->rx_descriptor_done)( \
-		dev->data->rx_queues[queue_id], offset);
+	FUNC_PTR_OR_ERR_RET(ETH_OPS(dev, rx_descriptor_done), -ENOTSUP);
+	return ETH_OPS(dev, rx_descriptor_done)
+			(ETH_DATA(dev)->dd.rx_queues[queue_id], offset);
 }
 #endif
 
 int
 rte_eth_dev_callback_register(uint8_t port_id,
-			enum rte_eth_event_type event,
-			rte_eth_dev_cb_fn cb_fn, void *cb_arg)
+			enum rte_dev_event_type event,
+			rte_dev_cb_fn cb_fn, void *cb_arg)
 {
 	struct rte_eth_dev *dev;
-	struct rte_eth_dev_callback *user_cb;
 
 	if (!cb_fn)
 		return (-EINVAL);
@@ -3193,37 +3135,16 @@ rte_eth_dev_callback_register(uint8_t port_id,
 	}
 
 	dev = &rte_eth_devices[port_id];
-	rte_spinlock_lock(&rte_eth_dev_cb_lock);
-
-	TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) {
-		if (user_cb->cb_fn == cb_fn &&
-			user_cb->cb_arg == cb_arg &&
-			user_cb->event == event) {
-			break;
-		}
-	}
-
-	/* create a new callback. */
-	if (user_cb == NULL && (user_cb = rte_zmalloc("INTR_USER_CALLBACK",
-			sizeof(struct rte_eth_dev_callback), 0)) != NULL) {
-		user_cb->cb_fn = cb_fn;
-		user_cb->cb_arg = cb_arg;
-		user_cb->event = event;
-		TAILQ_INSERT_TAIL(&(dev->link_intr_cbs), user_cb, next);
-	}
-
-	rte_spinlock_unlock(&rte_eth_dev_cb_lock);
-	return ((user_cb == NULL) ? -ENOMEM : 0);
+	return rte_dev_callback_register(&dev->link_intr_cbs,
+				&rte_eth_dev_cb_lock, event, cb_fn, cb_arg);
 }
 
 int
 rte_eth_dev_callback_unregister(uint8_t port_id,
-			enum rte_eth_event_type event,
-			rte_eth_dev_cb_fn cb_fn, void *cb_arg)
+			enum rte_dev_event_type event,
+			rte_dev_cb_fn cb_fn, void *cb_arg)
 {
-	int ret;
 	struct rte_eth_dev *dev;
-	struct rte_eth_dev_callback *cb, *next;
 
 	if (!cb_fn)
 		return (-EINVAL);
@@ -3234,55 +3155,18 @@ rte_eth_dev_callback_unregister(uint8_t port_id,
 	}
 
 	dev = &rte_eth_devices[port_id];
-	rte_spinlock_lock(&rte_eth_dev_cb_lock);
-
-	ret = 0;
-	for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL; cb = next) {
-
-		next = TAILQ_NEXT(cb, next);
-
-		if (cb->cb_fn != cb_fn || cb->event != event ||
-				(cb->cb_arg != (void *)-1 &&
-				cb->cb_arg != cb_arg))
-			continue;
-
-		/*
-		 * if this callback is not executing right now,
-		 * then remove it.
-		 */
-		if (cb->active == 0) {
-			TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next);
-			rte_free(cb);
-		} else {
-			ret = -EAGAIN;
-		}
-	}
-
-	rte_spinlock_unlock(&rte_eth_dev_cb_lock);
-	return (ret);
+	return rte_dev_callback_unregister(&dev->link_intr_cbs,
+				&rte_eth_dev_cb_lock, event, cb_fn, cb_arg);
 }
 
 void
 _rte_eth_dev_callback_process(struct rte_eth_dev *dev,
-	enum rte_eth_event_type event)
+	enum rte_dev_event_type event)
 {
-	struct rte_eth_dev_callback *cb_lst;
-	struct rte_eth_dev_callback dev_cb;
-
-	rte_spinlock_lock(&rte_eth_dev_cb_lock);
-	TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) {
-		if (cb_lst->cb_fn == NULL || cb_lst->event != event)
-			continue;
-		dev_cb = *cb_lst;
-		cb_lst->active = 1;
-		rte_spinlock_unlock(&rte_eth_dev_cb_lock);
-		dev_cb.cb_fn(dev->data->port_id, dev_cb.event,
-						dev_cb.cb_arg);
-		rte_spinlock_lock(&rte_eth_dev_cb_lock);
-		cb_lst->active = 0;
-	}
-	rte_spinlock_unlock(&rte_eth_dev_cb_lock);
+	rte_dev_callback_process(&dev->link_intr_cbs, ETH_DATA(dev)->dd.port_id,
+			event, &rte_eth_dev_cb_lock);
 }
+
 #ifdef RTE_NIC_BYPASS
 int rte_eth_dev_bypass_init(uint8_t port_id)
 {
@@ -3298,8 +3182,8 @@ int rte_eth_dev_bypass_init(uint8_t port_id)
 		return (-ENODEV);
 	}
 
-	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_init, -ENOTSUP);
-	(*dev->dev_ops->bypass_init)(dev);
+	FUNC_PTR_OR_ERR_RET(ETH_OPS(dev, bypass_init), -ENOTSUP);
+	ETH_OPS(dev, bypass_init)(dev);
 	return 0;
 }
 
@@ -3317,8 +3201,8 @@ rte_eth_dev_bypass_state_show(uint8_t port_id, uint32_t *state)
 		PMD_DEBUG_TRACE("Invalid port device\n");
 		return (-ENODEV);
 	}
-	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_state_show, -ENOTSUP);
-	(*dev->dev_ops->bypass_state_show)(dev, state);
+	FUNC_PTR_OR_ERR_RET(ETH_OPS(dev, bypass_state_show), -ENOTSUP);
+	ETH_OPS(dev, bypass_state_show)(dev, state);
 	return 0;
 }
 
@@ -3337,8 +3221,8 @@ rte_eth_dev_bypass_state_set(uint8_t port_id, uint32_t *new_state)
 		return (-ENODEV);
 	}
 
-	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_state_set, -ENOTSUP);
-	(*dev->dev_ops->bypass_state_set)(dev, new_state);
+	FUNC_PTR_OR_ERR_RET(ETH_OPS(dev, bypass_state_set), -ENOTSUP);
+	ETH_OPS(dev, bypass_state_set)(dev, new_state);
 	return 0;
 }
 
@@ -3357,8 +3241,8 @@ rte_eth_dev_bypass_event_show(uint8_t port_id, uint32_t event, uint32_t *state)
 		return (-ENODEV);
 	}
 
-	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_state_show, -ENOTSUP);
-	(*dev->dev_ops->bypass_event_show)(dev, event, state);
+	FUNC_PTR_OR_ERR_RET(ETH_OPS(dev, bypass_event_show), -ENOTSUP);
+	ETH_OPS(dev, bypass_event_show)(dev, event, state);
 	return 0;
 }
 
@@ -3377,8 +3261,8 @@ rte_eth_dev_bypass_event_store(uint8_t port_id, uint32_t event, uint32_t state)
 		return (-ENODEV);
 	}
 
-	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_event_set, -ENOTSUP);
-	(*dev->dev_ops->bypass_event_set)(dev, event, state);
+	FUNC_PTR_OR_ERR_RET(ETH_OPS(dev, bypass_event_set), -ENOTSUP);
+	ETH_OPS(dev, bypass_event_set)(dev, event, state);
 	return 0;
 }
 
@@ -3397,8 +3281,8 @@ rte_eth_dev_wd_timeout_store(uint8_t port_id, uint32_t timeout)
 		return (-ENODEV);
 	}
 
-	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_wd_timeout_set, -ENOTSUP);
-	(*dev->dev_ops->bypass_wd_timeout_set)(dev, timeout);
+	FUNC_PTR_OR_ERR_RET(ETH_OPS(dev, bypass_wd_timeout_set), -ENOTSUP);
+	ETH_OPS(dev, bypass_wd_timeout_set)(dev, timeout);
 	return 0;
 }
 
@@ -3417,8 +3301,8 @@ rte_eth_dev_bypass_ver_show(uint8_t port_id, uint32_t *ver)
 		return (-ENODEV);
 	}
 
-	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_ver_show, -ENOTSUP);
-	(*dev->dev_ops->bypass_ver_show)(dev, ver);
+	FUNC_PTR_OR_ERR_RET(ETH_OPS(dev, bypass_ver_show), -ENOTSUP);
+	ETH_OPS(dev, bypass_ver_show)(dev, ver);
 	return 0;
 }
 
@@ -3437,8 +3321,8 @@ rte_eth_dev_bypass_wd_timeout_show(uint8_t port_id, uint32_t *wd_timeout)
 		return (-ENODEV);
 	}
 
-	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_wd_timeout_show, -ENOTSUP);
-	(*dev->dev_ops->bypass_wd_timeout_show)(dev, wd_timeout);
+	FUNC_PTR_OR_ERR_RET(ETH_OPS(dev, bypass_wd_timeout_show), -ENOTSUP);
+	ETH_OPS(dev, bypass_wd_timeout_show)(dev, wd_timeout);
 	return 0;
 }
 
@@ -3457,8 +3341,8 @@ rte_eth_dev_bypass_wd_reset(uint8_t port_id)
 		return (-ENODEV);
 	}
 
-	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_wd_reset, -ENOTSUP);
-	(*dev->dev_ops->bypass_wd_reset)(dev);
+	FUNC_PTR_OR_ERR_RET(ETH_OPS(dev, bypass_wd_reset), -ENOTSUP);
+	ETH_OPS(dev, bypass_wd_reset)(dev);
 	return 0;
 }
 #endif
@@ -3474,8 +3358,8 @@ rte_eth_dev_filter_supported(uint8_t port_id, enum rte_filter_type filter_type)
 	}
 
 	dev = &rte_eth_devices[port_id];
-	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
-	return (*dev->dev_ops->filter_ctrl)(dev, filter_type,
+	FUNC_PTR_OR_ERR_RET(ETH_OPS(dev, filter_ctrl), -ENOTSUP);
+	return ETH_OPS(dev, filter_ctrl)(dev, filter_type,
 				RTE_ETH_FILTER_NOP, NULL);
 }
 
@@ -3491,8 +3375,8 @@ rte_eth_dev_filter_ctrl(uint8_t port_id, enum rte_filter_type filter_type,
 	}
 
 	dev = &rte_eth_devices[port_id];
-	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
-	return (*dev->dev_ops->filter_ctrl)(dev, filter_type, filter_op, arg);
+	FUNC_PTR_OR_ERR_RET(ETH_OPS(dev, filter_ctrl), -ENOTSUP);
+	return ETH_OPS(dev, filter_ctrl)(dev, filter_type, filter_op, arg);
 }
 
 void *
@@ -3505,23 +3389,12 @@ rte_eth_add_rx_callback(uint8_t port_id, uint16_t queue_id,
 #endif
 	/* check input parameters */
 	if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
-		    queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
+		    queue_id >= ETH_DATA(&rte_eth_devices[port_id])->dd.nb_rx_queues) {
 		rte_errno = EINVAL;
 		return NULL;
 	}
 
-	struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
-
-	if (cb == NULL) {
-		rte_errno = ENOMEM;
-		return NULL;
-	}
-
-	cb->fn.rx = fn;
-	cb->param = user_param;
-	cb->next = rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
-	rte_eth_devices[port_id].post_rx_burst_cbs[queue_id] = cb;
-	return cb;
+	return rte_dev_add_callback(&rte_eth_devices[port_id].post_rx_burst_cbs[queue_id], fn, user_param);
 }
 
 void *
@@ -3534,99 +3407,46 @@ rte_eth_add_tx_callback(uint8_t port_id, uint16_t queue_id,
 #endif
 	/* check input parameters */
 	if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
-		    queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) {
+		    queue_id >= ETH_DATA(&rte_eth_devices[port_id])->dd.nb_tx_queues) {
 		rte_errno = EINVAL;
 		return NULL;
 	}
 
-	struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
-
-	if (cb == NULL) {
-		rte_errno = ENOMEM;
-		return NULL;
-	}
-
-	cb->fn.tx = fn;
-	cb->param = user_param;
-	cb->next = rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id];
-	rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id] = cb;
-	return cb;
+	return rte_dev_add_callback(&rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id], fn, user_param);
 }
 
 int
 rte_eth_remove_rx_callback(uint8_t port_id, uint16_t queue_id,
-		struct rte_eth_rxtx_callback *user_cb)
+		struct rte_dev_rxtx_callback *user_cb)
 {
 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
 	return (-ENOTSUP);
 #endif
 	/* Check input parameters. */
 	if (!rte_eth_dev_is_valid_port(port_id) || user_cb == NULL ||
-		    queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
+		    queue_id >= ETH_DATA(&rte_eth_devices[port_id])->dd.nb_rx_queues) {
 		return (-EINVAL);
 	}
 
 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
-	struct rte_eth_rxtx_callback *cb = dev->post_rx_burst_cbs[queue_id];
-	struct rte_eth_rxtx_callback *prev_cb;
-
-	/* Reset head pointer and remove user cb if first in the list. */
-	if (cb == user_cb) {
-		dev->post_rx_burst_cbs[queue_id] = user_cb->next;
-		return 0;
-	}
-
-	/* Remove the user cb from the callback list. */
-	do {
-		prev_cb = cb;
-		cb = cb->next;
-
-		if (cb == user_cb) {
-			prev_cb->next = user_cb->next;
-			return 0;
-		}
-
-	} while (cb != NULL);
 
-	/* Callback wasn't found. */
-	return (-EINVAL);
+	return rte_dev_remove_callback(&dev->post_rx_burst_cbs[queue_id], user_cb);
 }
 
 int
 rte_eth_remove_tx_callback(uint8_t port_id, uint16_t queue_id,
-		struct rte_eth_rxtx_callback *user_cb)
+		struct rte_dev_rxtx_callback *user_cb)
 {
 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
 	return (-ENOTSUP);
 #endif
 	/* Check input parameters. */
 	if (!rte_eth_dev_is_valid_port(port_id) || user_cb == NULL ||
-		    queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) {
+		    queue_id >= ETH_DATA(&rte_eth_devices[port_id])->dd.nb_tx_queues) {
 		return (-EINVAL);
 	}
 
 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
-	struct rte_eth_rxtx_callback *cb = dev->pre_tx_burst_cbs[queue_id];
-	struct rte_eth_rxtx_callback *prev_cb;
-
-	/* Reset head pointer and remove user cb if first in the list. */
-	if (cb == user_cb) {
-		dev->pre_tx_burst_cbs[queue_id] = user_cb->next;
-		return 0;
-	}
-
-	/* Remove the user cb from the callback list. */
-	do {
-		prev_cb = cb;
-		cb = cb->next;
-
-		if (cb == user_cb) {
-			prev_cb->next = user_cb->next;
-			return 0;
-		}
-
-	} while (cb != NULL);
 
-	/* Callback wasn't found. */
-	return (-EINVAL);
+	return rte_dev_remove_callback(&dev->pre_tx_burst_cbs[queue_id], user_cb);
 }
diff --git a/lib/librte_ether/rte_ethdev.h b/lib/librte_ether/rte_ethdev.h
index e8df027..58134a9 100644
--- a/lib/librte_ether/rte_ethdev.h
+++ b/lib/librte_ether/rte_ethdev.h
@@ -178,9 +178,14 @@ extern "C" {
 #include <rte_dev.h>
 #include <rte_devargs.h>
 #include <rte_mbuf.h>
+#include <rte_common_device.h>
 #include "rte_ether.h"
 #include "rte_eth_ctrl.h"
 
+#define rte_eth_dev			rte_dev
+#define eth_driver			rte_dev_drv
+#define eth_dev_global		rte_dev_global
+
 /**
  * A structure used to retrieve statistics for an Ethernet port.
  */
@@ -896,10 +901,9 @@ struct rte_eth_conf {
 #define DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM 0x00000080 /**< Used for tunneling packet. */
 
 struct rte_eth_dev_info {
-	struct rte_pci_device *pci_dev; /**< Device PCI information. */
-	const char *driver_name; /**< Device Driver name. */
-	unsigned int if_index; /**< Index to bound host interface, or 0 if none.
-		Use if_indextoname() to translate into an interface name. */
+	struct rte_dev_info		di;	/**< Common device information */
+
+	/* Rest of structure is for private device data */
 	uint32_t min_rx_bufsize; /**< Minimum size of RX buffer. */
 	uint32_t max_rx_pktlen; /**< Maximum configurable length of RX pkt. */
 	uint16_t max_rx_queues; /**< Maximum number of RX queues. */
@@ -937,12 +941,6 @@ struct rte_eth_xstats {
 	uint64_t value;
 };
 
-struct rte_eth_dev;
-
-struct rte_eth_dev_callback;
-/** @internal Structure to keep track of registered callbacks */
-TAILQ_HEAD(rte_eth_dev_cb_list, rte_eth_dev_callback);
-
 /*
  * Definitions of all functions exported by an Ethernet driver through the
  * generic structure of type *eth_dev_ops* supplied in the *rte_eth_dev*
@@ -1065,16 +1063,6 @@ typedef void (*vlan_strip_queue_set_t)(struct rte_eth_dev *dev,
 				  int on);
 /**< @internal VLAN stripping enable/disable on a queue of an Ethernet device. */
 
-typedef uint16_t (*eth_rx_burst_t)(void *rxq,
-				   struct rte_mbuf **rx_pkts,
-				   uint16_t nb_pkts);
-/**< @internal Retrieve input packets from a receive queue of an Ethernet device. */
-
-typedef uint16_t (*eth_tx_burst_t)(void *txq,
-				   struct rte_mbuf **tx_pkts,
-				   uint16_t nb_pkts);
-/**< @internal Send output packets on a transmit queue of an Ethernet device. */
-
 typedef int (*fdir_add_signature_filter_t)(struct rte_eth_dev *dev,
 					   struct rte_fdir_filter *fdir_ftr,
 					   uint8_t rx_queue);
@@ -1383,122 +1371,16 @@ struct eth_dev_ops {
 	eth_filter_ctrl_t              filter_ctrl;          /**< common filter control*/
 };
 
-/**
- * Function type used for RX packet processing packet callbacks.
- *
- * The callback function is called on RX with a burst of packets that have
- * been received on the given port and queue.
- *
- * @param port
- *   The Ethernet port on which RX is being performed.
- * @param queue
- *   The queue on the Ethernet port which is being used to receive the packets.
- * @param pkts
- *   The burst of packets that have just been received.
- * @param nb_pkts
- *   The number of packets in the burst pointed to by "pkts".
- * @param max_pkts
- *   The max number of packets that can be stored in the "pkts" array.
- * @param user_param
- *   The arbitrary user parameter passed in by the application when the callback
- *   was originally configured.
- * @return
- *   The number of packets returned to the user.
- */
-typedef uint16_t (*rte_rx_callback_fn)(uint8_t port, uint16_t queue,
-	struct rte_mbuf *pkts[], uint16_t nb_pkts, uint16_t max_pkts,
-	void *user_param);
-
-/**
- * Function type used for TX packet processing packet callbacks.
- *
- * The callback function is called on TX with a burst of packets immediately
- * before the packets are put onto the hardware queue for transmission.
- *
- * @param port
- *   The Ethernet port on which TX is being performed.
- * @param queue
- *   The queue on the Ethernet port which is being used to transmit the packets.
- * @param pkts
- *   The burst of packets that are about to be transmitted.
- * @param nb_pkts
- *   The number of packets in the burst pointed to by "pkts".
- * @param user_param
- *   The arbitrary user parameter passed in by the application when the callback
- *   was originally configured.
- * @return
- *   The number of packets to be written to the NIC.
- */
-typedef uint16_t (*rte_tx_callback_fn)(uint8_t port, uint16_t queue,
-	struct rte_mbuf *pkts[], uint16_t nb_pkts, void *user_param);
-
-/**
- * @internal
- * Structure used to hold information about the callbacks to be called for a
- * queue on RX and TX.
- */
-struct rte_eth_rxtx_callback {
-	struct rte_eth_rxtx_callback *next;
-	union{
-		rte_rx_callback_fn rx;
-		rte_tx_callback_fn tx;
-	} fn;
-	void *param;
-};
-
-/*
- * The eth device type
- */
-enum rte_eth_dev_type {
-	RTE_ETH_DEV_UNKNOWN,	/**< unknown device type */
-	RTE_ETH_DEV_PCI,
-		/**< Physical function and Virtual function of PCI devices */
-	RTE_ETH_DEV_VIRTUAL,	/**< non hardware device */
-	RTE_ETH_DEV_MAX		/**< max value of this enum */
-};
-
-/**
- * @internal
- * The generic data structure associated with each ethernet device.
- *
- * Pointers to burst-oriented packet receive and transmit functions are
- * located at the beginning of the structure, along with the pointer to
- * where all the data elements for the particular device are stored in shared
- * memory. This split allows the function pointer and driver data to be per-
- * process, while the actual configuration data for the device is shared.
- */
-struct rte_eth_dev {
-	eth_rx_burst_t rx_pkt_burst; /**< Pointer to PMD receive function. */
-	eth_tx_burst_t tx_pkt_burst; /**< Pointer to PMD transmit function. */
-	struct rte_eth_dev_data *data;  /**< Pointer to device data */
-	const struct eth_driver *driver;/**< Driver for this device */
-	struct eth_dev_ops *dev_ops;    /**< Functions exported by PMD */
-	struct rte_pci_device *pci_dev; /**< PCI info. supplied by probing */
-	/** User application callbacks for NIC interrupts */
-	struct rte_eth_dev_cb_list link_intr_cbs;
-	/**
-	 * User-supplied functions called from rx_burst to post-process
-	 * received packets before passing them to the user
-	 */
-	struct rte_eth_rxtx_callback *post_rx_burst_cbs[RTE_MAX_QUEUES_PER_PORT];
-	/**
-	 * User-supplied functions called from tx_burst to pre-process
-	 * received packets before passing them to the driver for transmission.
-	 */
-	struct rte_eth_rxtx_callback *pre_tx_burst_cbs[RTE_MAX_QUEUES_PER_PORT];
-	uint8_t attached; /**< Flag indicating the port is attached */
-	enum rte_eth_dev_type dev_type; /**< Flag indicating the device type */
-};
+#define ETH_OPS(_dev, _op)	(((struct eth_dev_ops *)(_dev)->dev_ops)->_op)
 
 struct rte_eth_dev_sriov {
-	uint8_t active;               /**< SRIOV is active with 16, 32 or 64 pools */
-	uint8_t nb_q_per_pool;        /**< rx queue number per pool */
-	uint16_t def_vmdq_idx;        /**< Default pool num used for PF */
-	uint16_t def_pool_q_idx;      /**< Default pool queue start reg index */
+	uint8_t active;             /**< SRIOV is active with 16, 32 or 64 pools */
+	uint8_t nb_q_per_pool;      /**< rx queue number per pool */
+	uint16_t def_vmdq_idx;      /**< Default pool num used for PF */
+	uint16_t def_pool_q_idx;    /**< Default pool queue start reg index */
 };
-#define RTE_ETH_DEV_SRIOV(dev)         ((dev)->data->sriov)
 
-#define RTE_ETH_NAME_MAX_LEN (32)
+#define RTE_ETH_NAME_MAX_LEN	RTE_DEV_NAME_MAX_LEN
 
 /**
  * @internal
@@ -1508,39 +1390,31 @@ struct rte_eth_dev_sriov {
  * processes in a multi-process configuration.
  */
 struct rte_eth_dev_data {
-	char name[RTE_ETH_NAME_MAX_LEN]; /**< Unique identifier name */
-
-	void **rx_queues; /**< Array of pointers to RX queues. */
-	void **tx_queues; /**< Array of pointers to TX queues. */
-	uint16_t nb_rx_queues; /**< Number of RX queues. */
-	uint16_t nb_tx_queues; /**< Number of TX queues. */
+	struct rte_dev_data		dd;			/**< Common device data */
 
+	/* Rest of the structure is private data */
 	struct rte_eth_dev_sriov sriov;    /**< SRIOV data */
 
-	void *dev_private;              /**< PMD-specific private data */
-
 	struct rte_eth_link dev_link;
 	/**< Link-level information & status */
 
 	struct rte_eth_conf dev_conf;   /**< Configuration applied to device. */
-	uint16_t mtu;                   /**< Maximum Transmission Unit. */
-
-	uint32_t min_rx_buf_size;
-	/**< Common rx buffer size handled by all queues */
-
-	uint64_t rx_mbuf_alloc_failed; /**< RX ring mbuf allocation failures. */
-	struct ether_addr* mac_addrs;/**< Device Ethernet Link address. */
+	struct ether_addr* mac_addrs;	/**< Device Ethernet Link address. */
 	uint64_t mac_pool_sel[ETH_NUM_RECEIVE_MAC_ADDR];
 	/** bitmap array of associating Ethernet MAC addresses to pools */
 	struct ether_addr* hash_mac_addrs;
+
 	/** Device Ethernet MAC addresses of hash filtering. */
-	uint8_t port_id;           /**< Device [external] port identifier. */
-	uint8_t promiscuous   : 1, /**< RX promiscuous mode ON(1) / OFF(0). */
-		scattered_rx : 1,  /**< RX of scattered packets is ON(1) / OFF(0) */
-		all_multicast : 1, /**< RX all multicast mode ON(1) / OFF(0). */
-		dev_started : 1;   /**< Device state: STARTED(1) / STOPPED(0). */
+	uint8_t promiscuous   : 1,  /**< RX promiscuous mode ON(1) / OFF(0). */
+		scattered_rx  : 1,  /**< RX of scattered packets is ON(1) / OFF(0). */
+		all_multicast : 1,  /**< RX all multicast mode ON(1) / OFF(0). */
+		dev_started   : 1;  /**< Device state: STARTED(1) / STOPPED(0). */
 };
 
+#define ETH_DATA(dev)		((struct rte_eth_dev_data *)(dev)->data)
+#define ETH_CONF(dev)		ETH_DATA(dev)->dev_conf
+#define ETH_SRIOV(dev)		ETH_DATA(dev)->sriov
+
 /**
  * @internal
  * The pool of *rte_eth_dev* structures. The size of the pool
@@ -1549,6 +1423,33 @@ struct rte_eth_dev_data {
 extern struct rte_eth_dev rte_eth_devices[];
 
 /**
+ * @internal
+ * The global Ethernet device structure pointer; defined in rte_ethdev.c.
+ */
+extern struct eth_dev_global *rte_eth_globals;
+
+/**
+ * Return the global structure pointer.
+ *
+ * @return
+ *   Return the global structure pointer.
+ */
+static inline struct eth_dev_global *rte_eth_global(void) {
+	return rte_eth_globals;
+}
+
+/**
+ * Check whether a port number refers to a valid device.
+ *
+ * @param   port_id Port ID value to select the device.
+ *
+ * @return
+ *   - Nonzero if the port ID is valid, zero otherwise.
+ */
+#define rte_eth_dev_is_valid_port(port_id) \
+    rte_dev_is_valid_port((struct eth_dev_global *)rte_eth_globals, port_id)
+
+/**
  * Get the total number of Ethernet devices that have been successfully
  * initialized by the [matching] Ethernet driver during the PCI probing phase.
  * All devices whose port identifier is in the range
@@ -1561,7 +1462,20 @@ extern struct rte_eth_dev rte_eth_devices[];
  * @return
  *   - The total number of usable Ethernet devices.
  */
-extern uint8_t rte_eth_dev_count(void);
+#define rte_eth_dev_count() \
+    rte_dev_count((struct eth_dev_global *)rte_eth_globals)
+
+/**
+ * Get the rte_eth_dev structure device pointer for the device.
+ *
+ * @param   pid
+ *  Port ID value to select the device structure.
+ *
+ * @return
+ *   - The rte_eth_dev structure pointer for the given port ID.
+ */
+#define rte_eth_get_dev(pid) \
+    ((struct rte_eth_dev *)rte_get_dev((struct eth_dev_global *)rte_eth_globals, (pid)))
 
 /**
  * Function for internal use by port hotplug functions.
@@ -1585,7 +1499,7 @@ extern struct rte_eth_dev *rte_eth_dev_allocated(const char *name);
  *   - Slot in the rte_dev_devices array for a new device;
  */
 struct rte_eth_dev *rte_eth_dev_allocate(const char *name,
-		enum rte_eth_dev_type type);
+		enum rte_dev_type type);
 
 /**
  * Function for internal use by dummy drivers primarily, e.g. ring-based
@@ -1625,78 +1539,6 @@ int rte_eth_dev_attach(const char *devargs, uint8_t *port_id);
  */
 int rte_eth_dev_detach(uint8_t port_id, char *devname);
 
-struct eth_driver;
-/**
- * @internal
- * Initialization function of an Ethernet driver invoked for each matching
- * Ethernet PCI device detected during the PCI probing phase.
- *
- * @param eth_dev
- *   The *eth_dev* pointer is the address of the *rte_eth_dev* structure
- *   associated with the matching device and which have been [automatically]
- *   allocated in the *rte_eth_devices* array.
- *   The *eth_dev* structure is supplied to the driver initialization function
- *   with the following fields already initialized:
- *
- *   - *pci_dev*: Holds the pointers to the *rte_pci_device* structure which
- *     contains the generic PCI information of the matching device.
- *
- *   - *driver*: Holds the pointer to the *eth_driver* structure.
- *
- *   - *dev_private*: Holds a pointer to the device private data structure.
- *
- *   - *mtu*: Contains the default Ethernet maximum frame length (1500).
- *
- *   - *port_id*: Contains the port index of the device (actually the index
- *     of the *eth_dev* structure in the *rte_eth_devices* array).
- *
- * @return
- *   - 0: Success, the device is properly initialized by the driver.
- *        In particular, the driver MUST have set up the *dev_ops* pointer
- *        of the *eth_dev* structure.
- *   - <0: Error code of the device initialization failure.
- */
-typedef int (*eth_dev_init_t)(struct rte_eth_dev *eth_dev);
-
-/**
- * @internal
- * Finalization function of an Ethernet driver invoked for each matching
- * Ethernet PCI device detected during the PCI closing phase.
- *
- * @param eth_dev
- *   The *eth_dev* pointer is the address of the *rte_eth_dev* structure
- *   associated with the matching device and which have been [automatically]
- *   allocated in the *rte_eth_devices* array.
- * @return
- *   - 0: Success, the device is properly finalized by the driver.
- *        In particular, the driver MUST free the *dev_ops* pointer
- *        of the *eth_dev* structure.
- *   - <0: Error code of the device initialization failure.
- */
-typedef int (*eth_dev_uninit_t)(struct rte_eth_dev *eth_dev);
-
-/**
- * @internal
- * The structure associated with a PMD Ethernet driver.
- *
- * Each Ethernet driver acts as a PCI driver and is represented by a generic
- * *eth_driver* structure that holds:
- *
- * - An *rte_pci_driver* structure (which must be the first field).
- *
- * - The *eth_dev_init* function invoked for each matching PCI device.
- *
- * - The *eth_dev_uninit* function invoked for each matching PCI device.
- *
- * - The size of the private data to allocate for each matching device.
- */
-struct eth_driver {
-	struct rte_pci_driver pci_drv;    /**< The PMD is also a PCI driver. */
-	eth_dev_init_t eth_dev_init;      /**< Device init function. */
-	eth_dev_uninit_t eth_dev_uninit;  /**< Device uninit function. */
-	unsigned int dev_private_size;    /**< Size of device private data. */
-};
-
 /**
  * @internal
  * A function invoked by the initialization function of an Ethernet driver
@@ -2414,11 +2256,11 @@ rte_eth_rx_burst(uint8_t port_id, uint16_t queue_id,
 
 	dev = &rte_eth_devices[port_id];
 
-	int16_t nb_rx = (*dev->rx_pkt_burst)(dev->data->rx_queues[queue_id],
+	int16_t nb_rx = (*dev->rx_pkt_burst)(_DD(dev, rx_queues[queue_id]),
 			rx_pkts, nb_pkts);
 
 #ifdef RTE_ETHDEV_RXTX_CALLBACKS
-	struct rte_eth_rxtx_callback *cb = dev->post_rx_burst_cbs[queue_id];
+	struct rte_dev_rxtx_callback *cb = dev->post_rx_burst_cbs[queue_id];
 
 	if (unlikely(cb != NULL)) {
 		do {
@@ -2452,7 +2294,7 @@ rte_eth_rx_queue_count(uint8_t port_id, uint16_t queue_id)
         struct rte_eth_dev *dev;
 
         dev = &rte_eth_devices[port_id];
-        return (*dev->dev_ops->rx_queue_count)(dev, queue_id);
+        return ETH_OPS(dev, rx_queue_count)(dev, queue_id);
 }
 #endif
 
@@ -2481,8 +2323,8 @@ rte_eth_rx_descriptor_done(uint8_t port_id, uint16_t queue_id, uint16_t offset)
 	struct rte_eth_dev *dev;
 
 	dev = &rte_eth_devices[port_id];
-	return (*dev->dev_ops->rx_descriptor_done)( \
-		dev->data->rx_queues[queue_id], offset);
+	return ETH_OPS(dev, rx_descriptor_done)(
+		_DD(dev, rx_queues[queue_id]), offset);
 }
 #endif
 
@@ -2558,7 +2400,7 @@ rte_eth_tx_burst(uint8_t port_id, uint16_t queue_id,
 	dev = &rte_eth_devices[port_id];
 
 #ifdef RTE_ETHDEV_RXTX_CALLBACKS
-	struct rte_eth_rxtx_callback *cb = dev->pre_tx_burst_cbs[queue_id];
+	struct rte_dev_rxtx_callback *cb = dev->pre_tx_burst_cbs[queue_id];
 
 	if (unlikely(cb != NULL)) {
 		do {
@@ -2569,7 +2411,7 @@ rte_eth_tx_burst(uint8_t port_id, uint16_t queue_id,
 	}
 #endif
 
-	return (*dev->tx_pkt_burst)(dev->data->tx_queues[queue_id], tx_pkts, nb_pkts);
+	return dev->tx_pkt_burst(_DD(dev, tx_queues[queue_id]), tx_pkts, nb_pkts);
 }
 #endif
 
@@ -2789,20 +2631,6 @@ int rte_eth_dev_fdir_remove_perfect_filter(uint8_t port_id,
 int rte_eth_dev_fdir_set_masks(uint8_t port_id,
 			       struct rte_fdir_masks *fdir_mask);
 
-/**
- * The eth device event type for interrupt, and maybe others in the future.
- */
-enum rte_eth_event_type {
-	RTE_ETH_EVENT_UNKNOWN,  /**< unknown event type */
-	RTE_ETH_EVENT_INTR_LSC, /**< lsc interrupt event */
-	RTE_ETH_EVENT_MAX       /**< max value of this enum */
-};
-
-typedef void (*rte_eth_dev_cb_fn)(uint8_t port_id, \
-		enum rte_eth_event_type event, void *cb_arg);
-/**< user application callback to be registered for interrupts */
-
-
 
 /**
  * Register a callback function for specific port id.
@@ -2821,8 +2649,8 @@ typedef void (*rte_eth_dev_cb_fn)(uint8_t port_id, \
  *  - On failure, a negative value.
  */
 int rte_eth_dev_callback_register(uint8_t port_id,
-			enum rte_eth_event_type event,
-		rte_eth_dev_cb_fn cb_fn, void *cb_arg);
+			enum rte_dev_event_type event,
+		rte_dev_cb_fn cb_fn, void *cb_arg);
 
 /**
  * Unregister a callback function for specific port id.
@@ -2842,8 +2670,8 @@ int rte_eth_dev_callback_register(uint8_t port_id,
  *  - On failure, a negative value.
  */
 int rte_eth_dev_callback_unregister(uint8_t port_id,
-			enum rte_eth_event_type event,
-		rte_eth_dev_cb_fn cb_fn, void *cb_arg);
+			enum rte_dev_event_type event,
+		rte_dev_cb_fn cb_fn, void *cb_arg);
 
 /**
  * @internal Executes all the user application registered callbacks for
@@ -2859,7 +2687,7 @@ int rte_eth_dev_callback_unregister(uint8_t port_id,
  *  void
  */
 void _rte_eth_dev_callback_process(struct rte_eth_dev *dev,
-				enum rte_eth_event_type event);
+				enum rte_dev_event_type event);
 
 /**
  * Turn on the LED on the Ethernet device.
@@ -3512,7 +3340,7 @@ int rte_eth_dev_filter_ctrl(uint8_t port_id, enum rte_filter_type filter_type,
  *   On success, a pointer value which can later be used to remove the callback.
  */
 void *rte_eth_add_rx_callback(uint8_t port_id, uint16_t queue_id,
-		rte_rx_callback_fn fn, void *user_param);
+        rte_rx_callback_fn fn, void *user_param);
 
 /**
  * Add a callback to be called on packet TX on a given port and queue.
@@ -3537,7 +3365,7 @@ void *rte_eth_add_rx_callback(uint8_t port_id, uint16_t queue_id,
  *   On success, a pointer value which can later be used to remove the callback.
  */
 void *rte_eth_add_tx_callback(uint8_t port_id, uint16_t queue_id,
-		rte_tx_callback_fn fn, void *user_param);
+        rte_tx_callback_fn fn, void *user_param);
 
 /**
  * Remove an RX packet callback from a given port and queue.
@@ -3570,7 +3398,7 @@ void *rte_eth_add_tx_callback(uint8_t port_id, uint16_t queue_id,
  *               is NULL or not found for the port/queue.
  */
 int rte_eth_remove_rx_callback(uint8_t port_id, uint16_t queue_id,
-		struct rte_eth_rxtx_callback *user_cb);
+        struct rte_dev_rxtx_callback *user_cb);
 
 /**
  * Remove a TX packet callback from a given port and queue.
@@ -3603,7 +3431,7 @@ int rte_eth_remove_rx_callback(uint8_t port_id, uint16_t queue_id,
  *               is NULL or not found for the port/queue.
  */
 int rte_eth_remove_tx_callback(uint8_t port_id, uint16_t queue_id,
-		struct rte_eth_rxtx_callback *user_cb);
+        struct rte_dev_rxtx_callback *user_cb);
 
 #ifdef __cplusplus
 }
-- 
2.3.0
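
A minimal sketch of how ethdev-internal code reads device state once dev->data
and dev->dev_ops become generic pointers behind the ETH_DATA()/ETH_OPS()
accessors. This is illustration only, not part of the patch: the field names
are taken from the hunks above, while the link_update op and the behaviour of
rte_eth_get_dev() for an unknown port are assumptions.

#include <stdio.h>
#include <rte_ethdev.h>

/* Illustration only -- not part of the patch. */
static void
example_dump_port(uint8_t port_id)
{
	struct rte_eth_dev *dev;

	if (!rte_eth_dev_is_valid_port(port_id))
		return;
	dev = rte_eth_get_dev(port_id);

	/* Generic fields now live in the embedded rte_dev_data ('dd'). */
	printf("port %d: %d RX queues, %d TX queues\n",
	       ETH_DATA(dev)->dd.port_id,
	       ETH_DATA(dev)->dd.nb_rx_queues,
	       ETH_DATA(dev)->dd.nb_tx_queues);

	/* Ethernet-specific fields stay in rte_eth_dev_data itself. */
	printf("  promiscuous=%d link_status=%d\n",
	       ETH_DATA(dev)->promiscuous,
	       ETH_DATA(dev)->dev_link.link_status);

	/* Driver ops are reached through the cast hidden in ETH_OPS(). */
	if (ETH_OPS(dev, link_update) != NULL)
		(void)ETH_OPS(dev, link_update)(dev, 0);
}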

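A similar sketch of application-level callback registration with the renamed
generic types. The rte_dev_cb_fn signature and the RTE_DEV_EVENT_INTR_LSC
enumerator are assumed to come from rte_common_device.h in patch 1/4 and to
mirror the old rte_eth_dev_cb_fn / RTE_ETH_EVENT_INTR_LSC definitions they
replace; treat both as placeholders.

#include <stdio.h>
#include <rte_ethdev.h>

/* Illustration only; names flagged above as assumptions may differ. */
static void
link_event_cb(uint8_t port_id, enum rte_dev_event_type event, void *arg)
{
	struct rte_eth_link link;

	(void)event;
	(void)arg;
	rte_eth_link_get_nowait(port_id, &link);
	printf("port %d link is %s\n", port_id,
	       link.link_status ? "up" : "down");
}

static int
watch_link(uint8_t port_id)
{
	/* RTE_DEV_EVENT_INTR_LSC: assumed generic name for the LSC event. */
	return rte_eth_dev_callback_register(port_id, RTE_DEV_EVENT_INTR_LSC,
					     link_event_cb, NULL);
}
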

Thread overview: 28+ messages
2015-04-13 19:44 [dpdk-dev] [RFC PATCH 0/4 v2] Extending DPDK with " Keith Wiles
2015-04-13 19:44 ` [dpdk-dev] [RFC PATCH 1/4 v2] Adding the common device files for " Keith Wiles
2015-05-04 13:13   ` Marc Sune
2015-05-04 14:44     ` Wiles, Keith
2015-04-13 19:44 ` Keith Wiles [this message]
2015-04-13 19:44 ` [dpdk-dev] [RFC PATCH 3/4 v2] Add the test file changes for common " Keith Wiles
2015-04-13 19:44 ` [dpdk-dev] [RFC PATCH 4/4 v2] Update PMD files for new " Keith Wiles
2015-04-17 15:16 ` [dpdk-dev] [RFC PATCH 0/4] pktdev Bruce Richardson
2015-04-17 15:16   ` [dpdk-dev] [RFC PATCH 1/4] Add example pktdev implementation Bruce Richardson
2015-04-20 11:26     ` Ananyev, Konstantin
2015-04-20 15:02       ` Bruce Richardson
2015-04-21  8:40         ` Ananyev, Konstantin
2015-04-21  9:23           ` Bruce Richardson
2015-04-17 15:16   ` [dpdk-dev] [RFC PATCH 2/4] Make ethdev explicitly a subclass of pktdev Bruce Richardson
2015-04-17 15:16   ` [dpdk-dev] [RFC PATCH 3/4] add support for a ring to be a pktdev Bruce Richardson
2015-04-17 17:31     ` Neil Horman
2015-04-18  0:00     ` Ouyang, Changchun
2015-04-20 10:32     ` Ananyev, Konstantin
2015-04-17 15:16   ` [dpdk-dev] [RFC PATCH 4/4] example app showing pktdevs used in a chain Bruce Richardson
2015-04-17 17:28   ` [dpdk-dev] [RFC PATCH 0/4] pktdev Neil Horman
2015-04-17 18:49   ` Marc Sune
2015-04-17 19:50     ` Wiles, Keith
2015-04-20  6:51       ` Marc Sune
2015-04-20 10:43         ` Bruce Richardson
2015-04-20 17:03           ` Marc Sune
2015-04-20 13:19         ` Wiles, Keith
2015-04-20 13:30           ` Wiles, Keith
2015-05-04 13:13 ` [dpdk-dev] [RFC PATCH 0/4 v2] Extending DPDK with multiple device support Marc Sune
